author    Sidath Senanayake <sidaths@google.com>    2019-05-06 12:21:44 +0200
committer Sidath Senanayake <sidaths@google.com>    2019-05-06 12:21:44 +0200
commit    ac90f0dd5fbae0b94e9720203a8bb2e81fd4b679 (patch)
tree      b8939b75ce4e0f1e8ae07e36648edef7f3a3ce59 /mali_kbase
parent    e972f6531ef8c9d01eae567f52db4f0fd37d1428 (diff)
download  gpu-ac90f0dd5fbae0b94e9720203a8bb2e81fd4b679.tar.gz
Mali Bifrost DDK r18p0 KMD
Provenance: ee36a6687 (collaborate/EAC/b_r18p0)
BX304L01B-BU-00000-r18p0-01rel0
BX304L06A-BU-00000-r18p0-01rel0
BX304X07X-BU-00000-r18p0-01rel0

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: Ie26cb00b475d697c2778306ac09c6a799201ac77
Diffstat (limited to 'mali_kbase')
-rw-r--r--  mali_kbase/Kbuild | 7
-rw-r--r--  mali_kbase/Kconfig | 23
-rw-r--r--  mali_kbase/Makefile | 4
-rw-r--r--  mali_kbase/Mconfig | 24
-rw-r--r--  mali_kbase/backend/gpu/Kbuild | 3
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_devfreq.c | 117
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_devfreq.h | 11
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_gpu.c | 22
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c | 11
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_hw.c | 25
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_rb.c | 4
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c | 108
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.h | 44
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c | 4
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h | 8
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_backend.c | 5
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_defs.h | 7
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c | 86
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_policy.c | 29
-rw-r--r--  mali_kbase/build.bp | 9
-rw-r--r--  mali_kbase/ipa/mali_kbase_ipa_debugfs.c | 2
-rw-r--r--  mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c | 15
-rw-r--r--  mali_kbase/mali_base_hwconfig_features.h | 12
-rw-r--r--  mali_kbase/mali_base_hwconfig_issues.h | 32
-rw-r--r--  mali_kbase/mali_base_kernel.h | 144
-rw-r--r--  mali_kbase/mali_kbase.h | 10
-rw-r--r--  mali_kbase/mali_kbase_bits.h | 41
-rw-r--r--  mali_kbase/mali_kbase_config_defaults.h | 44
-rw-r--r--  mali_kbase/mali_kbase_context.c | 81
-rw-r--r--  mali_kbase/mali_kbase_context.h | 38
-rw-r--r--  mali_kbase/mali_kbase_core_linux.c | 703
-rw-r--r--  mali_kbase/mali_kbase_debug_mem_view.c | 36
-rw-r--r--  mali_kbase/mali_kbase_debug_mem_view.h | 14
-rw-r--r--  mali_kbase/mali_kbase_defs.h | 161
-rw-r--r--  mali_kbase/mali_kbase_device.c | 7
-rw-r--r--  mali_kbase/mali_kbase_event.c | 4
-rw-r--r--  mali_kbase/mali_kbase_gator.h | 5
-rw-r--r--  mali_kbase/mali_kbase_gator_hwcnt_names_tbex.h | 6
-rw-r--r--  mali_kbase/mali_kbase_gator_hwcnt_names_tnax.h | 16
-rw-r--r--  mali_kbase/mali_kbase_gpu_memory_debugfs.c | 10
-rw-r--r--  mali_kbase/mali_kbase_gpuprops.c | 92
-rw-r--r--  mali_kbase/mali_kbase_gpuprops.h | 10
-rw-r--r--  mali_kbase/mali_kbase_hw.c | 6
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_backend.h | 16
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_gpuprops.h | 17
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_pm.h | 15
-rw-r--r--  mali_kbase/mali_kbase_hwcnt_backend_gpu.c | 42
-rw-r--r--  mali_kbase/mali_kbase_ioctl.h | 21
-rw-r--r--  mali_kbase/mali_kbase_jd.c | 4
-rw-r--r--  mali_kbase/mali_kbase_jd_debugfs.c | 9
-rw-r--r--  mali_kbase/mali_kbase_js.c | 4
-rw-r--r--  mali_kbase/mali_kbase_mem.c | 399
-rw-r--r--  mali_kbase/mali_kbase_mem.h | 91
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c | 513
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.h | 10
-rw-r--r--  mali_kbase/mali_kbase_mem_profile_debugfs.c | 6
-rw-r--r--  mali_kbase/mali_kbase_mipe_gen_header.h | 120
-rw-r--r--  mali_kbase/mali_kbase_mipe_proto.h | 113
-rw-r--r--  mali_kbase/mali_kbase_mmu.c | 29
-rw-r--r--  mali_kbase/mali_kbase_mmu_hw.h | 8
-rw-r--r--  mali_kbase/mali_kbase_softjobs.c | 12
-rw-r--r--  mali_kbase/mali_kbase_timeline.c | 341
-rw-r--r--  mali_kbase/mali_kbase_timeline.h | 121
-rw-r--r--  mali_kbase/mali_kbase_timeline_io.c | 313
-rw-r--r--  mali_kbase/mali_kbase_timeline_priv.h | 63
-rw-r--r--  mali_kbase/mali_kbase_tl_serialize.h | 127
-rw-r--r--  mali_kbase/mali_kbase_tlstream.c | 2698
-rw-r--r--  mali_kbase/mali_kbase_tlstream.h | 877
-rw-r--r--  mali_kbase/mali_kbase_tracepoints.c | 2766
-rw-r--r--  mali_kbase/mali_kbase_tracepoints.h | 2358
-rw-r--r--  mali_kbase/mali_midg_regmap.h | 27
-rw-r--r--  mali_kbase/mali_midg_regmap_jm.h | 6
-rw-r--r--  mali_kbase/thirdparty/mali_kbase_mmap.c | 9
73 files changed, 8926 insertions, 4249 deletions
diff --git a/mali_kbase/Kbuild b/mali_kbase/Kbuild
index 09674bf..0ec3721 100644
--- a/mali_kbase/Kbuild
+++ b/mali_kbase/Kbuild
@@ -21,7 +21,7 @@
# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r17p0-01rel0"
+MALI_RELEASE_NAME ?= "r18p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
@@ -108,7 +108,10 @@ SRC := \
mali_kbase_mem_pool.c \
mali_kbase_mem_pool_debugfs.c \
mali_kbase_debugfs_helper.c \
+ mali_kbase_timeline.c \
+ mali_kbase_timeline_io.c \
mali_kbase_tlstream.c \
+ mali_kbase_tracepoints.c \
mali_kbase_strings.c \
mali_kbase_as_fault_debugfs.c \
mali_kbase_regs_history_debugfs.c \
@@ -120,7 +123,7 @@ ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
endif
ifeq ($(MALI_UNIT_TEST),1)
- SRC += mali_kbase_tlstream_test.c
+ SRC += mali_kbase_timeline_test.c
endif
ifeq ($(MALI_CUSTOMER_RELEASE),0)
diff --git a/mali_kbase/Kconfig b/mali_kbase/Kconfig
index 2b35d83..6a7d935 100644
--- a/mali_kbase/Kconfig
+++ b/mali_kbase/Kconfig
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -206,6 +206,27 @@ config MALI_MEMORY_FULLY_BACKED
option only affects allocations of grow-on-GPU-page-fault
memory.
+config MALI_DMA_BUF_MAP_ON_DEMAND
+ bool "Map imported dma-bufs on demand"
+ depends on DMA_SHARED_BUFFER && MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ This option causes kbase to set up the GPU mapping of imported
+ dma-buf when needed to run atoms. This is the legacy behaviour.
+
+ This is intended for testing and the option will get removed in the
+ future.
+
+config MALI_DMA_BUF_LEGACY_COMPAT
+ bool "Enable legacy compatibility cache flush on dma-buf map"
+ depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
+ default n
+ help
+ This option enables compatibility with the legacy dma-buf mapping
+ behaviour, where the dma-buf is mapped on import, by adding cache
+ maintenance (including a cache flush) at the points where
+ MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping.
+
# Instrumentation options.
config MALI_JOB_DUMP
diff --git a/mali_kbase/Makefile b/mali_kbase/Makefile
index 08b2fa9..53a1209 100644
--- a/mali_kbase/Makefile
+++ b/mali_kbase/Makefile
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -25,7 +25,7 @@ KDIR ?= /lib/modules/$(shell uname -r)/build
BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
KBASE_PATH_RELATIVE = $(CURDIR)
-ifeq ($(CONFIG_MALI_FPGA_BUS_LOGGER),y)
+ifeq ($(CONFIG_MALI_BUSLOG),y)
#Add bus logger symbols
EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
endif
diff --git a/mali_kbase/Mconfig b/mali_kbase/Mconfig
index 1f61c9f..d0e78a2 100644
--- a/mali_kbase/Mconfig
+++ b/mali_kbase/Mconfig
@@ -199,11 +199,6 @@ config MALI_2MB_ALLOC
If in doubt, say N
-config MALI_FPGA_BUS_LOGGER
- bool "Enable bus log integration"
- depends on MALI_MIDGARD && MALI_EXPERT
- default n
-
config MALI_PWRSOFT_765
bool "PWRSOFT-765 ticket"
depends on MALI_MIDGARD && MALI_EXPERT
@@ -220,6 +215,25 @@ config MALI_MEMORY_FULLY_BACKED
This option enables full backing of all virtual memory allocations
for the kernel. This only affects grow-on-GPU-page-fault memory.
+config MALI_DMA_BUF_MAP_ON_DEMAND
+ bool "Map imported dma-bufs on demand"
+ depends on MALI_MIDGARD
+ default n
+ default y if !DMA_BUF_SYNC_IOCTL_SUPPORTED
+ help
+ This option causes kbase to set up the GPU mapping of imported
+ dma-buf when needed to run atoms. This is the legacy behaviour.
+
+config MALI_DMA_BUF_LEGACY_COMPAT
+ bool "Enable legacy compatibility cache flush on dma-buf map"
+ depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
+ default n
+ help
+ This option enables compatibility with the legacy dma-buf mapping
+ behaviour, where the dma-buf is mapped on import, by adding cache
+ maintenance (including a cache flush) at the points where
+ MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping.
+
# Instrumentation options.
# config MALI_JOB_DUMP exists in the Kernel Kconfig but is configured using CINSTR_JOB_DUMP in Mconfig.
diff --git a/mali_kbase/backend/gpu/Kbuild b/mali_kbase/backend/gpu/Kbuild
index 2dc1455..2414d51 100644
--- a/mali_kbase/backend/gpu/Kbuild
+++ b/mali_kbase/backend/gpu/Kbuild
@@ -39,7 +39,8 @@ BACKEND += \
backend/gpu/mali_kbase_pm_always_on.c \
backend/gpu/mali_kbase_pm_coarse_demand.c \
backend/gpu/mali_kbase_pm_policy.c \
- backend/gpu/mali_kbase_time.c
+ backend/gpu/mali_kbase_time.c \
+ backend/gpu/mali_kbase_l2_mmu_config.c
ifeq ($(MALI_CUSTOMER_RELEASE),0)
BACKEND += \
diff --git a/mali_kbase/backend/gpu/mali_kbase_devfreq.c b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
index df50dd6..8e8ac04 100644
--- a/mali_kbase/backend/gpu/mali_kbase_devfreq.c
+++ b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,7 +21,7 @@
*/
#include <mali_kbase.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_config_defaults.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
@@ -334,6 +334,105 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
return 0;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+
+static const char *kbase_devfreq_req_type_name(enum kbase_devfreq_work_type type)
+{
+ const char *p;
+
+ switch (type) {
+ case DEVFREQ_WORK_NONE:
+ p = "devfreq_none";
+ break;
+ case DEVFREQ_WORK_SUSPEND:
+ p = "devfreq_suspend";
+ break;
+ case DEVFREQ_WORK_RESUME:
+ p = "devfreq_resume";
+ break;
+ default:
+ p = "Unknown devfreq_type";
+ }
+ return p;
+}
+
+static void kbase_devfreq_suspend_resume_worker(struct work_struct *work)
+{
+ struct kbase_devfreq_queue_info *info = container_of(work,
+ struct kbase_devfreq_queue_info, work);
+ struct kbase_device *kbdev = container_of(info, struct kbase_device,
+ devfreq_queue);
+ unsigned long flags;
+ enum kbase_devfreq_work_type type, acted_type;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ type = kbdev->devfreq_queue.req_type;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ acted_type = kbdev->devfreq_queue.acted_type;
+ dev_dbg(kbdev->dev, "Worker handles queued req: %s (acted: %s)\n",
+ kbase_devfreq_req_type_name(type),
+ kbase_devfreq_req_type_name(acted_type));
+ switch (type) {
+ case DEVFREQ_WORK_SUSPEND:
+ case DEVFREQ_WORK_RESUME:
+ if (type != acted_type) {
+ if (type == DEVFREQ_WORK_RESUME)
+ devfreq_resume_device(kbdev->devfreq);
+ else
+ devfreq_suspend_device(kbdev->devfreq);
+ dev_dbg(kbdev->dev, "Devfreq transition occured: %s => %s\n",
+ kbase_devfreq_req_type_name(acted_type),
+ kbase_devfreq_req_type_name(type));
+ kbdev->devfreq_queue.acted_type = type;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+void kbase_devfreq_enqueue_work(struct kbase_device *kbdev,
+ enum kbase_devfreq_work_type work_type)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ unsigned long flags;
+
+ WARN_ON(work_type == DEVFREQ_WORK_NONE);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->devfreq_queue.req_type = work_type;
+ queue_work(kbdev->devfreq_queue.workq, &kbdev->devfreq_queue.work);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ dev_dbg(kbdev->dev, "Enqueuing devfreq req: %s\n",
+ kbase_devfreq_req_type_name(work_type));
+#endif
+}
+
+static int kbase_devfreq_work_init(struct kbase_device *kbdev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ kbdev->devfreq_queue.req_type = DEVFREQ_WORK_NONE;
+ kbdev->devfreq_queue.acted_type = DEVFREQ_WORK_RESUME;
+
+ kbdev->devfreq_queue.workq = alloc_ordered_workqueue("devfreq_workq", 0);
+ if (!kbdev->devfreq_queue.workq)
+ return -ENOMEM;
+
+ INIT_WORK(&kbdev->devfreq_queue.work,
+ kbase_devfreq_suspend_resume_worker);
+#endif
+ return 0;
+}
+
+static void kbase_devfreq_work_term(struct kbase_device *kbdev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ destroy_workqueue(kbdev->devfreq_queue.workq);
+#endif
+}
+
int kbase_devfreq_init(struct kbase_device *kbdev)
{
struct devfreq_dev_profile *dp;
@@ -369,11 +468,19 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
if (err)
return err;
+ /* Initialise devfreq suspend/resume workqueue */
+ err = kbase_devfreq_work_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Devfreq initialization failed");
+ return err;
+ }
+
kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
"simple_ondemand", NULL);
if (IS_ERR(kbdev->devfreq)) {
kfree(dp->freq_table);
- return PTR_ERR(kbdev->devfreq);
+ err = PTR_ERR(kbdev->devfreq);
+ goto add_device_failed;
}
/* devfreq_add_device only copies a few of kbdev->dev's fields, so
@@ -418,6 +525,8 @@ opp_notifier_failed:
dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
else
kbdev->devfreq = NULL;
+add_device_failed:
+ kbase_devfreq_work_term(kbdev);
return err;
}
@@ -444,4 +553,6 @@ void kbase_devfreq_term(struct kbase_device *kbdev)
kbdev->devfreq = NULL;
kfree(kbdev->opp_table);
+
+ kbase_devfreq_work_term(kbdev);
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_devfreq.h b/mali_kbase/backend/gpu/mali_kbase_devfreq.h
index 0634038..6ffdcd8 100644
--- a/mali_kbase/backend/gpu/mali_kbase_devfreq.h
+++ b/mali_kbase/backend/gpu/mali_kbase_devfreq.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,6 +24,15 @@
#define _BASE_DEVFREQ_H_
int kbase_devfreq_init(struct kbase_device *kbdev);
+
void kbase_devfreq_term(struct kbase_device *kbdev);
+/**
+ * kbase_devfreq_enqueue_work - Enqueue a work item for suspend/resume devfreq.
+ * @kbdev: Device pointer
+ * @work_type: The type of the devfreq work item, i.e. suspend or resume
+ */
+void kbase_devfreq_enqueue_work(struct kbase_device *kbdev,
+ enum kbase_devfreq_work_type work_type);
+
#endif /* _BASE_DEVFREQ_H_ */
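The suspend/resume work items above are intended to be queued from power-management callbacks rather than calling devfreq_suspend_device()/devfreq_resume_device() directly. Below is a minimal sketch of such a caller; the function names are hypothetical, and only kbase_devfreq_enqueue_work() and the DEVFREQ_WORK_* types come from this patch.

/* Hypothetical callers, for illustration only. */
static int example_runtime_suspend(struct kbase_device *kbdev)
{
	/* Defer the devfreq transition to the ordered workqueue so it never
	 * runs in atomic context and is serialised against a later resume. */
	kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
	return 0;
}

static int example_runtime_resume(struct kbase_device *kbdev)
{
	kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
	return 0;
}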
diff --git a/mali_kbase/backend/gpu/mali_kbase_gpu.c b/mali_kbase/backend/gpu/mali_kbase_gpu.c
index 995d34d..c9c10e6 100644
--- a/mali_kbase/backend/gpu/mali_kbase_gpu.c
+++ b/mali_kbase/backend/gpu/mali_kbase_gpu.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -110,10 +110,29 @@ int kbase_backend_late_init(struct kbase_device *kbdev)
if (err)
goto fail_job_slot;
+ /* Do the initialisation of devfreq.
+ * Devfreq needs backend_timer_init() for completion of its
+ * initialisation and it also needs to catch the first callback
+ * occurrence of the runtime_suspend event for maintaining state
+ * coherence with the backend power management, hence needs to be
+ * placed before the kbase_pm_context_idle().
+ */
+ err = kbase_backend_devfreq_init(kbdev);
+ if (err)
+ goto fail_devfreq_init;
+
+ /* Idle the GPU and/or cores, if the policy wants it to */
+ kbase_pm_context_idle(kbdev);
+
+ /* Update gpuprops with L2_FEATURES if applicable */
+ kbase_gpuprops_update_l2_features(kbdev);
+
init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
return 0;
+fail_devfreq_init:
+ kbase_job_slot_term(kbdev);
fail_job_slot:
#ifdef CONFIG_MALI_DEBUG
@@ -133,6 +152,7 @@ fail_pm_powerup:
void kbase_backend_late_term(struct kbase_device *kbdev)
{
+ kbase_backend_devfreq_term(kbdev);
kbase_job_slot_halt(kbdev);
kbase_job_slot_term(kbdev);
kbase_backend_timer_term(kbdev);
diff --git a/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c b/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c
index 39773e6..29018b2 100644
--- a/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -114,3 +114,12 @@ void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
}
}
+void kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump)
+{
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG)) {
+ regdump->l2_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_FEATURES));
+ }
+}
+
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index e7bfa39..26e9fcf 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -27,7 +27,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config.h>
#include <mali_midg_regmap.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_ctx_sched.h>
@@ -233,14 +233,12 @@ static void kbasep_job_slot_update_head_start_timestamp(
int js,
ktime_t end_timestamp)
{
- if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) {
- struct kbase_jd_atom *katom;
- ktime_t timestamp_diff;
- /* The atom in the HEAD */
- katom = kbase_gpu_inspect(kbdev, js, 0);
-
- KBASE_DEBUG_ASSERT(katom != NULL);
+ ktime_t timestamp_diff;
+ struct kbase_jd_atom *katom;
+ /* Checking the HEAD position for the job slot */
+ katom = kbase_gpu_inspect(kbdev, js, 0);
+ if (katom != NULL) {
timestamp_diff = ktime_sub(end_timestamp,
katom->start_timestamp);
if (ktime_to_ns(timestamp_diff) >= 0) {
@@ -334,6 +332,17 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
}
kbase_gpu_irq_evict(kbdev, i, completion_code);
+
+ /* Some jobs that encounter a BUS FAULT may result in corrupted
+ * state causing future jobs to hang. Reset GPU before
+ * allowing any other jobs on the slot to continue. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_3076)) {
+ if (completion_code == BASE_JD_EVENT_JOB_BUS_FAULT) {
+ if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+ kbase_reset_gpu_locked(kbdev);
+ }
+ }
+ }
}
kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
index 5ea6130..eec8765 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,7 +29,7 @@
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_js.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_hwcnt_context.h>
#include <mali_kbase_10969_workaround.h>
#include <backend/gpu/mali_kbase_cache_policy_backend.h>
diff --git a/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c b/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c
new file mode 100644
index 0000000..7bf9e4d
--- /dev/null
+++ b/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_bits.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_l2_mmu_config.h"
+
+/**
+ * struct l2_mmu_config_limit_region
+ *
+ * @value: The default value to load into the L2_MMU_CONFIG register
+ * @mask: The shifted mask of the field in the L2_MMU_CONFIG register
+ * @shift: The shift of where the field starts in the L2_MMU_CONFIG register.
+ *         This should equal the lower bound of the GENMASK used for @mask,
+ *         i.e. the position of the field's least significant bit.
+ */
+struct l2_mmu_config_limit_region {
+ u32 value, mask, shift;
+};
+
+/**
+ * struct l2_mmu_config_limit
+ *
+ * @product_model: The GPU for which this entry applies
+ * @read: Values for the read limit field
+ * @write: Values for the write limit field
+ */
+struct l2_mmu_config_limit {
+ u32 product_model;
+ struct l2_mmu_config_limit_region read;
+ struct l2_mmu_config_limit_region write;
+};
+
+/*
+ * Zero represents no limit
+ *
+ * For TBEX, TTRX and TNAX:
+ * The value represents the number of outstanding reads (6 bits) or writes (5 bits)
+ *
+ * For all other GPUs it is a fraction; see mali_kbase_config_defaults.h
+ */
+static const struct l2_mmu_config_limit limits[] = {
+ /* GPU read write */
+ {GPU_ID2_PRODUCT_TBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
+ {GPU_ID2_PRODUCT_TTRX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
+ {GPU_ID2_PRODUCT_TNAX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
+ {GPU_ID2_PRODUCT_TGOX,
+ {KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
+ {KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
+ {GPU_ID2_PRODUCT_TNOX,
+ {KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
+ {KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
+};
+
+void kbase_set_mmu_quirks(struct kbase_device *kbdev)
+{
+ /* All older GPUs had 2 bits for both fields; this is the default */
+ struct l2_mmu_config_limit limit = {
+ 0, /* Any GPU not in the limits array defined above */
+ {KBASE_AID_32, GENMASK(25, 24), 24},
+ {KBASE_AID_32, GENMASK(27, 26), 26}
+ };
+ u32 product_model, gpu_id;
+ u32 mmu_config;
+ int i;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+
+ for (i = 0; i < ARRAY_SIZE(limits); i++) {
+ if (product_model == limits[i].product_model) {
+ limit = limits[i];
+ break;
+ }
+ }
+
+ mmu_config = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG));
+
+ mmu_config &= ~(limit.read.mask | limit.write.mask);
+ /* Can't use FIELD_PREP() macro here as the mask isn't constant */
+ mmu_config |= (limit.read.value << limit.read.shift) |
+ (limit.write.value << limit.write.shift);
+
+ kbdev->hw_quirks_mmu = mmu_config;
+}
diff --git a/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.h b/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.h
new file mode 100644
index 0000000..25636ee
--- /dev/null
+++ b/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ *//* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ */
+
+#ifndef _KBASE_L2_MMU_CONFIG_H_
+#define _KBASE_L2_MMU_CONFIG_H_
+/**
+ * kbase_set_mmu_quirks - Set the hw_quirks_mmu field of kbdev
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Use this function to initialise the hw_quirks_mmu field, for instance to set
+ * the MAX_READS and MAX_WRITES to sane defaults for each GPU.
+ */
+void kbase_set_mmu_quirks(struct kbase_device *kbdev);
+
+#endif /* _KBASE_L2_MMU_CONFIG_H_ */
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
index b926f4c..a70439d 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,7 +25,7 @@
#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mmu_hw.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <mali_kbase_as_fault_debugfs.h>
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
index 1f76eed..a5bbdf5 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
+++ b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,8 +29,8 @@
* register access implementation of the MMU hardware interface
*/
-#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_
-#define _MALI_KBASE_MMU_HW_DIRECT_H_
+#ifndef _KBASE_MMU_HW_DIRECT_H_
+#define _KBASE_MMU_HW_DIRECT_H_
#include <mali_kbase_defs.h>
@@ -44,4 +44,4 @@
*/
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
-#endif /* _MALI_KBASE_MMU_HW_DIRECT_H_ */
+#endif /* _KBASE_MMU_HW_DIRECT_H_ */
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index 9509875..e04ab73 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -437,9 +437,6 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
mutex_unlock(&kbdev->pm.lock);
mutex_unlock(&js_devdata->runpool_mutex);
- /* Idle the GPU and/or cores, if the policy wants it to */
- kbase_pm_context_idle(kbdev);
-
return 0;
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
index e11cb75..1a865d5 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -38,6 +38,11 @@ struct kbase_device;
struct kbase_jd_atom;
/**
+ * Maximum number of PM policies that may be active on a device.
+ */
+#define KBASE_PM_MAX_NUM_POLICIES (10)
+
+/**
* enum kbase_pm_core_type - The types of core in a GPU.
*
* These enumerated values are used in calls to
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index 3184e57..1172f12 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -29,7 +29,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
#include <mali_midg_regmap.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_smc.h>
@@ -40,6 +40,7 @@
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_l2_mmu_config.h>
#include <linux/of.h>
@@ -450,6 +451,41 @@ static void kbase_pm_trigger_hwcnt_disable(struct kbase_device *kbdev)
}
}
+static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
+{
+ u32 val;
+
+ /*
+ * Skip if it is not supported
+ */
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG))
+ return;
+
+ /*
+ * Skip if size and hash are not given explicitly,
+ * which means default values are used.
+ */
+ if ((kbdev->l2_size_override == 0) && (kbdev->l2_hash_override == 0))
+ return;
+
+ val = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_CONFIG));
+
+ if (kbdev->l2_size_override) {
+ val &= ~L2_CONFIG_SIZE_MASK;
+ val |= (kbdev->l2_size_override << L2_CONFIG_SIZE_SHIFT);
+ }
+
+ if (kbdev->l2_hash_override) {
+ val &= ~L2_CONFIG_HASH_MASK;
+ val |= (kbdev->l2_hash_override << L2_CONFIG_HASH_SHIFT);
+ }
+
+ dev_dbg(kbdev->dev, "Program 0x%x to L2_CONFIG\n", val);
+
+ /* Write L2_CONFIG to override */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_CONFIG), val);
+}
+
static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
{
struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
@@ -481,6 +517,12 @@ static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
switch (backend->l2_state) {
case KBASE_L2_OFF:
if (kbase_pm_is_l2_desired(kbdev)) {
+ /*
+ * Set the desired config for L2 before powering
+ * it on
+ */
+ kbase_pm_l2_config_override(kbdev);
+
/* L2 is required, power on. Powering on the
* tiler will also power the first L2 cache.
*/
@@ -1329,9 +1371,11 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
reset_required = kbdev->pm.backend.callback_power_on(kbdev);
}
- spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock(&kbdev->pm.backend.gpu_powered_lock);
kbdev->pm.backend.gpu_powered = true;
- spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
if (reset_required) {
/* GPU state was lost, reset GPU to ensure it is in a
@@ -1383,13 +1427,14 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
/* Ensure that any IRQ handlers have finished */
kbase_synchronize_irqs(kbdev);
- spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock(&kbdev->pm.backend.gpu_powered_lock);
if (atomic_read(&kbdev->faults_pending)) {
/* Page/bus faults are still being processed. The GPU can not
* be powered off until they have completed */
- spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
- flags);
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return false;
}
@@ -1397,7 +1442,8 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
/* The GPU power may be turned off from this point */
kbdev->pm.backend.gpu_powered = false;
- spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
if (is_suspend && kbdev->pm.backend.callback_power_suspend)
kbdev->pm.backend.callback_power_suspend(kbdev);
@@ -1493,6 +1539,9 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
}
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_2968_TTRX_3162))
+ kbdev->hw_quirks_sc |= SC_VAR_ALGORITHM;
+
if (!kbdev->hw_quirks_sc)
kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
GPU_CONTROL_REG(SHADER_CONFIG));
@@ -1505,28 +1554,7 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
/* Limit the GPU bus bandwidth if the platform needs this. */
- kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_MMU_CONFIG));
-
-
- /* Limit read & write ID width for AXI */
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG)) {
- kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS);
- kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_ARID_LIMIT & 0x7) <<
- L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT;
-
- kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
- kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_AWID_LIMIT & 0x7) <<
- L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT;
- } else {
- kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
- kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
- L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
-
- kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
- kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
- L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
- }
+ kbase_set_mmu_quirks(kbdev);
if (kbdev->system_coherency == COHERENCY_ACE) {
/* Allow memory configuration disparity to be ignored, we
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
index 2f06a0a..795b99f 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -46,34 +46,30 @@ static const struct kbase_pm_policy *const all_policy_list[] = {
#endif /* CONFIG_MALI_NO_MALI */
};
-/* A filtered list of policies available in the system, calculated by filtering
- * all_policy_list based on the flags provided by each policy.
- */
-static const struct kbase_pm_policy *enabled_policy_list[ARRAY_SIZE(all_policy_list)];
-static size_t enabled_policy_count;
-
-static void generate_filtered_policy_list(void)
+static void generate_filtered_policy_list(struct kbase_device *kbdev)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(all_policy_list); ++i) {
const struct kbase_pm_policy *pol = all_policy_list[i];
+ BUILD_BUG_ON(ARRAY_SIZE(all_policy_list) >
+ KBASE_PM_MAX_NUM_POLICIES);
if (platform_power_down_only &&
(pol->flags & KBASE_PM_POLICY_FLAG_DISABLED_WITH_POWER_DOWN_ONLY))
continue;
- enabled_policy_list[enabled_policy_count++] = pol;
+ kbdev->policy_list[kbdev->policy_count++] = pol;
}
}
int kbase_pm_policy_init(struct kbase_device *kbdev)
{
- generate_filtered_policy_list();
- if (enabled_policy_count == 0)
+ generate_filtered_policy_list(kbdev);
+ if (kbdev->policy_count == 0)
return -EINVAL;
- kbdev->pm.backend.pm_current_policy = enabled_policy_list[0];
+ kbdev->pm.backend.pm_current_policy = kbdev->policy_list[0];
kbdev->pm.backend.pm_current_policy->init(kbdev);
return 0;
@@ -180,13 +176,14 @@ void kbase_pm_update_cores_state(struct kbase_device *kbdev)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
-int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
+int kbase_pm_list_policies(struct kbase_device *kbdev,
+ const struct kbase_pm_policy * const **list)
{
- WARN_ON(enabled_policy_count == 0);
+ WARN_ON(kbdev->policy_count == 0);
if (list)
- *list = enabled_policy_list;
+ *list = kbdev->policy_list;
- return enabled_policy_count;
+ return kbdev->policy_count;
}
KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
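With the filtered list now stored per device, callers pass the kbase_device when enumerating policies. A hedged sketch of such a consumer follows; the wrapper function and the use of the policy 'name' field are assumptions, while kbase_pm_list_policies() itself comes from this patch.

/* Hypothetical consumer of the per-device policy list. */
static void example_print_policies(struct kbase_device *kbdev)
{
	const struct kbase_pm_policy *const *list;
	int count = kbase_pm_list_policies(kbdev, &list);
	int i;

	for (i = 0; i < count; i++)
		dev_info(kbdev->dev, "PM policy %d: %s\n", i, list[i]->name);
}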
diff --git a/mali_kbase/build.bp b/mali_kbase/build.bp
index a971143..b388f23 100644
--- a/mali_kbase/build.bp
+++ b/mali_kbase/build.bp
@@ -28,8 +28,8 @@ bob_defaults {
mali_debug: {
kbuild_options: ["CONFIG_MALI_DEBUG=y"],
},
- mali_fpga_bus_logger: {
- kbuild_options: ["CONFIG_MALI_FPGA_BUS_LOGGER=y"],
+ buslog: {
+ kbuild_options: ["CONFIG_MALI_BUSLOG=y"],
},
cinstr_job_dump: {
kbuild_options: ["CONFIG_MALI_JOB_DUMP=y"],
@@ -52,6 +52,9 @@ bob_defaults {
mali_memory_fully_backed: {
kbuild_options: ["CONFIG_MALI_MEMORY_FULLY_BACKED=y"],
},
+ mali_dma_buf_map_on_demand: {
+ kbuild_options: ["CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND=y"],
+ },
kbuild_options: [
"MALI_UNIT_TEST={{.unit_test_code}}",
"MALI_CUSTOMER_RELEASE={{.release}}",
@@ -87,7 +90,7 @@ bob_kernel_module {
"CONFIG_MALI_NO_MALI_DEFAULT_GPU={{.gpu}}",
"CONFIG_MALI_PLATFORM_NAME={{.mali_platform_name}}",
],
- mali_fpga_bus_logger: {
+ buslog: {
extra_symbols: [
"bus_logger",
],
diff --git a/mali_kbase/ipa/mali_kbase_ipa_debugfs.c b/mali_kbase/ipa/mali_kbase_ipa_debugfs.c
index 6e8c23c..bc247f2 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_debugfs.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c b/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c
index 1a6ba01..9fae8f1 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -51,9 +51,16 @@ static inline u32 kbase_ipa_read_hwcnt(
static inline s64 kbase_ipa_add_saturate(s64 a, s64 b)
{
- if (S64_MAX - a < b)
- return S64_MAX;
- return a + b;
+ s64 rtn;
+
+ if (a > 0 && (S64_MAX - a) < b)
+ rtn = S64_MAX;
+ else if (a < 0 && (S64_MIN - a) > b)
+ rtn = S64_MIN;
+ else
+ rtn = a + b;
+
+ return rtn;
}
s64 kbase_ipa_sum_all_shader_cores(
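The reworked helper now saturates in both directions instead of only clamping at the positive limit. Below is a small userspace check of the same logic, using INT64_MAX/INT64_MIN from <stdint.h> in place of the kernel's S64_MAX/S64_MIN; it is illustrative only, not driver code.

#include <assert.h>
#include <stdint.h>

static int64_t add_saturate(int64_t a, int64_t b)
{
	if (a > 0 && (INT64_MAX - a) < b)
		return INT64_MAX;
	if (a < 0 && (INT64_MIN - a) > b)
		return INT64_MIN;
	return a + b;
}

int main(void)
{
	assert(add_saturate(INT64_MAX, 1) == INT64_MAX);  /* clamps high            */
	assert(add_saturate(INT64_MIN, -1) == INT64_MIN); /* clamps low (new case)  */
	assert(add_saturate(-5, 3) == -2);                /* normal path unaffected */
	return 0;
}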
diff --git a/mali_kbase/mali_base_hwconfig_features.h b/mali_kbase/mali_base_hwconfig_features.h
index 03e326f..78dc8db 100644
--- a/mali_kbase/mali_base_hwconfig_features.h
+++ b/mali_kbase/mali_base_hwconfig_features.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -58,9 +58,9 @@ enum base_hw_feature {
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_TLS_HASHING,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
BASE_HW_FEATURE_END
};
@@ -319,7 +319,6 @@ static const enum base_hw_feature base_hw_features_tNOx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_TLS_HASHING,
- BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_END
};
@@ -350,7 +349,6 @@ static const enum base_hw_feature base_hw_features_tGOx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_TLS_HASHING,
- BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_END
};
@@ -463,6 +461,7 @@ static const enum base_hw_feature base_hw_features_tBEx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
BASE_HW_FEATURE_END
};
@@ -491,6 +490,7 @@ static const enum base_hw_feature base_hw_features_tULx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
BASE_HW_FEATURE_END
};
@@ -519,6 +519,7 @@ static const enum base_hw_feature base_hw_features_tDUx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
BASE_HW_FEATURE_END
};
@@ -575,6 +576,7 @@ static const enum base_hw_feature base_hw_features_tIDx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
BASE_HW_FEATURE_END
};
@@ -603,6 +605,7 @@ static const enum base_hw_feature base_hw_features_tVAx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
BASE_HW_FEATURE_END
};
@@ -632,7 +635,6 @@ static const enum base_hw_feature base_hw_features_tEGx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_TLS_HASHING,
- BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
BASE_HW_FEATURE_END
};
diff --git a/mali_kbase/mali_base_hwconfig_issues.h b/mali_kbase/mali_base_hwconfig_issues.h
index 5b3a854..59610b4 100644
--- a/mali_kbase/mali_base_hwconfig_issues.h
+++ b/mali_kbase/mali_base_hwconfig_issues.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -129,6 +129,9 @@ enum base_hw_issue {
BASE_HW_ISSUE_TNOX_1194,
BASE_HW_ISSUE_TGOX_R1_1234,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TSIX_1792,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+ BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_END
};
@@ -1102,6 +1105,7 @@ static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TSIX_1792,
BASE_HW_ISSUE_END
};
@@ -1111,6 +1115,7 @@ static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TSIX_1792,
BASE_HW_ISSUE_END
};
@@ -1224,6 +1229,18 @@ static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+ BASE_HW_ISSUE_TTRX_3076,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tTRx_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+ BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_END
};
@@ -1241,6 +1258,18 @@ static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+ BASE_HW_ISSUE_TTRX_3076,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tNAx_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+ BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_END
};
@@ -1258,6 +1287,7 @@ static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_END
};
diff --git a/mali_kbase/mali_base_kernel.h b/mali_kbase/mali_base_kernel.h
index 02a823a..0beb62c 100644
--- a/mali_kbase/mali_base_kernel.h
+++ b/mali_kbase/mali_base_kernel.h
@@ -90,6 +90,10 @@ typedef struct base_mem_handle {
*/
#define BASE_MEM_GROUP_DEFAULT (0)
+/* Number of physical memory groups.
+ */
+#define BASE_MEM_GROUP_COUNT (16)
+
/**
* typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
*
@@ -133,7 +137,7 @@ typedef u32 base_mem_alloc_flags;
/* Will be permanently mapped in kernel space.
* Flag is only allowed on allocations originating from kbase.
*/
-#define BASE_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
/* The allocation will completely reside within the same 4GB chunk in the GPU
* virtual space.
@@ -143,7 +147,11 @@ typedef u32 base_mem_alloc_flags;
*/
#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
-#define BASE_MEM_RESERVED_BIT_7 ((base_mem_alloc_flags)1 << 7)
+/* Userspace is not allowed to free this memory.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
+
#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
/* Grow backing store on GPU Page Fault
@@ -214,11 +222,26 @@ typedef u32 base_mem_alloc_flags;
*/
#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
-/* Number of bits used as flags for base memory management
+/*
+ * Bits [22:25] for group_id (0~15).
+ *
+ * In user space, inline function base_mem_group_id_set() can be used with
+ * numeric value (0~15) to generate a specific memory group ID.
+ *
+ * group_id is packed into the in.flags field of kbase_ioctl_mem_alloc and
+ * delivered to kernel space via ioctl; the kernel driver can then use the
+ * inline function base_mem_group_id_get() to extract group_id from the flags.
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK \
+ ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+
+/**
+ * Number of bits used as flags for base memory management
*
* Must be kept in sync with the base_mem_alloc_flags flags
*/
-#define BASE_MEM_FLAGS_NR_BITS 22
+#define BASE_MEM_FLAGS_NR_BITS 26
/* A mask for all output bits, excluding IN/OUT bits.
*/
@@ -229,6 +252,43 @@ typedef u32 base_mem_alloc_flags;
#define BASE_MEM_FLAGS_INPUT_MASK \
(((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+/**
+ * base_mem_group_id_get() - Get group ID from flags
+ * @flags: Flags to pass to base_mem_alloc
+ *
+ * This inline function extracts the encoded group ID from flags
+ * and converts it into a numeric value (0~15).
+ *
+ * Return: group ID (0~15) extracted from the parameter
+ */
+static inline int base_mem_group_id_get(base_mem_alloc_flags flags)
+{
+ LOCAL_ASSERT((flags & ~BASE_MEM_FLAGS_INPUT_MASK) == 0);
+ return (int)((flags & BASE_MEM_GROUP_ID_MASK) >>
+ BASEP_MEM_GROUP_ID_SHIFT);
+}
+
+/**
+ * base_mem_group_id_set() - Set group ID into base_mem_alloc_flags
+ * @id: group ID(0~15) you want to encode
+ *
+ * This inline function encodes specific group ID into base_mem_alloc_flags.
+ * Parameter 'id' should lie in-between 0 to 15.
+ *
+ * Return: base_mem_alloc_flags with the group ID (id) encoded
+ *
+ * The return value can be combined with other flags against base_mem_alloc
+ * to identify a specific memory group.
+ */
+static inline base_mem_alloc_flags base_mem_group_id_set(int id)
+{
+ LOCAL_ASSERT(id >= 0);
+ LOCAL_ASSERT(id < BASE_MEM_GROUP_COUNT);
+
+ return ((base_mem_alloc_flags)id << BASEP_MEM_GROUP_ID_SHIFT) &
+ BASE_MEM_GROUP_ID_MASK;
+}
+
/* A mask for all the flags which are modifiable via the base_mem_set_flags
* interface.
*/
@@ -240,13 +300,13 @@ typedef u32 base_mem_alloc_flags;
/* A mask of all currently reserved flags
*/
#define BASE_MEM_FLAGS_RESERVED \
- (BASE_MEM_RESERVED_BIT_7 | BASE_MEM_RESERVED_BIT_8 | \
- BASE_MEM_MAYBE_RESERVED_BIT_19)
+ (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_MAYBE_RESERVED_BIT_19)
/* A mask of all the flags which are only valid for allocations within kbase,
* and may not be passed from user space.
*/
-#define BASE_MEM_FLAGS_KERNEL_ONLY (BASE_MEM_PERMANENT_KERNEL_MAPPING)
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
/* A mask of all the flags that can be returned via the base_mem_get_flags()
* interface.
@@ -255,7 +315,7 @@ typedef u32 base_mem_alloc_flags;
(BASE_MEM_FLAGS_INPUT_MASK & ~(BASE_MEM_SAME_VA | \
BASE_MEM_COHERENT_SYSTEM_REQUIRED | BASE_MEM_DONT_NEED | \
BASE_MEM_IMPORT_SHARED | BASE_MEM_FLAGS_RESERVED | \
- BASE_MEM_FLAGS_KERNEL_ONLY))
+ BASEP_MEM_FLAGS_KERNEL_ONLY))
/**
* enum base_mem_import_type - Memory types supported by @a base_mem_import
@@ -1629,20 +1689,27 @@ typedef u32 base_context_create_flags;
((base_context_create_flags)1 << 1)
-/**
- * Bitpattern describing the ::base_context_create_flags that can be
- * passed to base_context_init()
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
*/
-#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
- (BASE_CONTEXT_CCTX_EMBEDDED | \
- BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
-/**
- * Bitpattern describing the ::base_context_create_flags that can be
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+ ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
* passed to the kernel
*/
-#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
- BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+ (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \
+ BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
/*
* Private flags used on the base context
@@ -1653,7 +1720,46 @@ typedef u32 base_context_create_flags;
* not collide with them.
*/
/** Private flag tracking whether job descriptor dumping is disabled */
-#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1 << 31))
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \
+ ((base_context_create_flags)(1 << 31))
+
+/**
+ * base_context_mmu_group_id_set - Encode a memory group ID in
+ * base_context_create_flags
+ *
+ * Memory allocated for GPU page tables will come from the specified group.
+ *
+ * @group_id: Physical memory group ID. Range is 0..(BASE_MEM_GROUP_COUNT-1).
+ *
+ * Return: Bitmask of flags to pass to base_context_init.
+ */
+static inline base_context_create_flags base_context_mmu_group_id_set(
+ int const group_id)
+{
+ LOCAL_ASSERT(group_id >= 0);
+ LOCAL_ASSERT(group_id < BASE_MEM_GROUP_COUNT);
+ return BASEP_CONTEXT_MMU_GROUP_ID_MASK &
+ ((base_context_create_flags)group_id <<
+ BASEP_CONTEXT_MMU_GROUP_ID_SHIFT);
+}
+
+/**
+ * base_context_mmu_group_id_get - Decode a memory group ID from
+ * base_context_create_flags
+ *
+ * Memory allocated for GPU page tables will come from the returned group.
+ *
+ * @flags: Bitmask of flags to pass to base_context_init.
+ *
+ * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
+ */
+static inline int base_context_mmu_group_id_get(
+ base_context_create_flags const flags)
+{
+ LOCAL_ASSERT(flags == (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
+ return (int)((flags & BASEP_CONTEXT_MMU_GROUP_ID_MASK) >>
+ BASEP_CONTEXT_MMU_GROUP_ID_SHIFT);
+}
/** @} end group base_user_api_core */
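To illustrate how the new group-ID encodings fit together, here is a hedged sketch of a userspace caller. Only the helpers and flag names come from mali_base_kernel.h; the wrapper function and the choice of group 3 are hypothetical.

#include "mali_base_kernel.h"  /* assumed to be on the include path */

static void example_group_id_usage(void)
{
	/* Allocation readable by CPU and GPU, backed by physical memory group 3. */
	base_mem_alloc_flags alloc_flags =
		BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
		base_mem_group_id_set(3);

	/* Context whose GPU page tables are also allocated from group 3. */
	base_context_create_flags ctx_flags =
		base_context_mmu_group_id_set(3);

	/* The kernel side recovers the numeric IDs with the matching getters. */
	int mem_group = base_mem_group_id_get(alloc_flags);        /* == 3 */
	int mmu_group = base_context_mmu_group_id_get(ctx_flags);  /* == 3 */

	(void)mem_group;
	(void)mmu_group;
}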
diff --git a/mali_kbase/mali_kbase.h b/mali_kbase/mali_kbase.h
index 24a021d..a5cdd2a 100644
--- a/mali_kbase/mali_kbase.h
+++ b/mali_kbase/mali_kbase.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -111,9 +111,9 @@ void kbase_release_device(struct kbase_device *kbdev);
/**
- * kbase_get_unmapped_area() - get an address range which is currently
- * unmapped.
- * @filp: File operations associated with kbase device.
+ * kbase_context_get_unmapped_area() - get an address range which is currently
+ * unmapped.
+ * @kctx: A kernel base context (which has its own GPU address space).
* @addr: CPU mapped address (set to 0 since MAP_FIXED mapping is not allowed
* as Mali GPU driver decides about the mapping).
* @len: Length of the address range.
@@ -148,7 +148,7 @@ void kbase_release_device(struct kbase_device *kbdev);
* Return: if successful, address of the unmapped area aligned as required;
* error code (negative) in case of failure;
*/
-unsigned long kbase_get_unmapped_area(struct file *filp,
+unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
const unsigned long addr, const unsigned long len,
const unsigned long pgoff, const unsigned long flags);
diff --git a/mali_kbase/mali_kbase_bits.h b/mali_kbase/mali_kbase_bits.h
new file mode 100644
index 0000000..2c11093
--- /dev/null
+++ b/mali_kbase/mali_kbase_bits.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ *//* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#ifndef _KBASE_BITS_H_
+#define _KBASE_BITS_H_
+
+#if (KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE)
+#include <linux/bits.h>
+#else
+#include <linux/bitops.h>
+#endif
+
+#endif /* _KBASE_BITS_H_ */
diff --git a/mali_kbase/mali_kbase_config_defaults.h b/mali_kbase/mali_kbase_config_defaults.h
index cfb9a41..447e059 100644
--- a/mali_kbase/mali_kbase_config_defaults.h
+++ b/mali_kbase/mali_kbase_config_defaults.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -109,48 +109,6 @@ enum {
};
/**
- * Default setting for read Address ID limiting on AXI bus.
- *
- * Attached value: u32 register value
- * KBASE_AID_32 - use the full 32 IDs (5 ID bits)
- * KBASE_AID_16 - use 16 IDs (4 ID bits)
- * KBASE_AID_8 - use 8 IDs (3 ID bits)
- * KBASE_AID_4 - use 4 IDs (2 ID bits)
- * Default value: KBASE_AID_32 (no limit). Note hardware implementation
- * may limit to a lower value.
- */
-#define DEFAULT_ARID_LIMIT KBASE_AID_32
-
-/**
- * Default setting for write Address ID limiting on AXI.
- *
- * Attached value: u32 register value
- * KBASE_AID_32 - use the full 32 IDs (5 ID bits)
- * KBASE_AID_16 - use 16 IDs (4 ID bits)
- * KBASE_AID_8 - use 8 IDs (3 ID bits)
- * KBASE_AID_4 - use 4 IDs (2 ID bits)
- * Default value: KBASE_AID_32 (no limit). Note hardware implementation
- * may limit to a lower value.
- */
-#define DEFAULT_AWID_LIMIT KBASE_AID_32
-
-/**
- * Default setting for read Address ID limiting on AXI bus.
- *
- * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation
- * may limit to a lower value.
- */
-#define DEFAULT_3BIT_ARID_LIMIT KBASE_3BIT_AID_32
-
-/**
- * Default setting for write Address ID limiting on AXI.
- *
- * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation
- * may limit to a lower value.
- */
-#define DEFAULT_3BIT_AWID_LIMIT KBASE_3BIT_AID_32
-
-/**
* Default period for DVFS sampling
*/
#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */
diff --git a/mali_kbase/mali_kbase_context.c b/mali_kbase/mali_kbase_context.c
index 6489a4f..d4cbb8b 100644
--- a/mali_kbase/mali_kbase_context.c
+++ b/mali_kbase/mali_kbase_context.c
@@ -32,15 +32,26 @@
#include <mali_kbase_dma_fence.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem_pool_group.h>
+#include <mali_kbase_tracepoints.h>
struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat)
+kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+ base_context_create_flags const flags,
+ unsigned long const api_version,
+ struct file *const filp)
{
struct kbase_context *kctx;
int err;
struct page *p;
+ struct kbasep_js_kctx_info *js_kctx_info = NULL;
+ unsigned long irq_flags = 0;
- KBASE_DEBUG_ASSERT(kbdev != NULL);
+ if (WARN_ON(!kbdev))
+ goto out;
+
+ /* Validate flags */
+ if (WARN_ON(flags != (flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS)))
+ goto out;
/* zero-inited as lot of code assume it's zero'ed out on create */
kctx = vzalloc(sizeof(*kctx));
@@ -61,8 +72,6 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
#endif /* !defined(CONFIG_64BIT) */
- atomic_set(&kctx->setup_complete, 0);
- atomic_set(&kctx->setup_in_progress, 0);
spin_lock_init(&kctx->mm_update_lock);
kctx->process_mm = NULL;
atomic_set(&kctx->nonmapped_pages, 0);
@@ -106,7 +115,8 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
if (err)
goto free_event;
- err = kbase_mmu_init(kbdev, &kctx->mmu, kctx);
+ err = kbase_mmu_init(kbdev, &kctx->mmu, kctx,
+ base_context_mmu_group_id_get(flags));
if (err)
goto term_dma_fence;
@@ -145,6 +155,26 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
kbase_timer_setup(&kctx->soft_job_timeout,
kbasep_soft_job_timeout_worker);
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_add(&kctx->kctx_list_link, &kbdev->kctx_list);
+ KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx->id, (u32)(kctx->tgid));
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ kctx->api_version = api_version;
+ kctx->filp = filp;
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+ /* Translate the flags */
+ if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
return kctx;
no_jit:
@@ -194,10 +224,17 @@ void kbase_destroy_context(struct kbase_context *kctx)
unsigned long flags;
struct page *p;
- KBASE_DEBUG_ASSERT(NULL != kctx);
+ if (WARN_ON(!kctx))
+ return;
kbdev = kctx->kbdev;
- KBASE_DEBUG_ASSERT(NULL != kbdev);
+ if (WARN_ON(!kbdev))
+ return;
+
+ mutex_lock(&kbdev->kctx_list_lock);
+ KBASE_TLSTREAM_TL_DEL_CTX(kbdev, kctx);
+ list_del(&kctx->kctx_list_link);
+ mutex_unlock(&kbdev->kctx_list_lock);
KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
@@ -292,33 +329,3 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_pm_context_idle(kbdev);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);
-
-int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
-{
- int err = 0;
- struct kbasep_js_kctx_info *js_kctx_info;
- unsigned long irq_flags;
-
- KBASE_DEBUG_ASSERT(NULL != kctx);
-
- js_kctx_info = &kctx->jctx.sched_info;
-
- /* Validate flags */
- if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
- err = -EINVAL;
- goto out;
- }
-
- mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
- spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
-
- /* Translate the flags */
- if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
- kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
-
- spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
- mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
- out:
- return err;
-}
-KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
diff --git a/mali_kbase/mali_kbase_context.h b/mali_kbase/mali_kbase_context.h
index 30b0f64..5037b4e 100644
--- a/mali_kbase/mali_kbase_context.h
+++ b/mali_kbase/mali_kbase_context.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016, 2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2017, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,36 +27,38 @@
/**
* kbase_create_context() - Create a kernel base context.
- * @kbdev: Kbase device
- * @is_compat: Force creation of a 32-bit context
*
- * Allocate and init a kernel base context.
+ * @kbdev: Object representing an instance of GPU platform device,
+ * allocated from the probe method of the Mali driver.
+ * @is_compat: Force creation of a 32-bit context
+ * @flags: Flags to set, which shall be any combination of
+ * BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
+ * @api_version: Application program interface version, as encoded in
+ * a single integer by the KBASE_API_VERSION macro.
+ * @filp: Pointer to the struct file corresponding to device file
+ * /dev/malixx instance, passed to the file's open method.
*
- * Return: new kbase context
+ * Up to one context can be created for each client that opens the device file
+ * /dev/malixx. Context creation is deferred until a special ioctl() system call
+ * is made on the device file. Each context has its own GPU address space.
+ *
+ * Return: new kbase context or NULL on failure
*/
struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat);
+kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+ base_context_create_flags const flags,
+ unsigned long api_version,
+ struct file *filp);
/**
* kbase_destroy_context - Destroy a kernel base context.
* @kctx: Context to destroy
*
- * Calls kbase_destroy_os_context() to free OS specific structures.
* Will release all outstanding regions.
*/
void kbase_destroy_context(struct kbase_context *kctx);
/**
- * kbase_context_set_create_flags - Set creation flags on a context
- * @kctx: Kbase context
- * @flags: Flags to set, which shall be one of the flags of
- * BASE_CONTEXT_CREATE_KERNEL_FLAGS.
- *
- * Return: 0 on success, -EINVAL otherwise when an invalid flag is specified.
- */
-int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
-
-/**
* kbase_ctx_flag - Check if @flag is set on @kctx
* @kctx: Pointer to kbase context to check
* @flag: Flag to check
@@ -107,7 +109,7 @@ static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
/**
* kbase_ctx_flag_set - Set @flag on @kctx
* @kctx: Pointer to kbase context
- * @flag: Flag to clear
+ * @flag: Flag to set
*
* Set the @flag on @kctx. This is done atomically, so other flags being
* cleared or set at the same time will be safe.
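A minimal sketch of how a driver-internal caller would use the revised context API together with the atomic flag helpers; the flag choice and error handling below are assumptions made for illustration, not taken from the patch.

#include <mali_kbase.h>

static int example_make_context(struct kbase_device *kbdev,
		struct file *filp, unsigned long api_version)
{
	struct kbase_context *kctx;

	/* Flags are validated against BASEP_CONTEXT_CREATE_KERNEL_FLAGS by
	 * kbase_create_context() itself; NULL is returned on any failure.
	 */
	kctx = kbase_create_context(kbdev, false,
			BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED,
			api_version, filp);
	if (!kctx)
		return -ENOMEM;

	/* Context flags can be queried and updated atomically. */
	if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED))
		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);

	kbase_destroy_context(kctx);
	return 0;
}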
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index 96f4d01..d0d584e 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -97,7 +97,7 @@
#include <linux/pm_runtime.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_timeline.h>
#include <mali_kbase_as_fault_debugfs.h>
@@ -113,9 +113,183 @@ static LIST_HEAD(kbase_dev_list);
#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
-static int kbase_api_handshake(struct kbase_context *kctx,
+/**
+ * kbase_file_new - Create an object representing a device file
+ *
+ * @kbdev: An instance of the GPU platform device, allocated from the probe
+ * method of the driver.
+ * @filp: Pointer to the struct file corresponding to device file
+ * /dev/malixx instance, passed to the file's open method.
+ *
+ * In its initial state, the device file has no context (i.e. no GPU
+ * address space) and no API version number. Both must be assigned before
+ * kbase_file_get_kctx_if_setup_complete() can be used successfully.
+ *
+ * Return: Address of an object representing the device file, or NULL
+ * on failure.
+ */
+static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
+ struct file *const filp)
+{
+ struct kbase_file *const kfile = kmalloc(sizeof(*kfile), GFP_KERNEL);
+
+ if (kfile) {
+ kfile->kbdev = kbdev;
+ kfile->filp = filp;
+ kfile->kctx = NULL;
+ kfile->api_version = 0;
+ atomic_set(&kfile->setup_state, KBASE_FILE_NEED_VSN);
+ }
+ return kfile;
+}
+
+/**
+ * kbase_file_set_api_version - Set the application programmer interface version
+ *
+ * @kfile: A device file created by kbase_file_new()
+ * @major: Major version number (must not exceed 12 bits)
+ * @minor: Minor version number (must not exceed 12 bits)
+ *
+ * An application programmer interface (API) version must be specified
+ * before calling kbase_file_create_kctx(), otherwise an error is returned.
+ *
+ * If a version number was already set for the given @kfile (or is in the
+ * process of being set by another thread) then an error is returned.
+ *
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int kbase_file_set_api_version(struct kbase_file *const kfile,
+ u16 const major, u16 const minor)
+{
+ if (WARN_ON(!kfile))
+ return -EINVAL;
+
+ /* Setup is pending; try to signal that we will do the setup.
+ * If setup was already in progress, reject this call with an error.
+ */
+ if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN,
+ KBASE_FILE_VSN_IN_PROGRESS) != KBASE_FILE_NEED_VSN)
+ return -EPERM;
+
+ /* save the proposed version number for later use */
+ kfile->api_version = KBASE_API_VERSION(major, minor);
+
+ atomic_set(&kfile->setup_state, KBASE_FILE_NEED_CTX);
+ return 0;
+}
+
+/**
+ * kbase_file_get_api_version - Get the application programmer interface version
+ *
+ * @kfile: A device file created by kbase_file_new()
+ *
+ * Return: The version number (encoded with KBASE_API_VERSION) or 0 if none has
+ * been set.
+ */
+static unsigned long kbase_file_get_api_version(struct kbase_file *const kfile)
+{
+ if (WARN_ON(!kfile))
+ return 0;
+
+ if (atomic_read(&kfile->setup_state) < KBASE_FILE_NEED_CTX)
+ return 0;
+
+ return kfile->api_version;
+}
+
+/**
+ * kbase_file_create_kctx - Create a kernel base context
+ *
+ * @kfile: A device file created by kbase_file_new()
+ * @flags: Flags to set, which can be any combination of
+ * BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
+ *
+ * This creates a new context for the GPU platform device instance that was
+ * specified when kbase_file_new() was called. Each context has its own GPU
+ * address space. If a context was already created for the given @kfile (or is
+ * in the process of being created for it by another thread) then an error is
+ * returned.
+ *
+ * An API version number must have been set by kbase_file_set_api_version()
+ * before calling this function, otherwise an error is returned.
+ *
+ * Return: 0 if a new context was created, otherwise a negative error code.
+ */
+static int kbase_file_create_kctx(struct kbase_file *kfile,
+ base_context_create_flags flags);
+
+/**
+ * kbase_file_get_kctx_if_setup_complete - Get a kernel base context
+ * pointer from a device file
+ *
+ * @kfile: A device file created by kbase_file_new()
+ *
+ * This function returns NULL if no context has been created for the given
+ * @kfile. This makes it safe to use in circumstances where the order of
+ * initialization cannot be enforced, but only if the caller checks the
+ * return value.
+ *
+ * Return: Address of the kernel base context associated with the @kfile, or
+ * NULL if no context exists.
+ */
+static struct kbase_context *kbase_file_get_kctx_if_setup_complete(
+ struct kbase_file *const kfile)
+{
+ if (WARN_ON(!kfile) ||
+ atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
+ WARN_ON(!kfile->kctx))
+ return NULL;
+
+ return kfile->kctx;
+}
+
+/**
+ * kbase_file_delete - Destroy an object representing a device file
+ *
+ * @kfile: A device file created by kbase_file_new()
+ *
+ * If any context was created for the @kfile then it is destroyed.
+ */
+static void kbase_file_delete(struct kbase_file *const kfile)
+{
+ struct kbase_device *kbdev = NULL;
+
+ if (WARN_ON(!kfile))
+ return;
+
+ kfile->filp->private_data = NULL;
+ kbdev = kfile->kbdev;
+
+ if (atomic_read(&kfile->setup_state) == KBASE_FILE_COMPLETE) {
+ struct kbase_context *kctx = kfile->kctx;
+
+#ifdef CONFIG_DEBUG_FS
+ kbasep_mem_profile_debugfs_remove(kctx);
+#endif
+
+ mutex_lock(&kctx->legacy_hwcnt_lock);
+ /* If this client was performing hardware counter dumping and
+ * did not explicitly detach itself, destroy it now
+ */
+ kbase_hwcnt_legacy_client_destroy(kctx->legacy_hwcnt_cli);
+ kctx->legacy_hwcnt_cli = NULL;
+ mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+ kbase_destroy_context(kctx);
+
+ dev_dbg(kbdev->dev, "deleted base context\n");
+ }
+
+ kbase_release_device(kbdev);
+
+ kfree(kfile);
+}
+
+static int kbase_api_handshake(struct kbase_file *kfile,
struct kbase_ioctl_version_check *version)
{
+ int err = 0;
+
switch (version->major) {
case BASE_UK_VERSION_MAJOR:
/* set minor to be the lowest common */
@@ -134,9 +308,19 @@ static int kbase_api_handshake(struct kbase_context *kctx,
}
/* save the proposed version number for later use */
- kctx->api_version = KBASE_API_VERSION(version->major, version->minor);
+ err = kbase_file_set_api_version(kfile, version->major, version->minor);
+ if (unlikely(err))
+ return err;
- return 0;
+ /* For backward compatibility, we may need to create the context before
+ * the flags have been set. Originally it was created on file open
+ * (with job submission disabled) but we don't support that usage.
+ */
+ if (kbase_file_get_api_version(kfile) < KBASE_API_VERSION(11, 15))
+ err = kbase_file_create_kctx(kfile,
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
+
+ return err;
}
/**
@@ -391,34 +575,38 @@ static const struct file_operations kbase_force_same_va_fops = {
.read = read_ctx_force_same_va,
};
-static int kbase_open(struct inode *inode, struct file *filp)
+static int kbase_file_create_kctx(struct kbase_file *const kfile,
+ base_context_create_flags const flags)
{
struct kbase_device *kbdev = NULL;
- struct kbase_context *kctx;
- int ret = 0;
+ struct kbase_context *kctx = NULL;
#ifdef CONFIG_DEBUG_FS
char kctx_name[64];
#endif
- kbdev = kbase_find_device(iminor(inode));
+ if (WARN_ON(!kfile))
+ return -EINVAL;
- if (!kbdev)
- return -ENODEV;
+ /* Setup is pending; try to signal that we will do the setup.
+ * If setup was already in progress, reject this call with an error.
+ */
+ if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX,
+ KBASE_FILE_CTX_IN_PROGRESS) != KBASE_FILE_NEED_CTX)
+ return -EPERM;
+
+ kbdev = kfile->kbdev;
#if (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE)
- kctx = kbase_create_context(kbdev, in_compat_syscall());
+ kctx = kbase_create_context(kbdev, in_compat_syscall(),
+ flags, kfile->api_version, kfile->filp);
#else
- kctx = kbase_create_context(kbdev, is_compat_task());
+ kctx = kbase_create_context(kbdev, is_compat_task(),
+ flags, kfile->api_version, kfile->filp);
#endif /* (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE) */
- if (!kctx) {
- ret = -ENOMEM;
- goto out;
- }
- init_waitqueue_head(&kctx->event_queue);
- filp->private_data = kctx;
- filp->f_mode |= FMODE_UNSIGNED_OFFSET;
- kctx->filp = filp;
+ /* if bad flags, will stay stuck in setup mode */
+ if (!kctx)
+ return -ENOMEM;
if (kbdev->infinite_cache_active_default)
kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
@@ -430,115 +618,113 @@ static int kbase_open(struct inode *inode, struct file *filp)
kbdev->debugfs_ctx_directory);
if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
- ret = -ENOMEM;
- goto out;
- }
-
- debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
- kctx, &kbase_infinite_cache_fops);
- debugfs_create_file("force_same_va", S_IRUSR | S_IWUSR,
- kctx->kctx_dentry, kctx, &kbase_force_same_va_fops);
+ /* we don't treat this as a fail - just warn about it */
+ dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
+ } else {
+ debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
+ kctx, &kbase_infinite_cache_fops);
+ debugfs_create_file("force_same_va", 0600,
+ kctx->kctx_dentry, kctx,
+ &kbase_force_same_va_fops);
- mutex_init(&kctx->mem_profile_lock);
+ mutex_init(&kctx->mem_profile_lock);
- kbasep_jd_debugfs_ctx_init(kctx);
- kbase_debug_mem_view_init(filp);
+ kbasep_jd_debugfs_ctx_init(kctx);
+ kbase_debug_mem_view_init(kctx);
- kbase_debug_job_fault_context_init(kctx);
+ kbase_debug_job_fault_context_init(kctx);
- kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);
+ kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);
- kbase_jit_debugfs_init(kctx);
+ kbase_jit_debugfs_init(kctx);
+ }
#endif /* CONFIG_DEBUG_FS */
dev_dbg(kbdev->dev, "created base context\n");
- {
- struct kbasep_kctx_list_element *element;
-
- element = kzalloc(sizeof(*element), GFP_KERNEL);
- if (element) {
- mutex_lock(&kbdev->kctx_list_lock);
- element->kctx = kctx;
- list_add(&element->link, &kbdev->kctx_list);
- KBASE_TLSTREAM_TL_NEW_CTX(
- kbdev,
- element->kctx,
- element->kctx->id,
- (u32)(element->kctx->tgid));
- mutex_unlock(&kbdev->kctx_list_lock);
- } else {
- /* we don't treat this as a fail - just warn about it */
- dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
- }
- }
- return 0;
+ kfile->kctx = kctx;
+ atomic_set(&kfile->setup_state, KBASE_FILE_COMPLETE);
- out:
- kbase_release_device(kbdev);
- return ret;
+ return 0;
}
-static int kbase_release(struct inode *inode, struct file *filp)
+static int kbase_open(struct inode *inode, struct file *filp)
{
- struct kbase_context *kctx = filp->private_data;
- struct kbase_device *kbdev = kctx->kbdev;
- struct kbasep_kctx_list_element *element, *tmp;
- bool found_element = false;
+ struct kbase_device *kbdev = NULL;
+ struct kbase_file *kfile;
+ int ret = 0;
- KBASE_TLSTREAM_TL_DEL_CTX(kbdev, kctx);
+ kbdev = kbase_find_device(iminor(inode));
-#ifdef CONFIG_DEBUG_FS
- kbasep_mem_profile_debugfs_remove(kctx);
-#endif
+ if (!kbdev)
+ return -ENODEV;
- mutex_lock(&kbdev->kctx_list_lock);
- list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
- if (element->kctx == kctx) {
- list_del(&element->link);
- kfree(element);
- found_element = true;
- }
+ kfile = kbase_file_new(kbdev, filp);
+ if (!kfile) {
+ ret = -ENOMEM;
+ goto out;
}
- mutex_unlock(&kbdev->kctx_list_lock);
- if (!found_element)
- dev_warn(kbdev->dev, "kctx not in kctx_list\n");
-
- filp->private_data = NULL;
- mutex_lock(&kctx->legacy_hwcnt_lock);
- /* If this client was performing hwcnt dumping and did not explicitly
- * detach itself, destroy it now
- */
- kbase_hwcnt_legacy_client_destroy(kctx->legacy_hwcnt_cli);
- kctx->legacy_hwcnt_cli = NULL;
- mutex_unlock(&kctx->legacy_hwcnt_lock);
+ filp->private_data = kfile;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
- kbase_destroy_context(kctx);
+ return 0;
- dev_dbg(kbdev->dev, "deleted base context\n");
+ out:
kbase_release_device(kbdev);
+ return ret;
+}
+
+static int kbase_release(struct inode *inode, struct file *filp)
+{
+ struct kbase_file *const kfile = filp->private_data;
+
+ kbase_file_delete(kfile);
return 0;
}
-static int kbase_api_set_flags(struct kbase_context *kctx,
+static int kbase_api_set_flags(struct kbase_file *kfile,
struct kbase_ioctl_set_flags *flags)
{
- int err;
+ int err = 0;
+ unsigned long const api_version = kbase_file_get_api_version(kfile);
+ struct kbase_context *kctx = NULL;
- /* setup pending, try to signal that we'll do the setup,
- * if setup was already in progress, err this call
- */
- if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
+ /* Validate flags */
+ if (flags->create_flags !=
+ (flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS))
return -EINVAL;
- err = kbase_context_set_create_flags(kctx, flags->create_flags);
- /* if bad flags, will stay stuck in setup mode */
- if (err)
- return err;
+ /* For backward compatibility, the context may have been created before
+ * the flags were set.
+ */
+ if (api_version >= KBASE_API_VERSION(11, 15)) {
+ err = kbase_file_create_kctx(kfile, flags->create_flags);
+ } else {
+ struct kbasep_js_kctx_info *js_kctx_info = NULL;
+ unsigned long irq_flags = 0;
- atomic_set(&kctx->setup_complete, 1);
- return 0;
+ /* If setup is incomplete (e.g. because the API version
+ * wasn't set) then we have to give up.
+ */
+ kctx = kbase_file_get_kctx_if_setup_complete(kfile);
+ if (unlikely(!kctx))
+ return -EPERM;
+
+ js_kctx_info = &kctx->jctx.sched_info;
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+ /* Translate the flags */
+ if ((flags->create_flags &
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ }
+
+ return err;
}
static int kbase_api_job_submit(struct kbase_context *kctx,
@@ -597,7 +783,7 @@ static int kbase_api_mem_alloc(struct kbase_context *kctx,
}
rcu_read_unlock();
- if (flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+ if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
return -ENOMEM;
/* Force SAME_VA if a 64-bit client.
@@ -754,7 +940,7 @@ static int kbase_api_mem_jit_init_old(struct kbase_context *kctx,
return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
DEFAULT_MAX_JIT_ALLOCATIONS,
- JIT_LEGACY_TRIM_LEVEL);
+ JIT_LEGACY_TRIM_LEVEL, BASE_MEM_GROUP_DEFAULT);
}
static int kbase_api_mem_jit_init(struct kbase_context *kctx,
@@ -773,7 +959,8 @@ static int kbase_api_mem_jit_init(struct kbase_context *kctx,
}
return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
- jit_init->max_allocations, jit_init->trim_level);
+ jit_init->max_allocations, jit_init->trim_level,
+ jit_init->group_id);
}
static int kbase_api_mem_exec_init(struct kbase_context *kctx,
@@ -827,12 +1014,12 @@ static int kbase_api_get_context_id(struct kbase_context *kctx,
static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
struct kbase_ioctl_tlstream_acquire *acquire)
{
- return kbase_tlstream_acquire(kctx->kbdev, acquire->flags);
+ return kbase_timeline_io_acquire(kctx->kbdev, acquire->flags);
}
static int kbase_api_tlstream_flush(struct kbase_context *kctx)
{
- kbase_tlstream_flush_streams(kctx->kbdev->timeline);
+ kbase_timeline_streams_flush(kctx->kbdev->timeline);
return 0;
}
@@ -869,7 +1056,7 @@ static int kbase_api_mem_alias(struct kbase_context *kctx,
}
flags = alias->in.flags;
- if (flags & BASE_MEM_FLAGS_KERNEL_ONLY) {
+ if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
vfree(ai);
return -EINVAL;
}
@@ -894,7 +1081,7 @@ static int kbase_api_mem_import(struct kbase_context *kctx,
int ret;
u64 flags = import->in.flags;
- if (flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+ if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
return -ENOMEM;
ret = kbase_mem_import(kctx,
@@ -913,7 +1100,7 @@ static int kbase_api_mem_import(struct kbase_context *kctx,
static int kbase_api_mem_flags_change(struct kbase_context *kctx,
struct kbase_ioctl_mem_flags_change *change)
{
- if (change->flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+ if (change->flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
return -ENOMEM;
return kbase_mem_flags_change(kctx, change->gpu_va,
@@ -1058,7 +1245,7 @@ static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
static int kbase_api_tlstream_test(struct kbase_context *kctx,
struct kbase_ioctl_tlstream_test *test)
{
- kbase_tlstream_test(
+ kbase_timeline_test(
kctx->kbdev,
test->tpw_count,
test->msg_delay,
@@ -1071,7 +1258,7 @@ static int kbase_api_tlstream_test(struct kbase_context *kctx,
static int kbase_api_tlstream_stats(struct kbase_context *kctx,
struct kbase_ioctl_tlstream_stats *stats)
{
- kbase_tlstream_stats(kctx->kbdev->timeline,
+ kbase_timeline_stats(kctx->kbdev->timeline,
&stats->bytes_collected,
&stats->bytes_generated);
@@ -1080,57 +1267,58 @@ static int kbase_api_tlstream_stats(struct kbase_context *kctx,
#endif /* MALI_UNIT_TEST */
-#define KBASE_HANDLE_IOCTL(cmd, function) \
- do { \
- BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
- return function(kctx); \
+#define KBASE_HANDLE_IOCTL(cmd, function, arg) \
+ do { \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
+ return function(arg); \
} while (0)
-#define KBASE_HANDLE_IOCTL_IN(cmd, function, type) \
- do { \
+#define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg) \
+ do { \
type param; \
int err; \
BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
err = copy_from_user(&param, uarg, sizeof(param)); \
if (err) \
- return -EFAULT; \
- return function(kctx, &param); \
+ return -EFAULT; \
+ return function(arg, &param); \
} while (0)
-#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type) \
- do { \
+#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg) \
+ do { \
type param; \
int ret, err; \
BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
- ret = function(kctx, &param); \
+ ret = function(arg, &param); \
err = copy_to_user(uarg, &param, sizeof(param)); \
if (err) \
- return -EFAULT; \
+ return -EFAULT; \
return ret; \
} while (0)
-#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type) \
- do { \
+#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg) \
+ do { \
type param; \
int ret, err; \
BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)); \
BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
err = copy_from_user(&param, uarg, sizeof(param)); \
if (err) \
- return -EFAULT; \
- ret = function(kctx, &param); \
+ return -EFAULT; \
+ ret = function(arg, &param); \
err = copy_to_user(uarg, &param, sizeof(param)); \
if (err) \
- return -EFAULT; \
+ return -EFAULT; \
return ret; \
} while (0)
static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct kbase_context *kctx = filp->private_data;
- struct kbase_device *kbdev = kctx->kbdev;
+ struct kbase_file *const kfile = filp->private_data;
+ struct kbase_context *kctx = NULL;
+ struct kbase_device *kbdev = kfile->kbdev;
void __user *uarg = (void __user *)arg;
/* Only these ioctls are available until setup is complete */
@@ -1138,207 +1326,246 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case KBASE_IOCTL_VERSION_CHECK:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
kbase_api_handshake,
- struct kbase_ioctl_version_check);
+ struct kbase_ioctl_version_check,
+ kfile);
break;
case KBASE_IOCTL_SET_FLAGS:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
kbase_api_set_flags,
- struct kbase_ioctl_set_flags);
+ struct kbase_ioctl_set_flags,
+ kfile);
break;
}
- /* Block call until version handshake and setup is complete */
- if (kctx->api_version == 0 || !atomic_read(&kctx->setup_complete))
- return -EINVAL;
+ kctx = kbase_file_get_kctx_if_setup_complete(kfile);
+ if (unlikely(!kctx))
+ return -EPERM;
/* Normal ioctls */
switch (cmd) {
case KBASE_IOCTL_JOB_SUBMIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
kbase_api_job_submit,
- struct kbase_ioctl_job_submit);
+ struct kbase_ioctl_job_submit,
+ kctx);
break;
case KBASE_IOCTL_GET_GPUPROPS:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
kbase_api_get_gpuprops,
- struct kbase_ioctl_get_gpuprops);
+ struct kbase_ioctl_get_gpuprops,
+ kctx);
break;
case KBASE_IOCTL_POST_TERM:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
- kbase_api_post_term);
+ kbase_api_post_term,
+ kctx);
break;
case KBASE_IOCTL_MEM_ALLOC:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
kbase_api_mem_alloc,
- union kbase_ioctl_mem_alloc);
+ union kbase_ioctl_mem_alloc,
+ kctx);
break;
case KBASE_IOCTL_MEM_QUERY:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
kbase_api_mem_query,
- union kbase_ioctl_mem_query);
+ union kbase_ioctl_mem_query,
+ kctx);
break;
case KBASE_IOCTL_MEM_FREE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
kbase_api_mem_free,
- struct kbase_ioctl_mem_free);
+ struct kbase_ioctl_mem_free,
+ kctx);
break;
case KBASE_IOCTL_DISJOINT_QUERY:
KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
kbase_api_disjoint_query,
- struct kbase_ioctl_disjoint_query);
+ struct kbase_ioctl_disjoint_query,
+ kctx);
break;
case KBASE_IOCTL_GET_DDK_VERSION:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
kbase_api_get_ddk_version,
- struct kbase_ioctl_get_ddk_version);
+ struct kbase_ioctl_get_ddk_version,
+ kctx);
break;
case KBASE_IOCTL_MEM_JIT_INIT_OLD:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT_OLD,
kbase_api_mem_jit_init_old,
- struct kbase_ioctl_mem_jit_init_old);
+ struct kbase_ioctl_mem_jit_init_old,
+ kctx);
break;
case KBASE_IOCTL_MEM_JIT_INIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
kbase_api_mem_jit_init,
- struct kbase_ioctl_mem_jit_init);
+ struct kbase_ioctl_mem_jit_init,
+ kctx);
break;
case KBASE_IOCTL_MEM_EXEC_INIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
kbase_api_mem_exec_init,
- struct kbase_ioctl_mem_exec_init);
+ struct kbase_ioctl_mem_exec_init,
+ kctx);
break;
case KBASE_IOCTL_MEM_SYNC:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
kbase_api_mem_sync,
- struct kbase_ioctl_mem_sync);
+ struct kbase_ioctl_mem_sync,
+ kctx);
break;
case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
kbase_api_mem_find_cpu_offset,
- union kbase_ioctl_mem_find_cpu_offset);
+ union kbase_ioctl_mem_find_cpu_offset,
+ kctx);
break;
case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
kbase_api_mem_find_gpu_start_and_offset,
- union kbase_ioctl_mem_find_gpu_start_and_offset);
+ union kbase_ioctl_mem_find_gpu_start_and_offset,
+ kctx);
break;
case KBASE_IOCTL_GET_CONTEXT_ID:
KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
kbase_api_get_context_id,
- struct kbase_ioctl_get_context_id);
+ struct kbase_ioctl_get_context_id,
+ kctx);
break;
case KBASE_IOCTL_TLSTREAM_ACQUIRE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
kbase_api_tlstream_acquire,
- struct kbase_ioctl_tlstream_acquire);
+ struct kbase_ioctl_tlstream_acquire,
+ kctx);
break;
case KBASE_IOCTL_TLSTREAM_FLUSH:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
- kbase_api_tlstream_flush);
+ kbase_api_tlstream_flush,
+ kctx);
break;
case KBASE_IOCTL_MEM_COMMIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
kbase_api_mem_commit,
- struct kbase_ioctl_mem_commit);
+ struct kbase_ioctl_mem_commit,
+ kctx);
break;
case KBASE_IOCTL_MEM_ALIAS:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
kbase_api_mem_alias,
- union kbase_ioctl_mem_alias);
+ union kbase_ioctl_mem_alias,
+ kctx);
break;
case KBASE_IOCTL_MEM_IMPORT:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
kbase_api_mem_import,
- union kbase_ioctl_mem_import);
+ union kbase_ioctl_mem_import,
+ kctx);
break;
case KBASE_IOCTL_MEM_FLAGS_CHANGE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
kbase_api_mem_flags_change,
- struct kbase_ioctl_mem_flags_change);
+ struct kbase_ioctl_mem_flags_change,
+ kctx);
break;
case KBASE_IOCTL_STREAM_CREATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
kbase_api_stream_create,
- struct kbase_ioctl_stream_create);
+ struct kbase_ioctl_stream_create,
+ kctx);
break;
case KBASE_IOCTL_FENCE_VALIDATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
kbase_api_fence_validate,
- struct kbase_ioctl_fence_validate);
+ struct kbase_ioctl_fence_validate,
+ kctx);
break;
case KBASE_IOCTL_MEM_PROFILE_ADD:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
kbase_api_mem_profile_add,
- struct kbase_ioctl_mem_profile_add);
+ struct kbase_ioctl_mem_profile_add,
+ kctx);
break;
case KBASE_IOCTL_SOFT_EVENT_UPDATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
kbase_api_soft_event_update,
- struct kbase_ioctl_soft_event_update);
+ struct kbase_ioctl_soft_event_update,
+ kctx);
break;
case KBASE_IOCTL_STICKY_RESOURCE_MAP:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
kbase_api_sticky_resource_map,
- struct kbase_ioctl_sticky_resource_map);
+ struct kbase_ioctl_sticky_resource_map,
+ kctx);
break;
case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
kbase_api_sticky_resource_unmap,
- struct kbase_ioctl_sticky_resource_unmap);
+ struct kbase_ioctl_sticky_resource_unmap,
+ kctx);
break;
/* Instrumentation. */
case KBASE_IOCTL_HWCNT_READER_SETUP:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
kbase_api_hwcnt_reader_setup,
- struct kbase_ioctl_hwcnt_reader_setup);
+ struct kbase_ioctl_hwcnt_reader_setup,
+ kctx);
break;
case KBASE_IOCTL_HWCNT_ENABLE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
kbase_api_hwcnt_enable,
- struct kbase_ioctl_hwcnt_enable);
+ struct kbase_ioctl_hwcnt_enable,
+ kctx);
break;
case KBASE_IOCTL_HWCNT_DUMP:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
- kbase_api_hwcnt_dump);
+ kbase_api_hwcnt_dump,
+ kctx);
break;
case KBASE_IOCTL_HWCNT_CLEAR:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
- kbase_api_hwcnt_clear);
+ kbase_api_hwcnt_clear,
+ kctx);
break;
#ifdef CONFIG_MALI_NO_MALI
case KBASE_IOCTL_HWCNT_SET:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
kbase_api_hwcnt_set,
- struct kbase_ioctl_hwcnt_values);
+ struct kbase_ioctl_hwcnt_values,
+ kctx);
break;
#endif
#ifdef CONFIG_MALI_CINSTR_GWT
case KBASE_IOCTL_CINSTR_GWT_START:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
- kbase_gpu_gwt_start);
+ kbase_gpu_gwt_start,
+ kctx);
break;
case KBASE_IOCTL_CINSTR_GWT_STOP:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
- kbase_gpu_gwt_stop);
+ kbase_gpu_gwt_stop,
+ kctx);
break;
case KBASE_IOCTL_CINSTR_GWT_DUMP:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
kbase_gpu_gwt_dump,
- union kbase_ioctl_cinstr_gwt_dump);
+ union kbase_ioctl_cinstr_gwt_dump,
+ kctx);
break;
#endif
#if MALI_UNIT_TEST
case KBASE_IOCTL_TLSTREAM_TEST:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
kbase_api_tlstream_test,
- struct kbase_ioctl_tlstream_test);
+ struct kbase_ioctl_tlstream_test,
+ kctx);
break;
case KBASE_IOCTL_TLSTREAM_STATS:
KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
kbase_api_tlstream_stats,
- struct kbase_ioctl_tlstream_stats);
+ struct kbase_ioctl_tlstream_stats,
+ kctx);
break;
#endif
}
@@ -1350,10 +1577,15 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
- struct kbase_context *kctx = filp->private_data;
+ struct kbase_file *const kfile = filp->private_data;
+ struct kbase_context *const kctx =
+ kbase_file_get_kctx_if_setup_complete(kfile);
struct base_jd_event_v2 uevent;
int out_count = 0;
+ if (unlikely(!kctx))
+ return -EPERM;
+
if (count < sizeof(uevent))
return -ENOBUFS;
@@ -1389,7 +1621,12 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
static unsigned int kbase_poll(struct file *filp, poll_table *wait)
{
- struct kbase_context *kctx = filp->private_data;
+ struct kbase_file *const kfile = filp->private_data;
+ struct kbase_context *const kctx =
+ kbase_file_get_kctx_if_setup_complete(kfile);
+
+ if (unlikely(!kctx))
+ return POLLERR;
poll_wait(filp, &kctx->event_queue, wait);
if (kbase_event_pending(kctx))
@@ -1407,6 +1644,18 @@ void kbase_event_wakeup(struct kbase_context *kctx)
KBASE_EXPORT_TEST_API(kbase_event_wakeup);
+static int kbase_mmap(struct file *const filp, struct vm_area_struct *const vma)
+{
+ struct kbase_file *const kfile = filp->private_data;
+ struct kbase_context *const kctx =
+ kbase_file_get_kctx_if_setup_complete(kfile);
+
+ if (unlikely(!kctx))
+ return -EPERM;
+
+ return kbase_context_mmap(kctx, vma);
+}
+
static int kbase_check_flags(int flags)
{
/* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
@@ -1418,6 +1667,20 @@ static int kbase_check_flags(int flags)
return 0;
}
+static unsigned long kbase_get_unmapped_area(struct file *const filp,
+ const unsigned long addr, const unsigned long len,
+ const unsigned long pgoff, const unsigned long flags)
+{
+ struct kbase_file *const kfile = filp->private_data;
+ struct kbase_context *const kctx =
+ kbase_file_get_kctx_if_setup_complete(kfile);
+
+ if (unlikely(!kctx))
+ return -EPERM;
+
+ return kbase_context_get_unmapped_area(kctx, addr, len, pgoff, flags);
+}
+
static const struct file_operations kbase_fops = {
.owner = THIS_MODULE,
.open = kbase_open,
@@ -1460,7 +1723,7 @@ static ssize_t show_policy(struct device *dev, struct device_attribute *attr, ch
current_policy = kbase_pm_get_policy(kbdev);
- policy_count = kbase_pm_list_policies(&policy_list);
+ policy_count = kbase_pm_list_policies(kbdev, &policy_list);
for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
if (policy_list[i] == current_policy)
@@ -1508,7 +1771,7 @@ static ssize_t set_policy(struct device *dev, struct device_attribute *attr, con
if (!kbdev)
return -ENODEV;
- policy_count = kbase_pm_list_policies(&policy_list);
+ policy_count = kbase_pm_list_policies(kbdev, &policy_list);
for (i = 0; i < policy_count; i++) {
if (sysfs_streq(policy_list[i]->name, buf)) {
@@ -2836,7 +3099,7 @@ static ssize_t show_js_ctx_scheduling_mode(struct device *dev,
static ssize_t set_js_ctx_scheduling_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct kbasep_kctx_list_element *element;
+ struct kbase_context *kctx;
u32 new_js_ctx_scheduling_mode;
struct kbase_device *kbdev;
unsigned long flags;
@@ -2864,8 +3127,8 @@ static ssize_t set_js_ctx_scheduling_mode(struct device *dev,
kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
/* Adjust priority of all the contexts as per the new mode */
- list_for_each_entry(element, &kbdev->kctx_list, link)
- kbase_js_update_ctx_priority(element->kctx);
+ list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link)
+ kbase_js_update_ctx_priority(kctx);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&kbdev->kctx_list_lock);
@@ -3584,7 +3847,7 @@ static void kbase_device_coherency_init(struct kbase_device *kbdev,
kbdev->system_coherency;
}
-#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#ifdef CONFIG_MALI_BUSLOG
/* Callback used by the kbase bus logger client, to initiate a GPU reset
* when the bus log is restarted. GPU reset is used as reference point
@@ -3639,14 +3902,13 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
kfree(kbdev->gpu_props.prop_buffer);
-#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#ifdef CONFIG_MALI_BUSLOG
if (kbdev->inited_subsys & inited_buslogger) {
bl_core_client_unregister(kbdev->buslogger);
kbdev->inited_subsys &= ~inited_buslogger;
}
#endif
-
if (kbdev->inited_subsys & inited_dev_list) {
dev_list = kbase_dev_list_get();
list_del(&kbdev->entry);
@@ -3681,13 +3943,6 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
kbdev->inited_subsys &= ~inited_job_fault;
}
-#ifdef CONFIG_MALI_DEVFREQ
- if (kbdev->inited_subsys & inited_devfreq) {
- kbase_devfreq_term(kbdev);
- kbdev->inited_subsys &= ~inited_devfreq;
- }
-#endif
-
if (kbdev->inited_subsys & inited_backend_late) {
kbase_backend_late_term(kbdev);
@@ -3715,7 +3970,7 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
}
if (kbdev->inited_subsys & inited_tlstream) {
- kbase_tlstream_term(kbdev->timeline);
+ kbase_timeline_term(kbdev->timeline);
kbdev->inited_subsys &= ~inited_tlstream;
}
@@ -3787,6 +4042,29 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
return 0;
}
+void kbase_backend_devfreq_term(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ if (kbdev->inited_subsys & inited_devfreq) {
+ kbase_devfreq_term(kbdev);
+ kbdev->inited_subsys &= ~inited_devfreq;
+ }
+#endif
+}
+
+int kbase_backend_devfreq_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+ /* Devfreq uses hardware counters, so must be initialized after it. */
+ int err = kbase_devfreq_init(kbdev);
+
+ if (!err)
+ kbdev->inited_subsys |= inited_devfreq;
+ else
+ dev_err(kbdev->dev, "Continuing without devfreq\n");
+#endif /* CONFIG_MALI_DEVFREQ */
+ return 0;
+}
/* Number of register accesses for the buffer that we allocate during
* initialization time. The buffer size can be changed later via debugfs. */
@@ -3930,7 +4208,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
kbdev->inited_subsys |= inited_js;
atomic_set(&kbdev->timeline_is_enabled, 0);
- err = kbase_tlstream_init(&kbdev->timeline, &kbdev->timeline_is_enabled);
+ err = kbase_timeline_init(&kbdev->timeline, &kbdev->timeline_is_enabled);
if (err) {
dev_err(kbdev->dev, "Timeline stream initialization failed\n");
kbase_platform_device_remove(pdev);
@@ -3938,10 +4216,6 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
}
kbdev->inited_subsys |= inited_tlstream;
- /* Initialize the kctx list. This is used by vinstr. */
- mutex_init(&kbdev->kctx_list_lock);
- INIT_LIST_HEAD(&kbdev->kctx_list);
-
err = kbase_hwcnt_backend_gpu_create(kbdev, &kbdev->hwcnt_gpu_iface);
if (err) {
dev_err(kbdev->dev, "GPU hwcnt backend creation failed\n");
@@ -3981,6 +4255,9 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
}
kbdev->inited_subsys |= inited_vinstr;
+ /* Devfreq initialization is now embedded inside
+ * kbase_backend_late_init(), which calls kbase_backend_devfreq_init()
+ * before the first trigger of pm_context_idle(). */
err = kbase_backend_late_init(kbdev);
if (err) {
dev_err(kbdev->dev, "Late backend initialization failed\n");
@@ -3990,16 +4267,6 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
kbdev->inited_subsys |= inited_backend_late;
-
-#ifdef CONFIG_MALI_DEVFREQ
- /* Devfreq uses hardware counters, so must be initialized after it. */
- err = kbase_devfreq_init(kbdev);
- if (!err)
- kbdev->inited_subsys |= inited_devfreq;
- else
- dev_err(kbdev->dev, "Continuing without devfreq\n");
-#endif /* CONFIG_MALI_DEVFREQ */
-
#ifdef MALI_KBASE_BUILD
err = kbase_debug_job_fault_dev_init(kbdev);
if (err) {
@@ -4054,7 +4321,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
kbdev->inited_subsys |= inited_misc_register;
-#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#ifdef CONFIG_MALI_BUSLOG
err = bl_core_client_register(kbdev->devname,
kbase_logging_started_cb,
kbdev, &kbdev->buslogger,
@@ -4102,13 +4369,16 @@ static int kbase_device_suspend(struct device *dev)
if (!kbdev)
return -ENODEV;
+ kbase_pm_suspend(kbdev);
+
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- if (kbdev->inited_subsys & inited_devfreq)
- devfreq_suspend_device(kbdev->devfreq);
+ dev_dbg(dev, "Callback %s\n", __func__);
+ if (kbdev->inited_subsys & inited_devfreq) {
+ kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
+ flush_workqueue(kbdev->devfreq_queue.workq);
+ }
#endif
-
- kbase_pm_suspend(kbdev);
return 0;
}
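The suspend path now defers the actual devfreq_suspend_device()/devfreq_resume_device() calls to a worker via kbase_devfreq_enqueue_work(). A hedged sketch of what that worker plausibly looks like, built only from the queue structure added in mali_kbase_defs.h; the function name is hypothetical and the real implementation lives in the devfreq backend.

#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <mali_kbase.h>

static void example_devfreq_suspend_resume_worker(struct work_struct *work)
{
	struct kbase_devfreq_queue_info *info = container_of(work,
			struct kbase_devfreq_queue_info, work);
	struct kbase_device *kbdev = container_of(info,
			struct kbase_device, devfreq_queue);

	/* Replay the last requested work type outside the PM callbacks. */
	switch (info->req_type) {
	case DEVFREQ_WORK_SUSPEND:
		devfreq_suspend_device(kbdev->devfreq);
		break;
	case DEVFREQ_WORK_RESUME:
		devfreq_resume_device(kbdev->devfreq);
		break;
	default:
		break;
	}

	info->acted_type = info->req_type;
}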
@@ -4132,8 +4402,14 @@ static int kbase_device_resume(struct device *dev)
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- if (kbdev->inited_subsys & inited_devfreq)
- devfreq_resume_device(kbdev->devfreq);
+ dev_dbg(dev, "Callback %s\n", __func__);
+ if (kbdev->inited_subsys & inited_devfreq) {
+ mutex_lock(&kbdev->pm.lock);
+ if (kbdev->pm.active_count > 0)
+ kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
+ mutex_unlock(&kbdev->pm.lock);
+ flush_workqueue(kbdev->devfreq_queue.workq);
+ }
#endif
return 0;
}
@@ -4157,10 +4433,11 @@ static int kbase_device_runtime_suspend(struct device *dev)
if (!kbdev)
return -ENODEV;
+ dev_dbg(dev, "Callback %s\n", __func__);
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
if (kbdev->inited_subsys & inited_devfreq)
- devfreq_suspend_device(kbdev->devfreq);
+ kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
#endif
if (kbdev->pm.backend.callback_power_runtime_off) {
@@ -4190,6 +4467,7 @@ static int kbase_device_runtime_resume(struct device *dev)
if (!kbdev)
return -ENODEV;
+ dev_dbg(dev, "Callback %s\n", __func__);
if (kbdev->pm.backend.callback_power_runtime_on) {
ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
dev_dbg(dev, "runtime resume\n");
@@ -4198,7 +4476,7 @@ static int kbase_device_runtime_resume(struct device *dev)
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
if (kbdev->inited_subsys & inited_devfreq)
- devfreq_resume_device(kbdev->devfreq);
+ kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
#endif
return ret;
@@ -4224,6 +4502,7 @@ static int kbase_device_runtime_idle(struct device *dev)
if (!kbdev)
return -ENODEV;
+ dev_dbg(dev, "Callback %s\n", __func__);
/* Use platform specific implementation if it exists. */
if (kbdev->pm.backend.callback_power_runtime_idle)
return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
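From user space, the reworked file handling enforces a strict setup order before normal ioctls are accepted: version handshake first, then context creation flags. A hedged sketch of that sequence, assuming the ioctl definitions from the driver's UAPI header (included here as mali_kbase_ioctl.h) and a /dev/mali0 node; the version numbers are illustrative.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "mali_kbase_ioctl.h"

int example_setup(void)
{
	struct kbase_ioctl_version_check vc = { .major = 11, .minor = 0 };
	struct kbase_ioctl_set_flags sf = { .create_flags = 0 };
	int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;

	/* Step 1: API version handshake; the driver replies with the
	 * lowest common minor version.
	 */
	if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc) < 0)
		goto fail;

	/* Step 2: context creation flags; from API 11.15 onwards this is
	 * also the point at which the kernel context is created.
	 */
	if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &sf) < 0)
		goto fail;

	return fd;	/* other ioctls are accepted from here on */
fail:
	close(fd);
	return -1;
}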
diff --git a/mali_kbase/mali_kbase_debug_mem_view.c b/mali_kbase/mali_kbase_debug_mem_view.c
index 8f46117..5cfc6d1 100644
--- a/mali_kbase/mali_kbase_debug_mem_view.c
+++ b/mali_kbase/mali_kbase_debug_mem_view.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -198,12 +198,11 @@ out:
static int debug_mem_open(struct inode *i, struct file *file)
{
- struct file *kctx_file = i->i_private;
- struct kbase_context *kctx = kctx_file->private_data;
+ struct kbase_context *const kctx = i->i_private;
struct debug_mem_data *mem_data;
int ret;
- if (get_file_rcu(kctx_file) == 0)
+ if (get_file_rcu(kctx->filp) == 0)
return -ENOENT;
ret = seq_open(file, &ops);
@@ -255,14 +254,14 @@ out:
}
seq_release(i, file);
open_fail:
- fput(kctx_file);
+ fput(kctx->filp);
return ret;
}
static int debug_mem_release(struct inode *inode, struct file *file)
{
- struct file *kctx_file = inode->i_private;
+ struct kbase_context *const kctx = inode->i_private;
struct seq_file *sfile = file->private_data;
struct debug_mem_data *mem_data = sfile->private;
struct debug_mem_mapping *mapping;
@@ -279,7 +278,7 @@ static int debug_mem_release(struct inode *inode, struct file *file)
kfree(mem_data);
- fput(kctx_file);
+ fput(kctx->filp);
return 0;
}
@@ -291,21 +290,16 @@ static const struct file_operations kbase_debug_mem_view_fops = {
.llseek = seq_lseek
};
-/**
- * kbase_debug_mem_view_init - Initialise the mem_view sysfs file
- * @kctx_file: The /dev/mali0 file instance for the context
- *
- * This function creates a "mem_view" file which can be used to get a view of
- * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
- *
- * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
- * parent directory.
- */
-void kbase_debug_mem_view_init(struct file *kctx_file)
+void kbase_debug_mem_view_init(struct kbase_context *const kctx)
{
- struct kbase_context *kctx = kctx_file->private_data;
-
- debugfs_create_file("mem_view", S_IRUSR, kctx->kctx_dentry, kctx_file,
+ /* Caller already ensures this, but we keep the pattern for
+ * maintenance safety.
+ */
+ if (WARN_ON(!kctx) ||
+ WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+ return;
+
+ debugfs_create_file("mem_view", 0400, kctx->kctx_dentry, kctx,
&kbase_debug_mem_view_fops);
}
diff --git a/mali_kbase/mali_kbase_debug_mem_view.h b/mali_kbase/mali_kbase_debug_mem_view.h
index 886ca94..b948b7c 100644
--- a/mali_kbase/mali_kbase_debug_mem_view.h
+++ b/mali_kbase/mali_kbase_debug_mem_view.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2015, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,6 +25,16 @@
#include <mali_kbase.h>
-void kbase_debug_mem_view_init(struct file *kctx_file);
+/**
+ * kbase_debug_mem_view_init - Initialize the mem_view debugfs file
+ * @kctx: Pointer to kernel base context
+ *
+ * This function creates a "mem_view" file which can be used to get a view of
+ * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
+ *
+ * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
+ * parent directory.
+ */
+void kbase_debug_mem_view_init(struct kbase_context *kctx);
#endif
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index cbb406a..93edef1 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -49,7 +49,7 @@
#include <linux/file.h>
#include <linux/sizes.h>
-#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#ifdef CONFIG_MALI_BUSLOG
#include <linux/bus_logger.h>
#endif
@@ -928,6 +928,9 @@ struct kbase_as {
* level page table of the context, this is used for
* MMU HW programming as the address translation will
* start from the top level page table.
+ * @group_id: A memory group ID to be passed to a platform-specific
+ * memory group manager.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
* @kctx: If this set of MMU tables belongs to a context then
* this is a back-reference to the context, otherwise
* it is NULL
@@ -936,6 +939,7 @@ struct kbase_mmu_table {
u64 *mmu_teardown_pages;
struct mutex mmu_lock;
phys_addr_t pgd;
+ u8 group_id;
struct kbase_context *kctx;
};
@@ -1019,11 +1023,6 @@ struct kbase_trace {
u8 flags;
};
-struct kbasep_kctx_list_element {
- struct list_head link;
- struct kbase_context *kctx;
-};
-
/**
* Data stored per device for power management.
*
@@ -1223,6 +1222,36 @@ struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
#define DEVNAME_SIZE 16
/**
+ * enum kbase_devfreq_work_type - The type of work to perform in the devfreq
+ * suspend/resume worker.
+ * @DEVFREQ_WORK_NONE: Initialisation state.
+ * @DEVFREQ_WORK_SUSPEND: Call devfreq_suspend_device().
+ * @DEVFREQ_WORK_RESUME: Call devfreq_resume_device().
+ */
+enum kbase_devfreq_work_type {
+ DEVFREQ_WORK_NONE,
+ DEVFREQ_WORK_SUSPEND,
+ DEVFREQ_WORK_RESUME
+};
+
+/**
+ * struct kbase_devfreq_queue_info - Object for managing the queued devfreq
+ * suspend/resume work items.
+ * @workq: Workqueue for devfreq suspend/resume requests
+ * @work: Work item for devfreq suspend & resume
+ * @req_type: Requested work type to be performed by the devfreq
+ * suspend/resume worker
+ * @acted_type: Work type that the worker has most recently acted on, i.e.
+ * the internally recorded suspend/resume state
+ */
+struct kbase_devfreq_queue_info {
+ struct workqueue_struct *workq;
+ struct work_struct work;
+ enum kbase_devfreq_work_type req_type;
+ enum kbase_devfreq_work_type acted_type;
+};
+
+/**
* struct kbase_device - Object representing an instance of GPU platform device,
* allocated from the probe method of mali driver.
* @hw_quirks_sc: Configuration to be used for the shader cores as per
@@ -1339,8 +1368,9 @@ struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
* @cache_clean_wait: Signalled when a cache clean has finished.
* @platform_context: Platform specific private data to be accessed by
* platform specific config files only.
- * @kctx_list: List of kbase_contexts created for the device, including
- * the kbase_context created for vinstr_ctx.
+ * @kctx_list: List of kbase_contexts created for the device,
+ * including any contexts that might be created for
+ * hardware counters.
* @kctx_list_lock: Lock protecting concurrent accesses to @kctx_list.
* @devfreq_profile: Describes devfreq profile for the Mali GPU device, passed
* to devfreq_add_device() to add devfreq feature to Mali
@@ -1367,6 +1397,8 @@ struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
* to operating-points-v2-mali table in devicetree.
* @num_opps: Number of operating performance points available for the Mali
* GPU device.
+ * @devfreq_queue: Per device object for storing data that manages devfreq
+ * suspend & resume request queue and the related items.
* @devfreq_cooling: Pointer returned on registering devfreq cooling device
* corresponding to @devfreq.
* @ipa_protection_mode_switched: is set to TRUE when GPU is put into protected
@@ -1465,6 +1497,10 @@ struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
* on disabling of GWT.
* @js_ctx_scheduling_mode: Context scheduling mode currently being used by
* Job Scheduler
+ * @l2_size_override: Used to set L2 cache size via device tree blob
+ * @l2_hash_override: Used to set L2 cache hash via device tree blob
+ * @policy_list: A filtered list of policies available in the system.
+ * @policy_count: Number of policies in the @policy_list.
*/
struct kbase_device {
u32 hw_quirks_sc;
@@ -1575,6 +1611,11 @@ struct kbase_device {
struct kbase_devfreq_opp *opp_table;
int num_opps;
struct kbasep_pm_metrics last_devfreq_metrics;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ struct kbase_devfreq_queue_info devfreq_queue;
+#endif
+
#ifdef CONFIG_DEVFREQ_THERMAL
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
struct devfreq_cooling_device *devfreq_cooling;
@@ -1677,7 +1718,7 @@ struct kbase_device {
bool protected_mode_support;
-#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#ifdef CONFIG_MALI_BUSLOG
struct bus_logger_client *buslogger;
#endif
@@ -1696,9 +1737,15 @@ struct kbase_device {
u8 backup_serialize_jobs;
#endif
+ u8 l2_size_override;
+ u8 l2_hash_override;
+
/* See KBASE_JS_*_PRIORITY_MODE for details. */
u32 js_ctx_scheduling_mode;
+
+ const struct kbase_pm_policy *policy_list[KBASE_PM_MAX_NUM_POLICIES];
+ int policy_count;
};
/**
@@ -1722,6 +1769,56 @@ struct jsctx_queue {
((0 & 0xFF) << 0))
/**
+ * enum kbase_file_state - Initialization state of a file opened by @kbase_open
+ *
+ * @KBASE_FILE_NEED_VSN: Initial state, awaiting API version.
+ * @KBASE_FILE_VSN_IN_PROGRESS: Indicates that setting an API version is in
+ * progress and other setup calls shall be
+ * rejected.
+ * @KBASE_FILE_NEED_CTX: Indicates that the API version handshake has
+ * completed, awaiting context creation flags.
+ * @KBASE_FILE_CTX_IN_PROGRESS: Indicates that the context's setup is in
+ * progress and other setup calls shall be rejected.
+ * @KBASE_FILE_COMPLETE: Indicates that setup for the context has
+ * completed, i.e. flags have been set for the
+ * context.
+ *
+ * The driver allows only limited interaction with user-space until setup
+ * is complete.
+ */
+enum kbase_file_state {
+ KBASE_FILE_NEED_VSN,
+ KBASE_FILE_VSN_IN_PROGRESS,
+ KBASE_FILE_NEED_CTX,
+ KBASE_FILE_CTX_IN_PROGRESS,
+ KBASE_FILE_COMPLETE
+};
+
+/**
+ * struct kbase_file - Object representing a file opened by @kbase_open
+ *
+ * @kbdev: Object representing an instance of GPU platform device,
+ * allocated from the probe method of the Mali driver.
+ * @filp: Pointer to the struct file corresponding to device file
+ * /dev/malixx instance, passed to the file's open method.
+ * @kctx: Object representing an entity among which the GPU is
+ * scheduled and which gets its own GPU address space.
+ * Invalid until @setup_state is KBASE_FILE_COMPLETE.
+ * @api_version: Contains the version number for User/kernel interface,
+ * used for compatibility check. Invalid until
+ * @setup_state is KBASE_FILE_NEED_CTX.
+ * @setup_state: Initialization state of the file. Values come from
+ * the kbase_file_state enumeration.
+ */
+struct kbase_file {
+ struct kbase_device *kbdev;
+ struct file *filp;
+ struct kbase_context *kctx;
+ unsigned long api_version;
+ atomic_t setup_state;
+};
+
+/**
* enum kbase_context_flags - Flags for kbase contexts
*
* @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
@@ -1803,12 +1900,12 @@ struct kbase_sub_alloc {
};
/**
- * struct kbase_context - Object representing an entity, among which GPU is
- * scheduled and gets its own GPU address space.
- * Created when the device file /dev/malixx is opened.
+ * struct kbase_context - Kernel base context
+ *
* @filp: Pointer to the struct file corresponding to device file
* /dev/malixx instance, passed to the file's open method.
* @kbdev: Pointer to the Kbase device for which the context is created.
+ * @kctx_list_link: Node into Kbase device list of contexts.
* @mmu: Structure holding details of the MMU tables for this
* context
* @id: Unique identifier for the context, indicates the number of
@@ -1832,12 +1929,6 @@ struct kbase_sub_alloc {
* @event_coalesce_count: Count of the events present in @event_coalesce_list.
* @flags: bitmap of enums from kbase_context_flags, indicating the
* state & attributes for the context.
- * @setup_complete: Indicates if the setup for context has completed, i.e.
- * flags have been set for the context. Driver allows only
- * 2 ioctls until the setup is done. Valid only for
- * @api_version value 0.
- * @setup_in_progress: Indicates if the context's setup is in progress and other
- * setup calls during that shall be rejected.
* @aliasing_sink_page: Special page used for KBASE_MEM_TYPE_ALIAS allocations,
 *                       which can alias a number of memory regions. The page
 *                       represents a region where it is mapped with a write-alloc
@@ -1873,10 +1964,16 @@ struct kbase_sub_alloc {
* @event_queue: Wait queue used for blocking the thread, which consumes
* the base_jd_event corresponding to an atom, when there
* are no more posted events.
- * @tgid: thread group id of the process, whose thread opened the
- * device file /dev/malixx instance to create a context.
- * @pid: id of the thread, corresponding to process @tgid, which
- * actually which opened the device file.
+ * @tgid: Thread group ID of the process whose thread created
+ * the context (by calling KBASE_IOCTL_VERSION_CHECK or
+ * KBASE_IOCTL_SET_FLAGS, depending on the @api_version).
+ * This is usually, but not necessarily, the same as the
+ * process whose thread opened the device file
+ * /dev/malixx instance.
+ * @pid: ID of the thread, corresponding to process @tgid,
+ * which actually created the context. This is usually,
+ * but not necessarily, the same as the thread which
+ * opened the device file /dev/malixx instance.
* @jctx: object encapsulating all the Job dispatcher related state,
* including the array of atoms.
* @used_pages: Keeps a track of the number of 4KB physical pages in use
@@ -1985,6 +2082,9 @@ struct kbase_sub_alloc {
* old or new version of interface for JIT allocations
* 1 -> client used KBASE_IOCTL_MEM_JIT_INIT_OLD
* 2 -> client used KBASE_IOCTL_MEM_JIT_INIT
+ * @jit_group_id: A memory group ID to be passed to a platform-specific
+ * memory group manager.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
* @jit_active_head: List containing the JIT allocations which are in use.
* @jit_pool_head: List containing the JIT allocations which have been
 *                       freed up by userspace and so are not being used by them.
@@ -2024,10 +2124,17 @@ struct kbase_sub_alloc {
* @priority: Indicates the context priority. Used along with @atoms_count
* for context scheduling, protected by hwaccess_lock.
* @atoms_count: Number of gpu atoms currently in use, per priority
+ *
+ * A kernel base context is an entity among which the GPU is scheduled.
+ * Each context has its own GPU address space.
+ * Up to one context can be created for each client that opens the device file
+ * /dev/malixx. Context creation is deferred until a special ioctl() system call
+ * is made on the device file.
*/
struct kbase_context {
struct file *filp;
struct kbase_device *kbdev;
+ struct list_head kctx_list_link;
struct kbase_mmu_table mmu;
u32 id;
@@ -2042,9 +2149,6 @@ struct kbase_context {
atomic_t flags;
- atomic_t setup_complete;
- atomic_t setup_in_progress;
-
struct tagged_addr aliasing_sink_page;
spinlock_t mem_partials_lock;
@@ -2086,12 +2190,6 @@ struct kbase_context {
atomic_t refcount;
- /* NOTE:
- *
- * Flags are in jctx.sched_info.ctx.flags
- * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
- *
- * All other flags must be added there */
spinlock_t mm_update_lock;
struct mm_struct __rcu *process_mm;
u64 same_va_end;
@@ -2138,6 +2236,7 @@ struct kbase_context {
u8 jit_current_allocations;
u8 jit_current_allocations_per_bin[256];
u8 jit_version;
+ u8 jit_group_id;
struct list_head jit_active_head;
struct list_head jit_pool_head;
struct list_head jit_destroy_head;
diff --git a/mali_kbase/mali_kbase_device.c b/mali_kbase/mali_kbase_device.c
index 530bb45..16535ec 100644
--- a/mali_kbase/mali_kbase_device.c
+++ b/mali_kbase/mali_kbase_device.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -241,6 +241,9 @@ int kbase_device_init(struct kbase_device * const kbdev)
else
kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
+ mutex_init(&kbdev->kctx_list_lock);
+ INIT_LIST_HEAD(&kbdev->kctx_list);
+
return 0;
term_trace:
kbasep_trace_term(kbdev);
@@ -256,6 +259,8 @@ void kbase_device_term(struct kbase_device *kbdev)
{
KBASE_DEBUG_ASSERT(kbdev);
+ WARN_ON(!list_empty(&kbdev->kctx_list));
+
#if KBASE_TRACE_ENABLE
kbase_debug_assert_register_hook(NULL, NULL);
#endif
diff --git a/mali_kbase/mali_kbase_event.c b/mali_kbase/mali_kbase_event.c
index 874170d..721af69 100644
--- a/mali_kbase/mali_kbase_event.c
+++ b/mali_kbase/mali_kbase_event.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016,2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2016,2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,7 +24,7 @@
#include <mali_kbase.h>
#include <mali_kbase_debug.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
diff --git a/mali_kbase/mali_kbase_gator.h b/mali_kbase/mali_kbase_gator.h
index 3e3fb06..6428f08 100644
--- a/mali_kbase/mali_kbase_gator.h
+++ b/mali_kbase/mali_kbase_gator.h
@@ -27,9 +27,12 @@
* and s/w counter reporting. We cannot use the enums in mali_uk_types.h because
* they are unknown inside gator.
*/
+
#ifndef _KBASE_GATOR_H_
#define _KBASE_GATOR_H_
+#include <linux/types.h>
+
#define GATOR_JOB_SLOT_START 1
#define GATOR_JOB_SLOT_STOP 2
#define GATOR_JOB_SLOT_SOFT_STOPPED 3
@@ -38,6 +41,8 @@
#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16))
+struct kbase_context;
+
void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx, u8 atom_id);
void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value);
void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value);
diff --git a/mali_kbase/mali_kbase_gator_hwcnt_names_tbex.h b/mali_kbase/mali_kbase_gator_hwcnt_names_tbex.h
index 592bb2e..8282a39 100644
--- a/mali_kbase/mali_kbase_gator_hwcnt_names_tbex.h
+++ b/mali_kbase/mali_kbase_gator_hwcnt_names_tbex.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -166,12 +166,12 @@ static const char * const hardware_counters_mali_tBEx[] = {
"",
"",
"TBEx_FRAG_ACTIVE",
- "TBEx_FRAG_PRIMITIVES",
+ "TBEx_FRAG_PRIMITIVES_OUT",
"TBEx_FRAG_PRIM_RAST",
"TBEx_FRAG_FPK_ACTIVE",
"TBEx_FRAG_STARVING",
"TBEx_FRAG_WARPS",
- "TBEx_FRAG_PARTIAL_WARPS",
+ "TBEx_FRAG_PARTIAL_QUADS_RAST",
"TBEx_FRAG_QUADS_RAST",
"TBEx_FRAG_QUADS_EZS_TEST",
"TBEx_FRAG_QUADS_EZS_UPDATE",
diff --git a/mali_kbase/mali_kbase_gator_hwcnt_names_tnax.h b/mali_kbase/mali_kbase_gator_hwcnt_names_tnax.h
index a90db76..579f57b 100644
--- a/mali_kbase/mali_kbase_gator_hwcnt_names_tnax.h
+++ b/mali_kbase/mali_kbase_gator_hwcnt_names_tnax.h
@@ -196,15 +196,15 @@ static const char * const hardware_counters_mali_tNAx[] = {
"TNAx_EXEC_ICACHE_MISS",
"TNAx_EXEC_STARVE_ARITH",
"TNAx_CALL_BLEND_SHADER",
- "TNAx_TEX_MSGI_NUM_QUADS",
- "TNAx_TEX_DFCH_NUM_PASSES",
- "TNAx_TEX_DFCH_NUM_PASSES_MISS",
- "TNAx_TEX_DFCH_NUM_PASSES_MIP_MAP",
- "TNAx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
- "TNAx_TEX_TFCH_NUM_LINES_FETCHED",
- "TNAx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
- "TNAx_TEX_TFCH_NUM_OPERATIONS",
+ "TNAx_TEX_MSGI_NUM_FLITS",
+ "TNAx_TEX_DFCH_CLK_STALLED",
+ "TNAx_TEX_TFCH_CLK_STALLED",
+ "TNAx_TEX_TFCH_STARVED_PENDING_DATA_FETCH",
"TNAx_TEX_FILT_NUM_OPERATIONS",
+ "TNAx_TEX_FILT_NUM_FXR_OPERATIONS",
+ "TNAx_TEX_FILT_NUM_FST_OPERATIONS",
+ "TNAx_TEX_MSGO_NUM_MSG",
+ "TNAx_TEX_MSGO_NUM_FLITS",
"TNAx_LS_MEM_READ_FULL",
"TNAx_LS_MEM_READ_SHORT",
"TNAx_LS_MEM_WRITE_FULL",
diff --git a/mali_kbase/mali_kbase_gpu_memory_debugfs.c b/mali_kbase/mali_kbase_gpu_memory_debugfs.c
index 514b065..616505c 100644
--- a/mali_kbase/mali_kbase_gpu_memory_debugfs.c
+++ b/mali_kbase/mali_kbase_gpu_memory_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2017, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -43,7 +43,7 @@ static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
kbdev_list = kbase_dev_list_get();
list_for_each(entry, kbdev_list) {
struct kbase_device *kbdev = NULL;
- struct kbasep_kctx_list_element *element;
+ struct kbase_context *kctx;
kbdev = list_entry(entry, struct kbase_device, entry);
/* output the total memory usage and cap for this device */
@@ -51,13 +51,13 @@ static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
kbdev->devname,
atomic_read(&(kbdev->memdev.used_pages)));
mutex_lock(&kbdev->kctx_list_lock);
- list_for_each_entry(element, &kbdev->kctx_list, link) {
+ list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
/* output the memory usage and cap for each kctx
* opened on this device */
seq_printf(sfile, " %s-0x%p %10u\n",
"kctx",
- element->kctx,
- atomic_read(&(element->kctx->used_pages)));
+ kctx,
+ atomic_read(&(kctx->used_pages)));
}
mutex_unlock(&kbdev->kctx_list_lock);
}
diff --git a/mali_kbase/mali_kbase_gpuprops.c b/mali_kbase/mali_kbase_gpuprops.c
index 62ba105..ee3617c 100644
--- a/mali_kbase/mali_kbase_gpuprops.c
+++ b/mali_kbase/mali_kbase_gpuprops.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -32,6 +32,9 @@
#include <mali_kbase_hwaccess_gpuprops.h>
#include "mali_kbase_ioctl.h"
#include <linux/clk.h>
+#include <mali_kbase_pm_internal.h>
+#include <linux/of_platform.h>
+#include <linux/moduleparam.h>
/**
* KBASE_UBFX32 - Extracts bits from a 32-bit bitfield.
@@ -195,7 +198,13 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, str
/* Populate the base_gpu_props structure */
kbase_gpuprops_update_core_props_gpu_id(gpu_props);
gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
+#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE
gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+#else
+ gpu_props->core_props.gpu_available_memory_size =
+ totalram_pages() << PAGE_SHIFT;
+#endif
+
gpu_props->core_props.num_exec_engines =
KBASE_UBFX32(gpu_props->raw_props.core_features, 0, 4);
@@ -303,6 +312,87 @@ void kbase_gpuprops_set_features(struct kbase_device *kbdev)
gpu_props->thread_props.max_thread_group_split = 0;
}
+/*
+ * Module parameters to allow the L2 size and hash configuration to be
+ * overridden.
+ *
+ * These parameters must be set on insmod to take effect, and are not visible
+ * in sysfs.
+ */
+static u8 override_l2_size;
+module_param(override_l2_size, byte, 0);
+MODULE_PARM_DESC(override_l2_size, "Override L2 size config for testing");
+
+static u8 override_l2_hash;
+module_param(override_l2_hash, byte, 0);
+MODULE_PARM_DESC(override_l2_hash, "Override L2 hash config for testing");
+
+/**
+ * kbase_read_l2_config_from_dt - Read L2 configuration
+ * @kbdev: The kbase device for which to get the L2 configuration.
+ *
+ * Check for L2 configuration overrides in module parameters and device tree.
+ * Override values in module parameters take priority over override values in
+ * device tree.
+ *
+ * Return: true if either size or hash was overridden, false if no overrides
+ * were found.
+ */
+static bool kbase_read_l2_config_from_dt(struct kbase_device * const kbdev)
+{
+ struct device_node *np = kbdev->dev->of_node;
+
+ if (!np)
+ return false;
+
+ if (override_l2_size)
+ kbdev->l2_size_override = override_l2_size;
+ else if (of_property_read_u8(np, "l2-size", &kbdev->l2_size_override))
+ kbdev->l2_size_override = 0;
+
+ if (override_l2_hash)
+ kbdev->l2_hash_override = override_l2_hash;
+ else if (of_property_read_u8(np, "l2-hash", &kbdev->l2_hash_override))
+ kbdev->l2_hash_override = 0;
+
+ if (kbdev->l2_size_override || kbdev->l2_hash_override)
+ return true;
+
+ return false;
+}
+
+void kbase_gpuprops_update_l2_features(struct kbase_device *kbdev)
+{
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG)) {
+ struct kbase_gpuprops_regdump regdump;
+ base_gpu_props *gpu_props = &kbdev->gpu_props.props;
+
+ /* Check for L2 cache size & hash overrides */
+ if (!kbase_read_l2_config_from_dt(kbdev))
+ return;
+
+ /* Need L2 to get powered to reflect to L2_FEATURES */
+ kbase_pm_context_active(kbdev);
+
+ /* Wait for the completion of L2 power transition */
+ kbase_pm_wait_for_l2_powered(kbdev);
+
+ /* Dump L2_FEATURES register */
+ kbase_backend_gpuprops_get_l2_features(kbdev, &regdump);
+
+ dev_info(kbdev->dev, "Reflected L2_FEATURES is 0x%x\n",
+ regdump.l2_features);
+
+ /* Update gpuprops with reflected L2_FEATURES */
+ gpu_props->raw_props.l2_features = regdump.l2_features;
+ gpu_props->l2_props.log2_cache_size =
+ KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+
+ /* Let GPU idle */
+ kbase_pm_context_idle(kbdev);
+ }
+}
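
As a worked example of the extraction above, a short sketch of decoding the log2 cache size from an L2_FEATURES value. The bit position comes solely from the KBASE_UBFX32(..., 16U, 8) call shown here; the sample register value is hypothetical.

/* Mirror of KBASE_UBFX32(l2_features, 16, 8): the log2 cache size lives in
 * bits [23:16] of L2_FEATURES.
 */
static inline u32 l2_features_to_log2_cache_size(u32 l2_features)
{
	return (l2_features >> 16) & 0xFF;
}

/* Example (hypothetical value): l2_features = 0x07130206 gives
 * log2_cache_size = 0x13, i.e. a 2^19 = 512 KiB L2 cache.
 */
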
+
static struct {
u32 type;
size_t offset;
diff --git a/mali_kbase/mali_kbase_gpuprops.h b/mali_kbase/mali_kbase_gpuprops.h
index 37d9c08..8edba48 100644
--- a/mali_kbase/mali_kbase_gpuprops.h
+++ b/mali_kbase/mali_kbase_gpuprops.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2015,2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015,2017,2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -55,6 +55,14 @@ void kbase_gpuprops_set(struct kbase_device *kbdev);
void kbase_gpuprops_set_features(struct kbase_device *kbdev);
/**
+ * kbase_gpuprops_update_l2_features - Update GPU property of L2_FEATURES
+ * @kbdev: Device pointer
+ *
+ * This function updates l2_features and the log2 cache size.
+ */
+void kbase_gpuprops_update_l2_features(struct kbase_device *kbdev);
+
+/**
* kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer
* @kbdev: The kbase device
*
diff --git a/mali_kbase/mali_kbase_hw.c b/mali_kbase/mali_kbase_hw.c
index 1503469..65d6c09 100644
--- a/mali_kbase/mali_kbase_hw.c
+++ b/mali_kbase/mali_kbase_hw.c
@@ -224,11 +224,17 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{GPU_ID2_PRODUCT_TTRX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0},
{GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tTRx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tTRx_r0p1},
+ {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tTRx_r0p1},
{U32_MAX, NULL} } },
{GPU_ID2_PRODUCT_TNAX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNAx_r0p0},
{GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tNAx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 0, 4), base_hw_issues_tNAx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 0, 5), base_hw_issues_tNAx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tNAx_r0p1},
+ {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tNAx_r0p1},
{U32_MAX, NULL} } },
{GPU_ID2_PRODUCT_TBEX,
diff --git a/mali_kbase/mali_kbase_hwaccess_backend.h b/mali_kbase/mali_kbase_hwaccess_backend.h
index dde4965..d5e3d3a 100644
--- a/mali_kbase/mali_kbase_hwaccess_backend.h
+++ b/mali_kbase/mali_kbase_hwaccess_backend.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -56,4 +56,18 @@ void kbase_backend_early_term(struct kbase_device *kbdev);
*/
void kbase_backend_late_term(struct kbase_device *kbdev);
+/**
+ * kbase_backend_devfreq_init - Perform backend devfreq related initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_devfreq_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_devfreq_term - Perform backend-devfreq termination.
+ * @kbdev: Device pointer
+ */
+void kbase_backend_devfreq_term(struct kbase_device *kbdev);
+
#endif /* _KBASE_HWACCESS_BACKEND_H_ */
diff --git a/mali_kbase/mali_kbase_hwaccess_gpuprops.h b/mali_kbase/mali_kbase_hwaccess_gpuprops.h
index 63844d9..62628b6 100644
--- a/mali_kbase/mali_kbase_hwaccess_gpuprops.h
+++ b/mali_kbase/mali_kbase_hwaccess_gpuprops.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2018, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -40,7 +40,8 @@ void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
struct kbase_gpuprops_regdump *regdump);
/**
- * kbase_backend_gpuprops_get - Fill @regdump with GPU properties read from GPU
+ * kbase_backend_gpuprops_get_features - Fill @regdump with GPU properties read
+ * from GPU
* @kbdev: Device pointer
* @regdump: Pointer to struct kbase_gpuprops_regdump structure
*
@@ -50,5 +51,17 @@ void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
struct kbase_gpuprops_regdump *regdump);
+/**
+ * kbase_backend_gpuprops_get_l2_features - Fill @regdump with L2_FEATURES read
+ * from GPU
+ * @kbdev: Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads the L2_FEATURES register, which is dependent on the
+ * hardware features bitmask. It will power on the GPU if required.
+ */
+void kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump);
+
#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */
diff --git a/mali_kbase/mali_kbase_hwaccess_pm.h b/mali_kbase/mali_kbase_hwaccess_pm.h
index 5bb3887..44c16f4 100644
--- a/mali_kbase/mali_kbase_hwaccess_pm.h
+++ b/mali_kbase/mali_kbase_hwaccess_pm.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -220,14 +220,15 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
const struct kbase_pm_policy *policy);
/**
- * Retrieve a static list of the available policies.
+ * kbase_pm_list_policies - Retrieve a static list of the available policies.
*
- * @param[out] policies An array pointer to take the list of policies. This may
- * be NULL. The contents of this array must not be
- * modified.
+ * @kbdev: The kbase device structure for the device.
+ * @list: An array pointer to take the list of policies. This may be NULL.
+ * The contents of this array must not be modified.
*
- * @return The number of policies
+ * Return: The number of policies
*/
-int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies);
+int kbase_pm_list_policies(struct kbase_device *kbdev,
+ const struct kbase_pm_policy * const **list);
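
For illustration, a minimal sketch of a caller using the reworked prototype; it assumes each policy object exposes a name string, as the in-tree policies do.

/* Sketch: log every power management policy compiled into the driver. */
static void kbase_log_pm_policies(struct kbase_device *kbdev)
{
	const struct kbase_pm_policy *const *policies;
	int i, count;

	count = kbase_pm_list_policies(kbdev, &policies);
	for (i = 0; i < count; i++)
		dev_info(kbdev->dev, "PM policy %d: %s\n", i, policies[i]->name);
}
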
#endif /* _KBASE_HWACCESS_PM_H_ */
diff --git a/mali_kbase/mali_kbase_hwcnt_backend_gpu.c b/mali_kbase/mali_kbase_hwcnt_backend_gpu.c
index b68607a..a3cc287 100644
--- a/mali_kbase/mali_kbase_hwcnt_backend_gpu.c
+++ b/mali_kbase/mali_kbase_hwcnt_backend_gpu.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,7 +27,7 @@
#include "mali_kbase_pm_policy.h"
#include "mali_kbase_pm_ca.h"
#include "mali_kbase_hwaccess_instr.h"
-#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tracepoints.h"
#ifdef CONFIG_MALI_NO_MALI
#include "backend/gpu/mali_kbase_model_dummy.h"
#endif
@@ -54,7 +54,6 @@ struct kbase_hwcnt_backend_gpu_info {
* @info: Info used to create the backend.
* @kctx: KBase context used for GPU memory allocation and
* counter dumping.
- * @kctx_element: List element used to add kctx to device context list.
* @gpu_dump_va: GPU hardware counter dump buffer virtual address.
* @cpu_dump_va: CPU mapping of gpu_dump_va.
* @vmap: Dump buffer vmap.
@@ -64,7 +63,6 @@ struct kbase_hwcnt_backend_gpu_info {
struct kbase_hwcnt_backend_gpu {
const struct kbase_hwcnt_backend_gpu_info *info;
struct kbase_context *kctx;
- struct kbasep_kctx_list_element *kctx_element;
u64 gpu_dump_va;
void *cpu_dump_va;
struct kbase_vmap_struct *vmap;
@@ -257,7 +255,7 @@ static int kbasep_hwcnt_backend_gpu_dump_alloc(
flags = BASE_MEM_PROT_CPU_RD |
BASE_MEM_PROT_GPU_WR |
- BASE_MEM_PERMANENT_KERNEL_MAPPING |
+ BASEP_MEM_PERMANENT_KERNEL_MAPPING |
BASE_MEM_CACHED_CPU;
if (kctx->kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE)
@@ -310,16 +308,6 @@ static void kbasep_hwcnt_backend_gpu_destroy(
kbasep_hwcnt_backend_gpu_dump_free(
kctx, backend->gpu_dump_va);
- if (backend->kctx_element) {
- mutex_lock(&kbdev->kctx_list_lock);
-
- KBASE_TLSTREAM_TL_DEL_CTX(kbdev, kctx);
- list_del(&backend->kctx_element->link);
-
- mutex_unlock(&kbdev->kctx_list_lock);
- kfree(backend->kctx_element);
- }
-
kbasep_js_release_privileged_ctx(kbdev, kctx);
kbase_destroy_context(kctx);
}
@@ -353,33 +341,13 @@ static int kbasep_hwcnt_backend_gpu_create(
backend->info = info;
- backend->kctx = kbase_create_context(kbdev, true);
+ backend->kctx = kbase_create_context(kbdev, true,
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED, 0, NULL);
if (!backend->kctx)
goto alloc_error;
kbasep_js_schedule_privileged_ctx(kbdev, backend->kctx);
- backend->kctx_element = kzalloc(
- sizeof(*backend->kctx_element), GFP_KERNEL);
- if (!backend->kctx_element)
- goto alloc_error;
-
- backend->kctx_element->kctx = backend->kctx;
-
- /* Add kernel context to list of contexts associated with device. */
- mutex_lock(&kbdev->kctx_list_lock);
-
- list_add(&backend->kctx_element->link, &kbdev->kctx_list);
- /* Fire tracepoint while lock is held, to ensure tracepoint is not
- * created in both body and summary stream
- */
- KBASE_TLSTREAM_TL_NEW_CTX(kbdev,
- backend->kctx,
- backend->kctx->id,
- (u32)(backend->kctx->tgid));
-
- mutex_unlock(&kbdev->kctx_list_lock);
-
errcode = kbasep_hwcnt_backend_gpu_dump_alloc(
info, backend->kctx, &backend->gpu_dump_va);
if (errcode)
diff --git a/mali_kbase/mali_kbase_ioctl.h b/mali_kbase/mali_kbase_ioctl.h
index 033a1bd..4181e72 100644
--- a/mali_kbase/mali_kbase_ioctl.h
+++ b/mali_kbase/mali_kbase_ioctl.h
@@ -66,9 +66,22 @@ extern "C" {
* - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
* 11.13:
* - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ * 11.14:
+ * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set
+ * under base_mem_alloc_flags
+ * 11.15:
+ * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags.
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be
+ * passed to mmap().
+ * 11.16:
+ * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf.
+ * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for
+ * dma-buf. Now, buffers are mapped on GPU when first imported, no longer
+ * requiring external resource or sticky resource tracking, unless
+ * CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
*/
-#define BASE_UK_VERSION_MAJOR 11
-#define BASE_UK_VERSION_MINOR 13
+#define BASE_UK_VERSION_MAJOR ((__u16)11)
+#define BASE_UK_VERSION_MINOR ((__u16)16)
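
A short sketch of how the 11.14 group-ID helpers are intended to be combined with allocation flags; the helper names are the ones used elsewhere in this patch, and the group number 2 is an arbitrary example.

static u64 example_alloc_flags(void)
{
	/* Request CPU-readable, GPU-readable/writable memory backed by
	 * physical memory group 2 (an arbitrary example group).
	 */
	u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
		    BASE_MEM_PROT_GPU_WR;

	flags |= base_mem_group_id_set(2);

	/* base_mem_group_id_get(flags) would now return 2. */
	return flags;
}
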
/**
* struct kbase_ioctl_version_check - Check version compatibility with kernel
@@ -336,6 +349,7 @@ struct kbase_ioctl_mem_jit_init_old {
* @va_pages: Number of VA pages to reserve for JIT
* @max_allocations: Maximum number of concurrent allocations
* @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @group_id: Group ID to be used for physical allocations
* @padding: Currently unused, must be zero
*
* Note that depending on the VA size of the application and GPU, the value
@@ -345,7 +359,8 @@ struct kbase_ioctl_mem_jit_init {
__u64 va_pages;
__u8 max_allocations;
__u8 trim_level;
- __u8 padding[6];
+ __u8 group_id;
+ __u8 padding[5];
};
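
For illustration, a user-space sketch of initialising JIT on interface 11.14 or later, reusing the includes from the earlier handshake sketch; the numeric values are arbitrary examples, and only the fields documented above are assumed.

/* Illustrative only: reserve 1024 pages of JIT VA, allow up to 16
 * concurrent JIT allocations, no trimming on free, default memory group.
 */
static int kbase_jit_init_example(int fd)
{
	struct kbase_ioctl_mem_jit_init jit_init = {
		.va_pages = 1024,
		.max_allocations = 16,
		.trim_level = 0,
		.group_id = 0,
		/* .padding is zero-initialised and must stay zero */
	};

	return ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &jit_init);
}
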
#define KBASE_IOCTL_MEM_JIT_INIT \
diff --git a/mali_kbase/mali_kbase_jd.c b/mali_kbase/mali_kbase_jd.c
index d459ad2..749ca7f 100644
--- a/mali_kbase/mali_kbase_jd.c
+++ b/mali_kbase/mali_kbase_jd.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,7 +35,7 @@
#include <mali_kbase_jm.h>
#include <mali_kbase_hwaccess_jm.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include "mali_kbase_dma_fence.h"
diff --git a/mali_kbase/mali_kbase_jd_debugfs.c b/mali_kbase/mali_kbase_jd_debugfs.c
index 7b15d8a..a74ba30 100644
--- a/mali_kbase/mali_kbase_jd_debugfs.c
+++ b/mali_kbase/mali_kbase_jd_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -229,7 +229,12 @@ static const struct file_operations kbasep_jd_debugfs_atoms_fops = {
void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx)
{
- KBASE_DEBUG_ASSERT(kctx != NULL);
+ /* Caller already ensures this, but we keep the pattern for
+ * maintenance safety.
+ */
+ if (WARN_ON(!kctx) ||
+ WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+ return;
/* Expose all atoms */
debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx,
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index 4f44644..e2e1d17 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -27,7 +27,7 @@
*/
#include <mali_kbase.h>
#include <mali_kbase_js.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_ctx_sched.h>
@@ -1878,7 +1878,7 @@ void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
} else {
/* Already scheduled in - We need to retain it to keep the
* corresponding address space */
- kbasep_js_runpool_retain_ctx(kbdev, kctx);
+ WARN_ON(!kbasep_js_runpool_retain_ctx(kbdev, kctx));
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
mutex_unlock(&js_devdata->queue_mutex);
}
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 5ea54a0..2ccf300 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -43,10 +43,11 @@
#include <mali_midg_regmap.h>
#include <mali_kbase_cache_policy.h>
#include <mali_kbase_hw.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_native_mgm.h>
#include <mali_kbase_mem_pool_group.h>
+
/* Forward declarations */
static void free_partial_locked(struct kbase_context *kctx,
struct kbase_mem_pool *pool, struct tagged_addr tp);
@@ -818,13 +819,17 @@ static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
#endif
int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
- u8 max_allocations, u8 trim_level)
+ u8 max_allocations, u8 trim_level, int group_id)
{
int err = 0;
if (trim_level > 100)
return -EINVAL;
+ if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) ||
+ WARN_ON(group_id < 0))
+ return -EINVAL;
+
kbase_gpu_vm_lock(kctx);
#ifdef CONFIG_64BIT
@@ -840,6 +845,7 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
kctx->jit_max_allocations = max_allocations;
kctx->trim_level = trim_level;
kctx->jit_va = true;
+ kctx->jit_group_id = group_id;
}
kbase_gpu_vm_unlock(kctx);
@@ -1231,23 +1237,42 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc);
}
+ if (reg->flags & KBASE_REG_IMPORT_PAD &&
+ !WARN_ON(reg->nr_pages < reg->gpu_alloc->nents) &&
+ reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM &&
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+ /* For padded imported dma-buf memory, map the dummy aliasing
+ * page from the end of the dma-buf pages, to the end of the
+ * region using a read only mapping.
+ *
+ * Only map when it's imported dma-buf memory that is currently
+ * mapped.
+ *
+ * Assume reg->gpu_alloc->nents is the number of actual pages
+ * in the dma-buf memory.
+ */
+ err = kbase_mmu_insert_single_page(kctx,
+ reg->start_pfn + reg->gpu_alloc->nents,
+ kctx->aliasing_sink_page,
+ reg->nr_pages - reg->gpu_alloc->nents,
+ (reg->flags | KBASE_REG_GPU_RD) &
+ ~KBASE_REG_GPU_WR);
+ if (err)
+ goto bad_insert;
+ }
+
return err;
bad_insert:
- if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
- u64 stride;
+ kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+ reg->start_pfn, reg->nr_pages,
+ kctx->as_nr);
- stride = reg->gpu_alloc->imported.alias.stride;
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
while (i--)
- if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
- kbase_mmu_teardown_pages(kctx->kbdev,
- &kctx->mmu,
- reg->start_pfn + (i * stride),
- reg->gpu_alloc->imported.alias.aliased[i].length,
- kctx->as_nr);
+ if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
- }
}
kbase_remove_va_region(reg);
@@ -1263,43 +1288,57 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
{
int err = 0;
+ size_t i;
if (reg->start_pfn == 0)
return 0;
- if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
- size_t i;
+ if (!reg->gpu_alloc)
+ return -EINVAL;
+	/* Tear down GPU page tables, depending on memory type. */
+ switch (reg->gpu_alloc->type) {
+ case KBASE_MEM_TYPE_ALIAS: /* Fall-through */
+ case KBASE_MEM_TYPE_IMPORTED_UMM:
err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
reg->start_pfn, reg->nr_pages, kctx->as_nr);
- KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
- for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
- if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
- kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
- } else if (reg->gpu_alloc) {
+ break;
+ default:
err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
reg->start_pfn, kbase_reg_current_backed_size(reg),
kctx->as_nr);
- kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
+ break;
}
- if (reg->gpu_alloc && reg->gpu_alloc->type ==
- KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
- struct kbase_alloc_import_user_buf *user_buf =
- &reg->gpu_alloc->imported.user_buf;
+ /* Update tracking, and other cleanup, depending on memory type. */
+ switch (reg->gpu_alloc->type) {
+ case KBASE_MEM_TYPE_ALIAS:
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+ for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
+ if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+ break;
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+ struct kbase_alloc_import_user_buf *user_buf =
+ &reg->gpu_alloc->imported.user_buf;
- if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
- user_buf->current_mapping_usage_count &=
- ~PINNED_ON_IMPORT;
+ if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
+ user_buf->current_mapping_usage_count &=
+ ~PINNED_ON_IMPORT;
- kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc,
- (reg->flags & KBASE_REG_GPU_WR));
+ /* The allocation could still have active mappings. */
+ if (user_buf->current_mapping_usage_count == 0) {
+ kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc,
+ (reg->flags & KBASE_REG_GPU_WR));
+ }
+ }
}
+ /* Fall-through */
+ default:
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
+ break;
}
- if (err)
- return err;
-
return err;
}
@@ -1477,8 +1516,18 @@ static int kbase_do_syncset(struct kbase_context *kctx,
goto out_unlock;
}
- if (!(reg->flags & KBASE_REG_CPU_CACHED) ||
- kbase_mem_is_imported(reg->gpu_alloc->type))
+ /*
+ * Handle imported memory before checking for KBASE_REG_CPU_CACHED. The
+ * CPU mapping cacheability is defined by the owner of the imported
+ * memory, and not by kbase, therefore we must assume that any imported
+ * memory may be cached.
+ */
+ if (kbase_mem_is_imported(reg->gpu_alloc->type)) {
+ err = kbase_mem_do_sync_imported(kctx, reg, sync_fn);
+ goto out_unlock;
+ }
+
+ if (!(reg->flags & KBASE_REG_CPU_CACHED))
goto out_unlock;
start = (uintptr_t)sset->user_addr;
@@ -1756,9 +1805,12 @@ int kbase_update_region_flags(struct kbase_context *kctx,
KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
}
- if (flags & BASE_MEM_PERMANENT_KERNEL_MAPPING)
+ if (flags & BASEP_MEM_PERMANENT_KERNEL_MAPPING)
reg->flags |= KBASE_REG_PERMANENT_KERNEL_MAPPING;
+ if (flags & BASEP_MEM_NO_USER_FREE)
+ reg->flags |= KBASE_REG_NO_USER_FREE;
+
if (flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE)
reg->flags |= KBASE_REG_GPU_VA_SAME_4GB_PAGE;
@@ -2150,7 +2202,8 @@ invalid_request:
return NULL;
}
-static void free_partial(struct kbase_context *kctx, struct tagged_addr tp)
+static void free_partial(struct kbase_context *kctx, int group_id, struct
+ tagged_addr tp)
{
struct page *p, *head_page;
struct kbase_sub_alloc *sa;
@@ -2163,7 +2216,7 @@ static void free_partial(struct kbase_context *kctx, struct tagged_addr tp)
if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
list_del(&sa->link);
kbase_mem_pool_free(
- &kctx->mem_pools.large[BASE_MEM_GROUP_DEFAULT],
+ &kctx->mem_pools.large[group_id],
head_page,
true);
kfree(sa);
@@ -2224,7 +2277,7 @@ int kbase_free_phy_pages_helper(
start_free += 512;
freed += 512;
} else if (is_partial(*start_free)) {
- free_partial(kctx, *start_free);
+ free_partial(kctx, alloc->group_id, *start_free);
nr_pages_to_free--;
start_free++;
freed++;
@@ -2391,6 +2444,12 @@ void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
}
}
+/**
+ * kbase_jd_user_buf_unpin_pages - Release the pinned pages of a user buffer.
+ * @alloc: The allocation for the imported user buffer.
+ */
+static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc);
+
void kbase_mem_kref_free(struct kref *kref)
{
struct kbase_mem_phy_alloc *alloc;
@@ -2442,12 +2501,22 @@ void kbase_mem_kref_free(struct kref *kref)
break;
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM:
+ if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
+ WARN_ONCE(alloc->imported.umm.current_mapping_usage_count != 1,
+			       "WARNING: expected exactly 1 mapping, got %d",
+ alloc->imported.umm.current_mapping_usage_count);
+ dma_buf_unmap_attachment(
+ alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt,
+ DMA_BIDIRECTIONAL);
+ }
dma_buf_detach(alloc->imported.umm.dma_buf,
alloc->imported.umm.dma_attachment);
dma_buf_put(alloc->imported.umm.dma_buf);
break;
#endif
case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ kbase_jd_user_buf_unpin_pages(alloc);
if (alloc->imported.user_buf.mm)
mmdrop(alloc->imported.user_buf.mm);
if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
@@ -2863,6 +2932,13 @@ KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
void kbase_jit_debugfs_init(struct kbase_context *kctx)
{
+ /* Caller already ensures this, but we keep the pattern for
+ * maintenance safety.
+ */
+ if (WARN_ON(!kctx) ||
+ WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+ return;
+
/* Debugfs entry for getting the number of JIT allocations. */
debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry,
kctx, &kbase_jit_debugfs_count_fops);
@@ -3000,13 +3076,13 @@ static int kbase_jit_grow(struct kbase_context *kctx,
}
if (pages_required >= (SZ_2M / SZ_4K)) {
- pool = &kctx->mem_pools.large[BASE_MEM_GROUP_DEFAULT];
+ pool = &kctx->mem_pools.large[kctx->jit_group_id];
/* Round up to number of 2 MB pages required */
pages_required += ((SZ_2M / SZ_4K) - 1);
pages_required /= (SZ_2M / SZ_4K);
} else {
#endif
- pool = &kctx->mem_pools.small[BASE_MEM_GROUP_DEFAULT];
+ pool = &kctx->mem_pools.small[kctx->jit_group_id];
#ifdef CONFIG_MALI_2MB_ALLOC
}
#endif
@@ -3252,7 +3328,8 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
/* No suitable JIT allocation was found so create a new one */
u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
- BASE_MEM_COHERENT_LOCAL;
+ BASE_MEM_COHERENT_LOCAL |
+ BASEP_MEM_NO_USER_FREE;
u64 gpu_addr;
mutex_unlock(&kctx->jit_evict_lock);
@@ -3260,6 +3337,8 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP)
flags |= BASE_MEM_TILER_ALIGN_TOP;
+ flags |= base_mem_group_id_set(kctx->jit_group_id);
+
reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
info->extent, &flags, &gpu_addr);
if (!reg) {
@@ -3272,8 +3351,6 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
goto out_unlocked;
}
- reg->flags |= KBASE_REG_NO_USER_FREE;
-
mutex_lock(&kctx->jit_evict_lock);
list_add(&reg->jit_node, &kctx->jit_active_head);
mutex_unlock(&kctx->jit_evict_lock);
@@ -3453,30 +3530,47 @@ bool kbase_has_exec_va_zone(struct kbase_context *kctx)
return has_exec_va_zone;
}
-static int kbase_jd_user_buf_map(struct kbase_context *kctx,
+static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc)
+{
+ if (alloc->nents) {
+ struct page **pages = alloc->imported.user_buf.pages;
+ long i;
+
+ WARN_ON(alloc->nents != alloc->imported.user_buf.nr_pages);
+
+ for (i = 0; i < alloc->nents; i++) {
+			/* We should only get here if the userbuf pages were
+			 * pinned but never mapped on the GPU side; had they
+			 * been mapped, they would already have been unpinned
+			 * before kbase_mem_kref_free() was called.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
struct kbase_va_region *reg)
{
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+ struct page **pages = alloc->imported.user_buf.pages;
+ unsigned long address = alloc->imported.user_buf.address;
+ struct mm_struct *mm = alloc->imported.user_buf.mm;
long pinned_pages;
- struct kbase_mem_phy_alloc *alloc;
- struct page **pages;
- struct tagged_addr *pa;
long i;
- int err = -ENOMEM;
- unsigned long address;
- struct mm_struct *mm;
- struct device *dev;
- unsigned long offset;
- unsigned long local_size;
- unsigned long gwt_mask = ~0;
- alloc = reg->gpu_alloc;
- pa = kbase_get_gpu_phy_pages(reg);
- address = alloc->imported.user_buf.address;
- mm = alloc->imported.user_buf.mm;
+ if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+ return -EINVAL;
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+ if (alloc->nents) {
+ if (WARN_ON(alloc->nents != alloc->imported.user_buf.nr_pages))
+ return -EINVAL;
+ else
+ return 0;
+ }
- pages = alloc->imported.user_buf.pages;
+ if (WARN_ON(reg->gpu_alloc->imported.user_buf.mm != current->mm))
+ return -EINVAL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
pinned_pages = get_user_pages(NULL, mm,
@@ -3519,6 +3613,35 @@ KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
return -ENOMEM;
}
+ alloc->nents = pinned_pages;
+
+ return 0;
+}
+
+static int kbase_jd_user_buf_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ long pinned_pages;
+ struct kbase_mem_phy_alloc *alloc;
+ struct page **pages;
+ struct tagged_addr *pa;
+ long i;
+ int err = -ENOMEM;
+ unsigned long address;
+ struct device *dev;
+ unsigned long offset;
+ unsigned long local_size;
+ unsigned long gwt_mask = ~0;
+
+ err = kbase_jd_user_buf_pin_pages(kctx, reg);
+ if (err)
+ return err;
+
+ alloc = reg->gpu_alloc;
+ pa = kbase_get_gpu_phy_pages(reg);
+ address = alloc->imported.user_buf.address;
+ pinned_pages = alloc->nents;
+ pages = alloc->imported.user_buf.pages;
dev = kctx->kbdev->dev;
offset = address & ~PAGE_MASK;
local_size = alloc->imported.user_buf.size;
@@ -3541,7 +3664,6 @@ KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
offset = 0;
}
- alloc->nents = pinned_pages;
#ifdef CONFIG_MALI_CINSTR_GWT
if (kctx->gwt_enabled)
gwt_mask = ~KBASE_REG_GPU_WR;
@@ -3553,9 +3675,9 @@ KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
if (err == 0)
return 0;
- alloc->nents = 0;
/* fall down */
unwind:
+ alloc->nents = 0;
while (i--) {
dma_unmap_page(kctx->kbdev->dev,
alloc->imported.user_buf.dma_addrs[i],
@@ -3570,6 +3692,10 @@ unwind:
return err;
}
+/* This function also performs the work of kbase_jd_user_buf_unpin_pages()
+ * which implies that a call to kbase_jd_user_buf_pin_pages() may NOT
+ * necessarily have a corresponding call to kbase_jd_user_buf_unpin_pages().
+ */
static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
struct kbase_mem_phy_alloc *alloc, bool writeable)
{
@@ -3596,128 +3722,19 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
alloc->nents = 0;
}
-#ifdef CONFIG_DMA_SHARED_BUFFER
-static int kbase_jd_umm_map(struct kbase_context *kctx,
- struct kbase_va_region *reg)
-{
- struct sg_table *sgt;
- struct scatterlist *s;
- int i;
- struct tagged_addr *pa;
- int err;
- size_t count = 0;
- struct kbase_mem_phy_alloc *alloc;
- unsigned long gwt_mask = ~0;
-
- alloc = reg->gpu_alloc;
-
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM);
- KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt);
- sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
- DMA_BIDIRECTIONAL);
-
- if (IS_ERR_OR_NULL(sgt))
- return -EINVAL;
-
- /* save for later */
- alloc->imported.umm.sgt = sgt;
-
- pa = kbase_get_gpu_phy_pages(reg);
- KBASE_DEBUG_ASSERT(pa);
-
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- size_t j, pages = PFN_UP(sg_dma_len(s));
-
- WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
- "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
- sg_dma_len(s));
-
- WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
- "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
- (unsigned long long) sg_dma_address(s));
-
- for (j = 0; (j < pages) && (count < reg->nr_pages); j++,
- count++)
- *pa++ = as_tagged(sg_dma_address(s) +
- (j << PAGE_SHIFT));
- WARN_ONCE(j < pages,
- "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
- alloc->imported.umm.dma_buf->size);
- }
-
- if (!(reg->flags & KBASE_REG_IMPORT_PAD) &&
- WARN_ONCE(count < reg->nr_pages,
- "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
- alloc->imported.umm.dma_buf->size)) {
- err = -EINVAL;
- goto err_unmap_attachment;
- }
-
- /* Update nents as we now have pages to map */
- alloc->nents = reg->nr_pages;
-
-#ifdef CONFIG_MALI_CINSTR_GWT
- if (kctx->gwt_enabled)
- gwt_mask = ~KBASE_REG_GPU_WR;
-#endif
-
- err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
- kbase_get_gpu_phy_pages(reg),
- count,
- (reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD) &
- gwt_mask,
- kctx->as_nr);
- if (err)
- goto err_unmap_attachment;
-
- if (reg->flags & KBASE_REG_IMPORT_PAD) {
- err = kbase_mmu_insert_single_page(kctx,
- reg->start_pfn + count,
- kctx->aliasing_sink_page,
- reg->nr_pages - count,
- (reg->flags | KBASE_REG_GPU_RD) &
- ~KBASE_REG_GPU_WR);
- if (err)
- goto err_teardown_orig_pages;
- }
-
- return 0;
-
-err_teardown_orig_pages:
- kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
- count, kctx->as_nr);
-err_unmap_attachment:
- dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
- alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- alloc->imported.umm.sgt = NULL;
-
- return err;
-}
-
-static void kbase_jd_umm_unmap(struct kbase_context *kctx,
- struct kbase_mem_phy_alloc *alloc)
-{
- KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(alloc);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
- dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
- alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- alloc->imported.umm.sgt = NULL;
- alloc->nents = 0;
-}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
-
struct kbase_mem_phy_alloc *kbase_map_external_resource(
struct kbase_context *kctx, struct kbase_va_region *reg,
struct mm_struct *locked_mm)
{
int err;
+ lockdep_assert_held(&kctx->reg_lock);
+
/* decide what needs to happen for this resource */
switch (reg->gpu_alloc->type) {
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- if (reg->gpu_alloc->imported.user_buf.mm != locked_mm)
+ if ((reg->gpu_alloc->imported.user_buf.mm != locked_mm) &&
+ (!reg->gpu_alloc->nents))
goto exit;
reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
@@ -3732,14 +3749,9 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
break;
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
- reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
- if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
- err = kbase_jd_umm_map(kctx, reg);
- if (err) {
- reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
- goto exit;
- }
- }
+ err = kbase_mem_umm_map(kctx, reg);
+ if (err)
+ goto exit;
break;
}
#endif
@@ -3758,23 +3770,7 @@ void kbase_unmap_external_resource(struct kbase_context *kctx,
switch (alloc->type) {
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
- alloc->imported.umm.current_mapping_usage_count--;
-
- if (0 == alloc->imported.umm.current_mapping_usage_count) {
- if (reg && reg->gpu_alloc == alloc) {
- int err;
-
- err = kbase_mmu_teardown_pages(
- kctx->kbdev,
- &kctx->mmu,
- reg->start_pfn,
- alloc->nents,
- kctx->as_nr);
- WARN_ON(err);
- }
-
- kbase_jd_umm_unmap(kctx, alloc);
- }
+ kbase_mem_umm_unmap(kctx, reg, alloc);
}
break;
#endif /* CONFIG_DMA_SHARED_BUFFER */
@@ -3784,7 +3780,8 @@ void kbase_unmap_external_resource(struct kbase_context *kctx,
if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
bool writeable = true;
- if (reg && reg->gpu_alloc == alloc)
+ if (!kbase_is_region_invalid_or_free(reg) &&
+ reg->gpu_alloc == alloc)
kbase_mmu_teardown_pages(
kctx->kbdev,
&kctx->mmu,
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index de5550f..672bcac 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -726,7 +726,7 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
/**
* kbase_mem_pool_alloc_pages - Allocate pages from memory pool
* @pool: Memory pool to allocate from
- * @nr_pages: Number of pages to allocate
+ * @nr_4k_pages: Number of pages to allocate
* @pages: Pointer to array where the physical address of the allocated
* pages will be stored.
* @partial_allowed: If fewer pages allocated is allowed
@@ -745,7 +745,7 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
* the kernel OoM killer runs. If the caller must allocate pages while holding
* this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
*/
-int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
struct tagged_addr *pages, bool partial_allowed);
/**
@@ -911,11 +911,13 @@ int kbase_region_tracker_init(struct kbase_context *kctx);
* @jit_va_pages: Size of the JIT region in pages
* @max_allocations: Maximum number of allocations allowed for the JIT region
* @trim_level: Trim level for the JIT region
+ * @group_id: The physical group ID from which to allocate JIT memory.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
*
* Return: 0 if success, negative error code otherwise.
*/
int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
- u8 max_allocations, u8 trim_level);
+ u8 max_allocations, u8 trim_level, int group_id);
/**
* kbase_region_tracker_init_exec - Initialize the EXEC_VA region
@@ -1013,20 +1015,24 @@ int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size
*
* The structure should be terminated using kbase_mmu_term()
*
- * @kbdev: kbase device
- * @mmut: structure to initialise
- * @kctx: optional kbase context, may be NULL if this set of MMU tables is not
- * associated with a context
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @mmut: GPU page tables to be initialized.
+ * @kctx: Optional kbase context, may be NULL if this set of MMU tables
+ * is not associated with a context.
+ * @group_id: The physical group ID from which to allocate GPU page tables.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return: 0 if successful, otherwise a negative error code.
*/
int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- struct kbase_context *kctx);
+ struct kbase_context *kctx, int group_id);
/**
* kbase_mmu_term - Terminate an object representing GPU page tables
*
* This will free any page tables that have been allocated
*
- * @kbdev: kbase device
- * @mmut: kbase_mmu_table to be destroyed
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @mmut: GPU page tables to be destroyed.
*/
void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
@@ -1519,6 +1525,21 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
void kbase_unmap_external_resource(struct kbase_context *kctx,
struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_jd_user_buf_pin_pages - Pin the pages of a user buffer.
+ * @kctx: kbase context.
+ * @reg: The region associated with the imported user buffer.
+ *
+ * To successfully pin the pages for a user buffer the current mm_struct must
+ * be the same as the mm_struct of the user buffer. After successfully pinning
+ * the pages further calls to this function succeed without doing work.
+ *
+ * Return: zero on success or negative number on failure.
+ */
+int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
+ struct kbase_va_region *reg);
+
/**
* kbase_sticky_resource_init - Initialize sticky resource management.
* @kctx: kbase context
@@ -1584,4 +1605,54 @@ static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+/**
+ * kbase_mem_umm_map - Map dma-buf
+ * @kctx: Pointer to the kbase context
+ * @reg: Pointer to the region of the imported dma-buf to map
+ *
+ * Map a dma-buf on the GPU. The mappings are reference counted.
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int kbase_mem_umm_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg);
+
+/**
+ * kbase_mem_umm_unmap - Unmap dma-buf
+ * @kctx: Pointer to the kbase context
+ * @reg: Pointer to the region of the imported dma-buf to unmap
+ * @alloc: Pointer to the alloc to release
+ *
+ * Unmap a dma-buf from the GPU. The mappings are reference counted.
+ *
+ * @reg must be the original region with GPU mapping of @alloc; or NULL. If
+ * @reg is NULL, or doesn't match @alloc, the GPU page table entries matching
+ * @reg will not be updated.
+ *
+ * @alloc must be a valid physical allocation of type
+ * KBASE_MEM_TYPE_IMPORTED_UMM that was previously mapped by
+ * kbase_mem_umm_map(). The dma-buf attachment referenced by @alloc will
+ * release its mapping reference, and if the refcount reaches 0, also be
+ * unmapped, regardless of the value of @reg.
+ */
+void kbase_mem_umm_unmap(struct kbase_context *kctx,
+ struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_mem_do_sync_imported - Sync caches for imported memory
+ * @kctx: Pointer to the kbase context
+ * @reg: Pointer to the region with imported memory to sync
+ * @sync_fn: The type of sync operation to perform
+ *
+ * Sync CPU caches for supported (currently only dma-buf (UMM)) memory.
+ * Attempting to sync unsupported imported memory types will result in an error
+ * code, -EINVAL.
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+int kbase_mem_do_sync_imported(struct kbase_context *kctx,
+ struct kbase_va_region *reg, enum kbase_sync_type sync_fn);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
#endif /* _KBASE_MEM_H_ */
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 70da90c..d62940c 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -47,9 +47,20 @@
#include <mali_kbase.h>
#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_ioctl.h>
+#if KERNEL_VERSION(4, 17, 2) > LINUX_VERSION_CODE
+/* Enable workaround for ion in v4.17.2 and earlier to avoid the potentially
+ * disruptive warnings which can come if begin_cpu_access and end_cpu_access
+ * methods are not called in pairs.
+ * dma_sync_sg_for_* calls will be made directly as a workaround.
+ * This will also address the case on kernels prior to 4.12, where ion lacks
+ * the cache maintenance in begin_cpu_access and end_cpu_access methods.
+ */
+#define KBASE_MEM_ION_SYNC_WORKAROUND
+#endif
+
#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE)
#define vm_fault_t int
@@ -326,7 +337,8 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
if (kbase_update_region_flags(kctx, reg, *flags) != 0)
goto invalid_flags;
- if (kbase_reg_prepare_native(reg, kctx, BASE_MEM_GROUP_DEFAULT) != 0) {
+ if (kbase_reg_prepare_native(reg, kctx,
+ base_mem_group_id_get(*flags)) != 0) {
dev_err(dev, "Failed to prepare region");
goto prepare_failed;
}
@@ -384,8 +396,6 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
BUG_ON(kctx->pending_regions[cookie_nr]);
kctx->pending_regions[cookie_nr] = reg;
- kbase_gpu_vm_unlock(kctx);
-
/* relocate to correct base */
cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE);
cookie <<= PAGE_SHIFT;
@@ -397,8 +407,13 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
if (kctx->api_version < KBASE_API_VERSION(10, 1) ||
kctx->api_version > KBASE_API_VERSION(10, 4)) {
*gpu_va = (u64) cookie;
+ kbase_gpu_vm_unlock(kctx);
return reg;
}
+
+ kbase_va_region_alloc_get(kctx, reg);
+ kbase_gpu_vm_unlock(kctx);
+
if (*flags & BASE_MEM_PROT_CPU_RD)
prot |= PROT_READ;
if (*flags & BASE_MEM_PROT_CPU_WR)
@@ -407,15 +422,42 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
cpu_addr = vm_mmap(kctx->filp, 0, va_map, prot,
MAP_SHARED, cookie);
- if (IS_ERR_VALUE(cpu_addr)) {
- kbase_gpu_vm_lock(kctx);
- kctx->pending_regions[cookie_nr] = NULL;
- kctx->cookies |= (1UL << cookie_nr);
- kbase_gpu_vm_unlock(kctx);
- goto no_mmap;
+ kbase_gpu_vm_lock(kctx);
+
+ /* Since vm lock was released, check if the region has already
+		 * been freed in the meantime. This could happen if userspace
+		 * was able to guess the cookie or the CPU VA and freed the
+		 * region through the guessed value.
+ */
+ if (reg->flags & KBASE_REG_VA_FREED) {
+ kbase_va_region_alloc_put(kctx, reg);
+ reg = NULL;
+ } else if (IS_ERR_VALUE(cpu_addr)) {
+ /* Once the vm lock is released, multiple scenarios can
+ * arise under which the cookie could get re-assigned
+ * to some other region.
+ */
+ if (!WARN_ON(kctx->pending_regions[cookie_nr] &&
+ (kctx->pending_regions[cookie_nr] != reg))) {
+ kctx->pending_regions[cookie_nr] = NULL;
+ kctx->cookies |= (1UL << cookie_nr);
+ }
+
+			/* The region has not been freed and we can be sure
+			 * that userspace won't be able to free it now, so we
+			 * can free it ourselves.
+ * If the region->start_pfn isn't zero then the
+ * allocation will also be unmapped from GPU side.
+ */
+ kbase_mem_free_region(kctx, reg);
+ kbase_va_region_alloc_put(kctx, reg);
+ reg = NULL;
+ } else {
+ kbase_va_region_alloc_put(kctx, reg);
+ *gpu_va = (u64) cpu_addr;
}
- *gpu_va = (u64) cpu_addr;
+ kbase_gpu_vm_unlock(kctx);
} else /* we control the VA */ {
if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
dev_warn(dev, "Failed to map memory on GPU");
@@ -520,6 +562,8 @@ int kbase_mem_query(struct kbase_context *kctx,
if (KBASE_REG_GPU_VA_SAME_4GB_PAGE & reg->flags)
*out |= BASE_MEM_GPU_VA_SAME_4GB_PAGE;
+ *out |= base_mem_group_id_set(reg->cpu_alloc->group_id);
+
WARN(*out & ~BASE_MEM_FLAGS_QUERYABLE,
"BASE_MEM_FLAGS_QUERYABLE needs updating\n");
*out &= BASE_MEM_FLAGS_QUERYABLE;
@@ -800,7 +844,7 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
struct kbase_va_region *reg;
int ret = -EINVAL;
unsigned int real_flags = 0;
- unsigned int prev_flags = 0;
+ unsigned int new_flags = 0;
bool prev_needed, new_needed;
KBASE_DEBUG_ASSERT(kctx);
@@ -875,25 +919,59 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
goto out_unlock;
}
- /* save for roll back */
- prev_flags = reg->flags;
- reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
- reg->flags |= real_flags;
+ new_flags = reg->flags & ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+ new_flags |= real_flags;
/* Currently supporting only imported memory */
#ifdef CONFIG_DMA_SHARED_BUFFER
- if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
- /* Future use will use the new flags, existing mapping will NOT be updated
- * as memory should not be in use by the GPU when updating the flags.
+ if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
+ /* Future use will use the new flags, existing mapping
+ * will NOT be updated as memory should not be in use
+ * by the GPU when updating the flags.
*/
- ret = 0;
WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
- }
+ ret = 0;
+ } else if (reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+ /*
+		 * When CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is not enabled the
+		 * dma-buf GPU mapping should always be present. Check that
+		 * this is the case, and warn and skip the page table update
+		 * if it is not.
+ *
+ * Then update dma-buf GPU mapping with the new flags.
+ *
+ * Note: The buffer must not be in use on the GPU when
+ * changing flags. If the buffer is in active use on
+ * the GPU, there is a risk that the GPU may trigger a
+ * shareability fault, as it will see the same
+		 * addresses from the buffer with different shareability
+ * properties.
+ */
+ dev_dbg(kctx->kbdev->dev,
+ "Updating page tables on mem flag change\n");
+ ret = kbase_mmu_update_pages(kctx, reg->start_pfn,
+ kbase_get_gpu_phy_pages(reg),
+ kbase_reg_current_backed_size(reg),
+ new_flags);
+ if (ret)
+ dev_warn(kctx->kbdev->dev,
+ "Failed to update GPU page tables on flag change: %d\n",
+ ret);
+ } else
+ WARN_ON(!reg->gpu_alloc->imported.umm.current_mapping_usage_count);
+#else
+ /* Reject when dma-buf support is not enabled. */
+ ret = -EINVAL;
#endif /* CONFIG_DMA_SHARED_BUFFER */
- /* roll back on error */
- if (ret)
- reg->flags = prev_flags;
+ /* If everything is good, then set the new flags on the region. */
+ if (!ret)
+ reg->flags = new_flags;
out_unlock:
kbase_gpu_vm_unlock(kctx);
@@ -904,7 +982,310 @@ out:
#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
+int kbase_mem_do_sync_imported(struct kbase_context *kctx,
+ struct kbase_va_region *reg, enum kbase_sync_type sync_fn)
+{
+ int ret = -EINVAL;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ struct dma_buf *dma_buf;
+ enum dma_data_direction dir = DMA_BIDIRECTIONAL;
+
+ /* We assume that the same physical allocation object is used for both
+ * GPU and CPU for imported buffers.
+ */
+ WARN_ON(reg->cpu_alloc != reg->gpu_alloc);
+
+ /* Currently only handle dma-bufs
+ *
+ * Also, attempting to sync with CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND
+ * enabled can expose us to a Linux Kernel issue between v4.6 and
+	 * v4.19. We will not attempt to support cache syncs on dma-bufs that
+	 * are mapped on demand (i.e. not on import), not even on pre-4.6 or
+	 * on 4.20 or newer kernels, because this makes it difficult for
+	 * userspace to know when it can rely on the cache sync.
+ * Instead, only support syncing when we've mapped dma-bufs on import.
+ */
+ if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM ||
+ IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND))
+ return ret;
+
+ dma_buf = reg->gpu_alloc->imported.umm.dma_buf;
+
+ switch (sync_fn) {
+ case KBASE_SYNC_TO_DEVICE:
+ dev_dbg(kctx->kbdev->dev,
+ "Syncing imported buffer at GPU VA %llx to GPU\n",
+ reg->start_pfn);
+#ifdef KBASE_MEM_ION_SYNC_WORKAROUND
+ if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
+ struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
+ struct sg_table *sgt = reg->gpu_alloc->imported.umm.sgt;
+
+ dma_sync_sg_for_device(attachment->dev, sgt->sgl,
+ sgt->nents, dir);
+ ret = 0;
+ }
+#else
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
+ dma_buf_end_cpu_access(dma_buf,
+ 0, dma_buf->size,
+ dir);
+ ret = 0;
+#else
+ ret = dma_buf_end_cpu_access(dma_buf,
+ dir);
+#endif
+#endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+ break;
+ case KBASE_SYNC_TO_CPU:
+ dev_dbg(kctx->kbdev->dev,
+ "Syncing imported buffer at GPU VA %llx to CPU\n",
+ reg->start_pfn);
+#ifdef KBASE_MEM_ION_SYNC_WORKAROUND
+ if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
+ struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
+ struct sg_table *sgt = reg->gpu_alloc->imported.umm.sgt;
+
+ dma_sync_sg_for_cpu(attachment->dev, sgt->sgl,
+ sgt->nents, dir);
+ ret = 0;
+ }
+#else
+ ret = dma_buf_begin_cpu_access(dma_buf,
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
+ 0, dma_buf->size,
+#endif
+ dir);
+#endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+ break;
+ };
+
+ if (unlikely(ret))
+ dev_warn(kctx->kbdev->dev,
+ "Failed to sync mem region %pK at GPU VA %llx: %d\n",
+ reg, reg->start_pfn, ret);
+
+#else /* CONFIG_DMA_SHARED_BUFFER */
+ CSTD_UNUSED(kctx);
+ CSTD_UNUSED(reg);
+ CSTD_UNUSED(sync_fn);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+ return ret;
+}
+
#ifdef CONFIG_DMA_SHARED_BUFFER
+/**
+ * kbase_mem_umm_unmap_attachment - Unmap dma-buf attachment
+ * @kctx: Pointer to kbase context
+ * @alloc: Pointer to allocation with imported dma-buf memory to unmap
+ *
+ * This will unmap a dma-buf. Must be called after the GPU page tables for the
+ * region have been torn down.
+ */
+static void kbase_mem_umm_unmap_attachment(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc)
+{
+ struct tagged_addr *pa = alloc->pages;
+
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+ alloc->imported.umm.sgt = NULL;
+
+ memset(pa, 0xff, sizeof(*pa) * alloc->nents);
+ alloc->nents = 0;
+}
+
+/**
+ * kbase_mem_umm_map_attachment - Prepare attached dma-buf for GPU mapping
+ * @kctx: Pointer to kbase context
+ * @reg: Pointer to region with imported dma-buf memory to map
+ *
+ * Map the dma-buf and prepare the page array with the tagged Mali physical
+ * addresses for GPU mapping.
+ *
+ * Return: 0 on success, or negative error code
+ */
+static int kbase_mem_umm_map_attachment(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ struct sg_table *sgt;
+ struct scatterlist *s;
+ int i;
+ struct tagged_addr *pa;
+ int err;
+ size_t count = 0;
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+
+ WARN_ON_ONCE(alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM);
+ WARN_ON_ONCE(alloc->imported.umm.sgt);
+
+ sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(sgt))
+ return -EINVAL;
+
+ /* save for later */
+ alloc->imported.umm.sgt = sgt;
+
+ pa = kbase_get_gpu_phy_pages(reg);
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ size_t j, pages = PFN_UP(sg_dma_len(s));
+
+ WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
+ "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
+ sg_dma_len(s));
+
+ WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
+ "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
+ (unsigned long long) sg_dma_address(s));
+
+ for (j = 0; (j < pages) && (count < reg->nr_pages); j++, count++)
+ *pa++ = as_tagged(sg_dma_address(s) +
+ (j << PAGE_SHIFT));
+ WARN_ONCE(j < pages,
+ "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
+ alloc->imported.umm.dma_buf->size);
+ }
+
+ if (!(reg->flags & KBASE_REG_IMPORT_PAD) &&
+ WARN_ONCE(count < reg->nr_pages,
+ "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
+ alloc->imported.umm.dma_buf->size)) {
+ err = -EINVAL;
+ goto err_unmap_attachment;
+ }
+
+ /* Update nents as we now have pages to map */
+ alloc->nents = count;
+
+ return 0;
+
+err_unmap_attachment:
+ kbase_mem_umm_unmap_attachment(kctx, alloc);
+
+ return err;
+}
+
+int kbase_mem_umm_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ int err;
+ unsigned long gwt_mask = ~0;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
+ if (reg->gpu_alloc->imported.umm.current_mapping_usage_count != 1) {
+ if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT)) {
+ if (!kbase_is_region_invalid_or_free(reg)) {
+ err = kbase_mem_do_sync_imported(kctx, reg,
+ KBASE_SYNC_TO_DEVICE);
+ WARN_ON_ONCE(err);
+ }
+ }
+ return 0;
+ }
+
+ err = kbase_mem_umm_map_attachment(kctx, reg);
+ if (err)
+ goto bad_map_attachment;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+ if (kctx->gwt_enabled)
+ gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+ err = kbase_mmu_insert_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn,
+ kbase_get_gpu_phy_pages(reg),
+ kbase_reg_current_backed_size(reg),
+ reg->flags & gwt_mask,
+ kctx->as_nr);
+ if (err)
+ goto bad_insert;
+
+ if (reg->flags & KBASE_REG_IMPORT_PAD &&
+ !WARN_ON(reg->nr_pages < reg->gpu_alloc->nents)) {
+ /* For padded imported dma-buf memory, map the dummy aliasing
+ * page from the end of the dma-buf pages, to the end of the
+ * region using a read only mapping.
+ *
+ * Assume reg->gpu_alloc->nents is the number of actual pages
+ * in the dma-buf memory.
+ */
+ err = kbase_mmu_insert_single_page(kctx,
+ reg->start_pfn + reg->gpu_alloc->nents,
+ kctx->aliasing_sink_page,
+ reg->nr_pages - reg->gpu_alloc->nents,
+ (reg->flags | KBASE_REG_GPU_RD) &
+ ~KBASE_REG_GPU_WR);
+ if (err)
+ goto bad_pad_insert;
+ }
+
+ return 0;
+
+bad_pad_insert:
+ kbase_mmu_teardown_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn,
+ reg->gpu_alloc->nents,
+ kctx->as_nr);
+bad_insert:
+ kbase_mem_umm_unmap_attachment(kctx, reg->gpu_alloc);
+bad_map_attachment:
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
+
+ return err;
+}
+
+void kbase_mem_umm_unmap(struct kbase_context *kctx,
+ struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
+{
+ alloc->imported.umm.current_mapping_usage_count--;
+ if (alloc->imported.umm.current_mapping_usage_count) {
+ if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT)) {
+ if (!kbase_is_region_invalid_or_free(reg)) {
+ int err = kbase_mem_do_sync_imported(kctx, reg,
+ KBASE_SYNC_TO_CPU);
+ WARN_ON_ONCE(err);
+ }
+ }
+ return;
+ }
+
+ if (!kbase_is_region_invalid_or_free(reg) && reg->gpu_alloc == alloc) {
+ int err;
+
+ err = kbase_mmu_teardown_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn,
+ reg->nr_pages,
+ kctx->as_nr);
+ WARN_ON(err);
+ }
+
+ kbase_mem_umm_unmap_attachment(kctx, alloc);
+}
+
+/**
+ * kbase_mem_from_umm - Import dma-buf memory into kctx
+ * @kctx: Pointer to kbase context to import memory into
+ * @fd: File descriptor of dma-buf to import
+ * @va_pages: Pointer where virtual size of the region will be output
+ * @flags: Pointer to memory flags
+ * @padding: Number of read only padding pages to be inserted at the end of the
+ * GPU mapping of the dma-buf
+ *
+ * Return: Pointer to new kbase_va_region object of the imported dma-buf, or
+ * NULL on error.
+ *
+ * This function imports a dma-buf into kctx and creates a kbase_va_region
+ * object that wraps the dma-buf.
+ */
static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
int fd, u64 *va_pages, u64 *flags, u32 padding)
{
@@ -913,25 +1294,41 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
struct dma_buf_attachment *dma_attachment;
bool shared_zone = false;
+ /* 64-bit address range is the max */
+ if (*va_pages > (U64_MAX / PAGE_SIZE))
+ return NULL;
+
dma_buf = dma_buf_get(fd);
if (IS_ERR_OR_NULL(dma_buf))
- goto no_buf;
+ return NULL;
dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
- if (!dma_attachment)
- goto no_attachment;
+ if (IS_ERR_OR_NULL(dma_attachment)) {
+ dma_buf_put(dma_buf);
+ return NULL;
+ }
*va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
- if (!*va_pages)
- goto bad_size;
-
- if (*va_pages > (U64_MAX / PAGE_SIZE))
- /* 64-bit address range is the max */
- goto bad_size;
+ if (!*va_pages) {
+ dma_buf_detach(dma_buf, dma_attachment);
+ dma_buf_put(dma_buf);
+ return NULL;
+ }
/* ignore SAME_VA */
*flags &= ~BASE_MEM_SAME_VA;
+ /*
+ * Force CPU cached flag.
+ *
+ * We can't query the dma-buf exporter to get details about the CPU
+ * cache attributes of CPU mappings, so we have to assume that the
+ * buffer may be cached, and call into the exporter for cache
+ * maintenance, and rely on the exporter to do the right thing when
+ * handling our calls.
+ */
+ *flags |= BASE_MEM_CACHED_CPU;
+
if (*flags & BASE_MEM_IMPORT_SHARED)
shared_zone = true;
@@ -954,19 +1351,22 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
}
- if (!reg)
- goto no_region;
-
- if (kbase_update_region_flags(kctx, reg, *flags) != 0)
- goto invalid_flags;
+ if (!reg) {
+ dma_buf_detach(dma_buf, dma_attachment);
+ dma_buf_put(dma_buf);
+ return NULL;
+ }
reg->gpu_alloc = kbase_alloc_create(kctx, *va_pages,
KBASE_MEM_TYPE_IMPORTED_UMM, BASE_MEM_GROUP_DEFAULT);
if (IS_ERR_OR_NULL(reg->gpu_alloc))
- goto no_alloc_obj;
+ goto no_alloc;
reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+ goto error_out;
+
/* No pages to map yet */
reg->gpu_alloc->nents = 0;
@@ -987,17 +1387,30 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
reg->extent = 0;
+ if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
+ int err;
+
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count = 1;
+
+ err = kbase_mem_umm_map_attachment(kctx, reg);
+ if (err) {
+ dev_warn(kctx->kbdev->dev,
+ "Failed to map dma-buf %pK on GPU: %d\n",
+ dma_buf, err);
+ goto error_out;
+ }
+
+ *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+ }
+
return reg;
-no_alloc_obj:
-invalid_flags:
+error_out:
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+no_alloc:
kfree(reg);
-no_region:
-bad_size:
- dma_buf_detach(dma_buf, dma_attachment);
-no_attachment:
- dma_buf_put(dma_buf);
-no_buf:
+
return NULL;
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
@@ -2118,9 +2531,9 @@ out:
return err;
}
-int kbase_mmap(struct file *file, struct vm_area_struct *vma)
+int kbase_context_mmap(struct kbase_context *const kctx,
+ struct vm_area_struct *const vma)
{
- struct kbase_context *kctx = file->private_data;
struct kbase_va_region *reg = NULL;
void *kaddr = NULL;
size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -2271,7 +2684,7 @@ out:
return err;
}
-KBASE_EXPORT_TEST_API(kbase_mmap);
+KBASE_EXPORT_TEST_API(kbase_context_mmap);
void kbase_sync_mem_regions(struct kbase_context *kctx,
struct kbase_vmap_struct *map, enum kbase_sync_type dest)
diff --git a/mali_kbase/mali_kbase_mem_linux.h b/mali_kbase/mali_kbase_mem_linux.h
index 5cb88d1..e34972f 100644
--- a/mali_kbase/mali_kbase_mem_linux.h
+++ b/mali_kbase/mali_kbase_mem_linux.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010, 2012-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010, 2012-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -129,15 +129,15 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
/**
- * kbase_mmap - Mmap method, gets invoked when mmap system call is issued on
- * device file /dev/malixx.
- * @file: Pointer to the device file /dev/malixx instance.
+ * kbase_context_mmap - Memory map method, gets invoked when mmap system call is
+ * issued on device file /dev/malixx.
+ * @kctx: The kernel context
* @vma: Pointer to the struct containing the info where the GPU allocation
* will be mapped in virtual address space of CPU.
*
* Return: 0 on success or error code
*/
-int kbase_mmap(struct file *file, struct vm_area_struct *vma);
+int kbase_context_mmap(struct kbase_context *kctx, struct vm_area_struct *vma);
/**
* kbase_mem_evictable_init - Initialize the Ephemeral memory eviction
diff --git a/mali_kbase/mali_kbase_mem_profile_debugfs.c b/mali_kbase/mali_kbase_mem_profile_debugfs.c
index d4f8433..a59f572 100644
--- a/mali_kbase/mali_kbase_mem_profile_debugfs.c
+++ b/mali_kbase/mali_kbase_mem_profile_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2017, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -75,7 +75,9 @@ int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
- if (!debugfs_create_file("mem_profile", S_IRUGO,
+ if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
+ err = -ENOMEM;
+ } else if (!debugfs_create_file("mem_profile", 0444,
kctx->kctx_dentry, kctx,
&kbasep_mem_profile_debugfs_fops)) {
err = -EAGAIN;
diff --git a/mali_kbase/mali_kbase_mipe_gen_header.h b/mali_kbase/mali_kbase_mipe_gen_header.h
new file mode 100644
index 0000000..99475b6
--- /dev/null
+++ b/mali_kbase/mali_kbase_mipe_gen_header.h
@@ -0,0 +1,120 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_mipe_proto.h"
+
+/**
+ * This header generates MIPE tracepoint declaration BLOB at
+ * compile time.
+ *
+ * Before including this header, the following parameters
+ * must be defined:
+ *
+ * MIPE_HEADER_BLOB_VAR_NAME: the name of the variable
+ * where the result BLOB will be stored.
+ *
+ * MIPE_HEADER_TP_LIST: the list of tracepoints to process.
+ * It should be defined as follows:
+ * #define MIPE_HEADER_TP_LIST \
+ * TP_DESC(FIRST_TRACEPOINT, "Some description", "@II", "first_arg,second_arg") \
+ * TP_DESC(SECOND_TRACEPOINT, "Some description", "@II", "first_arg,second_arg") \
+ * etc.
+ * Where the first argument is the tracepoint name, the second
+ * argument is a short tracepoint description, the third argument
+ * is the argument types (see MIPE documentation), and the fourth
+ * argument is the comma-separated argument names.
+ *
+ * MIPE_HEADER_TP_LIST_COUNT: number of entries in MIPE_HEADER_TP_LIST.
+ *
+ * MIPE_HEADER_PKT_CLASS: MIPE packet class.
+ */
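+
+/*
+ * A minimal, hypothetical usage sketch (the names below are illustrative
+ * and not part of this patch). The client defines the four parameters and
+ * then includes this header once per blob it wants to emit; EXAMPLE_TP is
+ * assumed to be an integer tracepoint id defined elsewhere:
+ *
+ *   #define MIPE_HEADER_BLOB_VAR_NAME  __example_header_blob
+ *   #define MIPE_HEADER_TP_LIST \
+ *       TP_DESC(EXAMPLE_TP, "Example tracepoint", "@I", "value")
+ *   #define MIPE_HEADER_TP_LIST_COUNT  1
+ *   #define MIPE_HEADER_PKT_CLASS      TL_PACKET_CLASS_OBJ
+ *   #include "mali_kbase_mipe_gen_header.h"
+ *
+ * The include then emits a packed, statically initialized blob named
+ * __example_header_blob describing that single tracepoint.
+ */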
+
+#if !defined(MIPE_HEADER_BLOB_VAR_NAME)
+#error "MIPE_HEADER_BLOB_VAR_NAME must be defined!"
+#endif
+
+#if !defined(MIPE_HEADER_TP_LIST)
+#error "MIPE_HEADER_TP_LIST must be defined!"
+#endif
+
+#if !defined(MIPE_HEADER_TP_LIST_COUNT)
+#error "MIPE_HEADER_TP_LIST_COUNT must be defined!"
+#endif
+
+#if !defined(MIPE_HEADER_PKT_CLASS)
+#error "MIPE_HEADER_PKT_CLASS must be defined!"
+#endif
+
+static const struct {
+ u32 _mipe_w0;
+ u32 _mipe_w1;
+ u8 _protocol_version;
+ u8 _pointer_size;
+ u32 _tp_count;
+#define TP_DESC(name, desc, arg_types, arg_names) \
+ struct { \
+ u32 _name; \
+ u32 _size_string_name; \
+ char _string_name[sizeof(#name)]; \
+ u32 _size_desc; \
+ char _desc[sizeof(desc)]; \
+ u32 _size_arg_types; \
+ char _arg_types[sizeof(arg_types)]; \
+ u32 _size_arg_names; \
+ char _arg_names[sizeof(arg_names)]; \
+ } __attribute__ ((__packed__)) __ ## name;
+
+ MIPE_HEADER_TP_LIST
+#undef TP_DESC
+
+} __attribute__ ((__packed__)) MIPE_HEADER_BLOB_VAR_NAME = {
+ ._mipe_w0 = MIPE_PACKET_HEADER_W0(
+ TL_PACKET_FAMILY_TL,
+ MIPE_HEADER_PKT_CLASS,
+ TL_PACKET_TYPE_HEADER,
+ 1),
+ ._mipe_w1 = MIPE_PACKET_HEADER_W1(
+ sizeof(MIPE_HEADER_BLOB_VAR_NAME) - PACKET_HEADER_SIZE,
+ 0),
+ ._protocol_version = SWTRACE_VERSION,
+ ._pointer_size = sizeof(void *),
+ ._tp_count = MIPE_HEADER_TP_LIST_COUNT,
+#define TP_DESC(name, desc, arg_types, arg_names) \
+ .__ ## name = { \
+ ._name = name, \
+ ._size_string_name = sizeof(#name), \
+ ._string_name = #name, \
+ ._size_desc = sizeof(desc), \
+ ._desc = desc, \
+ ._size_arg_types = sizeof(arg_types), \
+ ._arg_types = arg_types, \
+ ._size_arg_names = sizeof(arg_names), \
+ ._arg_names = arg_names \
+ },
+ MIPE_HEADER_TP_LIST
+#undef TP_DESC
+};
+
+#undef MIPE_HEADER_BLOB_VAR_NAME
+#undef MIPE_HEADER_TP_LIST
+#undef MIPE_HEADER_TP_LIST_COUNT
+#undef MIPE_HEADER_PKT_CLASS
diff --git a/mali_kbase/mali_kbase_mipe_proto.h b/mali_kbase/mali_kbase_mipe_proto.h
new file mode 100644
index 0000000..fb61faa
--- /dev/null
+++ b/mali_kbase/mali_kbase_mipe_proto.h
@@ -0,0 +1,113 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_MIPE_PROTO_H)
+#define _KBASE_MIPE_PROTO_H
+
+#define _BITFIELD_MASK_FIELD(pos, len) \
+ (((1 << len) - 1) << pos)
+
+#define _BITFIELD_SET_FIELD(pos, len, value) \
+ (_BITFIELD_MASK_FIELD(pos, len) & ((value) << pos))
+
+#define BITFIELD_SET(field_name, value) \
+ _BITFIELD_SET_FIELD(field_name ## _POS, field_name ## _LEN, value)
+
+/* The version of swtrace protocol used in timeline stream. */
+#define SWTRACE_VERSION 3
+
+/* Packet header - first word.
+ * These values must be defined according to MIPE documentation.
+ */
+#define PACKET_STREAMID_POS 0
+#define PACKET_STREAMID_LEN 8
+#define PACKET_RSVD1_POS (PACKET_STREAMID_POS + PACKET_STREAMID_LEN)
+#define PACKET_RSVD1_LEN 8
+#define PACKET_TYPE_POS (PACKET_RSVD1_POS + PACKET_RSVD1_LEN)
+#define PACKET_TYPE_LEN 3
+#define PACKET_CLASS_POS (PACKET_TYPE_POS + PACKET_TYPE_LEN)
+#define PACKET_CLASS_LEN 7
+#define PACKET_FAMILY_POS (PACKET_CLASS_POS + PACKET_CLASS_LEN)
+#define PACKET_FAMILY_LEN 6
+
+/* Packet header - second word
+ * These values must be defined according to MIPE documentation.
+ */
+#define PACKET_LENGTH_POS 0
+#define PACKET_LENGTH_LEN 24
+#define PACKET_SEQBIT_POS (PACKET_LENGTH_POS + PACKET_LENGTH_LEN)
+#define PACKET_SEQBIT_LEN 1
+#define PACKET_RSVD2_POS (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN)
+#define PACKET_RSVD2_LEN 7
+
+/* First word of a MIPE packet */
+#define MIPE_PACKET_HEADER_W0(pkt_family, pkt_class, pkt_type, stream_id) \
+ (0 \
+ | BITFIELD_SET(PACKET_FAMILY, pkt_family) \
+ | BITFIELD_SET(PACKET_CLASS, pkt_class) \
+ | BITFIELD_SET(PACKET_TYPE, pkt_type) \
+ | BITFIELD_SET(PACKET_STREAMID, stream_id))
+
+/* Second word of a MIPE packet */
+#define MIPE_PACKET_HEADER_W1(packet_length, seqbit) \
+ (0 \
+ | BITFIELD_SET(PACKET_LENGTH, packet_length) \
+ | BITFIELD_SET(PACKET_SEQBIT, seqbit))
+
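+/*
+ * Worked example (illustrative only): for the header packet emitted by
+ * mali_kbase_mipe_gen_header.h, and assuming MIPE_HEADER_PKT_CLASS is
+ * TL_PACKET_CLASS_OBJ,
+ *
+ *   MIPE_PACKET_HEADER_W0(TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ,
+ *                         TL_PACKET_TYPE_HEADER, 1)
+ *
+ * places family = 1 at bit 26, class = 0 at bit 19, type = 0 at bit 16 and
+ * stream id = 1 at bit 0, i.e. it evaluates to 0x04000001. Similarly,
+ * MIPE_PACKET_HEADER_W1() packs the payload length into bits 0-23 and the
+ * sequence bit into bit 24.
+ */
+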
+/* The number of bytes reserved for the packet header.
+ * This value must be defined according to MIPE documentation.
+ */
+#define PACKET_HEADER_SIZE 8 /* bytes */
+
+/* The number of bytes reserved for the packet sequence number.
+ * This value must be defined according to MIPE documentation.
+ */
+#define PACKET_NUMBER_SIZE 4 /* bytes */
+
+/* Timeline packet family ids.
+ * Values are significant! Check MIPE documentation.
+ */
+enum tl_packet_family {
+ TL_PACKET_FAMILY_CTRL = 0, /* control packets */
+ TL_PACKET_FAMILY_TL = 1, /* timeline packets */
+ TL_PACKET_FAMILY_COUNT
+};
+
+/* Packet classes used in timeline streams.
+ * Values are significant! Check MIPE documentation.
+ */
+enum tl_packet_class {
+ TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */
+ TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */
+};
+
+/* Packet types used in timeline streams.
+ * Values are significant! Check MIPE documentation.
+ */
+enum tl_packet_type {
+ TL_PACKET_TYPE_HEADER = 0, /* stream's header/directory */
+ TL_PACKET_TYPE_BODY = 1, /* stream's body */
+ TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */
+};
+
+#endif /* _KBASE_MIPE_PROTO_H */
+
diff --git a/mali_kbase/mali_kbase_mmu.c b/mali_kbase/mali_kbase_mmu.c
index 8192bc8..b1bd5c4 100644
--- a/mali_kbase/mali_kbase_mmu.c
+++ b/mali_kbase/mali_kbase_mmu.c
@@ -32,7 +32,7 @@
#include <linux/dma-mapping.h>
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <mali_kbase_instr_defs.h>
#include <mali_kbase_debug.h>
@@ -890,8 +890,7 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
int i;
struct page *p;
- p = kbase_mem_pool_alloc(
- &kbdev->mem_pools.small[BASE_MEM_GROUP_DEFAULT]);
+ p = kbase_mem_pool_alloc(&kbdev->mem_pools.small[mmut->group_id]);
if (!p)
return 0;
@@ -926,7 +925,7 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
return page_to_phys(p);
alloc_free:
- kbase_mem_pool_free(&kbdev->mem_pools.small[BASE_MEM_GROUP_DEFAULT], p,
+ kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id], p,
false);
return 0;
@@ -1151,7 +1150,7 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
mutex_unlock(&kctx->mmu.mmu_lock);
err = kbase_mem_pool_grow(
&kctx->kbdev->mem_pools.small[
- BASE_MEM_GROUP_DEFAULT],
+ kctx->mmu.group_id],
MIDGARD_MMU_BOTTOMLEVEL);
mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
@@ -1227,7 +1226,7 @@ static inline void cleanup_empty_pte(struct kbase_device *kbdev,
tmp_pgd = kbdev->mmu_mode->pte_to_phy_addr(*pte);
tmp_p = phys_to_page(tmp_pgd);
- kbase_mem_pool_free(&kbdev->mem_pools.small[BASE_MEM_GROUP_DEFAULT],
+ kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id],
tmp_p, false);
/* If the MMU tables belong to a context then we accounted the memory
@@ -1297,7 +1296,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
*/
mutex_unlock(&mmut->mmu_lock);
err = kbase_mem_pool_grow(
- &kbdev->mem_pools.small[BASE_MEM_GROUP_DEFAULT],
+ &kbdev->mem_pools.small[mmut->group_id],
cur_level);
mutex_lock(&mmut->mmu_lock);
} while (!err);
@@ -1782,7 +1781,7 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
mutex_unlock(&kctx->mmu.mmu_lock);
err = kbase_mem_pool_grow(
&kctx->kbdev->mem_pools.small[
- BASE_MEM_GROUP_DEFAULT],
+ kctx->mmu.group_id],
MIDGARD_MMU_BOTTOMLEVEL);
mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
@@ -1873,7 +1872,7 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
p = pfn_to_page(PFN_DOWN(pgd));
- kbase_mem_pool_free(&kbdev->mem_pools.small[BASE_MEM_GROUP_DEFAULT],
+ kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id],
p, true);
atomic_sub(1, &kbdev->memdev.used_pages);
@@ -1887,9 +1886,15 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
}
}
-int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- struct kbase_context *kctx)
+int kbase_mmu_init(struct kbase_device *const kbdev,
+ struct kbase_mmu_table *const mmut, struct kbase_context *const kctx,
+ int const group_id)
{
+ if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) ||
+ WARN_ON(group_id < 0))
+ return -EINVAL;
+
+ mmut->group_id = group_id;
mutex_init(&mmut->mmu_lock);
mmut->kctx = kctx;
@@ -1908,7 +1913,7 @@ int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
int err;
err = kbase_mem_pool_grow(
- &kbdev->mem_pools.small[BASE_MEM_GROUP_DEFAULT],
+ &kbdev->mem_pools.small[mmut->group_id],
MIDGARD_MMU_BOTTOMLEVEL);
if (err) {
kbase_mmu_term(kbdev, mmut);
diff --git a/mali_kbase/mali_kbase_mmu_hw.h b/mali_kbase/mali_kbase_mmu_hw.h
index 70d5f2b..f49a1d4 100644
--- a/mali_kbase/mali_kbase_mmu_hw.h
+++ b/mali_kbase/mali_kbase_mmu_hw.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015,2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,8 +35,8 @@
* be provided.
*/
-#ifndef _MALI_KBASE_MMU_HW_H_
-#define _MALI_KBASE_MMU_HW_H_
+#ifndef _KBASE_MMU_HW_H_
+#define _KBASE_MMU_HW_H_
/* Forward declarations */
struct kbase_device;
@@ -121,4 +121,4 @@ void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
/** @} *//* end group mali_kbase_mmu_hw */
/** @} *//* end group base_kbase_api */
-#endif /* _MALI_KBASE_MMU_HW_H_ */
+#endif /* _KBASE_MMU_HW_H_ */
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index f7969be..6d0a268 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -35,7 +35,7 @@
#include <mali_base_kernel.h>
#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
#include <linux/version.h>
#include <linux/ktime.h>
#include <linux/pfn.h>
@@ -1168,8 +1168,7 @@ static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(kbdev, katom,
info->gpu_alloc_addr,
- new_addr, info->va_pages,
- entry_mmu_flags);
+ new_addr, entry_mmu_flags, info->id);
kbase_vunmap(kctx, &mapping);
}
@@ -1320,9 +1319,12 @@ static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
* still succeed this soft job but don't try and free
* the allocation.
*/
- if (kctx->jit_alloc[ids[j]] != (struct kbase_va_region *) -1)
+ if (kctx->jit_alloc[ids[j]] != (struct kbase_va_region *) -1) {
+ KBASE_TLSTREAM_TL_JIT_USEDPAGES(kctx->kbdev,
+ kctx->jit_alloc[ids[j]]->
+ gpu_alloc->nents, ids[j]);
kbase_jit_free(kctx, kctx->jit_alloc[ids[j]]);
-
+ }
kctx->jit_alloc[ids[j]] = NULL;
}
}
diff --git a/mali_kbase/mali_kbase_timeline.c b/mali_kbase/mali_kbase_timeline.c
new file mode 100644
index 0000000..d527bb3
--- /dev/null
+++ b/mali_kbase/mali_kbase_timeline.c
@@ -0,0 +1,341 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_timeline.h"
+#include "mali_kbase_timeline_priv.h"
+#include "mali_kbase_tracepoints.h"
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+
+/* The period of autoflush checker execution in milliseconds. */
+#define AUTOFLUSH_INTERVAL 1000 /* ms */
+
+/*****************************************************************************/
+
+/* These values are used in mali_kbase_tracepoints.h
+ * to retrieve the streams from a kbase_timeline instance.
+ */
+const size_t __obj_stream_offset =
+ offsetof(struct kbase_timeline, streams)
+ + sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_OBJ;
+
+const size_t __aux_stream_offset =
+ offsetof(struct kbase_timeline, streams)
+ + sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_AUX;
+
+/**
+ * kbasep_timeline_autoflush_timer_callback - autoflush timer callback
+ * @timer: Timer list
+ *
+ * Timer is executed periodically to check if any of the streams contains
+ * a buffer ready to be submitted to user space.
+ */
+static void kbasep_timeline_autoflush_timer_callback(struct timer_list *timer)
+{
+ enum tl_stream_type stype;
+ int rcode;
+ struct kbase_timeline *timeline =
+ container_of(timer, struct kbase_timeline, autoflush_timer);
+
+ CSTD_UNUSED(timer);
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) {
+ struct kbase_tlstream *stream = &timeline->streams[stype];
+
+ int af_cnt = atomic_read(&stream->autoflush_counter);
+
+		/* Check if stream contains unflushed data. */
+ if (af_cnt < 0)
+ continue;
+
+ /* Check if stream should be flushed now. */
+ if (af_cnt != atomic_cmpxchg(
+ &stream->autoflush_counter,
+ af_cnt,
+ af_cnt + 1))
+ continue;
+ if (!af_cnt)
+ continue;
+
+ /* Autoflush this stream. */
+ kbase_tlstream_flush_stream(stream);
+ }
+
+ if (atomic_read(&timeline->autoflush_timer_active))
+ rcode = mod_timer(
+ &timeline->autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+}
+
+
+
+/*****************************************************************************/
+
+int kbase_timeline_init(struct kbase_timeline **timeline,
+ atomic_t *timeline_is_enabled)
+{
+ enum tl_stream_type i;
+ struct kbase_timeline *result;
+
+ if (!timeline || !timeline_is_enabled)
+ return -EINVAL;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ mutex_init(&result->reader_lock);
+ init_waitqueue_head(&result->event_queue);
+
+ /* Prepare stream structures. */
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
+ kbase_tlstream_init(&result->streams[i], i,
+ &result->event_queue);
+
+ /* Initialize autoflush timer. */
+ atomic_set(&result->autoflush_timer_active, 0);
+ kbase_timer_setup(&result->autoflush_timer,
+ kbasep_timeline_autoflush_timer_callback);
+ result->is_enabled = timeline_is_enabled;
+
+ *timeline = result;
+ return 0;
+}
+
+void kbase_timeline_term(struct kbase_timeline *timeline)
+{
+ enum tl_stream_type i;
+
+ if (!timeline)
+ return;
+
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
+ kbase_tlstream_term(&timeline->streams[i]);
+
+ kfree(timeline);
+}
+
+static void kbase_create_timeline_objects(struct kbase_device *kbdev)
+{
+ unsigned int lpu_id;
+ unsigned int as_nr;
+ struct kbase_context *kctx;
+ struct kbase_timeline *timeline = kbdev->timeline;
+ struct kbase_tlstream *summary =
+ &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
+
+ /* Summarize the LPU objects. */
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ u32 *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ __kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, *lpu);
+ }
+
+ /* Summarize the Address Space objects. */
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
+
+ /* Create GPU object and make it retain all LPUs and address spaces. */
+ __kbase_tlstream_tl_new_gpu(summary,
+ kbdev,
+ kbdev->gpu_props.props.raw_props.gpu_id,
+ kbdev->gpu_props.num_cores);
+
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ void *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ __kbase_tlstream_tl_lifelink_lpu_gpu(summary, lpu, kbdev);
+ }
+
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ __kbase_tlstream_tl_lifelink_as_gpu(summary,
+ &kbdev->as[as_nr],
+ kbdev);
+
+ /* Lock the context list, to ensure no changes to the list are made
+ * while we're summarizing the contexts and their contents.
+ */
+ mutex_lock(&kbdev->kctx_list_lock);
+
+ /* For each context in the device... */
+ list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
+ /* Summarize the context itself */
+ __kbase_tlstream_tl_new_ctx(summary,
+ kctx,
+ kctx->id,
+ (u32)(kctx->tgid));
+ };
+
+ /* Reset body stream buffers while holding the kctx lock.
+ * This ensures we can't fire both summary and normal tracepoints for
+ * the same objects.
+ * If we weren't holding the lock, it's possible that the summarized
+ * objects could have been created, destroyed, or used after we
+ * constructed the summary stream tracepoints, but before we reset
+ * the body stream, resulting in losing those object event tracepoints.
+ */
+ kbase_timeline_streams_body_reset(timeline);
+
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+	/* Static objects are placed into the summary packet that needs to be
+	 * transmitted first. Flush all streams to make it available to
+	 * user space.
+	 */
+ kbase_timeline_streams_flush(timeline);
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+static void kbase_tlstream_current_devfreq_target(struct kbase_device *kbdev)
+{
+ struct devfreq *devfreq = kbdev->devfreq;
+
+ /* Devfreq initialization failure isn't a fatal error, so devfreq might
+ * be null.
+ */
+ if (devfreq) {
+ unsigned long cur_freq = 0;
+
+ mutex_lock(&devfreq->lock);
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+ cur_freq = kbdev->current_nominal_freq;
+#else
+ cur_freq = devfreq->last_status.current_frequency;
+#endif
+ KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(kbdev, (u64)cur_freq);
+ mutex_unlock(&devfreq->lock);
+ }
+}
+#endif /* CONFIG_MALI_DEVFREQ */
+
+int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags)
+{
+ int ret;
+ u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
+ struct kbase_timeline *timeline = kbdev->timeline;
+
+ if (!atomic_cmpxchg(timeline->is_enabled, 0, tlstream_enabled)) {
+ int rcode;
+
+ ret = anon_inode_getfd(
+ "[mali_tlstream]",
+ &kbasep_tlstream_fops,
+ timeline,
+ O_RDONLY | O_CLOEXEC);
+ if (ret < 0) {
+ atomic_set(timeline->is_enabled, 0);
+ return ret;
+ }
+
+ /* Reset and initialize header streams. */
+ kbase_tlstream_reset(
+ &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY]);
+
+ timeline->obj_header_btc = obj_desc_header_size;
+ timeline->aux_header_btc = aux_desc_header_size;
+
+ /* Start autoflush timer. */
+ atomic_set(&timeline->autoflush_timer_active, 1);
+ rcode = mod_timer(
+ &timeline->autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+
+ /* If job dumping is enabled, readjust the software event's
+ * timeout as the default value of 3 seconds is often
+ * insufficient.
+ */
+ if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) {
+ dev_info(kbdev->dev,
+ "Job dumping is enabled, readjusting the software event's timeout\n");
+ atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+ 1800000);
+ }
+
+ /* Summary stream was cleared during acquire.
+ * Create static timeline objects that will be
+ * read by client.
+ */
+ kbase_create_timeline_objects(kbdev);
+
+#ifdef CONFIG_MALI_DEVFREQ
+ /* Devfreq target tracepoints are only fired when the target
+ * changes, so we won't know the current target unless we
+ * send it now.
+ */
+ kbase_tlstream_current_devfreq_target(kbdev);
+#endif /* CONFIG_MALI_DEVFREQ */
+
+ } else {
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void kbase_timeline_streams_flush(struct kbase_timeline *timeline)
+{
+ enum tl_stream_type stype;
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+ kbase_tlstream_flush_stream(&timeline->streams[stype]);
+}
+
+void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline)
+{
+ kbase_tlstream_reset(
+ &timeline->streams[TL_STREAM_TYPE_OBJ]);
+ kbase_tlstream_reset(
+ &timeline->streams[TL_STREAM_TYPE_AUX]);
+}
+
+#if MALI_UNIT_TEST
+void kbase_timeline_stats(struct kbase_timeline *timeline,
+ u32 *bytes_collected, u32 *bytes_generated)
+{
+ enum tl_stream_type stype;
+
+ KBASE_DEBUG_ASSERT(bytes_collected);
+
+ /* Accumulate bytes generated per stream */
+ *bytes_generated = 0;
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+ *bytes_generated += atomic_read(
+ &timeline->streams[stype].bytes_generated);
+
+ *bytes_collected = atomic_read(&timeline->bytes_collected);
+}
+#endif /* MALI_UNIT_TEST */
diff --git a/mali_kbase/mali_kbase_timeline.h b/mali_kbase/mali_kbase_timeline.h
new file mode 100644
index 0000000..d800288
--- /dev/null
+++ b/mali_kbase/mali_kbase_timeline.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TIMELINE_H)
+#define _KBASE_TIMELINE_H
+
+#include <mali_kbase.h>
+
+/*****************************************************************************/
+
+struct kbase_timeline;
+
+/**
+ * kbase_timeline_init - initialize timeline infrastructure in kernel
+ * @timeline: Newly created instance of kbase_timeline will
+ * be stored in this pointer.
+ * @timeline_is_enabled: Timeline status will be written to this variable
+ * when a client is attached/detached. The variable
+ * must be valid while timeline instance is valid.
+ * Return: zero on success, negative number on error
+ */
+int kbase_timeline_init(struct kbase_timeline **timeline,
+ atomic_t *timeline_is_enabled);
+
+/**
+ * kbase_timeline_term - terminate timeline infrastructure in kernel
+ *
+ * @timeline: Timeline instance to be terminated. It must have been previously
+ *            created with kbase_timeline_init().
+ */
+void kbase_timeline_term(struct kbase_timeline *timeline);
+
+/**
+ * kbase_timeline_io_acquire - acquire timeline stream file descriptor
+ * @kbdev: Kbase device
+ * @flags: Timeline stream flags
+ *
+ * This descriptor is meant to be used by userspace to gain access to the
+ * kernel timeline stream. The stream is later broadcast by user space to the
+ * timeline client.
+ * Only one entity can own the descriptor at any given time, and it should be
+ * closed when no longer needed. If the descriptor cannot be obtained (i.e. it
+ * is already in use), a negative value is returned.
+ *
+ * Return: file descriptor on success, negative number on error
+ */
+int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags);
+
+/**
+ * kbase_timeline_streams_flush - flush timeline streams.
+ * @timeline: Timeline instance
+ *
+ * Function will flush pending data in all timeline streams.
+ */
+void kbase_timeline_streams_flush(struct kbase_timeline *timeline);
+
+/**
+ * kbase_timeline_streams_body_reset - reset timeline body streams.
+ * @timeline: Timeline instance
+ *
+ * Function will discard pending data in all timeline body streams.
+ */
+void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline);
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_timeline_test - start timeline stream data generator
+ * @kbdev: Kernel common context
+ * @tpw_count: Number of trace point writers in each context
+ * @msg_delay: Time delay in milliseconds between trace points written by one
+ * writer
+ * @msg_count: Number of trace points written by one writer
+ * @aux_msg: If non-zero aux messages will be included
+ *
+ * This test starts the requested number of asynchronous writers in both IRQ
+ * and thread context. Each writer will generate the required number of test
+ * tracepoints (tracepoints with embedded information about the writer, which
+ * should be verified by the user space reader). Tracepoints will be emitted
+ * in all timeline body streams. If aux_msg is non-zero each writer will also
+ * generate non-testable tracepoints (tracepoints without information about
+ * the writer). These tracepoints are used to check the correctness of the
+ * remaining timeline message generating functions. Each writer will wait the
+ * requested time between generating successive sets of messages. This call
+ * blocks until all writers finish.
+ */
+void kbase_timeline_test(
+ struct kbase_device *kbdev,
+ unsigned int tpw_count,
+ unsigned int msg_delay,
+ unsigned int msg_count,
+ int aux_msg);
+
+/**
+ * kbase_timeline_stats - read timeline stream statistics
+ * @timeline: Timeline instance
+ * @bytes_collected: Will hold number of bytes read by the user
+ * @bytes_generated: Will hold number of bytes generated by trace points
+ */
+void kbase_timeline_stats(struct kbase_timeline *timeline, u32 *bytes_collected, u32 *bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* _KBASE_TIMELINE_H */
diff --git a/mali_kbase/mali_kbase_timeline_io.c b/mali_kbase/mali_kbase_timeline_io.c
new file mode 100644
index 0000000..f01bf57
--- /dev/null
+++ b/mali_kbase/mali_kbase_timeline_io.c
@@ -0,0 +1,313 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase_timeline_priv.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
+
+#include <linux/poll.h>
+
+/* The timeline stream file operations functions. */
+static ssize_t kbasep_timeline_io_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos);
+static unsigned int kbasep_timeline_io_poll(struct file *filp, poll_table *wait);
+static int kbasep_timeline_io_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+const struct file_operations kbasep_tlstream_fops = {
+ .release = kbasep_timeline_io_release,
+ .read = kbasep_timeline_io_read,
+ .poll = kbasep_timeline_io_poll,
+};
+
+/**
+ * kbasep_timeline_io_packet_pending - check timeline streams for pending packets
+ * @timeline: Timeline instance
+ * @ready_stream: Pointer to variable where stream will be placed
+ * @rb_idx_raw: Pointer to variable where read buffer index will be placed
+ *
+ * Function checks all streams for pending packets. It will stop as soon as a
+ * packet ready to be submitted to user space is detected. The variables passed
+ * by pointer as parameters to this function will be updated with values
+ * pointing to the right stream and buffer.
+ *
+ * Return: non-zero if any of the timeline streams has at least one packet ready
+ */
+static int kbasep_timeline_io_packet_pending(
+ struct kbase_timeline *timeline,
+ struct kbase_tlstream **ready_stream,
+ unsigned int *rb_idx_raw)
+{
+ enum tl_stream_type i = 0;
+
+ KBASE_DEBUG_ASSERT(ready_stream);
+ KBASE_DEBUG_ASSERT(rb_idx_raw);
+
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; ++i) {
+ struct kbase_tlstream *stream = &timeline->streams[i];
+ *rb_idx_raw = atomic_read(&stream->rbi);
+ /* Read buffer index may be updated by writer in case of
+ * overflow. Read and write buffer indexes must be
+ * loaded in correct order.
+ */
+ smp_rmb();
+ if (atomic_read(&stream->wbi) != *rb_idx_raw) {
+ *ready_stream = stream;
+ return 1;
+ }
+
+ }
+
+ return 0;
+}
+
+/**
+ * kbasep_timeline_copy_header - copy timeline headers to the user
+ * @timeline: Timeline instance
+ * @buffer: Pointer to the buffer provided by user
+ * @size: Maximum amount of data that can be stored in the buffer
+ * @copy_len: Pointer to amount of bytes that has been copied already
+ * within the read system call.
+ *
+ * This helper function checks whether the timeline headers have already been
+ * sent to the user, and sends them if they have not. @copy_len is updated
+ * accordingly.
+ *
+ * Return: 0 on success, -1 if copy_to_user() has failed.
+ */
+static inline int kbasep_timeline_copy_header(
+ struct kbase_timeline *timeline,
+ char __user *buffer,
+ size_t size,
+ ssize_t *copy_len)
+{
+ if (timeline->obj_header_btc) {
+ size_t offset = obj_desc_header_size -
+ timeline->obj_header_btc;
+
+ size_t header_cp_size = MIN(
+ size - *copy_len,
+ timeline->obj_header_btc);
+
+ if (copy_to_user(
+ &buffer[*copy_len],
+ &obj_desc_header[offset],
+ header_cp_size))
+ return -1;
+
+ timeline->obj_header_btc -= header_cp_size;
+ *copy_len += header_cp_size;
+ }
+
+ if (timeline->aux_header_btc) {
+ size_t offset = aux_desc_header_size -
+ timeline->aux_header_btc;
+ size_t header_cp_size = MIN(
+ size - *copy_len,
+ timeline->aux_header_btc);
+
+ if (copy_to_user(
+ &buffer[*copy_len],
+ &aux_desc_header[offset],
+ header_cp_size))
+ return -1;
+
+ timeline->aux_header_btc -= header_cp_size;
+ *copy_len += header_cp_size;
+ }
+ return 0;
+}
+
+
+/**
+ * kbasep_timeline_io_read - copy data from streams to buffer provided by user
+ * @filp: Pointer to file structure
+ * @buffer: Pointer to the buffer provided by user
+ * @size: Maximum amount of data that can be stored in the buffer
+ * @f_pos: Pointer to file offset (unused)
+ *
+ * Return: number of bytes stored in the buffer
+ */
+static ssize_t kbasep_timeline_io_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos)
+{
+ ssize_t copy_len = 0;
+ struct kbase_timeline *timeline;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(f_pos);
+
+ if (WARN_ON(!filp->private_data))
+ return -EFAULT;
+
+ timeline = (struct kbase_timeline *) filp->private_data;
+
+ if (!buffer)
+ return -EINVAL;
+
+ if ((*f_pos < 0) || (size < PACKET_SIZE))
+ return -EINVAL;
+
+ mutex_lock(&timeline->reader_lock);
+
+ while (copy_len < size) {
+ struct kbase_tlstream *stream = NULL;
+ unsigned int rb_idx_raw = 0;
+ unsigned int wb_idx_raw;
+ unsigned int rb_idx;
+ size_t rb_size;
+
+ if (kbasep_timeline_copy_header(
+ timeline, buffer, size, &copy_len)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+		/* If we have already read some packets and there is no
+		 * packet pending then return to the user.
+		 * If we don't have any data yet, wait for a packet to be
+		 * submitted.
+		 */
+ if (copy_len > 0) {
+ if (!kbasep_timeline_io_packet_pending(
+ timeline,
+ &stream,
+ &rb_idx_raw))
+ break;
+ } else {
+ if (wait_event_interruptible(
+ timeline->event_queue,
+ kbasep_timeline_io_packet_pending(
+ timeline,
+ &stream,
+ &rb_idx_raw))) {
+ copy_len = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ if (WARN_ON(!stream)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+ /* Check if this packet fits into the user buffer.
+ * If so copy its content.
+ */
+ rb_idx = rb_idx_raw % PACKET_COUNT;
+ rb_size = atomic_read(&stream->buffer[rb_idx].size);
+ if (rb_size > size - copy_len)
+ break;
+ if (copy_to_user(
+ &buffer[copy_len],
+ stream->buffer[rb_idx].data,
+ rb_size)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+		/* If the distance between the read buffer index and the write
+		 * buffer index has become more than PACKET_COUNT, then an
+		 * overflow has happened and we need to ignore the last portion
+		 * of bytes that we have just sent to the user.
+		 */
+ smp_rmb();
+ wb_idx_raw = atomic_read(&stream->wbi);
+
+ if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
+ copy_len += rb_size;
+ atomic_inc(&stream->rbi);
+#if MALI_UNIT_TEST
+ atomic_add(rb_size, &timeline->bytes_collected);
+#endif /* MALI_UNIT_TEST */
+
+ } else {
+ const unsigned int new_rb_idx_raw =
+ wb_idx_raw - PACKET_COUNT + 1;
+ /* Adjust read buffer index to the next valid buffer */
+ atomic_set(&stream->rbi, new_rb_idx_raw);
+ }
+ }
+
+ mutex_unlock(&timeline->reader_lock);
+
+ return copy_len;
+}
+
+/**
+ * kbasep_timeline_io_poll - poll timeline stream for packets
+ * @filp: Pointer to file structure
+ * @wait: Pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_timeline_io_poll(struct file *filp, poll_table *wait)
+{
+ struct kbase_tlstream *stream;
+ unsigned int rb_idx;
+ struct kbase_timeline *timeline;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(wait);
+
+ if (WARN_ON(!filp->private_data))
+ return -EFAULT;
+
+ timeline = (struct kbase_timeline *) filp->private_data;
+
+ poll_wait(filp, &timeline->event_queue, wait);
+ if (kbasep_timeline_io_packet_pending(timeline, &stream, &rb_idx))
+ return POLLIN;
+ return 0;
+}
+
+/**
+ * kbasep_timeline_io_release - release timeline stream descriptor
+ * @inode: Pointer to inode structure
+ * @filp: Pointer to file structure
+ *
+ * Return: always returns zero
+ */
+static int kbasep_timeline_io_release(struct inode *inode, struct file *filp)
+{
+ struct kbase_timeline *timeline;
+
+ KBASE_DEBUG_ASSERT(inode);
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(filp->private_data);
+
+ CSTD_UNUSED(inode);
+
+ timeline = (struct kbase_timeline *) filp->private_data;
+
+ /* Stop autoflush timer before releasing access to streams. */
+ atomic_set(&timeline->autoflush_timer_active, 0);
+ del_timer_sync(&timeline->autoflush_timer);
+
+ atomic_set(timeline->is_enabled, 0);
+ return 0;
+}
diff --git a/mali_kbase/mali_kbase_timeline_priv.h b/mali_kbase/mali_kbase_timeline_priv.h
new file mode 100644
index 0000000..e4a4a20
--- /dev/null
+++ b/mali_kbase/mali_kbase_timeline_priv.h
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TIMELINE_PRIV_H)
+#define _KBASE_TIMELINE_PRIV_H
+
+#include <mali_kbase.h>
+#include <mali_kbase_tlstream.h>
+
+#include <linux/timer.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+
+/**
+ * struct kbase_timeline - timeline state structure
+ * @streams: The timeline streams generated by kernel
+ * @autoflush_timer: Autoflush timer
+ * @autoflush_timer_active: If non-zero autoflush timer is active
+ * @reader_lock: Reader lock. Only one reader is allowed to
+ * have access to the timeline streams at any given time.
+ * @event_queue: Timeline stream event queue
+ * @bytes_collected: Number of bytes read by user
+ * @is_enabled: Zero, if timeline is disabled. Timeline stream flags
+ * otherwise. See kbase_timeline_io_acquire().
+ * @obj_header_btc: Remaining bytes to copy for the object stream header
+ * @aux_header_btc: Remaining bytes to copy for the aux stream header
+ */
+struct kbase_timeline {
+ struct kbase_tlstream streams[TL_STREAM_TYPE_COUNT];
+ struct timer_list autoflush_timer;
+ atomic_t autoflush_timer_active;
+ struct mutex reader_lock;
+ wait_queue_head_t event_queue;
+#if MALI_UNIT_TEST
+ atomic_t bytes_collected;
+#endif /* MALI_UNIT_TEST */
+ atomic_t *is_enabled;
+ size_t obj_header_btc;
+ size_t aux_header_btc;
+};
+
+extern const struct file_operations kbasep_tlstream_fops;
+
+#endif /* _KBASE_TIMELINE_PRIV_H */
diff --git a/mali_kbase/mali_kbase_tl_serialize.h b/mali_kbase/mali_kbase_tl_serialize.h
new file mode 100644
index 0000000..90808ce
--- /dev/null
+++ b/mali_kbase/mali_kbase_tl_serialize.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TL_SERIALIZE_H)
+#define _KBASE_TL_SERIALIZE_H
+
+#include <mali_kbase.h>
+
+#include <linux/timer.h>
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC 1000000000ull /* ns */
+
+/**
+ * kbasep_serialize_bytes - serialize bytes to the message buffer
+ *
+ * Serialize bytes as is using memcpy()
+ *
+ * @buffer: Message buffer
+ * @pos: Message buffer offset
+ * @bytes: Bytes to serialize
+ * @len: Length of bytes array
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_bytes(
+ char *buffer,
+ size_t pos,
+ const void *bytes,
+ size_t len)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(bytes);
+
+ memcpy(&buffer[pos], bytes, len);
+
+ return pos + len;
+}
+
+/**
+ * kbasep_serialize_string - serialize string to the message buffer
+ *
+ * String is serialized as 4 bytes for string size,
+ * then string content and then null terminator.
+ *
+ * @buffer: Message buffer
+ * @pos: Message buffer offset
+ * @string: String to serialize
+ * @max_write_size: Number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_string(
+ char *buffer,
+ size_t pos,
+ const char *string,
+ size_t max_write_size)
+{
+ u32 string_len;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(string);
+ /* Timeline string consists of at least string length and nul
+ * terminator.
+ */
+ KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+ max_write_size -= sizeof(string_len);
+
+ string_len = strlcpy(
+ &buffer[pos + sizeof(string_len)],
+ string,
+ max_write_size);
+ string_len += sizeof(char);
+
+ /* Make sure that the source string fits into the buffer. */
+ KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+ /* Update string length. */
+ memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+ return pos + sizeof(string_len) + string_len;
+}
+
+/**
+ * kbasep_serialize_timestamp - serialize timestamp to the message buffer
+ *
+ * Get the current time from the raw monotonic clock
+ * and serialize it as a 64-bit unsigned integer.
+ *
+ * @buffer: Message buffer
+ * @pos: Message buffer offset
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_timestamp(void *buffer, size_t pos)
+{
+ struct timespec ts;
+ u64 timestamp;
+
+ getrawmonotonic(&ts);
+ timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+
+ return kbasep_serialize_bytes(
+ buffer, pos,
+ &timestamp, sizeof(timestamp));
+}
+#endif /* _KBASE_TL_SERIALIZE_H */
+
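To make the string layout concrete: kbasep_serialize_string() writes a 32-bit length that counts the NUL terminator, followed by the characters and the NUL itself, so serializing "gpu" consumes 4 + 3 + 1 = 8 bytes. The stand-alone mimic below reproduces that layout for illustration only; it is not the kernel helper and omits the bounds assertions.

/* Stand-alone mimic of the layout produced by kbasep_serialize_string(). */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static size_t serialize_string_mimic(char *buf, size_t pos, const char *s)
{
	uint32_t len = (uint32_t)strlen(s) + 1;	/* length includes the NUL, as in the kernel helper */

	memcpy(&buf[pos], &len, sizeof(len));		/* 4-byte length prefix */
	memcpy(&buf[pos + sizeof(len)], s, len);	/* characters plus NUL */
	return pos + sizeof(len) + len;
}

int main(void)
{
	char buf[64];

	/* "gpu": 4 (length word) + 3 (chars) + 1 (NUL) = 8 bytes. */
	assert(serialize_string_mimic(buf, 0, "gpu") == 8);
	return 0;
}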
diff --git a/mali_kbase/mali_kbase_tlstream.c b/mali_kbase/mali_kbase_tlstream.c
index c663896..2a76bc0 100644
--- a/mali_kbase/mali_kbase_tlstream.c
+++ b/mali_kbase/mali_kbase_tlstream.c
@@ -20,718 +20,12 @@
*
*/
-#include <linux/anon_inodes.h>
-#include <linux/atomic.h>
-#include <linux/file.h>
-#include <linux/mutex.h>
-#include <linux/poll.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/stringify.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-
-#include <mali_kbase.h>
-#include <mali_kbase_jm.h>
-#include <mali_kbase_tlstream.h>
-
-/*****************************************************************************/
-
-/* The version of swtrace protocol used in timeline stream. */
-#define SWTRACE_VERSION 3
-
-/* The maximum expected length of string in tracepoint descriptor. */
-#define STRLEN_MAX 64 /* bytes */
-
-/* The number of nanoseconds in a second. */
-#define NSECS_IN_SEC 1000000000ull /* ns */
-
-/* The period of autoflush checker execution in milliseconds. */
-#define AUTOFLUSH_INTERVAL 1000 /* ms */
-
-/* The maximum size of a single packet used by timeline. */
-#define PACKET_SIZE 4096 /* bytes */
-
-/* The number of packets used by one timeline stream. */
-#if defined(CONFIG_MALI_JOB_DUMP) || defined(CONFIG_MALI_VECTOR_DUMP)
- #define PACKET_COUNT 64
-#else
- #define PACKET_COUNT 32
-#endif
-
-/* The number of bytes reserved for packet header.
- * These value must be defined according to MIPE documentation. */
-#define PACKET_HEADER_SIZE 8 /* bytes */
-
-/* The number of bytes reserved for packet sequence number.
- * These value must be defined according to MIPE documentation. */
-#define PACKET_NUMBER_SIZE 4 /* bytes */
-
-/* Packet header - first word.
- * These values must be defined according to MIPE documentation. */
-#define PACKET_STREAMID_POS 0
-#define PACKET_STREAMID_LEN 8
-#define PACKET_RSVD1_POS (PACKET_STREAMID_POS + PACKET_STREAMID_LEN)
-#define PACKET_RSVD1_LEN 8
-#define PACKET_TYPE_POS (PACKET_RSVD1_POS + PACKET_RSVD1_LEN)
-#define PACKET_TYPE_LEN 3
-#define PACKET_CLASS_POS (PACKET_TYPE_POS + PACKET_TYPE_LEN)
-#define PACKET_CLASS_LEN 7
-#define PACKET_FAMILY_POS (PACKET_CLASS_POS + PACKET_CLASS_LEN)
-#define PACKET_FAMILY_LEN 6
-
-/* Packet header - second word
- * These values must be defined according to MIPE documentation. */
-#define PACKET_LENGTH_POS 0
-#define PACKET_LENGTH_LEN 24
-#define PACKET_SEQBIT_POS (PACKET_LENGTH_POS + PACKET_LENGTH_LEN)
-#define PACKET_SEQBIT_LEN 1
-#define PACKET_RSVD2_POS (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN)
-#define PACKET_RSVD2_LEN 7
-
-/* Types of streams generated by timeline.
- * Order is significant! Header streams must precede respective body streams. */
-enum tl_stream_type {
- TL_STREAM_TYPE_OBJ_HEADER,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- TL_STREAM_TYPE_OBJ,
- TL_STREAM_TYPE_AUX_HEADER,
- TL_STREAM_TYPE_AUX,
-
- TL_STREAM_TYPE_COUNT
-};
-
-/* Timeline packet family ids.
- * Values are significant! Check MIPE documentation. */
-enum tl_packet_family {
- TL_PACKET_FAMILY_CTRL = 0, /* control packets */
- TL_PACKET_FAMILY_TL = 1, /* timeline packets */
-
- TL_PACKET_FAMILY_COUNT
-};
-
-/* Packet classes used in timeline streams.
- * Values are significant! Check MIPE documentation. */
-enum tl_packet_class {
- TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */
- TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */
-};
-
-/* Packet types used in timeline streams.
- * Values are significant! Check MIPE documentation. */
-enum tl_packet_type {
- TL_PACKET_TYPE_HEADER = 0, /* stream's header/directory */
- TL_PACKET_TYPE_BODY = 1, /* stream's body */
- TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */
-};
-
-/* Message ids of trace events that are recorded in the timeline stream. */
-enum tl_msg_id_obj {
- /* Timeline object events. */
- KBASE_TL_NEW_CTX,
- KBASE_TL_NEW_GPU,
- KBASE_TL_NEW_LPU,
- KBASE_TL_NEW_ATOM,
- KBASE_TL_NEW_AS,
- KBASE_TL_DEL_CTX,
- KBASE_TL_DEL_ATOM,
- KBASE_TL_LIFELINK_LPU_GPU,
- KBASE_TL_LIFELINK_AS_GPU,
- KBASE_TL_RET_CTX_LPU,
- KBASE_TL_RET_ATOM_CTX,
- KBASE_TL_RET_ATOM_LPU,
- KBASE_TL_NRET_CTX_LPU,
- KBASE_TL_NRET_ATOM_CTX,
- KBASE_TL_NRET_ATOM_LPU,
- KBASE_TL_RET_AS_CTX,
- KBASE_TL_NRET_AS_CTX,
- KBASE_TL_RET_ATOM_AS,
- KBASE_TL_NRET_ATOM_AS,
- KBASE_TL_ATTRIB_ATOM_CONFIG,
- KBASE_TL_ATTRIB_ATOM_PRIORITY,
- KBASE_TL_ATTRIB_ATOM_STATE,
- KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
- KBASE_TL_ATTRIB_ATOM_JIT,
- KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
- KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
- KBASE_TL_ATTRIB_AS_CONFIG,
- KBASE_TL_EVENT_LPU_SOFTSTOP,
- KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
- KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
- KBASE_TL_EVENT_ATOM_SOFTJOB_START,
- KBASE_TL_EVENT_ATOM_SOFTJOB_END,
-
- /* Job dump specific events. */
- KBASE_JD_GPU_SOFT_RESET,
-};
-
-/* Message ids of trace events that are recorded in the auxiliary stream. */
-enum tl_msg_id_aux {
- KBASE_AUX_PM_STATE,
- KBASE_AUX_PAGEFAULT,
- KBASE_AUX_PAGESALLOC,
- KBASE_AUX_DEVFREQ_TARGET,
- KBASE_AUX_PROTECTED_ENTER_START,
- KBASE_AUX_PROTECTED_ENTER_END,
- KBASE_AUX_PROTECTED_LEAVE_START,
- KBASE_AUX_PROTECTED_LEAVE_END,
- KBASE_AUX_JIT_STATS,
- KBASE_AUX_EVENT_JOB_SLOT,
-};
-
-/*****************************************************************************/
-
-/**
- * struct tl_stream - timeline stream structure
- * @lock: Message order lock
- * @buffer: Array of buffers
- * @wbi: Write buffer index
- * @rbi: Read buffer index
- * @numbered: If non-zero stream's packets are sequentially numbered
- * @autoflush_counter: Counter tracking stream's autoflush state
- *
- * This structure holds information needed to construct proper packets in the
- * timeline stream.
- *
- * Each message in the sequence must bear a timestamp that is
- * greater than the previous message in the same stream. For this reason
- * a lock is held throughout the process of message creation.
- *
- * Each stream contains a set of buffers. Each buffer will hold one MIPE
- * packet. In case there is no free space required to store the incoming
- * message the oldest buffer is discarded. Each packet in timeline body
- * stream has a sequence number embedded, this value must increment
- * monotonically and is used by the packets receiver to discover these
- * buffer overflows.
- *
- * The autoflush counter is set to a negative number when there is no data
- * pending for flush and it is set to zero on every update of the buffer. The
- * autoflush timer will increment the counter by one on every expiry. If there
- * is no activity on the buffer for two consecutive timer expiries, the stream
- * buffer will be flushed.
- */
-struct tl_stream {
- spinlock_t lock;
-
- struct {
- atomic_t size; /* number of bytes in buffer */
- char data[PACKET_SIZE]; /* buffer's data */
- } buffer[PACKET_COUNT];
-
- atomic_t wbi;
- atomic_t rbi;
-
- int numbered;
- atomic_t autoflush_counter;
-};
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tl_serialize.h"
+#include "mali_kbase_mipe_proto.h"
/**
- * struct tp_desc - tracepoint message descriptor structure
- * @id: Tracepoint ID identifying message in stream
- * @id_str: Human readable version of tracepoint ID
- * @name: Tracepoint description
- * @arg_types: Tracepoint's arguments types declaration
- * @arg_names: Comma separated list of tracepoint's arguments names
- */
-struct tp_desc {
- u32 id;
- const char *id_str;
- const char *name;
- const char *arg_types;
- const char *arg_names;
-};
-
-/*****************************************************************************/
-
-/* Configuration of timeline streams generated by kernel.
- * Kernel emit only streams containing either timeline object events or
- * auxiliary events. All streams have stream id value of 1 (as opposed to user
- * space streams that have value of 0). */
-static const struct {
- enum tl_packet_family pkt_family;
- enum tl_packet_class pkt_class;
- enum tl_packet_type pkt_type;
- unsigned int stream_id;
-} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
- {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_HEADER, 1},
- {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
- {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1},
- {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_HEADER, 1},
- {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1}
-};
-
-/**
- * struct kbase_timeline - timeline state structure
- * @streams: The timeline streams generated by kernel
- * @autoflush_timer: Autoflush timer
- * @autoflush_timer_active: If non-zero autoflush timer is active
- * @reader_lock: Reader lock. Only one reader is allowed to
- * have access to the timeline streams at any given time.
- * @event_queue: Timeline stream event queue
- * @bytes_collected: Number of bytes read by user
- * @bytes_generated: Number of bytes generated by tracepoint messages
- */
-struct kbase_timeline {
- struct tl_stream streams[TL_STREAM_TYPE_COUNT];
- struct timer_list autoflush_timer;
- atomic_t autoflush_timer_active;
- struct mutex reader_lock;
- wait_queue_head_t event_queue;
-#if MALI_UNIT_TEST
- atomic_t bytes_collected;
- atomic_t bytes_generated;
-#endif /* MALI_UNIT_TEST */
- atomic_t *is_enabled;
-};
-
-
-/* The timeline stream file operations functions. */
-static ssize_t kbasep_tlstream_read(
- struct file *filp,
- char __user *buffer,
- size_t size,
- loff_t *f_pos);
-static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait);
-static int kbasep_tlstream_release(struct inode *inode, struct file *filp);
-
-/* The timeline stream file operations structure. */
-static const struct file_operations kbasep_tlstream_fops = {
- .release = kbasep_tlstream_release,
- .read = kbasep_tlstream_read,
- .poll = kbasep_tlstream_poll,
-};
-
-/* Descriptors of timeline messages transmitted in object events stream. */
-static const struct tp_desc tp_desc_obj[] = {
- {
- KBASE_TL_NEW_CTX,
- __stringify(KBASE_TL_NEW_CTX),
- "object ctx is created",
- "@pII",
- "ctx,ctx_nr,tgid"
- },
- {
- KBASE_TL_NEW_GPU,
- __stringify(KBASE_TL_NEW_GPU),
- "object gpu is created",
- "@pII",
- "gpu,gpu_id,core_count"
- },
- {
- KBASE_TL_NEW_LPU,
- __stringify(KBASE_TL_NEW_LPU),
- "object lpu is created",
- "@pII",
- "lpu,lpu_nr,lpu_fn"
- },
- {
- KBASE_TL_NEW_ATOM,
- __stringify(KBASE_TL_NEW_ATOM),
- "object atom is created",
- "@pI",
- "atom,atom_nr"
- },
- {
- KBASE_TL_NEW_AS,
- __stringify(KBASE_TL_NEW_AS),
- "address space object is created",
- "@pI",
- "address_space,as_nr"
- },
- {
- KBASE_TL_DEL_CTX,
- __stringify(KBASE_TL_DEL_CTX),
- "context is destroyed",
- "@p",
- "ctx"
- },
- {
- KBASE_TL_DEL_ATOM,
- __stringify(KBASE_TL_DEL_ATOM),
- "atom is destroyed",
- "@p",
- "atom"
- },
- {
- KBASE_TL_LIFELINK_LPU_GPU,
- __stringify(KBASE_TL_LIFELINK_LPU_GPU),
- "lpu is deleted with gpu",
- "@pp",
- "lpu,gpu"
- },
- {
- KBASE_TL_LIFELINK_AS_GPU,
- __stringify(KBASE_TL_LIFELINK_AS_GPU),
- "address space is deleted with gpu",
- "@pp",
- "address_space,gpu"
- },
- {
- KBASE_TL_RET_CTX_LPU,
- __stringify(KBASE_TL_RET_CTX_LPU),
- "context is retained by lpu",
- "@pp",
- "ctx,lpu"
- },
- {
- KBASE_TL_RET_ATOM_CTX,
- __stringify(KBASE_TL_RET_ATOM_CTX),
- "atom is retained by context",
- "@pp",
- "atom,ctx"
- },
- {
- KBASE_TL_RET_ATOM_LPU,
- __stringify(KBASE_TL_RET_ATOM_LPU),
- "atom is retained by lpu",
- "@pps",
- "atom,lpu,attrib_match_list"
- },
- {
- KBASE_TL_NRET_CTX_LPU,
- __stringify(KBASE_TL_NRET_CTX_LPU),
- "context is released by lpu",
- "@pp",
- "ctx,lpu"
- },
- {
- KBASE_TL_NRET_ATOM_CTX,
- __stringify(KBASE_TL_NRET_ATOM_CTX),
- "atom is released by context",
- "@pp",
- "atom,ctx"
- },
- {
- KBASE_TL_NRET_ATOM_LPU,
- __stringify(KBASE_TL_NRET_ATOM_LPU),
- "atom is released by lpu",
- "@pp",
- "atom,lpu"
- },
- {
- KBASE_TL_RET_AS_CTX,
- __stringify(KBASE_TL_RET_AS_CTX),
- "address space is retained by context",
- "@pp",
- "address_space,ctx"
- },
- {
- KBASE_TL_NRET_AS_CTX,
- __stringify(KBASE_TL_NRET_AS_CTX),
- "address space is released by context",
- "@pp",
- "address_space,ctx"
- },
- {
- KBASE_TL_RET_ATOM_AS,
- __stringify(KBASE_TL_RET_ATOM_AS),
- "atom is retained by address space",
- "@pp",
- "atom,address_space"
- },
- {
- KBASE_TL_NRET_ATOM_AS,
- __stringify(KBASE_TL_NRET_ATOM_AS),
- "atom is released by address space",
- "@pp",
- "atom,address_space"
- },
- {
- KBASE_TL_ATTRIB_ATOM_CONFIG,
- __stringify(KBASE_TL_ATTRIB_ATOM_CONFIG),
- "atom job slot attributes",
- "@pLLI",
- "atom,descriptor,affinity,config"
- },
- {
- KBASE_TL_ATTRIB_ATOM_PRIORITY,
- __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY),
- "atom priority",
- "@pI",
- "atom,prio"
- },
- {
- KBASE_TL_ATTRIB_ATOM_STATE,
- __stringify(KBASE_TL_ATTRIB_ATOM_STATE),
- "atom state",
- "@pI",
- "atom,state"
- },
- {
- KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
- __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITIZED),
- "atom caused priority change",
- "@p",
- "atom"
- },
- {
- KBASE_TL_ATTRIB_ATOM_JIT,
- __stringify(KBASE_TL_ATTRIB_ATOM_JIT),
- "jit done for atom",
- "@pLLLL",
- "atom,edit_addr,new_addr,va_pages,jit_flags"
- },
- {
- KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
- __stringify(KBASE_TL_ATTRIB_ATOM_JITALLOCINFO),
- "Information about JIT allocations",
- "@pLLLIIIII",
- "atom,va_pgs,com_pgs,extent,j_id,bin_id,max_allocs,flags,usg_id"
- },
- {
- KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
- __stringify(KBASE_TL_ATTRIB_ATOM_JITFREEINFO),
- "Information about JIT frees",
- "@pI",
- "atom,j_id"
- },
- {
- KBASE_TL_ATTRIB_AS_CONFIG,
- __stringify(KBASE_TL_ATTRIB_AS_CONFIG),
- "address space attributes",
- "@pLLL",
- "address_space,transtab,memattr,transcfg"
- },
- {
- KBASE_TL_EVENT_LPU_SOFTSTOP,
- __stringify(KBASE_TL_EVENT_LPU_SOFTSTOP),
- "softstop event on given lpu",
- "@p",
- "lpu"
- },
- {
- KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
- __stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX),
- "atom softstopped",
- "@p",
- "atom"
- },
- {
- KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
- __stringify(KBASE_TL_EVENT_SOFTSTOP_ISSUE),
- "atom softstop issued",
- "@p",
- "atom"
- },
- {
- KBASE_TL_EVENT_ATOM_SOFTJOB_START,
- __stringify(KBASE_TL_EVENT_ATOM_SOFTJOB_START),
- "atom soft job has started",
- "@p",
- "atom"
- },
- {
- KBASE_TL_EVENT_ATOM_SOFTJOB_END,
- __stringify(KBASE_TL_EVENT_ATOM_SOFTJOB_END),
- "atom soft job has completed",
- "@p",
- "atom"
- },
- {
- KBASE_JD_GPU_SOFT_RESET,
- __stringify(KBASE_JD_GPU_SOFT_RESET),
- "gpu soft reset",
- "@p",
- "gpu"
- },
-};
-
-/* Descriptors of timeline messages transmitted in auxiliary events stream. */
-static const struct tp_desc tp_desc_aux[] = {
- {
- KBASE_AUX_PM_STATE,
- __stringify(KBASE_AUX_PM_STATE),
- "PM state",
- "@IL",
- "core_type,core_state_bitset"
- },
- {
- KBASE_AUX_PAGEFAULT,
- __stringify(KBASE_AUX_PAGEFAULT),
- "Page fault",
- "@IIL",
- "ctx_nr,as_nr,page_cnt_change"
- },
- {
- KBASE_AUX_PAGESALLOC,
- __stringify(KBASE_AUX_PAGESALLOC),
- "Total alloc pages change",
- "@IL",
- "ctx_nr,page_cnt"
- },
- {
- KBASE_AUX_DEVFREQ_TARGET,
- __stringify(KBASE_AUX_DEVFREQ_TARGET),
- "New device frequency target",
- "@L",
- "target_freq"
- },
- {
- KBASE_AUX_PROTECTED_ENTER_START,
- __stringify(KBASE_AUX_PROTECTED_ENTER_START),
- "enter protected mode start",
- "@p",
- "gpu"
- },
- {
- KBASE_AUX_PROTECTED_ENTER_END,
- __stringify(KBASE_AUX_PROTECTED_ENTER_END),
- "enter protected mode end",
- "@p",
- "gpu"
- },
- {
- KBASE_AUX_PROTECTED_LEAVE_START,
- __stringify(KBASE_AUX_PROTECTED_LEAVE_START),
- "leave protected mode start",
- "@p",
- "gpu"
- },
- {
- KBASE_AUX_PROTECTED_LEAVE_END,
- __stringify(KBASE_AUX_PROTECTED_LEAVE_END),
- "leave protected mode end",
- "@p",
- "gpu"
- },
- {
- KBASE_AUX_JIT_STATS,
- __stringify(KBASE_AUX_JIT_STATS),
- "per-bin JIT statistics",
- "@IIIIII",
- "ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages"
- },
- {
- KBASE_AUX_EVENT_JOB_SLOT,
- __stringify(KBASE_AUX_EVENT_JOB_SLOT),
- "event on a given job slot",
- "@pIII",
- "ctx,slot_nr,atom_nr,event"
- }
-};
-
-/*****************************************************************************/
-
-/**
- * kbasep_tlstream_get_timestamp - return timestamp
- *
- * Function returns timestamp value based on raw monotonic timer. Value will
- * wrap around zero in case of overflow.
- * Return: timestamp value
- */
-static u64 kbasep_tlstream_get_timestamp(void)
-{
- struct timespec ts;
- u64 timestamp;
-
- getrawmonotonic(&ts);
- timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
- return timestamp;
-}
-
-/**
- * kbasep_tlstream_write_bytes - write data to message buffer
- * @buffer: buffer where data will be written
- * @pos: position in the buffer where to place data
- * @bytes: pointer to buffer holding data
- * @len: length of data to be written
- *
- * Return: updated position in the buffer
- */
-static size_t kbasep_tlstream_write_bytes(
- char *buffer,
- size_t pos,
- const void *bytes,
- size_t len)
-{
- KBASE_DEBUG_ASSERT(buffer);
- KBASE_DEBUG_ASSERT(bytes);
-
- memcpy(&buffer[pos], bytes, len);
-
- return pos + len;
-}
-
-/**
- * kbasep_tlstream_write_string - write string to message buffer
- * @buffer: buffer where data will be written
- * @pos: position in the buffer where to place data
- * @string: pointer to buffer holding the source string
- * @max_write_size: number of bytes that can be stored in buffer
- *
- * Return: updated position in the buffer
- */
-static size_t kbasep_tlstream_write_string(
- char *buffer,
- size_t pos,
- const char *string,
- size_t max_write_size)
-{
- u32 string_len;
-
- KBASE_DEBUG_ASSERT(buffer);
- KBASE_DEBUG_ASSERT(string);
- /* Timeline string consists of at least string length and nul
- * terminator. */
- KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
- max_write_size -= sizeof(string_len);
-
- string_len = strlcpy(
- &buffer[pos + sizeof(string_len)],
- string,
- max_write_size);
- string_len += sizeof(char);
-
- /* Make sure that the source string fit into the buffer. */
- KBASE_DEBUG_ASSERT(string_len <= max_write_size);
-
- /* Update string length. */
- memcpy(&buffer[pos], &string_len, sizeof(string_len));
-
- return pos + sizeof(string_len) + string_len;
-}
-
-/**
- * kbasep_tlstream_write_timestamp - write timestamp to message buffer
- * @buffer: buffer where data will be written
- * @pos: position in the buffer where to place data
- *
- * Return: updated position in the buffer
- */
-static size_t kbasep_tlstream_write_timestamp(void *buffer, size_t pos)
-{
- u64 timestamp = kbasep_tlstream_get_timestamp();
-
- return kbasep_tlstream_write_bytes(
- buffer, pos,
- &timestamp, sizeof(timestamp));
-}
-
-/**
- * kbasep_tlstream_put_bits - put bits in a word
- * @word: pointer to the words being modified
- * @value: value that shall be written to given position
- * @bitpos: position where value shall be written (in bits)
- * @bitlen: length of value (in bits)
- */
-static void kbasep_tlstream_put_bits(
- u32 *word,
- u32 value,
- unsigned int bitpos,
- unsigned int bitlen)
-{
- const u32 mask = ((1 << bitlen) - 1) << bitpos;
-
- KBASE_DEBUG_ASSERT(word);
- KBASE_DEBUG_ASSERT((0 != bitlen) && (32 >= bitlen));
- KBASE_DEBUG_ASSERT((bitpos + bitlen) <= 32);
-
- *word &= ~mask;
- *word |= ((value << bitpos) & mask);
-}
-
-/**
- * kbasep_tlstream_packet_header_setup - setup the packet header
+ * kbasep_packet_header_setup - setup the packet header
* @buffer: pointer to the buffer
* @pkt_family: packet's family
* @pkt_type: packet's type
@@ -741,107 +35,69 @@ static void kbasep_tlstream_put_bits(
*
* Function sets up immutable part of packet header in the given buffer.
*/
-static void kbasep_tlstream_packet_header_setup(
- char *buffer,
- enum tl_packet_family pkt_family,
- enum tl_packet_class pkt_class,
- enum tl_packet_type pkt_type,
- unsigned int stream_id,
- int numbered)
+static void kbasep_packet_header_setup(
+ char *buffer,
+ enum tl_packet_family pkt_family,
+ enum tl_packet_class pkt_class,
+ enum tl_packet_type pkt_type,
+ unsigned int stream_id,
+ int numbered)
{
- u32 word0 = 0;
- u32 word1 = 0;
-
- KBASE_DEBUG_ASSERT(buffer);
- KBASE_DEBUG_ASSERT(pkt_family == TL_PACKET_FAMILY_TL);
- KBASE_DEBUG_ASSERT(
- (pkt_type == TL_PACKET_TYPE_HEADER) ||
- (pkt_type == TL_PACKET_TYPE_SUMMARY) ||
- (pkt_type == TL_PACKET_TYPE_BODY));
- KBASE_DEBUG_ASSERT(
- (pkt_class == TL_PACKET_CLASS_OBJ) ||
- (pkt_class == TL_PACKET_CLASS_AUX));
-
- kbasep_tlstream_put_bits(
- &word0, pkt_family,
- PACKET_FAMILY_POS, PACKET_FAMILY_LEN);
- kbasep_tlstream_put_bits(
- &word0, pkt_class,
- PACKET_CLASS_POS, PACKET_CLASS_LEN);
- kbasep_tlstream_put_bits(
- &word0, pkt_type,
- PACKET_TYPE_POS, PACKET_TYPE_LEN);
- kbasep_tlstream_put_bits(
- &word0, stream_id,
- PACKET_STREAMID_POS, PACKET_STREAMID_LEN);
-
- if (numbered)
- kbasep_tlstream_put_bits(
- &word1, 1,
- PACKET_SEQBIT_POS, PACKET_SEQBIT_LEN);
-
- memcpy(&buffer[0], &word0, sizeof(word0));
- memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+ u32 words[2] = {
+ MIPE_PACKET_HEADER_W0(pkt_family, pkt_class, pkt_type, stream_id),
+ MIPE_PACKET_HEADER_W1(0, !!numbered),
+ };
+ memcpy(buffer, words, sizeof(words));
}
/**
- * kbasep_tlstream_packet_header_update - update the packet header
+ * kbasep_packet_header_update - update the packet header
* @buffer: pointer to the buffer
* @data_size: amount of data carried in this packet
+ * @numbered: non-zero if the stream is numbered
*
* Function updates mutable part of packet header in the given buffer.
 * Note that the value of data_size must not include the size of the header.
*/
-static void kbasep_tlstream_packet_header_update(
- char *buffer,
- size_t data_size)
+static void kbasep_packet_header_update(
+ char *buffer,
+ size_t data_size,
+ int numbered)
{
u32 word0;
- u32 word1;
+ u32 word1 = MIPE_PACKET_HEADER_W1((u32)data_size, !!numbered);
KBASE_DEBUG_ASSERT(buffer);
CSTD_UNUSED(word0);
- memcpy(&word1, &buffer[sizeof(word0)], sizeof(word1));
-
- kbasep_tlstream_put_bits(
- &word1, data_size,
- PACKET_LENGTH_POS, PACKET_LENGTH_LEN);
-
memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
}
/**
- * kbasep_tlstream_packet_number_update - update the packet number
+ * kbasep_packet_number_update - update the packet number
* @buffer: pointer to the buffer
* @counter: value of packet counter for this packet's stream
*
* Function updates packet number embedded within the packet placed in the
* given buffer.
*/
-static void kbasep_tlstream_packet_number_update(char *buffer, u32 counter)
+static void kbasep_packet_number_update(char *buffer, u32 counter)
{
KBASE_DEBUG_ASSERT(buffer);
memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter));
}
-/**
- * kbasep_timeline_stream_reset - reset stream
- * @stream: pointer to the stream structure
- *
- * Function discards all pending messages and resets packet counters.
- */
-static void kbasep_timeline_stream_reset(struct tl_stream *stream)
+void kbase_tlstream_reset(struct kbase_tlstream *stream)
{
unsigned int i;
for (i = 0; i < PACKET_COUNT; i++) {
if (stream->numbered)
atomic_set(
- &stream->buffer[i].size,
- PACKET_HEADER_SIZE +
- PACKET_NUMBER_SIZE);
+ &stream->buffer[i].size,
+ PACKET_HEADER_SIZE +
+ PACKET_NUMBER_SIZE);
else
atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
}
@@ -850,14 +106,26 @@ static void kbasep_timeline_stream_reset(struct tl_stream *stream)
atomic_set(&stream->rbi, 0);
}
-/**
- * kbasep_timeline_stream_init - initialize timeline stream
- * @stream: pointer to the stream structure
- * @stream_type: stream type
+/* Configuration of timeline streams generated by kernel.
+ * The kernel emits only streams containing either timeline object events or
+ * auxiliary events. All streams have stream id value of 1 (as opposed to user
+ * space streams that have value of 0).
*/
-static void kbasep_timeline_stream_init(
- struct tl_stream *stream,
- enum tl_stream_type stream_type)
+static const struct {
+ enum tl_packet_family pkt_family;
+ enum tl_packet_class pkt_class;
+ enum tl_packet_type pkt_type;
+ unsigned int stream_id;
+} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1}
+};
+
+void kbase_tlstream_init(
+ struct kbase_tlstream *stream,
+ enum tl_stream_type stream_type,
+ wait_queue_head_t *ready_read)
{
unsigned int i;
@@ -873,44 +141,43 @@ static void kbasep_timeline_stream_init(
stream->numbered = 0;
for (i = 0; i < PACKET_COUNT; i++)
- kbasep_tlstream_packet_header_setup(
- stream->buffer[i].data,
- tl_stream_cfg[stream_type].pkt_family,
- tl_stream_cfg[stream_type].pkt_class,
- tl_stream_cfg[stream_type].pkt_type,
- tl_stream_cfg[stream_type].stream_id,
- stream->numbered);
+ kbasep_packet_header_setup(
+ stream->buffer[i].data,
+ tl_stream_cfg[stream_type].pkt_family,
+ tl_stream_cfg[stream_type].pkt_class,
+ tl_stream_cfg[stream_type].pkt_type,
+ tl_stream_cfg[stream_type].stream_id,
+ stream->numbered);
- kbasep_timeline_stream_reset(stream);
+#if MALI_UNIT_TEST
+ atomic_set(&stream->bytes_generated, 0);
+#endif
+ stream->ready_read = ready_read;
+
+ kbase_tlstream_reset(stream);
}
-/**
- * kbasep_timeline_stream_term - terminate timeline stream
- * @stream: pointer to the stream structure
- */
-static void kbasep_timeline_stream_term(struct tl_stream *stream)
+void kbase_tlstream_term(struct kbase_tlstream *stream)
{
KBASE_DEBUG_ASSERT(stream);
}
/**
- * kbasep_tlstream_msgbuf_submit - submit packet to the user space
- * @timeline: Timeline instance
+ * kbase_tlstream_msgbuf_submit - submit packet to user space
* @stream: Pointer to the stream structure
* @wb_idx_raw: Write buffer index
- * @wb_size: Length of data stored in current buffer
+ * @wb_size: Length of data stored in the current buffer
*
- * Function updates currently written buffer with packet header. Then write
- * index is incremented and buffer is handled to user space. Parameters
- * of new buffer are returned using provided arguments.
+ * Updates the currently written buffer with the packet header.
+ * Then the write index is incremented and the buffer is handed to user space.
+ * Parameters of the new buffer are returned using the provided arguments.
*
- * Return: length of data in new buffer
+ * Return: length of data in the new buffer
*
- * Warning: User must update the stream structure with returned value.
+ * Warning: the user must update the stream structure with the returned value.
*/
static size_t kbasep_tlstream_msgbuf_submit(
- struct kbase_timeline *timeline,
- struct tl_stream *stream,
+ struct kbase_tlstream *stream,
unsigned int wb_idx_raw,
unsigned int wb_size)
{
@@ -919,14 +186,15 @@ static size_t kbasep_tlstream_msgbuf_submit(
/* Set stream as flushed. */
atomic_set(&stream->autoflush_counter, -1);
- kbasep_tlstream_packet_header_update(
- stream->buffer[wb_idx].data,
- wb_size - PACKET_HEADER_SIZE);
+ kbasep_packet_header_update(
+ stream->buffer[wb_idx].data,
+ wb_size - PACKET_HEADER_SIZE,
+ stream->numbered);
if (stream->numbered)
- kbasep_tlstream_packet_number_update(
- stream->buffer[wb_idx].data,
- wb_idx_raw);
+ kbasep_packet_number_update(
+ stream->buffer[wb_idx].data,
+ wb_idx_raw);
/* Increasing write buffer index will expose this packet to the reader.
* As stream->lock is not taken on reader side we must make sure memory
@@ -935,7 +203,7 @@ static size_t kbasep_tlstream_msgbuf_submit(
atomic_inc(&stream->wbi);
/* Inform user that packets are ready for reading. */
- wake_up_interruptible(&timeline->event_queue);
+ wake_up_interruptible(stream->ready_read);
wb_size = PACKET_HEADER_SIZE;
if (stream->numbered)
@@ -944,39 +212,18 @@ static size_t kbasep_tlstream_msgbuf_submit(
return wb_size;
}
-/**
- * kbasep_tlstream_msgbuf_acquire - lock selected stream and reserves buffer
- * @timeline: Timeline instance
- * @stream_type: Type of the stream that shall be locked
- * @msg_size: Message size
- * @flags: Pointer to store flags passed back on stream release
- *
- * Function will lock the stream and reserve the number of bytes requested
- * in msg_size for the user.
- *
- * Return: pointer to the buffer where message can be stored
- *
- * Warning: Stream must be released with kbasep_tlstream_msgbuf_release().
- * Only atomic operations are allowed while stream is locked
- * (i.e. do not use any operation that may sleep).
- */
-static char *kbasep_tlstream_msgbuf_acquire(
- struct kbase_timeline *timeline,
- enum tl_stream_type stream_type,
- size_t msg_size,
- unsigned long *flags) __acquires(&stream->lock)
+char *kbase_tlstream_msgbuf_acquire(
+ struct kbase_tlstream *stream,
+ size_t msg_size,
+ unsigned long *flags) __acquires(&stream->lock)
{
- struct tl_stream *stream;
unsigned int wb_idx_raw;
unsigned int wb_idx;
size_t wb_size;
- KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
KBASE_DEBUG_ASSERT(
- PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
- msg_size);
-
- stream = &timeline->streams[stream_type];
+ PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
+ msg_size);
spin_lock_irqsave(&stream->lock, *flags);
@@ -986,7 +233,7 @@ static char *kbasep_tlstream_msgbuf_acquire(
/* Select next buffer if data will not fit into current one. */
if (PACKET_SIZE < wb_size + msg_size) {
- wb_size = kbasep_tlstream_msgbuf_submit(timeline,
+ wb_size = kbasep_tlstream_msgbuf_submit(
stream, wb_idx_raw, wb_size);
wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
}
@@ -995,52 +242,25 @@ static char *kbasep_tlstream_msgbuf_acquire(
atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size);
#if MALI_UNIT_TEST
- atomic_add(msg_size, &timeline->bytes_generated);
+ atomic_add(msg_size, &stream->bytes_generated);
#endif /* MALI_UNIT_TEST */
return &stream->buffer[wb_idx].data[wb_size];
}
-/**
- * kbasep_tlstream_msgbuf_release - unlock selected stream
- * @timeline: Timeline instance
- * @stream_type: Type of the stream that shall be locked
- * @flags: Value obtained during stream acquire
- *
- * Function releases stream that has been previously locked with a call to
- * kbasep_tlstream_msgbuf_acquire().
- */
-static void kbasep_tlstream_msgbuf_release(
- struct kbase_timeline *timeline,
- enum tl_stream_type stream_type,
- unsigned long flags) __releases(&stream->lock)
+void kbase_tlstream_msgbuf_release(
+ struct kbase_tlstream *stream,
+ unsigned long flags) __releases(&stream->lock)
{
- struct tl_stream *stream;
-
- KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
-
- stream = &timeline->streams[stream_type];
-
/* Mark stream as containing unflushed data. */
atomic_set(&stream->autoflush_counter, 0);
spin_unlock_irqrestore(&stream->lock, flags);
}
-/*****************************************************************************/
-
-/**
- * kbasep_tlstream_flush_stream - flush stream
- * @timeline: Timeline instance
- * @stype: Type of stream to be flushed
- *
- * Flush pending data in timeline stream.
- */
-static void kbasep_tlstream_flush_stream(
- struct kbase_timeline *timeline,
- enum tl_stream_type stype)
+void kbase_tlstream_flush_stream(
+ struct kbase_tlstream *stream)
{
- struct tl_stream *stream = &timeline->streams[stype];
unsigned long flags;
unsigned int wb_idx_raw;
unsigned int wb_idx;
@@ -1057,7 +277,7 @@ static void kbasep_tlstream_flush_stream(
wb_size = atomic_read(&stream->buffer[wb_idx].size);
if (wb_size > min_size) {
- wb_size = kbasep_tlstream_msgbuf_submit(timeline,
+ wb_size = kbasep_tlstream_msgbuf_submit(
stream, wb_idx_raw, wb_size);
wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
atomic_set(&stream->buffer[wb_idx].size, wb_size);
@@ -1065,1737 +285,3 @@ static void kbasep_tlstream_flush_stream(
spin_unlock_irqrestore(&stream->lock, flags);
}
-/**
- * kbasep_tlstream_autoflush_timer_callback - autoflush timer callback
- * @timer: Timer list
- *
- * Timer is executed periodically to check if any of the stream contains
- * buffer ready to be submitted to user space.
- */
-static void kbasep_tlstream_autoflush_timer_callback(struct timer_list *timer)
-{
- enum tl_stream_type stype;
- int rcode;
- struct kbase_timeline *timeline =
- container_of(timer, struct kbase_timeline, autoflush_timer);
-
- CSTD_UNUSED(timer);
-
- for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) {
- struct tl_stream *stream = &timeline->streams[stype];
- unsigned long flags;
- unsigned int wb_idx_raw;
- unsigned int wb_idx;
- size_t wb_size;
- size_t min_size = PACKET_HEADER_SIZE;
-
- int af_cnt = atomic_read(&stream->autoflush_counter);
-
- /* Check if stream contain unflushed data. */
- if (0 > af_cnt)
- continue;
-
- /* Check if stream should be flushed now. */
- if (af_cnt != atomic_cmpxchg(
- &stream->autoflush_counter,
- af_cnt,
- af_cnt + 1))
- continue;
- if (!af_cnt)
- continue;
-
- /* Autoflush this stream. */
- if (stream->numbered)
- min_size += PACKET_NUMBER_SIZE;
-
- spin_lock_irqsave(&stream->lock, flags);
-
- wb_idx_raw = atomic_read(&stream->wbi);
- wb_idx = wb_idx_raw % PACKET_COUNT;
- wb_size = atomic_read(&stream->buffer[wb_idx].size);
-
- if (wb_size > min_size) {
- wb_size = kbasep_tlstream_msgbuf_submit(timeline,
- stream, wb_idx_raw, wb_size);
- wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
- atomic_set(&stream->buffer[wb_idx].size,
- wb_size);
- }
- spin_unlock_irqrestore(&stream->lock, flags);
- }
-
- if (atomic_read(&timeline->autoflush_timer_active))
- rcode = mod_timer(
- &timeline->autoflush_timer,
- jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
- CSTD_UNUSED(rcode);
-}
-
-/**
- * kbasep_tlstream_packet_pending - check timeline streams for pending packets
- * @timeline: Timeline instance
- * @stype: Pointer to variable where stream type will be placed
- * @rb_idx_raw: Pointer to variable where read buffer index will be placed
- *
- * Function checks all streams for pending packets. It will stop as soon as
- * packet ready to be submitted to user space is detected. Variables under
- * pointers, passed as the parameters to this function will be updated with
- * values pointing to right stream and buffer.
- *
- * Return: non-zero if any of timeline streams has at last one packet ready
- */
-static int kbasep_tlstream_packet_pending(
- struct kbase_timeline *timeline,
- enum tl_stream_type *stype,
- unsigned int *rb_idx_raw)
-{
- int pending = 0;
-
- KBASE_DEBUG_ASSERT(stype);
- KBASE_DEBUG_ASSERT(rb_idx_raw);
-
- for (
- *stype = 0;
- (*stype < TL_STREAM_TYPE_COUNT) && !pending;
- (*stype)++) {
- struct tl_stream *stream = &timeline->streams[*stype];
- *rb_idx_raw = atomic_read(&stream->rbi);
- /* Read buffer index may be updated by writer in case of
- * overflow. Read and write buffer indexes must be
- * loaded in correct order.
- */
- smp_rmb();
- if (atomic_read(&stream->wbi) != *rb_idx_raw)
- pending = 1;
-
- }
- (*stype)--;
-
- return pending;
-}
-
-/**
- * kbasep_tlstream_read - copy data from streams to buffer provided by user
- * @filp: Pointer to file structure
- * @buffer: Pointer to the buffer provided by user
- * @size: Maximum amount of data that can be stored in the buffer
- * @f_pos: Pointer to file offset (unused)
- *
- * Return: number of bytes stored in the buffer
- */
-static ssize_t kbasep_tlstream_read(
- struct file *filp,
- char __user *buffer,
- size_t size,
- loff_t *f_pos)
-{
- ssize_t copy_len = 0;
- struct kbase_timeline *timeline;
-
- KBASE_DEBUG_ASSERT(filp);
- KBASE_DEBUG_ASSERT(f_pos);
-
- if (WARN_ON(!filp->private_data))
- return -EFAULT;
-
- timeline = (struct kbase_timeline *) filp->private_data;
-
- if (!buffer)
- return -EINVAL;
-
- if ((0 > *f_pos) || (PACKET_SIZE > size))
- return -EINVAL;
-
- mutex_lock(&timeline->reader_lock);
-
- while (copy_len < size) {
- enum tl_stream_type stype;
- struct tl_stream *stream;
- unsigned int rb_idx_raw = 0;
- unsigned int wb_idx_raw;
- unsigned int rb_idx;
- size_t rb_size;
-
- /* If we already read some packets and there is no
- * packet pending then return back to user.
- * If we don't have any data yet, wait for packet to be
- * submitted.
- */
- if (0 < copy_len) {
- if (!kbasep_tlstream_packet_pending(
- timeline,
- &stype,
- &rb_idx_raw))
- break;
- } else {
- if (wait_event_interruptible(
- timeline->event_queue,
- kbasep_tlstream_packet_pending(
- timeline,
- &stype,
- &rb_idx_raw))) {
- copy_len = -ERESTARTSYS;
- break;
- }
- }
-
- /* Check if this packet fits into the user buffer.
- * If so copy its content. */
- rb_idx = rb_idx_raw % PACKET_COUNT;
- stream = &timeline->streams[stype];
- rb_size = atomic_read(&stream->buffer[rb_idx].size);
- if (rb_size > size - copy_len)
- break;
- if (copy_to_user(
- &buffer[copy_len],
- stream->buffer[rb_idx].data,
- rb_size)) {
- copy_len = -EFAULT;
- break;
- }
-
- /* If the distance between read buffer index and write
- * buffer index became more than PACKET_COUNT, then overflow
- * happened and we need to ignore the last portion of bytes
- * that we have just sent to user.
- */
- smp_rmb();
- wb_idx_raw = atomic_read(&stream->wbi);
-
- if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
- copy_len += rb_size;
- atomic_inc(&stream->rbi);
-#if MALI_UNIT_TEST
- atomic_add(rb_size, &timeline->bytes_collected);
-#endif /* MALI_UNIT_TEST */
-
- } else {
- const unsigned int new_rb_idx_raw =
- wb_idx_raw - PACKET_COUNT + 1;
- /* Adjust read buffer index to the next valid buffer */
- atomic_set(&stream->rbi, new_rb_idx_raw);
- }
- }
-
- mutex_unlock(&timeline->reader_lock);
-
- return copy_len;
-}
-
-/**
- * kbasep_tlstream_poll - poll timeline stream for packets
- * @filp: Pointer to file structure
- * @wait: Pointer to poll table
- * Return: POLLIN if data can be read without blocking, otherwise zero
- */
-static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait)
-{
- enum tl_stream_type stream_type;
- unsigned int rb_idx;
- struct kbase_timeline *timeline;
-
- KBASE_DEBUG_ASSERT(filp);
- KBASE_DEBUG_ASSERT(wait);
-
- if (WARN_ON(!filp->private_data))
- return -EFAULT;
-
- timeline = (struct kbase_timeline *) filp->private_data;
-
- poll_wait(filp, &timeline->event_queue, wait);
- if (kbasep_tlstream_packet_pending(timeline, &stream_type, &rb_idx))
- return POLLIN;
- return 0;
-}
-
-/**
- * kbasep_tlstream_release - release timeline stream descriptor
- * @inode: Pointer to inode structure
- * @filp: Pointer to file structure
- *
- * Return always return zero
- */
-static int kbasep_tlstream_release(struct inode *inode, struct file *filp)
-{
- struct kbase_timeline *timeline;
-
- KBASE_DEBUG_ASSERT(inode);
- KBASE_DEBUG_ASSERT(filp);
- KBASE_DEBUG_ASSERT(filp->private_data);
-
- CSTD_UNUSED(inode);
-
- timeline = (struct kbase_timeline *) filp->private_data;
-
- /* Stop autoflush timer before releasing access to streams. */
- atomic_set(&timeline->autoflush_timer_active, 0);
- del_timer_sync(&timeline->autoflush_timer);
-
- atomic_set(timeline->is_enabled, 0);
- return 0;
-}
-
-/**
- * kbasep_tlstream_timeline_header - prepare timeline header stream packet
- * @timeline: Timeline instance
- * @stream_type: Type of the stream that will carry header data
- * @tp_desc: Pointer to array with tracepoint descriptors
- * @tp_count: Number of descriptors in the given array
- *
- * Functions fills in information about tracepoints stored in body stream
- * associated with this header stream.
- */
-static void kbasep_tlstream_timeline_header(
- struct kbase_timeline *timeline,
- enum tl_stream_type stream_type,
- const struct tp_desc *tp_desc,
- u32 tp_count)
-{
- const u8 tv = SWTRACE_VERSION; /* protocol version */
- const u8 ps = sizeof(void *); /* pointer size */
- size_t msg_size = sizeof(tv) + sizeof(ps) + sizeof(tp_count);
- char *buffer;
- size_t pos = 0;
- unsigned long flags;
- unsigned int i;
-
- KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
- KBASE_DEBUG_ASSERT(tp_desc);
-
- /* Calculate the size of the timeline message. */
- for (i = 0; i < tp_count; i++) {
- msg_size += sizeof(tp_desc[i].id);
- msg_size +=
- strnlen(tp_desc[i].id_str, STRLEN_MAX) +
- sizeof(char) + sizeof(u32);
- msg_size +=
- strnlen(tp_desc[i].name, STRLEN_MAX) +
- sizeof(char) + sizeof(u32);
- msg_size +=
- strnlen(tp_desc[i].arg_types, STRLEN_MAX) +
- sizeof(char) + sizeof(u32);
- msg_size +=
- strnlen(tp_desc[i].arg_names, STRLEN_MAX) +
- sizeof(char) + sizeof(u32);
- }
-
- KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE >= msg_size);
-
- buffer = kbasep_tlstream_msgbuf_acquire(timeline, stream_type, msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &tv, sizeof(tv));
- pos = kbasep_tlstream_write_bytes(buffer, pos, &ps, sizeof(ps));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &tp_count, sizeof(tp_count));
-
- for (i = 0; i < tp_count; i++) {
- pos = kbasep_tlstream_write_bytes(
- buffer, pos,
- &tp_desc[i].id, sizeof(tp_desc[i].id));
- pos = kbasep_tlstream_write_string(
- buffer, pos,
- tp_desc[i].id_str, msg_size - pos);
- pos = kbasep_tlstream_write_string(
- buffer, pos,
- tp_desc[i].name, msg_size - pos);
- pos = kbasep_tlstream_write_string(
- buffer, pos,
- tp_desc[i].arg_types, msg_size - pos);
- pos = kbasep_tlstream_write_string(
- buffer, pos,
- tp_desc[i].arg_names, msg_size - pos);
- }
-
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(timeline, stream_type, flags);
-
- /* We don't expect any more data to be read in this stream.
- * As header stream must be read before its associated body stream,
- * make this packet visible to the user straightaway. */
- kbasep_tlstream_flush_stream(timeline, stream_type);
-}
-
-/*****************************************************************************/
-
-int kbase_tlstream_init(struct kbase_timeline **timeline,
- atomic_t *timeline_is_enabled)
-{
- enum tl_stream_type i;
- struct kbase_timeline *result;
-
- if (!timeline || !timeline_is_enabled)
- return -EINVAL;
-
- result = kzalloc(sizeof(*result), GFP_KERNEL);
- if (!result)
- return -ENOMEM;
-
- mutex_init(&result->reader_lock);
- init_waitqueue_head(&result->event_queue);
-
- /* Prepare stream structures. */
- for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
- kbasep_timeline_stream_init(&result->streams[i], i);
-
- /* Initialize autoflush timer. */
- atomic_set(&result->autoflush_timer_active, 0);
- kbase_timer_setup(&result->autoflush_timer,
- kbasep_tlstream_autoflush_timer_callback);
- result->is_enabled = timeline_is_enabled;
-
- *timeline = result;
- return 0;
-}
-
-void kbase_tlstream_term(struct kbase_timeline *timeline)
-{
- enum tl_stream_type i;
-
- if (!timeline)
- return;
-
- for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
- kbasep_timeline_stream_term(&timeline->streams[i]);
-
- kfree(timeline);
-}
-
-static void kbase_create_timeline_objects(struct kbase_device *kbdev)
-{
- unsigned int lpu_id;
- unsigned int as_nr;
- struct kbasep_kctx_list_element *element;
- struct kbase_timeline *timeline = kbdev->timeline;
-
- /* Create LPU objects. */
- for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- u32 *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(kbdev, lpu, lpu_id, *lpu);
- }
-
- /* Create Address Space objects. */
- for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(kbdev, &kbdev->as[as_nr], as_nr);
-
- /* Create GPU object and make it retain all LPUs and address spaces. */
- KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(
- kbdev,
- kbdev,
- kbdev->gpu_props.props.raw_props.gpu_id,
- kbdev->gpu_props.num_cores);
-
- for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- void *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(kbdev, lpu, kbdev);
- }
- for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(
- kbdev,
- &kbdev->as[as_nr],
- kbdev);
-
- /* Create object for each known context. */
- mutex_lock(&kbdev->kctx_list_lock);
- list_for_each_entry(element, &kbdev->kctx_list, link) {
- KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(
- kbdev,
- element->kctx,
- element->kctx->id,
- (u32)(element->kctx->tgid));
- }
- /* Before releasing the lock, reset body stream buffers.
- * This will prevent context creation message to be directed to both
- * summary and body stream.
- */
- kbase_tlstream_reset_body_streams(timeline);
- mutex_unlock(&kbdev->kctx_list_lock);
- /* Static object are placed into summary packet that needs to be
- * transmitted first. Flush all streams to make it available to
- * user space.
- */
- kbase_tlstream_flush_streams(timeline);
-}
-
-#ifdef CONFIG_MALI_DEVFREQ
-static void kbase_tlstream_current_devfreq_target(struct kbase_device *kbdev)
-{
- struct devfreq *devfreq = kbdev->devfreq;
-
- /* Devfreq initialization failure isn't a fatal error, so devfreq might
- * be null.
- */
- if (devfreq) {
- mutex_lock(&devfreq->lock);
- KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(kbdev,
- (u64)devfreq->last_status.current_frequency);
- mutex_unlock(&devfreq->lock);
- }
-}
-#endif /* CONFIG_MALI_DEVFREQ */
-
-int kbase_tlstream_acquire(struct kbase_device *kbdev, u32 flags)
-{
- int ret;
- u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
- struct kbase_timeline *timeline = kbdev->timeline;
-
- if (0 == atomic_cmpxchg(timeline->is_enabled, 0, tlstream_enabled)) {
- int rcode;
-
- ret = anon_inode_getfd(
- "[mali_tlstream]",
- &kbasep_tlstream_fops,
- timeline,
- O_RDONLY | O_CLOEXEC);
- if (ret < 0) {
- atomic_set(timeline->is_enabled, 0);
- return ret;
- }
-
- /* Reset and initialize header streams. */
- kbasep_timeline_stream_reset(
- &timeline->streams[TL_STREAM_TYPE_OBJ_HEADER]);
- kbasep_timeline_stream_reset(
- &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY]);
- kbasep_timeline_stream_reset(
- &timeline->streams[TL_STREAM_TYPE_AUX_HEADER]);
- kbasep_tlstream_timeline_header(timeline,
- TL_STREAM_TYPE_OBJ_HEADER,
- tp_desc_obj,
- ARRAY_SIZE(tp_desc_obj));
- kbasep_tlstream_timeline_header(timeline,
- TL_STREAM_TYPE_AUX_HEADER,
- tp_desc_aux,
- ARRAY_SIZE(tp_desc_aux));
-
- /* Start autoflush timer. */
- atomic_set(&timeline->autoflush_timer_active, 1);
- rcode = mod_timer(
- &timeline->autoflush_timer,
- jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
- CSTD_UNUSED(rcode);
-
- /* If job dumping is enabled, readjust the software event's
- * timeout as the default value of 3 seconds is often
- * insufficient. */
- if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) {
- dev_info(kbdev->dev,
- "Job dumping is enabled, readjusting the software event's timeout\n");
- atomic_set(&kbdev->js_data.soft_job_timeout_ms,
- 1800000);
- }
-
- /* Summary stream was cleared during acquire.
- * Create static timeline objects that will be
- * read by client.
- */
- kbase_create_timeline_objects(kbdev);
-
-#ifdef CONFIG_MALI_DEVFREQ
- /* Devfreq target tracepoints are only fired when the target
- * changes, so we won't know the current target unless we
- * send it now.
- */
- kbase_tlstream_current_devfreq_target(kbdev);
-#endif /* CONFIG_MALI_DEVFREQ */
-
- } else {
- ret = -EBUSY;
- }
-
- return ret;
-}
-
-void kbase_tlstream_flush_streams(struct kbase_timeline *timeline)
-{
- enum tl_stream_type stype;
-
- for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
- kbasep_tlstream_flush_stream(timeline, stype);
-}
-
-void kbase_tlstream_reset_body_streams(struct kbase_timeline *timeline)
-{
- kbasep_timeline_stream_reset(
- &timeline->streams[TL_STREAM_TYPE_OBJ]);
- kbasep_timeline_stream_reset(
- &timeline->streams[TL_STREAM_TYPE_AUX]);
-}
-
-#if MALI_UNIT_TEST
-void kbase_tlstream_stats(struct kbase_timeline *timeline,
- u32 *bytes_collected, u32 *bytes_generated)
-{
- KBASE_DEBUG_ASSERT(bytes_collected);
- KBASE_DEBUG_ASSERT(bytes_generated);
- *bytes_collected = atomic_read(&timeline->bytes_collected);
- *bytes_generated = atomic_read(&timeline->bytes_generated);
-}
-#endif /* MALI_UNIT_TEST */
-
-/*****************************************************************************/
-
-void __kbase_tlstream_tl_summary_new_ctx(struct kbase_timeline *tl, void *context, u32 nr, u32 tgid)
-{
- const u32 msg_id = KBASE_TL_NEW_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
- sizeof(tgid);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &nr, sizeof(nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &tgid, sizeof(tgid));
-
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ_SUMMARY, flags);
-}
-
-void __kbase_tlstream_tl_summary_new_gpu(struct kbase_timeline *tl, void *gpu, u32 id, u32 core_count)
-{
- const u32 msg_id = KBASE_TL_NEW_GPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(id) +
- sizeof(core_count);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &id, sizeof(id));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &core_count, sizeof(core_count));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ_SUMMARY, flags);
-}
-
-void __kbase_tlstream_tl_summary_new_lpu(struct kbase_timeline *tl, void *lpu, u32 nr, u32 fn)
-{
- const u32 msg_id = KBASE_TL_NEW_LPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(nr) +
- sizeof(fn);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &nr, sizeof(nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &fn, sizeof(fn));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ_SUMMARY, flags);
-}
-
-void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(struct kbase_timeline *tl, void *lpu, void *gpu)
-{
- const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ_SUMMARY, flags);
-}
-
-void __kbase_tlstream_tl_summary_new_as(struct kbase_timeline *tl, void *as, u32 nr)
-{
- const u32 msg_id = KBASE_TL_NEW_AS;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(nr);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &nr, sizeof(nr));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ_SUMMARY, flags);
-}
-
-void __kbase_tlstream_tl_summary_lifelink_as_gpu(struct kbase_timeline *tl, void *as, void *gpu)
-{
- const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ_SUMMARY,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ_SUMMARY, flags);
-}
-
-/*****************************************************************************/
-
-void __kbase_tlstream_tl_new_ctx(struct kbase_timeline *tl, void *context, u32 nr, u32 tgid)
-{
- const u32 msg_id = KBASE_TL_NEW_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
- sizeof(tgid);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &nr, sizeof(nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &tgid, sizeof(tgid));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_new_atom(struct kbase_timeline *tl, void *atom, u32 nr)
-{
- const u32 msg_id = KBASE_TL_NEW_ATOM;
- const size_t msg_size = sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
- sizeof(nr);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &nr, sizeof(nr));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_del_ctx(struct kbase_timeline *tl, void *context)
-{
- const u32 msg_id = KBASE_TL_DEL_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(context);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_del_atom(struct kbase_timeline *tl, void *atom)
-{
- const u32 msg_id = KBASE_TL_DEL_ATOM;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_ret_ctx_lpu(struct kbase_timeline *tl, void *context, void *lpu)
-{
- const u32 msg_id = KBASE_TL_RET_CTX_LPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_ret_atom_ctx(struct kbase_timeline *tl, void *atom, void *context)
-{
- const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_ret_atom_lpu(struct kbase_timeline *tl,
- void *atom, void *lpu, const char *attrib_match_list)
-{
- const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
- const size_t msg_s0 = sizeof(u32) + sizeof(char) +
- strnlen(attrib_match_list, STRLEN_MAX);
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) +
- sizeof(atom) + sizeof(lpu) + msg_s0;
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- pos = kbasep_tlstream_write_string(
- buffer, pos, attrib_match_list, msg_s0);
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_nret_ctx_lpu(struct kbase_timeline *tl, void *context, void *lpu)
-{
- const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_nret_atom_ctx(struct kbase_timeline *tl, void *atom, void *context)
-{
- const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_nret_atom_lpu(struct kbase_timeline *tl, void *atom, void *lpu)
-{
- const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_ret_as_ctx(struct kbase_timeline *tl, void *as, void *ctx)
-{
- const u32 msg_id = KBASE_TL_RET_AS_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &ctx, sizeof(ctx));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_nret_as_ctx(struct kbase_timeline *tl, void *as, void *ctx)
-{
- const u32 msg_id = KBASE_TL_NRET_AS_CTX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &ctx, sizeof(ctx));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_ret_atom_as(struct kbase_timeline *tl, void *atom, void *as)
-{
- const u32 msg_id = KBASE_TL_RET_ATOM_AS;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_nret_atom_as(struct kbase_timeline *tl, void *atom, void *as)
-{
- const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_config(struct kbase_timeline *tl,
- void *atom, u64 jd, u64 affinity, u32 config)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
- sizeof(jd) + sizeof(affinity) + sizeof(config);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &jd, sizeof(jd));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &affinity, sizeof(affinity));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &config, sizeof(config));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_priority(struct kbase_timeline *tl, void *atom, u32 prio)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(prio);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &prio, sizeof(prio));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_state(struct kbase_timeline *tl, void *atom, u32 state)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(state);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &state, sizeof(state));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_prioritized(struct kbase_timeline *tl, void *atom)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_jit(struct kbase_timeline *tl,
- void *atom, u64 edit_addr, u64 new_addr,
- u64 va_pages, u64 jit_flags)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom)
- + sizeof(edit_addr) + sizeof(new_addr) + sizeof(va_pages)
- + sizeof(jit_flags);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &edit_addr, sizeof(edit_addr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &new_addr, sizeof(new_addr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &va_pages, sizeof(va_pages));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &jit_flags, sizeof(jit_flags));
-
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_jitallocinfo(struct kbase_timeline *tl,
- void *atom, u64 va_pages, u64 commit_pages, u64 extent,
- u32 jit_id, u32 bin_id, u32 max_allocations, u32 jit_flags,
- u32 usage_id)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
- sizeof(va_pages) + sizeof(commit_pages) +
- sizeof(extent) + sizeof(jit_id) +
- sizeof(bin_id) + sizeof(max_allocations) +
- sizeof(jit_flags) + sizeof(usage_id);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id,
- sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &va_pages, sizeof(va_pages));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &commit_pages, sizeof(commit_pages));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &extent, sizeof(extent));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &jit_id, sizeof(jit_id));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &bin_id, sizeof(bin_id));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &max_allocations,
- sizeof(max_allocations));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &jit_flags, sizeof(jit_flags));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &usage_id, sizeof(usage_id));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(struct kbase_timeline *tl, void *atom, u32 jit_id)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(jit_id);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id,
- sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &jit_id, sizeof(jit_id));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-
-void __kbase_tlstream_tl_attrib_as_config(struct kbase_timeline *tl,
- void *as, u64 transtab, u64 memattr, u64 transcfg)
-{
- const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(as) +
- sizeof(transtab) + sizeof(memattr) + sizeof(transcfg);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &as, sizeof(as));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &transtab, sizeof(transtab));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &memattr, sizeof(memattr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &transcfg, sizeof(transcfg));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_event_lpu_softstop(struct kbase_timeline *tl, void *lpu)
-{
- const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(lpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &lpu, sizeof(lpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_event_atom_softstop_ex(struct kbase_timeline *tl, void *atom)
-{
- const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_event_atom_softstop_issue(struct kbase_timeline *tl, void *atom)
-{
- const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_event_atom_softjob_start(struct kbase_timeline *tl, void *atom)
-{
- const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_tl_event_atom_softjob_end(struct kbase_timeline *tl, void *atom)
-{
- const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(atom);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom, sizeof(atom));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-void __kbase_tlstream_jd_gpu_soft_reset(struct kbase_timeline *tl, void *gpu)
-{
- const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_OBJ,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_OBJ, flags);
-}
-
-/*****************************************************************************/
-
-void __kbase_tlstream_aux_pm_state(struct kbase_timeline *tl, u32 core_type, u64 state)
-{
- const u32 msg_id = KBASE_AUX_PM_STATE;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(core_type) +
- sizeof(state);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &core_type, sizeof(core_type));
- pos = kbasep_tlstream_write_bytes(buffer, pos, &state, sizeof(state));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_pagefault(struct kbase_timeline *tl, u32 ctx_nr, u32 as_nr, u64 page_count_change)
-{
- const u32 msg_id = KBASE_AUX_PAGEFAULT;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
- sizeof(as_nr) + sizeof(page_count_change);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX, msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
- pos = kbasep_tlstream_write_bytes(buffer, pos, &as_nr, sizeof(as_nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos,
- &page_count_change, sizeof(page_count_change));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_pagesalloc(struct kbase_timeline *tl, u32 ctx_nr, u64 page_count)
-{
- const u32 msg_id = KBASE_AUX_PAGESALLOC;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
- sizeof(page_count);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX, msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &page_count, sizeof(page_count));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_devfreq_target(struct kbase_timeline *tl, u64 target_freq)
-{
- const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(target_freq);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX, msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &target_freq, sizeof(target_freq));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_protected_enter_start(struct kbase_timeline *tl, void *gpu)
-{
- const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-void __kbase_tlstream_aux_protected_enter_end(struct kbase_timeline *tl, void *gpu)
-{
- const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_protected_leave_start(struct kbase_timeline *tl, void *gpu)
-{
- const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_protected_leave_end(struct kbase_timeline *tl, void *gpu)
-{
- const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
- const size_t msg_size =
- sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &gpu, sizeof(gpu));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_jit_stats(struct kbase_timeline *tl, u32 ctx_nr, u32 bid,
- u32 max_allocs, u32 allocs,
- u32 va_pages, u32 ph_pages)
-{
- const u32 msg_id = KBASE_AUX_JIT_STATS;
- const size_t msg_size = sizeof(msg_id) + sizeof(u64) +
- sizeof(ctx_nr) + sizeof(bid) +
- sizeof(max_allocs) + sizeof(allocs) +
- sizeof(va_pages) + sizeof(ph_pages);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &ctx_nr, sizeof(ctx_nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &bid, sizeof(bid));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &max_allocs, sizeof(max_allocs));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &allocs, sizeof(allocs));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &va_pages, sizeof(va_pages));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &ph_pages, sizeof(ph_pages));
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
-void __kbase_tlstream_aux_event_job_slot(struct kbase_timeline *tl,
- struct kbase_context *context, u32 slot_nr, u32 atom_nr, u32 event)
-{
- const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
- const size_t msg_size = sizeof(msg_id) + sizeof(u64) +
- sizeof(context) + sizeof(slot_nr) +
- sizeof(atom_nr) + sizeof(event);
- unsigned long flags;
- char *buffer;
- size_t pos = 0;
-
- buffer = kbasep_tlstream_msgbuf_acquire(tl,
- TL_STREAM_TYPE_AUX,
- msg_size, &flags);
- KBASE_DEBUG_ASSERT(buffer);
-
- pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_tlstream_write_timestamp(buffer, pos);
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &context, sizeof(context));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &slot_nr, sizeof(slot_nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &atom_nr, sizeof(atom_nr));
- pos = kbasep_tlstream_write_bytes(
- buffer, pos, &event, sizeof(event));
-
- KBASE_DEBUG_ASSERT(msg_size == pos);
-
- kbasep_tlstream_msgbuf_release(tl, TL_STREAM_TYPE_AUX, flags);
-}
-
diff --git a/mali_kbase/mali_kbase_tlstream.h b/mali_kbase/mali_kbase_tlstream.h
index 7b2407f..ed6dca4 100644
--- a/mali_kbase/mali_kbase_tlstream.h
+++ b/mali_kbase/mali_kbase_tlstream.h
@@ -23,801 +23,144 @@
#if !defined(_KBASE_TLSTREAM_H)
#define _KBASE_TLSTREAM_H
-#include <mali_kbase.h>
-#include <mali_kbase_gator.h>
-
-#include <linux/typecheck.h>
-
-/*****************************************************************************/
-
-struct kbase_timeline;
-
-/**
- * kbase_tlstream_init - initialize timeline infrastructure in kernel
- * @timeline: Newly created instance of kbase_timeline will
- * be stored in this pointer.
- * @timeline_is_enabled: Timeline status will be written to this variable
- * when a client is attached/detached. The variable
- * must be valid while timeline instance is valid.
- * Return: zero on success, negative number on error
- */
-int kbase_tlstream_init(struct kbase_timeline **timeline,
- atomic_t *timeline_is_enabled);
-
-/**
- * kbase_tlstream_term - terminate timeline infrastructure in kernel
- *
- * @timeline: Timeline instance to be terminated. It must be previously created
- * with kbase_tlstream_init().
- */
-void kbase_tlstream_term(struct kbase_timeline *timeline);
-
-/**
- * kbase_tlstream_acquire - acquire timeline stream file descriptor
- * @kbdev: Kbase device
- * @flags: Timeline stream flags
- *
- * This descriptor is meant to be used by the userspace timeline to gain access
- * to the kernel timeline stream. This stream is later broadcast by user space
- * to the timeline client.
- * Only one entity can own the descriptor at any given time. The descriptor
- * shall be closed if unused. If the descriptor cannot be obtained (i.e. it is
- * already in use), a negative value is returned.
- *
- * Return: file descriptor on success, negative number on error
- */
-int kbase_tlstream_acquire(struct kbase_device *kbdev, u32 flags);
-
-/**
- * kbase_tlstream_flush_streams - flush timeline streams.
- * @timeline: Timeline instance
- *
- * Function will flush pending data in all timeline streams.
- */
-void kbase_tlstream_flush_streams(struct kbase_timeline *timeline);
-
-/**
- * kbase_tlstream_reset_body_streams - reset timeline body streams.
- *
- * Function will discard pending data in all timeline body streams.
- * @timeline: Timeline instance
- */
-void kbase_tlstream_reset_body_streams(struct kbase_timeline *timeline);
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+/* The maximum size of a single packet used by timeline. */
+#define PACKET_SIZE 4096 /* bytes */
+
+/* The number of packets used by one timeline stream. */
+#if defined(CONFIG_MALI_JOB_DUMP) || defined(CONFIG_MALI_VECTOR_DUMP)
+ #define PACKET_COUNT 64
+#else
+ #define PACKET_COUNT 32
+#endif
+/* The maximum expected length of string in tracepoint descriptor. */
+#define STRLEN_MAX 64 /* bytes */
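[Editorial note, derived directly from the defines above: each kbase_tlstream reserves PACKET_COUNT buffers of PACKET_SIZE bytes, i.e. 32 * 4096 = 128 KiB per stream by default, or 64 * 4096 = 256 KiB when CONFIG_MALI_JOB_DUMP or CONFIG_MALI_VECTOR_DUMP is enabled, plus a small per-buffer size counter.]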
+
+/**
+ * struct kbase_tlstream - timeline stream structure
+ * @lock: Message order lock
+ * @buffer: Array of buffers
+ * @wbi: Write buffer index
+ * @rbi: Read buffer index
+ * @numbered: If non-zero, the stream's packets are sequentially numbered
+ * @autoflush_counter: Counter tracking stream's autoflush state
+ * @ready_read: Pointer to a wait queue, which is signaled when
+ * timeline messages are ready for collection.
+ * @bytes_generated: Number of bytes generated by tracepoint messages
+ *
+ * This structure holds information needed to construct proper packets in the
+ * timeline stream.
+ *
+ * Each message in the sequence must bear a timestamp that is greater than
+ * that of the previous message in the same stream. For this reason a lock is
+ * held throughout the process of message creation.
+ *
+ * Each stream contains a set of buffers. Each buffer will hold one MIPE
+ * packet. If there is not enough free space to store an incoming message,
+ * the oldest buffer is discarded. Each packet in a timeline body stream
+ * has an embedded sequence number; this value must increment
+ * monotonically and is used by the packet receiver to detect these
+ * buffer overflows.
+ *
+ * The autoflush counter is set to a negative number when there is no data
+ * pending for flush and it is set to zero on every update of the buffer. The
+ * autoflush timer will increment the counter by one on every expiry. If there
+ * is no activity on the buffer for two consecutive timer expiries, the stream
+ * buffer will be flushed.
+ */
+struct kbase_tlstream {
+ spinlock_t lock;
+
+ struct {
+ atomic_t size; /* number of bytes in buffer */
+ char data[PACKET_SIZE]; /* buffer's data */
+ } buffer[PACKET_COUNT];
+
+ atomic_t wbi;
+ atomic_t rbi;
+
+ int numbered;
+ atomic_t autoflush_counter;
+ wait_queue_head_t *ready_read;
#if MALI_UNIT_TEST
-/**
- * kbase_tlstream_test - start timeline stream data generator
- * @kbdev: Kernel common context
- * @tpw_count: Number of trace point writers in each context
- * @msg_delay: Time delay in milliseconds between trace points written by one
- * writer
- * @msg_count: Number of trace points written by one writer
- * @aux_msg: If non-zero aux messages will be included
- *
- * This test starts a requested number of asynchronous writers in both IRQ and
- * thread context. Each writer will generate required number of test
- * tracepoints (tracepoints with embedded information about writer that
- * should be verified by user space reader). Tracepoints will be emitted in
- * all timeline body streams. If aux_msg is non-zero writer will also
- * generate not testable tracepoints (tracepoints without information about
- * writer). These tracepoints are used to check correctness of remaining
- * timeline message generating functions. Writer will wait requested time
- * between generating another set of messages. This call blocks until all
- * writers finish.
- */
-void kbase_tlstream_test(
- struct kbase_device *kbdev,
- unsigned int tpw_count,
- unsigned int msg_delay,
- unsigned int msg_count,
- int aux_msg);
-
-/**
- * kbase_tlstream_stats - read timeline stream statistics
- * @timeline: Timeline instance
- * @bytes_collected: Will hold number of bytes read by the user
- * @bytes_generated: Will hold number of bytes generated by trace points
- */
-void kbase_tlstream_stats(struct kbase_timeline *timeline, u32 *bytes_collected, u32 *bytes_generated);
-#endif /* MALI_UNIT_TEST */
-
-/*****************************************************************************/
-
-#define TL_ATOM_STATE_IDLE 0
-#define TL_ATOM_STATE_READY 1
-#define TL_ATOM_STATE_DONE 2
-#define TL_ATOM_STATE_POSTED 3
-
-/* We want these values to match */
-#define TL_JS_EVENT_START GATOR_JOB_SLOT_START
-#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
-#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED
-
-void __kbase_tlstream_tl_summary_new_ctx(struct kbase_timeline *tl, void *context, u32 nr, u32 tgid);
-void __kbase_tlstream_tl_summary_new_gpu(struct kbase_timeline *tl, void *gpu, u32 id, u32 core_count);
-void __kbase_tlstream_tl_summary_new_lpu(struct kbase_timeline *tl, void *lpu, u32 nr, u32 fn);
-void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(struct kbase_timeline *tl, void *lpu, void *gpu);
-void __kbase_tlstream_tl_summary_new_as(struct kbase_timeline *tl, void *as, u32 nr);
-void __kbase_tlstream_tl_summary_lifelink_as_gpu(struct kbase_timeline *tl, void *as, void *gpu);
-void __kbase_tlstream_tl_new_ctx(struct kbase_timeline *tl, void *context, u32 nr, u32 tgid);
-void __kbase_tlstream_tl_new_atom(struct kbase_timeline *tl, void *atom, u32 nr);
-void __kbase_tlstream_tl_del_ctx(struct kbase_timeline *tl, void *context);
-void __kbase_tlstream_tl_del_atom(struct kbase_timeline *tl, void *atom);
-void __kbase_tlstream_tl_ret_ctx_lpu(struct kbase_timeline *tl, void *context, void *lpu);
-void __kbase_tlstream_tl_ret_atom_ctx(struct kbase_timeline *tl, void *atom, void *context);
-void __kbase_tlstream_tl_ret_atom_lpu(struct kbase_timeline *tl,
- void *atom, void *lpu, const char *attrib_match_list);
-void __kbase_tlstream_tl_nret_ctx_lpu(struct kbase_timeline *tl, void *context, void *lpu);
-void __kbase_tlstream_tl_nret_atom_ctx(struct kbase_timeline *tl, void *atom, void *context);
-void __kbase_tlstream_tl_nret_atom_lpu(struct kbase_timeline *tl, void *atom, void *lpu);
-void __kbase_tlstream_tl_ret_as_ctx(struct kbase_timeline *tl, void *as, void *ctx);
-void __kbase_tlstream_tl_nret_as_ctx(struct kbase_timeline *tl, void *as, void *ctx);
-void __kbase_tlstream_tl_ret_atom_as(struct kbase_timeline *tl, void *atom, void *as);
-void __kbase_tlstream_tl_nret_atom_as(struct kbase_timeline *tl, void *atom, void *as);
-void __kbase_tlstream_tl_dep_atom_atom(struct kbase_timeline *tl, void *atom1, void *atom2);
-void __kbase_tlstream_tl_ndep_atom_atom(struct kbase_timeline *tl, void *atom1, void *atom2);
-void __kbase_tlstream_tl_rdep_atom_atom(struct kbase_timeline *tl, void *atom1, void *atom2);
-void __kbase_tlstream_tl_attrib_atom_config(struct kbase_timeline *tl,
- void *atom, u64 jd, u64 affinity, u32 config);
-void __kbase_tlstream_tl_attrib_atom_priority(struct kbase_timeline *tl, void *atom, u32 prio);
-void __kbase_tlstream_tl_attrib_atom_state(struct kbase_timeline *tl, void *atom, u32 state);
-void __kbase_tlstream_tl_attrib_atom_prioritized(struct kbase_timeline *tl, void *atom);
-void __kbase_tlstream_tl_attrib_atom_jit(struct kbase_timeline *tl,
- void *atom, u64 edit_addr, u64 new_addr,
- u64 va_pages, u64 jit_flags);
-void __kbase_tlstream_tl_attrib_atom_jitallocinfo(struct kbase_timeline *tl,
- void *atom, u64 va_pages, u64 commit_pages, u64 extent,
- u32 jit_id, u32 bin_id, u32 max_allocations, u32 flags,
- u32 usage_id);
-void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(struct kbase_timeline *tl, void *atom, u32 jit_id);
-void __kbase_tlstream_tl_attrib_as_config(struct kbase_timeline *tl,
- void *as, u64 transtab, u64 memattr, u64 transcfg);
-void __kbase_tlstream_tl_event_atom_softstop_ex(struct kbase_timeline *tl, void *atom);
-void __kbase_tlstream_tl_event_lpu_softstop(struct kbase_timeline *tl, void *lpu);
-void __kbase_tlstream_tl_event_atom_softstop_issue(struct kbase_timeline *tl, void *atom);
-void __kbase_tlstream_tl_event_atom_softjob_start(struct kbase_timeline *tl, void *atom);
-void __kbase_tlstream_tl_event_atom_softjob_end(struct kbase_timeline *tl, void *atom);
-void __kbase_tlstream_jd_gpu_soft_reset(struct kbase_timeline *tl, void *gpu);
-void __kbase_tlstream_aux_pm_state(struct kbase_timeline *tl, u32 core_type, u64 state);
-void __kbase_tlstream_aux_pagefault(struct kbase_timeline *tl, u32 ctx_nr, u32 as_nr, u64 page_count_change);
-void __kbase_tlstream_aux_pagesalloc(struct kbase_timeline *tl, u32 ctx_nr, u64 page_count);
-void __kbase_tlstream_aux_devfreq_target(struct kbase_timeline *tl, u64 target_freq);
-void __kbase_tlstream_aux_protected_enter_start(struct kbase_timeline *tl, void *gpu);
-void __kbase_tlstream_aux_protected_enter_end(struct kbase_timeline *tl, void *gpu);
-void __kbase_tlstream_aux_protected_leave_start(struct kbase_timeline *tl, void *gpu);
-void __kbase_tlstream_aux_protected_leave_end(struct kbase_timeline *tl, void *gpu);
-void __kbase_tlstream_aux_jit_stats(struct kbase_timeline *tl, u32 ctx_nr, u32 bin_id,
- u32 max_allocations, u32 allocations,
- u32 va_pages_nr, u32 ph_pages_nr);
-void __kbase_tlstream_aux_event_job_slot(struct kbase_timeline *tl,
- struct kbase_context *context, u32 slot_nr, u32 atom_nr, u32 event);
-
-#define TLSTREAM_ENABLED (1 << 31)
-
-#define __TRACE_IF_ENABLED(trace_name, kbdev, ...) \
- do { \
- int enabled = atomic_read(&kbdev->timeline_is_enabled); \
- typecheck(struct kbase_device *, kbdev); \
- if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_##trace_name(kbdev->timeline, __VA_ARGS__); \
- } while (0)
-
-#define __TRACE_IF_ENABLED_LATENCY(trace_name, kbdev, ...) \
- do { \
- int enabled = atomic_read(&kbdev->timeline_is_enabled); \
- typecheck(struct kbase_device *, kbdev); \
- if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
- __kbase_tlstream_##trace_name(kbdev->timeline, __VA_ARGS__); \
- } while (0)
-
-#define __TRACE_IF_ENABLED_JD(trace_name, kbdev, ...) \
- do { \
- int enabled = atomic_read(&kbdev->timeline_is_enabled); \
- typecheck(struct kbase_device *, kbdev); \
- if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
- __kbase_tlstream_##trace_name(kbdev->timeline, __VA_ARGS__); \
- } while (0)
-
-
-/*****************************************************************************/
-
-/* Gator tracepoints are hooked into TLSTREAM macro interface.
- * When the following tracepoints are called, corresponding
- * Gator tracepoint will be called as well.
- */
-#if defined(CONFIG_MALI_GATOR_SUPPORT)
-
-/* `event` is one of TL_JS_EVENT values here.
- * The values of TL_JS_EVENT are guaranteed to match
- * with corresponding GATOR_JOB_SLOT values.
- */
-#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, context, slot_nr, atom_nr, event) \
- do { \
- kbase_trace_mali_job_slots_event(kbdev->id, \
- GATOR_MAKE_EVENT(event, slot_nr), \
- context, (u8) atom_nr); \
- __TRACE_IF_ENABLED(aux_event_job_slot, kbdev, context, slot_nr, atom_nr, event); \
- } while (0)
-
-#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
- do { \
- kbase_trace_mali_pm_status(kbdev->id, \
- core_type, state); \
- __TRACE_IF_ENABLED(aux_pm_state, kbdev, core_type, state); \
- } while (0)
-
-
-#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, ctx_nr, as_nr, page_count_change) \
- do { \
- kbase_trace_mali_page_fault_insert_pages(kbdev->id, \
- as_nr, \
- page_count_change); \
- __TRACE_IF_ENABLED(aux_pagefault, kbdev, ctx_nr, as_nr, page_count_change); \
- } while (0)
-
-/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
- * We stream the total amount of pages allocated for `kbdev` rather
- * than `page_count`, which is per-context.
- */
-#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_count) do { \
- u32 global_pages_count = atomic_read(&kbdev->memdev.used_pages); \
- kbase_trace_mali_total_alloc_pages_change(kbdev->id, \
- global_pages_count); \
- __TRACE_IF_ENABLED(aux_pagesalloc, kbdev, ctx_nr, page_count); \
- } while (0)
-
-
+ atomic_t bytes_generated;
#endif
+};
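[Editorial sketch: the autoflush rule documented in the struct comment above can be read as follows. The counter stays at -1 while nothing is pending, is reset to 0 whenever the buffer is updated, and a flush is triggered when a timer expiry finds the counter already incremented once, i.e. after two expiries with no intervening write. The helper name and the flush callback below are illustrative placeholders, not part of the kbase API.]

/* Illustrative sketch only: how the autoflush counter described above could
 * drive a periodic flush decision. example_autoflush_tick() and the flush
 * callback are hypothetical names, not kbase functions.
 */
static void example_autoflush_tick(struct kbase_tlstream *stream,
		void (*flush)(struct kbase_tlstream *stream))
{
	/* Increment the counter unless it is -1 (nothing pending for flush). */
	int old = atomic_fetch_add_unless(&stream->autoflush_counter, 1, -1);

	/* A previous expiry already saw pending data (counter went 0 -> 1);
	 * this second expiry with no buffer update in between flushes it.
	 */
	if (old == 1)
		flush(stream);
}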
-/*****************************************************************************/
-
-/**
- * KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX - create context object in timeline
- * summary
- * @kbdev: Kbase device
- * @context: Name of the context object
- * @nr: Context number
- * @tgid: Thread Group Id
- *
- * Function emits a timeline message informing about context creation. Context
- * is created with a context number (its attribute) that can be used to link
- * the kbase context with the userspace context.
- * This message is directed to timeline summary stream.
- */
-#define KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(kbdev, context, nr, tgid) \
- __TRACE_IF_ENABLED(tl_summary_new_ctx, kbdev, context, nr, tgid)
-
-/**
- * KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU - create GPU object in timeline summary
- * @kbdev: Kbase device
- * @gpu: Name of the GPU object
- * @id: ID value of this GPU
- * @core_count: Number of cores this GPU hosts
- *
- * Function emits a timeline message informing about GPU creation. GPU is
- * created with two attributes: id and core count.
- * This message is directed to timeline summary stream.
- */
-#define KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(kbdev, gpu, id, core_count) \
- __TRACE_IF_ENABLED(tl_summary_new_gpu, kbdev, gpu, id, core_count)
-
-/**
- * KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU - create LPU object in timeline summary
- * @kbdev: Kbase device
- * @lpu: Name of the Logical Processing Unit object
- * @nr: Sequential number assigned to this LPU
- * @fn: Property describing this LPU's functional abilities
- *
- * Function emits a timeline message informing about LPU creation. LPU is
- * created with two attributes: a number linking this LPU with the GPU's job
- * slot, and a function value bearing information about this LPU's abilities.
- * This message is directed to timeline summary stream.
- */
-#define KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(kbdev, lpu, nr, fn) \
- __TRACE_IF_ENABLED(tl_summary_new_lpu, kbdev, lpu, nr, fn)
-
-/**
- * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU - lifelink LPU object to GPU
- * @kbdev: Kbase device
- * @lpu: Name of the Logical Processing Unit object
- * @gpu: Name of the GPU object
- *
- * Function emits a timeline message informing that LPU object shall be deleted
- * along with GPU object.
- * This message is directed to timeline summary stream.
- */
-#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(kbdev, lpu, gpu) \
- __TRACE_IF_ENABLED(tl_summary_lifelink_lpu_gpu, kbdev, lpu, gpu)
-
-/**
- * KBASE_TLSTREAM_TL_SUMMARY_NEW_AS - create address space object in timeline summary
- * @kbdev: Kbase device
- * @as: Name of the address space object
- * @nr: Sequential number assigned to this address space
- *
- * Function emits a timeline message informing about address space creation.
- * Address space is created with one attribute: number identifying this
- * address space.
- * This message is directed to timeline summary stream.
- */
-#define KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(kbdev, as, nr) \
- __TRACE_IF_ENABLED(tl_summary_new_as, kbdev, as, nr)
-
-/**
- * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU - lifelink address space object to GPU
- * @kbdev: Kbase device
- * @as: Name of the address space object
- * @gpu: Name of the GPU object
- *
- * Function emits a timeline message informing that address space object
- * shall be deleted along with GPU object.
- * This message is directed to timeline summary stream.
- */
-#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(kbdev, as, gpu) \
- __TRACE_IF_ENABLED(tl_summary_lifelink_as_gpu, kbdev, as, gpu)
-
-/**
- * KBASE_TLSTREAM_TL_NEW_CTX - create context object in timeline
- * @kbdev: Kbase device
- * @context: Name of the context object
- * @nr: Context number
- * @tgid: Thread Group Id
- *
- * Function emits a timeline message informing about context creation. Context
- * is created with a context number (its attribute) that can be used to link
- * the kbase context with the userspace context.
- */
-#define KBASE_TLSTREAM_TL_NEW_CTX(kbdev, context, nr, tgid) \
- __TRACE_IF_ENABLED(tl_new_ctx, kbdev, context, nr, tgid)
-
-/**
- * KBASE_TLSTREAM_TL_NEW_ATOM - create atom object in timeline
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @nr: Sequential number assigned to this atom
- *
- * Function emits a timeline message informing about atom creation. Atom is
- * created with atom number (its attribute) that links it with actual work
- * bucket id understood by hardware.
- */
-#define KBASE_TLSTREAM_TL_NEW_ATOM(kbdev, atom, nr) \
- __TRACE_IF_ENABLED(tl_new_atom, kbdev, atom, nr)
-
-/**
- * KBASE_TLSTREAM_TL_DEL_CTX - destroy context object in timeline
- * @kbdev: Kbase device
- * @context: Name of the context object
- *
- * Function emits a timeline message informing that context object ceased to
- * exist.
- */
-#define KBASE_TLSTREAM_TL_DEL_CTX(kbdev, context) \
- __TRACE_IF_ENABLED(tl_del_ctx, kbdev, context)
-
-/**
- * KBASE_TLSTREAM_TL_DEL_ATOM - destroy atom object in timeline
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- *
- * Function emits a timeline message informing that atom object ceased to
- * exist.
- */
-#define KBASE_TLSTREAM_TL_DEL_ATOM(kbdev, atom) \
- __TRACE_IF_ENABLED(tl_del_atom, kbdev, atom)
-
-/**
- * KBASE_TLSTREAM_TL_RET_CTX_LPU - retain context by LPU
- * @kbdev: Kbase device
- * @context: Name of the context object
- * @lpu: Name of the Logical Processing Unit object
- *
- * Function emits a timeline message informing that context is being held
- * by LPU and must not be deleted unless it is released.
- */
-#define KBASE_TLSTREAM_TL_RET_CTX_LPU(kbdev, context, lpu) \
- __TRACE_IF_ENABLED(tl_ret_ctx_lpu, kbdev, context, lpu)
-
-/**
- * KBASE_TLSTREAM_TL_RET_ATOM_CTX - retain atom by context
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @context: Name of the context object
- *
- * Function emits a timeline message informing that atom object is being held
- * by context and must not be deleted unless it is released.
- */
-#define KBASE_TLSTREAM_TL_RET_ATOM_CTX(kbdev, atom, context) \
- __TRACE_IF_ENABLED(tl_ret_atom_ctx, kbdev, atom, context)
-
-/**
- * KBASE_TLSTREAM_TL_RET_ATOM_LPU - retain atom by LPU
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @lpu: Name of the Logical Processing Unit object
- * @attrib_match_list: List containing match operator attributes
- *
- * Function emits a timeline message informing that atom object is being held
- * by LPU and must not be deleted unless it is released.
- */
-#define KBASE_TLSTREAM_TL_RET_ATOM_LPU(kbdev, atom, lpu, attrib_match_list) \
- __TRACE_IF_ENABLED(tl_ret_atom_lpu, kbdev, atom, lpu, attrib_match_list)
+/* Types of streams generated by timeline. */
+enum tl_stream_type {
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ TL_STREAM_TYPE_OBJ,
+ TL_STREAM_TYPE_AUX,
-/**
- * KBASE_TLSTREAM_TL_NRET_CTX_LPU - release context by LPU
- * @kbdev: Kbase device
- * @context: Name of the context object
- * @lpu: Name of the Logical Processing Unit object
- *
- * Function emits a timeline message informing that context is being released
- * by LPU object.
- */
-#define KBASE_TLSTREAM_TL_NRET_CTX_LPU(kbdev, context, lpu) \
- __TRACE_IF_ENABLED(tl_nret_ctx_lpu, kbdev, context, lpu)
-
-/**
- * KBASE_TLSTREAM_TL_NRET_ATOM_CTX - release atom by context
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @context: Name of the context object
- *
- * Function emits a timeline message informing that atom object is being
- * released by context.
- */
-#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX(kbdev, atom, context) \
- __TRACE_IF_ENABLED(tl_nret_atom_ctx, kbdev, atom, context)
-
-/**
- * KBASE_TLSTREAM_TL_NRET_ATOM_LPU - release atom by LPU
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @lpu: Name of the Logical Processing Unit object
- *
- * Function emits a timeline message informing that atom object is being
- * released by LPU.
- */
-#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU(kbdev, atom, lpu) \
- __TRACE_IF_ENABLED(tl_nret_atom_lpu, kbdev, atom, lpu)
-
-/**
- * KBASE_TLSTREAM_TL_RET_AS_CTX - retain address space by context
- * @kbdev: Kbase device
- * @as: Name of the address space object
- * @ctx: Name of the context object
- *
- * Function emits a timeline message informing that address space object
- * is being held by the context object.
- */
-#define KBASE_TLSTREAM_TL_RET_AS_CTX(kbdev, as, ctx) \
- __TRACE_IF_ENABLED(tl_ret_as_ctx, kbdev, as, ctx)
-
-/**
- * KBASE_TLSTREAM_TL_NRET_AS_CTX - release address space by context
- * @kbdev: Kbase device
- * @as: Name of the address space object
- * @ctx: Name of the context object
- *
- * Function emits a timeline message informing that address space object
- * is being released by context.
- */
-#define KBASE_TLSTREAM_TL_NRET_AS_CTX(kbdev, as, ctx) \
- __TRACE_IF_ENABLED(tl_nret_as_ctx, kbdev, as, ctx)
-
-/**
- * KBASE_TLSTREAM_TL_RET_ATOM_AS - retain atom by address space
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @as: Name of the address space object
- *
- * Function emits a timeline message informing that atom object is being held
- * by address space and must not be deleted unless it is released.
- */
-#define KBASE_TLSTREAM_TL_RET_ATOM_AS(kbdev, atom, as) \
- __TRACE_IF_ENABLED(tl_ret_atom_as, kbdev, atom, as)
+ TL_STREAM_TYPE_COUNT
+};
/**
- * KBASE_TLSTREAM_TL_NRET_ATOM_AS - release atom by address space
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @as: Name of the address space object
- *
- * Function emits a timeline message informing that atom object is being
- * released by address space.
+ * kbase_tlstream_init - initialize timeline stream
+ * @stream: Pointer to the stream structure
+ * @stream_type: Stream type
+ * @ready_read: Pointer to a wait queue to signal when
+ * timeline messages are ready for collection.
*/
-#define KBASE_TLSTREAM_TL_NRET_ATOM_AS(kbdev, atom, as) \
- __TRACE_IF_ENABLED(tl_nret_atom_as, kbdev, atom, as)
+void kbase_tlstream_init(struct kbase_tlstream *stream,
+ enum tl_stream_type stream_type,
+ wait_queue_head_t *ready_read);
/**
- * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG - atom job slot attributes
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @jd: Job descriptor address
- * @affinity: Job affinity
- * @config: Job config
- *
- * Function emits a timeline message containing atom attributes.
+ * kbase_tlstream_term - terminate timeline stream
+ * @stream: Pointer to the stream structure
*/
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(kbdev, atom, jd, affinity, config) \
- __TRACE_IF_ENABLED(tl_attrib_atom_config, kbdev, atom, jd, affinity, config)
+void kbase_tlstream_term(struct kbase_tlstream *stream);
/**
- * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY - atom priority
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @prio: Atom priority
+ * kbase_tlstream_reset - reset stream
+ * @stream: Pointer to the stream structure
*
- * Function emits a timeline message containing atom priority.
+ * Function discards all pending messages and resets packet counters.
*/
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(kbdev, atom, prio) \
- __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority, kbdev, atom, prio)
+void kbase_tlstream_reset(struct kbase_tlstream *stream);
/**
- * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE - atom state
- * @kbdev: Kbase device
- * @atom: Name of the atom object
- * @state: Atom state
+ * kbase_tlstream_msgbuf_acquire - lock selected stream and reserve a buffer
+ * @stream: Pointer to the stream structure
+ * @msg_size: Message size
+ * @flags: Pointer to store flags passed back on stream release
*
- * Function emits a timeline message containing atom state.
- */
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, atom, state) \
- __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_state, kbdev, atom, state)
-
-/**
- * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED - atom was prioritized
- * @kbdev: Kbase device
- * @atom: Name of the atom object
+ * Lock the stream and reserve the number of bytes requested
+ * in msg_size for the user.
*
- * Function emits a timeline message signalling priority change
- */
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED(kbdev, atom) \
- __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_prioritized, kbdev, atom)
-
-/**
- * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT - jit happened on atom
- * @kbdev: Kbase device
- * @atom: Atom identifier
- * @edit_addr: Address edited by jit
- * @new_addr: Address placed into the edited location
- * @va_pages: Maximum number of pages this jit can allocate
- * @jit_flags: Flags defining the properties of the memory region
- */
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(kbdev, atom, edit_addr, new_addr, \
- va_pages, jit_flags) \
- __TRACE_IF_ENABLED_JD(tl_attrib_atom_jit, kbdev, atom, edit_addr, \
- new_addr, va_pages, jit_flags)
-
-/**
- * Information about the JIT allocation atom.
+ * Return: pointer to the buffer where a message can be stored
*
- * @kbdev: Kbase device
- * @atom: Atom identifier.
- * @va_pages: The minimum number of virtual pages required.
- * @commit_pages: The minimum number of physical pages which
- * should back the allocation.
- * @extent: Granularity of physical pages to grow the
- * allocation by during a fault.
- * @jit_id: Unique ID provided by the caller, this is used
- * to pair allocation and free requests.
- * @bin_id: The JIT allocation bin, used in conjunction with
- * @max_allocations to limit the number of each
- * type of JIT allocation.
- * @max_allocations: The maximum number of allocations allowed within
- * the bin specified by @bin_id. Should be the same
- * for all JIT allocations within the same bin.
- * @jit_flags: Flags specifying the special requirements for
- * the JIT allocation.
- * @usage_id: A hint about which allocation should be reused.
- * The kernel should attempt to use a previous
- * allocation with the same usage_id
+ * Warning: The stream must be released with kbase_tlstream_msgbuf_release().
+ * Only atomic operations are allowed while the stream is locked
+ * (i.e. do not use any operation that may sleep).
*/
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(kbdev, atom, va_pages, \
- commit_pages, extent, jit_id, bin_id,\
- max_allocations, jit_flags, usage_id) \
- __TRACE_IF_ENABLED(tl_attrib_atom_jitallocinfo, kbdev, atom, va_pages, \
- commit_pages, extent, jit_id, bin_id,\
- max_allocations, jit_flags, usage_id)
+char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream,
+ size_t msg_size, unsigned long *flags) __acquires(&stream->lock);
/**
- * Information about the JIT free atom.
+ * kbase_tlstream_msgbuf_release - unlock selected stream
+ * @stream: Pointer to the stream structure
+ * @flags: Value obtained during stream acquire
*
- * @kbdev: Kbase device
- * @atom: Atom identifier.
- * @jit_id: Unique ID provided by the caller, this is used
- * to pair allocation and free requests.
+ * Release the stream that has been previously
+ * locked with a call to kbase_tlstream_msgbuf_acquire().
*/
-#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO(kbdev, atom, jit_id) \
- __TRACE_IF_ENABLED(tl_attrib_atom_jitfreeinfo, kbdev, atom, jit_id)
+void kbase_tlstream_msgbuf_release(struct kbase_tlstream *stream,
+ unsigned long flags) __releases(&stream->lock);
/**
- * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG - address space attributes
- * @kbdev: Kbase device
- * @as: Assigned address space
- * @transtab: Configuration of the TRANSTAB register
- * @memattr: Configuration of the MEMATTR register
- * @transcfg: Configuration of the TRANSCFG register (or zero if not present)
+ * kbase_tlstream_flush_stream - flush stream
+ * @stream: Pointer to the stream structure
*
- * Function emits a timeline message containing address space attributes.
- */
-#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(kbdev, as, transtab, memattr, transcfg) \
- __TRACE_IF_ENABLED(tl_attrib_as_config, kbdev, as, transtab, memattr, transcfg)
-
-/**
- * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX
- * @kbdev: Kbase device
- * @atom: Atom identifier
- */
-#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(kbdev, atom) \
- __TRACE_IF_ENABLED(tl_event_atom_softstop_ex, kbdev, atom)
-
-/**
- * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP
- * @kbdev: Kbase device
- * @lpu: Name of the LPU object
- */
-#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(kbdev, lpu) \
- __TRACE_IF_ENABLED(tl_event_lpu_softstop, kbdev, lpu)
-
-/**
- * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE
- * @kbdev: Kbase device
- * @atom: Atom identifier
- */
-#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(kbdev, atom) \
- __TRACE_IF_ENABLED(tl_event_atom_softstop_issue, kbdev, atom)
-
-/**
- * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START
- * @kbdev: Kbase device
- * @atom: Atom identifier
+ * Flush pending data in the timeline stream.
*/
-#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(kbdev, atom) \
- __TRACE_IF_ENABLED(tl_event_atom_softjob_start, kbdev, atom)
+void kbase_tlstream_flush_stream(struct kbase_tlstream *stream);
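/*
 * A minimal usage sketch of the interface declared above, mirroring the
 * writer functions added in mali_kbase_tracepoints.c later in this patch.
 * A stream is created with kbase_tlstream_init(), torn down with
 * kbase_tlstream_term(), and pending data can be pushed out with
 * kbase_tlstream_flush_stream(); the function below shows only the message
 * writing protocol. The example_* names and the message id value are
 * hypothetical; the acquire/serialize/release sequence itself is taken from
 * this patch.
 */
static void example_emit_u32(struct kbase_tlstream *stream, u32 value)
{
	const u32 msg_id = 0;	/* hypothetical message id */
	const size_t msg_size = sizeof(msg_id) + sizeof(u64) + sizeof(value);
	char *buffer;
	unsigned long acq_flags;
	size_t pos = 0;

	/* Locks the stream: only atomic operations until the release below. */
	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
	pos = kbasep_serialize_timestamp(buffer, pos);
	pos = kbasep_serialize_bytes(buffer, pos, &value, sizeof(value));
	/* Unlocks the stream; the message is now pending for collection. */
	kbase_tlstream_msgbuf_release(stream, acq_flags);
}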
-/**
- * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END
- * @kbdev: Kbase device
- * @atom: Atom identifier
- */
-#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(kbdev, atom) \
- __TRACE_IF_ENABLED(tl_event_atom_softjob_end, kbdev, atom)
-
-/**
- * KBASE_TLSTREAM_JD_GPU_SOFT_RESET - The GPU is being soft reset
- * @kbdev: Kbase device
- * @gpu: Name of the GPU object
- *
- * This imperative tracepoint is specific to job dumping.
- * Function emits a timeline message indicating GPU soft reset.
- */
-#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev, gpu) \
- __TRACE_IF_ENABLED(jd_gpu_soft_reset, kbdev, gpu)
-
-
-/**
- * KBASE_TLSTREAM_AUX_PM_STATE - timeline message: power management state
- * @kbdev: Kbase device
- * @core_type: Core type (shader, tiler, l2 cache, l3 cache)
- * @state: 64bits bitmask reporting power state of the cores (1-ON, 0-OFF)
- */
-#if !defined(KBASE_TLSTREAM_AUX_PM_STATE)
-#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
- __TRACE_IF_ENABLED(aux_pm_state, kbdev, core_type, state)
-#endif
-
-/**
- * KBASE_TLSTREAM_AUX_PAGEFAULT - timeline message: MMU page fault event
- * resulting in new pages being mapped
- * @kbdev: Kbase device
- * @ctx_nr: Kernel context number
- * @as_nr: Address space number
- * @page_count_change: Number of pages to be added
- */
-#if !defined(KBASE_TLSTREAM_AUX_PAGEFAULT)
-#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, ctx_nr, as_nr, page_count_change) \
- __TRACE_IF_ENABLED(aux_pagefault, kbdev, ctx_nr, as_nr, page_count_change)
-#endif
-
-/**
- * KBASE_TLSTREAM_AUX_PAGESALLOC - timeline message: total number of allocated
- * pages is changed
- * @kbdev: Kbase device
- * @ctx_nr: Kernel context number
- * @page_count: Number of pages used by the context
- */
-#if !defined(KBASE_TLSTREAM_AUX_PAGESALLOC)
-#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_count) \
- __TRACE_IF_ENABLED(aux_pagesalloc, kbdev, ctx_nr, page_count)
-#endif
-
-/**
- * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - timeline message: new target DVFS
- * frequency
- * @kbdev: Kbase device
- * @target_freq: New target frequency
- */
-#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(kbdev, target_freq) \
- __TRACE_IF_ENABLED(aux_devfreq_target, kbdev, target_freq)
-
-/**
- * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - The GPU has started transitioning
- * to protected mode
- * @kbdev: Kbase device
- * @gpu: Name of the GPU object
- *
- * Function emits a timeline message indicating the GPU is starting to
- * transition to protected mode.
- */
-#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev, gpu) \
- __TRACE_IF_ENABLED_LATENCY(aux_protected_enter_start, kbdev, gpu)
-
-/**
- * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - The GPU has finished transitioning
- * to protected mode
- * @kbdev: Kbase device
- * @gpu: Name of the GPU object
- *
- * Function emits a timeline message indicating the GPU has finished
- * transitioning to protected mode.
- */
-#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev, gpu) \
- __TRACE_IF_ENABLED_LATENCY(aux_protected_enter_end, kbdev, gpu)
-
-/**
- * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - The GPU has started transitioning
- * to non-protected mode
- * @kbdev: Kbase device
- * @gpu: Name of the GPU object
- *
- * Function emits a timeline message indicating the GPU is starting to
- * transition to non-protected mode.
- */
-#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev, gpu) \
- __TRACE_IF_ENABLED_LATENCY(aux_protected_leave_start, kbdev, gpu)
-
-/**
- * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - The GPU has finished transitioning
- * to non-protected mode
- * @kbdev: Kbase device
- * @gpu: Name of the GPU object
- *
- * Function emits a timeline message indicating the GPU has finished
- * transitioning to non-protected mode.
- */
-#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev, gpu) \
- __TRACE_IF_ENABLED_LATENCY(aux_protected_leave_end, kbdev, gpu)
-
-/**
- * KBASE_TLSTREAM_AUX_JIT_STATS - JIT allocations per bin statistics
- *
- * @kbdev: Kbase device
- * @ctx_nr: Kernel context number
- * @bid: JIT bin id
- * @max_allocs: Maximum allocations allowed in this bin.
- * UINT_MAX is a special value. It denotes that
- * the parameter was not changed since the last time.
- * @allocs: Number of active allocations in this bin
- * @va_pages: Number of virtual pages allocated in this bin
- * @ph_pages: Number of physical pages allocated in this bin
- *
- * Function emits a timeline message indicating the JIT statistics
- * for a given bin have changed.
- */
-#define KBASE_TLSTREAM_AUX_JIT_STATS(kbdev, ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages) \
- __TRACE_IF_ENABLED(aux_jit_stats, kbdev, ctx_nr, bid, \
- max_allocs, allocs, \
- va_pages, ph_pages)
-
-/**
- * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT - An event has happened on a job slot
- *
- * @kbdev: Kbase device
- * @context: Kernel context pointer, NULL if event is not TL_JS_EVENT_START
- * @slot_nr: Job slot number
- * @atom_nr: Sequential number of an atom which has started
- * execution on the job slot. Zero, if event is not TL_JS_EVENT_START.
- * @event: Event type. One of TL_JS_EVENT values.
- */
-#if !defined(KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT)
-#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, context, slot_nr, atom_nr, event) \
- __TRACE_IF_ENABLED(aux_event_job_slot, kbdev, context, slot_nr, atom_nr, event)
-#endif
#endif /* _KBASE_TLSTREAM_H */
+
diff --git a/mali_kbase/mali_kbase_tracepoints.c b/mali_kbase/mali_kbase_tracepoints.c
new file mode 100644
index 0000000..2f7784f
--- /dev/null
+++ b/mali_kbase/mali_kbase_tracepoints.c
@@ -0,0 +1,2766 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * DO NOT EDIT.
+ */
+
+#include "mali_kbase_tracepoints.h"
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tl_serialize.h"
+
+/* clang-format off */
+
+/* Message ids of trace events that are recorded in the timeline stream. */
+enum tl_msg_id_obj {
+ KBASE_TL_NEW_CTX,
+ KBASE_TL_NEW_GPU,
+ KBASE_TL_NEW_LPU,
+ KBASE_TL_NEW_ATOM,
+ KBASE_TL_NEW_AS,
+ KBASE_TL_DEL_CTX,
+ KBASE_TL_DEL_ATOM,
+ KBASE_TL_LIFELINK_LPU_GPU,
+ KBASE_TL_LIFELINK_AS_GPU,
+ KBASE_TL_RET_CTX_LPU,
+ KBASE_TL_RET_ATOM_CTX,
+ KBASE_TL_RET_ATOM_LPU,
+ KBASE_TL_NRET_CTX_LPU,
+ KBASE_TL_NRET_ATOM_CTX,
+ KBASE_TL_NRET_ATOM_LPU,
+ KBASE_TL_RET_AS_CTX,
+ KBASE_TL_NRET_AS_CTX,
+ KBASE_TL_RET_ATOM_AS,
+ KBASE_TL_NRET_ATOM_AS,
+ KBASE_TL_ATTRIB_ATOM_CONFIG,
+ KBASE_TL_ATTRIB_ATOM_PRIORITY,
+ KBASE_TL_ATTRIB_ATOM_STATE,
+ KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
+ KBASE_TL_ATTRIB_ATOM_JIT,
+ KBASE_TL_JIT_USEDPAGES,
+ KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
+ KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
+ KBASE_TL_ATTRIB_AS_CONFIG,
+ KBASE_TL_EVENT_LPU_SOFTSTOP,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+ KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+ KBASE_TL_EVENT_ATOM_SOFTJOB_END,
+ KBASE_JD_GPU_SOFT_RESET,
+ KBASE_TL_NEW_KCPUQUEUE,
+ KBASE_TL_RET_KCPUQUEUE_CTX,
+ KBASE_TL_DEL_KCPUQUEUE,
+ KBASE_TL_NRET_KCPUQUEUE_CTX,
+ KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
+ KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
+ KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
+ KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
+ KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START,
+ KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER,
+ KBASE_OBJ_MSG_COUNT,
+};
+
+/* Message ids of trace events that are recorded in the auxiliary stream. */
+enum tl_msg_id_aux {
+ KBASE_AUX_PM_STATE,
+ KBASE_AUX_PAGEFAULT,
+ KBASE_AUX_PAGESALLOC,
+ KBASE_AUX_DEVFREQ_TARGET,
+ KBASE_AUX_PROTECTED_ENTER_START,
+ KBASE_AUX_PROTECTED_ENTER_END,
+ KBASE_AUX_PROTECTED_LEAVE_START,
+ KBASE_AUX_PROTECTED_LEAVE_END,
+ KBASE_AUX_JIT_STATS,
+ KBASE_AUX_EVENT_JOB_SLOT,
+ KBASE_AUX_MSG_COUNT,
+};
+
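/*
 * Judging from the writer functions later in this file, the argument type
 * strings in the TP_DESC entries below appear to encode the serialized
 * layout of each message: '@' the implicit timestamp, 'p' a pointer-sized
 * object handle, 'I' a u32, 'L' a u64 and 's' a NUL-terminated string
 * (e.g. "@pps" for KBASE_TL_RET_ATOM_LPU, which serializes two pointers
 * followed by attrib_match_list).
 */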
+#define OBJ_TL_LIST \
+ TP_DESC(KBASE_TL_NEW_CTX, \
+ "object ctx is created", \
+ "@pII", \
+ "ctx,ctx_nr,tgid") \
+ TP_DESC(KBASE_TL_NEW_GPU, \
+ "object gpu is created", \
+ "@pII", \
+ "gpu,gpu_id,core_count") \
+ TP_DESC(KBASE_TL_NEW_LPU, \
+ "object lpu is created", \
+ "@pII", \
+ "lpu,lpu_nr,lpu_fn") \
+ TP_DESC(KBASE_TL_NEW_ATOM, \
+ "object atom is created", \
+ "@pI", \
+ "atom,atom_nr") \
+ TP_DESC(KBASE_TL_NEW_AS, \
+ "address space object is created", \
+ "@pI", \
+ "address_space,as_nr") \
+ TP_DESC(KBASE_TL_DEL_CTX, \
+ "context is destroyed", \
+ "@p", \
+ "ctx") \
+ TP_DESC(KBASE_TL_DEL_ATOM, \
+ "atom is destroyed", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_LIFELINK_LPU_GPU, \
+ "lpu is deleted with gpu", \
+ "@pp", \
+ "lpu,gpu") \
+ TP_DESC(KBASE_TL_LIFELINK_AS_GPU, \
+ "address space is deleted with gpu", \
+ "@pp", \
+ "address_space,gpu") \
+ TP_DESC(KBASE_TL_RET_CTX_LPU, \
+ "context is retained by lpu", \
+ "@pp", \
+ "ctx,lpu") \
+ TP_DESC(KBASE_TL_RET_ATOM_CTX, \
+ "atom is retained by context", \
+ "@pp", \
+ "atom,ctx") \
+ TP_DESC(KBASE_TL_RET_ATOM_LPU, \
+ "atom is retained by lpu", \
+ "@pps", \
+ "atom,lpu,attrib_match_list") \
+ TP_DESC(KBASE_TL_NRET_CTX_LPU, \
+ "context is released by lpu", \
+ "@pp", \
+ "ctx,lpu") \
+ TP_DESC(KBASE_TL_NRET_ATOM_CTX, \
+ "atom is released by context", \
+ "@pp", \
+ "atom,ctx") \
+ TP_DESC(KBASE_TL_NRET_ATOM_LPU, \
+ "atom is released by lpu", \
+ "@pp", \
+ "atom,lpu") \
+ TP_DESC(KBASE_TL_RET_AS_CTX, \
+ "address space is retained by context", \
+ "@pp", \
+ "address_space,ctx") \
+ TP_DESC(KBASE_TL_NRET_AS_CTX, \
+ "address space is released by context", \
+ "@pp", \
+ "address_space,ctx") \
+ TP_DESC(KBASE_TL_RET_ATOM_AS, \
+ "atom is retained by address space", \
+ "@pp", \
+ "atom,address_space") \
+ TP_DESC(KBASE_TL_NRET_ATOM_AS, \
+ "atom is released by address space", \
+ "@pp", \
+ "atom,address_space") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_CONFIG, \
+ "atom job slot attributes", \
+ "@pLLI", \
+ "atom,descriptor,affinity,config") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITY, \
+ "atom priority", \
+ "@pI", \
+ "atom,prio") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_STATE, \
+ "atom state", \
+ "@pI", \
+ "atom,state") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITIZED, \
+ "atom caused priority change", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_JIT, \
+ "jit done for atom", \
+ "@pLLLI", \
+ "atom,edit_addr,new_addr,jit_flags,j_id") \
+ TP_DESC(KBASE_TL_JIT_USEDPAGES, \
+ "used pages for jit", \
+ "@LI", \
+ "used_pages,j_id") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_JITALLOCINFO, \
+ "Information about JIT allocations", \
+ "@pLLLIIIII", \
+ "atom,va_pgs,com_pgs,extent,j_id,bin_id,max_allocs,flags,usg_id") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_JITFREEINFO, \
+ "Information about JIT frees", \
+ "@pI", \
+ "atom,j_id") \
+ TP_DESC(KBASE_TL_ATTRIB_AS_CONFIG, \
+ "address space attributes", \
+ "@pLLL", \
+ "address_space,transtab,memattr,transcfg") \
+ TP_DESC(KBASE_TL_EVENT_LPU_SOFTSTOP, \
+ "softstop event on given lpu", \
+ "@p", \
+ "lpu") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX, \
+ "atom softstopped", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE, \
+ "atom softstop issued", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_START, \
+ "atom soft job has started", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_END, \
+ "atom soft job has completed", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_JD_GPU_SOFT_RESET, \
+ "gpu soft reset", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_TL_NEW_KCPUQUEUE, \
+ "New KCPU Queue", \
+ "@ppI", \
+ "kcpu_queue,ctx,kcpuq_num_pending_cmds") \
+ TP_DESC(KBASE_TL_RET_KCPUQUEUE_CTX, \
+ "Context retains KCPU Queue", \
+ "@pp", \
+ "kcpu_queue,ctx") \
+ TP_DESC(KBASE_TL_DEL_KCPUQUEUE, \
+ "Delete KCPU Queue", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_NRET_KCPUQUEUE_CTX, \
+ "Context releases KCPU Queue", \
+ "@pp", \
+ "kcpu_queue,ctx") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL, \
+ "KCPU Queue enqueues Signal on Fence", \
+ "@pL", \
+ "kcpu_queue,fence") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT, \
+ "KCPU Queue enqueues Wait on Fence", \
+ "@pL", \
+ "kcpu_queue,fence") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ "Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ "Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+ "@pLI", \
+ "kcpu_queue,cqs_obj_gpu_addr,cqs_obj_compare_value") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ "End array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ "Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ "Array item of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+ "@pL", \
+ "kcpu_queue,cqs_obj_gpu_addr") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ "End array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ "Begin array of KCPU Queue enqueues Debug Copy", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ "Array item of KCPU Queue enqueues Debug Copy", \
+ "@pL", \
+ "kcpu_queue,debugcopy_dst_size") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ "End array of KCPU Queue enqueues Debug Copy", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
+ "KCPU Queue enqueues Map Import", \
+ "@pL", \
+ "kcpu_queue,map_import_buf_gpu_addr") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT, \
+ "KCPU Queue enqueues Unmap Import", \
+ "@pL", \
+ "kcpu_queue,map_import_buf_gpu_addr") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ "Begin array of KCPU Queue enqueues JIT Alloc", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ "Array item of KCPU Queue enqueues JIT Alloc", \
+ "@pLLLLIIIII", \
+ "kcpu_queue,jit_alloc_gpu_alloc_addr_dest,jit_alloc_va_pages,jit_alloc_commit_pages,jit_alloc_extent,jit_alloc_jit_id,jit_alloc_bin_id,jit_alloc_max_allocations,jit_alloc_flags,jit_alloc_usage_id") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ "End array of KCPU Queue enqueues JIT Alloc", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ "Begin array of KCPU Queue enqueues JIT Free", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ "Array item of KCPU Queue enqueues JIT Free", \
+ "@pI", \
+ "kcpu_queue,jit_alloc_jit_id") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ "End array of KCPU Queue enqueues JIT Free", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
+ "KCPU Queue starts a Signal on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END, \
+ "KCPU Queue ends a Signal on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START, \
+ "KCPU Queue starts a Wait on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END, \
+ "KCPU Queue ends a Wait on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
+ "KCPU Queue starts a Wait on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
+ "KCPU Queue ends a Wait on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START, \
+ "KCPU Queue starts a Set on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END, \
+ "KCPU Queue ends a Set on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START, \
+ "KCPU Queue starts an array of Debug Copys", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END, \
+ "KCPU Queue ends an array of Debug Copys", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
+ "KCPU Queue starts a Map Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END, \
+ "KCPU Queue ends a Map Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START, \
+ "KCPU Queue starts an Unmap Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END, \
+ "KCPU Queue ends an Unmap Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START, \
+ "KCPU Queue starts an array of JIT Allocs", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ "Begin array of KCPU Queue ends an array of JIT Allocs", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ "Array item of KCPU Queue ends an array of JIT Allocs", \
+ "@pLLL", \
+ "kcpu_queue,jit_alloc_gpu_alloc_addr,jit_alloc_mmu_flags,jit_alloc_pages_allocated") \
+ TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ "End array of KCPU Queue ends an array of JIT Allocs", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START, \
+ "KCPU Queue starts an array of JIT Frees", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ "KCPU Queue ends an array of JIT Frees", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER, \
+ "KCPU Queue executes an Error Barrier", \
+ "@p", \
+ "kcpu_queue") \
+
+#define MIPE_HEADER_BLOB_VAR_NAME __obj_desc_header
+#define MIPE_HEADER_TP_LIST OBJ_TL_LIST
+#define MIPE_HEADER_TP_LIST_COUNT KBASE_OBJ_MSG_COUNT
+#define MIPE_HEADER_PKT_CLASS TL_PACKET_CLASS_OBJ
+
+#include "mali_kbase_mipe_gen_header.h"
+
+const char *obj_desc_header = (const char *) &__obj_desc_header;
+const size_t obj_desc_header_size = sizeof(__obj_desc_header);
+
+#define AUX_TL_LIST \
+ TP_DESC(KBASE_AUX_PM_STATE, \
+ "PM state", \
+ "@IL", \
+ "core_type,core_state_bitset") \
+ TP_DESC(KBASE_AUX_PAGEFAULT, \
+ "Page fault", \
+ "@IIL", \
+ "ctx_nr,as_nr,page_cnt_change") \
+ TP_DESC(KBASE_AUX_PAGESALLOC, \
+ "Total alloc pages change", \
+ "@IL", \
+ "ctx_nr,page_cnt") \
+ TP_DESC(KBASE_AUX_DEVFREQ_TARGET, \
+ "New device frequency target", \
+ "@L", \
+ "target_freq") \
+ TP_DESC(KBASE_AUX_PROTECTED_ENTER_START, \
+ "enter protected mode start", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_PROTECTED_ENTER_END, \
+ "enter protected mode end", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_PROTECTED_LEAVE_START, \
+ "leave protected mode start", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_PROTECTED_LEAVE_END, \
+ "leave protected mode end", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_JIT_STATS, \
+ "per-bin JIT statistics", \
+ "@IIIIII", \
+ "ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages") \
+ TP_DESC(KBASE_AUX_EVENT_JOB_SLOT, \
+ "event on a given job slot", \
+ "@pIII", \
+ "ctx,slot_nr,atom_nr,event") \
+
+#define MIPE_HEADER_BLOB_VAR_NAME __aux_desc_header
+#define MIPE_HEADER_TP_LIST AUX_TL_LIST
+#define MIPE_HEADER_TP_LIST_COUNT KBASE_AUX_MSG_COUNT
+#define MIPE_HEADER_PKT_CLASS TL_PACKET_CLASS_AUX
+
+#include "mali_kbase_mipe_gen_header.h"
+
+const char *aux_desc_header = (const char *) &__aux_desc_header;
+const size_t aux_desc_header_size = sizeof(__aux_desc_header);
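/*
 * An illustrative sketch of the X-macro technique used by the TP_DESC lists
 * above: one list is expanded several times with different TP_DESC
 * definitions to produce different artifacts. This is not the content of
 * mali_kbase_mipe_gen_header.h; all EXAMPLE_* names are hypothetical.
 */
#define EXAMPLE_TL_LIST \
	TP_DESC(EXAMPLE_EVENT_A, "first example event", "@p", "obj") \
	TP_DESC(EXAMPLE_EVENT_B, "second example event", "@pI", "obj,val")

/* Expansion 1: an enum of message ids. */
#define TP_DESC(name, desc, arg_types, arg_names) name,
enum example_msg_id { EXAMPLE_TL_LIST EXAMPLE_MSG_COUNT };
#undef TP_DESC

/* Expansion 2: a parallel table of human-readable descriptions. */
#define TP_DESC(name, desc, arg_types, arg_names) desc,
static const char *const example_msg_desc[] = { EXAMPLE_TL_LIST };
#undef TP_DESC
#undef EXAMPLE_TL_LIST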
+
+void __kbase_tlstream_tl_new_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 ctx_nr,
+ u32 tgid)
+{
+ const u32 msg_id = KBASE_TL_NEW_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(ctx_nr)
+ + sizeof(tgid)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &tgid, sizeof(tgid));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
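/*
 * Every writer below follows the same pattern as __kbase_tlstream_tl_new_ctx()
 * above: msg_size accounts for the message id, the u64 timestamp and the
 * arguments in declared order; the stream buffer is acquired, the fields are
 * serialized with kbasep_serialize_bytes()/kbasep_serialize_timestamp(), and
 * the stream is released. String arguments (see
 * __kbase_tlstream_tl_ret_atom_lpu()) additionally reserve a length-prefixed
 * size s0 and are written with kbasep_serialize_string().
 */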
+
+void __kbase_tlstream_tl_new_gpu(
+ struct kbase_tlstream *stream,
+ const void *gpu,
+ u32 gpu_id,
+ u32 core_count)
+{
+ const u32 msg_id = KBASE_TL_NEW_GPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ + sizeof(gpu_id)
+ + sizeof(core_count)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu_id, sizeof(gpu_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &core_count, sizeof(core_count));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_lpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ u32 lpu_nr,
+ u32 lpu_fn)
+{
+ const u32 msg_id = KBASE_TL_NEW_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(lpu)
+ + sizeof(lpu_nr)
+ + sizeof(lpu_fn)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu_nr, sizeof(lpu_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu_fn, sizeof(lpu_fn));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_atom(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 atom_nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_ATOM;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(atom_nr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom_nr, sizeof(atom_nr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_as(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u32 as_nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_AS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(as_nr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &as_nr, sizeof(as_nr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_DEL_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_atom(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_DEL_ATOM;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_lifelink_lpu_gpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(lpu)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_lifelink_as_gpu(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_RET_CTX_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu,
+ const char *attrib_match_list)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
+ const size_t s0 = sizeof(u32) + sizeof(char)
+ + strnlen(attrib_match_list, STRLEN_MAX);
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(lpu)
+ + s0
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+ pos = kbasep_serialize_string(buffer,
+ pos, attrib_match_list, s0);
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_AS_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_AS_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_AS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(address_space)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(address_space)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_config(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 descriptor,
+ u64 affinity,
+ u32 config)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(descriptor)
+ + sizeof(affinity)
+ + sizeof(config)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &descriptor, sizeof(descriptor));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &affinity, sizeof(affinity));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &config, sizeof(config));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 prio)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(prio)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &prio, sizeof(prio));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 state)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(state)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &state, sizeof(state));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jit(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 edit_addr,
+ u64 new_addr,
+ u64 jit_flags,
+ u32 j_id)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(edit_addr)
+ + sizeof(new_addr)
+ + sizeof(jit_flags)
+ + sizeof(j_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &edit_addr, sizeof(edit_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &new_addr, sizeof(new_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_flags, sizeof(jit_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jit_usedpages(
+ struct kbase_tlstream *stream,
+ u64 used_pages,
+ u32 j_id)
+{
+ const u32 msg_id = KBASE_TL_JIT_USEDPAGES;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(used_pages)
+ + sizeof(j_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &used_pages, sizeof(used_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 va_pgs,
+ u64 com_pgs,
+ u64 extent,
+ u32 j_id,
+ u32 bin_id,
+ u32 max_allocs,
+ u32 flags,
+ u32 usg_id)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(va_pgs)
+ + sizeof(com_pgs)
+ + sizeof(extent)
+ + sizeof(j_id)
+ + sizeof(bin_id)
+ + sizeof(max_allocs)
+ + sizeof(flags)
+ + sizeof(usg_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &va_pgs, sizeof(va_pgs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &com_pgs, sizeof(com_pgs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &extent, sizeof(extent));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &bin_id, sizeof(bin_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &max_allocs, sizeof(max_allocs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &flags, sizeof(flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &usg_id, sizeof(usg_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 j_id)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(j_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_as_config(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u64 transtab,
+ u64 memattr,
+ u64 transcfg)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(transtab)
+ + sizeof(memattr)
+ + sizeof(transcfg)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &transtab, sizeof(transtab));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &memattr, sizeof(memattr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &transcfg, sizeof(transcfg));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_lpu_softstop(
+ struct kbase_tlstream *stream,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_ex(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_issue(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_start(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_end(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_jd_gpu_soft_reset(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pm_state(
+ struct kbase_tlstream *stream,
+ u32 core_type,
+ u64 core_state_bitset)
+{
+ const u32 msg_id = KBASE_AUX_PM_STATE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(core_type)
+ + sizeof(core_state_bitset)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &core_type, sizeof(core_type));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &core_state_bitset, sizeof(core_state_bitset));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagefault(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 as_nr,
+ u64 page_cnt_change)
+{
+ const u32 msg_id = KBASE_AUX_PAGEFAULT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx_nr)
+ + sizeof(as_nr)
+ + sizeof(page_cnt_change)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &as_nr, sizeof(as_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &page_cnt_change, sizeof(page_cnt_change));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u64 page_cnt)
+{
+ const u32 msg_id = KBASE_AUX_PAGESALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx_nr)
+ + sizeof(page_cnt)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &page_cnt, sizeof(page_cnt));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(
+ struct kbase_tlstream *stream,
+ u64 target_freq)
+{
+ const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(target_freq)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &target_freq, sizeof(target_freq));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_start(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_end(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_start(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_end(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_jit_stats(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 bid,
+ u32 max_allocs,
+ u32 allocs,
+ u32 va_pages,
+ u32 ph_pages)
+{
+ const u32 msg_id = KBASE_AUX_JIT_STATS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx_nr)
+ + sizeof(bid)
+ + sizeof(max_allocs)
+ + sizeof(allocs)
+ + sizeof(va_pages)
+ + sizeof(ph_pages)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &bid, sizeof(bid));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &max_allocs, sizeof(max_allocs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &allocs, sizeof(allocs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &va_pages, sizeof(va_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ph_pages, sizeof(ph_pages));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_event_job_slot(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 slot_nr,
+ u32 atom_nr,
+ u32 event)
+{
+ const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(slot_nr)
+ + sizeof(atom_nr)
+ + sizeof(event)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &slot_nr, sizeof(slot_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom_nr, sizeof(atom_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &event, sizeof(event));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *ctx,
+ u32 kcpuq_num_pending_cmds)
+{
+ const u32 msg_id = KBASE_TL_NEW_KCPUQUEUE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(ctx)
+ + sizeof(kcpuq_num_pending_cmds)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpuq_num_pending_cmds, sizeof(kcpuq_num_pending_cmds));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_kcpuqueue_ctx(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_KCPUQUEUE_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_DEL_KCPUQUEUE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_KCPUQUEUE_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 fence)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(fence)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &fence, sizeof(fence));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 fence)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(fence)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &fence, sizeof(fence));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr,
+ u32 cqs_obj_compare_value)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(cqs_obj_gpu_addr)
+ + sizeof(cqs_obj_compare_value)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &cqs_obj_compare_value, sizeof(cqs_obj_compare_value));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(cqs_obj_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 debugcopy_dst_size)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(debugcopy_dst_size)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &debugcopy_dst_size, sizeof(debugcopy_dst_size));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(map_import_buf_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(map_import_buf_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr_dest,
+ u64 jit_alloc_va_pages,
+ u64 jit_alloc_commit_pages,
+ u64 jit_alloc_extent,
+ u32 jit_alloc_jit_id,
+ u32 jit_alloc_bin_id,
+ u32 jit_alloc_max_allocations,
+ u32 jit_alloc_flags,
+ u32 jit_alloc_usage_id)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_alloc_gpu_alloc_addr_dest)
+ + sizeof(jit_alloc_va_pages)
+ + sizeof(jit_alloc_commit_pages)
+ + sizeof(jit_alloc_extent)
+ + sizeof(jit_alloc_jit_id)
+ + sizeof(jit_alloc_bin_id)
+ + sizeof(jit_alloc_max_allocations)
+ + sizeof(jit_alloc_flags)
+ + sizeof(jit_alloc_usage_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_gpu_alloc_addr_dest, sizeof(jit_alloc_gpu_alloc_addr_dest));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_va_pages, sizeof(jit_alloc_va_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_commit_pages, sizeof(jit_alloc_commit_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_extent, sizeof(jit_alloc_extent));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_bin_id, sizeof(jit_alloc_bin_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_max_allocations, sizeof(jit_alloc_max_allocations));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_flags, sizeof(jit_alloc_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_usage_id, sizeof(jit_alloc_usage_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u32 jit_alloc_jit_id)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_alloc_jit_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr,
+ u64 jit_alloc_mmu_flags,
+ u64 jit_alloc_pages_allocated)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_alloc_gpu_alloc_addr)
+ + sizeof(jit_alloc_mmu_flags)
+ + sizeof(jit_alloc_pages_allocated)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_gpu_alloc_addr, sizeof(jit_alloc_gpu_alloc_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_mmu_flags, sizeof(jit_alloc_mmu_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_pages_allocated, sizeof(jit_alloc_pages_allocated));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+/* clang-format on */
diff --git a/mali_kbase/mali_kbase_tracepoints.h b/mali_kbase/mali_kbase_tracepoints.h
new file mode 100644
index 0000000..6447bad
--- /dev/null
+++ b/mali_kbase/mali_kbase_tracepoints.h
@@ -0,0 +1,2358 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * DO NOT EDIT.
+ */
+
+#if !defined(_KBASE_TRACEPOINTS_H)
+#define _KBASE_TRACEPOINTS_H
+
+/* Tracepoints are abstract callbacks notifying that some important
+ * software or hardware event has happened.
+ *
+ * In this particular implementation, each tracepoint results in a MIPE
+ * timeline event and, in some cases, also fires an ftrace event
+ * (a.k.a. a Gator event; see details below).
+ */
+
+#include "mali_kbase.h"
+#include "mali_kbase_gator.h"
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+
+/* clang-format off */
+
+struct kbase_tlstream;
+
+extern const size_t __obj_stream_offset;
+extern const size_t __aux_stream_offset;
+
+/* This macro dispatches a kbase_tlstream from
+ * a kbase_device instance. Only AUX or OBJ
+ * streams can be dispatched. It is aware of
+ * the kbase_timeline binary representation and
+ * relies on the offset variables
+ * __obj_stream_offset and __aux_stream_offset.
+ */
+#define __TL_DISPATCH_STREAM(kbdev, stype) \
+ ((struct kbase_tlstream *) \
+ ((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
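The dispatch macro above is plain byte-offset arithmetic: the caller never sees the kbase_timeline layout, only the exported per-stream offsets. A minimal user-space sketch of the same technique is shown below; the struct layout, type names and offsets are invented for illustration and do not reflect the real kbase_timeline layout.

```c
/* Sketch of offset-based stream dispatch: the container is opaque to
 * the caller, which only knows exported byte offsets. The types and
 * layout here are illustrative assumptions. */
#include <stddef.h>
#include <stdio.h>

struct tlstream { const char *name; };

struct timeline {
	struct tlstream obj_stream;
	struct tlstream aux_stream;
};

/* Exported the same way __obj_stream_offset/__aux_stream_offset are. */
const size_t obj_stream_offset = offsetof(struct timeline, obj_stream);
const size_t aux_stream_offset = offsetof(struct timeline, aux_stream);

#define DISPATCH_STREAM(tl, stype) \
	((struct tlstream *)((char *)(tl) + stype ## _stream_offset))

int main(void)
{
	struct timeline tl = { { "obj" }, { "aux" } };

	printf("%s\n", DISPATCH_STREAM(&tl, obj)->name);  /* prints "obj" */
	printf("%s\n", DISPATCH_STREAM(&tl, aux)->name);  /* prints "aux" */
	return 0;
}
```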
+
+struct tp_desc;
+
+/* Descriptors of timeline messages transmitted in object events stream. */
+extern const char *obj_desc_header;
+extern const size_t obj_desc_header_size;
+/* Descriptors of timeline messages transmitted in auxiliary events stream. */
+extern const char *aux_desc_header;
+extern const size_t aux_desc_header_size;
+
+#define TL_ATOM_STATE_IDLE 0
+#define TL_ATOM_STATE_READY 1
+#define TL_ATOM_STATE_DONE 2
+#define TL_ATOM_STATE_POSTED 3
+
+#define TL_JS_EVENT_START GATOR_JOB_SLOT_START
+#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
+#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED
+
+#define TLSTREAM_ENABLED (1u << 31)
+
+void __kbase_tlstream_tl_new_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 ctx_nr,
+ u32 tgid);
+void __kbase_tlstream_tl_new_gpu(
+ struct kbase_tlstream *stream,
+ const void *gpu,
+ u32 gpu_id,
+ u32 core_count);
+void __kbase_tlstream_tl_new_lpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ u32 lpu_nr,
+ u32 lpu_fn);
+void __kbase_tlstream_tl_new_atom(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 atom_nr);
+void __kbase_tlstream_tl_new_as(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u32 as_nr);
+void __kbase_tlstream_tl_del_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx);
+void __kbase_tlstream_tl_del_atom(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_lifelink_lpu_gpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ const void *gpu);
+void __kbase_tlstream_tl_lifelink_as_gpu(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *gpu);
+void __kbase_tlstream_tl_ret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu);
+void __kbase_tlstream_tl_ret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx);
+void __kbase_tlstream_tl_ret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu,
+ const char *attrib_match_list);
+void __kbase_tlstream_tl_nret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu);
+void __kbase_tlstream_tl_nret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx);
+void __kbase_tlstream_tl_nret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu);
+void __kbase_tlstream_tl_ret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx);
+void __kbase_tlstream_tl_nret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx);
+void __kbase_tlstream_tl_ret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space);
+void __kbase_tlstream_tl_nret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space);
+void __kbase_tlstream_tl_attrib_atom_config(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 descriptor,
+ u64 affinity,
+ u32 config);
+void __kbase_tlstream_tl_attrib_atom_priority(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 prio);
+void __kbase_tlstream_tl_attrib_atom_state(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 state);
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_attrib_atom_jit(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 edit_addr,
+ u64 new_addr,
+ u64 jit_flags,
+ u32 j_id);
+void __kbase_tlstream_tl_jit_usedpages(
+ struct kbase_tlstream *stream,
+ u64 used_pages,
+ u32 j_id);
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 va_pgs,
+ u64 com_pgs,
+ u64 extent,
+ u32 j_id,
+ u32 bin_id,
+ u32 max_allocs,
+ u32 flags,
+ u32 usg_id);
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 j_id);
+void __kbase_tlstream_tl_attrib_as_config(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u64 transtab,
+ u64 memattr,
+ u64 transcfg);
+void __kbase_tlstream_tl_event_lpu_softstop(
+ struct kbase_tlstream *stream,
+ const void *lpu);
+void __kbase_tlstream_tl_event_atom_softstop_ex(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_event_atom_softstop_issue(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_start(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_end(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_jd_gpu_soft_reset(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_pm_state(
+ struct kbase_tlstream *stream,
+ u32 core_type,
+ u64 core_state_bitset);
+void __kbase_tlstream_aux_pagefault(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 as_nr,
+ u64 page_cnt_change);
+void __kbase_tlstream_aux_pagesalloc(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u64 page_cnt);
+void __kbase_tlstream_aux_devfreq_target(
+ struct kbase_tlstream *stream,
+ u64 target_freq);
+void __kbase_tlstream_aux_protected_enter_start(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_protected_enter_end(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_protected_leave_start(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_protected_leave_end(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_jit_stats(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 bid,
+ u32 max_allocs,
+ u32 allocs,
+ u32 va_pages,
+ u32 ph_pages);
+void __kbase_tlstream_aux_event_job_slot(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 slot_nr,
+ u32 atom_nr,
+ u32 event);
+void __kbase_tlstream_tl_new_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *ctx,
+ u32 kcpuq_num_pending_cmds);
+void __kbase_tlstream_tl_ret_kcpuqueue_ctx(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *ctx);
+void __kbase_tlstream_tl_del_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *ctx);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 fence);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 fence);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr,
+ u32 cqs_obj_compare_value);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 debugcopy_dst_size);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr_dest,
+ u64 jit_alloc_va_pages,
+ u64 jit_alloc_commit_pages,
+ u64 jit_alloc_extent,
+ u32 jit_alloc_jit_id,
+ u32 jit_alloc_bin_id,
+ u32 jit_alloc_max_allocations,
+ u32 jit_alloc_flags,
+ u32 jit_alloc_usage_id);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u32 jit_alloc_jit_id);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr,
+ u64 jit_alloc_mmu_flags,
+ u64 jit_alloc_pages_allocated);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+
+struct kbase_tlstream;
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_CTX -
+ * object ctx is created
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @ctx_nr: Kernel context number
+ * @tgid: Thread Group Id
+ */
+#define KBASE_TLSTREAM_TL_NEW_CTX( \
+ kbdev, \
+ ctx, \
+ ctx_nr, \
+ tgid \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx, ctx_nr, tgid); \
+ } while (0)
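Each of these wrappers first reads the atomically updated enable word and only calls the writer when the TLSTREAM_ENABLED bit is set, so a disabled timeline costs a single atomic read per call site. Below is a hedged user-space sketch of that gating pattern; the flag variable and the emit function are stand-ins for illustration, not driver API.

```c
/* Sketch of the enable-bit gating used by the KBASE_TLSTREAM_* macros.
 * The flag variable and emit_new_ctx() are illustrative stand-ins. */
#include <stdatomic.h>
#include <stdio.h>

#define STREAM_ENABLED (1u << 31)

static atomic_uint timeline_is_enabled;

static void emit_new_ctx(unsigned int ctx_nr, unsigned int tgid)
{
	printf("new ctx %u (tgid %u)\n", ctx_nr, tgid);
}

#define TRACE_NEW_CTX(ctx_nr, tgid) \
	do { \
		unsigned int enabled = atomic_load(&timeline_is_enabled); \
		if (enabled & STREAM_ENABLED) \
			emit_new_ctx(ctx_nr, tgid); \
	} while (0)

int main(void)
{
	TRACE_NEW_CTX(1, 42);                 /* dropped: tracing disabled */
	atomic_fetch_or(&timeline_is_enabled, STREAM_ENABLED);
	TRACE_NEW_CTX(2, 42);                 /* emitted */
	return 0;
}
```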
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_GPU -
+ * object gpu is created
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ * @gpu_id: Identifier of the GPU
+ * @core_count: Number of cores this GPU hosts
+ */
+#define KBASE_TLSTREAM_TL_NEW_GPU( \
+ kbdev, \
+ gpu, \
+ gpu_id, \
+ core_count \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_gpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ gpu, gpu_id, core_count); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_LPU -
+ * object lpu is created
+ *
+ * @kbdev: Kbase device
+ * @lpu: Name of the Logical Processing Unit object
+ * @lpu_nr: Sequential number assigned to the newly created LPU
+ * @lpu_fn: Property describing functional abilities of this LPU
+ */
+#define KBASE_TLSTREAM_TL_NEW_LPU( \
+ kbdev, \
+ lpu, \
+ lpu_nr, \
+ lpu_fn \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ lpu, lpu_nr, lpu_fn); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_ATOM -
+ * object atom is created
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @atom_nr: Sequential number of an atom
+ */
+#define KBASE_TLSTREAM_TL_NEW_ATOM( \
+ kbdev, \
+ atom, \
+ atom_nr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_atom( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, atom_nr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_AS -
+ * address space object is created
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @as_nr: Address space number
+ */
+#define KBASE_TLSTREAM_TL_NEW_AS( \
+ kbdev, \
+ address_space, \
+ as_nr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_as( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, as_nr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_CTX -
+ * context is destroyed
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_DEL_CTX( \
+ kbdev, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_del_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_ATOM -
+ * atom is destroyed
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_DEL_ATOM( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_del_atom( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU -
+ * lpu is deleted with gpu
+ *
+ * @kbdev: Kbase device
+ * @lpu: Name of the Logical Processing Unit object
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU( \
+ kbdev, \
+ lpu, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_lifelink_lpu_gpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ lpu, gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_LIFELINK_AS_GPU -
+ * address space is deleted with gpu
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU( \
+ kbdev, \
+ address_space, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_lifelink_as_gpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_CTX_LPU -
+ * context is retained by lpu
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_RET_CTX_LPU( \
+ kbdev, \
+ ctx, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_ctx_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx, lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_CTX -
+ * atom is retained by context
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_CTX( \
+ kbdev, \
+ atom, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_atom_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_LPU -
+ * atom is retained by lpu
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @lpu: Name of the Logical Processing Unit object
+ * @attrib_match_list: List containing match operator attributes
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_LPU( \
+ kbdev, \
+ atom, \
+ lpu, \
+ attrib_match_list \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_atom_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, lpu, attrib_match_list); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_CTX_LPU -
+ * context is released by lpu
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_NRET_CTX_LPU( \
+ kbdev, \
+ ctx, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_ctx_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx, lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_CTX -
+ * atom is released by context
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX( \
+ kbdev, \
+ atom, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_atom_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_LPU -
+ * atom is released by lpu
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU( \
+ kbdev, \
+ atom, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_atom_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_AS_CTX -
+ * address space is retained by context
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_AS_CTX( \
+ kbdev, \
+ address_space, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_as_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_AS_CTX -
+ * address space is released by context
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_AS_CTX( \
+ kbdev, \
+ address_space, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_as_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_AS -
+ * atom is retained by address space
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @address_space: Name of the address space object
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_AS( \
+ kbdev, \
+ atom, \
+ address_space \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_atom_as( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, address_space); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_AS -
+ * atom is released by address space
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @address_space: Name of the address space object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_AS( \
+ kbdev, \
+ atom, \
+ address_space \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_atom_as( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, address_space); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG -
+ * atom job slot attributes
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @descriptor: Job descriptor address
+ * @affinity: Job affinity
+ * @config: Job config
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG( \
+ kbdev, \
+ atom, \
+ descriptor, \
+ affinity, \
+ config \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_config( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, descriptor, affinity, config); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY -
+ * atom priority
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @prio: Atom priority
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY( \
+ kbdev, \
+ atom, \
+ prio \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_tl_attrib_atom_priority( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, prio); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE -
+ * atom state
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @state: Atom state
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE( \
+ kbdev, \
+ atom, \
+ state \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_tl_attrib_atom_state( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, state); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED -
+ * atom caused priority change
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_tl_attrib_atom_prioritized( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT -
+ * jit done for atom
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @edit_addr: Address edited by jit
+ * @new_addr: Address placed into the edited location
+ * @jit_flags: Flags defining the properties of the memory region
+ * @j_id: Unique ID provided by the caller; this is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( \
+ kbdev, \
+ atom, \
+ edit_addr, \
+ new_addr, \
+ jit_flags, \
+ j_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_jit( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, edit_addr, new_addr, jit_flags, j_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_JIT_USEDPAGES -
+ * used pages for jit
+ *
+ * @kbdev: Kbase device
+ * @used_pages: Number of pages used for jit
+ * @j_id: Unique ID provided by the caller; this is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_JIT_USEDPAGES( \
+ kbdev, \
+ used_pages, \
+ j_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_jit_usedpages( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ used_pages, j_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO -
+ * Information about JIT allocations
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @va_pgs: The minimum number of virtual pages required
+ * @com_pgs: The minimum number of physical pages which
+ * should back the allocation.
+ * @extent: Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @j_id: Unique ID provided by the caller; this is used
+ * to pair allocation and free requests.
+ * @bin_id: The JIT allocation bin, used in conjunction with
+ * max_allocations to limit the number of each
+ * type of JIT allocation.
+ * @max_allocs: Maximum allocations allowed in this bin.
+ * @flags: Flags specifying the special requirements for
+ * the JIT allocation.
+ * @usg_id: A hint about which allocation should be reused.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO( \
+ kbdev, \
+ atom, \
+ va_pgs, \
+ com_pgs, \
+ extent, \
+ j_id, \
+ bin_id, \
+ max_allocs, \
+ flags, \
+ usg_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_jitallocinfo( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, va_pgs, com_pgs, extent, j_id, bin_id, max_allocs, flags, usg_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO -
+ * Information about JIT frees
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @j_id: Unique ID provided by the caller; this is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO( \
+ kbdev, \
+ atom, \
+ j_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, j_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG -
+ * address space attributes
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @transtab: Configuration of the TRANSTAB register
+ * @memattr: Configuration of the MEMATTR register
+ * @transcfg: Configuration of the TRANSCFG register (or zero if not present)
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG( \
+ kbdev, \
+ address_space, \
+ transtab, \
+ memattr, \
+ transcfg \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_as_config( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, transtab, memattr, transcfg); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP -
+ * softstop event on given lpu
+ *
+ * @kbdev: Kbase device
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( \
+ kbdev, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_lpu_softstop( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX -
+ * atom softstopped
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softstop_ex( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE -
+ * atom softstop issued
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softstop_issue( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START -
+ * atom soft job has started
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softjob_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END -
+ * atom soft job has completed
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softjob_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_JD_GPU_SOFT_RESET -
+ * gpu soft reset
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_jd_gpu_soft_reset( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PM_STATE -
+ * PM state
+ *
+ * @kbdev: Kbase device
+ * @core_type: Core type (shader, tiler, l2 cache, l3 cache)
+ * @core_state_bitset: 64-bit bitmask reporting the power state of the cores
+ * (1-ON, 0-OFF)
+ */
+#define KBASE_TLSTREAM_AUX_PM_STATE( \
+ kbdev, \
+ core_type, \
+ core_state_bitset \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pm_state( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ core_type, core_state_bitset); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGEFAULT -
+ * Page fault
+ *
+ * @kbdev: Kbase device
+ * @ctx_nr: Kernel context number
+ * @as_nr: Address space number
+ * @page_cnt_change: Number of pages to be added
+ */
+#define KBASE_TLSTREAM_AUX_PAGEFAULT( \
+ kbdev, \
+ ctx_nr, \
+ as_nr, \
+ page_cnt_change \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagefault( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, as_nr, page_cnt_change); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGESALLOC -
+ * Total alloc pages change
+ *
+ * @kbdev: Kbase device
+ * @ctx_nr: Kernel context number
+ * @page_cnt: Number of pages used by the context
+ */
+#define KBASE_TLSTREAM_AUX_PAGESALLOC( \
+ kbdev, \
+ ctx_nr, \
+ page_cnt \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagesalloc( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, page_cnt); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET -
+ * New device frequency target
+ *
+ * @kbdev: Kbase device
+ * @target_freq: New target frequency
+ */
+#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET( \
+ kbdev, \
+ target_freq \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_devfreq_target( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ target_freq); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START -
+ * enter protected mode start
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_enter_start( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END -
+ * enter protected mode end
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_enter_end( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START -
+ * leave protected mode start
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_leave_start( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END -
+ * leave protected mode end
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_leave_end( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_JIT_STATS -
+ * per-bin JIT statistics
+ *
+ * @kbdev: Kbase device
+ * @ctx_nr: Kernel context number
+ * @bid: JIT bin id
+ * @max_allocs: Maximum allocations allowed in this bin.
+ * @allocs: Number of active allocations in this bin
+ * @va_pages: Number of virtual pages allocated in this bin
+ * @ph_pages: Number of physical pages allocated in this bin
+ */
+#define KBASE_TLSTREAM_AUX_JIT_STATS( \
+ kbdev, \
+ ctx_nr, \
+ bid, \
+ max_allocs, \
+ allocs, \
+ va_pages, \
+ ph_pages \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_jit_stats( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT -
+ * event on a given job slot
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @slot_nr: Job slot number
+ * @atom_nr: Sequential number of an atom
+ * @event: Event type. One of TL_JS_EVENT values
+ */
+#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT( \
+ kbdev, \
+ ctx, \
+ slot_nr, \
+ atom_nr, \
+ event \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_event_job_slot( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx, slot_nr, atom_nr, event); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_KCPUQUEUE -
+ * New KCPU Queue
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @ctx: Name of the context object
+ * @kcpuq_num_pending_cmds: Number of commands already enqueued
+ * in the KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_NEW_KCPUQUEUE( \
+ kbdev, \
+ kcpu_queue, \
+ ctx, \
+ kcpuq_num_pending_cmds \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_kcpuqueue( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, ctx, kcpuq_num_pending_cmds); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX -
+ * Context retains KCPU Queue
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX( \
+ kbdev, \
+ kcpu_queue, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_kcpuqueue_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_KCPUQUEUE -
+ * Delete KCPU Queue
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_DEL_KCPUQUEUE( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_del_kcpuqueue( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX -
+ * Context releases KCPU Queue
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX( \
+ kbdev, \
+ kcpu_queue, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_kcpuqueue_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL -
+ * KCPU Queue enqueues Signal on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @fence: Fence object handle
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
+ kbdev, \
+ kcpu_queue, \
+ fence \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, fence); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT -
+ * KCPU Queue enqueues Wait on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @fence: Fence object handle
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
+ kbdev, \
+ kcpu_queue, \
+ fence \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, fence); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @cqs_obj_gpu_addr: CQS Object GPU ptr
+ * @cqs_obj_compare_value: Semaphore value that should be met or exceeded
+ * for the WAIT to pass
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+ kbdev, \
+ kcpu_queue, \
+ cqs_obj_gpu_addr, \
+ cqs_obj_compare_value \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, cqs_obj_gpu_addr, cqs_obj_compare_value); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * End array of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
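The ARRAY_BEGIN/ARRAY_ITEM/ARRAY_END variants are intended to be emitted as a bracket around the per-object items of one command. A hypothetical sketch (queue pointer, GPU addresses and compare values are assumptions) for tracing a CQS WAIT command that waits on two sync objects:

	/* Sketch: one CQS WAIT command covering two sync objects. */
	KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT(kbdev, queue);
	KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT(kbdev, queue, obj0_gpu_addr, 1);
	KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT(kbdev, queue, obj1_gpu_addr, 2);
	KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT(kbdev, queue);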
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * Array item of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @cqs_obj_gpu_addr: CQS Object GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET( \
+ kbdev, \
+ kcpu_queue, \
+ cqs_obj_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, cqs_obj_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * End array of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * Begin array of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * Array item of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @debugcopy_dst_size: Debug Copy destination size
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+ kbdev, \
+ kcpu_queue, \
+ debugcopy_dst_size \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, debugcopy_dst_size); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * End array of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT -
+ * KCPU Queue enqueues Map Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @map_import_buf_gpu_addr: Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
+ kbdev, \
+ kcpu_queue, \
+ map_import_buf_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, map_import_buf_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT -
+ * KCPU Queue enqueues Unmap Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @map_import_buf_gpu_addr: Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
+ kbdev, \
+ kcpu_queue, \
+ map_import_buf_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, map_import_buf_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * Begin array of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * Array item of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_alloc_gpu_alloc_addr_dest: Destination GPU virtual address to
+ *      which the JIT-allocated GPU virtual address is written
+ * @jit_alloc_va_pages: The minimum number of virtual pages required
+ * @jit_alloc_commit_pages: The minimum number of physical pages which
+ * should back the allocation
+ * @jit_alloc_extent: Granularity of physical pages to grow the allocation
+ * by during a fault
+ * @jit_alloc_jit_id: Unique ID provided by the caller; this is used
+ *      to pair allocation and free requests. Zero is not a valid value.
+ * @jit_alloc_bin_id: The JIT allocation bin, used in conjunction with
+ * max_allocations to limit the number of each type of JIT allocation
+ * @jit_alloc_max_allocations: The maximum number of allocations
+ * allowed within the bin specified by bin_id. Should be the same for all
+ * JIT allocations within the same bin.
+ * @jit_alloc_flags: Flags specifying the special requirements for the
+ * JIT allocation
+ * @jit_alloc_usage_id: A hint about which allocation should be
+ * reused. The kernel should attempt to use a previous allocation with the same
+ * usage_id
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+ kbdev, \
+ kcpu_queue, \
+ jit_alloc_gpu_alloc_addr_dest, \
+ jit_alloc_va_pages, \
+ jit_alloc_commit_pages, \
+ jit_alloc_extent, \
+ jit_alloc_jit_id, \
+ jit_alloc_bin_id, \
+ jit_alloc_max_allocations, \
+ jit_alloc_flags, \
+ jit_alloc_usage_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_alloc_gpu_alloc_addr_dest, jit_alloc_va_pages, jit_alloc_commit_pages, jit_alloc_extent, jit_alloc_jit_id, jit_alloc_bin_id, jit_alloc_max_allocations, jit_alloc_flags, jit_alloc_usage_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * End array of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * Begin array of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * Array item of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_alloc_jit_id: Unique ID provided by the caller; this is used
+ *      to pair allocation and free requests. Zero is not a valid value.
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+ kbdev, \
+ kcpu_queue, \
+ jit_alloc_jit_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_alloc_jit_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * End array of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START -
+ * KCPU Queue starts a Signal on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END -
+ * KCPU Queue ends a Signal on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START -
+ * KCPU Queue starts a Wait on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END -
+ * KCPU Queue ends a Wait on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START -
+ * KCPU Queue starts a Wait on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END -
+ * KCPU Queue ends a Wait on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START -
+ * KCPU Queue starts a Set on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END -
+ * KCPU Queue ends a Set on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START -
+ * KCPU Queue starts an array of Debug Copies
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END -
+ * KCPU Queue ends an array of Debug Copies
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START -
+ * KCPU Queue starts a Map Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END -
+ * KCPU Queue ends a Map Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START -
+ * KCPU Queue starts an Unmap Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END -
+ * KCPU Queue ends an Unmap Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START -
+ * KCPU Queue starts an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * Begin array of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * Array item of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address
+ * @jit_alloc_mmu_flags: The MMU flags for the JIT allocation
+ * @jit_alloc_pages_allocated: The number of pages allocated by the JIT
+ * allocation
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+ kbdev, \
+ kcpu_queue, \
+ jit_alloc_gpu_alloc_addr, \
+ jit_alloc_mmu_flags, \
+ jit_alloc_pages_allocated \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_alloc_gpu_alloc_addr, jit_alloc_mmu_flags, jit_alloc_pages_allocated); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * End array of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START -
+ * KCPU Queue starts an array of JIT Frees
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER -
+ * KCPU Queue executes an Error Barrier
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+
+/* Gator tracepoints are hooked into the TLSTREAM interface.
+ * When any of the following tracepoints is called, the
+ * corresponding Gator tracepoint is called as well.
+ */
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+/* `event` is one of the TL_JS_EVENT values here.
+ * The TL_JS_EVENT values are guaranteed to match
+ * the corresponding GATOR_JOB_SLOT values.
+ */
+#undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT
+#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
+ context, slot_nr, atom_nr, event) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ kbase_trace_mali_job_slots_event(kbdev->id, \
+ GATOR_MAKE_EVENT(event, slot_nr), \
+ context, (u8) atom_nr); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_event_job_slot( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ context, slot_nr, atom_nr, event); \
+ } while (0)
+
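As an illustration (not part of the patch; the TL_JS_EVENT_START name and the atom number variable are assumed from the matching GATOR_JOB_SLOT values mentioned above), a single job-start report on slot 1 reaches both consumers:

	/* Sketch: one call feeds Gator unconditionally, and the timeline
	 * stream only when timeline collection is enabled. */
	KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, kctx, 1, atom_number, TL_JS_EVENT_START);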
+#undef KBASE_TLSTREAM_AUX_PM_STATE
+#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ kbase_trace_mali_pm_status(kbdev->id, \
+ core_type, state); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pm_state( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ core_type, state); \
+ } while (0)
+
+#undef KBASE_TLSTREAM_AUX_PAGEFAULT
+#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
+ ctx_nr, as_nr, page_cnt_change) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ kbase_trace_mali_page_fault_insert_pages(kbdev->id, \
+ as_nr, \
+ page_cnt_change); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagefault( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, as_nr, page_cnt_change); \
+ } while (0)
+
+/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
+ * We stream the total number of pages allocated for `kbdev` rather
+ * than `page_cnt`, which is per-context.
+ */
+#undef KBASE_TLSTREAM_AUX_PAGESALLOC
+#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ u32 global_pages_count = \
+ atomic_read(&kbdev->memdev.used_pages); \
+ \
+ kbase_trace_mali_total_alloc_pages_change(kbdev->id, \
+ global_pages_count); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagesalloc( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, page_cnt); \
+ } while (0)
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
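To make the per-context versus device-wide distinction concrete, a hypothetical caller sketch (the context id field and page-count variable are assumptions): the Gator hook always reports the global kbdev->memdev.used_pages figure, while the timeline stream still records the per-context count that was passed in.

	/* Sketch: report a context's updated page count after a mapping grows. */
	KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, kctx->id, new_ctx_page_count);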
+
+/* clang-format on */
+#endif
diff --git a/mali_kbase/mali_midg_regmap.h b/mali_kbase/mali_midg_regmap.h
index 223ce6a..45f7c14 100644
--- a/mali_kbase/mali_midg_regmap.h
+++ b/mali_kbase/mali_midg_regmap.h
@@ -52,6 +52,8 @@
#define GPU_FAULTADDRESS_LO 0x040 /* (RO) GPU exception fault address, low word */
#define GPU_FAULTADDRESS_HI 0x044 /* (RO) GPU exception fault address, high word */
+#define L2_CONFIG 0x048 /* (RW) Level 2 cache configuration */
+
#define PWR_KEY 0x050 /* (WO) Power manager key register */
#define PWR_OVERRIDE0 0x054 /* (RW) Power manager override settings */
#define PWR_OVERRIDE1 0x058 /* (RW) Power manager override settings */
@@ -413,23 +415,6 @@
/* L2_MMU_CONFIG register */
#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT (23)
#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT (24)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
-
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT (26)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
-#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
-
-#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT (12)
-#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
-
-#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT (15)
-#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
/* End L2_MMU_CONFIG register */
@@ -460,10 +445,18 @@
#define SC_TLS_HASH_ENABLE (1ul << 17)
#define SC_LS_ATTR_CHECK_DISABLE (1ul << 18)
#define SC_ENABLE_TEXGRD_FLAGS (1ul << 25)
+#define SC_VAR_ALGORITHM (1ul << 29)
/* End SHADER_CONFIG register */
/* TILER_CONFIG register */
#define TC_CLOCK_GATE_OVERRIDE (1ul << 0)
/* End TILER_CONFIG register */
+/* L2_CONFIG register */
+#define L2_CONFIG_SIZE_SHIFT 16
+#define L2_CONFIG_SIZE_MASK (0xFFul << L2_CONFIG_SIZE_SHIFT)
+#define L2_CONFIG_HASH_SHIFT 24
+#define L2_CONFIG_HASH_MASK (0xFFul << L2_CONFIG_HASH_SHIFT)
+/* End L2_CONFIG register */
+
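Given the field layout above, decoding the new L2_CONFIG register is a mask-and-shift; a minimal sketch (the helper names are assumptions, not part of the patch):

	/* Sketch: extract the cache-size and hash fields from an L2_CONFIG value. */
	static inline u32 l2_config_get_size(u32 l2_config)
	{
		return (l2_config & L2_CONFIG_SIZE_MASK) >> L2_CONFIG_SIZE_SHIFT;
	}

	static inline u32 l2_config_get_hash(u32 l2_config)
	{
		return (l2_config & L2_CONFIG_HASH_MASK) >> L2_CONFIG_HASH_SHIFT;
	}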
#endif /* _MIDGARD_REGMAP_H_ */
diff --git a/mali_kbase/mali_midg_regmap_jm.h b/mali_kbase/mali_midg_regmap_jm.h
index f03c8a6..69996e2 100644
--- a/mali_kbase/mali_midg_regmap_jm.h
+++ b/mali_kbase/mali_midg_regmap_jm.h
@@ -20,8 +20,8 @@
*
*/
-#ifndef _MIDGARD_REGMAP_JM_H_
-#define _MIDGARD_REGMAP_JM_H_
+#ifndef _MIDG_REGMAP_JM_H_
+#define _MIDG_REGMAP_JM_H_
/* GPU control registers */
@@ -195,4 +195,4 @@
#define JM_IDVS_GROUP_SIZE_SHIFT (16)
#define JM_MAX_IDVS_GROUP_SIZE (0x3F)
-#endif /* _MIDGARD_REGMAP_JM_H_ */
+#endif /* _MIDG_REGMAP_JM_H_ */
diff --git a/mali_kbase/thirdparty/mali_kbase_mmap.c b/mali_kbase/thirdparty/mali_kbase_mmap.c
index 9cb0465..f266d8e 100644
--- a/mali_kbase/thirdparty/mali_kbase_mmap.c
+++ b/mali_kbase/thirdparty/mali_kbase_mmap.c
@@ -35,7 +35,7 @@
/* mali_kbase_mmap.c
*
* This file contains Linux specific implementation of
- * kbase_get_unmapped_area() interface.
+ * kbase_context_get_unmapped_area() interface.
*/
@@ -253,11 +253,10 @@ check_current:
* simplified slightly. Modifications come from the fact that some values
* about the memory area are known in advance.
*/
-unsigned long kbase_get_unmapped_area(struct file *filp,
+unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
const unsigned long addr, const unsigned long len,
const unsigned long pgoff, const unsigned long flags)
{
- struct kbase_context *kctx = filp->private_data;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
unsigned long align_offset = 0;
@@ -337,8 +336,8 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
kbase_gpu_vm_unlock(kctx);
#ifndef CONFIG_64BIT
} else {
- return current->mm->get_unmapped_area(filp, addr, len, pgoff,
- flags);
+ return current->mm->get_unmapped_area(
+ kctx->filp, addr, len, pgoff, flags);
#endif
}