author     Sidath Senanayake <sidaths@google.com>  2020-02-03 12:15:52 +0100
committer  Sidath Senanayake <sidaths@google.com>  2020-02-03 12:15:52 +0100
commit     7ed9a0b8efa3abac35833b55f8012a2a85cc67a1 (patch)
tree       0c297b2906c620d9cab1dd5f06db6909941cd843
parent     1f3b3ea0d31f66a6ec806e8f6ac5ebee44cb5923 (diff)
Mali Valhall DDK r22p0 KMD
Provenance: 6425105ec (collaborate/EAC/v_r22p0)
VX504X08X-BU-00000-r22p0-01rel0 - Android DDK
VX504X08X-BU-60000-r22p0-01rel0 - Android Document Bundle

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: I257ad9ba30a61008e6425d63700356f16009ead7
-rw-r--r--  mali_kbase/Kbuild | 28
-rw-r--r--  mali_kbase/backend/gpu/Kbuild | 12
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_device_hw.c | 23
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c | 1
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_instr_backend.c | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_as.c | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_defs.h | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_hw.c | 6
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_internal.h | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_rb.c | 4
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_js_backend.c | 1
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c | 101
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_backend.c | 8
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c | 37
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_policy.c | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_shader_states.h | 2
-rw-r--r--  mali_kbase/build.bp | 15
-rw-r--r--  mali_kbase/device/backend/mali_kbase_device_jm.c | 144
-rw-r--r--  mali_kbase/device/mali_kbase_device.c (renamed from mali_kbase/mali_kbase_device.c) | 138
-rw-r--r--  mali_kbase/device/mali_kbase_device.h | 64
-rw-r--r--  mali_kbase/device/mali_kbase_device_internal.h | 64
-rw-r--r--  mali_kbase/gpu/backend/mali_kbase_gpu_fault_jm.c | 181
-rw-r--r--  mali_kbase/gpu/backend/mali_kbase_gpu_regmap_jm.h (renamed from mali_kbase/mali_midg_regmap_jm.h) | 68
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu.h (renamed from mali_kbase/platform/meson/mali_kbase_config_meson.c) | 26
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_coherency.h (renamed from mali_kbase/mali_midg_coherency.h) | 8
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_fault.h (renamed from mali_kbase/platform/meson/mali_kbase_config_platform.h) | 32
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_id.h (renamed from mali_kbase/mali_kbase_gpu_id.h) | 4
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_regmap.h (renamed from mali_kbase/mali_midg_regmap.h) | 55
-rw-r--r--  mali_kbase/mali_base_hwconfig_features.h | 45
-rw-r--r--  mali_kbase/mali_base_hwconfig_issues.h | 62
-rw-r--r--  mali_kbase/mali_base_kernel.h | 4
-rw-r--r--  mali_kbase/mali_kbase.h | 74
-rw-r--r--  mali_kbase/mali_kbase_as_fault_debugfs.c | 5
-rw-r--r--  mali_kbase/mali_kbase_config_defaults.h | 31
-rw-r--r--  mali_kbase/mali_kbase_context.c | 14
-rw-r--r--  mali_kbase/mali_kbase_core_linux.c | 716
-rw-r--r--  mali_kbase/mali_kbase_cs_experimental.h | 72
-rw-r--r--  mali_kbase/mali_kbase_defs.h | 18
-rw-r--r--  mali_kbase/mali_kbase_event.c | 12
-rw-r--r--  mali_kbase/mali_kbase_gpu_memory_debugfs.c | 5
-rw-r--r--  mali_kbase/mali_kbase_gpuprops.c | 29
-rw-r--r--  mali_kbase/mali_kbase_gpuprops.h | 22
-rw-r--r--  mali_kbase/mali_kbase_hw.c | 24
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_jm.h | 5
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_pm.h | 20
-rw-r--r--  mali_kbase/mali_kbase_jm.c | 3
-rw-r--r--  mali_kbase/mali_kbase_jm.h | 2
-rw-r--r--  mali_kbase/mali_kbase_js.c | 1
-rw-r--r--  mali_kbase/mali_kbase_js.h | 3
-rw-r--r--  mali_kbase/mali_kbase_mem.c | 46
-rw-r--r--  mali_kbase/mali_kbase_mem.h | 26
-rw-r--r--  mali_kbase/mali_kbase_mmu.c | 173
-rw-r--r--  mali_kbase/mali_kbase_mmu_mode_aarch64.c | 2
-rw-r--r--  mali_kbase/mali_kbase_mmu_mode_lpae.c | 2
-rw-r--r--  mali_kbase/mali_kbase_pm.c | 4
-rw-r--r--  mali_kbase/mali_kbase_regs_history_debugfs.c | 4
-rw-r--r--  mali_kbase/mali_kbase_softjobs.c | 83
-rw-r--r--  mali_kbase/mali_kbase_sync.h | 3
-rw-r--r--  mali_kbase/mali_kbase_sync_common.c | 2
-rw-r--r--  mali_kbase/mali_kbase_tracepoints.c | 459
-rw-r--r--  mali_kbase/mali_kbase_tracepoints.h | 442
-rw-r--r--  mali_kbase/platform/meson/Kbuild | 24
-rw-r--r--  mali_kbase/platform/meson/mali_kbase_runtime_pm.c | 257
63 files changed, 1712 insertions(+), 2014 deletions(-)
diff --git a/mali_kbase/Kbuild b/mali_kbase/Kbuild
index d644fab..503d597 100644
--- a/mali_kbase/Kbuild
+++ b/mali_kbase/Kbuild
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -21,7 +21,7 @@
# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r21p0-01rel0"
+MALI_RELEASE_NAME ?= "r22p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
@@ -34,6 +34,7 @@ MALI_USE_CSF ?= 0
MALI_UNIT_TEST ?= 0
MALI_KERNEL_TEST_API ?= 0
MALI_COVERAGE ?= 0
+MALI_CS_EXPERIMENTAL ?= 0
CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
# Set up our defines, which will be passed to gcc
@@ -43,7 +44,8 @@ DEFINES = \
-DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
-DMALI_COVERAGE=$(MALI_COVERAGE) \
- -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\"
+ -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
+ -DMALI_CS_EXPERIMENTAL=$(MALI_CS_EXPERIMENTAL)
ifeq ($(KBUILD_EXTMOD),)
# in-tree
@@ -62,19 +64,16 @@ ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH)
subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
SRC := \
- mali_kbase_device.c \
+ device/mali_kbase_device.c \
mali_kbase_cache_policy.c \
mali_kbase_mem.c \
mali_kbase_mem_pool_group.c \
mali_kbase_mmu.c \
mali_kbase_native_mgm.c \
mali_kbase_ctx_sched.c \
- mali_kbase_jd.c \
- mali_kbase_jd_debugfs.c \
mali_kbase_jm.c \
mali_kbase_gpuprops.c \
mali_kbase_js.c \
- mali_kbase_js_ctx_attr.c \
mali_kbase_event.c \
mali_kbase_context.c \
mali_kbase_pm.c \
@@ -98,7 +97,6 @@ SRC := \
mali_kbase_mmu_mode_aarch64.c \
mali_kbase_disjoint_events.c \
mali_kbase_debug_mem_view.c \
- mali_kbase_debug_job_fault.c \
mali_kbase_smc.c \
mali_kbase_mem_pool.c \
mali_kbase_mem_pool_debugfs.c \
@@ -112,6 +110,20 @@ SRC := \
mali_kbase_regs_history_debugfs.c \
thirdparty/mali_kbase_mmap.c
+ifeq ($(MALI_USE_CSF),0)
+ SRC += \
+ mali_kbase_debug_job_fault.c \
+ mali_kbase_jd.c \
+ mali_kbase_jd_debugfs.c \
+ mali_kbase_js_ctx_attr.c \
+ device/backend/mali_kbase_device_jm.c \
+ gpu/backend/mali_kbase_gpu_fault_jm.c
+else
+ SRC += \
+ device/backend/mali_kbase_device_csf.c \
+ gpu/backend/mali_kbase_gpu_fault_csf.c
+
+endif
ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
SRC += mali_kbase_gwt.c
diff --git a/mali_kbase/backend/gpu/Kbuild b/mali_kbase/backend/gpu/Kbuild
index 2414d51..9a913bf 100644
--- a/mali_kbase/backend/gpu/Kbuild
+++ b/mali_kbase/backend/gpu/Kbuild
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -24,12 +24,9 @@ BACKEND += \
backend/gpu/mali_kbase_device_hw.c \
backend/gpu/mali_kbase_gpu.c \
backend/gpu/mali_kbase_gpuprops_backend.c \
- backend/gpu/mali_kbase_debug_job_fault_backend.c \
backend/gpu/mali_kbase_irq_linux.c \
backend/gpu/mali_kbase_instr_backend.c \
backend/gpu/mali_kbase_jm_as.c \
- backend/gpu/mali_kbase_jm_hw.c \
- backend/gpu/mali_kbase_jm_rb.c \
backend/gpu/mali_kbase_js_backend.c \
backend/gpu/mali_kbase_mmu_hw_direct.c \
backend/gpu/mali_kbase_pm_backend.c \
@@ -42,6 +39,13 @@ BACKEND += \
backend/gpu/mali_kbase_time.c \
backend/gpu/mali_kbase_l2_mmu_config.c
+ifeq ($(MALI_USE_CSF),0)
+ BACKEND += \
+ backend/gpu/mali_kbase_debug_job_fault_backend.c \
+ backend/gpu/mali_kbase_jm_hw.c \
+ backend/gpu/mali_kbase_jm_rb.c
+endif
+
ifeq ($(MALI_CUSTOMER_RELEASE),0)
BACKEND += \
backend/gpu/mali_kbase_pm_always_on_demand.c
diff --git a/mali_kbase/backend/gpu/mali_kbase_device_hw.c b/mali_kbase/backend/gpu/mali_kbase_device_hw.c
index 401c668..380f74f 100644
--- a/mali_kbase/backend/gpu/mali_kbase_device_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_device_hw.c
@@ -25,6 +25,7 @@
*
*/
#include <mali_kbase.h>
+#include <gpu/mali_kbase_gpu_fault.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_device_internal.h>
@@ -143,8 +144,8 @@ void kbase_io_history_dump(struct kbase_device *kbdev)
&h->buf[(h->count - iters + i) % h->size];
char const access = (io->addr & 1) ? 'w' : 'r';
- dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access,
- (void *)(io->addr & ~0x1), io->value);
+ dev_err(kbdev->dev, "%6i: %c: reg 0x%016lx val %08x\n", i,
+ access, (unsigned long)(io->addr & ~0x1), io->value);
}
spin_unlock_irqrestore(&h->lock, flags);
@@ -203,23 +204,19 @@ KBASE_EXPORT_TEST_API(kbase_reg_read);
*/
static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
{
- u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- u32 status = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_FAULTSTATUS));
+ u32 status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS));
u64 address = (u64) kbase_reg_read(kbdev,
GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
address |= kbase_reg_read(kbdev,
GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
- if ((gpu_id & GPU_ID2_PRODUCT_MODEL) != GPU_ID2_PRODUCT_TULX) {
- dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
- status,
- kbase_exception_name(kbdev, status & 0xFF),
- address);
- if (multiple)
- dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
- }
+ dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+ status,
+ kbase_gpu_exception_name(status & 0xFF),
+ address);
+ if (multiple)
+ dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
}
static bool kbase_gpu_fault_interrupt(struct kbase_device *kbdev, int multiple)
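
The fault reporting above assembles a 64-bit fault address from the GPU_FAULTADDRESS_HI/LO register pair before printing it. A minimal sketch of that HI/LO composition follows; read_reg() is a hypothetical stand-in for kbase_reg_read(), not the driver's API:

    #include <stdint.h>

    /* Compose a 64-bit value from a 32-bit HI/LO register pair, as
     * kbase_report_gpu_fault() does for GPU_FAULTADDRESS_HI/LO above.
     * read_reg() is a hypothetical register accessor.
     */
    static uint64_t read_u64_hi_lo(uint32_t (*read_reg)(uint32_t),
                                   uint32_t hi, uint32_t lo)
    {
        uint64_t value = (uint64_t)read_reg(hi) << 32;

        value |= read_reg(lo);
        return value;
    }
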
diff --git a/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c b/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c
index 29018b2..ae3ed9c 100644
--- a/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -122,4 +122,3 @@ void kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
GPU_CONTROL_REG(L2_FEATURES));
}
}
-
diff --git a/mali_kbase/backend/gpu/mali_kbase_instr_backend.c b/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
index 4e216d3..1f98863 100644
--- a/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
@@ -27,7 +27,7 @@
*/
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_hwaccess_instr.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_as.c b/mali_kbase/backend/gpu/mali_kbase_jm_as.c
index c8153ba..5b30e93 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_as.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_as.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_defs.h b/mali_kbase/backend/gpu/mali_kbase_jm_defs.h
index b4d2ae1..4603229 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016, 2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016, 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index 518ae5e..5e29721 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -26,7 +26,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_tracepoints.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_hwaccess_jm.h>
@@ -142,8 +142,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
cfg |= JS_CONFIG_THREAD_PRI(8);
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
- (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
+ if (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED)
cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
if (kbase_hw_has_feature(kbdev,
@@ -1281,4 +1280,3 @@ void kbase_reset_gpu_term(struct kbase_device *kbdev)
{
destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
}
-
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_internal.h b/mali_kbase/backend/gpu/mali_kbase_jm_internal.h
index 452ddee..880a89b 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2016, 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
index a1f90f5..ce0273a 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
@@ -26,6 +26,7 @@
*/
#include <mali_kbase.h>
+#include <gpu/mali_kbase_gpu_fault.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_js.h>
@@ -1125,8 +1126,7 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
if (!kbase_ctx_flag(katom->kctx, KCTX_DYING))
dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
js, completion_code,
- kbase_exception_name
- (kbdev,
+ kbase_gpu_exception_name(
completion_code));
#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
diff --git a/mali_kbase/backend/gpu/mali_kbase_js_backend.c b/mali_kbase/backend/gpu/mali_kbase_js_backend.c
index 1ffaa23..c4df191 100644
--- a/mali_kbase/backend/gpu/mali_kbase_js_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_js_backend.c
@@ -313,7 +313,6 @@ int kbase_backend_timer_init(struct kbase_device *kbdev)
hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
backend->scheduling_timer.function = timer_callback;
-
backend->timer_running = false;
return 0;
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
index 670e1cb..3eb435f 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -29,41 +29,61 @@
#include <backend/gpu/mali_kbase_device_internal.h>
#include <mali_kbase_as_fault_debugfs.h>
-static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
- u32 num_pages)
+/**
+ * lock_region() - Generate lockaddr to lock memory region in MMU
+ * @pfn: Starting page frame number of the region to lock
+ * @num_pages: Number of pages to lock. It must be greater than 0.
+ * @lockaddr: Address and size of memory region to lock
+ *
+ * The lockaddr value is a combination of the starting address and
+ * the size of the region that encompasses all the memory pages to lock.
+ *
+ * The size is expressed as a logarithm: it is represented in a way
+ * that is compatible with the HW specification and it also determines
+ * how many of the lowest bits of the address are cleared.
+ *
+ * Return: 0 if success, or an error code on failure.
+ */
+static int lock_region(u64 pfn, u32 num_pages, u64 *lockaddr)
{
- u64 region;
+ const u64 lockaddr_base = pfn << PAGE_SHIFT;
+ u64 lockaddr_size_log2, region_frame_number_start,
+ region_frame_number_end;
- /* can't lock a zero sized range */
- KBASE_DEBUG_ASSERT(num_pages);
+ if (num_pages == 0)
+ return -EINVAL;
- region = pfn << PAGE_SHIFT;
- /*
- * fls returns (given the ASSERT above):
- * 1 .. 32
- *
- * 10 + fls(num_pages)
- * results in the range (11 .. 42)
+ /* The size is expressed as a logarithm and should take into account
+ * the possibility that some pages might spill into the next region.
*/
+ lockaddr_size_log2 = fls(num_pages) + PAGE_SHIFT - 1;
- /* gracefully handle num_pages being zero */
- if (0 == num_pages) {
- region |= KBASE_LOCK_REGION_MIN_SIZE;
- } else {
- u8 region_width;
+ /* Round up if the number of pages is not a power of 2. */
+ if (num_pages != ((u32)1 << (lockaddr_size_log2 - PAGE_SHIFT)))
+ lockaddr_size_log2 += 1;
- region_width = 10 + fls(num_pages);
- if (num_pages != (1ul << (region_width - 11))) {
- /* not pow2, so must go up to the next pow2 */
- region_width += 1;
- }
- region_width = MAX(region_width, KBASE_LOCK_REGION_MIN_SIZE);
+ /* Round up if some memory pages spill into the next region. */
+ region_frame_number_start = pfn >> (lockaddr_size_log2 - PAGE_SHIFT);
+ region_frame_number_end =
+ (pfn + num_pages - 1) >> (lockaddr_size_log2 - PAGE_SHIFT);
- KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
- region |= region_width;
- }
+ if (region_frame_number_start < region_frame_number_end)
+ lockaddr_size_log2 += 1;
- return region;
+ /* Represent the size according to the HW specification. */
+ lockaddr_size_log2 = MAX(lockaddr_size_log2,
+ KBASE_LOCK_REGION_MIN_SIZE_LOG2);
+
+ if (lockaddr_size_log2 > KBASE_LOCK_REGION_MAX_SIZE_LOG2)
+ return -EINVAL;
+
+ /* The lowest bits are cleared and then set to size - 1 to represent
+ * the size in a way that is compatible with the HW specification.
+ */
+ *lockaddr = lockaddr_base & ~((1ull << lockaddr_size_log2) - 1);
+ *lockaddr |= lockaddr_size_log2 - 1;
+
+ return 0;
}
static int wait_ready(struct kbase_device *kbdev,
@@ -108,9 +128,6 @@ static void validate_protected_page_fault(struct kbase_device *kbdev)
* debug mode is turned on */
u32 protected_debug_mode = 0;
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
- return;
-
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
protected_debug_mode = kbase_reg_read(kbdev,
GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
@@ -300,20 +317,26 @@ int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
/* Unlock doesn't require a lock first */
ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
} else {
- u64 lock_addr = lock_region(kbdev, vpfn, nr);
+ u64 lock_addr;
- /* Lock the region that needs to be updated */
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
+ ret = lock_region(vpfn, nr, &lock_addr);
+
+ if (!ret) {
+ /* Lock the region that needs to be updated */
+ kbase_reg_write(kbdev,
+ MMU_AS_REG(as->number, AS_LOCKADDR_LO),
lock_addr & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
+ kbase_reg_write(kbdev,
+ MMU_AS_REG(as->number, AS_LOCKADDR_HI),
(lock_addr >> 32) & 0xFFFFFFFFUL);
- write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
+ write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
- /* Run the MMU operation */
- write_cmd(kbdev, as->number, op);
+ /* Run the MMU operation */
+ write_cmd(kbdev, as->number, op);
- /* Wait for the flush to complete */
- ret = wait_ready(kbdev, as->number);
+ /* Wait for the flush to complete */
+ ret = wait_ready(kbdev, as->number);
+ }
}
return ret;
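
The reworked lock_region() above encodes both base and size into a single lockaddr word: the size is stored as log2(size) - 1 in the low bits, and the same log2 determines how many base-address bits are cleared. Below is a self-contained sketch of that encoding under stated assumptions: PAGE_SHIFT is 12, and the EX_MIN/MAX_SIZE_LOG2 bounds are placeholders rather than the driver's KBASE_LOCK_REGION_* values.

    #include <stdint.h>
    #include <stdio.h>

    #define EX_PAGE_SHIFT    12 /* 4 KiB pages */
    #define EX_MIN_SIZE_LOG2 15 /* placeholder, not the driver's value */
    #define EX_MAX_SIZE_LOG2 48 /* placeholder, not the driver's value */

    /* 1-based position of the highest set bit, like the kernel's fls() */
    static int ex_fls(uint32_t x)
    {
        int n = 0;

        while (x) {
            n++;
            x >>= 1;
        }
        return n;
    }

    static int ex_lock_region(uint64_t pfn, uint32_t num_pages,
                              uint64_t *lockaddr)
    {
        const uint64_t base = pfn << EX_PAGE_SHIFT;
        uint64_t log2;

        if (num_pages == 0)
            return -1;

        log2 = ex_fls(num_pages) + EX_PAGE_SHIFT - 1;

        /* Round up when num_pages is not a power of two. */
        if (num_pages != (uint32_t)1 << (log2 - EX_PAGE_SHIFT))
            log2 += 1;

        /* Widen again if the range straddles a region boundary. */
        if ((pfn >> (log2 - EX_PAGE_SHIFT)) !=
            ((pfn + num_pages - 1) >> (log2 - EX_PAGE_SHIFT)))
            log2 += 1;

        if (log2 < EX_MIN_SIZE_LOG2)
            log2 = EX_MIN_SIZE_LOG2;
        if (log2 > EX_MAX_SIZE_LOG2)
            return -1;

        /* Clear the low log2 address bits, then store log2(size) - 1. */
        *lockaddr = (base & ~((1ull << log2) - 1)) | (log2 - 1);
        return 0;
    }

    int main(void)
    {
        uint64_t addr;

        /* 3 pages at pfn 5: the size rounds up to 4 pages (2^14 bytes),
         * the minimum region size (2^15 here) then wins, so the low
         * bits encode 15 - 1 = 14 and the base aligns down to 0.
         */
        if (!ex_lock_region(5, 3, &addr))
            printf("lockaddr = 0x%llx\n", (unsigned long long)addr);
        return 0;
    }
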
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index e6d119b..e016221 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -26,15 +26,15 @@
*/
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_hwaccess_jm.h>
-#include <mali_kbase_hwcnt_context.h>
#include <backend/gpu/mali_kbase_js_internal.h>
-#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_devfreq.h>
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
@@ -615,7 +615,7 @@ void kbase_pm_power_changed(struct kbase_device *kbdev)
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_pm_update_state(kbdev);
- kbase_backend_slot_update(kbdev);
+ kbase_backend_slot_update(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index ff4f574..cb8f647 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -28,7 +28,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_tracepoints.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
@@ -1687,6 +1687,7 @@ static void kbase_set_jm_quirks(struct kbase_device *kbdev, const u32 prod_id)
{
kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
GPU_CONTROL_REG(JM_CONFIG));
+
if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == GPU_ID2_PRODUCT_TMIX) {
/* Only for tMIx */
u32 coherency_features;
@@ -1712,14 +1713,14 @@ static void kbase_set_jm_quirks(struct kbase_device *kbdev, const u32 prod_id)
"idvs-group-size", &tmp))
tmp = default_idvs_group_size;
- if (tmp > JM_MAX_IDVS_GROUP_SIZE) {
+ if (tmp > IDVS_GROUP_MAX_SIZE) {
dev_err(kbdev->dev,
"idvs-group-size of %d is too large. Maximum value is %d",
- tmp, JM_MAX_IDVS_GROUP_SIZE);
+ tmp, IDVS_GROUP_MAX_SIZE);
tmp = default_idvs_group_size;
}
- kbdev->hw_quirks_jm |= tmp << JM_IDVS_GROUP_SIZE_SHIFT;
+ kbdev->hw_quirks_jm |= tmp << IDVS_GROUP_SIZE_SHIFT;
}
#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
@@ -1944,29 +1945,20 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
return -EINVAL;
}
-static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
+int kbase_pm_protected_mode_enable(struct kbase_device *const kbdev)
{
- struct kbase_device *kbdev = pdev->data;
-
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
GPU_COMMAND_SET_PROTECTED_MODE);
return 0;
}
-static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
+int kbase_pm_protected_mode_disable(struct kbase_device *const kbdev)
{
- struct kbase_device *kbdev = pdev->data;
-
lockdep_assert_held(&kbdev->pm.lock);
return kbase_pm_do_reset(kbdev);
}
-struct protected_mode_ops kbase_native_protected_ops = {
- .protected_mode_enable = kbasep_protected_mode_enable,
- .protected_mode_disable = kbasep_protected_mode_disable
-};
-
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
{
unsigned long irq_flags;
@@ -1999,11 +1991,8 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
/* Soft reset the GPU */
- if (kbdev->protected_mode_support)
- err = kbdev->protected_ops->protected_mode_disable(
- kbdev->protected_dev);
- else
- err = kbase_pm_do_reset(kbdev);
+ err = kbdev->protected_ops->protected_mode_disable(
+ kbdev->protected_dev);
spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
kbdev->protected_mode = false;
@@ -2019,12 +2008,8 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
/* Sanity check protected mode was left after reset */
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
- u32 gpu_status = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_STATUS));
-
- WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
- }
+ WARN_ON(kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
+ GPU_STATUS_PROTECTED_MODE_ACTIVE);
/* If cycle counter was in use re-enable it, enable_irqs will only be
* false when called from kbase_pm_powerup */
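
The idvs-group-size handling above switches from the removed JM_* field names to the IDVS_GROUP_* definitions now in the regmap header (shift 16, 6-bit field). A small sketch of how a device-tree value is validated and packed into the JM_CONFIG quirks word, mirroring the logic in kbase_set_jm_quirks():

    #include <stdint.h>

    #define IDVS_GROUP_SIZE_SHIFT 16
    #define IDVS_GROUP_MAX_SIZE   0x3F

    /* Pack an idvs-group-size value into the JM_CONFIG quirks word,
     * falling back to a caller-supplied default when the value does
     * not fit the 6-bit field.
     */
    static uint32_t pack_idvs_group_size(uint32_t quirks, uint32_t size,
                                         uint32_t default_size)
    {
        if (size > IDVS_GROUP_MAX_SIZE)
            size = default_size;
        return quirks | (size << IDVS_GROUP_SIZE_SHIFT);
    }
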
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
index 843dc5a..7d9bb03 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
@@ -25,7 +25,7 @@
*/
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_shader_states.h b/mali_kbase/backend/gpu/mali_kbase_pm_shader_states.h
index 2f573de..2bd9e47 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_shader_states.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_shader_states.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/build.bp b/mali_kbase/build.bp
index 86c4afd..967d12c 100644
--- a/mali_kbase/build.bp
+++ b/mali_kbase/build.bp
@@ -87,6 +87,10 @@ bob_kernel_module {
"platform/*/*.h",
"platform/*/Kbuild",
"thirdparty/*.c",
+ "device/*.c",
+ "device/*.h",
+ "gpu/*.c",
+ "gpu/*.h",
],
kbuild_options: [
"CONFIG_MALI_KUTF=n",
@@ -120,8 +124,19 @@ bob_kernel_module {
mali_hw_errata_1485982_use_clock_alternative: {
kbuild_options: ["CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE=y"],
},
+ gpu_has_job_manager: {
+ srcs: [
+ "jm/*.h",
+ "device/backend/*_jm.c",
+ "gpu/backend/*_jm.c",
+ "gpu/backend/*_jm.h",
+ ],
+ },
gpu_has_csf: {
srcs: [
+ "device/backend/*_csf.c",
+ "gpu/backend/*_csf.c",
+ "gpu/backend/*_csf.h",
"csf/*.c",
"csf/*.h",
"csf/Kbuild",
diff --git a/mali_kbase/device/backend/mali_kbase_device_jm.c b/mali_kbase/device/backend/mali_kbase_device_jm.c
new file mode 100644
index 0000000..2f53f10
--- /dev/null
+++ b/mali_kbase/device/backend/mali_kbase_device_jm.c
@@ -0,0 +1,144 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "../mali_kbase_device_internal.h"
+#include "../mali_kbase_device.h"
+
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_reset_gpu.h>
+
+#ifdef CONFIG_MALI_NO_MALI
+#include <mali_kbase_model_linux.h>
+#endif
+
+static const struct kbase_device_init dev_init[] = {
+#ifdef CONFIG_MALI_NO_MALI
+ {kbase_gpu_device_create, kbase_gpu_device_destroy,
+ "Dummy model initialization failed"},
+#else
+ {assign_irqs, NULL,
+ "IRQ search failed"},
+ {registers_map, registers_unmap,
+ "Register map failed"},
+#endif
+ {power_control_init, power_control_term,
+ "Power control initialization failed"},
+ {kbase_device_io_history_init, kbase_device_io_history_term,
+ "Register access history initialization failed"},
+ {kbase_backend_early_init, kbase_backend_early_term,
+ "Early backend initialization failed"},
+ {kbase_device_populate_max_freq, NULL,
+ "Populating max frequency failed"},
+ {kbase_device_misc_init, kbase_device_misc_term,
+ "Miscellaneous device initialization failed"},
+ {kbase_ctx_sched_init, kbase_ctx_sched_term,
+ "Context scheduler initialization failed"},
+ {kbase_mem_init, kbase_mem_term,
+ "Memory subsystem initialization failed"},
+ {kbase_device_coherency_init, NULL,
+ "Device coherency init failed"},
+ {kbase_protected_mode_init, kbase_protected_mode_term,
+ "Protected mode subsystem initialization failed"},
+ {kbase_device_list_init, kbase_device_list_term,
+ "Device list setup failed"},
+ {kbasep_js_devdata_init, kbasep_js_devdata_term,
+ "Job JS devdata initialization failed"},
+ {kbase_device_timeline_init, kbase_device_timeline_term,
+ "Timeline stream initialization failed"},
+ {kbase_device_hwcnt_backend_gpu_init,
+ kbase_device_hwcnt_backend_gpu_term,
+ "GPU hwcnt backend creation failed"},
+ {kbase_device_hwcnt_context_init, kbase_device_hwcnt_context_term,
+ "GPU hwcnt context initialization failed"},
+ {kbase_device_hwcnt_virtualizer_init,
+ kbase_device_hwcnt_virtualizer_term,
+ "GPU hwcnt virtualizer initialization failed"},
+ {kbase_device_vinstr_init, kbase_device_vinstr_term,
+ "Virtual instrumentation initialization failed"},
+ {kbase_backend_late_init, kbase_backend_late_term,
+ "Late backend initialization failed"},
+#ifdef MALI_KBASE_BUILD
+ {kbase_debug_job_fault_dev_init, kbase_debug_job_fault_dev_term,
+ "Job fault debug initialization failed"},
+ {kbase_device_debugfs_init, kbase_device_debugfs_term,
+ "DebugFS initialization failed"},
+ /* Sysfs init needs to happen before registering the device with
+ * misc_register(), otherwise it causes a race condition between
+ * registering the device and a uevent event being generated for
+ * userspace, causing udev rules to run which might expect certain
+ * sysfs attributes present. As a result of the race condition
+ * we avoid, some Mali sysfs entries may have appeared to udev
+ * to not exist.
+ * For more information, see
+ * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the
+ * paragraph that starts with "Word of warning", currently the
+ * second-last paragraph.
+ */
+ {kbase_sysfs_init, kbase_sysfs_term, "SysFS group creation failed"},
+ {kbase_device_misc_register, kbase_device_misc_deregister,
+ "Misc device registration failed"},
+#ifdef CONFIG_MALI_BUSLOG
+ {buslog_init, buslog_term, "Bus log client registration failed"},
+#endif
+ {kbase_gpuprops_populate_user_buffer, kbase_gpuprops_free_user_buffer,
+ "GPU property population failed"},
+#endif
+};
+
+static void kbase_device_term_partial(struct kbase_device *kbdev,
+ unsigned int i)
+{
+ while (i-- > 0) {
+ if (dev_init[i].term)
+ dev_init[i].term(kbdev);
+ }
+}
+
+void kbase_device_term(struct kbase_device *kbdev)
+{
+ kbase_device_term_partial(kbdev, ARRAY_SIZE(dev_init));
+ kbasep_js_devdata_halt(kbdev);
+ kbase_mem_halt(kbdev);
+}
+
+int kbase_device_init(struct kbase_device *kbdev)
+{
+ int err = 0;
+ unsigned int i = 0;
+
+ kbase_device_id_init(kbdev);
+ kbase_disjoint_init(kbdev);
+
+ for (i = 0; i < ARRAY_SIZE(dev_init); i++) {
+ err = dev_init[i].init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "%s error = %d\n",
+ dev_init[i].err_mes, err);
+ kbase_device_term_partial(kbdev, i);
+ break;
+ }
+ }
+
+ return err;
+}
diff --git a/mali_kbase/mali_kbase_device.c b/mali_kbase/device/mali_kbase_device.c
index 89db174..3062fa3 100644
--- a/mali_kbase/mali_kbase_device.c
+++ b/mali_kbase/device/mali_kbase_device.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/types.h>
#include <mali_kbase.h>
#include <mali_kbase_defs.h>
@@ -39,6 +40,14 @@
#include <mali_kbase_hw.h>
#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_timeline.h>
+#include "mali_kbase_vinstr.h"
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+
+#include "mali_kbase_device.h"
+#include "mali_kbase_device_internal.h"
+
/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
* Supports tracing feature provided in the base module.
* Please keep it in sync with the value of base module.
@@ -57,6 +66,15 @@ static const char *kbasep_trace_code_string[] = {
#define DEBUG_MESSAGE_SIZE 256
+/* Number of register accesses for the buffer that we allocate during
+ * initialization time. The buffer size can be changed later via debugfs.
+ */
+#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
+
+static DEFINE_MUTEX(kbase_dev_list_lock);
+static LIST_HEAD(kbase_dev_list);
+static int kbase_dev_nr;
+
static int kbasep_trace_init(struct kbase_device *kbdev);
static void kbasep_trace_term(struct kbase_device *kbdev);
static void kbasep_trace_hook_wrapper(void *param);
@@ -114,7 +132,7 @@ static void kbase_device_all_as_term(struct kbase_device *kbdev)
kbase_device_as_term(kbdev, i);
}
-int kbase_device_init(struct kbase_device * const kbdev)
+int kbase_device_misc_init(struct kbase_device * const kbdev)
{
int err;
#ifdef CONFIG_ARM64
@@ -224,7 +242,7 @@ fail:
return err;
}
-void kbase_device_term(struct kbase_device *kbdev)
+void kbase_device_misc_term(struct kbase_device *kbdev)
{
KBASE_DEBUG_ASSERT(kbdev);
@@ -246,6 +264,122 @@ void kbase_device_free(struct kbase_device *kbdev)
kfree(kbdev);
}
+void kbase_device_id_init(struct kbase_device *kbdev)
+{
+ scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
+ kbase_dev_nr);
+ kbdev->id = kbase_dev_nr++;
+}
+
+int kbase_device_hwcnt_backend_gpu_init(struct kbase_device *kbdev)
+{
+ return kbase_hwcnt_backend_gpu_create(kbdev, &kbdev->hwcnt_gpu_iface);
+}
+
+void kbase_device_hwcnt_backend_gpu_term(struct kbase_device *kbdev)
+{
+ kbase_hwcnt_backend_gpu_destroy(&kbdev->hwcnt_gpu_iface);
+}
+
+int kbase_device_hwcnt_context_init(struct kbase_device *kbdev)
+{
+ return kbase_hwcnt_context_init(&kbdev->hwcnt_gpu_iface,
+ &kbdev->hwcnt_gpu_ctx);
+}
+
+void kbase_device_hwcnt_context_term(struct kbase_device *kbdev)
+{
+ kbase_hwcnt_context_term(kbdev->hwcnt_gpu_ctx);
+}
+
+int kbase_device_hwcnt_virtualizer_init(struct kbase_device *kbdev)
+{
+ return kbase_hwcnt_virtualizer_init(kbdev->hwcnt_gpu_ctx,
+ KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS,
+ &kbdev->hwcnt_gpu_virt);
+}
+
+void kbase_device_hwcnt_virtualizer_term(struct kbase_device *kbdev)
+{
+ kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
+}
+
+int kbase_device_timeline_init(struct kbase_device *kbdev)
+{
+ atomic_set(&kbdev->timeline_is_enabled, 0);
+ return kbase_timeline_init(&kbdev->timeline,
+ &kbdev->timeline_is_enabled);
+}
+
+void kbase_device_timeline_term(struct kbase_device *kbdev)
+{
+ kbase_timeline_term(kbdev->timeline);
+}
+
+int kbase_device_vinstr_init(struct kbase_device *kbdev)
+{
+ return kbase_vinstr_init(kbdev->hwcnt_gpu_virt, &kbdev->vinstr_ctx);
+}
+
+void kbase_device_vinstr_term(struct kbase_device *kbdev)
+{
+ kbase_vinstr_term(kbdev->vinstr_ctx);
+}
+
+int kbase_device_io_history_init(struct kbase_device *kbdev)
+{
+ return kbase_io_history_init(&kbdev->io_history,
+ KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
+}
+
+void kbase_device_io_history_term(struct kbase_device *kbdev)
+{
+ kbase_io_history_term(&kbdev->io_history);
+}
+
+int kbase_device_misc_register(struct kbase_device *kbdev)
+{
+ return misc_register(&kbdev->mdev);
+}
+
+void kbase_device_misc_deregister(struct kbase_device *kbdev)
+{
+ misc_deregister(&kbdev->mdev);
+}
+
+int kbase_device_list_init(struct kbase_device *kbdev)
+{
+ const struct list_head *dev_list;
+
+ dev_list = kbase_device_get_list();
+ list_add(&kbdev->entry, &kbase_dev_list);
+ kbase_device_put_list(dev_list);
+
+ return 0;
+}
+
+void kbase_device_list_term(struct kbase_device *kbdev)
+{
+ const struct list_head *dev_list;
+
+ dev_list = kbase_device_get_list();
+ list_del(&kbdev->entry);
+ kbase_device_put_list(dev_list);
+}
+
+const struct list_head *kbase_device_get_list(void)
+{
+ mutex_lock(&kbase_dev_list_lock);
+ return &kbase_dev_list;
+}
+KBASE_EXPORT_TEST_API(kbase_device_get_list);
+
+void kbase_device_put_list(const struct list_head *dev_list)
+{
+ mutex_unlock(&kbase_dev_list_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_device_put_list);
+
/*
* Device trace functions
*/
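
kbase_device_get_list() and kbase_device_put_list() above bracket access to the global device list with a mutex: get locks and returns the head, put unlocks. A hedged usage sketch, assuming <linux/list.h> and the kbase headers are available; kbdev_count() is a hypothetical helper, not part of the driver:

    /* Count registered devices: the head returned by
     * kbase_device_get_list() is only safe to walk until the matching
     * kbase_device_put_list() call releases the internal mutex.
     */
    static int kbdev_count(void)
    {
        const struct list_head *dev_list = kbase_device_get_list();
        struct kbase_device *kbdev;
        int n = 0;

        list_for_each_entry(kbdev, dev_list, entry)
            n++;

        kbase_device_put_list(dev_list);
        return n;
    }
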
diff --git a/mali_kbase/device/mali_kbase_device.h b/mali_kbase/device/mali_kbase_device.h
new file mode 100644
index 0000000..b1a3e1b
--- /dev/null
+++ b/mali_kbase/device/mali_kbase_device.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+/**
+ * kbase_device_get_list - get device list.
+ *
+ * Get access to device list.
+ *
+ * Return: Pointer to the linked list head.
+ */
+const struct list_head *kbase_device_get_list(void);
+
+/**
+ * kbase_device_put_list - put device list.
+ *
+ * @dev_list: head of linked list containing device list.
+ *
+ * Put access to the device list.
+ */
+void kbase_device_put_list(const struct list_head *dev_list);
+
+/**
+ * kbase_device_init - Device initialisation.
+ *
+ * This is called from device probe to initialise various other
+ * components needed.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 on success and non-zero value on failure.
+ */
+int kbase_device_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_device_term - Device termination.
+ *
+ * This is called from device remove to terminate various components that
+ * were initialised during kbase_device_init.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ */
+void kbase_device_term(struct kbase_device *kbdev);
diff --git a/mali_kbase/device/mali_kbase_device_internal.h b/mali_kbase/device/mali_kbase_device_internal.h
new file mode 100644
index 0000000..4ca57e7
--- /dev/null
+++ b/mali_kbase/device/mali_kbase_device_internal.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+typedef int kbase_device_init_method(struct kbase_device *kbdev);
+typedef void kbase_device_term_method(struct kbase_device *kbdev);
+
+/**
+ * struct kbase_device_init - Device init/term methods.
+ * @init: Function pointer to a initialise method.
+ * @term: Function pointer to a terminate method.
+ * @err_mes: Error message to be printed when init method fails.
+ */
+struct kbase_device_init {
+ kbase_device_init_method *init;
+ kbase_device_term_method *term;
+ char *err_mes;
+};
+
+int kbase_device_vinstr_init(struct kbase_device *kbdev);
+void kbase_device_vinstr_term(struct kbase_device *kbdev);
+
+int kbase_device_timeline_init(struct kbase_device *kbdev);
+void kbase_device_timeline_term(struct kbase_device *kbdev);
+
+int kbase_device_hwcnt_backend_gpu_init(struct kbase_device *kbdev);
+void kbase_device_hwcnt_backend_gpu_term(struct kbase_device *kbdev);
+
+int kbase_device_hwcnt_context_init(struct kbase_device *kbdev);
+void kbase_device_hwcnt_context_term(struct kbase_device *kbdev);
+
+int kbase_device_hwcnt_virtualizer_init(struct kbase_device *kbdev);
+void kbase_device_hwcnt_virtualizer_term(struct kbase_device *kbdev);
+
+int kbase_device_list_init(struct kbase_device *kbdev);
+void kbase_device_list_term(struct kbase_device *kbdev);
+
+int kbase_device_io_history_init(struct kbase_device *kbdev);
+void kbase_device_io_history_term(struct kbase_device *kbdev);
+
+int kbase_device_misc_register(struct kbase_device *kbdev);
+void kbase_device_misc_deregister(struct kbase_device *kbdev);
+
+void kbase_device_id_init(struct kbase_device *kbdev);
diff --git a/mali_kbase/gpu/backend/mali_kbase_gpu_fault_jm.c b/mali_kbase/gpu/backend/mali_kbase_gpu_fault_jm.c
new file mode 100644
index 0000000..63132dc
--- /dev/null
+++ b/mali_kbase/gpu/backend/mali_kbase_gpu_fault_jm.c
@@ -0,0 +1,181 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+#include "../mali_kbase_gpu_fault.h"
+
+const char *kbase_gpu_exception_name(u32 const exception_code)
+{
+ const char *e;
+
+ switch (exception_code) {
+ /* Non-Fault Status code */
+ case 0x00:
+ e = "NOT_STARTED/IDLE/OK";
+ break;
+ case 0x01:
+ e = "DONE";
+ break;
+ case 0x02:
+ e = "INTERRUPTED";
+ break;
+ case 0x03:
+ e = "STOPPED";
+ break;
+ case 0x04:
+ e = "TERMINATED";
+ break;
+ case 0x08:
+ e = "ACTIVE";
+ break;
+ /* Job exceptions */
+ case 0x40:
+ e = "JOB_CONFIG_FAULT";
+ break;
+ case 0x41:
+ e = "JOB_POWER_FAULT";
+ break;
+ case 0x42:
+ e = "JOB_READ_FAULT";
+ break;
+ case 0x43:
+ e = "JOB_WRITE_FAULT";
+ break;
+ case 0x44:
+ e = "JOB_AFFINITY_FAULT";
+ break;
+ case 0x48:
+ e = "JOB_BUS_FAULT";
+ break;
+ case 0x50:
+ e = "INSTR_INVALID_PC";
+ break;
+ case 0x51:
+ e = "INSTR_INVALID_ENC";
+ break;
+ case 0x52:
+ e = "INSTR_TYPE_MISMATCH";
+ break;
+ case 0x53:
+ e = "INSTR_OPERAND_FAULT";
+ break;
+ case 0x54:
+ e = "INSTR_TLS_FAULT";
+ break;
+ case 0x55:
+ e = "INSTR_BARRIER_FAULT";
+ break;
+ case 0x56:
+ e = "INSTR_ALIGN_FAULT";
+ break;
+ case 0x58:
+ e = "DATA_INVALID_FAULT";
+ break;
+ case 0x59:
+ e = "TILE_RANGE_FAULT";
+ break;
+ case 0x5A:
+ e = "ADDR_RANGE_FAULT";
+ break;
+ case 0x60:
+ e = "OUT_OF_MEMORY";
+ break;
+ /* GPU exceptions */
+ case 0x80:
+ e = "DELAYED_BUS_FAULT";
+ break;
+ case 0x88:
+ e = "SHAREABILITY_FAULT";
+ break;
+ /* MMU exceptions */
+ case 0xC0:
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC4:
+ case 0xC5:
+ case 0xC6:
+ case 0xC7:
+ e = "TRANSLATION_FAULT";
+ break;
+ case 0xC8:
+ e = "PERMISSION_FAULT";
+ break;
+ case 0xC9:
+ case 0xCA:
+ case 0xCB:
+ case 0xCC:
+ case 0xCD:
+ case 0xCE:
+ case 0xCF:
+ e = "PERMISSION_FAULT";
+ break;
+ case 0xD0:
+ case 0xD1:
+ case 0xD2:
+ case 0xD3:
+ case 0xD4:
+ case 0xD5:
+ case 0xD6:
+ case 0xD7:
+ e = "TRANSTAB_BUS_FAULT";
+ break;
+ case 0xD8:
+ e = "ACCESS_FLAG";
+ break;
+ case 0xD9:
+ case 0xDA:
+ case 0xDB:
+ case 0xDC:
+ case 0xDD:
+ case 0xDE:
+ case 0xDF:
+ e = "ACCESS_FLAG";
+ break;
+ case 0xE0:
+ case 0xE1:
+ case 0xE2:
+ case 0xE3:
+ case 0xE4:
+ case 0xE5:
+ case 0xE6:
+ case 0xE7:
+ e = "ADDRESS_SIZE_FAULT";
+ break;
+ case 0xE8:
+ case 0xE9:
+ case 0xEA:
+ case 0xEB:
+ case 0xEC:
+ case 0xED:
+ case 0xEE:
+ case 0xEF:
+ e = "MEMORY_ATTRIBUTES_FAULT";
+ break;
+ default:
+ e = "UNKNOWN";
+ break;
+ };
+
+ return e;
+}
diff --git a/mali_kbase/mali_midg_regmap_jm.h b/mali_kbase/gpu/backend/mali_kbase_gpu_regmap_jm.h
index 58e4d08..258ff33 100644
--- a/mali_kbase/mali_midg_regmap_jm.h
+++ b/mali_kbase/gpu/backend/mali_kbase_gpu_regmap_jm.h
@@ -20,15 +20,69 @@
*
*/
-#ifndef _MIDG_REGMAP_JM_H_
-#define _MIDG_REGMAP_JM_H_
+#ifndef _KBASE_GPU_REGMAP_JM_H_
+#define _KBASE_GPU_REGMAP_JM_H_
+
+
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_WA 0x8Dull
+/* Set to inner non-cacheable, outer-non-cacheable
+ * Setting defined by the alloc bits is ignored, but set to a valid encoding:
+ * - no-alloc on read
+ * - no alloc on write
+ */
+#define AS_MEMATTR_AARCH64_NON_CACHEABLE 0x4Cull
+
+/* Symbols for default MEMATTR to use
+ * Default is - HW implementation defined caching
+ */
+#define AS_MEMATTR_INDEX_DEFAULT 0
+#define AS_MEMATTR_INDEX_DEFAULT_ACE 3
+
+/* HW implementation defined caching */
+#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
+/* Force cache on */
+#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1
+/* Write-alloc */
+#define AS_MEMATTR_INDEX_WRITE_ALLOC 2
+/* Outer coherent, inner implementation defined policy */
+#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3
+/* Outer coherent, write alloc inner */
+#define AS_MEMATTR_INDEX_OUTER_WA 4
+/* Normal memory, inner non-cacheable, outer non-cacheable (ARMv8 mode only) */
+#define AS_MEMATTR_INDEX_NON_CACHEABLE 5
/* GPU control registers */
#define CORE_FEATURES 0x008 /* (RO) Shader Core Features */
#define JS_PRESENT 0x01C /* (RO) Job slots present */
-#define LATEST_FLUSH 0x038 /* (RO) Flush ID of latest clean-and-invalidate operation */
-#define GROUPS_L2_COHERENT (1 << 0) /* Cores groups are l2 coherent */
+#define LATEST_FLUSH 0x038 /* (RO) Flush ID of latest
+ * clean-and-invalidate operation
+ */
+
+#define PRFCNT_BASE_LO 0x060 /* (RW) Performance counter memory
+ * region base address, low word
+ */
+#define PRFCNT_BASE_HI 0x064 /* (RW) Performance counter memory
+ * region base address, high word
+ */
+#define PRFCNT_CONFIG 0x068 /* (RW) Performance counter
+ * configuration
+ */
+#define PRFCNT_JM_EN 0x06C /* (RW) Performance counter enable
+ * flags for Job Manager
+ */
+#define PRFCNT_SHADER_EN 0x070 /* (RW) Performance counter enable
+ * flags for shader cores
+ */
+#define PRFCNT_TILER_EN 0x074 /* (RW) Performance counter enable
+ * flags for tiler
+ */
+#define PRFCNT_MMU_L2_EN 0x07C /* (RW) Performance counter enable
+ * flags for MMU/L2 cache
+ */
#define JS0_FEATURES 0x0C0 /* (RO) Features of job slot 0 */
#define JS1_FEATURES 0x0C4 /* (RO) Features of job slot 1 */
@@ -140,7 +194,7 @@
* The values are separated to avoid dependency of userspace and kernel code.
*/
-/* Group of values representing the job status insead a particular fault */
+/* Group of values representing the job status instead of a particular fault */
#define JS_STATUS_NO_EXCEPTION_BASE 0x00
#define JS_STATUS_INTERRUPTED (JS_STATUS_NO_EXCEPTION_BASE + 0x02) /* 0x02 means INTERRUPTED */
#define JS_STATUS_STOPPED (JS_STATUS_NO_EXCEPTION_BASE + 0x03) /* 0x03 means STOPPED */
@@ -192,8 +246,6 @@
#define JM_JOB_THROTTLE_LIMIT_SHIFT (3)
#define JM_MAX_JOB_THROTTLE_LIMIT (0x3F)
#define JM_FORCE_COHERENCY_FEATURES_SHIFT (2)
-#define JM_IDVS_GROUP_SIZE_SHIFT (16)
-#define JM_MAX_IDVS_GROUP_SIZE (0x3F)
/* GPU_COMMAND values */
#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */
@@ -207,4 +259,4 @@
#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */
#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */
-#endif /* _MIDG_REGMAP_JM_H_ */
+#endif /* _KBASE_GPU_REGMAP_JM_H_ */
diff --git a/mali_kbase/platform/meson/mali_kbase_config_meson.c b/mali_kbase/gpu/mali_kbase_gpu.h
index 8de7a88..9516e56 100644
--- a/mali_kbase/platform/meson/mali_kbase_config_meson.c
+++ b/mali_kbase/gpu/mali_kbase_gpu.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015, 2017, 2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,22 +20,12 @@
*
*/
-#include <mali_kbase_config.h>
+#ifndef _KBASE_GPU_H_
+#define _KBASE_GPU_H_
-static struct kbase_platform_config dummy_platform_config;
+#include "mali_kbase_gpu_regmap.h"
+#include "mali_kbase_gpu_fault.h"
+#include "mali_kbase_gpu_coherency.h"
+#include "mali_kbase_gpu_id.h"
-struct kbase_platform_config *kbase_get_platform_config(void)
-{
- return &dummy_platform_config;
-}
-
-#ifndef CONFIG_OF
-int kbase_platform_register(void)
-{
- return 0;
-}
-
-void kbase_platform_unregister(void)
-{
-}
-#endif
+#endif /* _KBASE_GPU_H_ */
diff --git a/mali_kbase/mali_midg_coherency.h b/mali_kbase/gpu/mali_kbase_gpu_coherency.h
index 29d5df3..5ab67db 100644
--- a/mali_kbase/mali_midg_coherency.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_coherency.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,12 +20,12 @@
*
*/
-#ifndef _MIDG_COHERENCY_H_
-#define _MIDG_COHERENCY_H_
+#ifndef _KBASE_GPU_COHERENCY_H_
+#define _KBASE_GPU_COHERENCY_H_
#define COHERENCY_ACE_LITE 0
#define COHERENCY_ACE 1
#define COHERENCY_NONE 31
#define COHERENCY_FEATURE_BIT(x) (1 << (x))
-#endif /* _MIDG_COHERENCY_H_ */
+#endif /* _KBASE_GPU_COHERENCY_H_ */
diff --git a/mali_kbase/platform/meson/mali_kbase_config_platform.h b/mali_kbase/gpu/mali_kbase_gpu_fault.h
index c02886f..f83980b 100644
--- a/mali_kbase/platform/meson/mali_kbase_config_platform.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_fault.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017, 2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,27 +20,17 @@
*
*/
-/**
- * Power management configuration
- *
- * Attached value: pointer to @ref kbase_pm_callback_conf
- * Default value: See @ref kbase_pm_callback_conf
- */
-#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+#ifndef _KBASE_GPU_FAULT_H_
+#define _KBASE_GPU_FAULT_H_
-/**
- * Platform specific configuration functions
+/** Returns the name associated with a Mali exception code
*
- * Attached value: pointer to @ref kbase_platform_funcs_conf
- * Default value: See @ref kbase_platform_funcs_conf
- */
-#define PLATFORM_FUNCS (NULL)
-
-extern struct kbase_pm_callback_conf pm_callbacks;
-
-/**
- * Autosuspend delay
+ * @exception_code: exception code
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
*
- * The delay time (in milliseconds) to be used for autosuspend
+ * Return: name associated with the exception code
*/
-#define AUTO_SUSPEND_DELAY (100)
+const char *kbase_gpu_exception_name(u32 exception_code);
+
+#endif /* _KBASE_GPU_FAULT_H_ */
diff --git a/mali_kbase/mali_kbase_gpu_id.h b/mali_kbase/gpu/mali_kbase_gpu_id.h
index a38e886..24acab1 100644
--- a/mali_kbase/mali_kbase_gpu_id.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_id.h
@@ -19,6 +19,7 @@
* SPDX-License-Identifier: GPL-2.0
*
*/
+
#ifndef _KBASE_GPU_ID_H_
#define _KBASE_GPU_ID_H_
@@ -97,10 +98,9 @@
#define GPU_ID2_PRODUCT_TNAX GPU_ID2_MODEL_MAKE(9, 1)
#define GPU_ID2_PRODUCT_TBEX GPU_ID2_MODEL_MAKE(9, 2)
#define GPU_ID2_PRODUCT_LBEX GPU_ID2_MODEL_MAKE(9, 4)
-#define GPU_ID2_PRODUCT_TULX GPU_ID2_MODEL_MAKE(10, 0)
#define GPU_ID2_PRODUCT_TDUX GPU_ID2_MODEL_MAKE(10, 1)
#define GPU_ID2_PRODUCT_TODX GPU_ID2_MODEL_MAKE(10, 2)
-#define GPU_ID2_PRODUCT_TIDX GPU_ID2_MODEL_MAKE(10, 3)
+#define GPU_ID2_PRODUCT_TGRX GPU_ID2_MODEL_MAKE(10, 3)
#define GPU_ID2_PRODUCT_TVAX GPU_ID2_MODEL_MAKE(10, 4)
#define GPU_ID2_PRODUCT_LODX GPU_ID2_MODEL_MAKE(10, 5)
diff --git a/mali_kbase/mali_midg_regmap.h b/mali_kbase/gpu/mali_kbase_gpu_regmap.h
index 6d5f243..205b59a 100644
--- a/mali_kbase/mali_midg_regmap.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_regmap.h
@@ -20,12 +20,12 @@
*
*/
-#ifndef _MIDG_REGMAP_H_
-#define _MIDG_REGMAP_H_
+#ifndef _KBASE_GPU_REGMAP_H_
+#define _KBASE_GPU_REGMAP_H_
-#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_coherency.h"
#include "mali_kbase_gpu_id.h"
-#include "mali_midg_regmap_jm.h"
+#include "backend/mali_kbase_gpu_regmap_jm.h"
/* Begin Register Offsets */
/* GPU control registers */
@@ -54,18 +54,15 @@
#define L2_CONFIG 0x048 /* (RW) Level 2 cache configuration */
+#define GROUPS_L2_COHERENT (1 << 0) /* Cores groups are l2 coherent */
+#define SUPER_L2_COHERENT (1 << 1) /* Shader cores within a core
+ * supergroup are l2 coherent
+ */
+
#define PWR_KEY 0x050 /* (WO) Power manager key register */
#define PWR_OVERRIDE0 0x054 /* (RW) Power manager override settings */
#define PWR_OVERRIDE1 0x058 /* (RW) Power manager override settings */
-#define PRFCNT_BASE_LO 0x060 /* (RW) Performance counter memory region base address, low word */
-#define PRFCNT_BASE_HI 0x064 /* (RW) Performance counter memory region base address, high word */
-#define PRFCNT_CONFIG 0x068 /* (RW) Performance counter configuration */
-#define PRFCNT_JM_EN 0x06C /* (RW) Performance counter enable flags for Job Manager */
-#define PRFCNT_SHADER_EN 0x070 /* (RW) Performance counter enable flags for shader cores */
-#define PRFCNT_TILER_EN 0x074 /* (RW) Performance counter enable flags for tiler */
-#define PRFCNT_MMU_L2_EN 0x07C /* (RW) Performance counter enable flags for MMU/L2 cache */
-
#define CYCLE_COUNT_LO 0x090 /* (RO) Cycle counter, low word */
#define CYCLE_COUNT_HI 0x094 /* (RO) Cycle counter, high word */
#define TIMESTAMP_LO 0x098 /* (RO) Global time stamp counter, low word */
@@ -353,17 +350,6 @@
/* Inner write-alloc cache setup, no outer caching */
#define AS_MEMATTR_WRITE_ALLOC 0x8Dull
-/* Set to implementation defined, outer caching */
-#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
-/* Set to write back memory, outer caching */
-#define AS_MEMATTR_AARCH64_OUTER_WA 0x8Dull
-/* Set to inner non-cacheable, outer-non-cacheable
- * Setting defined by the alloc bits is ignored, but set to a valid encoding:
- * - no-alloc on read
- * - no alloc on write
- */
-#define AS_MEMATTR_AARCH64_NON_CACHEABLE 0x4Cull
-
/* Use GPU implementation-defined caching policy. */
#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull
/* The attribute set to force all resources to be cached. */
@@ -380,24 +366,6 @@
*/
#define AS_MEMATTR_LPAE_NON_CACHEABLE_RESERVED
-/* Symbols for default MEMATTR to use
- * Default is - HW implementation defined caching */
-#define AS_MEMATTR_INDEX_DEFAULT 0
-#define AS_MEMATTR_INDEX_DEFAULT_ACE 3
-
-/* HW implementation defined caching */
-#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
-/* Force cache on */
-#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1
-/* Write-alloc */
-#define AS_MEMATTR_INDEX_WRITE_ALLOC 2
-/* Outer coherent, inner implementation defined policy */
-#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3
-/* Outer coherent, write alloc inner */
-#define AS_MEMATTR_INDEX_OUTER_WA 4
-/* Normal memory, inner non-cacheable, outer non-cacheable (ARMv8 mode only) */
-#define AS_MEMATTR_INDEX_NON_CACHEABLE 5
-
/* L2_MMU_CONFIG register */
#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT (23)
#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
@@ -440,5 +408,8 @@
#define L2_CONFIG_HASH_MASK (0xFFul << L2_CONFIG_HASH_SHIFT)
/* End L2_CONFIG register */
+/* IDVS_GROUP register */
+#define IDVS_GROUP_SIZE_SHIFT (16)
+#define IDVS_GROUP_MAX_SIZE (0x3F)
-#endif /* _MIDG_REGMAP_H_ */
+#endif /* _KBASE_GPU_REGMAP_H_ */
diff --git a/mali_kbase/mali_base_hwconfig_features.h b/mali_kbase/mali_base_hwconfig_features.h
index c25f870..9ce8a0b 100644
--- a/mali_kbase/mali_base_hwconfig_features.h
+++ b/mali_kbase/mali_base_hwconfig_features.h
@@ -48,7 +48,6 @@ enum base_hw_feature {
BASE_HW_FEATURE_BRNDOUT_KILL,
BASE_HW_FEATURE_WARPING,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -85,7 +84,6 @@ static const enum base_hw_feature base_hw_features_tMIx[] = {
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_END
@@ -112,7 +110,6 @@ static const enum base_hw_feature base_hw_features_tHEx[] = {
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -140,7 +137,6 @@ static const enum base_hw_feature base_hw_features_tSIx[] = {
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -168,7 +164,6 @@ static const enum base_hw_feature base_hw_features_tDVx[] = {
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -196,7 +191,6 @@ static const enum base_hw_feature base_hw_features_tNOx[] = {
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -226,7 +220,6 @@ static const enum base_hw_feature base_hw_features_tGOx[] = {
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -255,7 +248,6 @@ static const enum base_hw_feature base_hw_features_tTRx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -284,7 +276,6 @@ static const enum base_hw_feature base_hw_features_tNAx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -313,7 +304,6 @@ static const enum base_hw_feature base_hw_features_tBEx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -323,35 +313,6 @@ static const enum base_hw_feature base_hw_features_tBEx[] = {
BASE_HW_FEATURE_END
};
-static const enum base_hw_feature base_hw_features_tULx[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_XAFFINITY,
- BASE_HW_FEATURE_WARPING,
- BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_BRNDOUT_CC,
- BASE_HW_FEATURE_BRNDOUT_KILL,
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LD_ST_TILEBUFFER,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_MRT,
- BASE_HW_FEATURE_MSAA_16X,
- BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
- BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_TEST4_DATUM_MODE,
- BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
- BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
- BASE_HW_FEATURE_COHERENCY_REG,
- BASE_HW_FEATURE_AARCH64_MMU,
- BASE_HW_FEATURE_L2_CONFIG,
- BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
- BASE_HW_FEATURE_END
-};
-
static const enum base_hw_feature base_hw_features_tDUx[] = {
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
@@ -372,7 +333,6 @@ static const enum base_hw_feature base_hw_features_tDUx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -402,7 +362,6 @@ static const enum base_hw_feature base_hw_features_tODx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -411,7 +370,7 @@ static const enum base_hw_feature base_hw_features_tODx[] = {
BASE_HW_FEATURE_END
};
-static const enum base_hw_feature base_hw_features_tIDx[] = {
+static const enum base_hw_feature base_hw_features_tGRx[] = {
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
BASE_HW_FEATURE_XAFFINITY,
@@ -431,7 +390,6 @@ static const enum base_hw_feature base_hw_features_tIDx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
@@ -460,7 +418,6 @@ static const enum base_hw_feature base_hw_features_tVAx[] = {
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
diff --git a/mali_kbase/mali_base_hwconfig_issues.h b/mali_kbase/mali_base_hwconfig_issues.h
index 1a28bb1..acbe77a 100644
--- a/mali_kbase/mali_base_hwconfig_issues.h
+++ b/mali_kbase/mali_base_hwconfig_issues.h
@@ -53,7 +53,10 @@ enum base_hw_issue {
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
BASE_HW_ISSUE_GPU2017_1336,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -328,7 +331,10 @@ static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
BASE_HW_ISSUE_GPU2017_1336,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -339,7 +345,10 @@ static const enum base_hw_issue base_hw_issues_tTRx_r0p1[] = {
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
BASE_HW_ISSUE_GPU2017_1336,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -348,6 +357,9 @@ static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -358,7 +370,10 @@ static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
BASE_HW_ISSUE_GPU2017_1336,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -369,7 +384,10 @@ static const enum base_hw_issue base_hw_issues_tNAx_r0p1[] = {
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
BASE_HW_ISSUE_GPU2017_1336,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -378,6 +396,9 @@ static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -387,6 +408,9 @@ static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -396,6 +420,9 @@ static const enum base_hw_issue base_hw_issues_tBEx_r1p0[] = {
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -404,22 +431,9 @@ static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_tULx_r0p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TSIX_2033,
- BASE_HW_ISSUE_TTRX_1337,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_tULx[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TSIX_2033,
- BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -428,6 +442,9 @@ static const enum base_hw_issue base_hw_issues_tDUx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -436,6 +453,9 @@ static const enum base_hw_issue base_hw_issues_model_tDUx[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -443,6 +463,7 @@ static const enum base_hw_issue base_hw_issues_tODx_r0p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -451,21 +472,24 @@ static const enum base_hw_issue base_hw_issues_model_tODx[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
-static const enum base_hw_issue base_hw_issues_tIDx_r0p0[] = {
+static const enum base_hw_issue base_hw_issues_tGRx_r0p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
-static const enum base_hw_issue base_hw_issues_model_tIDx[] = {
+static const enum base_hw_issue base_hw_issues_model_tGRx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -473,6 +497,7 @@ static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
@@ -481,6 +506,7 @@ static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3470,
BASE_HW_ISSUE_END
};
diff --git a/mali_kbase/mali_base_kernel.h b/mali_kbase/mali_base_kernel.h
index bd42e5c..3f5a6da 100644
--- a/mali_kbase/mali_base_kernel.h
+++ b/mali_kbase/mali_base_kernel.h
@@ -36,8 +36,8 @@ typedef struct base_mem_handle {
} base_mem_handle;
#include "mali_base_mem_priv.h"
-#include "mali_midg_coherency.h"
-#include "mali_kbase_gpu_id.h"
+#include "gpu/mali_kbase_gpu_coherency.h"
+#include "gpu/mali_kbase_gpu_id.h"
/*
* Dependency stuff, keep it private for now. May want to expose it if
diff --git a/mali_kbase/mali_kbase.h b/mali_kbase/mali_kbase.h
index 1ab785e..119e2db 100644
--- a/mali_kbase/mali_kbase.h
+++ b/mali_kbase/mali_kbase.h
@@ -64,11 +64,11 @@
#include "mali_kbase_mem.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
+#include "mali_kbase_gpuprops.h"
+#include "mali_kbase_ioctl.h"
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
-#include "mali_kbase_gpuprops.h"
#include "mali_kbase_jm.h"
-#include "mali_kbase_ioctl.h"
#include "ipa/mali_kbase_ipa.h"
@@ -97,16 +97,8 @@ struct kbase_device *kbase_device_alloc(void);
* been setup before calling kbase_device_init
*/
-/*
-* API to acquire device list semaphore and return pointer
-* to the device list head
-*/
-const struct list_head *kbase_dev_list_get(void);
-/* API to release the device list semaphore */
-void kbase_dev_list_put(const struct list_head *dev_list);
-
-int kbase_device_init(struct kbase_device * const kbdev);
-void kbase_device_term(struct kbase_device *kbdev);
+int kbase_device_misc_init(struct kbase_device *kbdev);
+void kbase_device_misc_term(struct kbase_device *kbdev);
void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
@@ -156,6 +148,32 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
const unsigned long addr, const unsigned long len,
const unsigned long pgoff, const unsigned long flags);
+
+int assign_irqs(struct kbase_device *kbdev);
+
+int kbase_sysfs_init(struct kbase_device *kbdev);
+void kbase_sysfs_term(struct kbase_device *kbdev);
+
+void kbase_device_debugfs_term(struct kbase_device *kbdev);
+
+int kbase_protected_mode_init(struct kbase_device *kbdev);
+void kbase_protected_mode_term(struct kbase_device *kbdev);
+
+int power_control_init(struct kbase_device *kbdev);
+void power_control_term(struct kbase_device *kbdev);
+
+int kbase_device_debugfs_init(struct kbase_device *kbdev);
+
+int registers_map(struct kbase_device *kbdev);
+void registers_unmap(struct kbase_device *kbdev);
+
+int kbase_device_coherency_init(struct kbase_device *kbdev);
+
+#ifdef CONFIG_MALI_BUSLOG
+int buslog_init(struct kbase_device *kbdev);
+void buslog_term(struct kbase_device *kbdev);
+#endif
+
int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);
@@ -243,6 +261,7 @@ void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
struct kbase_jd_atom *target_katom);
+
void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
int kbase_event_pending(struct kbase_context *ctx);
@@ -289,23 +308,6 @@ static inline void kbase_free_user_buffer(
}
/**
- * kbase_mem_copy_from_extres_page() - Copy pages from external resources.
- *
- * @kctx: kbase context within which the copying is to take place.
- * @extres_pages: Pointer to the pages which correspond to the external
- * resources from which the copying will take place.
- * @pages: Pointer to the pages to which the content is to be
- * copied from the provided external resources.
- * @nr_pages: Number of pages to copy.
- * @target_page_nr: Number of target pages which will be used for copying.
- * @offset: Offset into the target pages from which the copying
- * is to be performed.
- * @to_copy: Size of the chunk to be copied, in bytes.
- */
-void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
- void *extres_page, struct page **pages, unsigned int nr_pages,
- unsigned int *target_page_nr, size_t offset, size_t *to_copy);
-/**
* kbase_mem_copy_from_extres() - Copy from external resources.
*
* @kctx: kbase context within which the copying is to take place.
@@ -333,18 +335,6 @@ void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
void kbasep_as_do_poke(struct work_struct *work);
-/** Returns the name associated with a Mali exception code
- *
- * This function is called from the interrupt handler when a GPU fault occurs.
- * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
- *
- * @param[in] kbdev The kbase device that the GPU fault occurred from.
- * @param[in] exception_code exception code
- * @return name associated with the exception code
- */
-const char *kbase_exception_name(struct kbase_device *kbdev,
- u32 exception_code);
-
/**
* Check whether a system suspend is in progress, or has already been suspended
*
@@ -446,6 +436,8 @@ static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
* and during context creation.
*
* @param kbdev The kbase device
*/
void kbase_disjoint_init(struct kbase_device *kbdev);
diff --git a/mali_kbase/mali_kbase_as_fault_debugfs.c b/mali_kbase/mali_kbase_as_fault_debugfs.c
index 6f638cc..2e2e394 100644
--- a/mali_kbase/mali_kbase_as_fault_debugfs.c
+++ b/mali_kbase/mali_kbase_as_fault_debugfs.c
@@ -24,6 +24,7 @@
#include <mali_kbase.h>
#include <mali_kbase_as_fault_debugfs.h>
+#include <device/mali_kbase_device.h>
#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_MALI_DEBUG
@@ -36,7 +37,7 @@ static int kbase_as_fault_read(struct seq_file *sfile, void *data)
const struct list_head *kbdev_list;
struct kbase_device *kbdev = NULL;
- kbdev_list = kbase_dev_list_get();
+ kbdev_list = kbase_device_get_list();
list_for_each(entry, kbdev_list) {
kbdev = list_entry(entry, struct kbase_device, entry);
@@ -53,7 +54,7 @@ static int kbase_as_fault_read(struct seq_file *sfile, void *data)
}
- kbase_dev_list_put(kbdev_list);
+ kbase_device_put_list(kbdev_list);
return 0;
}
diff --git a/mali_kbase/mali_kbase_config_defaults.h b/mali_kbase/mali_kbase_config_defaults.h
index 6a1083c..a4c72da 100644
--- a/mali_kbase/mali_kbase_config_defaults.h
+++ b/mali_kbase/mali_kbase_config_defaults.h
@@ -33,27 +33,6 @@
/* Include mandatory definitions per platform */
#include <mali_kbase_config_platform.h>
-/**
-* Boolean indicating whether the driver is configured to be secure at
-* a potential loss of performance.
-*
-* This currently affects only r0p0-15dev0 HW and earlier.
-*
-* On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
-* performance:
-*
-* - When this is set to true, the driver remains fully secure,
-* but potentially loses performance compared with setting this to
-* false.
-* - When set to false, the driver is open to certain security
-* attacks.
-*
-* From r0p0-00rel0 and onwards, there is no security loss by setting
-* this to false, and no performance loss by setting it to
-* true.
-*/
-#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false
-
enum {
/**
* Use unrestricted Address ID width on the AXI bus.
@@ -211,5 +190,15 @@ enum {
*/
#define DEFAULT_GPU_FREQ_KHZ_MAX (5000)
+/**
+ * Default timeout for task execution on an endpoint
+ *
+ * Number of GPU clock cycles before the driver terminates a task that is
+ * making no forward progress on an endpoint (e.g. shader core).
+ * The value chosen is equivalent to the time after which a job is hard
+ * stopped, i.e. 5 seconds (assuming the GPU is usually clocked at ~500 MHz).
+ */
+#define DEFAULT_PROGRESS_TIMEOUT ((u64)5 * 500 * 1024 * 1024)
+
#endif /* _KBASE_CONFIG_DEFAULTS_H_ */
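
Note: as a quick sanity check on DEFAULT_PROGRESS_TIMEOUT above, the following standalone sketch evaluates the cycle budget against the assumed ~500 MHz clock (the in-source comment rounds 1024*1024 down to a million):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Same expression as DEFAULT_PROGRESS_TIMEOUT */
        const uint64_t timeout_cycles = (uint64_t)5 * 500 * 1024 * 1024;
        const double gpu_hz = 500e6; /* assumed ~500 MHz GPU clock */

        /* Prints roughly 5.24 s, i.e. the intended ~5 second budget */
        printf("%llu cycles = %.2f s\n",
               (unsigned long long)timeout_cycles,
               timeout_cycles / gpu_hz);
        return 0;
    }
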
diff --git a/mali_kbase/mali_kbase_context.c b/mali_kbase/mali_kbase_context.c
index 1cd854e..53bcc4f 100644
--- a/mali_kbase/mali_kbase_context.c
+++ b/mali_kbase/mali_kbase_context.c
@@ -27,12 +27,13 @@
*/
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_dma_fence.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem_pool_group.h>
#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_timeline.h>
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat,
@@ -262,12 +263,13 @@ void kbase_destroy_context(struct kbase_context *kctx)
* atom debugfs interface alive until all atoms have completed. This
* is useful for debugging hung contexts. */
debugfs_remove_recursive(kctx->kctx_dentry);
+
kbase_debug_job_fault_context_term(kctx);
+
#endif
kbase_event_cleanup(kctx);
-
/*
* JIT must be terminated before the code below as it must be called
* without the region lock being held.
@@ -330,5 +332,13 @@ void kbase_destroy_context(struct kbase_context *kctx)
vfree(kctx);
kbase_pm_context_idle(kbdev);
+
+ /* Flush the timeline stream, so the user can see the termination
+ * tracepoints being fired.
+ * The "if" statement below is for optimization. It is safe to call
+ * kbase_timeline_streams_flush when timeline is disabled.
+ */
+ if (atomic_read(&kbdev->timeline_is_enabled) != 0)
+ kbase_timeline_streams_flush(kbdev->timeline);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index 350e707..ca94279 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -22,7 +22,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_gator.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
@@ -57,6 +57,8 @@
#include "mali_kbase_hwcnt_legacy.h"
#include "mali_kbase_vinstr.h"
+#include "mali_kbase_cs_experimental.h"
+
#ifdef CONFIG_MALI_CINSTR_GWT
#include "mali_kbase_gwt.h"
#endif
@@ -102,17 +104,13 @@
#include <mali_kbase_timeline.h>
#include <mali_kbase_as_fault_debugfs.h>
+#include <device/mali_kbase_device.h>
/* GPU IRQ Tags */
#define JOB_IRQ_TAG 0
#define MMU_IRQ_TAG 1
#define GPU_IRQ_TAG 2
-static int kbase_dev_nr;
-
-static DEFINE_MUTEX(kbase_dev_list_lock);
-static LIST_HEAD(kbase_dev_list);
-
#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
/**
@@ -344,51 +342,20 @@ enum mali_error {
MALI_ERROR_FUNCTION_FAILED,
};
-enum {
- inited_mem = (1u << 0),
- inited_js = (1u << 1),
- /* Bit number 2 was earlier assigned to the runtime-pm initialization
- * stage (which has been merged with the backend_early stage).
- */
-#ifdef CONFIG_MALI_DEVFREQ
- inited_devfreq = (1u << 3),
-#endif /* CONFIG_MALI_DEVFREQ */
- inited_tlstream = (1u << 4),
- inited_backend_early = (1u << 5),
- inited_hwcnt_gpu_iface = (1u << 6),
- inited_hwcnt_gpu_ctx = (1u << 7),
- inited_hwcnt_gpu_virt = (1u << 8),
- inited_vinstr = (1u << 9),
- inited_backend_late = (1u << 10),
- inited_device = (1u << 11),
- inited_job_fault = (1u << 13),
- inited_sysfs_group = (1u << 14),
- inited_misc_register = (1u << 15),
- inited_get_device = (1u << 16),
- inited_dev_list = (1u << 17),
- inited_debugfs = (1u << 18),
- inited_gpu_device = (1u << 19),
- inited_registers_map = (1u << 20),
- inited_io_history = (1u << 21),
- inited_power_control = (1u << 22),
- inited_buslogger = (1u << 23),
- inited_protected = (1u << 24),
- inited_ctx_sched = (1u << 25)
-};
-
static struct kbase_device *to_kbase_device(struct device *dev)
{
return dev_get_drvdata(dev);
}
-static int assign_irqs(struct platform_device *pdev)
+int assign_irqs(struct kbase_device *kbdev)
{
- struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ struct platform_device *pdev;
int i;
if (!kbdev)
return -ENODEV;
+ pdev = to_platform_device(kbdev->dev);
/* 3 IRQ resources */
for (i = 0; i < 3; i++) {
struct resource *irq_res;
@@ -422,30 +389,12 @@ static int assign_irqs(struct platform_device *pdev)
return 0;
}
-/*
- * API to acquire device list mutex and
- * return pointer to the device list head
- */
-const struct list_head *kbase_dev_list_get(void)
-{
- mutex_lock(&kbase_dev_list_lock);
- return &kbase_dev_list;
-}
-KBASE_EXPORT_TEST_API(kbase_dev_list_get);
-
-/* API to release the device list mutex */
-void kbase_dev_list_put(const struct list_head *dev_list)
-{
- mutex_unlock(&kbase_dev_list_lock);
-}
-KBASE_EXPORT_TEST_API(kbase_dev_list_put);
-
/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
struct kbase_device *kbdev = NULL;
struct list_head *entry;
- const struct list_head *dev_list = kbase_dev_list_get();
+ const struct list_head *dev_list = kbase_device_get_list();
list_for_each(entry, dev_list) {
struct kbase_device *tmp;
@@ -457,7 +406,7 @@ struct kbase_device *kbase_find_device(int minor)
break;
}
}
- kbase_dev_list_put(dev_list);
+ kbase_device_put_list(dev_list);
return kbdev;
}
@@ -1688,6 +1637,16 @@ void kbase_event_wakeup(struct kbase_context *kctx)
KBASE_EXPORT_TEST_API(kbase_event_wakeup);
+int kbase_event_pending(struct kbase_context *ctx)
+{
+ KBASE_DEBUG_ASSERT(ctx);
+
+ return (atomic_read(&ctx->event_count) != 0) ||
+ (atomic_read(&ctx->event_closed) != 0);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_pending);
+
static int kbase_mmap(struct file *const filp, struct vm_area_struct *const vma)
{
struct kbase_file *const kfile = filp->private_data;
@@ -2417,7 +2376,7 @@ static ssize_t show_js_softstop_always(struct device *dev,
* (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
*/
static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
-#endif /* CONFIG_MALI_DEBUG */
+#endif /* !MALI_USE_CSF */
#ifdef CONFIG_MALI_DEBUG
typedef void (kbasep_debug_command_func) (struct kbase_device *);
@@ -2564,9 +2523,13 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
{ .id = GPU_ID2_PRODUCT_LBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-LBEX" },
{ .id = GPU_ID2_PRODUCT_TNAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-TNAX" },
+ .name = "Mali-G57" },
{ .id = GPU_ID2_PRODUCT_TODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-TODX" },
+ { .id = GPU_ID2_PRODUCT_TGRX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TGRX" },
+ { .id = GPU_ID2_PRODUCT_TVAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TVAX" },
{ .id = GPU_ID2_PRODUCT_LODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-LODX" },
};
@@ -3245,80 +3208,45 @@ static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
-static int kbasep_protected_mode_init(struct kbase_device *kbdev)
+static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
{
-#ifdef CONFIG_OF
- struct device_node *protected_node;
- struct platform_device *pdev;
- struct protected_mode_device *protected_dev;
-#endif
-
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
- /* Use native protected ops */
- kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
- GFP_KERNEL);
- if (!kbdev->protected_dev)
- return -ENOMEM;
- kbdev->protected_dev->data = kbdev;
- kbdev->protected_ops = &kbase_native_protected_ops;
- kbdev->protected_mode_support = true;
- INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
- kbasep_protected_mode_hwcnt_disable_worker);
- kbdev->protected_mode_hwcnt_desired = true;
- kbdev->protected_mode_hwcnt_disabled = false;
- return 0;
- }
-
- kbdev->protected_mode_support = false;
-
-#ifdef CONFIG_OF
- protected_node = of_parse_phandle(kbdev->dev->of_node,
- "protected-mode-switcher", 0);
-
- if (!protected_node)
- protected_node = of_parse_phandle(kbdev->dev->of_node,
- "secure-mode-switcher", 0);
-
- if (!protected_node) {
- /* If protected_node cannot be looked up then we assume
- * protected mode is not supported on this platform. */
- dev_info(kbdev->dev, "Protected mode not available\n");
- return 0;
- }
-
- pdev = of_find_device_by_node(protected_node);
- if (!pdev)
- return -EINVAL;
+ struct kbase_device *kbdev = pdev->data;
- protected_dev = platform_get_drvdata(pdev);
- if (!protected_dev)
- return -EPROBE_DEFER;
+ return kbase_pm_protected_mode_enable(kbdev);
+}
- kbdev->protected_ops = &protected_dev->ops;
- kbdev->protected_dev = protected_dev;
+static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
+{
+ struct kbase_device *kbdev = pdev->data;
- if (kbdev->protected_ops) {
- int err;
+ return kbase_pm_protected_mode_disable(kbdev);
+}
- /* Make sure protected mode is disabled on startup */
- mutex_lock(&kbdev->pm.lock);
- err = kbdev->protected_ops->protected_mode_disable(
- kbdev->protected_dev);
- mutex_unlock(&kbdev->pm.lock);
+static const struct protected_mode_ops kbasep_native_protected_ops = {
+ .protected_mode_enable = kbasep_protected_mode_enable,
+ .protected_mode_disable = kbasep_protected_mode_disable
+};
- /* protected_mode_disable() returns -EINVAL if not supported */
- kbdev->protected_mode_support = (err != -EINVAL);
- }
-#endif
+int kbase_protected_mode_init(struct kbase_device *kbdev)
+{
+ /* Use native protected ops */
+ kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
+ GFP_KERNEL);
+ if (!kbdev->protected_dev)
+ return -ENOMEM;
+ kbdev->protected_dev->data = kbdev;
+ kbdev->protected_ops = &kbasep_native_protected_ops;
+ INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
+ kbasep_protected_mode_hwcnt_disable_worker);
+ kbdev->protected_mode_hwcnt_desired = true;
+ kbdev->protected_mode_hwcnt_disabled = false;
return 0;
}
-static void kbasep_protected_mode_term(struct kbase_device *kbdev)
+void kbase_protected_mode_term(struct kbase_device *kbdev)
{
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
- cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
- kfree(kbdev->protected_dev);
- }
+ cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
+ kfree(kbdev->protected_dev);
}
#ifdef CONFIG_MALI_NO_MALI
@@ -3349,9 +3277,9 @@ static int kbase_common_reg_map(struct kbase_device *kbdev)
return err;
- out_ioremap:
+out_ioremap:
release_mem_region(kbdev->reg_start, kbdev->reg_size);
- out_region:
+out_region:
return err;
}
@@ -3367,40 +3295,40 @@ static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
}
#endif /* CONFIG_MALI_NO_MALI */
-static int registers_map(struct kbase_device * const kbdev)
+int registers_map(struct kbase_device * const kbdev)
{
+ /* the first memory resource is the physical address of the GPU
+ * registers.
+ */
+ struct platform_device *pdev = to_platform_device(kbdev->dev);
+ struct resource *reg_res;
+ int err;
- /* the first memory resource is the physical address of the GPU
- * registers */
- struct platform_device *pdev = to_platform_device(kbdev->dev);
- struct resource *reg_res;
- int err;
-
- reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!reg_res) {
- dev_err(kbdev->dev, "Invalid register resource\n");
- return -ENOENT;
- }
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!reg_res) {
+ dev_err(kbdev->dev, "Invalid register resource\n");
+ return -ENOENT;
+ }
- kbdev->reg_start = reg_res->start;
- kbdev->reg_size = resource_size(reg_res);
+ kbdev->reg_start = reg_res->start;
+ kbdev->reg_size = resource_size(reg_res);
- err = kbase_common_reg_map(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Failed to map registers\n");
- return err;
- }
+ err = kbase_common_reg_map(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Failed to map registers\n");
+ return err;
+ }
return 0;
}
-static void registers_unmap(struct kbase_device *kbdev)
+void registers_unmap(struct kbase_device *kbdev)
{
kbase_common_reg_unmap(kbdev);
}
-static int power_control_init(struct platform_device *pdev)
+int power_control_init(struct kbase_device *kbdev)
{
#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF)
/* Power control initialization requires at least the capability to get
@@ -3412,7 +3340,7 @@ static int power_control_init(struct platform_device *pdev)
*/
return 0;
#else
- struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ struct platform_device *pdev;
int err = 0;
unsigned int i;
#if defined(CONFIG_REGULATOR)
@@ -3425,6 +3353,8 @@ static int power_control_init(struct platform_device *pdev)
if (!kbdev)
return -ENODEV;
+ pdev = to_platform_device(kbdev->dev);
+
#if defined(CONFIG_REGULATOR)
/* Since the error code EPROBE_DEFER causes the entire probing
* procedure to be restarted from scratch at a later time,
@@ -3525,7 +3455,7 @@ clocks_probe_defer:
#endif /* KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE */
}
-static void power_control_term(struct kbase_device *kbdev)
+void power_control_term(struct kbase_device *kbdev)
{
unsigned int i;
@@ -3687,7 +3617,7 @@ static const struct file_operations
.release = single_release,
};
-static int kbase_device_debugfs_init(struct kbase_device *kbdev)
+int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
struct dentry *debugfs_ctx_defaults_directory;
int err;
@@ -3722,6 +3652,7 @@ static int kbase_device_debugfs_init(struct kbase_device *kbdev)
kbasep_regs_history_debugfs_init(kbdev);
kbase_debug_job_fault_debugfs_init(kbdev);
+
kbasep_gpu_memory_debugfs_init(kbdev);
kbase_as_fault_debugfs_init(kbdev);
/* fops_* variables created by invocations of macro
@@ -3765,7 +3696,7 @@ static int kbase_device_debugfs_init(struct kbase_device *kbdev)
#ifdef CONFIG_MALI_DEVFREQ
#ifdef CONFIG_DEVFREQ_THERMAL
- if (kbdev->inited_subsys & inited_devfreq)
+ if (kbdev->devfreq)
kbase_ipa_debugfs_init(kbdev);
#endif /* CONFIG_DEVFREQ_THERMAL */
#endif /* CONFIG_MALI_DEVFREQ */
@@ -3782,7 +3713,7 @@ out:
return err;
}
-static void kbase_device_debugfs_term(struct kbase_device *kbdev)
+void kbase_device_debugfs_term(struct kbase_device *kbdev)
{
debugfs_remove_recursive(kbdev->mali_debugfs_directory);
}
@@ -3797,14 +3728,18 @@ static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
#endif /* CONFIG_DEBUG_FS */
#endif /* MALI_KBASE_BUILD */
-static void kbase_device_coherency_init(struct kbase_device *kbdev,
- unsigned prod_id)
+int kbase_device_coherency_init(struct kbase_device *kbdev)
{
#ifdef CONFIG_OF
u32 supported_coherency_bitmap =
kbdev->gpu_props.props.raw_props.coherency_mode;
const void *coherency_override_dts;
- u32 override_coherency;
+ u32 override_coherency, gpu_id;
+ unsigned int prod_id;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
+ prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
/* Only for tMIx :
* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
@@ -3849,6 +3784,8 @@ static void kbase_device_coherency_init(struct kbase_device *kbdev,
kbdev->gpu_props.props.raw_props.coherency_mode =
kbdev->system_coherency;
+
+ return 0;
}
#ifdef CONFIG_MALI_BUSLOG
@@ -3865,6 +3802,25 @@ static void kbase_logging_started_cb(void *data)
kbase_reset_gpu(kbdev);
dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
}
+
+int buslog_init(struct kbase_device *kbdev)
+{
+ int err = 0;
+
+ err = bl_core_client_register(kbdev->devname,
+ kbase_logging_started_cb,
+ kbdev, &kbdev->buslogger,
+ THIS_MODULE, NULL);
+ if (err == 0)
+ bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
+
+ return err;
+}
+
+void buslog_term(struct kbase_device *kbdev)
+{
+ bl_core_client_unregister(kbdev->buslogger);
+}
#endif
static struct attribute *kbase_attrs[] = {
@@ -3893,151 +3849,35 @@ static const struct attribute_group kbase_attr_group = {
.attrs = kbase_attrs,
};
-static int kbase_platform_device_remove(struct platform_device *pdev)
+int kbase_sysfs_init(struct kbase_device *kbdev)
{
- struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
- const struct list_head *dev_list;
-
- if (!kbdev)
- return -ENODEV;
-
- kfree(kbdev->gpu_props.prop_buffer);
-
-#ifdef CONFIG_MALI_BUSLOG
- if (kbdev->inited_subsys & inited_buslogger) {
- bl_core_client_unregister(kbdev->buslogger);
- kbdev->inited_subsys &= ~inited_buslogger;
- }
-#endif
-
- if (kbdev->inited_subsys & inited_dev_list) {
- dev_list = kbase_dev_list_get();
- list_del(&kbdev->entry);
- kbase_dev_list_put(dev_list);
- kbdev->inited_subsys &= ~inited_dev_list;
- }
-
- if (kbdev->inited_subsys & inited_misc_register) {
- misc_deregister(&kbdev->mdev);
- kbdev->inited_subsys &= ~inited_misc_register;
- }
-
- if (kbdev->inited_subsys & inited_sysfs_group) {
- sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
- kbdev->inited_subsys &= ~inited_sysfs_group;
- }
-
- if (kbdev->inited_subsys & inited_get_device) {
- put_device(kbdev->dev);
- kbdev->inited_subsys &= ~inited_get_device;
- }
-
-#ifdef MALI_KBASE_BUILD
- if (kbdev->inited_subsys & inited_debugfs) {
- kbase_device_debugfs_term(kbdev);
- kbdev->inited_subsys &= ~inited_debugfs;
- }
-#endif
-
- if (kbdev->inited_subsys & inited_job_fault) {
- kbase_debug_job_fault_dev_term(kbdev);
- kbdev->inited_subsys &= ~inited_job_fault;
- }
-
-
- if (kbdev->inited_subsys & inited_backend_late) {
- kbase_backend_late_term(kbdev);
- kbdev->inited_subsys &= ~inited_backend_late;
- }
-
- if (kbdev->inited_subsys & inited_vinstr) {
- kbase_vinstr_term(kbdev->vinstr_ctx);
- kbdev->inited_subsys &= ~inited_vinstr;
- }
-
- if (kbdev->inited_subsys & inited_hwcnt_gpu_virt) {
- kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
- kbdev->inited_subsys &= ~inited_hwcnt_gpu_virt;
- }
-
- if (kbdev->inited_subsys & inited_hwcnt_gpu_ctx) {
- kbase_hwcnt_context_term(kbdev->hwcnt_gpu_ctx);
- kbdev->inited_subsys &= ~inited_hwcnt_gpu_ctx;
- }
-
- if (kbdev->inited_subsys & inited_hwcnt_gpu_iface) {
- kbase_hwcnt_backend_gpu_destroy(&kbdev->hwcnt_gpu_iface);
- kbdev->inited_subsys &= ~inited_hwcnt_gpu_iface;
- }
-
- if (kbdev->inited_subsys & inited_tlstream) {
- kbase_timeline_term(kbdev->timeline);
- kbdev->inited_subsys &= ~inited_tlstream;
- }
-
- /* Bring job and mem sys to a halt before we continue termination */
-
- if (kbdev->inited_subsys & inited_js)
- kbasep_js_devdata_halt(kbdev);
-
- if (kbdev->inited_subsys & inited_mem)
- kbase_mem_halt(kbdev);
-
- if (kbdev->inited_subsys & inited_protected) {
- kbasep_protected_mode_term(kbdev);
- kbdev->inited_subsys &= ~inited_protected;
- }
-
- if (kbdev->inited_subsys & inited_js) {
- kbasep_js_devdata_term(kbdev);
- kbdev->inited_subsys &= ~inited_js;
- }
-
- if (kbdev->inited_subsys & inited_mem) {
- kbase_mem_term(kbdev);
- kbdev->inited_subsys &= ~inited_mem;
- }
-
- if (kbdev->inited_subsys & inited_ctx_sched) {
- kbase_ctx_sched_term(kbdev);
- kbdev->inited_subsys &= ~inited_ctx_sched;
- }
-
- if (kbdev->inited_subsys & inited_device) {
- kbase_device_term(kbdev);
- kbdev->inited_subsys &= ~inited_device;
- }
-
- if (kbdev->inited_subsys & inited_backend_early) {
- kbase_backend_early_term(kbdev);
- kbdev->inited_subsys &= ~inited_backend_early;
- }
+ int err = 0;
- if (kbdev->inited_subsys & inited_io_history) {
- kbase_io_history_term(&kbdev->io_history);
- kbdev->inited_subsys &= ~inited_io_history;
- }
+ kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
+ kbdev->mdev.name = kbdev->devname;
+ kbdev->mdev.fops = &kbase_fops;
+ kbdev->mdev.parent = get_device(kbdev->dev);
+ kbdev->mdev.mode = 0666;
- if (kbdev->inited_subsys & inited_power_control) {
- power_control_term(kbdev);
- kbdev->inited_subsys &= ~inited_power_control;
- }
+ err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
+ return err;
+}
- if (kbdev->inited_subsys & inited_registers_map) {
- registers_unmap(kbdev);
- kbdev->inited_subsys &= ~inited_registers_map;
- }
+void kbase_sysfs_term(struct kbase_device *kbdev)
+{
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
+ put_device(kbdev->dev);
+}
-#ifdef CONFIG_MALI_NO_MALI
- if (kbdev->inited_subsys & inited_gpu_device) {
- gpu_device_destroy(kbdev);
- kbdev->inited_subsys &= ~inited_gpu_device;
- }
-#endif /* CONFIG_MALI_NO_MALI */
+static int kbase_platform_device_remove(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
- if (kbdev->inited_subsys != 0)
- dev_err(kbdev->dev, "Missing sub system termination\n");
+ if (!kbdev)
+ return -ENODEV;
+ kbase_device_term(kbdev);
+ dev_set_drvdata(kbdev->dev, NULL);
kbase_device_free(kbdev);
return 0;
@@ -4046,10 +3886,8 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
void kbase_backend_devfreq_term(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_DEVFREQ
- if (kbdev->inited_subsys & inited_devfreq) {
+ if (kbdev->devfreq)
kbase_devfreq_term(kbdev);
- kbdev->inited_subsys &= ~inited_devfreq;
- }
#endif
}
@@ -4059,295 +3897,39 @@ int kbase_backend_devfreq_init(struct kbase_device *kbdev)
/* Devfreq uses hardware counters, so must be initialized after it. */
int err = kbase_devfreq_init(kbdev);
- if (!err)
- kbdev->inited_subsys |= inited_devfreq;
- else
+ if (err)
dev_err(kbdev->dev, "Continuing without devfreq\n");
#endif /* CONFIG_MALI_DEVFREQ */
return 0;
}
-/* Number of register accesses for the buffer that we allocate during
- * initialization time. The buffer size can be changed later via debugfs. */
-#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
-
static int kbase_platform_device_probe(struct platform_device *pdev)
{
struct kbase_device *kbdev;
- struct mali_base_gpu_core_props *core_props;
- u32 gpu_id;
- unsigned prod_id;
- const struct list_head *dev_list;
int err = 0;
+ mali_kbase_print_cs_experimental();
+
kbdev = kbase_device_alloc();
if (!kbdev) {
dev_err(&pdev->dev, "Allocate device failed\n");
- kbase_platform_device_remove(pdev);
return -ENOMEM;
}
kbdev->dev = &pdev->dev;
dev_set_drvdata(kbdev->dev, kbdev);
-#ifdef CONFIG_MALI_NO_MALI
- err = gpu_device_create(kbdev);
- if (err) {
- dev_err(&pdev->dev, "Dummy model initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_gpu_device;
-#endif /* CONFIG_MALI_NO_MALI */
-
- err = assign_irqs(pdev);
- if (err) {
- dev_err(&pdev->dev, "IRQ search failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
-
- err = registers_map(kbdev);
- if (err) {
- dev_err(&pdev->dev, "Register map failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_registers_map;
-
- err = power_control_init(pdev);
- if (err) {
- dev_err(&pdev->dev, "Power control initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_power_control;
-
- err = kbase_io_history_init(&kbdev->io_history,
- KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Register access history initialization failed\n");
- kbase_platform_device_remove(pdev);
- return -ENOMEM;
- }
- kbdev->inited_subsys |= inited_io_history;
-
- err = kbase_backend_early_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Early backend initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_backend_early;
-
- scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
- kbase_dev_nr);
- kbdev->id = kbase_dev_nr;
-
- kbase_disjoint_init(kbdev);
-
- /* obtain max configured gpu frequency, if devfreq is enabled then
- * this will be overridden by the highest operating point found
- */
- core_props = &(kbdev->gpu_props.props.core_props);
-#ifdef GPU_FREQ_KHZ_MAX
- core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
-#else
- core_props->gpu_freq_khz_max = DEFAULT_GPU_FREQ_KHZ_MAX;
-#endif
-
err = kbase_device_init(kbdev);
if (err) {
- dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_device;
-
- err = kbase_ctx_sched_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Context scheduler initialization failed (%d)\n",
- err);
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_ctx_sched;
-
- err = kbase_mem_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_mem;
-
- gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
- prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
-
- kbase_device_coherency_init(kbdev, prod_id);
-
- err = kbasep_protected_mode_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Protected mode subsystem initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_protected;
-
- dev_list = kbase_dev_list_get();
- list_add(&kbdev->entry, &kbase_dev_list);
- kbase_dev_list_put(dev_list);
- kbdev->inited_subsys |= inited_dev_list;
-
- err = kbasep_js_devdata_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_js;
-
- atomic_set(&kbdev->timeline_is_enabled, 0);
- err = kbase_timeline_init(&kbdev->timeline, &kbdev->timeline_is_enabled);
- if (err) {
- dev_err(kbdev->dev, "Timeline stream initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_tlstream;
-
- err = kbase_hwcnt_backend_gpu_create(kbdev, &kbdev->hwcnt_gpu_iface);
- if (err) {
- dev_err(kbdev->dev, "GPU hwcnt backend creation failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_hwcnt_gpu_iface;
-
- err = kbase_hwcnt_context_init(&kbdev->hwcnt_gpu_iface,
- &kbdev->hwcnt_gpu_ctx);
- if (err) {
- dev_err(kbdev->dev,
- "GPU hwcnt context initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_hwcnt_gpu_ctx;
-
- err = kbase_hwcnt_virtualizer_init(
- kbdev->hwcnt_gpu_ctx,
- KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS,
- &kbdev->hwcnt_gpu_virt);
- if (err) {
- dev_err(kbdev->dev,
- "GPU hwcnt virtualizer initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_hwcnt_gpu_virt;
-
- err = kbase_vinstr_init(kbdev->hwcnt_gpu_virt, &kbdev->vinstr_ctx);
- if (err) {
- dev_err(kbdev->dev,
- "Virtual instrumentation initialization failed\n");
- kbase_platform_device_remove(pdev);
- return -EINVAL;
- }
- kbdev->inited_subsys |= inited_vinstr;
-
- /* The initialization of the devfreq is now embedded inside the
- * kbase_backend_late_init(), calling the kbase_backend_devfreq_init()
- * before the first trigger of pm_context_idle(). */
- err = kbase_backend_late_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Late backend initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_backend_late;
-
-
-#ifdef MALI_KBASE_BUILD
- err = kbase_debug_job_fault_dev_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Job fault debug initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_job_fault;
-
- err = kbase_device_debugfs_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "DebugFS initialization failed");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_debugfs;
-
- kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
- kbdev->mdev.name = kbdev->devname;
- kbdev->mdev.fops = &kbase_fops;
- kbdev->mdev.parent = get_device(kbdev->dev);
- kbdev->mdev.mode = 0666;
- kbdev->inited_subsys |= inited_get_device;
-
- /* This needs to happen before registering the device with misc_register(),
- * otherwise it causes a race condition between registering the device and a
- * uevent event being generated for userspace, causing udev rules to run
- * which might expect certain sysfs attributes present. As a result of the
- * race condition we avoid, some Mali sysfs entries may have appeared to
- * udev to not exist.
-
- * For more information, see
- * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the
- * paragraph that starts with "Word of warning", currently the second-last
- * paragraph.
- */
- err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
- if (err) {
- dev_err(&pdev->dev, "SysFS group creation failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_sysfs_group;
-
- err = misc_register(&kbdev->mdev);
- if (err) {
- dev_err(kbdev->dev, "Misc device registration failed for %s\n",
- kbdev->devname);
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_misc_register;
-
-
-#ifdef CONFIG_MALI_BUSLOG
- err = bl_core_client_register(kbdev->devname,
- kbase_logging_started_cb,
- kbdev, &kbdev->buslogger,
- THIS_MODULE, NULL);
- if (err == 0) {
- kbdev->inited_subsys |= inited_buslogger;
- bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
+ dev_err(kbdev->dev, "Device initialization failed\n");
+ dev_set_drvdata(kbdev->dev, NULL);
+ kbase_device_free(kbdev);
} else {
- dev_warn(kbdev->dev, "Bus log client registration failed\n");
- err = 0;
- }
-#endif
-
- err = kbase_gpuprops_populate_user_buffer(kbdev);
- if (err) {
- dev_err(&pdev->dev, "GPU property population failed");
- kbase_platform_device_remove(pdev);
- return err;
- }
-
- dev_info(kbdev->dev,
+#ifdef MALI_KBASE_BUILD
+ dev_info(kbdev->dev,
"Probed as %s\n", dev_name(kbdev->mdev.this_device));
-
- kbase_dev_nr++;
#endif /* MALI_KBASE_BUILD */
+ }
return err;
}
@@ -4375,7 +3957,7 @@ static int kbase_device_suspend(struct device *dev)
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
dev_dbg(dev, "Callback %s\n", __func__);
- if (kbdev->inited_subsys & inited_devfreq) {
+ if (kbdev->devfreq) {
kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
flush_workqueue(kbdev->devfreq_queue.workq);
}
@@ -4404,7 +3986,7 @@ static int kbase_device_resume(struct device *dev)
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
dev_dbg(dev, "Callback %s\n", __func__);
- if (kbdev->inited_subsys & inited_devfreq) {
+ if (kbdev->devfreq) {
mutex_lock(&kbdev->pm.lock);
if (kbdev->pm.active_count > 0)
kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
@@ -4437,7 +4019,7 @@ static int kbase_device_runtime_suspend(struct device *dev)
dev_dbg(dev, "Callback %s\n", __func__);
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- if (kbdev->inited_subsys & inited_devfreq)
+ if (kbdev->devfreq)
kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
#endif
@@ -4476,7 +4058,7 @@ static int kbase_device_runtime_resume(struct device *dev)
#if defined(CONFIG_MALI_DEVFREQ) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- if (kbdev->inited_subsys & inited_devfreq)
+ if (kbdev->devfreq)
kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
#endif
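
Note: the refactor above replaces the device-tree lookup with a native ops table installed by kbase_protected_mode_init(). A sketch of how a caller is expected to go through that table, assuming the usual PM locking is already held (the wrapper name is hypothetical and error handling is elided):

    /* Sketch only: enter protected mode via the ops installed by
     * kbase_protected_mode_init().
     */
    static int enter_protected_mode(struct kbase_device *kbdev)
    {
        return kbdev->protected_ops->protected_mode_enable(
                kbdev->protected_dev);
    }
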
diff --git a/mali_kbase/mali_kbase_cs_experimental.h b/mali_kbase/mali_kbase_cs_experimental.h
new file mode 100644
index 0000000..b68a105
--- /dev/null
+++ b/mali_kbase/mali_kbase_cs_experimental.h
@@ -0,0 +1,72 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_CS_EXPERIMENTAL_H_
+#define _KBASE_CS_EXPERIMENTAL_H_
+
+#include <linux/kernel.h>
+
+#if MALI_CS_EXPERIMENTAL
+
+/**
+ * mali_kbase_has_cs_experimental() - Has the driver been built with
+ * CS_EXPERIMENTAL=y
+ *
+ * It is preferable to guard cs_experimental code with this rather than
+ * using #ifdef throughout the code.
+ *
+ * Return: true if built with CS_EXPERIMENTAL=y, false otherwise
+ */
+static inline bool mali_kbase_has_cs_experimental(void)
+{
+ return true;
+}
+#else
+static inline bool mali_kbase_has_cs_experimental(void)
+{
+ return false;
+}
+#endif
+
+/**
+ * mali_kbase_print_cs_experimental() - Print a string if built with
+ * CS_EXPERIMENTAL=y
+ */
+static inline void mali_kbase_print_cs_experimental(void)
+{
+ if (mali_kbase_has_cs_experimental())
+ pr_info("mali_kbase: EXPERIMENTAL (MALI_CS_EXPERIMENTAL) flag enabled");
+}
+
+#endif /* _KBASE_CS_EXPERIMENTAL_H_ */
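
Note: as the header's own comment suggests, the helper is meant to replace scattered #ifdef MALI_CS_EXPERIMENTAL blocks. A sketch of the intended guard pattern; do_experimental_setup() is a hypothetical callee, not part of this patch:

    /* Both branches stay visible to the compiler; when the flag is =n the
     * inline returns false and the call is eliminated as dead code.
     */
    static void init_features(void)
    {
        if (mali_kbase_has_cs_experimental())
            do_experimental_setup();
    }
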
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index ad47a3e..e6d9b88 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -155,8 +155,15 @@
/** setting in kbase_context::as_nr that indicates it's invalid */
#define KBASEP_AS_NR_INVALID (-1)
-#define KBASE_LOCK_REGION_MAX_SIZE (63)
-#define KBASE_LOCK_REGION_MIN_SIZE (15)
+/**
+ * Maximum size of an MMU lock region, as a logarithm to base 2 (in bytes)
+ */
+#define KBASE_LOCK_REGION_MAX_SIZE_LOG2 (64)
+
+/**
+ * Minimum size of an MMU lock region, as a logarithm to base 2 (in bytes)
+ */
+#define KBASE_LOCK_REGION_MIN_SIZE_LOG2 (15)
#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */
#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
@@ -168,7 +175,7 @@
/* Maximum number of pages of memory that require a permanent mapping, per
* kbase_context
*/
-#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((1024ul * 1024ul) >> \
+#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((32 * 1024ul * 1024ul) >> \
PAGE_SHIFT)
/** Atom has been previously soft-stoppped */
@@ -1456,7 +1463,6 @@ struct kbase_devfreq_queue_info {
* enabled.
* @protected_mode_hwcnt_disable_work: Work item to disable GPU hardware
* counters, used if atomic disable is not possible.
- * @protected_mode_support: set to true if protected mode is supported.
* @buslogger: Pointer to the structure required for interfacing
* with the bus logger module to set the size of buffer
* used by the module for capturing bus logs.
@@ -1677,7 +1683,7 @@ struct kbase_device {
u32 snoop_enable_smc;
u32 snoop_disable_smc;
- struct protected_mode_ops *protected_ops;
+ const struct protected_mode_ops *protected_ops;
struct protected_mode_device *protected_dev;
@@ -1691,8 +1697,6 @@ struct kbase_device {
struct work_struct protected_mode_hwcnt_disable_work;
- bool protected_mode_support;
-
#ifdef CONFIG_MALI_BUSLOG
struct bus_logger_client *buslogger;
#endif
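
Note: the switch above from plain sizes to *_SIZE_LOG2 exponents is worth spelling out: the minimum exponent of 15 is a 32 KiB region, while the maximum of 64 denotes the entire 2^64 address space and cannot be obtained by a direct 64-bit shift (that would be undefined behaviour). A standalone sketch of the minimum case:

    #include <stdint.h>
    #include <stdio.h>

    #define KBASE_LOCK_REGION_MIN_SIZE_LOG2 (15)

    int main(void)
    {
        /* 1 << 15 = 32768 bytes, i.e. a 32 KiB minimum lock region */
        printf("min lock region = %llu bytes\n",
               (unsigned long long)(1ULL << KBASE_LOCK_REGION_MIN_SIZE_LOG2));
        return 0;
    }
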
diff --git a/mali_kbase/mali_kbase_event.c b/mali_kbase/mali_kbase_event.c
index 721af69..70e6dd6 100644
--- a/mali_kbase/mali_kbase_event.c
+++ b/mali_kbase/mali_kbase_event.c
@@ -50,16 +50,6 @@ static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, stru
return data;
}
-int kbase_event_pending(struct kbase_context *ctx)
-{
- KBASE_DEBUG_ASSERT(ctx);
-
- return (atomic_read(&ctx->event_count) != 0) ||
- (atomic_read(&ctx->event_closed) != 0);
-}
-
-KBASE_EXPORT_TEST_API(kbase_event_pending);
-
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
{
struct kbase_jd_atom *atom;
@@ -97,7 +87,6 @@ int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *ueve
if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
kbase_jd_free_external_resources(atom);
-
mutex_lock(&ctx->jctx.lock);
uevent->udata = kbase_event_process(ctx, atom);
mutex_unlock(&ctx->jctx.lock);
@@ -121,7 +110,6 @@ static void kbase_event_process_noreport_worker(struct work_struct *data)
if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
kbase_jd_free_external_resources(katom);
-
mutex_lock(&kctx->jctx.lock);
kbase_event_process(kctx, katom);
mutex_unlock(&kctx->jctx.lock);
diff --git a/mali_kbase/mali_kbase_gpu_memory_debugfs.c b/mali_kbase/mali_kbase_gpu_memory_debugfs.c
index 2c42f5c..569abd9 100644
--- a/mali_kbase/mali_kbase_gpu_memory_debugfs.c
+++ b/mali_kbase/mali_kbase_gpu_memory_debugfs.c
@@ -21,6 +21,7 @@
*/
#include <mali_kbase.h>
+#include <device/mali_kbase_device.h>
#ifdef CONFIG_DEBUG_FS
/** Show callback for the @c gpu_memory debugfs file.
@@ -40,7 +41,7 @@ static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
struct list_head *entry;
const struct list_head *kbdev_list;
- kbdev_list = kbase_dev_list_get();
+ kbdev_list = kbase_device_get_list();
list_for_each(entry, kbdev_list) {
struct kbase_device *kbdev = NULL;
struct kbase_context *kctx;
@@ -61,7 +62,7 @@ static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
}
mutex_unlock(&kbdev->kctx_list_lock);
}
- kbase_dev_list_put(kbdev_list);
+ kbase_device_put_list(kbdev_list);
return 0;
}
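
Note: both debugfs readers touched by this patch follow the same acquire/iterate/release pattern around the renamed accessors. A sketch of that pattern, mirroring the hunks above (the loop body is a placeholder):

    const struct list_head *kbdev_list;
    struct list_head *entry;

    kbdev_list = kbase_device_get_list();   /* takes the device-list lock */
    list_for_each(entry, kbdev_list) {
        struct kbase_device *kbdev =
            list_entry(entry, struct kbase_device, entry);
        /* inspect kbdev while the list is held */
    }
    kbase_device_put_list(kbdev_list);      /* releases the lock */
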
diff --git a/mali_kbase/mali_kbase_gpuprops.c b/mali_kbase/mali_kbase_gpuprops.c
index f6b70bd..d5495a1 100644
--- a/mali_kbase/mali_kbase_gpuprops.c
+++ b/mali_kbase/mali_kbase_gpuprops.c
@@ -27,9 +27,10 @@
*/
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_gpuprops.h>
#include <mali_kbase_hwaccess_gpuprops.h>
+#include <mali_kbase_config_defaults.h>
#include "mali_kbase_ioctl.h"
#include <linux/clk.h>
#include <mali_kbase_pm_internal.h>
@@ -247,8 +248,8 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, str
gpu_props->thread_props.tls_alloc =
gpu_props->raw_props.thread_tls_alloc;
- /* Workaround for GPU2019HW-509. MIDHARC-2364 was wrongfully applied
- * to tDUx GPUs.
+ /* MIDHARC-2364 was intended for tULx.
+	 * Workaround for THREAD_FEATURES being incorrectly applied to tDUx.
*/
gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
@@ -600,3 +601,25 @@ int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev)
return 0;
}
+
+void kbase_gpuprops_free_user_buffer(struct kbase_device *kbdev)
+{
+ kfree(kbdev->gpu_props.prop_buffer);
+}
+
+int kbase_device_populate_max_freq(struct kbase_device *kbdev)
+{
+ struct mali_base_gpu_core_props *core_props;
+
+	/* Obtain the max configured GPU frequency; if devfreq is enabled,
+	 * this will be overridden by the highest operating point found.
+ */
+ core_props = &(kbdev->gpu_props.props.core_props);
+#ifdef GPU_FREQ_KHZ_MAX
+ core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
+#else
+ core_props->gpu_freq_khz_max = DEFAULT_GPU_FREQ_KHZ_MAX;
+#endif
+
+ return 0;
+}
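The #ifdef fallback above lets a platform pin the reported maximum by defining GPU_FREQ_KHZ_MAX in its platform configuration header; otherwise DEFAULT_GPU_FREQ_KHZ_MAX (newly pulled in via mali_kbase_config_defaults.h) is used, and devfreq, when enabled, later overrides the value with the highest operating point found. A minimal sketch of such a platform header, with an illustrative 800 MHz value that is not taken from this patch:

	/* <platform>/mali_kbase_config_platform.h -- hypothetical sketch */
	#define GPU_FREQ_KHZ_MAX (800000)	/* report 800 MHz as the maximum */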
diff --git a/mali_kbase/mali_kbase_gpuprops.h b/mali_kbase/mali_kbase_gpuprops.h
index 8edba48..4fdb3f9 100644
--- a/mali_kbase/mali_kbase_gpuprops.h
+++ b/mali_kbase/mali_kbase_gpuprops.h
@@ -66,12 +66,30 @@ void kbase_gpuprops_update_l2_features(struct kbase_device *kbdev);
* kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer
* @kbdev: The kbase device
*
- * Fills kbdev->gpu_props->prop_buffer with the GPU properties for user
- * space to read.
+ * Fills prop_buffer with the GPU properties for user space to read.
*/
int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev);
/**
+ * kbase_gpuprops_free_user_buffer - Free the GPU properties buffer.
+ * @kbdev: kbase device pointer
+ *
+ * Free the GPU properties buffer allocated by
+ * kbase_gpuprops_populate_user_buffer().
+ */
+void kbase_gpuprops_free_user_buffer(struct kbase_device *kbdev);
+
+/**
+ * kbase_device_populate_max_freq - Populate max GPU frequency.
+ * @kbdev: kbase device pointer
+ *
+ * Populate the maximum GPU frequency to be used when devfreq is disabled.
+ *
+ * Return: 0 on success, or a non-zero value on failure.
+ */
+int kbase_device_populate_max_freq(struct kbase_device *kbdev);
+
+/**
* kbase_gpuprops_update_core_props_gpu_id - break down gpu id value
* @gpu_props: the &base_gpu_props structure
*
diff --git a/mali_kbase/mali_kbase_hw.c b/mali_kbase/mali_kbase_hw.c
index c277c0c..7738630 100644
--- a/mali_kbase/mali_kbase_hw.c
+++ b/mali_kbase/mali_kbase_hw.c
@@ -28,7 +28,7 @@
#include <mali_base_hwconfig_features.h>
#include <mali_base_hwconfig_issues.h>
-#include <mali_midg_regmap.h>
+#include "gpu/mali_kbase_gpu_regmap.h"
#include "mali_kbase.h"
#include "mali_kbase_hw.h"
@@ -68,9 +68,6 @@ void kbase_hw_set_features_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_TBEX:
features = base_hw_features_tBEx;
break;
- case GPU_ID2_PRODUCT_TULX:
- features = base_hw_features_tULx;
- break;
case GPU_ID2_PRODUCT_TDUX:
features = base_hw_features_tDUx;
break;
@@ -78,8 +75,8 @@ void kbase_hw_set_features_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_LODX:
features = base_hw_features_tODx;
break;
- case GPU_ID2_PRODUCT_TIDX:
- features = base_hw_features_tIDx;
+ case GPU_ID2_PRODUCT_TGRX:
+ features = base_hw_features_tGRx;
break;
case GPU_ID2_PRODUCT_TVAX:
features = base_hw_features_tVAx;
@@ -198,10 +195,6 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tBEx_r1p0},
{U32_MAX, NULL} } },
- {GPU_ID2_PRODUCT_TULX,
- {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tULx_r0p0},
- {U32_MAX, NULL} } },
-
{GPU_ID2_PRODUCT_TDUX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDUx_r0p0},
{U32_MAX, NULL} } },
@@ -214,8 +207,8 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tODx_r0p0},
{U32_MAX, NULL} } },
- {GPU_ID2_PRODUCT_TIDX,
- {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tIDx_r0p0},
+ {GPU_ID2_PRODUCT_TGRX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGRx_r0p0},
{U32_MAX, NULL} } },
{GPU_ID2_PRODUCT_TVAX,
@@ -358,9 +351,6 @@ int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_TBEX:
issues = base_hw_issues_model_tBEx;
break;
- case GPU_ID2_PRODUCT_TULX:
- issues = base_hw_issues_model_tULx;
- break;
case GPU_ID2_PRODUCT_TDUX:
issues = base_hw_issues_model_tDUx;
break;
@@ -368,8 +358,8 @@ int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_LODX:
issues = base_hw_issues_model_tODx;
break;
- case GPU_ID2_PRODUCT_TIDX:
- issues = base_hw_issues_model_tIDx;
+ case GPU_ID2_PRODUCT_TGRX:
+ issues = base_hw_issues_model_tGRx;
break;
case GPU_ID2_PRODUCT_TVAX:
issues = base_hw_issues_model_tVAx;
diff --git a/mali_kbase/mali_kbase_hwaccess_jm.h b/mali_kbase/mali_kbase_hwaccess_jm.h
index cfda5c4..4972893 100644
--- a/mali_kbase/mali_kbase_hwaccess_jm.h
+++ b/mali_kbase/mali_kbase_hwaccess_jm.h
@@ -299,9 +299,4 @@ void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
*/
bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev);
-/* Object containing callbacks for enabling/disabling protected mode, used
- * on GPU which supports protected mode switching natively.
- */
-extern struct protected_mode_ops kbase_native_protected_ops;
-
#endif /* _KBASE_HWACCESS_JM_H_ */
diff --git a/mali_kbase/mali_kbase_hwaccess_pm.h b/mali_kbase/mali_kbase_hwaccess_pm.h
index 96c473a..bbaf6ea 100644
--- a/mali_kbase/mali_kbase_hwaccess_pm.h
+++ b/mali_kbase/mali_kbase_hwaccess_pm.h
@@ -29,7 +29,7 @@
#ifndef _KBASE_HWACCESS_PM_H_
#define _KBASE_HWACCESS_PM_H_
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <linux/atomic.h>
#include <mali_kbase_pm_defs.h>
@@ -208,4 +208,22 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
int kbase_pm_list_policies(struct kbase_device *kbdev,
const struct kbase_pm_policy * const **list);
+/**
+ * kbase_pm_protected_mode_enable - Enable protected mode
+ *
+ * @kbdev: Address of the instance of a GPU platform device.
+ *
+ * Return: Zero on success or an error code
+ */
+int kbase_pm_protected_mode_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_mode_disable - Disable protected mode
+ *
+ * @kbdev: Address of the instance of a GPU platform device.
+ *
+ * Return: Zero on success or an error code
+ */
+int kbase_pm_protected_mode_disable(struct kbase_device *kbdev);
+
#endif /* _KBASE_HWACCESS_PM_H_ */
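The two declarations above route native protected-mode switching through the PM layer (replacing the kbase_native_protected_ops object removed from mali_kbase_hwaccess_jm.h earlier in this change). A minimal caller sketch, assuming kbdev is a fully probed device and that the usual PM locking rules for mode switching are observed; the error handling shown is illustrative, not taken from this patch:

	int err = kbase_pm_protected_mode_enable(kbdev);

	if (err) {
		dev_err(kbdev->dev, "protected mode entry failed (%d)\n", err);
		return err;
	}
	/* ... issue protected-mode work ... */
	err = kbase_pm_protected_mode_disable(kbdev);
	if (err)
		dev_err(kbdev->dev, "protected mode exit failed (%d)\n", err);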
diff --git a/mali_kbase/mali_kbase_jm.c b/mali_kbase/mali_kbase_jm.c
index da78a16..b91a706 100644
--- a/mali_kbase/mali_kbase_jm.c
+++ b/mali_kbase/mali_kbase_jm.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -137,4 +137,3 @@ struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
return kbase_js_complete_atom(katom, end_timestamp);
}
-
diff --git a/mali_kbase/mali_kbase_jm.h b/mali_kbase/mali_kbase_jm.h
index c468ea4..a3c7744 100644
--- a/mali_kbase/mali_kbase_jm.h
+++ b/mali_kbase/mali_kbase_jm.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014, 2016, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index eb0d9d7..a0090a9 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -2585,7 +2585,6 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
if (!kbase_jm_kick(kbdev, 1 << js))
/* No more jobs can be submitted on this slot */
js_mask &= ~(1 << js);
-
if (!kbase_ctx_flag(kctx, KCTX_PULLED)) {
bool pullable = kbase_js_ctx_pullable(kctx, js,
true);
diff --git a/mali_kbase/mali_kbase_js.h b/mali_kbase/mali_kbase_js.h
index 355da27..588777c 100644
--- a/mali_kbase/mali_kbase_js.h
+++ b/mali_kbase/mali_kbase_js.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -546,6 +546,7 @@ bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
ktime_t *end_timestamp);
+
/**
* @brief Submit atoms from all available contexts.
*
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 4589983..278c59b 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -38,7 +38,7 @@
#include <mali_kbase_config.h>
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_cache_policy.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_tracepoints.h>
@@ -1015,12 +1015,8 @@ void kbase_mem_term(struct kbase_device *kbdev)
if (kbdev->mgm_dev)
module_put(kbdev->mgm_dev->owner);
}
-
KBASE_EXPORT_TEST_API(kbase_mem_term);
-
-
-
/**
* @brief Allocate a free region object.
*
@@ -3707,6 +3703,46 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
alloc->nents = 0;
}
+int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages,
+ void *src_page, size_t *to_copy, unsigned int nr_pages,
+ unsigned int *target_page_nr, size_t offset)
+{
+ void *target_page = kmap(dest_pages[*target_page_nr]);
+ size_t chunk = PAGE_SIZE-offset;
+
+ if (!target_page) {
+ pr_err("%s: kmap failure", __func__);
+ return -ENOMEM;
+ }
+
+ chunk = min(chunk, *to_copy);
+
+ memcpy(target_page + offset, src_page, chunk);
+ *to_copy -= chunk;
+
+ kunmap(dest_pages[*target_page_nr]);
+
+ *target_page_nr += 1;
+ if (*target_page_nr >= nr_pages || *to_copy == 0)
+ return 0;
+
+ target_page = kmap(dest_pages[*target_page_nr]);
+ if (!target_page) {
+ pr_err("%s: kmap failure", __func__);
+ return -ENOMEM;
+ }
+
+
+ chunk = min(offset, *to_copy);
+ memcpy(target_page, src_page + PAGE_SIZE-offset, chunk);
+ *to_copy -= chunk;
+
+ kunmap(dest_pages[*target_page_nr]);
+
+ return 0;
+}
+
struct kbase_mem_phy_alloc *kbase_map_external_resource(
struct kbase_context *kctx, struct kbase_va_region *reg,
struct mm_struct *locked_mm)
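A worked example of the new helper, assuming PAGE_SIZE is 4096 (the values are illustrative): with offset = 3000 and *to_copy = 2000, the first memcpy() writes chunk = min(4096 - 3000, 2000) = 1096 bytes at byte 3000 of the current target page; *target_page_nr then advances and the second memcpy() writes min(3000, 904) = 904 bytes from src_page + 1096 to the start of the next target page. Each call therefore consumes at most one source page and straddles at most two destination pages.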
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index 4fb406d..d2889f1 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -1684,4 +1684,30 @@ void kbase_mem_umm_unmap(struct kbase_context *kctx,
int kbase_mem_do_sync_imported(struct kbase_context *kctx,
struct kbase_va_region *reg, enum kbase_sync_type sync_fn);
+
+/**
+ * kbase_mem_copy_to_pinned_user_pages - Memcpy from source input page to
+ * an unaligned address at a given offset from the start of a target page.
+ *
+ * @dest_pages: Pointer to the array of pages to which the content is
+ * to be copied from the provided @src_page.
+ * @src_page:        Pointer to the source page from which the
+ *                   copying will take place.
+ * @to_copy: Total number of bytes pending to be copied from
+ * @src_page to @target_page_nr within @dest_pages.
+ *                   This will get decremented by the number of
+ *                   bytes copied from the source page to the target pages.
+ * @nr_pages: Total number of pages present in @dest_pages.
+ * @target_page_nr: Target page number to which @src_page needs to be
+ *                   copied. This will get incremented by one on a
+ *                   successful copy from the source page.
+ * @offset:          Offset in bytes into the target page at which the
+ *                   copying is to be performed.
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages,
+ void *src_page, size_t *to_copy, unsigned int nr_pages,
+ unsigned int *target_page_nr, size_t offset);
+
#endif /* _KBASE_MEM_H_ */
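A minimal usage sketch for the helper documented above, assuming dest_pages holds nr_pages pinned user pages; the loop shape mirrors the in-tree caller (kbase_mem_copy_from_extres() in mali_kbase_softjobs.c) and all other names are illustrative:

	unsigned int target_page_nr = 0;	/* advanced by the helper */
	size_t to_copy = total_size;		/* decremented by the helper */
	unsigned int i;
	int err = 0;

	for (i = 0; i < nr_src_pages && target_page_nr < nr_pages; i++) {
		void *src = kmap(src_pages[i]);

		if (src) {
			err = kbase_mem_copy_to_pinned_user_pages(dest_pages,
					src, &to_copy, nr_pages,
					&target_page_nr, offset);
			kunmap(src_pages[i]);
			if (err)
				break;	/* propagate failure, don't truncate */
		}
	}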
diff --git a/mali_kbase/mali_kbase_mmu.c b/mali_kbase/mali_kbase_mmu.c
index f30d09b..1967bc9 100644
--- a/mali_kbase/mali_kbase_mmu.c
+++ b/mali_kbase/mali_kbase_mmu.c
@@ -31,7 +31,8 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_fault.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_tracepoints.h>
#include <mali_kbase_instr_defs.h>
#include <mali_kbase_debug.h>
@@ -2124,174 +2125,6 @@ void bus_fault_worker(struct work_struct *data)
atomic_dec(&kbdev->faults_pending);
}
-const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code)
-{
- const char *e;
-
- switch (exception_code) {
- /* Non-Fault Status code */
- case 0x00:
- e = "NOT_STARTED/IDLE/OK";
- break;
- case 0x01:
- e = "DONE";
- break;
- case 0x02:
- e = "INTERRUPTED";
- break;
- case 0x03:
- e = "STOPPED";
- break;
- case 0x04:
- e = "TERMINATED";
- break;
- case 0x08:
- e = "ACTIVE";
- break;
- /* Job exceptions */
- case 0x40:
- e = "JOB_CONFIG_FAULT";
- break;
- case 0x41:
- e = "JOB_POWER_FAULT";
- break;
- case 0x42:
- e = "JOB_READ_FAULT";
- break;
- case 0x43:
- e = "JOB_WRITE_FAULT";
- break;
- case 0x44:
- e = "JOB_AFFINITY_FAULT";
- break;
- case 0x48:
- e = "JOB_BUS_FAULT";
- break;
- case 0x50:
- e = "INSTR_INVALID_PC";
- break;
- case 0x51:
- e = "INSTR_INVALID_ENC";
- break;
- case 0x52:
- e = "INSTR_TYPE_MISMATCH";
- break;
- case 0x53:
- e = "INSTR_OPERAND_FAULT";
- break;
- case 0x54:
- e = "INSTR_TLS_FAULT";
- break;
- case 0x55:
- e = "INSTR_BARRIER_FAULT";
- break;
- case 0x56:
- e = "INSTR_ALIGN_FAULT";
- break;
- case 0x58:
- e = "DATA_INVALID_FAULT";
- break;
- case 0x59:
- e = "TILE_RANGE_FAULT";
- break;
- case 0x5A:
- e = "ADDR_RANGE_FAULT";
- break;
- case 0x60:
- e = "OUT_OF_MEMORY";
- break;
- /* GPU exceptions */
- case 0x80:
- e = "DELAYED_BUS_FAULT";
- break;
- case 0x88:
- e = "SHAREABILITY_FAULT";
- break;
- /* MMU exceptions */
- case 0xC0:
- case 0xC1:
- case 0xC2:
- case 0xC3:
- case 0xC4:
- case 0xC5:
- case 0xC6:
- case 0xC7:
- e = "TRANSLATION_FAULT";
- break;
- case 0xC8:
- e = "PERMISSION_FAULT";
- break;
- case 0xC9:
- case 0xCA:
- case 0xCB:
- case 0xCC:
- case 0xCD:
- case 0xCE:
- case 0xCF:
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
- e = "PERMISSION_FAULT";
- else
- e = "UNKNOWN";
- break;
- case 0xD0:
- case 0xD1:
- case 0xD2:
- case 0xD3:
- case 0xD4:
- case 0xD5:
- case 0xD6:
- case 0xD7:
- e = "TRANSTAB_BUS_FAULT";
- break;
- case 0xD8:
- e = "ACCESS_FLAG";
- break;
- case 0xD9:
- case 0xDA:
- case 0xDB:
- case 0xDC:
- case 0xDD:
- case 0xDE:
- case 0xDF:
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
- e = "ACCESS_FLAG";
- else
- e = "UNKNOWN";
- break;
- case 0xE0:
- case 0xE1:
- case 0xE2:
- case 0xE3:
- case 0xE4:
- case 0xE5:
- case 0xE6:
- case 0xE7:
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
- e = "ADDRESS_SIZE_FAULT";
- else
- e = "UNKNOWN";
- break;
- case 0xE8:
- case 0xE9:
- case 0xEA:
- case 0xEB:
- case 0xEC:
- case 0xED:
- case 0xEE:
- case 0xEF:
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
- e = "MEMORY_ATTRIBUTES_FAULT";
- else
- e = "UNKNOWN";
- break;
- default:
- e = "UNKNOWN";
- break;
- };
-
- return e;
-}
-
static const char *access_type_name(struct kbase_device *kbdev,
u32 fault_status)
{
@@ -2355,7 +2188,7 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
reason_str,
fault->status,
(fault->status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
- exception_type, kbase_exception_name(kbdev, exception_type),
+ exception_type, kbase_gpu_exception_name(exception_type),
access_type, access_type_name(kbdev, fault->status),
source_id,
kctx->pid);
diff --git a/mali_kbase/mali_kbase_mmu_mode_aarch64.c b/mali_kbase/mali_kbase_mmu_mode_aarch64.c
index 7b9cc0c..92cf8a3 100644
--- a/mali_kbase/mali_kbase_mmu_mode_aarch64.c
+++ b/mali_kbase/mali_kbase_mmu_mode_aarch64.c
@@ -22,7 +22,7 @@
#include "mali_kbase.h"
-#include "mali_midg_regmap.h"
+#include <gpu/mali_kbase_gpu_regmap.h>
#include "mali_kbase_defs.h"
#define ENTRY_TYPE_MASK 3ULL
diff --git a/mali_kbase/mali_kbase_mmu_mode_lpae.c b/mali_kbase/mali_kbase_mmu_mode_lpae.c
index 7ec90cf..27c2c86 100644
--- a/mali_kbase/mali_kbase_mmu_mode_lpae.c
+++ b/mali_kbase/mali_kbase_mmu_mode_lpae.c
@@ -22,7 +22,7 @@
#include "mali_kbase.h"
-#include "mali_midg_regmap.h"
+#include <gpu/mali_kbase_gpu_regmap.h>
#include "mali_kbase_defs.h"
#define ENTRY_TYPE_MASK 3ULL
diff --git a/mali_kbase/mali_kbase_pm.c b/mali_kbase/mali_kbase_pm.c
index 5699eb8..2251031 100644
--- a/mali_kbase/mali_kbase_pm.c
+++ b/mali_kbase/mali_kbase_pm.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,7 +28,7 @@
*/
#include <mali_kbase.h>
-#include <mali_midg_regmap.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_vinstr.h>
#include <mali_kbase_hwcnt_context.h>
diff --git a/mali_kbase/mali_kbase_regs_history_debugfs.c b/mali_kbase/mali_kbase_regs_history_debugfs.c
index 53d9427..1d114a6 100644
--- a/mali_kbase/mali_kbase_regs_history_debugfs.c
+++ b/mali_kbase/mali_kbase_regs_history_debugfs.c
@@ -85,8 +85,8 @@ static int regs_history_show(struct seq_file *sfile, void *data)
&h->buf[(h->count - iters + i) % h->size];
char const access = (io->addr & 1) ? 'w' : 'r';
- seq_printf(sfile, "%6i: %c: reg 0x%p val %08x\n", i, access,
- (void *)(io->addr & ~0x1), io->value);
+ seq_printf(sfile, "%6i: %c: reg 0x%016lx val %08x\n", i, access,
+ (unsigned long)(io->addr & ~0x1), io->value);
}
spin_unlock_irqrestore(&h->lock, flags);
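Since Linux 4.15, %p prints a hashed value for security, which made the old register-history lines useless for decoding register offsets; the explicit cast to unsigned long printed with %016lx restores the raw offset. With illustrative values io->addr = 0x1005 (offset 0x1004 with the write bit set) and io->value = 0x1, an entry now renders as:

	     0: w: reg 0x0000000000001004 val 00000001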
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index 6c740c0..537a239 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -719,48 +719,6 @@ out_cleanup:
return ret;
}
-void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
- void *extres_page, struct page **pages, unsigned int nr_pages,
- unsigned int *target_page_nr, size_t offset, size_t *to_copy)
-{
- void *target_page = kmap(pages[*target_page_nr]);
- size_t chunk = PAGE_SIZE-offset;
-
- lockdep_assert_held(&kctx->reg_lock);
-
- if (!target_page) {
- *target_page_nr += 1;
- dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
- return;
- }
-
- chunk = min(chunk, *to_copy);
-
- memcpy(target_page + offset, extres_page, chunk);
- *to_copy -= chunk;
-
- kunmap(pages[*target_page_nr]);
-
- *target_page_nr += 1;
- if (*target_page_nr >= nr_pages)
- return;
-
- target_page = kmap(pages[*target_page_nr]);
- if (!target_page) {
- *target_page_nr += 1;
- dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
- return;
- }
-
- KBASE_DEBUG_ASSERT(target_page);
-
- chunk = min(offset, *to_copy);
- memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
- *to_copy -= chunk;
-
- kunmap(pages[*target_page_nr]);
-}
-
int kbase_mem_copy_from_extres(struct kbase_context *kctx,
struct kbase_debug_copy_buffer *buf_data)
{
@@ -785,22 +743,21 @@ int kbase_mem_copy_from_extres(struct kbase_context *kctx,
switch (gpu_alloc->type) {
case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
{
- for (i = 0; i < buf_data->nr_extres_pages; i++) {
+ for (i = 0; i < buf_data->nr_extres_pages &&
+ target_page_nr < buf_data->nr_pages; i++) {
struct page *pg = buf_data->extres_pages[i];
void *extres_page = kmap(pg);
- if (extres_page)
- kbase_mem_copy_from_extres_page(kctx,
- extres_page, pages,
+ if (extres_page) {
+ ret = kbase_mem_copy_to_pinned_user_pages(
+ pages, extres_page, &to_copy,
buf_data->nr_pages,
- &target_page_nr,
- offset, &to_copy);
-
- kunmap(pg);
- if (target_page_nr >= buf_data->nr_pages)
- break;
+ &target_page_nr, offset);
+ kunmap(pg);
+ if (ret)
+ goto out_unlock;
+ }
}
- break;
}
break;
case KBASE_MEM_TYPE_IMPORTED_UMM: {
@@ -820,20 +777,21 @@ int kbase_mem_copy_from_extres(struct kbase_context *kctx,
if (ret)
goto out_unlock;
- for (i = 0; i < dma_to_copy/PAGE_SIZE; i++) {
+ for (i = 0; i < dma_to_copy/PAGE_SIZE &&
+ target_page_nr < buf_data->nr_pages; i++) {
void *extres_page = dma_buf_kmap(dma_buf, i);
- if (extres_page)
- kbase_mem_copy_from_extres_page(kctx,
- extres_page, pages,
+ if (extres_page) {
+ ret = kbase_mem_copy_to_pinned_user_pages(
+ pages, extres_page, &to_copy,
buf_data->nr_pages,
- &target_page_nr,
- offset, &to_copy);
+ &target_page_nr, offset);
- dma_buf_kunmap(dma_buf, i, extres_page);
- if (target_page_nr >= buf_data->nr_pages)
- break;
+ dma_buf_kunmap(dma_buf, i, extres_page);
+ if (ret)
+ goto out_unlock;
+ }
}
dma_buf_end_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
@@ -848,7 +806,6 @@ int kbase_mem_copy_from_extres(struct kbase_context *kctx,
out_unlock:
kbase_gpu_vm_unlock(kctx);
return ret;
-
}
static int kbase_debug_copy(struct kbase_jd_atom *katom)
diff --git a/mali_kbase/mali_kbase_sync.h b/mali_kbase/mali_kbase_sync.h
index 785b9ff..80b54d0 100644
--- a/mali_kbase/mali_kbase_sync.h
+++ b/mali_kbase/mali_kbase_sync.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2016, 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -206,6 +206,7 @@ void kbase_sync_fence_info_get(struct dma_fence *fence,
*/
const char *kbase_sync_status_string(int status);
+
/*
* Internal worker used to continue processing of atom.
*/
diff --git a/mali_kbase/mali_kbase_sync_common.c b/mali_kbase/mali_kbase_sync_common.c
index 03c0df5..2e1ede5 100644
--- a/mali_kbase/mali_kbase_sync_common.c
+++ b/mali_kbase/mali_kbase_sync_common.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2016, 2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/mali_kbase_tracepoints.c b/mali_kbase/mali_kbase_tracepoints.c
index 68c0c1f..8232c2d 100644
--- a/mali_kbase/mali_kbase_tracepoints.c
+++ b/mali_kbase/mali_kbase_tracepoints.c
@@ -67,55 +67,52 @@ enum tl_msg_id_obj {
KBASE_TL_EVENT_ATOM_SOFTJOB_START,
KBASE_TL_EVENT_ATOM_SOFTJOB_END,
KBASE_JD_GPU_SOFT_RESET,
- KBASE_TL_NEW_KCPUQUEUE,
- KBASE_TL_RET_KCPUQUEUE_CTX,
- KBASE_TL_DEL_KCPUQUEUE,
- KBASE_TL_NRET_KCPUQUEUE_CTX,
- KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
- KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
- KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
- KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
- KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START,
- KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END,
- KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END,
- KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END,
- KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER,
+ KBASE_TL_KBASE_NEW_KCPUQUEUE,
+ KBASE_TL_KBASE_DEL_KCPUQUEUE,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER,
KBASE_OBJ_MSG_COUNT,
};
@@ -271,199 +268,187 @@ enum tl_msg_id_aux {
"gpu soft reset", \
"@p", \
"gpu") \
- TP_DESC(KBASE_TL_NEW_KCPUQUEUE, \
+ TP_DESC(KBASE_TL_KBASE_NEW_KCPUQUEUE, \
"New KCPU Queue", \
"@ppI", \
"kcpu_queue,ctx,kcpuq_num_pending_cmds") \
- TP_DESC(KBASE_TL_RET_KCPUQUEUE_CTX, \
- "Context retains KCPU Queue", \
- "@pp", \
- "kcpu_queue,ctx") \
- TP_DESC(KBASE_TL_DEL_KCPUQUEUE, \
+ TP_DESC(KBASE_TL_KBASE_DEL_KCPUQUEUE, \
"Delete KCPU Queue", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_NRET_KCPUQUEUE_CTX, \
- "Context releases KCPU Queue", \
- "@pp", \
- "kcpu_queue,ctx") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL, \
"KCPU Queue enqueues Signal on Fence", \
"@pp", \
"kcpu_queue,fence") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT, \
"KCPU Queue enqueues Wait on Fence", \
"@pp", \
"kcpu_queue,fence") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
"Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
"Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
"@pLI", \
"kcpu_queue,cqs_obj_gpu_addr,cqs_obj_compare_value") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
"End array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET, \
"Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET, \
"Array item of KCPU Queue enqueues Set on Cross Queue Sync Object", \
"@pL", \
"kcpu_queue,cqs_obj_gpu_addr") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET, \
"End array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
"Begin array of KCPU Queue enqueues Debug Copy", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
"Array item of KCPU Queue enqueues Debug Copy", \
"@pL", \
"kcpu_queue,debugcopy_dst_size") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
"End array of KCPU Queue enqueues Debug Copy", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
"KCPU Queue enqueues Map Import", \
"@pL", \
"kcpu_queue,map_import_buf_gpu_addr") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT, \
"KCPU Queue enqueues Unmap Import", \
"@pL", \
"kcpu_queue,map_import_buf_gpu_addr") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE, \
"KCPU Queue enqueues Unmap Import ignoring reference count", \
"@pL", \
"kcpu_queue,map_import_buf_gpu_addr") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
"Begin array of KCPU Queue enqueues JIT Alloc", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
"Array item of KCPU Queue enqueues JIT Alloc", \
"@pLLLLIIIII", \
"kcpu_queue,jit_alloc_gpu_alloc_addr_dest,jit_alloc_va_pages,jit_alloc_commit_pages,jit_alloc_extent,jit_alloc_jit_id,jit_alloc_bin_id,jit_alloc_max_allocations,jit_alloc_flags,jit_alloc_usage_id") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
"End array of KCPU Queue enqueues JIT Alloc", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE, \
"Begin array of KCPU Queue enqueues JIT Free", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE, \
"Array item of KCPU Queue enqueues JIT Free", \
"@pI", \
"kcpu_queue,jit_alloc_jit_id") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE, \
"End array of KCPU Queue enqueues JIT Free", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
"KCPU Queue starts a Signal on Fence", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END, \
"KCPU Queue ends a Signal on Fence", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START, \
"KCPU Queue starts a Wait on Fence", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END, \
"KCPU Queue ends a Wait on Fence", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
"KCPU Queue starts a Wait on an array of Cross Queue Sync Objects", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
"KCPU Queue ends a Wait on an array of Cross Queue Sync Objects", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START, \
- "KCPU Queue starts a Set on an array of Cross Queue Sync Objects", \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET, \
+ "KCPU Queue executes a Set on an array of Cross Queue Sync Objects", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END, \
- "KCPU Queue ends a Set on an array of Cross Queue Sync Objects", \
- "@p", \
- "kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START, \
"KCPU Queue starts an array of Debug Copys", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END, \
"KCPU Queue ends an array of Debug Copys", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
"KCPU Queue starts a Map Import", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END, \
"KCPU Queue ends a Map Import", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START, \
"KCPU Queue starts an Unmap Import", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END, \
"KCPU Queue ends an Unmap Import", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START, \
"KCPU Queue starts an Unmap Import ignoring reference count", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END, \
"KCPU Queue ends an Unmap Import ignoring reference count", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START, \
"KCPU Queue starts an array of JIT Allocs", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
"Begin array of KCPU Queue ends an array of JIT Allocs", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
"Array item of KCPU Queue ends an array of JIT Allocs", \
"@pLL", \
"kcpu_queue,jit_alloc_gpu_alloc_addr,jit_alloc_mmu_flags") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
"End array of KCPU Queue ends an array of JIT Allocs", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START, \
"KCPU Queue starts an array of JIT Frees", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
"Begin array of KCPU Queue ends an array of JIT Frees", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
"Array item of KCPU Queue ends an array of JIT Frees", \
"@pL", \
"kcpu_queue,jit_free_pages_used") \
- TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
"End array of KCPU Queue ends an array of JIT Frees", \
"@p", \
"kcpu_queue") \
- TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER, \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER, \
"KCPU Queue executes an Error Barrier", \
"@p", \
"kcpu_queue") \
@@ -1736,13 +1721,13 @@ void __kbase_tlstream_aux_event_job_slot(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_new_kcpuqueue(
+void __kbase_tlstream_tl_kbase_new_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *ctx,
u32 kcpuq_num_pending_cmds)
{
- const u32 msg_id = KBASE_TL_NEW_KCPUQUEUE;
+ const u32 msg_id = KBASE_TL_KBASE_NEW_KCPUQUEUE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(ctx)
@@ -1766,63 +1751,13 @@ void __kbase_tlstream_tl_new_kcpuqueue(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_ret_kcpuqueue_ctx(
- struct kbase_tlstream *stream,
- const void *kcpu_queue,
- const void *ctx)
-{
- const u32 msg_id = KBASE_TL_RET_KCPUQUEUE_CTX;
- const size_t msg_size = sizeof(msg_id) + sizeof(u64)
- + sizeof(kcpu_queue)
- + sizeof(ctx)
- ;
- char *buffer;
- unsigned long acq_flags;
- size_t pos = 0;
-
- buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
- pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_serialize_timestamp(buffer, pos);
- pos = kbasep_serialize_bytes(buffer,
- pos, &kcpu_queue, sizeof(kcpu_queue));
- pos = kbasep_serialize_bytes(buffer,
- pos, &ctx, sizeof(ctx));
-
- kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_del_kcpuqueue(
+void __kbase_tlstream_tl_kbase_del_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_DEL_KCPUQUEUE;
- const size_t msg_size = sizeof(msg_id) + sizeof(u64)
- + sizeof(kcpu_queue)
- ;
- char *buffer;
- unsigned long acq_flags;
- size_t pos = 0;
-
- buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
- pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_serialize_timestamp(buffer, pos);
- pos = kbasep_serialize_bytes(buffer,
- pos, &kcpu_queue, sizeof(kcpu_queue));
-
- kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
- struct kbase_tlstream *stream,
- const void *kcpu_queue,
- const void *ctx)
-{
- const u32 msg_id = KBASE_TL_NRET_KCPUQUEUE_CTX;
+ const u32 msg_id = KBASE_TL_KBASE_DEL_KCPUQUEUE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
- + sizeof(ctx)
;
char *buffer;
unsigned long acq_flags;
@@ -1834,18 +1769,16 @@ void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
pos = kbasep_serialize_timestamp(buffer, pos);
pos = kbasep_serialize_bytes(buffer,
pos, &kcpu_queue, sizeof(kcpu_queue));
- pos = kbasep_serialize_bytes(buffer,
- pos, &ctx, sizeof(ctx));
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *fence)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(fence)
@@ -1866,12 +1799,12 @@ void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *fence)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(fence)
@@ -1892,11 +1825,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -1914,13 +1847,13 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 cqs_obj_gpu_addr,
u32 cqs_obj_compare_value)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(cqs_obj_gpu_addr)
@@ -1944,11 +1877,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -1966,11 +1899,11 @@ void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -1988,12 +1921,12 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 cqs_obj_gpu_addr)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(cqs_obj_gpu_addr)
@@ -2014,11 +1947,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2036,11 +1969,11 @@ void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_debugcopy(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2058,12 +1991,12 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_debugcopy(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 debugcopy_dst_size)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(debugcopy_dst_size)
@@ -2084,11 +2017,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_debugcopy(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2106,12 +2039,12 @@ void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(map_import_buf_gpu_addr)
@@ -2132,12 +2065,12 @@ void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(map_import_buf_gpu_addr)
@@ -2158,12 +2091,12 @@ void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import_force(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(map_import_buf_gpu_addr)
@@ -2184,11 +2117,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import_force(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2206,7 +2139,7 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_alloc_gpu_alloc_addr_dest,
@@ -2219,7 +2152,7 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
u32 jit_alloc_flags,
u32 jit_alloc_usage_id)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(jit_alloc_gpu_alloc_addr_dest)
@@ -2264,11 +2197,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2286,11 +2219,11 @@ void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2308,12 +2241,12 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 jit_alloc_jit_id)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(jit_alloc_jit_id)
@@ -2334,33 +2267,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(
- struct kbase_tlstream *stream,
- const void *kcpu_queue)
-{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
- const size_t msg_size = sizeof(msg_id) + sizeof(u64)
- + sizeof(kcpu_queue)
- ;
- char *buffer;
- unsigned long acq_flags;
- size_t pos = 0;
-
- buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
- pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
- pos = kbasep_serialize_timestamp(buffer, pos);
- pos = kbasep_serialize_bytes(buffer,
- pos, &kcpu_queue, sizeof(kcpu_queue));
-
- kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2378,11 +2289,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2400,11 +2311,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2422,11 +2333,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2444,11 +2355,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2466,11 +2377,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2488,11 +2399,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2510,11 +2421,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2532,11 +2443,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2554,11 +2465,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2576,11 +2487,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2598,11 +2509,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2620,11 +2531,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2642,11 +2553,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2664,11 +2575,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2686,11 +2597,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2708,11 +2619,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2730,11 +2641,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2752,13 +2663,13 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_alloc_gpu_alloc_addr,
u64 jit_alloc_mmu_flags)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(jit_alloc_gpu_alloc_addr)
@@ -2782,11 +2693,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2804,11 +2715,11 @@ void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2826,11 +2737,11 @@ void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2848,12 +2759,12 @@ void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_free_pages_used)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
+ sizeof(jit_free_pages_used)
@@ -2874,11 +2785,11 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
@@ -2896,11 +2807,11 @@ void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
-void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_errorbarrier(
struct kbase_tlstream *stream,
const void *kcpu_queue)
{
- const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER;
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
;
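
Every writer renamed in this file shares one body shape: compute the message ID, size the payload, acquire a message buffer, serialize the arguments, release the buffer. A minimal sketch of that shape follows, assuming the acquire and serialize helpers pair with the kbase_tlstream_msgbuf_release() call visible in each hunk above (only the release call appears in this diff; the other helper names are assumptions):

/* Sketch only: KBASE_TL_KBASE_EXAMPLE is a hypothetical message ID,
 * and the acquire/serialize helpers are assumed counterparts to the
 * kbase_tlstream_msgbuf_release() call seen in every writer above.
 */
void __kbase_tlstream_tl_kbase_example(struct kbase_tlstream *stream,
				       const void *kcpu_queue)
{
	const u32 msg_id = KBASE_TL_KBASE_EXAMPLE;
	const size_t msg_size = sizeof(msg_id) + sizeof(u64) /* timestamp */
		+ sizeof(kcpu_queue);
	char *buffer;
	unsigned long acq_flags;
	size_t pos = 0;

	/* Assumed acquire counterpart to the release call shown above. */
	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);

	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
	pos = kbasep_serialize_timestamp(buffer, pos);
	pos = kbasep_serialize_bytes(buffer, pos, &kcpu_queue,
				     sizeof(kcpu_queue));

	kbase_tlstream_msgbuf_release(stream, acq_flags);
}

Note that sizeof(kcpu_queue) sizes the pointer itself, not what it points to: the queue handle is the payload.
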
diff --git a/mali_kbase/mali_kbase_tracepoints.h b/mali_kbase/mali_kbase_tracepoints.h
index 2bdf7e2..084ff56 100644
--- a/mali_kbase/mali_kbase_tracepoints.h
+++ b/mali_kbase/mali_kbase_tracepoints.h
@@ -271,77 +271,69 @@ void __kbase_tlstream_aux_event_job_slot(
u32 slot_nr,
u32 atom_nr,
u32 event);
-void __kbase_tlstream_tl_new_kcpuqueue(
+void __kbase_tlstream_tl_kbase_new_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *ctx,
u32 kcpuq_num_pending_cmds);
-void __kbase_tlstream_tl_ret_kcpuqueue_ctx(
- struct kbase_tlstream *stream,
- const void *kcpu_queue,
- const void *ctx);
-void __kbase_tlstream_tl_del_kcpuqueue(
+void __kbase_tlstream_tl_kbase_del_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
- struct kbase_tlstream *stream,
- const void *kcpu_queue,
- const void *ctx);
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *fence);
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *fence);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 cqs_obj_gpu_addr,
u32 cqs_obj_compare_value);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 cqs_obj_gpu_addr);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_debugcopy(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_debugcopy(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 debugcopy_dst_size);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_debugcopy(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr);
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr);
-void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import_force(
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_alloc_gpu_alloc_addr_dest,
@@ -353,95 +345,92 @@ void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
u32 jit_alloc_max_allocations,
u32 jit_alloc_flags,
u32 jit_alloc_usage_id);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 jit_alloc_jit_id);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(
- struct kbase_tlstream *stream,
- const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_end(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_alloc_gpu_alloc_addr,
u64 jit_alloc_mmu_flags);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end(
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end(
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_free_pages_used);
-void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end(
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue);
-void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_errorbarrier(
struct kbase_tlstream *stream,
const void *kcpu_queue);
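
The rename is mechanical: each timeline event keeps three artefacts in lockstep, and all three move from the TL_EVENT_* to the TL_KBASE_* namespace together. Taking the fence-signal start event from this diff as the example:

/* One event, three names renamed in lockstep:
 *   message ID : KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START
 *   writer     : __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start()
 *   macro      : KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START()
 */
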
@@ -1415,7 +1404,7 @@ struct kbase_tlstream;
} while (0)
/**
- * KBASE_TLSTREAM_TL_NEW_KCPUQUEUE -
+ * KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE -
* New KCPU Queue
*
* @kbdev: Kbase device
@@ -1424,7 +1413,7 @@ struct kbase_tlstream;
* @kcpuq_num_pending_cmds: Number of commands already enqueued
* in the KCPU queue
*/
-#define KBASE_TLSTREAM_TL_NEW_KCPUQUEUE( \
+#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
kbdev, \
kcpu_queue, \
ctx, \
@@ -1433,81 +1422,39 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_new_kcpuqueue( \
+ __kbase_tlstream_tl_kbase_new_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, ctx, kcpuq_num_pending_cmds); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX -
- * Context retains KCPU Queue
- *
- * @kbdev: Kbase device
- * @kcpu_queue: KCPU queue
- * @ctx: Name of the context object
- */
-#define KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX( \
- kbdev, \
- kcpu_queue, \
- ctx \
- ) \
- do { \
- int enabled = atomic_read(&kbdev->timeline_is_enabled); \
- if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_ret_kcpuqueue_ctx( \
- __TL_DISPATCH_STREAM(kbdev, obj), \
- kcpu_queue, ctx); \
- } while (0)
-
-/**
- * KBASE_TLSTREAM_TL_DEL_KCPUQUEUE -
+ * KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE -
* Delete KCPU Queue
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_DEL_KCPUQUEUE( \
+#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_del_kcpuqueue( \
+ __kbase_tlstream_tl_kbase_del_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX -
- * Context releases KCPU Queue
- *
- * @kbdev: Kbase device
- * @kcpu_queue: KCPU queue
- * @ctx: Name of the context object
- */
-#define KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX( \
- kbdev, \
- kcpu_queue, \
- ctx \
- ) \
- do { \
- int enabled = atomic_read(&kbdev->timeline_is_enabled); \
- if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_nret_kcpuqueue_ctx( \
- __TL_DISPATCH_STREAM(kbdev, obj), \
- kcpu_queue, ctx); \
- } while (0)
-
-/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL -
* KCPU Queue enqueues Signal on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @fence: Fence object handle
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
kbdev, \
kcpu_queue, \
fence \
@@ -1515,20 +1462,20 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, fence); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT -
* KCPU Queue enqueues Wait on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @fence: Fence object handle
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
kbdev, \
kcpu_queue, \
fence \
@@ -1536,32 +1483,32 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, fence); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT -
* Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT -
* Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object
*
* @kbdev: Kbase device
@@ -1570,7 +1517,7 @@ struct kbase_tlstream;
* @cqs_obj_compare_value: Semaphore value that should be exceeded
* for the WAIT to pass
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
kbdev, \
kcpu_queue, \
cqs_obj_gpu_addr, \
@@ -1579,58 +1526,58 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, cqs_obj_gpu_addr, cqs_obj_compare_value); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT -
* End array of KCPU Queue enqueues Wait on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET -
* Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET -
* Array item of KCPU Queue enqueues Set on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @cqs_obj_gpu_addr: CQS Object GPU ptr
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET( \
kbdev, \
kcpu_queue, \
cqs_obj_gpu_addr \
@@ -1638,58 +1585,58 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, cqs_obj_gpu_addr); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET -
* End array of KCPU Queue enqueues Set on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
* Begin array of KCPU Queue enqueues Debug Copy
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_debugcopy( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
* Array item of KCPU Queue enqueues Debug Copy
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @debugcopy_dst_size: Debug Copy destination size
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
kbdev, \
kcpu_queue, \
debugcopy_dst_size \
@@ -1697,39 +1644,39 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_debugcopy( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, debugcopy_dst_size); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
* End array of KCPU Queue enqueues Debug Copy
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_debugcopy( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT -
* KCPU Queue enqueues Map Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @map_import_buf_gpu_addr: Map import buffer GPU ptr
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
@@ -1737,20 +1684,20 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, map_import_buf_gpu_addr); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT -
* KCPU Queue enqueues Unmap Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @map_import_buf_gpu_addr: Map import buffer GPU ptr
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
@@ -1758,20 +1705,20 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, map_import_buf_gpu_addr); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE -
* KCPU Queue enqueues Unmap Import ignoring reference count
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @map_import_buf_gpu_addr: Map import buffer GPU ptr
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
@@ -1779,32 +1726,32 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import_force( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, map_import_buf_gpu_addr); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
* Begin array of KCPU Queue enqueues JIT Alloc
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
* Array item of KCPU Queue enqueues JIT Alloc
*
* @kbdev: Kbase device
@@ -1829,7 +1776,7 @@ struct kbase_tlstream;
* reused. The kernel should attempt to use a previous allocation with the same
* usage_id
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue, \
jit_alloc_gpu_alloc_addr_dest, \
@@ -1845,51 +1792,51 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, jit_alloc_gpu_alloc_addr_dest, jit_alloc_va_pages, jit_alloc_commit_pages, jit_alloc_extent, jit_alloc_jit_id, jit_alloc_bin_id, jit_alloc_max_allocations, jit_alloc_flags, jit_alloc_usage_id); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
* End array of KCPU Queue enqueues JIT Alloc
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE -
* Begin array of KCPU Queue enqueues JIT Free
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE -
* Array item of KCPU Queue enqueues JIT Free
*
* @kbdev: Kbase device
@@ -1897,7 +1844,7 @@ struct kbase_tlstream;
* @jit_alloc_jit_id: Unique ID provided by the caller, this is used
* to pair allocation and free requests. Zero is not a valid value
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue, \
jit_alloc_jit_id \
@@ -1905,374 +1852,355 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, jit_alloc_jit_id); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE -
* End array of KCPU Queue enqueues JIT Free
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START -
* KCPU Queue starts a Signal on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END -
* KCPU Queue ends a Signal on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START -
* KCPU Queue starts a Wait on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END -
* KCPU Queue ends a Wait on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START -
* KCPU Queue starts a Wait on an array of Cross Queue Sync Objects
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END -
* KCPU Queue ends a Wait on an array of Cross Queue Sync Objects
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
- kbdev, \
- kcpu_queue \
- ) \
- do { \
- int enabled = atomic_read(&kbdev->timeline_is_enabled); \
- if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end( \
- __TL_DISPATCH_STREAM(kbdev, obj), \
- kcpu_queue); \
- } while (0)
-
-/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START -
- * KCPU Queue starts a Set on an array of Cross Queue Sync Objects
- *
- * @kbdev: Kbase device
- * @kcpu_queue: KCPU queue
- */
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END -
- * KCPU Queue ends a Set on an array of Cross Queue Sync Objects
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET -
+ * KCPU Queue executes a Set on an array of Cross Queue Sync Objects
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START -
 * KCPU Queue starts an array of Debug Copies
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END -
 * KCPU Queue ends an array of Debug Copies
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START -
* KCPU Queue starts a Map Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END -
* KCPU Queue ends a Map Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START -
* KCPU Queue starts an Unmap Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END -
* KCPU Queue ends an Unmap Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START -
* KCPU Queue starts an Unmap Import ignoring reference count
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END -
* KCPU Queue ends an Unmap Import ignoring reference count
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_force_end( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START -
* KCPU Queue starts an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
* Begin array of KCPU Queue ends an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
* Array item of KCPU Queue ends an array of JIT Allocs
*
* @kbdev: Kbase device
@@ -2280,7 +2208,7 @@ struct kbase_tlstream;
* @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address
* @jit_alloc_mmu_flags: The MMU flags for the JIT allocation
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue, \
jit_alloc_gpu_alloc_addr, \
@@ -2289,70 +2217,70 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, jit_alloc_gpu_alloc_addr, jit_alloc_mmu_flags); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
* End array of KCPU Queue ends an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START -
* KCPU Queue starts an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END -
* Begin array of KCPU Queue ends an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end( \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END -
* Array item of KCPU Queue ends an array of JIT Frees
*
* @kbdev: Kbase device
@@ -2360,7 +2288,7 @@ struct kbase_tlstream;
* @jit_free_pages_used: The actual number of pages used by the JIT
* allocation
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue, \
jit_free_pages_used \
@@ -2368,45 +2296,45 @@ struct kbase_tlstream;
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end( \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, jit_free_pages_used); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END -
* End array of KCPU Queue ends an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end( \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
/**
- * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER -
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER -
* KCPU Queue executes an Error Barrier
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
-#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER( \
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_is_enabled); \
if (enabled & TLSTREAM_ENABLED) \
- __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier( \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_errorbarrier( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue); \
} while (0)
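
Call sites only ever touch the macro layer; the enabled check and stream dispatch stay hidden inside the do/while(0) wrapper. A hedged usage sketch (trace_fence_wait() and its arguments are placeholders, not driver code):

/* Hypothetical call site: bracket a fence wait with the renamed
 * start/end events. Each macro expands to the
 * atomic_read(&kbdev->timeline_is_enabled) guard shown above, so
 * the calls are cheap while the timeline stream is disabled.
 */
static void trace_fence_wait(struct kbase_device *kbdev, void *queue)
{
	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START(kbdev, queue);
	/* ... block on the fence ... */
	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END(kbdev, queue);
}

The do/while(0) idiom makes each macro behave as a single statement, so it composes safely with an unbraced if/else at the call site.
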
diff --git a/mali_kbase/platform/meson/Kbuild b/mali_kbase/platform/meson/Kbuild
deleted file mode 100644
index 243415b..0000000
--- a/mali_kbase/platform/meson/Kbuild
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# (C) COPYRIGHT 2012-2017, 2019 ARM Limited. All rights reserved.
-#
-# This program is free software and is provided to you under the terms of the
-# GNU General Public License version 2 as published by the Free Software
-# Foundation, and any use by you of this program is subject to the terms
-# of such GNU licence.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# SPDX-License-Identifier: GPL-2.0
-#
-#
-
-mali_kbase-y += \
- $(MALI_PLATFORM_DIR)/mali_kbase_config_meson.o \
- $(MALI_PLATFORM_DIR)/mali_kbase_runtime_pm.o
diff --git a/mali_kbase/platform/meson/mali_kbase_runtime_pm.c b/mali_kbase/platform/meson/mali_kbase_runtime_pm.c
deleted file mode 100644
index 5b7378d..0000000
--- a/mali_kbase/platform/meson/mali_kbase_runtime_pm.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2015, 2017-2019 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-#include <mali_kbase.h>
-#include <mali_kbase_defs.h>
-#include <backend/gpu/mali_kbase_device_internal.h>
-
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/regulator/consumer.h>
-
-#include "mali_kbase_config_platform.h"
-
-
-static struct reset_control **resets;
-static int nr_resets;
-
-static int resets_init(struct kbase_device *kbdev)
-{
- struct device_node *np;
- int i;
- int err = 0;
-
- np = kbdev->dev->of_node;
-
- nr_resets = of_count_phandle_with_args(np, "resets", "#reset-cells");
- if (nr_resets <= 0) {
- dev_err(kbdev->dev, "Failed to get GPU resets from dtb\n");
- return nr_resets;
- }
-
- resets = devm_kcalloc(kbdev->dev, nr_resets, sizeof(*resets),
- GFP_KERNEL);
- if (!resets)
- return -ENOMEM;
-
- for (i = 0; i < nr_resets; ++i) {
- resets[i] = devm_reset_control_get_exclusive_by_index(
- kbdev->dev, i);
- if (IS_ERR(resets[i])) {
- err = PTR_ERR(resets[i]);
- nr_resets = i;
- break;
- }
- }
-
- return err;
-}
-
-static int pm_callback_soft_reset(struct kbase_device *kbdev)
-{
- int ret, i;
-
- if (!resets) {
- ret = resets_init(kbdev);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < nr_resets; ++i)
- reset_control_assert(resets[i]);
-
- udelay(10);
-
- for (i = 0; i < nr_resets; ++i)
- reset_control_deassert(resets[i]);
-
- udelay(10);
-
- /* Override Power Management Settings, values from Amlogic KBase */
- kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_KEY), 0x2968A819);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1),
- 0xfff | (0x20 << 16));
-
- /*
- * RESET_COMPLETED interrupt will be raised, so continue with
- * the normal soft reset procedure
- */
- return 0;
-}
-
-static void enable_gpu_power_control(struct kbase_device *kbdev)
-{
- unsigned int i;
-
-#if defined(CONFIG_REGULATOR)
- for (i = 0; i < kbdev->nr_regulators; i++) {
- if (WARN_ON_ONCE(kbdev->regulators[i] == NULL))
- ;
- else if (!regulator_is_enabled(kbdev->regulators[i]))
- WARN_ON(regulator_enable(kbdev->regulators[i]));
- }
-#endif
-
- for (i = 0; i < kbdev->nr_clocks; i++) {
- if (WARN_ON_ONCE(kbdev->clocks[i] == NULL))
- ;
- else if (!__clk_is_enabled(kbdev->clocks[i]))
- WARN_ON(clk_prepare_enable(kbdev->clocks[i]));
- }
-}
-
-static void disable_gpu_power_control(struct kbase_device *kbdev)
-{
- unsigned int i;
-
- for (i = 0; i < kbdev->nr_clocks; i++) {
- if (WARN_ON_ONCE(kbdev->clocks[i] == NULL))
- ;
- else if (__clk_is_enabled(kbdev->clocks[i])) {
- clk_disable_unprepare(kbdev->clocks[i]);
- WARN_ON(__clk_is_enabled(kbdev->clocks[i]));
- }
-
- }
-
-#if defined(CONFIG_REGULATOR)
- for (i = 0; i < kbdev->nr_regulators; i++) {
- if (WARN_ON_ONCE(kbdev->regulators[i] == NULL))
- ;
- else if (regulator_is_enabled(kbdev->regulators[i]))
- WARN_ON(regulator_disable(kbdev->regulators[i]));
- }
-#endif
-}
-
-static int pm_callback_power_on(struct kbase_device *kbdev)
-{
- int ret = 1; /* Assume GPU has been powered off */
- int error;
-
- dev_dbg(kbdev->dev, "%s %p\n", __func__,
- (void *)kbdev->dev->pm_domain);
-
- enable_gpu_power_control(kbdev);
-
- error = pm_runtime_get_sync(kbdev->dev);
- if (error == 1) {
- /*
- * Let core know that the chip has not been
- * powered off, so we can save on re-initialization.
- */
- ret = 0;
- }
-
- dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
-
- return ret;
-}
-
-static void pm_callback_power_off(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "%s\n", __func__);
-
- pm_runtime_mark_last_busy(kbdev->dev);
- pm_runtime_put_autosuspend(kbdev->dev);
-
-#ifndef KBASE_PM_RUNTIME
- disable_gpu_power_control(kbdev);
-#endif
-}
-
-#ifdef KBASE_PM_RUNTIME
-static int kbase_device_runtime_init(struct kbase_device *kbdev)
-{
- int ret = 0;
-
- dev_dbg(kbdev->dev, "%s\n", __func__);
-
- pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
- pm_runtime_use_autosuspend(kbdev->dev);
-
- pm_runtime_set_active(kbdev->dev);
- pm_runtime_enable(kbdev->dev);
-
- if (!pm_runtime_enabled(kbdev->dev)) {
- dev_warn(kbdev->dev, "pm_runtime not enabled");
- ret = -ENOENT;
- }
-
- return ret;
-}
-
-static void kbase_device_runtime_disable(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "%s\n", __func__);
- pm_runtime_disable(kbdev->dev);
-}
-#endif
-
-static int pm_callback_runtime_on(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "%s\n", __func__);
-
- enable_gpu_power_control(kbdev);
- return 0;
-}
-
-static void pm_callback_runtime_off(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "%s\n", __func__);
-
- disable_gpu_power_control(kbdev);
-}
-
-static void pm_callback_resume(struct kbase_device *kbdev)
-{
- int ret = pm_callback_runtime_on(kbdev);
-
- WARN_ON(ret);
-}
-
-static void pm_callback_suspend(struct kbase_device *kbdev)
-{
- pm_callback_runtime_off(kbdev);
-}
-
-struct kbase_pm_callback_conf pm_callbacks = {
- .power_on_callback = pm_callback_power_on,
- .power_off_callback = pm_callback_power_off,
- .power_suspend_callback = pm_callback_suspend,
- .power_resume_callback = pm_callback_resume,
- .soft_reset_callback = pm_callback_soft_reset,
-#ifdef KBASE_PM_RUNTIME
- .power_runtime_init_callback = kbase_device_runtime_init,
- .power_runtime_term_callback = kbase_device_runtime_disable,
- .power_runtime_on_callback = pm_callback_runtime_on,
- .power_runtime_off_callback = pm_callback_runtime_off,
-#else /* KBASE_PM_RUNTIME */
- .power_runtime_init_callback = NULL,
- .power_runtime_term_callback = NULL,
- .power_runtime_on_callback = NULL,
- .power_runtime_off_callback = NULL,
-#endif /* KBASE_PM_RUNTIME */
-};
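
On platforms that keep a file like the one deleted above, the core driver does not reference pm_callbacks directly; the table is conventionally exported through the platform config header. A minimal sketch of that wiring, assuming the usual kbase convention (this header is not part of the patch):

/* mali_kbase_config_platform.h -- conventional hook-up, sketch only */
extern struct kbase_pm_callback_conf pm_callbacks;

/* Common kbase probe code resolves its PM hooks through this macro. */
#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)

Note the split visible in the deleted code: with KBASE_PM_RUNTIME, pm_callback_power_off only drops the runtime-PM reference and leaves the actual clock and regulator cut to pm_callback_runtime_off via autosuspend; without it, disable_gpu_power_control() runs synchronously in the power-off path.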