-rw-r--r--  mali_kbase/Kbuild  48
-rw-r--r--  mali_kbase/Kconfig  2
-rw-r--r--  mali_kbase/Mconfig  2
-rw-r--r--  mali_kbase/backend/gpu/Kbuild  6
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_debug_job_fault_backend.c  4
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_devfreq.c  2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_device_hw.c  33
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_device_internal.h  16
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_gpu.c  162
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_instr_backend.c  15
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_instr_defs.h  2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_irq_linux.c  33
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_defs.h  5
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_hw.c  5
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_rb.c  11
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h  62
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_backend.c  8
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_ca.c  8
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c  30
-rw-r--r--  mali_kbase/build.bp  22
-rw-r--r--  mali_kbase/context/backend/mali_kbase_context_jm.c  201
-rw-r--r--  mali_kbase/context/mali_kbase_context.c  206
-rw-r--r--  mali_kbase/context/mali_kbase_context.h (renamed from mali_kbase/mali_kbase_context.h)  36
-rw-r--r--  mali_kbase/context/mali_kbase_context_internal.h  60
-rw-r--r--  mali_kbase/device/backend/mali_kbase_device_jm.c  114
-rw-r--r--  mali_kbase/device/mali_kbase_device.c  48
-rw-r--r--  mali_kbase/device/mali_kbase_device_internal.h  14
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu.c  41
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_fault.h  12
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_id.h  3
-rw-r--r--  mali_kbase/gpu/mali_kbase_gpu_regmap.h  24
-rw-r--r--  mali_kbase/mali_base_hwconfig_features.h  57
-rw-r--r--  mali_kbase/mali_base_hwconfig_issues.h  37
-rw-r--r--  mali_kbase/mali_base_kernel.h  16
-rw-r--r--  mali_kbase/mali_kbase.h  3
-rw-r--r--  mali_kbase/mali_kbase_context.c  344
-rw-r--r--  mali_kbase/mali_kbase_core_linux.c  88
-rw-r--r--  mali_kbase/mali_kbase_debug_mem_view.c  6
-rw-r--r--  mali_kbase/mali_kbase_defs.h  18
-rw-r--r--  mali_kbase/mali_kbase_dummy_job_wa.c  515
-rw-r--r--  mali_kbase/mali_kbase_dummy_job_wa.h  43
-rw-r--r--  mali_kbase/mali_kbase_event.c  2
-rw-r--r--  mali_kbase/mali_kbase_hw.c  27
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_backend.h  28
-rw-r--r--  mali_kbase/mali_kbase_hwcnt_backend_gpu.c  2
-rw-r--r--  mali_kbase/mali_kbase_ioctl.h  6
-rw-r--r--  mali_kbase/mali_kbase_jd.c  4
-rw-r--r--  mali_kbase/mali_kbase_js.c  34
-rw-r--r--  mali_kbase/mali_kbase_js.h  2
-rw-r--r--  mali_kbase/mali_kbase_mem.c  29
-rw-r--r--  mali_kbase/mali_kbase_mem.h  81
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c  31
-rw-r--r--  mali_kbase/mali_kbase_mem_pool_debugfs.c  16
-rw-r--r--  mali_kbase/mali_kbase_softjobs.c  2
-rw-r--r--  mali_kbase/mmu/backend/mali_kbase_mmu_jm.c  391
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.c (renamed from mali_kbase/mali_kbase_mmu.c)  375
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.h  118
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw.h (renamed from mali_kbase/mali_kbase_mmu_hw.h)  77
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw_direct.c (renamed from mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c)  154
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_internal.h  49
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c (renamed from mali_kbase/mali_kbase_mmu_mode_aarch64.c)  31
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_mode_lpae.c (renamed from mali_kbase/mali_kbase_mmu_mode_lpae.c)  3
-rw-r--r--  mali_kbase/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c  62
-rw-r--r--  mali_kbase/tl/backend/mali_kbase_timeline_jm.c  97
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline.c (renamed from mali_kbase/mali_kbase_timeline.c)  70
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline.h (renamed from mali_kbase/mali_kbase_timeline.h)  0
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline_io.c (renamed from mali_kbase/mali_kbase_timeline_io.c)  6
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline_priv.h (renamed from mali_kbase/mali_kbase_timeline_priv.h)  4
-rw-r--r--  mali_kbase/tl/mali_kbase_tl_serialize.h (renamed from mali_kbase/mali_kbase_tl_serialize.h)  0
-rw-r--r--  mali_kbase/tl/mali_kbase_tlstream.c (renamed from mali_kbase/mali_kbase_tlstream.c)  0
-rw-r--r--  mali_kbase/tl/mali_kbase_tlstream.h (renamed from mali_kbase/mali_kbase_tlstream.h)  0
-rw-r--r--  mali_kbase/tl/mali_kbase_trace_defs.h (renamed from mali_kbase/mali_kbase_trace_defs.h)  2
-rw-r--r--  mali_kbase/tl/mali_kbase_tracepoints.c (renamed from mali_kbase/mali_kbase_tracepoints.c)  169
-rw-r--r--  mali_kbase/tl/mali_kbase_tracepoints.h (renamed from mali_kbase/mali_kbase_tracepoints.h)  136
74 files changed, 2942 insertions, 1428 deletions
diff --git a/mali_kbase/Kbuild b/mali_kbase/Kbuild
index 503d597..400ebe0 100644
--- a/mali_kbase/Kbuild
+++ b/mali_kbase/Kbuild
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012-2020 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -21,7 +21,7 @@
# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r22p0-01rel0"
+MALI_RELEASE_NAME ?= "r23p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
@@ -64,18 +64,17 @@ ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH)
subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
SRC := \
+ context/mali_kbase_context.c \
device/mali_kbase_device.c \
mali_kbase_cache_policy.c \
mali_kbase_mem.c \
mali_kbase_mem_pool_group.c \
- mali_kbase_mmu.c \
mali_kbase_native_mgm.c \
mali_kbase_ctx_sched.c \
mali_kbase_jm.c \
mali_kbase_gpuprops.c \
mali_kbase_js.c \
mali_kbase_event.c \
- mali_kbase_context.c \
mali_kbase_pm.c \
mali_kbase_config.c \
mali_kbase_vinstr.c \
@@ -87,42 +86,51 @@ SRC := \
mali_kbase_hwcnt_virtualizer.c \
mali_kbase_softjobs.c \
mali_kbase_10969_workaround.c \
+ mali_kbase_dummy_job_wa.c \
mali_kbase_hw.c \
mali_kbase_debug.c \
mali_kbase_gpu_memory_debugfs.c \
mali_kbase_mem_linux.c \
mali_kbase_core_linux.c \
mali_kbase_mem_profile_debugfs.c \
- mali_kbase_mmu_mode_lpae.c \
- mali_kbase_mmu_mode_aarch64.c \
+ mmu/mali_kbase_mmu.c \
+ mmu/mali_kbase_mmu_hw_direct.c \
+ mmu/mali_kbase_mmu_mode_lpae.c \
+ mmu/mali_kbase_mmu_mode_aarch64.c \
mali_kbase_disjoint_events.c \
mali_kbase_debug_mem_view.c \
mali_kbase_smc.c \
mali_kbase_mem_pool.c \
mali_kbase_mem_pool_debugfs.c \
mali_kbase_debugfs_helper.c \
- mali_kbase_timeline.c \
- mali_kbase_timeline_io.c \
- mali_kbase_tlstream.c \
- mali_kbase_tracepoints.c \
mali_kbase_strings.c \
mali_kbase_as_fault_debugfs.c \
mali_kbase_regs_history_debugfs.c \
- thirdparty/mali_kbase_mmap.c
+ thirdparty/mali_kbase_mmap.c \
+ tl/mali_kbase_timeline.c \
+ tl/mali_kbase_timeline_io.c \
+ tl/mali_kbase_tlstream.c \
+ tl/mali_kbase_tracepoints.c \
+ gpu/mali_kbase_gpu.c
-ifeq ($(MALI_USE_CSF),0)
+ifeq ($(MALI_USE_CSF),1)
+ SRC += \
+ device/backend/mali_kbase_device_csf.c \
+ gpu/backend/mali_kbase_gpu_fault_csf.c \
+ tl/backend/mali_kbase_timeline_csf.c \
+ mmu/backend/mali_kbase_mmu_csf.c \
+ context/backend/mali_kbase_context_csf.c
+else
SRC += \
mali_kbase_debug_job_fault.c \
mali_kbase_jd.c \
mali_kbase_jd_debugfs.c \
mali_kbase_js_ctx_attr.c \
device/backend/mali_kbase_device_jm.c \
- gpu/backend/mali_kbase_gpu_fault_jm.c
-else
- SRC += \
- device/backend/mali_kbase_device_csf.c \
- gpu/backend/mali_kbase_gpu_fault_csf.c
-
+ gpu/backend/mali_kbase_gpu_fault_jm.c \
+ tl/backend/mali_kbase_timeline_jm.c \
+ mmu/backend/mali_kbase_mmu_jm.c \
+ context/backend/mali_kbase_context_jm.c
endif
ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
@@ -130,7 +138,7 @@ ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
endif
ifeq ($(MALI_UNIT_TEST),1)
- SRC += mali_kbase_timeline_test.c
+ SRC += tl/mali_kbase_timeline_test.c
endif
ifeq ($(MALI_CUSTOMER_RELEASE),0)
@@ -160,6 +168,8 @@ endif
ifeq ($(MALI_USE_CSF),1)
include $(src)/csf/Kbuild
+else
+# empty
endif
mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
diff --git a/mali_kbase/Kconfig b/mali_kbase/Kconfig
index 198ea79..a739363 100644
--- a/mali_kbase/Kconfig
+++ b/mali_kbase/Kconfig
@@ -210,7 +210,7 @@ config MALI_DMA_BUF_MAP_ON_DEMAND
config MALI_DMA_BUF_LEGACY_COMPAT
bool "Enable legacy compatibility cache flush on dma-buf map"
depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
- default y
+ default n
help
This option enables compatibility with legacy dma-buf mapping
behavior, then the dma-buf is mapped on import, by adding cache
diff --git a/mali_kbase/Mconfig b/mali_kbase/Mconfig
index ccdc2d4..27399fa 100644
--- a/mali_kbase/Mconfig
+++ b/mali_kbase/Mconfig
@@ -213,7 +213,7 @@ config MALI_DMA_BUF_MAP_ON_DEMAND
config MALI_DMA_BUF_LEGACY_COMPAT
bool "Enable legacy compatibility cache flush on dma-buf map"
depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
- default y
+ default n
help
This option enables compatibility with legacy dma-buf mapping
behavior, then the dma-buf is mapped on import, by adding cache
diff --git a/mali_kbase/backend/gpu/Kbuild b/mali_kbase/backend/gpu/Kbuild
index 9a913bf..8fe7aba 100644
--- a/mali_kbase/backend/gpu/Kbuild
+++ b/mali_kbase/backend/gpu/Kbuild
@@ -22,13 +22,11 @@
BACKEND += \
backend/gpu/mali_kbase_cache_policy_backend.c \
backend/gpu/mali_kbase_device_hw.c \
- backend/gpu/mali_kbase_gpu.c \
backend/gpu/mali_kbase_gpuprops_backend.c \
backend/gpu/mali_kbase_irq_linux.c \
backend/gpu/mali_kbase_instr_backend.c \
backend/gpu/mali_kbase_jm_as.c \
backend/gpu/mali_kbase_js_backend.c \
- backend/gpu/mali_kbase_mmu_hw_direct.c \
backend/gpu/mali_kbase_pm_backend.c \
backend/gpu/mali_kbase_pm_driver.c \
backend/gpu/mali_kbase_pm_metrics.c \
@@ -39,7 +37,9 @@ BACKEND += \
backend/gpu/mali_kbase_time.c \
backend/gpu/mali_kbase_l2_mmu_config.c
-ifeq ($(MALI_USE_CSF),0)
+ifeq ($(MALI_USE_CSF),1)
+# empty
+else
BACKEND += \
backend/gpu/mali_kbase_debug_job_fault_backend.c \
backend/gpu/mali_kbase_jm_hw.c \
diff --git a/mali_kbase/backend/gpu/mali_kbase_debug_job_fault_backend.c b/mali_kbase/backend/gpu/mali_kbase_debug_job_fault_backend.c
index 450f6e7..b05844e 100644
--- a/mali_kbase/backend/gpu/mali_kbase_debug_job_fault_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_debug_job_fault_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2015,2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -70,6 +70,8 @@ static int mmu_reg_snapshot[] = {
static int as_reg_snapshot[] = {
AS_TRANSTAB_LO,
AS_TRANSTAB_HI,
+ AS_TRANSCFG_LO,
+ AS_TRANSCFG_HI,
AS_MEMATTR_LO,
AS_MEMATTR_HI,
AS_FAULTSTATUS,
diff --git a/mali_kbase/backend/gpu/mali_kbase_devfreq.c b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
index 40847e2..e0c108c 100644
--- a/mali_kbase/backend/gpu/mali_kbase_devfreq.c
+++ b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
@@ -21,7 +21,7 @@
*/
#include <mali_kbase.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <linux/of.h>
diff --git a/mali_kbase/backend/gpu/mali_kbase_device_hw.c b/mali_kbase/backend/gpu/mali_kbase_device_hw.c
index 380f74f..0ec8cef 100644
--- a/mali_kbase/backend/gpu/mali_kbase_device_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_device_hw.c
@@ -29,12 +29,11 @@
#include <backend/gpu/mali_kbase_instr_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_device_internal.h>
-#include <backend/gpu/mali_kbase_mmu_hw_direct.h>
#include <mali_kbase_reset_gpu.h>
+#include <mmu/mali_kbase_mmu.h>
#if !defined(CONFIG_MALI_NO_MALI)
-
#ifdef CONFIG_DEBUG_FS
int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
@@ -296,18 +295,38 @@ static void kbase_clean_caches_done(struct kbase_device *kbdev)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
-void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev)
+static inline bool get_cache_clean_flag(struct kbase_device *kbdev)
{
+ bool cache_clean_in_progress;
unsigned long flags;
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- while (kbdev->cache_clean_in_progress) {
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ cache_clean_in_progress = kbdev->cache_clean_in_progress;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return cache_clean_in_progress;
+}
+
+void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev)
+{
+ while (get_cache_clean_flag(kbdev)) {
wait_event_interruptible(kbdev->cache_clean_wait,
!kbdev->cache_clean_in_progress);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
}
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+int kbase_gpu_wait_cache_clean_timeout(struct kbase_device *kbdev,
+ unsigned int wait_timeout_ms)
+{
+ long remaining = msecs_to_jiffies(wait_timeout_ms);
+
+ while (remaining && get_cache_clean_flag(kbdev)) {
+ remaining = wait_event_timeout(kbdev->cache_clean_wait,
+ !kbdev->cache_clean_in_progress,
+ remaining);
+ }
+
+ return (remaining ? 0 : -ETIMEDOUT);
}
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
diff --git a/mali_kbase/backend/gpu/mali_kbase_device_internal.h b/mali_kbase/backend/gpu/mali_kbase_device_internal.h
index c62f1e5..c3e5c03 100644
--- a/mali_kbase/backend/gpu/mali_kbase_device_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_device_internal.h
@@ -77,6 +77,22 @@ void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev);
void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev);
/**
+ * kbase_gpu_wait_cache_clean_timeout - Wait for certain time for cache
+ * cleaning to finish
+ * @kbdev: Kbase device
+ * @wait_timeout_ms: Time, in milli seconds, to wait for cache clean to complete.
+ *
+ * This function will take hwaccess_lock, and may sleep. This is supposed to be
+ * called from paths (like GPU reset) where an indefinite wait for the completion
+ * of cache clean operation can cause deadlock, as the operation may never
+ * complete.
+ *
+ * Return: 0 if successful or a negative error code on failure.
+ */
+int kbase_gpu_wait_cache_clean_timeout(struct kbase_device *kbdev,
+ unsigned int wait_timeout_ms);
+
+/**
* kbase_gpu_cache_clean_wait_complete - Called after the cache cleaning is
* finished. Would also be called after
* the GPU reset.
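
The new kbase_gpu_wait_cache_clean_timeout() above bounds the wait with a jiffies budget: wait_event_timeout() returns whatever budget is left, the loop continues only while both the budget and the cache_clean_in_progress flag are non-zero, and an exhausted budget becomes -ETIMEDOUT. The standalone userspace sketch below mirrors that loop shape only; it is an analogue, not driver code, with a 1 ms sleep standing in for wait_event_timeout() and invented names (flag_is_set, wait_flag_clear_timeout).

/*
 * Userspace analogue of the bounded-wait pattern: keep waiting for a flag
 * to clear, but give up with -ETIMEDOUT once a millisecond budget is spent.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile bool cache_clean_in_progress = true;

static bool flag_is_set(void)
{
	return cache_clean_in_progress;
}

static int wait_flag_clear_timeout(unsigned int wait_budget_ms)
{
	struct timespec step = { .tv_sec = 0, .tv_nsec = 1000000 }; /* 1 ms */

	while (wait_budget_ms && flag_is_set()) {
		nanosleep(&step, NULL);	/* stand-in for wait_event_timeout() */
		wait_budget_ms--;	/* burn one slice of the budget */
	}

	/* Budget left over means the flag cleared in time. */
	return wait_budget_ms ? 0 : -ETIMEDOUT;
}

int main(void)
{
	/* Nothing clears the flag here, so the wait must time out. */
	int ret = wait_flag_clear_timeout(5);

	printf("wait returned %d (%s)\n", ret,
	       ret == -ETIMEDOUT ? "timed out as expected" : "flag cleared");
	return 0;
}
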
diff --git a/mali_kbase/backend/gpu/mali_kbase_gpu.c b/mali_kbase/backend/gpu/mali_kbase_gpu.c
deleted file mode 100644
index 9745df6..0000000
--- a/mali_kbase/backend/gpu/mali_kbase_gpu.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-/*
- * Register-based HW access backend APIs
- */
-#include <mali_kbase.h>
-#include <mali_kbase_hwaccess_backend.h>
-#include <mali_kbase_reset_gpu.h>
-#include <backend/gpu/mali_kbase_irq_internal.h>
-#include <backend/gpu/mali_kbase_jm_internal.h>
-#include <backend/gpu/mali_kbase_js_internal.h>
-#include <backend/gpu/mali_kbase_pm_internal.h>
-
-int kbase_backend_early_init(struct kbase_device *kbdev)
-{
- int err;
-
- err = kbasep_platform_device_init(kbdev);
- if (err)
- return err;
-
- err = kbase_pm_runtime_init(kbdev);
- if (err)
- goto fail_runtime_pm;
-
- /* Ensure we can access the GPU registers */
- kbase_pm_register_access_enable(kbdev);
-
- /* Find out GPU properties based on the GPU feature registers */
- kbase_gpuprops_set(kbdev);
-
- /* We're done accessing the GPU registers for now. */
- kbase_pm_register_access_disable(kbdev);
-
- err = kbase_install_interrupts(kbdev);
- if (err)
- goto fail_interrupts;
-
- return 0;
-
-fail_interrupts:
- kbase_pm_runtime_term(kbdev);
-fail_runtime_pm:
- kbasep_platform_device_term(kbdev);
-
- return err;
-}
-
-void kbase_backend_early_term(struct kbase_device *kbdev)
-{
- kbase_release_interrupts(kbdev);
- kbase_pm_runtime_term(kbdev);
- kbasep_platform_device_term(kbdev);
-}
-
-int kbase_backend_late_init(struct kbase_device *kbdev)
-{
- int err;
-
- err = kbase_hwaccess_pm_init(kbdev);
- if (err)
- return err;
-
- err = kbase_reset_gpu_init(kbdev);
- if (err)
- goto fail_reset_gpu_init;
-
- err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
- if (err)
- goto fail_pm_powerup;
-
- err = kbase_backend_timer_init(kbdev);
- if (err)
- goto fail_timer;
-
-#ifdef CONFIG_MALI_DEBUG
-#ifndef CONFIG_MALI_NO_MALI
- if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
- dev_err(kbdev->dev, "Interrupt assigment check failed.\n");
- err = -EINVAL;
- goto fail_interrupt_test;
- }
-#endif /* !CONFIG_MALI_NO_MALI */
-#endif /* CONFIG_MALI_DEBUG */
-
- err = kbase_job_slot_init(kbdev);
- if (err)
- goto fail_job_slot;
-
- /* Do the initialisation of devfreq.
- * Devfreq needs backend_timer_init() for completion of its
- * initialisation and it also needs to catch the first callback
- * occurence of the runtime_suspend event for maintaining state
- * coherence with the backend power management, hence needs to be
- * placed before the kbase_pm_context_idle().
- */
- err = kbase_backend_devfreq_init(kbdev);
- if (err)
- goto fail_devfreq_init;
-
- /* Idle the GPU and/or cores, if the policy wants it to */
- kbase_pm_context_idle(kbdev);
-
- /* Update gpuprops with L2_FEATURES if applicable */
- kbase_gpuprops_update_l2_features(kbdev);
-
- init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
-
- return 0;
-
-fail_devfreq_init:
- kbase_job_slot_term(kbdev);
-fail_job_slot:
-
-#ifdef CONFIG_MALI_DEBUG
-#ifndef CONFIG_MALI_NO_MALI
-fail_interrupt_test:
-#endif /* !CONFIG_MALI_NO_MALI */
-#endif /* CONFIG_MALI_DEBUG */
-
- kbase_backend_timer_term(kbdev);
-fail_timer:
- kbase_hwaccess_pm_halt(kbdev);
-fail_pm_powerup:
- kbase_reset_gpu_term(kbdev);
-fail_reset_gpu_init:
- kbase_hwaccess_pm_term(kbdev);
-
- return err;
-}
-
-void kbase_backend_late_term(struct kbase_device *kbdev)
-{
- kbase_backend_devfreq_term(kbdev);
- kbase_job_slot_halt(kbdev);
- kbase_job_slot_term(kbdev);
- kbase_backend_timer_term(kbdev);
- kbase_hwaccess_pm_halt(kbdev);
- kbase_reset_gpu_term(kbdev);
- kbase_hwaccess_pm_term(kbdev);
-}
diff --git a/mali_kbase/backend/gpu/mali_kbase_instr_backend.c b/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
index 1f98863..724c664 100644
--- a/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
@@ -32,6 +32,7 @@
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
+
int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
struct kbase_context *kctx,
struct kbase_instr_hwcnt_enable *enable)
@@ -80,16 +81,17 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
enable->dump_buffer & 0xFFFFFFFF);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
enable->dump_buffer >> 32);
+
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
enable->jm_bm);
+
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
enable->shader_bm);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
enable->mmu_l2_bm);
- /* Due to PRLAM-8186 we need to disable the Tiler before we enable the
- * HW counter dump. */
+
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
- enable->tiler_bm);
+ enable->tiler_bm);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL);
@@ -198,6 +200,7 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
*/
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+
/* Reconfigure the dump address */
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
kbdev->hwcnt.addr & 0xFFFFFFFF);
@@ -207,6 +210,7 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
/* Start dumping */
KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
kbdev->hwcnt.addr, 0);
+
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
GPU_COMMAND_PRFCNT_SAMPLE);
@@ -216,6 +220,8 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
unlock:
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+
return err;
}
KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
@@ -277,6 +283,7 @@ void kbasep_cache_clean_worker(struct work_struct *data)
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
}
+
void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
{
unsigned long flags;
@@ -369,6 +376,8 @@ int kbase_instr_backend_init(struct kbase_device *kbdev)
init_waitqueue_head(&kbdev->hwcnt.backend.wait);
INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
kbasep_cache_clean_worker);
+
+
kbdev->hwcnt.backend.triggered = 0;
kbdev->hwcnt.backend.cache_clean_wq =
diff --git a/mali_kbase/backend/gpu/mali_kbase_instr_defs.h b/mali_kbase/backend/gpu/mali_kbase_instr_defs.h
index c9fb759..b7d9d31 100644
--- a/mali_kbase/backend/gpu/mali_kbase_instr_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_instr_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014, 2016, 2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014, 2016, 2018, 2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/backend/gpu/mali_kbase_irq_linux.c b/mali_kbase/backend/gpu/mali_kbase_irq_linux.c
index fa3d2cc..21b2aa2 100644
--- a/mali_kbase/backend/gpu/mali_kbase_irq_linux.c
+++ b/mali_kbase/backend/gpu/mali_kbase_irq_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016,2018-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016,2018-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -153,8 +153,6 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler);
-
static irq_handler_t kbase_handler_table[] = {
[JOB_IRQ_TAG] = kbase_job_irq_handler,
[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
@@ -167,6 +165,35 @@ static irq_handler_t kbase_handler_table[] = {
#define GPU_IRQ_HANDLER GPU_IRQ_TAG
/**
+ * kbase_gpu_irq_test_handler - Variant (for test) of kbase_gpu_irq_handler()
+ * @irq: IRQ number
+ * @data: Data associated with this IRQ (i.e. kbdev)
+ * @val: Value of the GPU_CONTROL_REG(GPU_IRQ_STATUS)
+ *
+ * Handle the GPU device interrupt source requests reflected in the
+ * given source bit-pattern. The test code caller is responsible for
+ * undertaking the required device power maintenace.
+ *
+ * Return: IRQ_HANDLED if the requests are from the GPU device,
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t kbase_gpu_irq_test_handler(int irq, void *data, u32 val)
+{
+ struct kbase_device *kbdev = kbase_untag(data);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_gpu_interrupt(kbdev, val);
+
+ return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_irq_test_handler);
+
+/**
* kbase_set_custom_irq_handler - Set a custom IRQ handler
* @kbdev: Device for which the handler is to be registered
* @custom_handler: Handler to be registered
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_defs.h b/mali_kbase/backend/gpu/mali_kbase_jm_defs.h
index 4603229..7cda61a 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_defs.h
@@ -62,9 +62,6 @@ struct slot_rb {
/**
* struct kbase_backend_data - GPU backend specific data for HW access layer
* @slot_rb: Slot ringbuffers
- * @rmu_workaround_flag: When PRLAM-8987 is present, this flag determines
- * whether slots 0/1 or slot 2 are currently being
- * pulled from
* @scheduling_timer: The timer tick used for rescheduling jobs
* @timer_running: Is the timer running? The runpool_mutex must be
* held whilst modifying this.
@@ -83,8 +80,6 @@ struct slot_rb {
struct kbase_backend_data {
struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];
- bool rmu_workaround_flag;
-
struct hrtimer scheduling_timer;
bool timer_running;
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index 5e29721..2692f05 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -27,7 +27,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config.h>
#include <gpu/mali_kbase_gpu_regmap.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_reset_gpu.h>
@@ -37,9 +37,6 @@
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
-#define beenthere(kctx, f, a...) \
- dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
-
static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev);
static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
index ce0273a..c860bde 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
@@ -30,7 +30,7 @@
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_js.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_hwcnt_context.h>
#include <mali_kbase_10969_workaround.h>
#include <mali_kbase_reset_gpu.h>
@@ -300,9 +300,6 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
case KBASE_ATOM_GPU_RB_READY:
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
- case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
- /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
-
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
break;
@@ -932,12 +929,6 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
break;
katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
-
- /* ***TRANSITION TO HIGHER STATE*** */
- /* fallthrough */
- case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
- katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_READY;
/* ***TRANSITION TO HIGHER STATE*** */
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
deleted file mode 100644
index 0a3fa7e..0000000
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2014-2015, 2019 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-/*
- * Interface file for the direct implementation for MMU hardware access
- *
- * Direct MMU hardware interface
- *
- * This module provides the interface(s) that are required by the direct
- * register access implementation of the MMU hardware interface
- */
-
-#ifndef _KBASE_MMU_HW_DIRECT_H_
-#define _KBASE_MMU_HW_DIRECT_H_
-
-#include <mali_kbase_defs.h>
-
-/**
- * kbase_mmu_interrupt - Process an MMU interrupt.
- *
- * Process the MMU interrupt that was reported by the &kbase_device.
- *
- * @kbdev: Pointer to the kbase device for which the interrupt happened.
- * @irq_stat: Value of the MMU_IRQ_STATUS register.
- */
-void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
-
-/**
- * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
- *
- * Process the bus fault interrupt that was reported for a particular GPU
- * address space.
- *
- * @kbdev: Pointer to the kbase device for which bus fault was reported.
- * @status: Value of the GPU_FAULTSTATUS register.
- * @as_nr: GPU address space for which the bus fault occurred.
- *
- * Return: zero if the operation was successful, non-zero otherwise.
- */
-int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
- u32 status, u32 as_nr);
-
-#endif /* _KBASE_MMU_HW_DIRECT_H_ */
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index e016221..6a9cb13 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -36,6 +36,7 @@
#include <mali_kbase_hwcnt_context.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_devfreq.h>
+#include <mali_kbase_dummy_job_wa.h>
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
@@ -627,6 +628,11 @@ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
lockdep_assert_held(&kbdev->hwaccess_lock);
lockdep_assert_held(&kbdev->pm.lock);
+ if (kbase_dummy_job_wa_enabled(kbdev)) {
+ dev_warn(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
+ new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
+ }
+
kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_ca.c b/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
index 41f6429..b691524 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2018, 2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,6 +30,7 @@
#ifdef CONFIG_MALI_NO_MALI
#include <backend/gpu/mali_kbase_model_dummy.h>
#endif
+#include <mali_kbase_dummy_job_wa.h>
int kbase_pm_ca_init(struct kbase_device *kbdev)
{
@@ -64,6 +65,11 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
goto unlock;
}
+ if (kbase_dummy_job_wa_enabled(kbdev)) {
+ dev_info_once(kbdev->dev, "Dynamic core scaling not supported as dummy job WA is enabled");
+ goto unlock;
+ }
+
pm_backend->ca_cores_enabled = core_mask;
kbase_pm_update_state(kbdev);
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index cb8f647..d53acb2 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,7 +29,7 @@
#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
#include <gpu/mali_kbase_gpu_regmap.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_smc.h>
@@ -42,6 +42,7 @@
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_l2_mmu_config.h>
+#include <mali_kbase_dummy_job_wa.h>
#include <linux/of.h>
@@ -315,11 +316,17 @@ static void kbase_pm_invoke(struct kbase_device *kbdev,
}
}
- if (lo != 0)
- kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo);
-
- if (hi != 0)
- kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi);
+ if (kbase_dummy_job_wa_enabled(kbdev) &&
+ action == ACTION_PWRON &&
+ core_type == KBASE_PM_CORE_SHADER &&
+ !(kbdev->dummy_job_wa.flags & KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER)) {
+ kbase_dummy_job_wa_execute(kbdev, cores);
+ } else {
+ if (lo != 0)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo);
+ if (hi != 0)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi);
+ }
}
/**
@@ -1580,6 +1587,15 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&kbdev->mmu_hw_mutex);
+ if (kbdev->dummy_job_wa.flags &
+ KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER) {
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_dummy_job_wa_execute(kbdev,
+ kbase_pm_get_present_cores(kbdev,
+ KBASE_PM_CORE_SHADER));
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+
/* Enable the interrupts */
kbase_pm_enable_interrupts(kbdev);
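
The kbase_pm_invoke() hunk above makes shader power-on conditional: when the dummy job workaround is enabled and the LOGICAL_SHADER_POWER variant is not selected, kbase_dummy_job_wa_execute() takes over instead of writing the PWRON register pair, and kbase_pm_clock_on() additionally runs the workaround over all present shader cores for the logical-power variant. Below is a minimal, self-contained sketch of that gating decision only; fake_dev, reg_write() and wa_execute() are illustrative stand-ins, not driver symbols.

/*
 * Sketch: route shader power-on either through a software workaround or
 * through direct register writes, depending on a per-device flag set.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WA_FLAG_LOGICAL_SHADER_POWER 0x1u

struct fake_dev {
	unsigned int wa_flags;
	bool wa_enabled;
};

static void reg_write(const char *reg, uint32_t val)
{
	printf("write %s = 0x%08x\n", reg, val);
}

static void wa_execute(uint64_t cores)
{
	printf("workaround path for cores 0x%llx\n", (unsigned long long)cores);
}

static void power_on_shaders(struct fake_dev *dev, uint64_t cores)
{
	uint32_t lo = (uint32_t)cores;
	uint32_t hi = (uint32_t)(cores >> 32);

	if (dev->wa_enabled &&
	    !(dev->wa_flags & WA_FLAG_LOGICAL_SHADER_POWER)) {
		wa_execute(cores);	/* the workaround owns the power-on */
	} else {
		if (lo)
			reg_write("SHADER_PWRON_LO", lo);
		if (hi)
			reg_write("SHADER_PWRON_HI", hi);
	}
}

int main(void)
{
	struct fake_dev dev = { .wa_flags = 0, .wa_enabled = true };

	power_on_shaders(&dev, 0xfULL);	/* takes the workaround branch */
	dev.wa_enabled = false;
	power_on_shaders(&dev, 0xfULL);	/* plain register writes */
	return 0;
}
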
diff --git a/mali_kbase/build.bp b/mali_kbase/build.bp
index 967d12c..d331dd2 100644
--- a/mali_kbase/build.bp
+++ b/mali_kbase/build.bp
@@ -79,6 +79,8 @@ bob_kernel_module {
"backend/gpu/*.c",
"backend/gpu/*.h",
"backend/gpu/Kbuild",
+ "context/*.c",
+ "context/*.h",
"ipa/*.c",
"ipa/*.h",
"ipa/Kbuild",
@@ -91,6 +93,10 @@ bob_kernel_module {
"device/*.h",
"gpu/*.c",
"gpu/*.h",
+ "tl/*.c",
+ "tl/*.h",
+ "mmu/*.c",
+ "mmu/*.h",
],
kbuild_options: [
"CONFIG_MALI_KUTF=n",
@@ -126,20 +132,26 @@ bob_kernel_module {
},
gpu_has_job_manager: {
srcs: [
- "jm/*.h",
+ "context/backend/*_jm.c",
"device/backend/*_jm.c",
"gpu/backend/*_jm.c",
"gpu/backend/*_jm.h",
- ],
+ "jm/*.h",
+ "tl/backend/*_jm.c",
+ "mmu/backend/*_jm.c",
+ ],
},
gpu_has_csf: {
srcs: [
- "device/backend/*_csf.c",
- "gpu/backend/*_csf.c",
- "gpu/backend/*_csf.h",
+ "context/backend/*_csf.c",
"csf/*.c",
"csf/*.h",
"csf/Kbuild",
+ "device/backend/*_csf.c",
+ "gpu/backend/*_csf.c",
+ "gpu/backend/*_csf.h",
+ "tl/backend/*_csf.c",
+ "mmu/backend/*_csf.c",
],
},
defaults: ["mali_kbase_shared_config_defaults"],
diff --git a/mali_kbase/context/backend/mali_kbase_context_jm.c b/mali_kbase/context/backend/mali_kbase_context_jm.c
new file mode 100644
index 0000000..0fe61c4
--- /dev/null
+++ b/mali_kbase/context/backend/mali_kbase_context_jm.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel context APIs for Job Manager GPUs
+ */
+
+#include <context/mali_kbase_context_internal.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
+#include <mali_kbase.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_dma_fence.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_mem_pool_group.h>
+#include <mmu/mali_kbase_mmu.h>
+#include <tl/mali_kbase_timeline.h>
+#include <tl/mali_kbase_tracepoints.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <mali_kbase_debug_mem_view.h>
+#include <mali_kbase_mem_pool_debugfs.h>
+
+void kbase_context_debugfs_init(struct kbase_context *const kctx)
+{
+ kbase_debug_mem_view_init(kctx);
+ kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);
+ kbase_jit_debugfs_init(kctx);
+ kbasep_jd_debugfs_ctx_init(kctx);
+ kbase_debug_job_fault_context_init(kctx);
+}
+KBASE_EXPORT_SYMBOL(kbase_context_debugfs_init);
+
+void kbase_context_debugfs_term(struct kbase_context *const kctx)
+{
+ debugfs_remove_recursive(kctx->kctx_dentry);
+ kbase_debug_job_fault_context_term(kctx);
+}
+KBASE_EXPORT_SYMBOL(kbase_context_debugfs_term);
+#endif /* CONFIG_DEBUG_FS */
+
+static int kbase_context_kbase_timer_setup(struct kbase_context *kctx)
+{
+ kbase_timer_setup(&kctx->soft_job_timeout,
+ kbasep_soft_job_timeout_worker);
+
+ return 0;
+}
+
+static int kbase_context_submit_check(struct kbase_context *kctx)
+{
+ struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+ unsigned long irq_flags = 0;
+
+ base_context_create_flags const flags = kctx->create_flags;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+ /* Translate the flags */
+ if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return 0;
+}
+
+static const struct kbase_context_init context_init[] = {
+ {kbase_context_common_init, kbase_context_common_term, NULL},
+ {kbase_context_mem_pool_group_init, kbase_context_mem_pool_group_term,
+ "Memory pool goup initialization failed"},
+ {kbase_mem_evictable_init, kbase_mem_evictable_deinit,
+ "Memory evictable initialization failed"},
+ {kbasep_js_kctx_init, kbasep_js_kctx_term,
+ "JS kctx initialization failed"},
+ {kbase_jd_init, kbase_jd_exit,
+ "JD initialization failed"},
+ {kbase_event_init, kbase_event_cleanup,
+ "Event initialization failed"},
+ {kbase_dma_fence_init, kbase_dma_fence_term,
+ "DMA fence initialization failed"},
+ {kbase_context_mmu_init, kbase_context_mmu_term,
+ "MMU initialization failed"},
+ {kbase_context_mem_alloc_page, kbase_context_mem_pool_free,
+ "Memory alloc page failed"},
+ {kbase_region_tracker_init, kbase_region_tracker_term,
+ "Region tracker initialization failed"},
+ {kbase_sticky_resource_init, kbase_context_sticky_resource_term,
+ "Sticky resource initialization failed"},
+ {kbase_jit_init, kbase_jit_term,
+ "JIT initialization failed"},
+ {kbase_context_kbase_timer_setup, NULL, NULL},
+ {kbase_context_submit_check, NULL, NULL},
+};
+
+static void kbase_context_term_partial(
+ struct kbase_context *kctx,
+ unsigned int i)
+{
+ while (i-- > 0) {
+ if (context_init[i].term)
+ context_init[i].term(kctx);
+ }
+}
+
+struct kbase_context *kbase_create_context(struct kbase_device *kbdev,
+ bool is_compat,
+ base_context_create_flags const flags,
+ unsigned long const api_version,
+ struct file *const filp)
+{
+ struct kbase_context *kctx;
+ unsigned int i = 0;
+
+ if (WARN_ON(!kbdev))
+ return NULL;
+
+ /* Validate flags */
+ if (WARN_ON(flags != (flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS)))
+ return NULL;
+
+ /* zero-inited as lot of code assume it's zero'ed out on create */
+ kctx = vzalloc(sizeof(*kctx));
+ if (WARN_ON(!kctx))
+ return NULL;
+
+ kctx->kbdev = kbdev;
+ kctx->api_version = api_version;
+ kctx->filp = filp;
+ kctx->create_flags = flags;
+
+ if (is_compat)
+ kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+#if defined(CONFIG_64BIT)
+ else
+ kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
+#endif /* !defined(CONFIG_64BIT) */
+
+ for (i = 0; i < ARRAY_SIZE(context_init); i++) {
+ int err = context_init[i].init(kctx);
+
+ if (err) {
+ dev_err(kbdev->dev, "%s error = %d\n",
+ context_init[i].err_mes, err);
+ kbase_context_term_partial(kctx, i);
+ return NULL;
+ }
+ }
+
+ return kctx;
+}
+KBASE_EXPORT_SYMBOL(kbase_create_context);
+
+void kbase_destroy_context(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev;
+
+ if (WARN_ON(!kctx))
+ return;
+
+ kbdev = kctx->kbdev;
+ if (WARN_ON(!kbdev))
+ return;
+
+ /* Ensure the core is powered up for the destroy process
+ * A suspend won't happen here, because we're in a syscall
+ * from a userspace thread.
+ */
+ kbase_pm_context_active(kbdev);
+
+ kbase_mem_pool_group_mark_dying(&kctx->mem_pools);
+
+ kbase_jd_zap_context(kctx);
+ flush_workqueue(kctx->jctx.job_done_wq);
+
+ kbase_context_term_partial(kctx, ARRAY_SIZE(context_init));
+
+ kbase_pm_context_idle(kbdev);
+}
+KBASE_EXPORT_SYMBOL(kbase_destroy_context);
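
kbase_create_context() above walks the context_init[] table in order and, on the first failing stage, calls kbase_context_term_partial() so that only the stages that already succeeded are torn down, in reverse order. The self-contained sketch below shows the same table-driven init/rollback shape with invented stage names; it is a pattern illustration under those assumptions, not driver code.

/*
 * Sketch: table of init/term pairs, with rollback of completed stages only.
 */
#include <stddef.h>
#include <stdio.h>

struct ctx { int fail_stage; };

typedef int  (init_fn)(struct ctx *c);
typedef void (term_fn)(struct ctx *c);

struct stage {
	init_fn *init;
	term_fn *term;	/* may be NULL for stages with nothing to undo */
	const char *err_mes;
};

static int  init_a(struct ctx *c) { puts("init a"); return c->fail_stage == 0 ? -1 : 0; }
static void term_a(struct ctx *c) { (void)c; puts("term a"); }
static int  init_b(struct ctx *c) { puts("init b"); return c->fail_stage == 1 ? -1 : 0; }
static void term_b(struct ctx *c) { (void)c; puts("term b"); }
static int  init_c(struct ctx *c) { puts("init c"); return c->fail_stage == 2 ? -1 : 0; }

static const struct stage stages[] = {
	{ init_a, term_a, "stage a failed" },
	{ init_b, term_b, "stage b failed" },
	{ init_c, NULL,   "stage c failed" },
};

#define N_STAGES (sizeof(stages) / sizeof(stages[0]))

static void term_partial(struct ctx *c, size_t i)
{
	/* Undo stages [0, i) in reverse order. */
	while (i-- > 0)
		if (stages[i].term)
			stages[i].term(c);
}

static int create(struct ctx *c)
{
	size_t i;

	for (i = 0; i < N_STAGES; i++) {
		if (stages[i].init(c)) {
			fprintf(stderr, "%s\n", stages[i].err_mes);
			term_partial(c, i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct ctx c = { .fail_stage = 2 };	/* make the third stage fail */

	if (create(&c))
		puts("creation rolled back");
	return 0;
}

A full teardown, as in kbase_destroy_context(), is then just term_partial() over the whole table.
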
diff --git a/mali_kbase/context/mali_kbase_context.c b/mali_kbase/context/mali_kbase_context.c
new file mode 100644
index 0000000..1ae149d
--- /dev/null
+++ b/mali_kbase/context/mali_kbase_context.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel context APIs
+ */
+
+#include <mali_kbase.h>
+#include <gpu/mali_kbase_gpu_regmap.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_dma_fence.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_mem_pool_group.h>
+#include <tl/mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_timeline.h>
+#include <mmu/mali_kbase_mmu.h>
+#include <context/mali_kbase_context_internal.h>
+
+int kbase_context_common_init(struct kbase_context *kctx)
+{
+ const unsigned long cookies_mask = KBASE_COOKIE_MASK;
+
+ /* creating a context is considered a disjoint event */
+ kbase_disjoint_event(kctx->kbdev);
+
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+
+ atomic_set(&kctx->refcount, 0);
+
+ spin_lock_init(&kctx->mm_update_lock);
+ kctx->process_mm = NULL;
+ atomic_set(&kctx->nonmapped_pages, 0);
+ atomic_set(&kctx->permanent_mapped_pages, 0);
+ kctx->slots_pullable = 0;
+
+ kctx->tgid = current->tgid;
+ kctx->pid = current->pid;
+
+ atomic_set(&kctx->used_pages, 0);
+
+ mutex_init(&kctx->reg_lock);
+
+ spin_lock_init(&kctx->mem_partials_lock);
+ INIT_LIST_HEAD(&kctx->mem_partials);
+
+ spin_lock_init(&kctx->waiting_soft_jobs_lock);
+ INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
+
+ init_waitqueue_head(&kctx->event_queue);
+
+ bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_set(&kctx->jctx.work_id, 0);
+#endif
+
+ kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
+
+ mutex_init(&kctx->legacy_hwcnt_lock);
+
+ mutex_lock(&kctx->kbdev->kctx_list_lock);
+ list_add(&kctx->kctx_list_link, &kctx->kbdev->kctx_list);
+ /* Trace with the AOM tracepoint even in CSF for dumping */
+ KBASE_TLSTREAM_TL_NEW_CTX(kctx->kbdev, kctx, kctx->id,
+ (u32)(kctx->tgid));
+ mutex_unlock(&kctx->kbdev->kctx_list_lock);
+
+ return 0;
+}
+
+void kbase_context_common_term(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ int pages;
+
+ mutex_lock(&kctx->kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
+ kbase_ctx_sched_remove_ctx(kctx);
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kctx->kbdev->mmu_hw_mutex);
+
+ pages = atomic_read(&kctx->used_pages);
+ if (pages != 0)
+ dev_warn(kctx->kbdev->dev,
+ "%s: %d pages in use!\n", __func__, pages);
+
+ WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+
+ mutex_lock(&kctx->kbdev->kctx_list_lock);
+ /* Trace with the AOM tracepoint even in CSF for dumping */
+ KBASE_TLSTREAM_TL_DEL_CTX(kctx->kbdev, kctx);
+ list_del(&kctx->kctx_list_link);
+ mutex_unlock(&kctx->kbdev->kctx_list_lock);
+
+ KBASE_TRACE_ADD(kctx->kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+
+ /* Flush the timeline stream, so the user can see the termination
+ * tracepoints being fired.
+ * The "if" statement below is for optimization. It is safe to call
+ * kbase_timeline_streams_flush when timeline is disabled.
+ */
+ if (atomic_read(&kctx->kbdev->timeline_is_enabled) != 0)
+ kbase_timeline_streams_flush(kctx->kbdev->timeline);
+
+ vfree(kctx);
+}
+
+int kbase_context_mem_pool_group_init(struct kbase_context *kctx)
+{
+ return kbase_mem_pool_group_init(&kctx->mem_pools,
+ kctx->kbdev,
+ &kctx->kbdev->mem_pool_defaults,
+ &kctx->kbdev->mem_pools);
+}
+
+void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
+{
+ kbase_mem_pool_group_term(&kctx->mem_pools);
+}
+
+int kbase_context_mmu_init(struct kbase_context *kctx)
+{
+ kbase_mmu_init(kctx->kbdev,
+ &kctx->mmu, kctx,
+ base_context_mmu_group_id_get(kctx->create_flags));
+
+ return 0;
+}
+
+void kbase_context_mmu_term(struct kbase_context *kctx)
+{
+ kbase_mmu_term(kctx->kbdev, &kctx->mmu);
+}
+
+int kbase_context_mem_alloc_page(struct kbase_context *kctx)
+{
+ struct page *p;
+
+ p = kbase_mem_alloc_page(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK]);
+ if (!p)
+ return -ENOMEM;
+
+ kctx->aliasing_sink_page = as_tagged(page_to_phys(p));
+
+ return 0;
+}
+
+void kbase_context_mem_pool_free(struct kbase_context *kctx)
+{
+ /* drop the aliasing sink page now that it can't be mapped anymore */
+ kbase_mem_pool_free(
+ &kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
+ as_page(kctx->aliasing_sink_page),
+ false);
+}
+
+void kbase_context_sticky_resource_term(struct kbase_context *kctx)
+{
+ unsigned long pending_regions_to_clean;
+
+ kbase_gpu_vm_lock(kctx);
+ kbase_sticky_resource_term(kctx);
+
+ /* free pending region setups */
+ pending_regions_to_clean = KBASE_COOKIE_MASK;
+ bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean,
+ kctx->cookies, BITS_PER_LONG);
+ while (pending_regions_to_clean) {
+ unsigned int cookie = find_first_bit(&pending_regions_to_clean,
+ BITS_PER_LONG);
+
+ if (!WARN_ON(!kctx->pending_regions[cookie])) {
+ dev_dbg(kctx->kbdev->dev, "Freeing pending unmapped region\n");
+ kbase_mem_phy_alloc_put(
+ kctx->pending_regions[cookie]->cpu_alloc);
+ kbase_mem_phy_alloc_put(
+ kctx->pending_regions[cookie]->gpu_alloc);
+ kfree(kctx->pending_regions[cookie]);
+
+ kctx->pending_regions[cookie] = NULL;
+ }
+
+ bitmap_clear(&pending_regions_to_clean, cookie, 1);
+ }
+ kbase_gpu_vm_unlock(kctx);
+}
diff --git a/mali_kbase/mali_kbase_context.h b/mali_kbase/context/mali_kbase_context.h
index 5037b4e..12b8e4f 100644
--- a/mali_kbase/mali_kbase_context.h
+++ b/mali_kbase/context/mali_kbase_context.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017, 2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -18,6 +18,16 @@
*
* SPDX-License-Identifier: GPL-2.0
*
+ *//* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * (C) COPYRIGHT 2011-2017, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
*/
#ifndef _KBASE_CONTEXT_H_
@@ -25,6 +35,30 @@
#include <linux/atomic.h>
+#ifdef CONFIG_DEBUG_FS
+/**
+ * kbase_context_debugfs_init - Initialize the kctx platform
+ * specific debugfs
+ *
+ * @kctx: kbase context
+ *
+ * This initializes some debugfs interfaces specific to the platform the source
+ * is compiled for.
+ */
+void kbase_context_debugfs_init(struct kbase_context *const kctx);
+
+/**
+ * kbase_context_debugfs_term - Terminate the kctx platform
+ * specific debugfs
+ *
+ * @kctx: kbase context
+ *
+ * This terminates some debugfs interfaces specific to the platform the source
+ * is compiled for.
+ */
+void kbase_context_debugfs_term(struct kbase_context *const kctx);
+#endif /* CONFIG_DEBUG_FS */
+
/**
* kbase_create_context() - Create a kernel base context.
*
diff --git a/mali_kbase/context/mali_kbase_context_internal.h b/mali_kbase/context/mali_kbase_context_internal.h
new file mode 100644
index 0000000..818cdbe
--- /dev/null
+++ b/mali_kbase/context/mali_kbase_context_internal.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ *//* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <mali_kbase.h>
+
+typedef int kbase_context_init_method(struct kbase_context *kctx);
+typedef void kbase_context_term_method(struct kbase_context *kctx);
+
+/**
+ * struct kbase_context_init - Device init/term methods.
+ * @init: Function pointer to a initialise method.
+ * @term: Function pointer to a terminate method.
+ * @err_mes: Error message to be printed when init method fails.
+ */
+struct kbase_context_init {
+ kbase_context_init_method *init;
+ kbase_context_term_method *term;
+ char *err_mes;
+};
+
+int kbase_context_common_init(struct kbase_context *kctx);
+void kbase_context_common_term(struct kbase_context *kctx);
+
+int kbase_context_mem_pool_group_init(struct kbase_context *kctx);
+void kbase_context_mem_pool_group_term(struct kbase_context *kctx);
+
+int kbase_context_mmu_init(struct kbase_context *kctx);
+void kbase_context_mmu_term(struct kbase_context *kctx);
+
+int kbase_context_mem_alloc_page(struct kbase_context *kctx);
+void kbase_context_mem_pool_free(struct kbase_context *kctx);
+
+void kbase_context_sticky_resource_term(struct kbase_context *kctx);
diff --git a/mali_kbase/device/backend/mali_kbase_device_jm.c b/mali_kbase/device/backend/mali_kbase_device_jm.c
index 2f53f10..24dbe80 100644
--- a/mali_kbase/device/backend/mali_kbase_device_jm.c
+++ b/mali_kbase/device/backend/mali_kbase_device_jm.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -32,6 +32,110 @@
#include <mali_kbase_model_linux.h>
#endif
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <mali_kbase_dummy_job_wa.h>
+
+/**
+ * kbase_backend_late_init - Perform any backend-specific initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int kbase_backend_late_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbase_hwaccess_pm_init(kbdev);
+ if (err)
+ return err;
+
+ err = kbase_reset_gpu_init(kbdev);
+ if (err)
+ goto fail_reset_gpu_init;
+
+ err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
+ if (err)
+ goto fail_pm_powerup;
+
+ err = kbase_backend_timer_init(kbdev);
+ if (err)
+ goto fail_timer;
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+ if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
+ dev_err(kbdev->dev, "Interrupt assignment check failed.\n");
+ err = -EINVAL;
+ goto fail_interrupt_test;
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ err = kbase_job_slot_init(kbdev);
+ if (err)
+ goto fail_job_slot;
+
+ /* Do the initialisation of devfreq.
+ * Devfreq needs backend_timer_init() for completion of its
+ * initialisation and it also needs to catch the first callback
+ * occurrence of the runtime_suspend event for maintaining state
+ * coherence with the backend power management, hence needs to be
+ * placed before the kbase_pm_context_idle().
+ */
+ err = kbase_backend_devfreq_init(kbdev);
+ if (err)
+ goto fail_devfreq_init;
+
+ /* Idle the GPU and/or cores, if the policy wants it to */
+ kbase_pm_context_idle(kbdev);
+
+ /* Update gpuprops with L2_FEATURES if applicable */
+ kbase_gpuprops_update_l2_features(kbdev);
+
+ init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
+
+ return 0;
+
+fail_devfreq_init:
+ kbase_job_slot_term(kbdev);
+fail_job_slot:
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+fail_interrupt_test:
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ kbase_backend_timer_term(kbdev);
+fail_timer:
+ kbase_hwaccess_pm_halt(kbdev);
+fail_pm_powerup:
+ kbase_reset_gpu_term(kbdev);
+fail_reset_gpu_init:
+ kbase_hwaccess_pm_term(kbdev);
+
+ return err;
+}
+
+/**
+ * kbase_backend_late_term - Perform any backend-specific termination.
+ * @kbdev: Device pointer
+ */
+static void kbase_backend_late_term(struct kbase_device *kbdev)
+{
+ kbase_backend_devfreq_term(kbdev);
+ kbase_job_slot_halt(kbdev);
+ kbase_job_slot_term(kbdev);
+ kbase_backend_timer_term(kbdev);
+ kbase_hwaccess_pm_halt(kbdev);
+ kbase_reset_gpu_term(kbdev);
+ kbase_hwaccess_pm_term(kbdev);
+}
+
static const struct kbase_device_init dev_init[] = {
#ifdef CONFIG_MALI_NO_MALI
{kbase_gpu_device_create, kbase_gpu_device_destroy,
@@ -46,8 +150,8 @@ static const struct kbase_device_init dev_init[] = {
"Power control initialization failed"},
{kbase_device_io_history_init, kbase_device_io_history_term,
"Register access history initialization failed"},
- {kbase_backend_early_init, kbase_backend_early_term,
- "Early backend initialization failed"},
+ {kbase_device_early_init, kbase_device_early_term,
+ "Early device initialization failed"},
{kbase_device_populate_max_freq, NULL,
"Populating max frequency failed"},
{kbase_device_misc_init, kbase_device_misc_term,
@@ -104,6 +208,8 @@ static const struct kbase_device_init dev_init[] = {
{kbase_gpuprops_populate_user_buffer, kbase_gpuprops_free_user_buffer,
"GPU property population failed"},
#endif
+ {kbase_dummy_job_wa_load, kbase_dummy_job_wa_cleanup,
+ "Dummy job workaround load failed"},
};
static void kbase_device_term_partial(struct kbase_device *kbdev,
@@ -127,6 +233,8 @@ int kbase_device_init(struct kbase_device *kbdev)
int err = 0;
unsigned int i = 0;
+ dev_info(kbdev->dev, "Kernel DDK version %s", MALI_RELEASE_NAME);
+
kbase_device_id_init(kbdev);
kbase_disjoint_init(kbdev);
diff --git a/mali_kbase/device/mali_kbase_device.c b/mali_kbase/device/mali_kbase_device.c
index 3062fa3..8eb3153 100644
--- a/mali_kbase/device/mali_kbase_device.c
+++ b/mali_kbase/device/mali_kbase_device.c
@@ -40,13 +40,15 @@
#include <mali_kbase_hw.h>
#include <mali_kbase_config_defaults.h>
-#include <mali_kbase_timeline.h>
+#include <tl/mali_kbase_timeline.h>
#include "mali_kbase_vinstr.h"
#include "mali_kbase_hwcnt_context.h"
#include "mali_kbase_hwcnt_virtualizer.h"
#include "mali_kbase_device.h"
#include "mali_kbase_device_internal.h"
+#include "backend/gpu/mali_kbase_pm_internal.h"
+#include "backend/gpu/mali_kbase_irq_internal.h"
/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
* Supports tracing feature provided in the base module.
@@ -59,7 +61,7 @@ static const char *kbasep_trace_code_string[] = {
/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
* THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
-#include "mali_kbase_trace_defs.h"
+#include "tl/mali_kbase_trace_defs.h"
#undef KBASE_TRACE_CODE_MAKE_CODE
};
#endif
@@ -647,3 +649,45 @@ void kbasep_trace_dump(struct kbase_device *kbdev)
CSTD_UNUSED(kbdev);
}
#endif /* KBASE_TRACE_ENABLE */
+
+int kbase_device_early_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbasep_platform_device_init(kbdev);
+ if (err)
+ return err;
+
+ err = kbase_pm_runtime_init(kbdev);
+ if (err)
+ goto fail_runtime_pm;
+
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ /* Find out GPU properties based on the GPU feature registers */
+ kbase_gpuprops_set(kbdev);
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+
+ err = kbase_install_interrupts(kbdev);
+ if (err)
+ goto fail_interrupts;
+
+ return 0;
+
+fail_interrupts:
+ kbase_pm_runtime_term(kbdev);
+fail_runtime_pm:
+ kbasep_platform_device_term(kbdev);
+
+ return err;
+}
+
+void kbase_device_early_term(struct kbase_device *kbdev)
+{
+ kbase_release_interrupts(kbdev);
+ kbase_pm_runtime_term(kbdev);
+ kbasep_platform_device_term(kbdev);
+}
diff --git a/mali_kbase/device/mali_kbase_device_internal.h b/mali_kbase/device/mali_kbase_device_internal.h
index 4ca57e7..9f96db0 100644
--- a/mali_kbase/device/mali_kbase_device_internal.h
+++ b/mali_kbase/device/mali_kbase_device_internal.h
@@ -62,3 +62,17 @@ int kbase_device_misc_register(struct kbase_device *kbdev);
void kbase_device_misc_deregister(struct kbase_device *kbdev);
void kbase_device_id_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_device_early_init - Perform any device-specific initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_device_early_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_device_early_term - Perform any device-specific termination.
+ * @kbdev: Device pointer
+ */
+void kbase_device_early_term(struct kbase_device *kbdev);
diff --git a/mali_kbase/gpu/mali_kbase_gpu.c b/mali_kbase/gpu/mali_kbase_gpu.c
new file mode 100644
index 0000000..3128db4
--- /dev/null
+++ b/mali_kbase/gpu/mali_kbase_gpu.c
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+
+const char *kbase_gpu_access_type_name(u32 fault_status)
+{
+ switch (AS_FAULTSTATUS_ACCESS_TYPE_GET(fault_status)) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ return "ATOMIC";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+}
diff --git a/mali_kbase/gpu/mali_kbase_gpu_fault.h b/mali_kbase/gpu/mali_kbase_gpu_fault.h
index f83980b..88d9d0f 100644
--- a/mali_kbase/gpu/mali_kbase_gpu_fault.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_fault.h
@@ -33,4 +33,16 @@
*/
const char *kbase_gpu_exception_name(u32 exception_code);
+/**
+ * kbase_gpu_access_type_name - Convert MMU_AS_CONTROL.FAULTSTATUS.ACCESS_TYPE
+ * into a string.
+ * @fault_status: value of the FAULTSTATUS register.
+ *
+ * After an MMU fault, this function can be used to get a readable description
+ * of the access type that caused the fault.
+ *
+ * Return: String of the access type.
+ */
+const char *kbase_gpu_access_type_name(u32 fault_status);
+
#endif /* _KBASE_GPU_FAULT_H_ */
diff --git a/mali_kbase/gpu/mali_kbase_gpu_id.h b/mali_kbase/gpu/mali_kbase_gpu_id.h
index 24acab1..ec883cb 100644
--- a/mali_kbase/gpu/mali_kbase_gpu_id.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_id.h
@@ -103,6 +103,9 @@
#define GPU_ID2_PRODUCT_TGRX GPU_ID2_MODEL_MAKE(10, 3)
#define GPU_ID2_PRODUCT_TVAX GPU_ID2_MODEL_MAKE(10, 4)
#define GPU_ID2_PRODUCT_LODX GPU_ID2_MODEL_MAKE(10, 5)
+#define GPU_ID2_PRODUCT_TTUX GPU_ID2_MODEL_MAKE(11, 2)
+#define GPU_ID2_PRODUCT_LTUX GPU_ID2_MODEL_MAKE(11, 3)
+#define GPU_ID2_PRODUCT_TE2X GPU_ID2_MODEL_MAKE(11, 1)
/* Helper macro to create a GPU_ID assuming valid values for id, major,
minor, status */
diff --git a/mali_kbase/gpu/mali_kbase_gpu_regmap.h b/mali_kbase/gpu/mali_kbase_gpu_regmap.h
index 205b59a..31abae2 100644
--- a/mali_kbase/gpu/mali_kbase_gpu_regmap.h
+++ b/mali_kbase/gpu/mali_kbase_gpu_regmap.h
@@ -278,11 +278,25 @@
#define AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT (0x4<<3)
#define AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT (0x5<<3)
-#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3<<8)
-#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0<<8)
-#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1<<8)
-#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2<<8)
-#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3<<8)
+#define AS_FAULTSTATUS_EXCEPTION_TYPE_SHIFT 0
+#define AS_FAULTSTATUS_EXCEPTION_TYPE_MASK (0xFF << AS_FAULTSTATUS_EXCEPTION_TYPE_SHIFT)
+#define AS_FAULTSTATUS_EXCEPTION_TYPE_GET(reg_val) \
+ (((reg_val)&AS_FAULTSTATUS_EXCEPTION_TYPE_MASK) >> AS_FAULTSTATUS_EXCEPTION_TYPE_SHIFT)
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_SHIFT 8
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << AS_FAULTSTATUS_ACCESS_TYPE_SHIFT)
+#define AS_FAULTSTATUS_ACCESS_TYPE_GET(reg_val) \
+ (((reg_val)&AS_FAULTSTATUS_ACCESS_TYPE_MASK) >> AS_FAULTSTATUS_ACCESS_TYPE_SHIFT)
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3)
+
+#define AS_FAULTSTATUS_SOURCE_ID_SHIFT 16
+#define AS_FAULTSTATUS_SOURCE_ID_MASK (0xFFFF << AS_FAULTSTATUS_SOURCE_ID_SHIFT)
+#define AS_FAULTSTATUS_SOURCE_ID_GET(reg_val) \
+ (((reg_val)&AS_FAULTSTATUS_SOURCE_ID_MASK) >> AS_FAULTSTATUS_SOURCE_ID_SHIFT)
/*
* Begin MMU TRANSCFG register values
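
A minimal sketch of how the FAULTSTATUS helper macros added above can be combined with kbase_gpu_access_type_name() when reporting an address space fault (the reporting function and message format are illustrative, not part of this patch):

	/* Illustrative only: decode an MMU_AS_CONTROL.FAULTSTATUS value using
	 * the *_GET macros added above. */
	static void report_as_fault(struct kbase_device *kbdev, u32 fault_status)
	{
		const u32 exception_type =
			AS_FAULTSTATUS_EXCEPTION_TYPE_GET(fault_status);
		const u32 source_id = AS_FAULTSTATUS_SOURCE_ID_GET(fault_status);
		const char *access_type = kbase_gpu_access_type_name(fault_status);

		dev_err(kbdev->dev,
			"AS fault: exception type 0x%x, access %s, source ID 0x%x\n",
			exception_type, access_type, source_id);
	}
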
diff --git a/mali_kbase/mali_base_hwconfig_features.h b/mali_kbase/mali_base_hwconfig_features.h
index 9ce8a0b..6885f8d 100644
--- a/mali_kbase/mali_base_hwconfig_features.h
+++ b/mali_kbase/mali_base_hwconfig_features.h
@@ -426,4 +426,61 @@ static const enum base_hw_feature base_hw_features_tVAx[] = {
BASE_HW_FEATURE_END
};
+static const enum base_hw_feature base_hw_features_tTUx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tE2x[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+ BASE_HW_FEATURE_END
+};
+
#endif /* _BASE_HWCONFIG_FEATURES_H_ */
diff --git a/mali_kbase/mali_base_hwconfig_issues.h b/mali_kbase/mali_base_hwconfig_issues.h
index acbe77a..399d1b6 100644
--- a/mali_kbase/mali_base_hwconfig_issues.h
+++ b/mali_kbase/mali_base_hwconfig_issues.h
@@ -510,4 +510,41 @@ static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
BASE_HW_ISSUE_END
};
+static const enum base_hw_issue base_hw_issues_tTUx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tTUx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tE2x_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tE2x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_3414,
+ BASE_HW_ISSUE_TTRX_3083,
+ BASE_HW_ISSUE_TTRX_3470,
+ BASE_HW_ISSUE_END
+};
+
#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/mali_kbase/mali_base_kernel.h b/mali_kbase/mali_base_kernel.h
index 3f5a6da..8687736 100644
--- a/mali_kbase/mali_base_kernel.h
+++ b/mali_kbase/mali_base_kernel.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -234,12 +234,20 @@ typedef u32 base_mem_alloc_flags;
#define BASE_MEM_GROUP_ID_MASK \
((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+/* CPU cache maintenance must be done when imported memory is mapped or
+ * unmapped on the GPU. Currently applicable to dma-buf imports only.
+ */
+#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26)
+
+/* Use the GPU VA chosen by the kernel client */
+#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27)
+
/**
* Number of bits used as flags for base memory management
*
* Must be kept in sync with the base_mem_alloc_flags flags
*/
-#define BASE_MEM_FLAGS_NR_BITS 26
+#define BASE_MEM_FLAGS_NR_BITS 28
/* A mask for all output bits, excluding IN/OUT bits.
*/
@@ -304,7 +312,8 @@ static inline base_mem_alloc_flags base_mem_group_id_set(int id)
* and may not be passed from user space.
*/
#define BASEP_MEM_FLAGS_KERNEL_ONLY \
- (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \
+ BASE_MEM_FLAG_MAP_FIXED)
/* A mask of all the flags that can be returned via the base_mem_get_flags()
* interface.
@@ -1767,6 +1776,7 @@ static inline int base_context_mmu_group_id_get(
* to account for the performance impact. */
#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
BASE_TLSTREAM_JOB_DUMPING_ENABLED)
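
Since BASE_MEM_FLAG_MAP_FIXED is now part of BASEP_MEM_FLAGS_KERNEL_ONLY, allocations requested from user space must not carry it; only kernel-side users such as the dummy job workaround below pass it. A hedged sketch of the kind of check this implies (the helper name is made up for illustration):

	/* Illustrative only: reject allocation flags reserved for kernel use
	 * and flags outside the declared width. */
	static bool flags_allowed_from_user(base_mem_alloc_flags flags)
	{
		if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
			return false;

		if (flags & ~(((base_mem_alloc_flags)1 << BASE_MEM_FLAGS_NR_BITS) - 1))
			return false;

		return true;
	}
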
diff --git a/mali_kbase/mali_kbase.h b/mali_kbase/mali_kbase.h
index 119e2db..8a5088c 100644
--- a/mali_kbase/mali_kbase.h
+++ b/mali_kbase/mali_kbase.h
@@ -56,12 +56,13 @@
*/
#include "mali_kbase_defs.h"
-#include "mali_kbase_context.h"
+#include "context/mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_js.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_mem.h"
+#include "mmu/mali_kbase_mmu.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_gpuprops.h"
diff --git a/mali_kbase/mali_kbase_context.c b/mali_kbase/mali_kbase_context.c
deleted file mode 100644
index 53bcc4f..0000000
--- a/mali_kbase/mali_kbase_context.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-/*
- * Base kernel context APIs
- */
-
-#include <mali_kbase.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
-#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_dma_fence.h>
-#include <mali_kbase_ctx_sched.h>
-#include <mali_kbase_mem_pool_group.h>
-#include <mali_kbase_tracepoints.h>
-#include <mali_kbase_timeline.h>
-
-struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat,
- base_context_create_flags const flags,
- unsigned long const api_version,
- struct file *const filp)
-{
- struct kbase_context *kctx;
- int err;
- struct page *p;
- struct kbasep_js_kctx_info *js_kctx_info = NULL;
- unsigned long irq_flags = 0;
- const unsigned long cookies_mask = KBASE_COOKIE_MASK;
-
- if (WARN_ON(!kbdev))
- goto out;
-
- /* Validate flags */
- if (WARN_ON(flags != (flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS)))
- goto out;
-
- /* zero-inited as lot of code assume it's zero'ed out on create */
- kctx = vzalloc(sizeof(*kctx));
-
- if (!kctx)
- goto out;
-
- /* creating a context is considered a disjoint event */
- kbase_disjoint_event(kbdev);
-
- kctx->kbdev = kbdev;
- kctx->as_nr = KBASEP_AS_NR_INVALID;
- atomic_set(&kctx->refcount, 0);
- if (is_compat)
- kbase_ctx_flag_set(kctx, KCTX_COMPAT);
-#if defined(CONFIG_64BIT)
- else
- kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
-#endif /* !defined(CONFIG_64BIT) */
-
- spin_lock_init(&kctx->mm_update_lock);
- kctx->process_mm = NULL;
- atomic_set(&kctx->nonmapped_pages, 0);
- atomic_set(&kctx->permanent_mapped_pages, 0);
- kctx->slots_pullable = 0;
- kctx->tgid = current->tgid;
- kctx->pid = current->pid;
-
- err = kbase_mem_pool_group_init(&kctx->mem_pools, kbdev,
- &kbdev->mem_pool_defaults, &kbdev->mem_pools);
- if (err)
- goto free_kctx;
-
- err = kbase_mem_evictable_init(kctx);
- if (err)
- goto free_both_pools;
-
- atomic_set(&kctx->used_pages, 0);
-
- err = kbase_jd_init(kctx);
- if (err)
- goto deinit_evictable;
-
- err = kbasep_js_kctx_init(kctx);
- if (err)
- goto free_jd; /* safe to call kbasep_js_kctx_term in this case */
-
- err = kbase_event_init(kctx);
- if (err)
- goto free_jd;
-
- mutex_init(&kctx->reg_lock);
-
- spin_lock_init(&kctx->mem_partials_lock);
- INIT_LIST_HEAD(&kctx->mem_partials);
-
- INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
- spin_lock_init(&kctx->waiting_soft_jobs_lock);
- err = kbase_dma_fence_init(kctx);
- if (err)
- goto free_event;
-
- err = kbase_mmu_init(kbdev, &kctx->mmu, kctx,
- base_context_mmu_group_id_get(flags));
- if (err)
- goto term_dma_fence;
-
- p = kbase_mem_alloc_page(
- &kctx->mem_pools.small[KBASE_MEM_GROUP_SINK]);
- if (!p)
- goto no_sink_page;
- kctx->aliasing_sink_page = as_tagged(page_to_phys(p));
-
- init_waitqueue_head(&kctx->event_queue);
-
- bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
-
- /* Make sure page 0 is not used... */
- err = kbase_region_tracker_init(kctx);
- if (err)
- goto no_region_tracker;
-
- err = kbase_sticky_resource_init(kctx);
- if (err)
- goto no_sticky;
-
- err = kbase_jit_init(kctx);
- if (err)
- goto no_jit;
-
-
-#ifdef CONFIG_GPU_TRACEPOINTS
- atomic_set(&kctx->jctx.work_id, 0);
-#endif
-
- kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
-
- mutex_init(&kctx->legacy_hwcnt_lock);
-
- kbase_timer_setup(&kctx->soft_job_timeout,
- kbasep_soft_job_timeout_worker);
-
- mutex_lock(&kbdev->kctx_list_lock);
- list_add(&kctx->kctx_list_link, &kbdev->kctx_list);
- KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx->id, (u32)(kctx->tgid));
- mutex_unlock(&kbdev->kctx_list_lock);
-
- kctx->api_version = api_version;
- kctx->filp = filp;
-
- js_kctx_info = &kctx->jctx.sched_info;
-
- mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
- spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
-
- /* Translate the flags */
- if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
- kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
-
- spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
- mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-
- return kctx;
-
-no_jit:
- kbase_gpu_vm_lock(kctx);
- kbase_sticky_resource_term(kctx);
- kbase_gpu_vm_unlock(kctx);
-no_sticky:
- kbase_region_tracker_term(kctx);
-no_region_tracker:
- kbase_mem_pool_free(
- &kctx->mem_pools.small[KBASE_MEM_GROUP_SINK], p, false);
-no_sink_page:
- kbase_mmu_term(kbdev, &kctx->mmu);
-term_dma_fence:
- kbase_dma_fence_term(kctx);
-free_event:
- kbase_event_cleanup(kctx);
-free_jd:
- /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
- kbasep_js_kctx_term(kctx);
- kbase_jd_exit(kctx);
-deinit_evictable:
- kbase_mem_evictable_deinit(kctx);
-free_both_pools:
- kbase_mem_pool_group_term(&kctx->mem_pools);
-free_kctx:
- vfree(kctx);
-out:
- return NULL;
-}
-KBASE_EXPORT_SYMBOL(kbase_create_context);
-
-static void kbase_reg_pending_dtor(struct kbase_device *kbdev,
- struct kbase_va_region *reg)
-{
- dev_dbg(kbdev->dev, "Freeing pending unmapped region\n");
- kbase_mem_phy_alloc_put(reg->cpu_alloc);
- kbase_mem_phy_alloc_put(reg->gpu_alloc);
- kfree(reg);
-}
-
-void kbase_destroy_context(struct kbase_context *kctx)
-{
- struct kbase_device *kbdev;
- int pages;
- unsigned long pending_regions_to_clean;
- unsigned long flags;
- struct page *p;
-
- if (WARN_ON(!kctx))
- return;
-
- kbdev = kctx->kbdev;
- if (WARN_ON(!kbdev))
- return;
-
- mutex_lock(&kbdev->kctx_list_lock);
- KBASE_TLSTREAM_TL_DEL_CTX(kbdev, kctx);
- list_del(&kctx->kctx_list_link);
- mutex_unlock(&kbdev->kctx_list_lock);
-
- KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
-
- /* Ensure the core is powered up for the destroy process */
- /* A suspend won't happen here, because we're in a syscall from a userspace
- * thread. */
- kbase_pm_context_active(kbdev);
-
- kbase_mem_pool_group_mark_dying(&kctx->mem_pools);
-
- kbase_jd_zap_context(kctx);
-
- /* We have already waited for the jobs to complete (and hereafter there
- * can be no more submissions for the context). However the wait could
- * have timedout and there could still be work items in flight that
- * would do the completion processing of jobs.
- * kbase_jd_exit() will destroy the 'job_done_wq'. And destroying the wq
- * will cause it do drain and implicitly wait for those work items to
- * complete.
- */
- kbase_jd_exit(kctx);
-
-#ifdef CONFIG_DEBUG_FS
- /* Removing the rest of the debugfs entries here as we want to keep the
- * atom debugfs interface alive until all atoms have completed. This
- * is useful for debugging hung contexts. */
- debugfs_remove_recursive(kctx->kctx_dentry);
-
- kbase_debug_job_fault_context_term(kctx);
-
-#endif
-
- kbase_event_cleanup(kctx);
-
- /*
- * JIT must be terminated before the code below as it must be called
- * without the region lock being held.
- * The code above ensures no new JIT allocations can be made by
- * by the time we get to this point of context tear down.
- */
- kbase_jit_term(kctx);
-
- kbase_gpu_vm_lock(kctx);
-
- kbase_sticky_resource_term(kctx);
-
- /* drop the aliasing sink page now that it can't be mapped anymore */
- p = as_page(kctx->aliasing_sink_page);
- kbase_mem_pool_free(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
- p, false);
-
- /* free pending region setups */
- pending_regions_to_clean = KBASE_COOKIE_MASK;
- bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean,
- kctx->cookies, BITS_PER_LONG);
- while (pending_regions_to_clean) {
- unsigned int cookie = find_first_bit(&pending_regions_to_clean,
- BITS_PER_LONG);
-
- BUG_ON(!kctx->pending_regions[cookie]);
-
- kbase_reg_pending_dtor(kbdev, kctx->pending_regions[cookie]);
-
- kctx->pending_regions[cookie] = NULL;
- bitmap_clear(&pending_regions_to_clean, cookie, 1);
- }
-
- kbase_region_tracker_term(kctx);
- kbase_gpu_vm_unlock(kctx);
-
- /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
- kbasep_js_kctx_term(kctx);
-
- kbase_dma_fence_term(kctx);
-
- mutex_lock(&kbdev->mmu_hw_mutex);
- spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
- kbase_ctx_sched_remove_ctx(kctx);
- spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
- mutex_unlock(&kbdev->mmu_hw_mutex);
-
- kbase_mmu_term(kbdev, &kctx->mmu);
-
- pages = atomic_read(&kctx->used_pages);
- if (pages != 0)
- dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
-
- kbase_mem_evictable_deinit(kctx);
-
- kbase_mem_pool_group_term(&kctx->mem_pools);
-
- WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
-
- vfree(kctx);
-
- kbase_pm_context_idle(kbdev);
-
- /* Flush the timeline stream, so the user can see the termination
- * tracepoints being fired.
- * The "if" statement below is for optimization. It is safe to call
- * kbase_timeline_streams_flush when timeline is disabled.
- */
- if (atomic_read(&kbdev->timeline_is_enabled) != 0)
- kbase_timeline_streams_flush(kbdev->timeline);
-}
-KBASE_EXPORT_SYMBOL(kbase_destroy_context);
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index ca94279..fe2ae0e 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -101,10 +101,11 @@
#include <linux/pm_runtime.h>
-#include <mali_kbase_timeline.h>
+#include <tl/mali_kbase_timeline.h>
#include <mali_kbase_as_fault_debugfs.h>
#include <device/mali_kbase_device.h>
+#include <context/mali_kbase_context.h>
/* GPU IRQ Tags */
#define JOB_IRQ_TAG 0
@@ -275,6 +276,8 @@ static void kbase_file_delete(struct kbase_file *const kfile)
kctx->legacy_hwcnt_cli = NULL;
mutex_unlock(&kctx->legacy_hwcnt_lock);
+ kbase_context_debugfs_term(kctx);
+
kbase_destroy_context(kctx);
dev_dbg(kbdev->dev, "deleted base context\n");
@@ -368,11 +371,11 @@ int assign_irqs(struct kbase_device *kbdev)
}
#ifdef CONFIG_OF
- if (!strncmp(irq_res->name, "JOB", 4)) {
+ if (!strncasecmp(irq_res->name, "JOB", 4)) {
irqtag = JOB_IRQ_TAG;
- } else if (!strncmp(irq_res->name, "MMU", 4)) {
+ } else if (!strncasecmp(irq_res->name, "MMU", 4)) {
irqtag = MMU_IRQ_TAG;
- } else if (!strncmp(irq_res->name, "GPU", 4)) {
+ } else if (!strncasecmp(irq_res->name, "GPU", 4)) {
irqtag = GPU_IRQ_TAG;
} else {
dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
@@ -576,22 +579,22 @@ static int kbase_file_create_kctx(struct kbase_file *const kfile,
/* we don't treat this as a fail - just warn about it */
dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
} else {
+#if (KERNEL_VERSION(4, 7, 0) > LINUX_VERSION_CODE)
+ /* prevent unprivileged use of the debug file system
+ * on old kernel versions
+ */
+ debugfs_create_file("infinite_cache", 0600, kctx->kctx_dentry,
+ kctx, &kbase_infinite_cache_fops);
+#else
debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
- kctx, &kbase_infinite_cache_fops);
- debugfs_create_file("force_same_va", 0600,
- kctx->kctx_dentry, kctx,
- &kbase_force_same_va_fops);
+ kctx, &kbase_infinite_cache_fops);
+#endif
+ debugfs_create_file("force_same_va", 0600, kctx->kctx_dentry,
+ kctx, &kbase_force_same_va_fops);
mutex_init(&kctx->mem_profile_lock);
- kbasep_jd_debugfs_ctx_init(kctx);
- kbase_debug_mem_view_init(kctx);
-
- kbase_debug_job_fault_context_init(kctx);
-
- kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);
-
- kbase_jit_debugfs_init(kctx);
+ kbase_context_debugfs_init(kctx);
}
#endif /* CONFIG_DEBUG_FS */
@@ -2376,7 +2379,7 @@ static ssize_t show_js_softstop_always(struct device *dev,
* (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
*/
static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
-#endif /* !MALI_USE_CSF */
+#endif /* CONFIG_MALI_DEBUG */
#ifdef CONFIG_MALI_DEBUG
typedef void (kbasep_debug_command_func) (struct kbase_device *);
@@ -2532,6 +2535,12 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
.name = "Mali-TVAX" },
{ .id = GPU_ID2_PRODUCT_LODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-LODX" },
+ { .id = GPU_ID2_PRODUCT_TTUX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TTUX" },
+ { .id = GPU_ID2_PRODUCT_LTUX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-LTUX" },
+ { .id = GPU_ID2_PRODUCT_TE2X >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TE2X" },
};
const char *product_name = "(Unknown Mali GPU)";
struct kbase_device *kbdev;
@@ -3501,7 +3510,7 @@ void power_control_term(struct kbase_device *kbdev)
#ifdef MALI_KBASE_BUILD
#ifdef CONFIG_DEBUG_FS
-static void trigger_quirks_reload(struct kbase_device *kbdev)
+static void trigger_reset(struct kbase_device *kbdev)
{
kbase_pm_context_active(kbdev);
if (kbase_prepare_to_reset_gpu(kbdev))
@@ -3515,7 +3524,7 @@ static int type##_quirks_set(void *data, u64 val) \
struct kbase_device *kbdev; \
kbdev = (struct kbase_device *)data; \
kbdev->hw_quirks_##type = (u32)val; \
- trigger_quirks_reload(kbdev); \
+ trigger_reset(kbdev); \
return 0;\
} \
\
@@ -3534,6 +3543,25 @@ MAKE_QUIRK_ACCESSORS(tiler);
MAKE_QUIRK_ACCESSORS(mmu);
MAKE_QUIRK_ACCESSORS(jm);
+static ssize_t kbase_device_debugfs_reset_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct kbase_device *kbdev = file->private_data;
+ CSTD_UNUSED(ubuf);
+ CSTD_UNUSED(count);
+ CSTD_UNUSED(ppos);
+
+ trigger_reset(kbdev);
+
+ return count;
+}
+
+static const struct file_operations fops_trigger_reset = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = kbase_device_debugfs_reset_write,
+ .llseek = default_llseek,
+};
/**
* debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
@@ -3621,6 +3649,15 @@ int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
struct dentry *debugfs_ctx_defaults_directory;
int err;
+ /* prevent unprivileged use of the debug file system
+ * on old kernel versions
+ */
+#if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
+ /* the debug file system is only safe to expose on newer kernels */
+ const mode_t mode = 0644;
+#else
+ const mode_t mode = 0600;
+#endif
kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
NULL);
@@ -3670,16 +3707,16 @@ int kbase_device_debugfs_init(struct kbase_device *kbdev)
kbdev->mali_debugfs_directory, kbdev,
&fops_jm_quirks);
- debugfs_create_bool("infinite_cache", 0644,
+ debugfs_create_bool("infinite_cache", mode,
debugfs_ctx_defaults_directory,
&kbdev->infinite_cache_active_default);
- debugfs_create_file("mem_pool_max_size", 0644,
+ debugfs_create_file("mem_pool_max_size", mode,
debugfs_ctx_defaults_directory,
&kbdev->mem_pool_defaults.small,
&kbase_device_debugfs_mem_pool_max_size_fops);
- debugfs_create_file("lp_mem_pool_max_size", 0644,
+ debugfs_create_file("lp_mem_pool_max_size", mode,
debugfs_ctx_defaults_directory,
&kbdev->mem_pool_defaults.large,
&kbase_device_debugfs_mem_pool_max_size_fops);
@@ -3690,6 +3727,10 @@ int kbase_device_debugfs_init(struct kbase_device *kbdev)
&fops_protected_debug_mode);
}
+ debugfs_create_file("reset", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_trigger_reset);
+
#if KBASE_TRACE_ENABLE
kbasep_trace_debugfs_init(kbdev);
#endif /* KBASE_TRACE_ENABLE */
@@ -4114,6 +4155,7 @@ static const struct dev_pm_ops kbase_pm_ops = {
static const struct of_device_id kbase_dt_ids[] = {
{ .compatible = "arm,malit6xx" },
{ .compatible = "arm,mali-midgard" },
+ { .compatible = "arm,mali-bifrost" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, kbase_dt_ids);
diff --git a/mali_kbase/mali_kbase_debug_mem_view.c b/mali_kbase/mali_kbase_debug_mem_view.c
index c091f16..4788137 100644
--- a/mali_kbase/mali_kbase_debug_mem_view.c
+++ b/mali_kbase/mali_kbase_debug_mem_view.c
@@ -233,6 +233,12 @@ static int debug_mem_open(struct inode *i, struct file *file)
goto out;
}
+ ret = debug_mem_zone_open(&kctx->reg_rbtree_exec, mem_data);
+ if (0 != ret) {
+ kbase_gpu_vm_unlock(kctx);
+ goto out;
+ }
+
kbase_gpu_vm_unlock(kctx);
((struct seq_file *)file->private_data)->private = mem_data;
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index e6d9b88..059d850 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -36,7 +36,7 @@
#include <mali_base_hwconfig_features.h>
#include <mali_base_hwconfig_issues.h>
#include <mali_kbase_mem_lowlevel.h>
-#include <mali_kbase_mmu_hw.h>
+#include <mmu/mali_kbase_mmu_hw.h>
#include <mali_kbase_instr_defs.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_gpuprops_types.h>
@@ -418,8 +418,6 @@ static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependenc
* @KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: Atom is in slot fifo but is waiting
* for the cores, which are needed to execute the job
* chain represented by the atom, to become available
- * @KBASE_ATOM_GPU_RB_WAITING_AFFINITY: Atom is in slot fifo but is blocked on
- * affinity due to rmu workaround for Hw issue 8987.
* @KBASE_ATOM_GPU_RB_READY: Atom is in slot fifo and can be submitted to GPU.
* @KBASE_ATOM_GPU_RB_SUBMITTED: Atom is in slot fifo and has been submitted to GPU.
* @KBASE_ATOM_GPU_RB_RETURN_TO_JS: Atom must be returned to JS due to some failure,
@@ -432,7 +430,6 @@ enum kbase_atom_gpu_rb_state {
KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
- KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
KBASE_ATOM_GPU_RB_READY,
KBASE_ATOM_GPU_RB_SUBMITTED,
KBASE_ATOM_GPU_RB_RETURN_TO_JS = -1
@@ -950,7 +947,7 @@ enum kbase_trace_code {
/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
* THIS MUST BE USED AT THE START OF THE ENUM */
#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
-#include "mali_kbase_trace_defs.h"
+#include <tl/mali_kbase_trace_defs.h>
#undef KBASE_TRACE_CODE_MAKE_CODE
/* Comma on its own, to extend the list */
,
@@ -1722,6 +1719,13 @@ struct kbase_device {
/* See KBASE_JS_*_PRIORITY_MODE for details. */
u32 js_ctx_scheduling_mode;
+
+ struct {
+ struct kbase_context *ctx;
+ u64 jc;
+ int slot;
+ u64 flags;
+ } dummy_job_wa;
};
/**
@@ -2096,6 +2100,7 @@ struct kbase_sub_alloc {
* @priority: Indicates the context priority. Used along with @atoms_count
* for context scheduling, protected by hwaccess_lock.
* @atoms_count: Number of gpu atoms currently in use, per priority
+ * @create_flags: Flags used in context creation.
*
* A kernel base context is an entity among which the GPU is scheduled.
* Each context has its own GPU address space.
@@ -2236,6 +2241,7 @@ struct kbase_context {
int priority;
s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+ base_context_create_flags create_flags;
};
#ifdef CONFIG_MALI_CINSTR_GWT
diff --git a/mali_kbase/mali_kbase_dummy_job_wa.c b/mali_kbase/mali_kbase_dummy_job_wa.c
new file mode 100644
index 0000000..a72436a
--- /dev/null
+++ b/mali_kbase/mali_kbase_dummy_job_wa.c
@@ -0,0 +1,515 @@
+/*
+ *
+ * (C) COPYRIGHT 2020 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Implementation of the dummy job execution workaround for the GPU hang issue.
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_dummy_job_wa.h>
+
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#define DUMMY_JOB_WA_BINARY_NAME "valhall-1691526.wa"
+
+struct wa_header {
+ u16 signature;
+ u16 version;
+ u32 info_offset;
+} __packed;
+
+struct wa_v2_info {
+ u64 jc;
+ u32 js;
+ u32 blob_offset;
+ u64 flags;
+} __packed;
+
+struct wa_blob {
+ u64 base;
+ u32 size;
+ u32 map_flags;
+ u32 payload_offset;
+ u32 blob_offset;
+} __packed;
+
+static bool in_range(const u8 *base, const u8 *end, off_t off, size_t sz)
+{
+ return !(end - base - off < sz);
+}
+
+static u32 wait_any(struct kbase_device *kbdev, off_t offset, u32 bits)
+{
+ int loop;
+ const int timeout = 100;
+ u32 val;
+
+ for (loop = 0; loop < timeout; loop++) {
+ val = kbase_reg_read(kbdev, offset);
+ if (val & bits)
+ break;
+ udelay(10);
+ }
+
+ if (loop == timeout) {
+ dev_err(kbdev->dev,
+ "Timeout reading register 0x%lx, bits 0x%lx, last read was 0x%lx\n",
+ (unsigned long)offset, (unsigned long)bits,
+ (unsigned long)val);
+ }
+
+ return (val & bits);
+}
+
+static int wait(struct kbase_device *kbdev, off_t offset, u32 bits, bool set)
+{
+ int loop;
+ const int timeout = 100;
+ u32 val;
+ u32 target = 0;
+
+ if (set)
+ target = bits;
+
+ for (loop = 0; loop < timeout; loop++) {
+ val = kbase_reg_read(kbdev, (offset));
+ if ((val & bits) == target)
+ break;
+
+ udelay(10);
+ }
+
+ if (loop == timeout) {
+ dev_err(kbdev->dev,
+ "Timeout reading register 0x%lx, bits 0x%lx, last read was 0x%lx\n",
+ (unsigned long)offset, (unsigned long)bits,
+ (unsigned long)val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static inline int run_job(struct kbase_device *kbdev, int as, int slot,
+ u64 cores, u64 jc)
+{
+ u32 done;
+
+ /* setup job */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(slot, JS_HEAD_NEXT_LO),
+ jc & U32_MAX);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(slot, JS_HEAD_NEXT_HI),
+ jc >> 32);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(slot, JS_AFFINITY_NEXT_LO),
+ cores & U32_MAX);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(slot, JS_AFFINITY_NEXT_HI),
+ cores >> 32);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(slot, JS_CONFIG_NEXT),
+ JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK | as);
+
+ /* go */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(slot, JS_COMMAND_NEXT),
+ JS_COMMAND_START);
+
+ /* wait for the slot to finish (done, error) */
+ done = wait_any(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT),
+ (1ul << (16+slot)) | (1ul << slot));
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), done);
+
+ if (done != (1ul << slot)) {
+ dev_err(kbdev->dev,
+ "Failed to run WA job on slot %d cores 0x%llx: done 0x%lx\n",
+ slot, (unsigned long long)cores,
+ (unsigned long)done);
+ dev_err(kbdev->dev, "JS_STATUS on failure: 0x%x\n",
+ kbase_reg_read(kbdev, JOB_SLOT_REG(slot, JS_STATUS)));
+
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+/* To be called after power up & MMU init, but before everything else */
+int kbase_dummy_job_wa_execute(struct kbase_device *kbdev, u64 cores)
+{
+ int as;
+ int slot;
+ u64 jc;
+ int failed = 0;
+ int runs = 0;
+ u32 old_gpu_mask;
+ u32 old_job_mask;
+
+ if (!kbdev)
+ return -EFAULT;
+
+ if (!kbdev->dummy_job_wa.ctx)
+ return -EFAULT;
+
+ as = kbdev->dummy_job_wa.ctx->as_nr;
+ slot = kbdev->dummy_job_wa.slot;
+ jc = kbdev->dummy_job_wa.jc;
+
+ /* mask off all but MMU IRQs */
+ old_gpu_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+ old_job_mask = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK));
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0);
+
+ /* power up requested cores */
+ kbase_reg_write(kbdev, SHADER_PWRON_LO, (cores & U32_MAX));
+ kbase_reg_write(kbdev, SHADER_PWRON_HI, (cores >> 32));
+
+ if (kbdev->dummy_job_wa.flags & KBASE_DUMMY_JOB_WA_FLAG_WAIT_POWERUP) {
+ /* wait for power-ups */
+ wait(kbdev, SHADER_READY_LO, (cores & U32_MAX), true);
+ if (cores >> 32)
+ wait(kbdev, SHADER_READY_HI, (cores >> 32), true);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), U32_MAX);
+ }
+
+ if (kbdev->dummy_job_wa.flags & KBASE_DUMMY_JOB_WA_FLAG_SERIALIZE) {
+ int i;
+
+ /* do for each requested core */
+ for (i = 0; i < sizeof(cores) * 8; i++) {
+ u64 affinity;
+
+ affinity = 1ull << i;
+
+ if (!(cores & affinity))
+ continue;
+
+ if (run_job(kbdev, as, slot, affinity, jc))
+ failed++;
+ runs++;
+ }
+
+ } else {
+ if (run_job(kbdev, as, slot, cores, jc))
+ failed++;
+ runs++;
+ }
+
+ if (kbdev->dummy_job_wa.flags &
+ KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER) {
+ /* power off shader cores (to reduce any dynamic leakage) */
+ kbase_reg_write(kbdev, SHADER_PWROFF_LO, (cores & U32_MAX));
+ kbase_reg_write(kbdev, SHADER_PWROFF_HI, (cores >> 32));
+
+ /* wait for power off complete */
+ wait(kbdev, SHADER_READY_LO, (cores & U32_MAX), false);
+ wait(kbdev, SHADER_PWRTRANS_LO, (cores & U32_MAX), false);
+ if (cores >> 32) {
+ wait(kbdev, SHADER_READY_HI, (cores >> 32), false);
+ wait(kbdev, SHADER_PWRTRANS_HI, (cores >> 32), false);
+ }
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), U32_MAX);
+ }
+
+ /* restore IRQ masks */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), old_gpu_mask);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), old_job_mask);
+
+ if (failed)
+ dev_err(kbdev->dev,
+ "WA complete with %d failures out of %d runs\n", failed,
+ runs);
+
+ return failed ? -EFAULT : 0;
+}
+
+static ssize_t show_dummy_job_wa_info(struct device * const dev,
+ struct device_attribute * const attr, char * const buf)
+{
+ struct kbase_device *const kbdev = dev_get_drvdata(dev);
+ int err;
+
+ if (!kbdev || !kbdev->dummy_job_wa.ctx)
+ return -ENODEV;
+
+ err = scnprintf(buf, PAGE_SIZE, "slot %u flags %llx\n",
+ kbdev->dummy_job_wa.slot, kbdev->dummy_job_wa.flags);
+
+ return err;
+}
+
+static DEVICE_ATTR(dummy_job_wa_info, 0444, show_dummy_job_wa_info, NULL);
+
+#define FAIL_PROBE 1
+#define SKIP_WA 2
+#define LOAD_WA 3
+
+static int check_wa_validity(struct kbase_device *kbdev,
+ bool wa_blob_present)
+{
+ struct base_gpu_props *gpu_props = &kbdev->gpu_props.props;
+ const u32 major_revision = gpu_props->core_props.major_revision;
+ const u32 minor_revision = gpu_props->core_props.minor_revision;
+ const u32 gpu_id = gpu_props->raw_props.gpu_id;
+ const u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ int ret = FAIL_PROBE;
+
+ if (IS_ENABLED(CONFIG_ARCH_VEXPRESS))
+ return SKIP_WA;
+
+ switch (GPU_ID2_MODEL_MATCH_VALUE(product_id)) {
+ case GPU_ID2_PRODUCT_TTRX:
+ /* WA needed for r0p0, r0p1 only */
+ if (major_revision == 0) {
+ if ((minor_revision <= 1) && wa_blob_present)
+ ret = LOAD_WA;
+ else if ((minor_revision > 1) && !wa_blob_present)
+ ret = SKIP_WA;
+ } else if ((major_revision > 0) && !wa_blob_present)
+ ret = SKIP_WA;
+ break;
+ case GPU_ID2_PRODUCT_TNAX:
+ /* WA needed for r0p0 only */
+ if (major_revision == 0) {
+ if ((minor_revision == 0) && wa_blob_present)
+ ret = LOAD_WA;
+ else if ((minor_revision > 0) && !wa_blob_present)
+ ret = SKIP_WA;
+ } else if ((major_revision > 0) && !wa_blob_present)
+ ret = SKIP_WA;
+ break;
+ case GPU_ID2_PRODUCT_TBEX:
+ /* WA needed for r0p0 only */
+ if ((major_revision == 0) && (minor_revision == 0)) {
+ if (!wa_blob_present) {
+ dev_warn(kbdev->dev, "Dummy job WA not applied, susceptible to GPU hang. Contact support-mali@arm.com");
+ ret = SKIP_WA;
+ } else
+ ret = LOAD_WA;
+ } else if (!wa_blob_present)
+ ret = SKIP_WA;
+ break;
+ case GPU_ID2_PRODUCT_LBEX:
+ /* WA needed for r1p0 only */
+ if ((major_revision == 1) && (minor_revision == 0)) {
+ if (!wa_blob_present) {
+ dev_warn(kbdev->dev, "Dummy job WA not applied, susceptible to GPU hang. Contact support-mali@arm.com");
+ ret = SKIP_WA;
+ } else
+ ret = LOAD_WA;
+ } else if (!wa_blob_present)
+ ret = SKIP_WA;
+ break;
+ default:
+ if (!wa_blob_present)
+ ret = SKIP_WA;
+ break;
+ }
+
+ return ret;
+}
+
+int kbase_dummy_job_wa_load(struct kbase_device *kbdev)
+{
+ const struct firmware *firmware;
+ static const char wa_name[] = DUMMY_JOB_WA_BINARY_NAME;
+ const u32 signature = 0x4157;
+ const u32 version = 2;
+ const u8 *fw_end;
+ const u8 *fw;
+ const struct wa_header *header;
+ const struct wa_v2_info *v2_info;
+ u32 blob_offset;
+ int err;
+ int ret;
+ struct kbase_context *kctx;
+
+ /* load the wa */
+#if KERNEL_VERSION(4, 18, 0) <= LINUX_VERSION_CODE
+ err = firmware_request_nowarn(&firmware, wa_name, kbdev->dev);
+#else
+ err = request_firmware(&firmware, wa_name, kbdev->dev);
+#endif
+
+ ret = check_wa_validity(kbdev, err == 0);
+
+ if (ret == SKIP_WA) {
+ if (err == 0)
+ release_firmware(firmware);
+ return 0;
+ } else if (ret == FAIL_PROBE) {
+ if (err == 0) {
+ dev_err(kbdev->dev, "WA blob unexpectedly present. Please refer to the Arm Mali DDK Bifrost/Valhall Release Notes, "
+ "Part number DC-06002 or contact support-mali@arm.com - driver probe will be failed");
+ release_firmware(firmware);
+ } else {
+ dev_err(kbdev->dev, "WA blob missing. Please refer to the Arm Mali DDK Valhall Release Notes, "
+ "Part number DC-06002 or contact support-mali@arm.com - driver probe will be failed");
+ }
+ return -ENODEV;
+ }
+
+ kctx = kbase_create_context(kbdev, true,
+ BASE_CONTEXT_CREATE_FLAG_NONE, 0,
+ NULL);
+
+ if (!kctx) {
+ dev_err(kbdev->dev, "Failed to create WA context\n");
+ goto no_ctx;
+ }
+
+ fw = firmware->data;
+ fw_end = fw + firmware->size;
+
+ dev_dbg(kbdev->dev, "Loaded firmware of size %zu bytes\n",
+ firmware->size);
+
+ if (!in_range(fw, fw_end, 0, sizeof(*header))) {
+ dev_err(kbdev->dev, "WA too small\n");
+ goto bad_fw;
+ }
+
+ header = (const struct wa_header *)(fw + 0);
+
+ if (header->signature != signature) {
+ dev_err(kbdev->dev, "WA signature failure: 0x%lx\n",
+ (unsigned long)header->signature);
+ goto bad_fw;
+ }
+
+ if (header->version != version) {
+ dev_err(kbdev->dev, "WA version 0x%lx not supported\n",
+ (unsigned long)header->version);
+ goto bad_fw;
+ }
+
+ if (!in_range(fw, fw_end, header->info_offset, sizeof(*v2_info))) {
+ dev_err(kbdev->dev, "WA info offset out of bounds\n");
+ goto bad_fw;
+ }
+
+ v2_info = (const struct wa_v2_info *)(fw + header->info_offset);
+
+ if (v2_info->flags & ~KBASE_DUMMY_JOB_WA_FLAGS) {
+ dev_err(kbdev->dev, "Unsupported WA flag(s): 0x%llx\n",
+ (unsigned long long)v2_info->flags);
+ goto bad_fw;
+ }
+
+ kbdev->dummy_job_wa.slot = v2_info->js;
+ kbdev->dummy_job_wa.jc = v2_info->jc;
+ kbdev->dummy_job_wa.flags = v2_info->flags;
+
+ blob_offset = v2_info->blob_offset;
+
+ while (blob_offset) {
+ const struct wa_blob *blob;
+ size_t nr_pages;
+ u64 flags;
+ u64 gpu_va;
+ struct kbase_va_region *va_region;
+
+ if (!in_range(fw, fw_end, blob_offset, sizeof(*blob))) {
+ dev_err(kbdev->dev, "Blob offset out-of-range: 0x%lx\n",
+ (unsigned long)blob_offset);
+ goto bad_fw;
+ }
+
+ blob = (const struct wa_blob *)(fw + blob_offset);
+ if (!in_range(fw, fw_end, blob->payload_offset, blob->size)) {
+ dev_err(kbdev->dev, "Payload out-of-bounds\n");
+ goto bad_fw;
+ }
+
+ gpu_va = blob->base;
+ if (PAGE_ALIGN(gpu_va) != gpu_va) {
+ dev_err(kbdev->dev, "blob not page aligned\n");
+ goto bad_fw;
+ }
+ nr_pages = PFN_UP(blob->size);
+ flags = blob->map_flags | BASE_MEM_FLAG_MAP_FIXED;
+
+ va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages,
+ 0, &flags, &gpu_va);
+
+ if (!va_region) {
+ dev_err(kbdev->dev, "Failed to allocate for blob\n");
+ } else {
+ struct kbase_vmap_struct vmap = { 0 };
+ const u8 *payload;
+ void *dst;
+
+ /* copy the payload, */
+ payload = fw + blob->payload_offset;
+
+ dst = kbase_vmap(kctx,
+ va_region->start_pfn << PAGE_SHIFT,
+ nr_pages << PAGE_SHIFT, &vmap);
+
+ if (dst) {
+ memcpy(dst, payload, blob->size);
+ kbase_vunmap(kctx, &vmap);
+ } else {
+ dev_err(kbdev->dev,
+ "Failed to copy payload\n");
+ }
+
+ }
+ blob_offset = blob->blob_offset; /* follow chain */
+ }
+
+ release_firmware(firmware);
+
+ kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+
+ kbdev->dummy_job_wa.ctx = kctx;
+
+ err = sysfs_create_file(&kbdev->dev->kobj,
+ &dev_attr_dummy_job_wa_info.attr);
+ if (err)
+ dev_err(kbdev->dev, "SysFS file creation for dummy job wa failed\n");
+
+ return 0;
+
+bad_fw:
+ kbase_destroy_context(kctx);
+no_ctx:
+ release_firmware(firmware);
+ return -EFAULT;
+}
+
+void kbase_dummy_job_wa_cleanup(struct kbase_device *kbdev)
+{
+ struct kbase_context *wa_ctx;
+
+ /* Can be safely called even if the file wasn't created on probe */
+ sysfs_remove_file(&kbdev->dev->kobj, &dev_attr_dummy_job_wa_info.attr);
+
+ wa_ctx = READ_ONCE(kbdev->dummy_job_wa.ctx);
+ /* make this write visible before we tear down the ctx */
+ smp_store_mb(kbdev->dummy_job_wa.ctx, NULL);
+
+ if (wa_ctx) {
+ kbasep_js_release_privileged_ctx(kbdev, wa_ctx);
+ kbase_destroy_context(wa_ctx);
+ }
+}
diff --git a/mali_kbase/mali_kbase_dummy_job_wa.h b/mali_kbase/mali_kbase_dummy_job_wa.h
new file mode 100644
index 0000000..0ffd5b9
--- /dev/null
+++ b/mali_kbase/mali_kbase_dummy_job_wa.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * (C) COPYRIGHT 2020 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DUMMY_JOB_WORKAROUND_
+#define _KBASE_DUMMY_JOB_WORKAROUND_
+
+#define KBASE_DUMMY_JOB_WA_FLAG_SERIALIZE (1ull << 0)
+#define KBASE_DUMMY_JOB_WA_FLAG_WAIT_POWERUP (1ull << 1)
+#define KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER (1ull << 2)
+
+#define KBASE_DUMMY_JOB_WA_FLAGS (KBASE_DUMMY_JOB_WA_FLAG_SERIALIZE | \
+ KBASE_DUMMY_JOB_WA_FLAG_WAIT_POWERUP | \
+ KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER)
+
+int kbase_dummy_job_wa_load(struct kbase_device *kbdev);
+void kbase_dummy_job_wa_cleanup(struct kbase_device *kbdev);
+int kbase_dummy_job_wa_execute(struct kbase_device *kbdev, u64 cores);
+
+static inline bool kbase_dummy_job_wa_enabled(struct kbase_device *kbdev)
+{
+ return (kbdev->dummy_job_wa.ctx != NULL);
+}
+
+#endif /* _KBASE_DUMMY_JOB_WORKAROUND_ */
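
A hedged sketch of how this interface is intended to be driven from the power management path (the hook shown is an assumption; the actual call site lives in the PM backend, which is not part of this hunk): once the affected shader cores have been powered up, the workaround job is executed before normal submissions resume.

	/* Illustrative only: run the dummy job WA for the cores that were just
	 * requested, if a WA context was successfully loaded at probe time. */
	static void maybe_run_dummy_job_wa(struct kbase_device *kbdev, u64 cores)
	{
		if (!kbase_dummy_job_wa_enabled(kbdev))
			return;

		if (kbase_dummy_job_wa_execute(kbdev, cores))
			dev_warn(kbdev->dev, "dummy job WA reported failures\n");
	}
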
diff --git a/mali_kbase/mali_kbase_event.c b/mali_kbase/mali_kbase_event.c
index 70e6dd6..0ba5f97 100644
--- a/mali_kbase/mali_kbase_event.c
+++ b/mali_kbase/mali_kbase_event.c
@@ -24,7 +24,7 @@
#include <mali_kbase.h>
#include <mali_kbase_debug.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
diff --git a/mali_kbase/mali_kbase_hw.c b/mali_kbase/mali_kbase_hw.c
index 7738630..b5304e8 100644
--- a/mali_kbase/mali_kbase_hw.c
+++ b/mali_kbase/mali_kbase_hw.c
@@ -81,6 +81,14 @@ void kbase_hw_set_features_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_TVAX:
features = base_hw_features_tVAx;
break;
+ case GPU_ID2_PRODUCT_TTUX:
+ /* Fallthrough */
+ case GPU_ID2_PRODUCT_LTUX:
+ features = base_hw_features_tTUx;
+ break;
+ case GPU_ID2_PRODUCT_TE2X:
+ features = base_hw_features_tE2x;
+ break;
default:
features = base_hw_features_generic;
break;
@@ -214,6 +222,18 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{GPU_ID2_PRODUCT_TVAX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tVAx_r0p0},
{U32_MAX, NULL} } },
+
+ {GPU_ID2_PRODUCT_TTUX,
+ {{GPU_ID2_VERSION_MAKE(2, 0, 0), base_hw_issues_tTUx_r0p0},
+ {U32_MAX, NULL} } },
+
+ {GPU_ID2_PRODUCT_LTUX,
+ {{GPU_ID2_VERSION_MAKE(3, 0, 0), base_hw_issues_tTUx_r0p0},
+ {U32_MAX, NULL} } },
+
+ {GPU_ID2_PRODUCT_TE2X,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tE2x_r0p0},
+ {U32_MAX, NULL} } },
};
u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
@@ -364,6 +384,13 @@ int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_TVAX:
issues = base_hw_issues_model_tVAx;
break;
+ case GPU_ID2_PRODUCT_TTUX:
+ case GPU_ID2_PRODUCT_LTUX:
+ issues = base_hw_issues_model_tTUx;
+ break;
+ case GPU_ID2_PRODUCT_TE2X:
+ issues = base_hw_issues_model_tE2x;
+ break;
default:
dev_err(kbdev->dev,
"Unknown GPU ID %x", gpu_id);
diff --git a/mali_kbase/mali_kbase_hwaccess_backend.h b/mali_kbase/mali_kbase_hwaccess_backend.h
index d5e3d3a..89df251 100644
--- a/mali_kbase/mali_kbase_hwaccess_backend.h
+++ b/mali_kbase/mali_kbase_hwaccess_backend.h
@@ -29,34 +29,6 @@
#define _KBASE_HWACCESS_BACKEND_H_
/**
- * kbase_backend_early_init - Perform any backend-specific initialization.
- * @kbdev: Device pointer
- *
- * Return: 0 on success, or an error code on failure.
- */
-int kbase_backend_early_init(struct kbase_device *kbdev);
-
-/**
- * kbase_backend_late_init - Perform any backend-specific initialization.
- * @kbdev: Device pointer
- *
- * Return: 0 on success, or an error code on failure.
- */
-int kbase_backend_late_init(struct kbase_device *kbdev);
-
-/**
- * kbase_backend_early_term - Perform any backend-specific termination.
- * @kbdev: Device pointer
- */
-void kbase_backend_early_term(struct kbase_device *kbdev);
-
-/**
- * kbase_backend_late_term - Perform any backend-specific termination.
- * @kbdev: Device pointer
- */
-void kbase_backend_late_term(struct kbase_device *kbdev);
-
-/**
* kbase_backend_devfreq_init - Perform backend devfreq related initialization.
* @kbdev: Device pointer
*
diff --git a/mali_kbase/mali_kbase_hwcnt_backend_gpu.c b/mali_kbase/mali_kbase_hwcnt_backend_gpu.c
index 1e9c25a..ae11630 100644
--- a/mali_kbase/mali_kbase_hwcnt_backend_gpu.c
+++ b/mali_kbase/mali_kbase_hwcnt_backend_gpu.c
@@ -30,6 +30,7 @@
#include "backend/gpu/mali_kbase_model_dummy.h"
#endif
+
/**
* struct kbase_hwcnt_backend_gpu_info - Information used to create an instance
* of a GPU hardware counter backend.
@@ -324,6 +325,7 @@ static int kbasep_hwcnt_backend_gpu_create(
const struct kbase_hwcnt_backend_gpu_info *info,
struct kbase_hwcnt_backend_gpu **out_backend)
{
+
int errcode;
struct kbase_device *kbdev;
struct kbase_hwcnt_backend_gpu *backend = NULL;
diff --git a/mali_kbase/mali_kbase_ioctl.h b/mali_kbase/mali_kbase_ioctl.h
index 9694d62..c041829 100644
--- a/mali_kbase/mali_kbase_ioctl.h
+++ b/mali_kbase/mali_kbase_ioctl.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -83,9 +83,11 @@ extern "C" {
* - Added BASE_JD_REQ_JOB_SLOT.
* - Reused padding field in base_jd_atom_v2 to pass job slot number.
* - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
+ * 11.18:
+ * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags
*/
#define BASE_UK_VERSION_MAJOR 11
-#define BASE_UK_VERSION_MINOR 17
+#define BASE_UK_VERSION_MINOR 18
/**
* struct kbase_ioctl_version_check - Check version compatibility with kernel
diff --git a/mali_kbase/mali_kbase_jd.c b/mali_kbase/mali_kbase_jd.c
index fea1bc6..88ab962 100644
--- a/mali_kbase/mali_kbase_jd.c
+++ b/mali_kbase/mali_kbase_jd.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -33,7 +33,7 @@
#include <mali_kbase_jm.h>
#include <mali_kbase_hwaccess_jm.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include "mali_kbase_dma_fence.h"
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index a0090a9..7ab25d1 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,7 +27,7 @@
*/
#include <mali_kbase.h>
#include <mali_kbase_js.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_ctx_sched.h>
@@ -204,7 +204,8 @@ jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
- for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ for (prio = BASE_JD_PRIO_MEDIUM;
+ prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
return false;
}
@@ -272,7 +273,8 @@ jsctx_queue_foreach(struct kbase_context *kctx, int js,
{
int prio;
- for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
+ for (prio = BASE_JD_PRIO_MEDIUM;
+ prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
jsctx_queue_foreach_prio(kctx, js, prio, callback);
}
@@ -322,7 +324,8 @@ jsctx_rb_peek(struct kbase_context *kctx, int js)
lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
- for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ for (prio = BASE_JD_PRIO_MEDIUM;
+ prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
struct kbase_jd_atom *katom;
katom = jsctx_rb_peek_prio(kctx, js, prio);
@@ -1095,7 +1098,8 @@ void kbase_js_update_ctx_priority(struct kbase_context *kctx)
/* Determine the new priority for context, as per the priority
* of currently in-use atoms.
*/
- for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ for (prio = BASE_JD_PRIO_MEDIUM;
+ prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
if (kctx->atoms_count[prio]) {
new_priority = prio;
break;
@@ -1898,7 +1902,6 @@ void kbasep_js_suspend(struct kbase_device *kbdev)
struct kbasep_js_device_data *js_devdata;
int i;
u16 retained = 0u;
- int nr_privileged_ctx = 0;
KBASE_DEBUG_ASSERT(kbdev);
KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
@@ -1919,16 +1922,14 @@ void kbasep_js_suspend(struct kbase_device *kbdev)
if (kctx && !(kbdev->as_free & (1u << i))) {
kbase_ctx_sched_retain_ctx_refcount(kctx);
retained |= 1u;
- /* We can only cope with up to 1 privileged context -
- * the instrumented context. It'll be suspended by
- * disabling instrumentation */
- if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
- ++nr_privileged_ctx;
- WARN_ON(nr_privileged_ctx != 1);
- }
+ /* This loop will not have an effect on the privileged
+ * contexts as they hold an extra ref count compared to
+ * the normal contexts, so they will hold on to their
+ * address spaces. The MMU will be re-enabled for them
+ * on resume.
+ */
}
}
- CSTD_UNUSED(nr_privileged_ctx);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -1958,7 +1959,8 @@ void kbasep_js_resume(struct kbase_device *kbdev)
mutex_lock(&js_devdata->queue_mutex);
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
- for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ for (prio = BASE_JD_PRIO_MEDIUM;
+ prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
struct kbase_context *kctx, *n;
unsigned long flags;
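
The loop rewrites in this file swap the bare 0 for BASE_JD_PRIO_MEDIUM as the starting index. Assuming, per the driver's UAPI headers (not shown in this patch), that BASE_JD_PRIO_MEDIUM is the numerically lowest priority value, the iteration range is unchanged; a minimal sketch of the pattern:

    /* sketch only; assumes BASE_JD_PRIO_MEDIUM is the first (lowest) index */
    int prio;

    for (prio = BASE_JD_PRIO_MEDIUM;
         prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
        if (kctx->atoms_count[prio]) {
            new_priority = prio;  /* first level with atoms in use */
            break;
        }
    }
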
diff --git a/mali_kbase/mali_kbase_js.h b/mali_kbase/mali_kbase_js.h
index 588777c..e4bd4a2 100644
--- a/mali_kbase/mali_kbase_js.h
+++ b/mali_kbase/mali_kbase_js.h
@@ -31,7 +31,7 @@
#define _KBASE_JS_H_
#include "mali_kbase_js_defs.h"
-#include "mali_kbase_context.h"
+#include "context/mali_kbase_context.h"
#include "mali_kbase_defs.h"
#include "mali_kbase_debug.h"
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 278c59b..de57024 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -23,7 +23,6 @@
/**
- * @file mali_kbase_mem.c
* Base kernel memory APIs
*/
#include <linux/dma-buf.h>
@@ -41,10 +40,10 @@
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_cache_policy.h>
#include <mali_kbase_hw.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_native_mgm.h>
#include <mali_kbase_mem_pool_group.h>
-
+#include <mmu/mali_kbase_mmu.h>
/* Forward declarations */
static void free_partial_locked(struct kbase_context *kctx,
@@ -2618,6 +2617,12 @@ bool kbase_check_alloc_flags(unsigned long flags)
if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
return false;
+ /* BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP is only valid for imported
+ * memory */
+ if ((flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP) ==
+ BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
+ return false;
+
/* Should not combine BASE_MEM_COHERENT_LOCAL with
* BASE_MEM_COHERENT_SYSTEM */
if ((flags & (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM)) ==
@@ -2931,6 +2936,16 @@ KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
void kbase_jit_debugfs_init(struct kbase_context *kctx)
{
+ /* prevent unprivileged use of the debug file system
+ * on older kernel versions
+ */
+#if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
+ /* wider debugfs access is only safe on newer kernel versions */
+ const mode_t mode = 0444;
+#else
+ const mode_t mode = 0400;
+#endif
+
/* Caller already ensures this, but we keep the pattern for
* maintenance safety.
*/
@@ -2938,22 +2953,24 @@ void kbase_jit_debugfs_init(struct kbase_context *kctx)
WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
return;
+
+
/* Debugfs entry for getting the number of JIT allocations. */
- debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry,
+ debugfs_create_file("mem_jit_count", mode, kctx->kctx_dentry,
kctx, &kbase_jit_debugfs_count_fops);
/*
* Debugfs entry for getting the total number of virtual pages
* used by JIT allocations.
*/
- debugfs_create_file("mem_jit_vm", S_IRUGO, kctx->kctx_dentry,
+ debugfs_create_file("mem_jit_vm", mode, kctx->kctx_dentry,
kctx, &kbase_jit_debugfs_vm_fops);
/*
* Debugfs entry for getting the number of physical pages used
* by JIT allocations.
*/
- debugfs_create_file("mem_jit_phys", S_IRUGO, kctx->kctx_dentry,
+ debugfs_create_file("mem_jit_phys", mode, kctx->kctx_dentry,
kctx, &kbase_jit_debugfs_phys_fops);
}
#endif /* CONFIG_DEBUG_FS */
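
Both JIT debugfs changes in this file reduce to the same pattern: expose the entries world-readable only on kernels new enough for unprivileged debugfs access to be safe. A minimal sketch of that pattern, with a placeholder file name and fops that are not part of this patch:

    #include <linux/debugfs.h>
    #include <linux/version.h>

    static void example_debugfs_init(struct dentry *parent, void *data,
                                     const struct file_operations *fops)
    {
    /* prevent unprivileged reads of debugfs entries on older kernels */
    #if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
        const mode_t mode = 0444;   /* world-readable on newer kernels */
    #else
        const mode_t mode = 0400;   /* owner-only on older kernels */
    #endif

        debugfs_create_file("example_stat", mode, parent, data, fops);
    }
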
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index d2889f1..0ce3037 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -145,6 +145,7 @@ struct kbase_mem_phy_alloc {
struct dma_buf_attachment *dma_attachment;
unsigned int current_mapping_usage_count;
struct sg_table *sgt;
+ bool need_sync;
} umm;
struct {
u64 stride;
@@ -1009,72 +1010,6 @@ void kbase_gpu_vm_unlock(struct kbase_context *kctx);
int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
/**
- * kbase_mmu_init - Initialise an object representing GPU page tables
- *
- * The structure should be terminated using kbase_mmu_term()
- *
- * @kbdev: Instance of GPU platform device, allocated from the probe method.
- * @mmut: GPU page tables to be initialized.
- * @kctx: Optional kbase context, may be NULL if this set of MMU tables
- * is not associated with a context.
- * @group_id: The physical group ID from which to allocate GPU page tables.
- * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
- *
- * Return: 0 if successful, otherwise a negative error code.
- */
-int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- struct kbase_context *kctx, int group_id);
-/**
- * kbase_mmu_term - Terminate an object representing GPU page tables
- *
- * This will free any page tables that have been allocated
- *
- * @kbdev: Instance of GPU platform device, allocated from the probe method.
- * @mmut: GPU page tables to be destroyed.
- */
-void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
-
-/**
- * kbase_mmu_create_ate - Create an address translation entry
- *
- * @kbdev: Instance of GPU platform device, allocated from the probe method.
- * @phy: Physical address of the page to be mapped for GPU access.
- * @flags: Bitmask of attributes of the GPU memory region being mapped.
- * @level: Page table level for which to build an address translation entry.
- * @group_id: The physical memory group in which the page was allocated.
- * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
- *
- * This function creates an address translation entry to encode the physical
- * address of a page to be mapped for access by the GPU, along with any extra
- * attributes required for the GPU memory region.
- *
- * Return: An address translation entry, either in LPAE or AArch64 format
- * (depending on the driver's configuration).
- */
-u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
- struct tagged_addr phy, unsigned long flags, int level, int group_id);
-
-int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
- struct kbase_mmu_table *mmut,
- const u64 start_vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags, int group_id);
-int kbase_mmu_insert_pages(struct kbase_device *kbdev,
- struct kbase_mmu_table *mmut, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags, int as_nr, int group_id);
-int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr phys, size_t nr,
- unsigned long flags, int group_id);
-
-int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
- struct kbase_mmu_table *mmut, u64 vpfn,
- size_t nr, int as_nr);
-int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags, int const group_id);
-
-/**
* @brief Register region and map it on the GPU.
*
* Call kbase_add_va_region() and map the region on the GPU.
@@ -1389,20 +1324,6 @@ static inline void kbase_clear_dma_addr(struct page *p)
}
/**
- * kbase_mmu_interrupt_process - Process a bus or page fault.
- * @kbdev The kbase_device the fault happened on
- * @kctx The kbase_context for the faulting address space if one was found.
- * @as The address space that has the fault
- * @fault Data relating to the fault
- *
- * This function will process a fault on a specific address space
- */
-void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
- struct kbase_context *kctx, struct kbase_as *as,
- struct kbase_fault *fault);
-
-
-/**
* @brief Process a page fault.
*
* @param[in] data work_struct passed by queue_work()
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index b3358ca..57667be 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -46,8 +46,9 @@
#include <mali_kbase.h>
#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_ioctl.h>
+#include <mmu/mali_kbase_mmu.h>
#if ((KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE) || \
(KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE))
@@ -278,7 +279,10 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
dev_dbg(dev, "Allocating %lld va_pages, %lld commit_pages, %lld extent, 0x%llX flags\n",
va_pages, commit_pages, extent, *flags);
- *gpu_va = 0; /* return 0 on failure */
+ if (!(*flags & BASE_MEM_FLAG_MAP_FIXED))
+ *gpu_va = 0; /* return 0 on failure */
+ else
+ dev_err(dev, "Keeping requested GPU VA of 0x%llx\n", (unsigned long long)*gpu_va);
if (!kbase_check_alloc_flags(*flags)) {
dev_warn(dev,
@@ -334,7 +338,9 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
zone = KBASE_REG_ZONE_CUSTOM_VA;
}
- reg = kbase_alloc_free_region(rbtree, 0, va_pages, zone);
+ reg = kbase_alloc_free_region(rbtree, PFN_DOWN(*gpu_va),
+ va_pages, zone);
+
if (!reg) {
dev_err(dev, "Failed to allocate free region");
goto no_region;
@@ -404,7 +410,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
*gpu_va = (u64) cookie;
} else /* we control the VA */ {
- if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
+ if (kbase_gpu_mmap(kctx, reg, *gpu_va, va_pages, 1) != 0) {
dev_warn(dev, "Failed to map memory on GPU");
kbase_gpu_vm_unlock(kctx);
goto no_mmap;
@@ -1122,7 +1128,8 @@ int kbase_mem_umm_map(struct kbase_context *kctx,
alloc->imported.umm.current_mapping_usage_count++;
if (alloc->imported.umm.current_mapping_usage_count != 1) {
- if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT)) {
+ if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT) ||
+ alloc->imported.umm.need_sync) {
if (!kbase_is_region_invalid_or_free(reg)) {
err = kbase_mem_do_sync_imported(kctx, reg,
KBASE_SYNC_TO_DEVICE);
@@ -1193,7 +1200,8 @@ void kbase_mem_umm_unmap(struct kbase_context *kctx,
{
alloc->imported.umm.current_mapping_usage_count--;
if (alloc->imported.umm.current_mapping_usage_count) {
- if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT)) {
+ if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT) ||
+ alloc->imported.umm.need_sync) {
if (!kbase_is_region_invalid_or_free(reg)) {
int err = kbase_mem_do_sync_imported(kctx, reg,
KBASE_SYNC_TO_CPU);
@@ -1258,6 +1266,7 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
struct dma_buf *dma_buf;
struct dma_buf_attachment *dma_attachment;
bool shared_zone = false;
+ bool need_sync = false;
int group_id;
/* 64-bit address range is the max */
@@ -1298,6 +1307,9 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
if (*flags & BASE_MEM_IMPORT_SHARED)
shared_zone = true;
+ if (*flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
+ need_sync = true;
+
#ifdef CONFIG_64BIT
if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
/*
@@ -1353,6 +1365,7 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
reg->gpu_alloc->imported.umm.dma_buf = dma_buf;
reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment;
reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
+ reg->gpu_alloc->imported.umm.need_sync = need_sync;
reg->extent = 0;
if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
@@ -1407,6 +1420,10 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
struct kbase_alloc_import_user_buf *user_buf;
struct page **pages = NULL;
+ /* Flag supported only for dma-buf imported memory */
+ if (*flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
+ return NULL;
+
if ((address & (cache_line_alignment - 1)) != 0 ||
(size & (cache_line_alignment - 1)) != 0) {
if (*flags & BASE_MEM_UNCACHED_GPU) {
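
Taken together, these changes let a dma-buf import opt in to cache maintenance at map/unmap time even when the legacy compatibility option is disabled. A condensed sketch of the resulting logic (not compilable on its own; 'sync_dir' stands for the direction used at the call site):

    /* at import: remember whether the caller asked for map/unmap syncs */
    if (*flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
        alloc->imported.umm.need_sync = true;

    /* at map (KBASE_SYNC_TO_DEVICE) and unmap (KBASE_SYNC_TO_CPU) */
    if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT) ||
        alloc->imported.umm.need_sync) {
        if (!kbase_is_region_invalid_or_free(reg))
            err = kbase_mem_do_sync_imported(kctx, reg, sync_dir);
    }
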
diff --git a/mali_kbase/mali_kbase_mem_pool_debugfs.c b/mali_kbase/mali_kbase_mem_pool_debugfs.c
index edb9cd4..5879fdf 100644
--- a/mali_kbase/mali_kbase_mem_pool_debugfs.c
+++ b/mali_kbase/mali_kbase_mem_pool_debugfs.c
@@ -169,15 +169,23 @@ static const struct file_operations kbase_mem_pool_debugfs_max_size_fops = {
void kbase_mem_pool_debugfs_init(struct dentry *parent,
struct kbase_context *kctx)
{
- debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent,
+ /* prevent unprivileged use of the debug files on older kernel versions */
+#if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
+ /* wider debugfs access is only safe on newer kernel versions */
+ const mode_t mode = 0644;
+#else
+ const mode_t mode = 0600;
+#endif
+
+ debugfs_create_file("mem_pool_size", mode, parent,
&kctx->mem_pools.small, &kbase_mem_pool_debugfs_fops);
- debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+ debugfs_create_file("mem_pool_max_size", mode, parent,
&kctx->mem_pools.small, &kbase_mem_pool_debugfs_max_size_fops);
- debugfs_create_file("lp_mem_pool_size", S_IRUGO | S_IWUSR, parent,
+ debugfs_create_file("lp_mem_pool_size", mode, parent,
&kctx->mem_pools.large, &kbase_mem_pool_debugfs_fops);
- debugfs_create_file("lp_mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+ debugfs_create_file("lp_mem_pool_max_size", mode, parent,
&kctx->mem_pools.large, &kbase_mem_pool_debugfs_max_size_fops);
}
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index 537a239..c264d0b 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -33,7 +33,7 @@
#include <mali_base_kernel.h>
#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <linux/version.h>
#include <linux/ktime.h>
#include <linux/pfn.h>
diff --git a/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c b/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
new file mode 100644
index 0000000..fd60e35
--- /dev/null
+++ b/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
@@ -0,0 +1,391 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Base kernel MMU management specific to Job Manager GPUs.
+ */
+
+#include <mali_kbase.h>
+#include <gpu/mali_kbase_gpu_fault.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
+ struct kbase_mmu_setup * const setup)
+{
+ /* Set up the required caching policies at the correct indices
+ * in the memattr register.
+ */
+ setup->memattr =
+ (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
+ (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (AS_MEMATTR_FORCE_TO_CACHE_ALL <<
+ (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (AS_MEMATTR_WRITE_ALLOC <<
+ (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
+ (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (AS_MEMATTR_AARCH64_OUTER_WA <<
+ (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
+ (AS_MEMATTR_AARCH64_NON_CACHEABLE <<
+ (AS_MEMATTR_INDEX_NON_CACHEABLE * 8));
+
+ setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
+ setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+}
+
+void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, struct kbase_fault *fault)
+{
+ struct kbase_device *const kbdev = kctx->kbdev;
+ u32 const status = fault->status;
+ u32 const exception_type = (status & 0xFF);
+ u32 const exception_data = (status >> 8) & 0xFFFFFF;
+ int const as_no = as->number;
+ unsigned long flags;
+
+ /* terminal fault, print info about the fault */
+ dev_err(kbdev->dev,
+ "GPU bus fault in AS%d at VA 0x%016llX\n"
+ "raw fault status: 0x%X\n"
+ "exception type 0x%X: %s\n"
+ "exception data 0x%X\n"
+ "pid: %d\n",
+ as_no, fault->addr,
+ status,
+ exception_type, kbase_gpu_exception_name(exception_type),
+ exception_data,
+ kctx->pid);
+
+ /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter
+ * dumping AS transaction begin
+ */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* Set the MMU into unmapped mode */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_disable(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ kbase_mmu_hw_clear_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+}
+
+/**
+ * The caller must ensure it has retained the ctx to prevent it from being
+ * scheduled out whilst it is being worked on.
+ */
+void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str,
+ struct kbase_fault *fault)
+{
+ unsigned long flags;
+ u32 exception_type;
+ u32 access_type;
+ u32 source_id;
+ int as_no;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+
+ as_no = as->number;
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+
+ /* Make sure the context was active */
+ if (WARN_ON(atomic_read(&kctx->refcount) <= 0))
+ return;
+
+ /* decode the fault status */
+ exception_type = fault->status & 0xFF;
+ access_type = (fault->status >> 8) & 0x3;
+ source_id = (fault->status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(kbdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n"
+ "pid: %d\n",
+ as_no, fault->addr,
+ reason_str,
+ fault->status,
+ exception_type, kbase_gpu_exception_name(exception_type),
+ access_type, kbase_gpu_access_type_name(fault->status),
+ source_id,
+ kctx->pid);
+
+ /* hardware counters dump fault handling */
+ if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
+ (kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_DUMPING)) {
+ if ((fault->addr >= kbdev->hwcnt.addr) &&
+ (fault->addr < (kbdev->hwcnt.addr +
+ kbdev->hwcnt.addr_bytes)))
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+ }
+
+ /* Stop the kctx from submitting more jobs and cause it to be scheduled
+ * out/rescheduled - this will occur on releasing the context's refcount
+ */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+ /* Kill any running jobs from the context. Submit is disallowed, so no
+ * more jobs from this context can appear in the job slots from this
+ * point on
+ */
+ kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* switch to UNMAPPED mode, will abort all jobs and stop
+ * any hw counter dumping
+ */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_disable(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ /* AS transaction end */
+ /* Clear down the fault */
+ kbase_mmu_hw_clear_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+}
+
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_as *as,
+ struct kbase_fault *fault)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!kctx) {
+ dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
+ kbase_as_has_bus_fault(as, fault) ?
+ "Bus error" : "Page fault",
+ as->number, fault->addr);
+
+ /* Since no ctx was found, the MMU must be disabled. */
+ WARN_ON(as->current_setup.transtab);
+
+ if (kbase_as_has_bus_fault(as, fault)) {
+ kbase_mmu_hw_clear_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ } else if (kbase_as_has_page_fault(as, fault)) {
+ kbase_mmu_hw_clear_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ }
+
+ return;
+ }
+
+ if (kbase_as_has_bus_fault(as, fault)) {
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ /*
+ * hw counters dumping in progress, signal the
+ * other thread that it failed
+ */
+ if ((kbdev->hwcnt.kctx == kctx) &&
+ (kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_DUMPING))
+ kbdev->hwcnt.backend.state =
+ KBASE_INSTR_STATE_FAULT;
+
+ /*
+ * Stop the kctx from submitting more jobs and cause it
+ * to be scheduled out/rescheduled when all references
+ * to it are released
+ */
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+ dev_warn(kbdev->dev,
+ "Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
+ as->number, fault->addr,
+ fault->extra_addr);
+ else
+ dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
+ as->number, fault->addr);
+
+ /*
+ * We need to switch to UNMAPPED mode - but we do this in a
+ * worker so that we can sleep
+ */
+ WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
+ atomic_inc(&kbdev->faults_pending);
+ } else {
+ WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
+ atomic_inc(&kbdev->faults_pending);
+ }
+}
+
+static void validate_protected_page_fault(struct kbase_device *kbdev)
+{
+ /* GPUs which support (native) protected mode shall not report page
+ * fault addresses unless they have protected debug mode and protected
+ * debug mode is turned on
+ */
+ u32 protected_debug_mode = 0;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+ protected_debug_mode = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
+ }
+
+ if (!protected_debug_mode) {
+ /* fault_addr should never be reported in protected mode.
+ * However, we just continue by printing an error message
+ */
+ dev_err(kbdev->dev, "Fault address reported in protected mode\n");
+ }
+}
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
+{
+ const int num_as = 16;
+ const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
+ const int pf_shift = 0;
+ const unsigned long as_bit_mask = (1UL << num_as) - 1;
+ unsigned long flags;
+ u32 new_mask;
+ u32 tmp;
+
+ /* bus faults */
+ u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
+ /* page faults (note: Ignore ASes with both pf and bf) */
+ u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
+
+ if (WARN_ON(kbdev == NULL))
+ return;
+
+ /* remember current mask */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+ new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
+ /* mask interrupts for now */
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+
+ while (bf_bits | pf_bits) {
+ struct kbase_as *as;
+ int as_no;
+ struct kbase_context *kctx;
+ struct kbase_fault *fault;
+
+ /*
+ * the while logic ensures we have a bit set, no need to check
+ * for not-found here
+ */
+ as_no = ffs(bf_bits | pf_bits) - 1;
+ as = &kbdev->as[as_no];
+
+ /* find the fault type */
+ if (bf_bits & (1 << as_no))
+ fault = &as->bf_data;
+ else
+ fault = &as->pf_data;
+
+ /*
+ * Refcount the kctx ASAP - it shouldn't disappear anyway, since
+ * Bus/Page faults _should_ only occur whilst jobs are running,
+ * and a job causing the Bus/Page fault shouldn't complete until
+ * the MMU is updated
+ */
+ kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
+
+ /* find faulting address */
+ fault->addr = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+ AS_FAULTADDRESS_HI));
+ fault->addr <<= 32;
+ fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+ AS_FAULTADDRESS_LO));
+ /* Mark the fault protected or not */
+ fault->protected_mode = kbdev->protected_mode;
+
+ if (kbdev->protected_mode && fault->addr) {
+ /* check if address reporting is allowed */
+ validate_protected_page_fault(kbdev);
+ }
+
+ /* report the fault to debugfs */
+ kbase_as_fault_debugfs_new(kbdev, as_no);
+
+ /* record the fault status */
+ fault->status = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+ AS_FAULTSTATUS));
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+ fault->extra_addr = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
+ fault->extra_addr <<= 32;
+ fault->extra_addr |= kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));
+ }
+
+ if (kbase_as_has_bus_fault(as, fault)) {
+ /* Mark bus fault as handled.
+ * Note that a bus fault is processed first in the case
+ * where both a bus fault and a page fault occur.
+ */
+ bf_bits &= ~(1UL << as_no);
+
+ /* remove the queued BF (and PF) from the mask */
+ new_mask &= ~(MMU_BUS_ERROR(as_no) |
+ MMU_PAGE_FAULT(as_no));
+ } else {
+ /* Mark page fault as handled */
+ pf_bits &= ~(1UL << as_no);
+
+ /* remove the queued PF from the mask */
+ new_mask &= ~MMU_PAGE_FAULT(as_no);
+ }
+
+ /* Process the interrupt for this address space */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+
+ /* reenable interrupts */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+ tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
+ new_mask |= tmp;
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
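
The fault reporting above decodes the raw AS_FAULTSTATUS word with fixed shifts and masks: the low byte is the exception type, the next two bits the access type, and the top sixteen bits the source ID; for bus faults the 24 bits above the exception type are reported as exception data. A small self-contained sketch of that decoding, using the same shifts as the code above:

    #include <stdio.h>

    /* Decode a raw fault status word with the same shifts the handlers above use. */
    static void decode_fault_status(unsigned int status)
    {
        unsigned int exception_type = status & 0xFF;             /* low byte */
        unsigned int access_type    = (status >> 8) & 0x3;       /* page faults */
        unsigned int exception_data = (status >> 8) & 0xFFFFFF;  /* bus faults */
        unsigned int source_id      = status >> 16;

        printf("exception type 0x%X, access type 0x%X, source id 0x%X, exception data 0x%X\n",
               exception_type, access_type, source_id, exception_data);
    }
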
diff --git a/mali_kbase/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index 1967bc9..5392305 100644
--- a/mali_kbase/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -20,32 +20,28 @@
*
*/
-
-
/**
* @file mali_kbase_mmu.c
* Base kernel MMU management.
*/
-/* #define DEBUG 1 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_fault.h>
#include <gpu/mali_kbase_gpu_regmap.h>
-#include <mali_kbase_tracepoints.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_instr_defs.h>
#include <mali_kbase_debug.h>
-
-#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
-
#include <mali_kbase_defs.h>
#include <mali_kbase_hw.h>
-#include <mali_kbase_mmu_hw.h>
+#include <mmu/mali_kbase_mmu_hw.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_reset_gpu.h>
+#include <mmu/mali_kbase_mmu.h>
+#include <mmu/mali_kbase_mmu_internal.h>
#define KBASE_MMU_PAGE_ENTRIES 512
@@ -108,15 +104,10 @@ static void kbase_mmu_sync_pgd(struct kbase_device *kbdev,
* - PGD: Page Directory.
* - PTE: Page Table Entry. A 64bit value pointing to the next
* level of translation
- * - ATE: Address Transation Entry. A 64bit value pointing to
+ * - ATE: Address Translation Entry. A 64bit value pointing to
* a 4kB physical page.
*/
-static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, const char *reason_str,
- struct kbase_fault *fault);
-
-
static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
struct tagged_addr *phys, size_t nr,
unsigned long flags, int group_id);
@@ -147,39 +138,46 @@ static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
if (!multiple) {
dev_warn(kbdev->dev,
- "VA Region 0x%llx extent was 0, allocator needs to set this properly for KBASE_REG_PF_GROW\n",
- ((unsigned long long)reg->start_pfn) << PAGE_SHIFT);
+ "VA Region 0x%llx extent was 0, allocator needs to set this properly for KBASE_REG_PF_GROW\n",
+ ((unsigned long long)reg->start_pfn) << PAGE_SHIFT);
return minimum_extra;
}
/* Calculate the remainder to subtract from minimum_extra to make it
* the desired (rounded down) multiple of the extent.
* Depending on reg's flags, the base used for calculating multiples is
- * different */
+ * different
+ */
if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
/* multiple is based from the top of the initial commit, which
* has been allocated in such a way that (start_pfn +
* initial_commit) is already aligned to multiple. Hence the
* pfn for the end of committed memory will also be aligned to
- * multiple */
+ * multiple
+ */
size_t initial_commit = reg->initial_commit;
if (fault_rel_pfn < initial_commit) {
/* this case is just to catch in case it's been
* recommitted by userspace to be smaller than the
- * initial commit */
+ * initial commit
+ */
minimum_extra = initial_commit - reg_current_size;
remainder = 0;
} else {
- /* same as calculating (fault_rel_pfn - initial_commit + 1) */
- size_t pages_after_initial = minimum_extra + reg_current_size - initial_commit;
+ /* same as calculating
+ * (fault_rel_pfn - initial_commit + 1)
+ */
+ size_t pages_after_initial = minimum_extra +
+ reg_current_size - initial_commit;
remainder = pages_after_initial % multiple;
}
} else {
/* multiple is based from the current backed size, even if the
* current backed size/pfn for end of committed memory are not
- * themselves aligned to multiple */
+ * themselves aligned to multiple
+ */
remainder = minimum_extra % multiple;
}
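
A quick worked example of the remainder step above: with an extent ('multiple') of 64 pages and a minimum_extra of 100 pages, the remainder is 100 % 64 = 36, which the caller (in code outside this hunk) uses to bring the growth onto a whole multiple of the extent.

    /* illustrative numbers only */
    size_t multiple = 64, minimum_extra = 100;
    size_t remainder = minimum_extra % multiple;   /* 36 */
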
@@ -293,7 +291,7 @@ static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
{
struct kbase_fault *fault = &faulting_as->pf_data;
- switch (fault->status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ switch (AS_FAULTSTATUS_ACCESS_TYPE_GET(fault->status)) {
case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
kbase_gpu_mmu_handle_write_fault(kctx, faulting_as);
@@ -544,8 +542,9 @@ void page_fault_worker(struct work_struct *data)
kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
- /* Grab the context that was already refcounted in kbase_mmu_interrupt().
- * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+ /* Grab the context that was already refcounted in kbase_mmu_interrupt()
+ * Therefore, it cannot be scheduled out of this AS until we explicitly
+ * release it
*/
kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
if (WARN_ON(!kctx)) {
@@ -635,8 +634,9 @@ void page_fault_worker(struct work_struct *data)
#endif /* CONFIG_MALI_2MB_ALLOC */
page_fault_retry:
- /* so we have a translation fault, let's see if it is for growable
- * memory */
+ /* so we have a translation fault,
+ * let's see if it is for growable memory
+ */
kbase_gpu_vm_lock(kctx);
region = kbase_region_tracker_find_region_enclosing_address(kctx,
@@ -677,9 +677,11 @@ page_fault_retry:
goto fault_done;
}
- /* find the size we need to grow it by */
- /* we know the result fit in a size_t due to kbase_region_tracker_find_region_enclosing_address
- * validating the fault_adress to be within a size_t from the start_pfn */
+ /* find the size we need to grow it by
+ * we know the result fits in a size_t due to
+ * kbase_region_tracker_find_region_enclosing_address
+ * validating the fault_address to be within a size_t from the start_pfn
+ */
fault_rel_pfn = fault_pfn - region->start_pfn;
if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
@@ -715,9 +717,10 @@ page_fault_retry:
new_pages = reg_grow_calc_extra_pages(kbdev, region, fault_rel_pfn);
/* cap to max vsize */
- new_pages = min(new_pages, region->nr_pages - kbase_reg_current_backed_size(region));
+ new_pages = min(new_pages, region->nr_pages -
+ kbase_reg_current_backed_size(region));
- if (0 == new_pages) {
+ if (new_pages == 0) {
mutex_lock(&kbdev->mmu_hw_mutex);
/* Duplicate of a fault we've already handled, nothing to do */
@@ -747,7 +750,8 @@ page_fault_retry:
u32 op;
/* alloc success */
- KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
+ KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region)
+ <= region->nr_pages);
/* set up the new pages */
pfn_offset = kbase_reg_current_backed_size(region) - new_pages;
@@ -764,17 +768,21 @@ page_fault_retry:
&kbase_get_gpu_phy_pages(region)[pfn_offset],
new_pages, region->flags, region->gpu_alloc->group_id);
if (err) {
- kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
+ kbase_free_phy_pages_helper(region->gpu_alloc,
+ new_pages);
if (region->gpu_alloc != region->cpu_alloc)
kbase_free_phy_pages_helper(region->cpu_alloc,
new_pages);
kbase_gpu_vm_unlock(kctx);
- /* The locked VA region will be unlocked and the cache invalidated in here */
+ /* The locked VA region will be unlocked and the cache
+ * invalidated in here
+ */
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Page table update failure", fault);
goto fault_done;
}
- KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, kctx->id, as_no, (u64)new_pages);
+ KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, kctx->id, as_no,
+ (u64)new_pages);
/* AS transaction begin */
mutex_lock(&kbdev->mmu_hw_mutex);
@@ -892,7 +900,7 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
return 0;
page = kmap(p);
- if (NULL == page)
+ if (page == NULL)
goto alloc_free;
/* If the MMU tables belong to a context then account the memory usage
@@ -952,7 +960,7 @@ static int mmu_get_next_pgd(struct kbase_device *kbdev,
p = pfn_to_page(PFN_DOWN(*pgd));
page = kmap(p);
- if (NULL == page) {
+ if (page == NULL) {
dev_warn(kbdev->dev, "%s: kmap failure\n", __func__);
return -EINVAL;
}
@@ -1099,16 +1107,19 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
{
phys_addr_t pgd;
u64 *pgd_page;
- /* In case the insert_single_page only partially completes we need to be
- * able to recover */
+ /* In case the insert_single_page only partially completes
+ * we need to be able to recover
+ */
bool recover_required = false;
- u64 recover_vpfn = vpfn;
+ u64 start_vpfn = vpfn;
size_t recover_count = 0;
size_t remain = nr;
int err;
struct kbase_device *kbdev;
- KBASE_DEBUG_ASSERT(NULL != kctx);
+ if (WARN_ON(kctx == NULL))
+ return -EINVAL;
+
/* 64-bit address range is the max */
KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
@@ -1155,11 +1166,12 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
dev_warn(kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
if (recover_required) {
/* Invalidate the pages we have partially
- * completed */
+ * completed
+ */
mmu_insert_pages_failure_recovery(kbdev,
&kctx->mmu,
- recover_vpfn,
- recover_vpfn + recover_count);
+ start_vpfn,
+ start_vpfn + recover_count);
}
goto fail_unlock;
}
@@ -1170,11 +1182,12 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
dev_warn(kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n");
if (recover_required) {
/* Invalidate the pages we have partially
- * completed */
+ * completed
+ */
mmu_insert_pages_failure_recovery(kbdev,
&kctx->mmu,
- recover_vpfn,
- recover_vpfn + recover_count);
+ start_vpfn,
+ start_vpfn + recover_count);
}
err = -ENOMEM;
goto fail_unlock;
@@ -1200,17 +1213,18 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
kunmap(p);
/* We have started modifying the page table.
* If further pages need inserting and fail we need to undo what
- * has already taken place */
+ * has already taken place
+ */
recover_required = true;
recover_count += count;
}
mutex_unlock(&kctx->mmu.mmu_lock);
- kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ kbase_mmu_flush_invalidate(kctx, start_vpfn, nr, false);
return 0;
fail_unlock:
mutex_unlock(&kctx->mmu.mmu_lock);
- kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ kbase_mmu_flush_invalidate(kctx, start_vpfn, nr, false);
return err;
}
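
The rename from recover_vpfn to start_vpfn also keeps the final flush correct: vpfn is advanced chunk by chunk while inserting (not visible in these hunks), so flushing from the saved starting VPFN rather than the now-advanced vpfn covers everything that was touched, on both the success and failure paths. A minimal sketch of that pattern, with the loop body elided:

    /* remember the first VPFN up front so the final flush covers the whole
     * requested range even though vpfn is advanced while inserting
     */
    u64 start_vpfn = vpfn;
    size_t remain = nr;

    while (remain) {
        size_t count = /* pages handled this iteration */ remain;

        /* ... write 'count' PTEs starting at vpfn ... */
        vpfn += count;
        remain -= count;
    }

    kbase_mmu_flush_invalidate(kctx, start_vpfn, nr, false);
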
@@ -1314,7 +1328,8 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
"%s: mmu_get_bottom_pgd failure\n", __func__);
if (insert_vpfn != start_vpfn) {
/* Invalidate the pages we have partially
- * completed */
+ * completed
+ */
mmu_insert_pages_failure_recovery(kbdev,
mmut, start_vpfn, insert_vpfn);
}
@@ -1328,7 +1343,8 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
__func__);
if (insert_vpfn != start_vpfn) {
/* Invalidate the pages we have partially
- * completed */
+ * completed
+ */
mmu_insert_pages_failure_recovery(kbdev,
mmut, start_vpfn, insert_vpfn);
}
@@ -1398,7 +1414,8 @@ int kbase_mmu_insert_pages(struct kbase_device *kbdev,
if (mmut->kctx)
kbase_mmu_flush_invalidate(mmut->kctx, vpfn, nr, false);
else
- kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, nr, false, as_nr);
+ kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, nr, false,
+ as_nr);
return err;
}
@@ -1437,8 +1454,8 @@ static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
vpfn, nr, op, 0);
if (err) {
/* Flush failed to complete, assume the
- * GPU has hung and perform a reset to
- * recover */
+ * GPU has hung and perform a reset to recover
+ */
dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
if (kbase_prepare_to_reset_gpu_locked(kbdev))
@@ -1548,7 +1565,8 @@ void kbase_mmu_disable(struct kbase_context *kctx)
/* ASSERT that the context has a valid as_nr, which is only the case
* when it's scheduled in.
*
- * as_nr won't change because the caller has the hwaccess_lock */
+ * as_nr won't change because the caller has the hwaccess_lock
+ */
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
@@ -1582,11 +1600,12 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr)
{
phys_addr_t pgd;
+ u64 start_vpfn = vpfn;
size_t requested_nr = nr;
struct kbase_mmu_mode const *mmu_mode;
int err = -EFAULT;
- if (0 == nr) {
+ if (nr == 0) {
/* early out if nothing to do */
return 0;
}
@@ -1692,9 +1711,11 @@ out:
mutex_unlock(&mmut->mmu_lock);
if (mmut->kctx)
- kbase_mmu_flush_invalidate(mmut->kctx, vpfn, requested_nr, true);
+ kbase_mmu_flush_invalidate(mmut->kctx, start_vpfn, requested_nr,
+ true);
else
- kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, requested_nr, true, as_nr);
+ kbase_mmu_flush_invalidate_no_ctx(kbdev, start_vpfn, requested_nr,
+ true, as_nr);
return err;
}
@@ -1728,7 +1749,9 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
int err;
struct kbase_device *kbdev;
- KBASE_DEBUG_ASSERT(NULL != kctx);
+ if (WARN_ON(kctx == NULL))
+ return -EINVAL;
+
KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
/* Early out if there is nothing to do */
@@ -1827,9 +1850,11 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
/* kmap_atomic should NEVER fail. */
- KBASE_DEBUG_ASSERT(NULL != pgd_page);
+ if (WARN_ON(pgd_page == NULL))
+ return;
/* Copy the page to our preallocated buffer so that we can minimize
- * kmap_atomic usage */
+ * kmap_atomic usage
+ */
memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
kunmap_atomic(pgd_page);
pgd_page = pgd_page_buffer;
@@ -1924,7 +1949,8 @@ void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
mutex_destroy(&mmut->mmu_lock);
}
-static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
+ int level, char ** const buffer, size_t *size_left)
{
phys_addr_t target_pgd;
u64 *pgd_page;
@@ -1934,7 +1960,8 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
struct kbase_device *kbdev;
struct kbase_mmu_mode const *mmu_mode;
- KBASE_DEBUG_ASSERT(NULL != kctx);
+ if (WARN_ON(kctx == NULL))
+ return 0;
lockdep_assert_held(&kctx->mmu.mmu_lock);
kbdev = kctx->kbdev;
@@ -1947,7 +1974,9 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
}
if (*size_left >= size) {
- /* A modified physical address that contains the page table level */
+ /* A modified physical address that contains
+ * the page table level
+ */
u64 m_pgd = pgd | level;
/* Put the modified physical address in the output buffer */
@@ -1991,14 +2020,15 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
KBASE_DEBUG_ASSERT(kctx);
- if (0 == nr_pages) {
+ if (nr_pages == 0) {
/* can't dump in a 0 sized buffer, early out */
return NULL;
}
size_left = nr_pages * PAGE_SIZE;
- KBASE_DEBUG_ASSERT(0 != size_left);
+ if (WARN_ON(size_left == 0))
+ return NULL;
kaddr = vmalloc_user(size_left);
mutex_lock(&kctx->mmu.mmu_lock);
@@ -2039,7 +2069,9 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
size += sizeof(u64);
if (size > (nr_pages * PAGE_SIZE)) {
- /* The buffer isn't big enough - free the memory and return failure */
+ /* The buffer isn't big enough - free the memory and
+ * return failure
+ */
goto fail_free;
}
@@ -2096,27 +2128,12 @@ void bus_fault_worker(struct work_struct *data)
}
- /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
- if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
- unsigned long flags;
-
- /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
- /* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
-
- /* Set the MMU into unmapped mode */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_mmu_disable(kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- mutex_unlock(&kbdev->mmu_hw_mutex);
- /* AS transaction end */
-
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
-
+ /* NOTE: If GPU already powered off for suspend,
+ * we don't need to switch to unmapped
+ */
+ if (!kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ kbase_gpu_report_bus_fault_and_kill(kctx, faulting_as, fault);
kbase_pm_context_idle(kbdev);
}
@@ -2125,184 +2142,6 @@ void bus_fault_worker(struct work_struct *data)
atomic_dec(&kbdev->faults_pending);
}
-static const char *access_type_name(struct kbase_device *kbdev,
- u32 fault_status)
-{
- switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
- case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
- return "ATOMIC";
- else
- return "UNKNOWN";
- case AS_FAULTSTATUS_ACCESS_TYPE_READ:
- return "READ";
- case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
- return "WRITE";
- case AS_FAULTSTATUS_ACCESS_TYPE_EX:
- return "EXECUTE";
- default:
- WARN_ON(1);
- return NULL;
- }
-}
-
-
-/**
- * The caller must ensure it's retained the ctx to prevent it from being scheduled out whilst it's being worked on.
- */
-static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, const char *reason_str,
- struct kbase_fault *fault)
-{
- unsigned long flags;
- int exception_type;
- int access_type;
- int source_id;
- int as_no;
- struct kbase_device *kbdev;
- struct kbasep_js_device_data *js_devdata;
-
- as_no = as->number;
- kbdev = kctx->kbdev;
- js_devdata = &kbdev->js_data;
-
- /* ASSERT that the context won't leave the runpool */
- KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
-
- /* decode the fault status */
- exception_type = fault->status & 0xFF;
- access_type = (fault->status >> 8) & 0x3;
- source_id = (fault->status >> 16);
-
- /* terminal fault, print info about the fault */
- dev_err(kbdev->dev,
- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "Reason: %s\n"
- "raw fault status: 0x%X\n"
- "decoded fault status: %s\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n"
- "pid: %d\n",
- as_no, fault->addr,
- reason_str,
- fault->status,
- (fault->status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
- exception_type, kbase_gpu_exception_name(exception_type),
- access_type, access_type_name(kbdev, fault->status),
- source_id,
- kctx->pid);
-
- /* hardware counters dump fault handling */
- if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
- (kbdev->hwcnt.backend.state ==
- KBASE_INSTR_STATE_DUMPING)) {
- if ((fault->addr >= kbdev->hwcnt.addr) &&
- (fault->addr < (kbdev->hwcnt.addr +
- kbdev->hwcnt.addr_bytes)))
- kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
- }
-
- /* Stop the kctx from submitting more jobs and cause it to be scheduled
- * out/rescheduled - this will occur on releasing the context's refcount */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbasep_js_clear_submit_allowed(js_devdata, kctx);
-
- /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
- * context can appear in the job slots from this point on */
- kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- /* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
-
- /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_mmu_disable(kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- mutex_unlock(&kbdev->mmu_hw_mutex);
-
-
- /* AS transaction end */
- /* Clear down the fault */
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
-}
-
-void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
- struct kbase_context *kctx, struct kbase_as *as,
- struct kbase_fault *fault)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- if (!kctx) {
- dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
- kbase_as_has_bus_fault(as, fault) ?
- "Bus error" : "Page fault",
- as->number, fault->addr);
-
- /* Since no ctx was found, the MMU must be disabled. */
- WARN_ON(as->current_setup.transtab);
-
- if (kbase_as_has_bus_fault(as, fault)) {
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- } else if (kbase_as_has_page_fault(as, fault)) {
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- }
-
- return;
- }
-
- if (kbase_as_has_bus_fault(as, fault)) {
- struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
-
- /*
- * hw counters dumping in progress, signal the
- * other thread that it failed
- */
- if ((kbdev->hwcnt.kctx == kctx) &&
- (kbdev->hwcnt.backend.state ==
- KBASE_INSTR_STATE_DUMPING))
- kbdev->hwcnt.backend.state =
- KBASE_INSTR_STATE_FAULT;
-
- /*
- * Stop the kctx from submitting more jobs and cause it
- * to be scheduled out/rescheduled when all references
- * to it are released
- */
- kbasep_js_clear_submit_allowed(js_devdata, kctx);
-
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
- dev_warn(kbdev->dev,
- "Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
- as->number, fault->addr,
- fault->extra_addr);
- else
- dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
- as->number, fault->addr);
-
- /*
- * We need to switch to UNMAPPED mode - but we do this in a
- * worker so that we can sleep
- */
- WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
- atomic_inc(&kbdev->faults_pending);
- } else {
- WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
- atomic_inc(&kbdev->faults_pending);
- }
-}
-
void kbase_flush_mmu_wqs(struct kbase_device *kbdev)
{
int i;
diff --git a/mali_kbase/mmu/mali_kbase_mmu.h b/mali_kbase/mmu/mali_kbase_mmu.h
new file mode 100644
index 0000000..c9e27b1
--- /dev/null
+++ b/mali_kbase/mmu/mali_kbase_mmu.h
@@ -0,0 +1,118 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_MMU_H_
+#define _KBASE_MMU_H_
+
+/**
+ * kbase_mmu_init - Initialise an object representing GPU page tables
+ *
+ * The structure should be terminated using kbase_mmu_term()
+ *
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @mmut: GPU page tables to be initialized.
+ * @kctx: Optional kbase context, may be NULL if this set of MMU tables
+ * is not associated with a context.
+ * @group_id: The physical group ID from which to allocate GPU page tables.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+ struct kbase_context *kctx, int group_id);
+
+/**
+ * kbase_mmu_interrupt - Process an MMU interrupt.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
+ *
+ * @kbdev: Pointer to the kbase device for which the interrupt happened.
+ * @irq_stat: Value of the MMU_IRQ_STATUS register.
+ */
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/**
+ * kbase_mmu_term - Terminate an object representing GPU page tables
+ *
+ * This will free any page tables that have been allocated
+ *
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @mmut: GPU page tables to be destroyed.
+ */
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
+
+/**
+ * kbase_mmu_create_ate - Create an address translation entry
+ *
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @phy: Physical address of the page to be mapped for GPU access.
+ * @flags: Bitmask of attributes of the GPU memory region being mapped.
+ * @level: Page table level for which to build an address translation entry.
+ * @group_id: The physical memory group in which the page was allocated.
+ * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * This function creates an address translation entry to encode the physical
+ * address of a page to be mapped for access by the GPU, along with any extra
+ * attributes required for the GPU memory region.
+ *
+ * Return: An address translation entry, either in LPAE or AArch64 format
+ * (depending on the driver's configuration).
+ */
+u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
+ struct tagged_addr phy, unsigned long flags, int level, int group_id);
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ const u64 start_vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags, int group_id);
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags, int as_nr, int group_id);
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr phys, size_t nr,
+ unsigned long flags, int group_id);
+
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 vpfn,
+ size_t nr, int as_nr);
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags, int const group_id);
+
+/**
+ * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
+ *
+ * Process the bus fault interrupt that was reported for a particular GPU
+ * address space.
+ *
+ * @kbdev: Pointer to the kbase device for which bus fault was reported.
+ * @status: Value of the GPU_FAULTSTATUS register.
+ * @as_nr: GPU address space for which the bus fault occurred.
+ *
+ * Return: zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
+ u32 as_nr);
+
+#endif /* _KBASE_MMU_H_ */
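
As a rough usage sketch of the API now collected in this header (the caller, its arguments, and the group ID are placeholders, and error handling is trimmed), a set of GPU page tables is created, populated, and torn down like this:

    /* hypothetical helper, for illustration only */
    static int example_map_and_unmap(struct kbase_device *kbdev,
                                     struct tagged_addr *phys, size_t nr,
                                     u64 vpfn, unsigned long flags, int as_nr)
    {
        struct kbase_mmu_table mmut;
        int err;

        err = kbase_mmu_init(kbdev, &mmut, NULL /* no context */, 0 /* group */);
        if (err)
            return err;

        /* map 'nr' pages at virtual page frame 'vpfn' */
        err = kbase_mmu_insert_pages(kbdev, &mmut, vpfn, phys, nr,
                                     flags, as_nr, 0 /* group */);
        if (!err)
            err = kbase_mmu_teardown_pages(kbdev, &mmut, vpfn, nr, as_nr);

        kbase_mmu_term(kbdev, &mmut);
        return err;
    }
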
diff --git a/mali_kbase/mali_kbase_mmu_hw.h b/mali_kbase/mmu/mali_kbase_mmu_hw.h
index f49a1d4..e6eef86 100644
--- a/mali_kbase/mali_kbase_mmu_hw.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw.h
@@ -21,14 +21,8 @@
*/
/**
- * @file
- * Interface file for accessing MMU hardware functionality
- */
-
-/**
- * @page mali_kbase_mmu_hw_page MMU hardware interface
+ * DOC: Interface file for accessing MMU hardware functionality
*
- * @section mali_kbase_mmu_hw_intro_sec Introduction
* This module provides an abstraction for accessing the functionality provided
* by the midgard MMU and thus allows all MMU HW access to be contained within
* one common place and allows for different backends (implementations) to
@@ -44,16 +38,7 @@ struct kbase_as;
struct kbase_context;
/**
- * @addtogroup base_kbase_api
- * @{
- */
-
-/**
- * @addtogroup mali_kbase_mmu_hw MMU access APIs
- * @{
- */
-
-/** @brief MMU fault type descriptor.
+ * enum kbase_mmu_fault_type - MMU fault type descriptor.
*/
enum kbase_mmu_fault_type {
KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
@@ -63,62 +48,60 @@ enum kbase_mmu_fault_type {
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED
};
-/** @brief Configure an address space for use.
+/**
+ * kbase_mmu_hw_configure - Configure an address space for use.
+ * @kbdev: kbase device to configure.
+ * @as: address space to configure.
*
* Configure the MMU using the address space details setup in the
- * @ref kbase_context structure.
- *
- * @param[in] kbdev kbase device to configure.
- * @param[in] as address space to configure.
+ * kbase_context structure.
*/
void kbase_mmu_hw_configure(struct kbase_device *kbdev,
struct kbase_as *as);
-/** @brief Issue an operation to the MMU.
+/**
+ * kbase_mmu_hw_do_operation - Issue an operation to the MMU.
+ * @kbdev: kbase device to issue the MMU operation on.
+ * @as: address space to issue the MMU operation on.
+ * @vpfn: MMU Virtual Page Frame Number to start the operation on.
+ * @nr: Number of pages to work on.
+ * @type: Operation type (written to ASn_COMMAND).
+ * @handling_irq: Is this operation being called during the handling
+ * of an interrupt?
*
* Issue an operation (MMU invalidate, MMU flush, etc) on the address space that
- * is associated with the provided @ref kbase_context over the specified range
+ * is associated with the provided kbase_context over the specified range
*
- * @param[in] kbdev kbase device to issue the MMU operation on.
- * @param[in] as address space to issue the MMU operation on.
- * @param[in] vpfn MMU Virtual Page Frame Number to start the
- * operation on.
- * @param[in] nr Number of pages to work on.
- * @param[in] type Operation type (written to ASn_COMMAND).
- * @param[in] handling_irq Is this operation being called during the handling
- * of an interrupt?
- *
- * @return Zero if the operation was successful, non-zero otherwise.
+ * Return: Zero if the operation was successful, non-zero otherwise.
*/
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
u64 vpfn, u32 nr, u32 type,
unsigned int handling_irq);
-/** @brief Clear a fault that has been previously reported by the MMU.
+/**
+ * kbase_mmu_hw_clear_fault - Clear a fault that has been previously reported by
+ * the MMU.
+ * @kbdev: kbase device to clear the fault from.
+ * @as: address space to clear the fault from.
+ * @type: The type of fault that needs to be cleared.
*
* Clear a bus error or page fault that has been reported by the MMU.
- *
- * @param[in] kbdev kbase device to clear the fault from.
- * @param[in] as address space to clear the fault from.
- * @param[in] type The type of fault that needs to be cleared.
*/
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
enum kbase_mmu_fault_type type);
-/** @brief Enable fault that has been previously reported by the MMU.
+/**
+ * kbase_mmu_hw_enable_fault - Enable fault that has been previously reported by
+ * the MMU.
+ * @kbdev: kbase device to again enable the fault from.
+ * @as: address space to again enable the fault from.
+ * @type: The type of fault that needs to be enabled again.
*
* After a page fault or bus error has been reported by the MMU these
* will be disabled. After these are handled this function needs to be
* called to enable the page fault or bus error fault again.
- *
- * @param[in] kbdev kbase device to again enable the fault from.
- * @param[in] as address space to again enable the fault from.
- * @param[in] type The type of fault that needs to be enabled again.
*/
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
enum kbase_mmu_fault_type type);
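
For illustration, a hedged sketch of the clear/re-enable pairing described above, assuming KBASE_MMU_FAULT_TYPE_PAGE is one of the enum kbase_mmu_fault_type values:

/* Hypothetical tail of a page-fault handler: acknowledge the fault, then
 * re-enable the (auto-masked) fault IRQ for this address space.
 */
static void example_finish_page_fault(struct kbase_device *kbdev,
		struct kbase_as *as)
{
	kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE);
	kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE);
}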
-/** @} *//* end group mali_kbase_mmu_hw */
-/** @} *//* end group base_kbase_api */
-
#endif /* _KBASE_MMU_HW_H_ */
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
index 3eb435f..f22e73e 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
@@ -21,11 +21,10 @@
*/
#include <linux/bitops.h>
-
#include <mali_kbase.h>
#include <mali_kbase_mem.h>
-#include <mali_kbase_mmu_hw.h>
-#include <mali_kbase_tracepoints.h>
+#include <mmu/mali_kbase_mmu_hw.h>
+#include <tl/mali_kbase_tracepoints.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <mali_kbase_as_fault_debugfs.h>
@@ -93,7 +92,8 @@ static int wait_ready(struct kbase_device *kbdev,
u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
/* Wait for the MMU status to indicate there is no active command, in
- * case one is pending. Do not log remaining register accesses. */
+ * case one is pending. Do not log remaining register accesses.
+ */
while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
@@ -121,137 +121,6 @@ static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
return status;
}
-static void validate_protected_page_fault(struct kbase_device *kbdev)
-{
- /* GPUs which support (native) protected mode shall not report page
- * fault addresses unless it has protected debug mode and protected
- * debug mode is turned on */
- u32 protected_debug_mode = 0;
-
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
- protected_debug_mode = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
- }
-
- if (!protected_debug_mode) {
- /* fault_addr should never be reported in protected mode.
- * However, we just continue by printing an error message */
- dev_err(kbdev->dev, "Fault address reported in protected mode\n");
- }
-}
-
-void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
-{
- const int num_as = 16;
- const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
- const int pf_shift = 0;
- const unsigned long as_bit_mask = (1UL << num_as) - 1;
- unsigned long flags;
- u32 new_mask;
- u32 tmp;
-
- /* bus faults */
- u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
- /* page faults (note: Ignore ASes with both pf and bf) */
- u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
-
- KBASE_DEBUG_ASSERT(NULL != kbdev);
-
- /* remember current mask */
- spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
- /* mask interrupts for now */
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
- spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
-
- while (bf_bits | pf_bits) {
- struct kbase_as *as;
- int as_no;
- struct kbase_context *kctx;
- struct kbase_fault *fault;
-
- /*
- * the while logic ensures we have a bit set, no need to check
- * for not-found here
- */
- as_no = ffs(bf_bits | pf_bits) - 1;
- as = &kbdev->as[as_no];
-
- /* find the fault type */
- if (bf_bits & (1 << as_no))
- fault = &as->bf_data;
- else
- fault = &as->pf_data;
-
- /*
- * Refcount the kctx ASAP - it shouldn't disappear anyway, since
- * Bus/Page faults _should_ only occur whilst jobs are running,
- * and a job causing the Bus/Page fault shouldn't complete until
- * the MMU is updated
- */
- kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
-
- /* find faulting address */
- fault->addr = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
- AS_FAULTADDRESS_HI));
- fault->addr <<= 32;
- fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
- AS_FAULTADDRESS_LO));
- /* Mark the fault protected or not */
- fault->protected_mode = kbdev->protected_mode;
-
- if (kbdev->protected_mode && fault->addr) {
- /* check if address reporting is allowed */
- validate_protected_page_fault(kbdev);
- }
-
- /* report the fault to debugfs */
- kbase_as_fault_debugfs_new(kbdev, as_no);
-
- /* record the fault status */
- fault->status = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
- AS_FAULTSTATUS));
-
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
- fault->extra_addr = kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
- fault->extra_addr <<= 32;
- fault->extra_addr |= kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));
- }
-
- if (kbase_as_has_bus_fault(as, fault)) {
- /* Mark bus fault as handled.
- * Note that a bus fault is processed first in case
- * where both a bus fault and page fault occur.
- */
- bf_bits &= ~(1UL << as_no);
-
- /* remove the queued BF (and PF) from the mask */
- new_mask &= ~(MMU_BUS_ERROR(as_no) |
- MMU_PAGE_FAULT(as_no));
- } else {
- /* Mark page fault as handled */
- pf_bits &= ~(1UL << as_no);
-
- /* remove the queued PF from the mask */
- new_mask &= ~MMU_PAGE_FAULT(as_no);
- }
-
- /* Process the interrupt for this address space */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
-
- /* reenable interrupts */
- spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
- new_mask |= tmp;
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
- spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
-}
-
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
{
struct kbase_mmu_setup *current_setup = &as->current_setup;
@@ -260,8 +129,9 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
transcfg = current_setup->transcfg;
- /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
- /* Clear PTW_MEMATTR bits */
+ /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK
+ * Clear PTW_MEMATTR bits
+ */
transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
/* Enable correct PTW_MEMATTR bits */
transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
@@ -271,8 +141,9 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
transcfg |= AS_TRANSCFG_R_ALLOCATE;
if (kbdev->system_coherency == COHERENCY_ACE) {
- /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
- /* Clear PTW_SH bits */
+ /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable)
+ * Clear PTW_SH bits
+ */
transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
/* Enable correct PTW_SH bits */
transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
@@ -375,8 +246,9 @@ void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
unsigned long flags;
u32 irq_mask;
- /* Enable the page fault IRQ (and bus fault IRQ as well in case one
- * occurred) */
+ /* Enable the page fault IRQ
+ * (and bus fault IRQ as well in case one occurred)
+ */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
/*
diff --git a/mali_kbase/mmu/mali_kbase_mmu_internal.h b/mali_kbase/mmu/mali_kbase_mmu_internal.h
new file mode 100644
index 0000000..54b0c35
--- /dev/null
+++ b/mali_kbase/mmu/mali_kbase_mmu_internal.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_MMU_INTERNAL_H_
+#define _KBASE_MMU_INTERNAL_H_
+
+void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
+ struct kbase_mmu_setup * const setup);
+
+void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, struct kbase_fault *fault);
+
+void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str,
+ struct kbase_fault *fault);
+
+/**
+ * kbase_mmu_interrupt_process - Process a bus or page fault.
+ * @kbdev: The kbase_device the fault happened on
+ * @kctx: The kbase_context for the faulting address space, if one was found
+ * @as: The address space that has the fault
+ * @fault: Data relating to the fault
+ *
+ * This function will process a fault on a specific address space.
+ */
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_as *as,
+ struct kbase_fault *fault);
+
+#endif /* _KBASE_MMU_INTERNAL_H_ */
diff --git a/mali_kbase/mali_kbase_mmu_mode_aarch64.c b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
index 92cf8a3..02493e9 100644
--- a/mali_kbase/mali_kbase_mmu_mode_aarch64.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
@@ -20,10 +20,11 @@
*
*/
-
#include "mali_kbase.h"
#include <gpu/mali_kbase_gpu_regmap.h>
#include "mali_kbase_defs.h"
+#include <mmu/mali_kbase_mmu.h>
+#include <mmu/mali_kbase_mmu_internal.h>
#define ENTRY_TYPE_MASK 3ULL
/* For valid ATEs bit 1 = ((level == 3) ? 1 : 0).
@@ -68,30 +69,6 @@ static inline void page_table_entry_set(u64 *pte, u64 phy)
#endif
}
-static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
- struct kbase_mmu_setup * const setup)
-{
- /* Set up the required caching policies at the correct indices
- * in the memattr register.
- */
- setup->memattr =
- (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
- (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
- (AS_MEMATTR_FORCE_TO_CACHE_ALL <<
- (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
- (AS_MEMATTR_WRITE_ALLOC <<
- (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
- (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
- (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
- (AS_MEMATTR_AARCH64_OUTER_WA <<
- (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
- (AS_MEMATTR_AARCH64_NON_CACHEABLE <<
- (AS_MEMATTR_INDEX_NON_CACHEABLE * 8));
-
- setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
- setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
-}
-
static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
int as_nr)
{
@@ -104,7 +81,7 @@ static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
as = &kbdev->as[as_nr];
current_setup = &as->current_setup;
- mmu_get_as_setup(mmut, current_setup);
+ kbase_mmu_get_as_setup(mmut, current_setup);
/* Apply the address space setting */
kbase_mmu_hw_configure(kbdev, as);
@@ -206,7 +183,7 @@ static void entry_invalidate(u64 *entry)
static struct kbase_mmu_mode const aarch64_mode = {
.update = mmu_update,
- .get_as_setup = mmu_get_as_setup,
+ .get_as_setup = kbase_mmu_get_as_setup,
.disable_as = mmu_disable_as,
.pte_to_phy_addr = pte_to_phy_addr,
.ate_is_valid = ate_is_valid,
diff --git a/mali_kbase/mali_kbase_mmu_mode_lpae.c b/mali_kbase/mmu/mali_kbase_mmu_mode_lpae.c
index 27c2c86..91a2d7a 100644
--- a/mali_kbase/mali_kbase_mmu_mode_lpae.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_mode_lpae.c
@@ -70,7 +70,8 @@ static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
struct kbase_mmu_setup * const setup)
{
/* Set up the required caching policies at the correct indices
- * in the memattr register. */
+ * in the memattr register.
+ */
setup->memattr =
(AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY <<
(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
diff --git a/mali_kbase/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c b/mali_kbase/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
index 4181b7f..a220a46 100644
--- a/mali_kbase/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
+++ b/mali_kbase/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2020 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -26,6 +26,7 @@
#include "mali_kbase.h"
#include <midgard/backend/gpu/mali_kbase_device_internal.h>
+#include <midgard/backend/gpu/mali_kbase_pm_internal.h>
#include <kutf/kutf_suite.h>
#include <kutf/kutf_utils.h>
@@ -55,10 +56,11 @@ struct kutf_irq_fixture_data {
/* ID for the GPU IRQ */
#define GPU_IRQ_HANDLER 2
-#define NR_TEST_IRQS 1000000
+#define NR_TEST_IRQS ((u32)1000000)
/* IRQ for the test to trigger. Currently MULTIPLE_GPU_FAULTS as we would not
- * expect to see this in normal use (e.g., when Android is running). */
+ * expect to see this in normal use (e.g., when Android is running).
+ */
#define TEST_IRQ MULTIPLE_GPU_FAULTS
#define IRQ_TIMEOUT HZ
@@ -67,7 +69,7 @@ struct kutf_irq_fixture_data {
extern int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
irq_handler_t custom_handler,
int irq_type);
-extern irqreturn_t kbase_gpu_irq_handler(int irq, void *data);
+extern irqreturn_t kbase_gpu_irq_test_handler(int irq, void *data, u32 val);
static DECLARE_WAIT_QUEUE_HEAD(wait);
static bool triggered;
@@ -88,25 +90,30 @@ static void *kbase_untag(void *ptr)
static irqreturn_t kbase_gpu_irq_custom_handler(int irq, void *data)
{
struct kbase_device *kbdev = kbase_untag(data);
- u32 val;
-
- val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
- if (val & TEST_IRQ) {
- struct timespec tval;
+ u32 val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
+ irqreturn_t result;
+ struct timespec tval;
+ bool has_test_irq = val & TEST_IRQ;
+ if (has_test_irq) {
getnstimeofday(&tval);
- irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
+ /* Clear the test source only here */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
+ TEST_IRQ);
+ /* Remove the test IRQ status bit */
+ val = val ^ TEST_IRQ;
+ }
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+ result = kbase_gpu_irq_test_handler(irq, data, val);
+ if (has_test_irq) {
+ irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
triggered = true;
wake_up(&wait);
-
- return IRQ_HANDLED;
+ result = IRQ_HANDLED;
}
- /* Trigger main irq handler */
- return kbase_gpu_irq_handler(irq, data);
+ return result;
}
/**
@@ -173,19 +180,19 @@ static void mali_kutf_irq_latency(struct kutf_context *context)
struct kutf_irq_fixture_data *data = context->fixture;
struct kbase_device *kbdev = data->kbdev;
u64 min_time = U64_MAX, max_time = 0, average_time = 0;
- int i;
- bool test_failed = false;
+ u32 i;
+ const char *results;
/* Force GPU to be powered */
kbase_pm_context_active(kbdev);
+ kbase_pm_wait_for_desired_state(kbdev);
kbase_set_custom_irq_handler(kbdev, kbase_gpu_irq_custom_handler,
GPU_IRQ_HANDLER);
- for (i = 0; i < NR_TEST_IRQS; i++) {
+ for (i = 1; i <= NR_TEST_IRQS; i++) {
struct timespec tval;
u64 start_time;
- int ret;
triggered = false;
getnstimeofday(&tval);
@@ -195,11 +202,9 @@ static void mali_kutf_irq_latency(struct kutf_context *context)
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
TEST_IRQ);
- ret = wait_event_timeout(wait, triggered != false, IRQ_TIMEOUT);
-
- if (ret == 0) {
- kutf_test_fail(context, "Timed out waiting for IRQ\n");
- test_failed = true;
+ if (wait_event_timeout(wait, triggered, IRQ_TIMEOUT) == 0) {
+ /* Wait extra time to see if the IRQ arrives late */
+ wait_event_timeout(wait, triggered, 10 * IRQ_TIMEOUT);
break;
}
@@ -217,14 +222,17 @@ static void mali_kutf_irq_latency(struct kutf_context *context)
kbase_pm_context_idle(kbdev);
- if (!test_failed) {
- const char *results;
-
+ if (i > NR_TEST_IRQS) {
do_div(average_time, NR_TEST_IRQS);
results = kutf_dsprintf(&context->fixture_pool,
"Min latency = %lldns, Max latency = %lldns, Average latency = %lldns\n",
min_time, max_time, average_time);
kutf_test_pass(context, results);
+ } else {
+ results = kutf_dsprintf(&context->fixture_pool,
+ "Timed out for the %u-th IRQ (loop_limit: %u), triggered late: %d\n",
+ i, NR_TEST_IRQS, triggered);
+ kutf_test_fail(context, results);
}
}
diff --git a/mali_kbase/tl/backend/mali_kbase_timeline_jm.c b/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
new file mode 100644
index 0000000..c368ac7
--- /dev/null
+++ b/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
@@ -0,0 +1,97 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "../mali_kbase_tracepoints.h"
+#include "../mali_kbase_timeline.h"
+#include "../mali_kbase_timeline_priv.h"
+
+#include <mali_kbase.h>
+
+void kbase_create_timeline_objects(struct kbase_device *kbdev)
+{
+ unsigned int lpu_id;
+ unsigned int as_nr;
+ struct kbase_context *kctx;
+ struct kbase_timeline *timeline = kbdev->timeline;
+ struct kbase_tlstream *summary =
+ &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
+
+ /* Summarize the LPU objects. */
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ u32 *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ __kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, *lpu);
+ }
+
+ /* Summarize the Address Space objects. */
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
+
+ /* Create GPU object and make it retain all LPUs and address spaces. */
+ __kbase_tlstream_tl_new_gpu(summary,
+ kbdev,
+ kbdev->gpu_props.props.raw_props.gpu_id,
+ kbdev->gpu_props.num_cores);
+
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ void *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ __kbase_tlstream_tl_lifelink_lpu_gpu(summary, lpu, kbdev);
+ }
+
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ __kbase_tlstream_tl_lifelink_as_gpu(summary,
+ &kbdev->as[as_nr],
+ kbdev);
+
+ /* Lock the context list, to ensure no changes to the list are made
+ * while we're summarizing the contexts and their contents.
+ */
+ mutex_lock(&kbdev->kctx_list_lock);
+
+ /* For each context in the device... */
+ list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
+ /* Summarize the context itself */
+ __kbase_tlstream_tl_new_ctx(summary,
+ kctx,
+ kctx->id,
+ (u32)(kctx->tgid));
+ };
+
+ /* Reset body stream buffers while holding the kctx lock.
+ * This ensures we can't fire both summary and normal tracepoints for
+ * the same objects.
+ * If we weren't holding the lock, it's possible that the summarized
+ * objects could have been created, destroyed, or used after we
+ * constructed the summary stream tracepoints, but before we reset
+ * the body stream, resulting in losing those object event tracepoints.
+ */
+ kbase_timeline_streams_body_reset(timeline);
+
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ * Static objects are placed into the summary packet that needs to be
+ * transmitted first. Flush all streams to make it available to
+ * user space.
+ */
+ kbase_timeline_streams_flush(timeline);
+}
\ No newline at end of file
diff --git a/mali_kbase/mali_kbase_timeline.c b/mali_kbase/tl/mali_kbase_timeline.c
index 17470fc..201b30e 100644
--- a/mali_kbase/mali_kbase_timeline.c
+++ b/mali_kbase/tl/mali_kbase_timeline.c
@@ -148,76 +148,6 @@ void kbase_timeline_term(struct kbase_timeline *timeline)
kfree(timeline);
}
-static void kbase_create_timeline_objects(struct kbase_device *kbdev)
-{
- unsigned int lpu_id;
- unsigned int as_nr;
- struct kbase_context *kctx;
- struct kbase_timeline *timeline = kbdev->timeline;
- struct kbase_tlstream *summary =
- &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
-
- /* Summarize the LPU objects. */
- for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- u32 *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- __kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, *lpu);
- }
-
- /* Summarize the Address Space objects. */
- for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
-
- /* Create GPU object and make it retain all LPUs and address spaces. */
- __kbase_tlstream_tl_new_gpu(summary,
- kbdev,
- kbdev->gpu_props.props.raw_props.gpu_id,
- kbdev->gpu_props.num_cores);
-
- for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- void *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- __kbase_tlstream_tl_lifelink_lpu_gpu(summary, lpu, kbdev);
- }
-
- for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- __kbase_tlstream_tl_lifelink_as_gpu(summary,
- &kbdev->as[as_nr],
- kbdev);
-
- /* Lock the context list, to ensure no changes to the list are made
- * while we're summarizing the contexts and their contents.
- */
- mutex_lock(&kbdev->kctx_list_lock);
-
- /* For each context in the device... */
- list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
- /* Summarize the context itself */
- __kbase_tlstream_tl_new_ctx(summary,
- kctx,
- kctx->id,
- (u32)(kctx->tgid));
- };
-
- /* Reset body stream buffers while holding the kctx lock.
- * This ensures we can't fire both summary and normal tracepoints for
- * the same objects.
- * If we weren't holding the lock, it's possible that the summarized
- * objects could have been created, destroyed, or used after we
- * constructed the summary stream tracepoints, but before we reset
- * the body stream, resulting in losing those object event tracepoints.
- */
- kbase_timeline_streams_body_reset(timeline);
-
- mutex_unlock(&kbdev->kctx_list_lock);
-
- /* Static object are placed into summary packet that needs to be
- * transmitted first. Flush all streams to make it available to
- * user space.
- */
- kbase_timeline_streams_flush(timeline);
-}
-
#ifdef CONFIG_MALI_DEVFREQ
static void kbase_tlstream_current_devfreq_target(struct kbase_device *kbdev)
{
diff --git a/mali_kbase/mali_kbase_timeline.h b/mali_kbase/tl/mali_kbase_timeline.h
index d800288..d800288 100644
--- a/mali_kbase/mali_kbase_timeline.h
+++ b/mali_kbase/tl/mali_kbase_timeline.h
diff --git a/mali_kbase/mali_kbase_timeline_io.c b/mali_kbase/tl/mali_kbase_timeline_io.c
index ffcf84a..9a899f2 100644
--- a/mali_kbase/mali_kbase_timeline_io.c
+++ b/mali_kbase/tl/mali_kbase_timeline_io.c
@@ -20,9 +20,9 @@
*
*/
-#include <mali_kbase_timeline_priv.h>
-#include <mali_kbase_tlstream.h>
-#include <mali_kbase_tracepoints.h>
+#include "mali_kbase_timeline_priv.h"
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tracepoints.h"
#include <linux/poll.h>
diff --git a/mali_kbase/mali_kbase_timeline_priv.h b/mali_kbase/tl/mali_kbase_timeline_priv.h
index e4a4a20..d4c4773 100644
--- a/mali_kbase/mali_kbase_timeline_priv.h
+++ b/mali_kbase/tl/mali_kbase_timeline_priv.h
@@ -24,7 +24,7 @@
#define _KBASE_TIMELINE_PRIV_H
#include <mali_kbase.h>
-#include <mali_kbase_tlstream.h>
+#include "mali_kbase_tlstream.h"
#include <linux/timer.h>
#include <linux/atomic.h>
@@ -60,4 +60,6 @@ struct kbase_timeline {
extern const struct file_operations kbasep_tlstream_fops;
+void kbase_create_timeline_objects(struct kbase_device *kbdev);
+
#endif /* _KBASE_TIMELINE_PRIV_H */
diff --git a/mali_kbase/mali_kbase_tl_serialize.h b/mali_kbase/tl/mali_kbase_tl_serialize.h
index 90808ce..90808ce 100644
--- a/mali_kbase/mali_kbase_tl_serialize.h
+++ b/mali_kbase/tl/mali_kbase_tl_serialize.h
diff --git a/mali_kbase/mali_kbase_tlstream.c b/mali_kbase/tl/mali_kbase_tlstream.c
index 2a76bc0..2a76bc0 100644
--- a/mali_kbase/mali_kbase_tlstream.c
+++ b/mali_kbase/tl/mali_kbase_tlstream.c
diff --git a/mali_kbase/mali_kbase_tlstream.h b/mali_kbase/tl/mali_kbase_tlstream.h
index 5797738..5797738 100644
--- a/mali_kbase/mali_kbase_tlstream.h
+++ b/mali_kbase/tl/mali_kbase_tlstream.h
diff --git a/mali_kbase/mali_kbase_trace_defs.h b/mali_kbase/tl/mali_kbase_trace_defs.h
index 77fb818..1ee6a59 100644
--- a/mali_kbase/mali_kbase_trace_defs.h
+++ b/mali_kbase/tl/mali_kbase_trace_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015,2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/mali_kbase/mali_kbase_tracepoints.c b/mali_kbase/tl/mali_kbase_tracepoints.c
index 8232c2d..bae95b4 100644
--- a/mali_kbase/mali_kbase_tracepoints.c
+++ b/mali_kbase/tl/mali_kbase_tracepoints.c
@@ -67,6 +67,11 @@ enum tl_msg_id_obj {
KBASE_TL_EVENT_ATOM_SOFTJOB_START,
KBASE_TL_EVENT_ATOM_SOFTJOB_END,
KBASE_JD_GPU_SOFT_RESET,
+ KBASE_TL_KBASE_NEW_DEVICE,
+ KBASE_TL_KBASE_DEVICE_PROGRAM_CSG,
+ KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG,
+ KBASE_TL_KBASE_NEW_CTX,
+ KBASE_TL_KBASE_DEL_CTX,
KBASE_TL_KBASE_NEW_KCPUQUEUE,
KBASE_TL_KBASE_DEL_KCPUQUEUE,
KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
@@ -268,10 +273,30 @@ enum tl_msg_id_aux {
"gpu soft reset", \
"@p", \
"gpu") \
+ TP_DESC(KBASE_TL_KBASE_NEW_DEVICE, \
+ "New KBase Device", \
+ "@III", \
+ "kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs") \
+ TP_DESC(KBASE_TL_KBASE_DEVICE_PROGRAM_CSG, \
+ "CSG is programmed to a slot", \
+ "@III", \
+ "kbase_device_id,gpu_cmdq_grp_handle,kbase_device_csg_slot_index") \
+ TP_DESC(KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG, \
+ "CSG is deprogrammed from a slot", \
+ "@II", \
+ "kbase_device_id,kbase_device_csg_slot_index") \
+ TP_DESC(KBASE_TL_KBASE_NEW_CTX, \
+ "New KBase Context", \
+ "@II", \
+ "kernel_ctx_id,kbase_device_id") \
+ TP_DESC(KBASE_TL_KBASE_DEL_CTX, \
+ "Delete KBase Context", \
+ "@I", \
+ "kernel_ctx_id") \
TP_DESC(KBASE_TL_KBASE_NEW_KCPUQUEUE, \
"New KCPU Queue", \
- "@ppI", \
- "kcpu_queue,ctx,kcpuq_num_pending_cmds") \
+ "@pII", \
+ "kcpu_queue,kernel_ctx_id,kcpuq_num_pending_cmds") \
TP_DESC(KBASE_TL_KBASE_DEL_KCPUQUEUE, \
"Delete KCPU Queue", \
"@p", \
@@ -1721,16 +1746,150 @@ void __kbase_tlstream_aux_event_job_slot(
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
+void __kbase_tlstream_tl_kbase_new_device(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_gpu_core_count,
+ u32 kbase_device_max_num_csgs)
+{
+ const u32 msg_id = KBASE_TL_KBASE_NEW_DEVICE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kbase_device_id)
+ + sizeof(kbase_device_gpu_core_count)
+ + sizeof(kbase_device_max_num_csgs)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_gpu_core_count, sizeof(kbase_device_gpu_core_count));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_max_num_csgs, sizeof(kbase_device_max_num_csgs));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_program_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 gpu_cmdq_grp_handle,
+ u32 kbase_device_csg_slot_index)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEVICE_PROGRAM_CSG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kbase_device_id)
+ + sizeof(gpu_cmdq_grp_handle)
+ + sizeof(kbase_device_csg_slot_index)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_deprogram_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_csg_slot_index)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kbase_device_id)
+ + sizeof(kbase_device_csg_slot_index)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_new_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id,
+ u32 kbase_device_id)
+{
+ const u32 msg_id = KBASE_TL_KBASE_NEW_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kernel_ctx_id)
+ + sizeof(kbase_device_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_del_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEL_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kernel_ctx_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
void __kbase_tlstream_tl_kbase_new_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue,
- const void *ctx,
+ u32 kernel_ctx_id,
u32 kcpuq_num_pending_cmds)
{
const u32 msg_id = KBASE_TL_KBASE_NEW_KCPUQUEUE;
const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ sizeof(kcpu_queue)
- + sizeof(ctx)
+ + sizeof(kernel_ctx_id)
+ sizeof(kcpuq_num_pending_cmds)
;
char *buffer;
@@ -1744,7 +1903,7 @@ void __kbase_tlstream_tl_kbase_new_kcpuqueue(
pos = kbasep_serialize_bytes(buffer,
pos, &kcpu_queue, sizeof(kcpu_queue));
pos = kbasep_serialize_bytes(buffer,
- pos, &ctx, sizeof(ctx));
+ pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
pos = kbasep_serialize_bytes(buffer,
pos, &kcpuq_num_pending_cmds, sizeof(kcpuq_num_pending_cmds));
diff --git a/mali_kbase/mali_kbase_tracepoints.h b/mali_kbase/tl/mali_kbase_tracepoints.h
index 084ff56..b2c20ae 100644
--- a/mali_kbase/mali_kbase_tracepoints.h
+++ b/mali_kbase/tl/mali_kbase_tracepoints.h
@@ -271,10 +271,31 @@ void __kbase_tlstream_aux_event_job_slot(
u32 slot_nr,
u32 atom_nr,
u32 event);
+void __kbase_tlstream_tl_kbase_new_device(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_gpu_core_count,
+ u32 kbase_device_max_num_csgs);
+void __kbase_tlstream_tl_kbase_device_program_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 gpu_cmdq_grp_handle,
+ u32 kbase_device_csg_slot_index);
+void __kbase_tlstream_tl_kbase_device_deprogram_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_csg_slot_index);
+void __kbase_tlstream_tl_kbase_new_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id,
+ u32 kbase_device_id);
+void __kbase_tlstream_tl_kbase_del_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id);
void __kbase_tlstream_tl_kbase_new_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue,
- const void *ctx,
+ u32 kernel_ctx_id,
u32 kcpuq_num_pending_cmds);
void __kbase_tlstream_tl_kbase_del_kcpuqueue(
struct kbase_tlstream *stream,
@@ -1404,19 +1425,126 @@ struct kbase_tlstream;
} while (0)
/**
+ * KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE -
+ * New KBase Device
+ *
+ * @kbdev: Kbase device
+ * @kbase_device_id: The id of the physical hardware
+ * @kbase_device_gpu_core_count: The number of gpu cores in the physical hardware
+ * @kbase_device_max_num_csgs: The max number of CSGs the physical hardware supports
+ */
+#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
+ kbdev, \
+ kbase_device_id, \
+ kbase_device_gpu_core_count, \
+ kbase_device_max_num_csgs \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_new_device( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kbase_device_id, kbase_device_gpu_core_count, kbase_device_max_num_csgs); \
+ } while (0)
+
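
As a usage illustration only (the call site is not part of this hunk), the macro might be invoked once during device initialisation; gpu_id and num_cores are taken from the GPU properties fields used elsewhere in this patch, and a Job Manager GPU without command stream groups would simply pass 0 for the CSG count:

/* Hypothetical call site, shown only to illustrate the parameters */
KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE(kbdev,
	kbdev->gpu_props.props.raw_props.gpu_id,
	kbdev->gpu_props.num_cores,
	0 /* kbase_device_max_num_csgs: none on a Job Manager GPU */);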
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG -
+ * CSG is programmed to a slot
+ *
+ * @kbdev: Kbase device
+ * @kbase_device_id: The id of the physical hardware
+ * @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
+ * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
+ kbdev, \
+ kbase_device_id, \
+ gpu_cmdq_grp_handle, \
+ kbase_device_csg_slot_index \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_device_program_csg( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kbase_device_id, gpu_cmdq_grp_handle, kbase_device_csg_slot_index); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG -
+ * CSG is deprogrammed from a slot
+ *
+ * @kbdev: Kbase device
+ * @kbase_device_id: The id of the physical hardware
+ * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
+ kbdev, \
+ kbase_device_id, \
+ kbase_device_csg_slot_index \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_device_deprogram_csg( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kbase_device_id, kbase_device_csg_slot_index); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_NEW_CTX -
+ * New KBase Context
+ *
+ * @kbdev: Kbase device
+ * @kernel_ctx_id: Unique ID for the KBase Context
+ * @kbase_device_id: The id of the physical hardware
+ */
+#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
+ kbdev, \
+ kernel_ctx_id, \
+ kbase_device_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_new_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kernel_ctx_id, kbase_device_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEL_CTX -
+ * Delete KBase Context
+ *
+ * @kbdev: Kbase device
+ * @kernel_ctx_id: Unique ID for the KBase Context
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
+ kbdev, \
+ kernel_ctx_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_del_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kernel_ctx_id); \
+ } while (0)
+
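
For illustration, the create/delete tracepoints might be paired at context creation and teardown; both key off the context's kernel_ctx_id (kctx->id) rather than a raw pointer. Using kbdev->id as the device identifier is an assumption here, not confirmed by this patch:

/* Hypothetical call sites, shown only to illustrate the parameter pairing */
KBASE_TLSTREAM_TL_KBASE_NEW_CTX(kbdev, kctx->id, kbdev->id);
/* ... context is live ... */
KBASE_TLSTREAM_TL_KBASE_DEL_CTX(kbdev, kctx->id);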
+/**
* KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE -
* New KCPU Queue
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
- * @ctx: Name of the context object
+ * @kernel_ctx_id: Unique ID for the KBase Context
* @kcpuq_num_pending_cmds: Number of commands already enqueued
* in the KCPU queue
*/
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
kbdev, \
kcpu_queue, \
- ctx, \
+ kernel_ctx_id, \
kcpuq_num_pending_cmds \
) \
do { \
@@ -1424,7 +1552,7 @@ struct kbase_tlstream;
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_kbase_new_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
- kcpu_queue, ctx, kcpuq_num_pending_cmds); \
+ kcpu_queue, kernel_ctx_id, kcpuq_num_pending_cmds); \
} while (0)
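
A hedged sketch of the updated call shape: the owning context is now identified by its kernel_ctx_id instead of a kbase_context pointer. The queue variable and its field names below are placeholders, not driver API:

/* Hypothetical names: 'queue', 'queue->kctx' and 'queue->num_pending_cmds'
 * stand in for whatever the KCPU queue implementation provides.
 */
KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE(kbdev, queue, queue->kctx->id,
	queue->num_pending_cmds);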
/**