summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSidath Senanayake <sidaths@google.com>2017-11-08 13:23:34 +0100
committerSidath Senanayake <sidaths@google.com>2017-11-08 13:23:34 +0100
commitdbd2655766535ffc24e24503a7279f3abfd40d7e (patch)
tree87f651aaa936267783d365eab5502d24f6be2d14
parentc19c62718d90e8efa4675528aae6ab6fde13a12f (diff)
downloadgpu-dbd2655766535ffc24e24503a7279f3abfd40d7e.tar.gz
Mali Bifrost DDK r9p0 KMD
Provenance: b336f554d (collaborate/EAC/b_r9p0) BX304L01B-BU-00000-r9p0-01rel0 BX304L06A-BU-00000-r9p0-01rel0 BX304X07X-BU-00000-r9p0-01rel0 Signed-off-by: Sidath Senanayake <sidaths@google.com> Change-Id: Iaff4ad1413fa7d768d5d250afadbb24d19e1e2e2
-rw-r--r--mali_kbase/Kbuild4
-rw-r--r--mali_kbase/Kconfig16
-rw-r--r--mali_kbase/Makefile4
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_gpu.c7
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_jm_hw.c107
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_js_backend.c20
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_pm_backend.c84
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_pm_driver.c28
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_pm_internal.h21
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa.c27
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa_debugfs.c30
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa_debugfs.h14
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa_simple.c11
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c20
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa_vinstr_common.h18
-rw-r--r--mali_kbase/ipa/mali_kbase_ipa_vinstr_g71.c171
-rw-r--r--mali_kbase/mali_base_hwconfig_features.h150
-rw-r--r--mali_kbase/mali_base_hwconfig_issues.h67
-rw-r--r--mali_kbase/mali_base_kernel.h26
-rw-r--r--mali_kbase/mali_kbase.h5
-rw-r--r--mali_kbase/mali_kbase_config_defaults.h45
-rw-r--r--mali_kbase/mali_kbase_context.c7
-rw-r--r--mali_kbase/mali_kbase_core_linux.c805
-rw-r--r--mali_kbase/mali_kbase_ctx_sched.h2
-rw-r--r--mali_kbase/mali_kbase_debug_mem_view.c2
-rw-r--r--mali_kbase/mali_kbase_defs.h16
-rw-r--r--mali_kbase/mali_kbase_device.c1
-rw-r--r--mali_kbase/mali_kbase_fence.c9
-rw-r--r--mali_kbase/mali_kbase_fence_defs.h7
-rw-r--r--mali_kbase/mali_kbase_gator_api.c4
-rw-r--r--mali_kbase/mali_kbase_gator_hwcnt_names.h2
-rw-r--r--mali_kbase/mali_kbase_gator_hwcnt_names_tkax.h291
-rw-r--r--mali_kbase/mali_kbase_gator_hwcnt_names_tnox.h291
-rw-r--r--mali_kbase/mali_kbase_gpu_id.h5
-rw-r--r--mali_kbase/mali_kbase_gpuprops.c57
-rw-r--r--mali_kbase/mali_kbase_gpuprops.h12
-rw-r--r--mali_kbase/mali_kbase_hw.c46
-rw-r--r--mali_kbase/mali_kbase_ioctl.h10
-rw-r--r--mali_kbase/mali_kbase_jd.c246
-rw-r--r--mali_kbase/mali_kbase_jd_debugfs.c1
-rw-r--r--mali_kbase/mali_kbase_js.c13
-rw-r--r--mali_kbase/mali_kbase_mem.c65
-rw-r--r--mali_kbase/mali_kbase_mem.h17
-rw-r--r--mali_kbase/mali_kbase_mem_linux.c9
-rw-r--r--mali_kbase/mali_kbase_mmu.c47
-rw-r--r--mali_kbase/mali_kbase_replay.c16
-rw-r--r--mali_kbase/mali_kbase_softjobs.c8
-rw-r--r--mali_kbase/mali_kbase_uku.h532
-rw-r--r--mali_kbase/mali_kbase_vinstr.c3
-rw-r--r--mali_kbase/mali_kbase_vinstr.h26
-rw-r--r--mali_kbase/mali_midg_regmap.h8
-rw-r--r--mali_kbase/platform/devicetree/mali_kbase_config_devicetree.c2
-rw-r--r--mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c6
-rw-r--r--mali_kbase/platform/vexpress/mali_kbase_config_vexpress.c15
-rw-r--r--mali_kbase/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c15
-rw-r--r--mali_kbase/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c15
-rw-r--r--mali_kbase/sconscript8
-rw-r--r--mali_kbase/tests/include/kutf/kutf_helpers.h202
-rw-r--r--mali_kbase/tests/include/kutf/kutf_helpers_user.h49
-rw-r--r--mali_kbase/tests/include/kutf/kutf_resultset.h77
-rw-r--r--mali_kbase/tests/include/kutf/kutf_suite.h98
-rw-r--r--mali_kbase/tests/kutf/kutf_helpers.c762
-rw-r--r--mali_kbase/tests/kutf/kutf_helpers_user.c116
-rw-r--r--mali_kbase/tests/kutf/kutf_resultset.c98
-rw-r--r--mali_kbase/tests/kutf/kutf_suite.c596
65 files changed, 1989 insertions, 3503 deletions
diff --git a/mali_kbase/Kbuild b/mali_kbase/Kbuild
index 49d016e..9947768 100644
--- a/mali_kbase/Kbuild
+++ b/mali_kbase/Kbuild
@@ -15,7 +15,7 @@
# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r8p0-01rel0"
+MALI_RELEASE_NAME ?= "r9p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
@@ -33,7 +33,6 @@ MALI_KERNEL_TEST_API ?= 0
MALI_ERROR_INJECT_ON ?= 0
MALI_MOCK_TEST ?= 0
MALI_COVERAGE ?= 0
-MALI_INSTRUMENTATION_LEVEL ?= 0
CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
# This workaround is for what seems to be a compiler bug we observed in
# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling
@@ -55,7 +54,6 @@ DEFINES = \
-DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
-DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \
-DMALI_COVERAGE=$(MALI_COVERAGE) \
- -DMALI_INSTRUMENTATION_LEVEL=$(MALI_INSTRUMENTATION_LEVEL) \
-DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
-DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598)
diff --git a/mali_kbase/Kconfig b/mali_kbase/Kconfig
index dc78f84..bcc2d4a 100644
--- a/mali_kbase/Kconfig
+++ b/mali_kbase/Kconfig
@@ -61,13 +61,13 @@ config MALI_DEVFREQ
config MALI_DMA_FENCE
bool "DMA_BUF fence support for Mali"
- depends on MALI_MIDGARD && !KDS
+ depends on MALI_MIDGARD
default n
help
Support DMA_BUF fences for Mali.
- This option should only be enabled if KDS is not present and
- the Linux Kernel has built in support for DMA_BUF fences.
+ This option should only be enabled if the Linux Kernel has built in
+ support for DMA_BUF fences.
config MALI_PLATFORM_NAME
depends on MALI_MIDGARD
@@ -187,11 +187,15 @@ config MALI_2MB_ALLOC
config MALI_PWRSOFT_765
bool "PWRSOFT-765 ticket"
+ depends on MALI_MIDGARD && MALI_EXPERT
default n
help
- PWRSOFT-765 fixes devfreq cooling devices issues. However, they are
- not merged in mainline kernel yet. So this define helps to guard those
- parts of the code.
+ PWRSOFT-765 fixes devfreq cooling devices issues. The fix was merged
+ in kernel v4.10, however if backported into the kernel then this
+ option must be manually selected.
+
+ If using kernel >= v4.10 then say N, otherwise if devfreq cooling
+ changes have been backported say Y to avoid compilation errors.
source "drivers/gpu/arm/midgard/platform/Kconfig"
source "drivers/gpu/arm/midgard/tests/Kconfig"
diff --git a/mali_kbase/Makefile b/mali_kbase/Makefile
index 26522d5..b7b261a 100644
--- a/mali_kbase/Makefile
+++ b/mali_kbase/Makefile
@@ -19,7 +19,6 @@ KDIR ?= /lib/modules/$(shell uname -r)/build
BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump
KBASE_PATH_RELATIVE = $(CURDIR)
-KDS_PATH_RELATIVE = $(CURDIR)/../../../..
EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers
ifeq ($(MALI_UNIT_TEST), 1)
@@ -31,9 +30,6 @@ ifeq ($(CONFIG_MALI_FPGA_BUS_LOGGER),y)
EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
endif
-# GPL driver supports KDS
-EXTRA_SYMBOLS += $(KDS_PATH_RELATIVE)/drivers/base/kds/Module.symvers
-
# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
all:
$(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
diff --git a/mali_kbase/backend/gpu/mali_kbase_gpu.c b/mali_kbase/backend/gpu/mali_kbase_gpu.c
index 3b78100..6bceba1 100644
--- a/mali_kbase/backend/gpu/mali_kbase_gpu.c
+++ b/mali_kbase/backend/gpu/mali_kbase_gpu.c
@@ -34,6 +34,10 @@ int kbase_backend_early_init(struct kbase_device *kbdev)
if (err)
return err;
+ err = kbase_pm_runtime_init(kbdev);
+ if (err)
+ goto fail_runtime_pm;
+
/* Ensure we can access the GPU registers */
kbase_pm_register_access_enable(kbdev);
@@ -56,6 +60,8 @@ int kbase_backend_early_init(struct kbase_device *kbdev)
fail_pm:
kbase_release_interrupts(kbdev);
fail_interrupts:
+ kbase_pm_runtime_term(kbdev);
+fail_runtime_pm:
kbasep_platform_device_term(kbdev);
return err;
@@ -65,6 +71,7 @@ void kbase_backend_early_term(struct kbase_device *kbdev)
{
kbase_hwaccess_pm_term(kbdev);
kbase_release_interrupts(kbdev);
+ kbase_pm_runtime_term(kbdev);
kbasep_platform_device_term(kbdev);
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index be88ec8..0a2a0b7 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -235,10 +235,8 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
int i;
u32 count = 0;
ktime_t end_timestamp = ktime_get();
- struct kbasep_js_device_data *js_devdata;
KBASE_DEBUG_ASSERT(kbdev);
- js_devdata = &kbdev->js_data;
KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
@@ -756,92 +754,43 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
}
}
-struct zap_reset_data {
- /* The stages are:
- * 1. The timer has never been called
- * 2. The zap has timed out, all slots are soft-stopped - the GPU reset
- * will happen. The GPU has been reset when
- * kbdev->hwaccess.backend.reset_waitq is signalled
- *
- * (-1 - The timer has been cancelled)
- */
- int stage;
- struct kbase_device *kbdev;
- struct hrtimer timer;
- spinlock_t lock; /* protects updates to stage member */
-};
-
-static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
-{
- struct zap_reset_data *reset_data = container_of(timer,
- struct zap_reset_data, timer);
- struct kbase_device *kbdev = reset_data->kbdev;
- unsigned long flags;
-
- spin_lock_irqsave(&reset_data->lock, flags);
-
- if (reset_data->stage == -1)
- goto out;
-
-#if KBASE_GPU_RESET_EN
- if (kbase_prepare_to_reset_gpu(kbdev)) {
- dev_err(kbdev->dev, "Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
- ZAP_TIMEOUT);
- kbase_reset_gpu(kbdev);
- }
-#endif /* KBASE_GPU_RESET_EN */
- reset_data->stage = 2;
-
- out:
- spin_unlock_irqrestore(&reset_data->lock, flags);
-
- return HRTIMER_NORESTART;
-}
-
void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
{
struct kbase_device *kbdev = kctx->kbdev;
- struct zap_reset_data reset_data;
- unsigned long flags;
-
- hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- reset_data.timer.function = zap_timeout_callback;
+ unsigned long timeout = msecs_to_jiffies(ZAP_TIMEOUT);
- spin_lock_init(&reset_data.lock);
+ timeout = wait_event_timeout(kctx->jctx.zero_jobs_wait,
+ kctx->jctx.job_nr == 0, timeout);
- reset_data.kbdev = kbdev;
- reset_data.stage = 1;
+ if (timeout != 0)
+ timeout = wait_event_timeout(
+ kctx->jctx.sched_info.ctx.is_scheduled_wait,
+ !kbase_ctx_flag(kctx, KCTX_SCHEDULED),
+ timeout);
- hrtimer_start(&reset_data.timer, HR_TIMER_DELAY_MSEC(ZAP_TIMEOUT),
- HRTIMER_MODE_REL);
+ /* Neither wait timed out; all done! */
+ if (timeout != 0)
+ goto exit;
- /* Wait for all jobs to finish, and for the context to be not-scheduled
- * (due to kbase_job_zap_context(), we also guarentee it's not in the JS
- * policy queue either */
- wait_event(kctx->jctx.zero_jobs_wait, kctx->jctx.job_nr == 0);
- wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
- !kbase_ctx_flag(kctx, KCTX_SCHEDULED));
-
- spin_lock_irqsave(&reset_data.lock, flags);
- if (reset_data.stage == 1) {
- /* The timer hasn't run yet - so cancel it */
- reset_data.stage = -1;
+#if KBASE_GPU_RESET_EN
+ if (kbase_prepare_to_reset_gpu(kbdev)) {
+ dev_err(kbdev->dev,
+ "Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+ ZAP_TIMEOUT);
+ kbase_reset_gpu(kbdev);
}
- spin_unlock_irqrestore(&reset_data.lock, flags);
- hrtimer_cancel(&reset_data.timer);
-
- if (reset_data.stage == 2) {
- /* The reset has already started.
- * Wait for the reset to complete
- */
- wait_event(kbdev->hwaccess.backend.reset_wait,
- atomic_read(&kbdev->hwaccess.backend.reset_gpu)
- == KBASE_RESET_GPU_NOT_PENDING);
- }
- destroy_hrtimer_on_stack(&reset_data.timer);
+ /* Wait for the reset to complete */
+ wait_event(kbdev->hwaccess.backend.reset_wait,
+ atomic_read(&kbdev->hwaccess.backend.reset_gpu)
+ == KBASE_RESET_GPU_NOT_PENDING);
+#else
+ dev_warn(kbdev->dev,
+ "Jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+ ZAP_TIMEOUT);
+#endif
+exit:
dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
/* Ensure that the signallers of the waitqs have finished */
@@ -874,8 +823,6 @@ int kbase_job_slot_init(struct kbase_device *kbdev)
if (NULL == kbdev->hwaccess.backend.reset_workq)
return -EINVAL;
- KBASE_DEBUG_ASSERT(0 ==
- object_is_on_stack(&kbdev->hwaccess.backend.reset_work));
INIT_WORK(&kbdev->hwaccess.backend.reset_work,
kbasep_reset_timeout_worker);
diff --git a/mali_kbase/backend/gpu/mali_kbase_js_backend.c b/mali_kbase/backend/gpu/mali_kbase_js_backend.c
index a8c1af2..63d048f 100644
--- a/mali_kbase/backend/gpu/mali_kbase_js_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_js_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -26,14 +26,6 @@
#include <backend/gpu/mali_kbase_js_internal.h>
/*
- * Define for when dumping is enabled.
- * This should not be based on the instrumentation level as whether dumping is
- * enabled for a particular level is down to the integrator. However this is
- * being used for now as otherwise the cinstr headers would be needed.
- */
-#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL)
-
-/*
* Hold the runpool_mutex for this
*/
static inline bool timer_callback_should_run(struct kbase_device *kbdev)
@@ -119,7 +111,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
u32 ticks = atom->ticks++;
-#if !CINSTR_DUMPING_ENABLED
+#ifndef CONFIG_MALI_JOB_DUMP
u32 soft_stop_ticks, hard_stop_ticks,
gpu_reset_ticks;
if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
@@ -209,8 +201,8 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
*/
reset_needed = true;
}
-#else /* !CINSTR_DUMPING_ENABLED */
- /* NOTE: During CINSTR_DUMPING_ENABLED, we use
+#else /* !CONFIG_MALI_JOB_DUMP */
+ /* NOTE: During CONFIG_MALI_JOB_DUMP, we use
* the alternate timeouts, which makes the hard-
* stop and GPU reset timeout much longer. We
* also ensure that we don't soft-stop at all.
@@ -219,7 +211,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
/* Job has been scheduled for at least
* js_devdata->soft_stop_ticks. We do
* not soft-stop during
- * CINSTR_DUMPING_ENABLED, however.
+ * CONFIG_MALI_JOB_DUMP, however.
*/
dev_dbg(kbdev->dev, "Soft-stop");
} else if (ticks ==
@@ -248,7 +240,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
*/
reset_needed = true;
}
-#endif /* !CINSTR_DUMPING_ENABLED */
+#endif /* !CONFIG_MALI_JOB_DUMP */
}
}
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index c88b80a..a871eae 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -32,6 +32,57 @@
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+int kbase_pm_runtime_init(struct kbase_device *kbdev)
+{
+ struct kbase_pm_callback_conf *callbacks;
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+ if (callbacks) {
+ kbdev->pm.backend.callback_power_on =
+ callbacks->power_on_callback;
+ kbdev->pm.backend.callback_power_off =
+ callbacks->power_off_callback;
+ kbdev->pm.backend.callback_power_suspend =
+ callbacks->power_suspend_callback;
+ kbdev->pm.backend.callback_power_resume =
+ callbacks->power_resume_callback;
+ kbdev->pm.callback_power_runtime_init =
+ callbacks->power_runtime_init_callback;
+ kbdev->pm.callback_power_runtime_term =
+ callbacks->power_runtime_term_callback;
+ kbdev->pm.backend.callback_power_runtime_on =
+ callbacks->power_runtime_on_callback;
+ kbdev->pm.backend.callback_power_runtime_off =
+ callbacks->power_runtime_off_callback;
+ kbdev->pm.backend.callback_power_runtime_idle =
+ callbacks->power_runtime_idle_callback;
+
+ if (callbacks->power_runtime_init_callback)
+ return callbacks->power_runtime_init_callback(kbdev);
+ else
+ return 0;
+ }
+
+ kbdev->pm.backend.callback_power_on = NULL;
+ kbdev->pm.backend.callback_power_off = NULL;
+ kbdev->pm.backend.callback_power_suspend = NULL;
+ kbdev->pm.backend.callback_power_resume = NULL;
+ kbdev->pm.callback_power_runtime_init = NULL;
+ kbdev->pm.callback_power_runtime_term = NULL;
+ kbdev->pm.backend.callback_power_runtime_on = NULL;
+ kbdev->pm.backend.callback_power_runtime_off = NULL;
+ kbdev->pm.backend.callback_power_runtime_idle = NULL;
+
+ return 0;
+}
+
+void kbase_pm_runtime_term(struct kbase_device *kbdev)
+{
+ if (kbdev->pm.callback_power_runtime_term) {
+ kbdev->pm.callback_power_runtime_term(kbdev);
+ }
+}
+
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
struct kbase_pm_callback_conf *callbacks;
@@ -59,7 +110,6 @@ void kbase_pm_register_access_disable(struct kbase_device *kbdev)
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
int ret = 0;
- struct kbase_pm_callback_conf *callbacks;
KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -81,38 +131,6 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
kbdev->pm.backend.gpu_in_desired_state = true;
init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
- callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
- if (callbacks) {
- kbdev->pm.backend.callback_power_on =
- callbacks->power_on_callback;
- kbdev->pm.backend.callback_power_off =
- callbacks->power_off_callback;
- kbdev->pm.backend.callback_power_suspend =
- callbacks->power_suspend_callback;
- kbdev->pm.backend.callback_power_resume =
- callbacks->power_resume_callback;
- kbdev->pm.callback_power_runtime_init =
- callbacks->power_runtime_init_callback;
- kbdev->pm.callback_power_runtime_term =
- callbacks->power_runtime_term_callback;
- kbdev->pm.backend.callback_power_runtime_on =
- callbacks->power_runtime_on_callback;
- kbdev->pm.backend.callback_power_runtime_off =
- callbacks->power_runtime_off_callback;
- kbdev->pm.backend.callback_power_runtime_idle =
- callbacks->power_runtime_idle_callback;
- } else {
- kbdev->pm.backend.callback_power_on = NULL;
- kbdev->pm.backend.callback_power_off = NULL;
- kbdev->pm.backend.callback_power_suspend = NULL;
- kbdev->pm.backend.callback_power_resume = NULL;
- kbdev->pm.callback_power_runtime_init = NULL;
- kbdev->pm.callback_power_runtime_term = NULL;
- kbdev->pm.backend.callback_power_runtime_on = NULL;
- kbdev->pm.backend.callback_power_runtime_off = NULL;
- kbdev->pm.backend.callback_power_runtime_idle = NULL;
- }
-
/* Initialise the metrics subsystem */
ret = kbasep_pm_metrics_init(kbdev);
if (ret)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index cbc258c..26802e4 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -304,10 +304,8 @@ u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
return kbdev->gpu_props.props.raw_props.shader_present;
case KBASE_PM_CORE_TILER:
return kbdev->gpu_props.props.raw_props.tiler_present;
-#ifdef CONFIG_MALI_CORESTACK
case KBASE_PM_CORE_STACK:
return kbdev->gpu_props.props.raw_props.stack_present;
-#endif /* CONFIG_MALI_CORESTACK */
default:
break;
}
@@ -585,7 +583,7 @@ u64 kbase_pm_core_stack_mask(u64 cores)
if (test_bit(i, (unsigned long *)&cores)) {
/* Every core which ID >= 16 is filled to stacks 4-7
* instead of 0-3 */
- size_t const stack_num = (i > 16) ?
+ size_t const stack_num = (i >= 16) ?
(i % NUM_CORES_PER_STACK) + 4 :
(i % NUM_CORES_PER_STACK);
set_bit(stack_num, (unsigned long *)&stack_mask);
@@ -1229,15 +1227,25 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
- /* Limit read ID width for AXI */
- kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
- kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
+
+ /* Limit read & write ID width for AXI */
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG)) {
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS);
+ kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_ARID_LIMIT & 0x7) <<
+ L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT;
+
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
+ kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_AWID_LIMIT & 0x7) <<
+ L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT;
+ } else {
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
+ kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
- /* Limit write ID width for AXI */
- kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
- kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+ kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
+ }
if (kbdev->system_coherency == COHERENCY_ACE) {
/* Allow memory configuration disparity to be ignored, we
@@ -1297,6 +1305,8 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
}
}
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
+ kbdev->hw_quirks_sc |= SC_TLS_HASH_ENABLE;
if (!kbdev->hw_quirks_jm)
kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
index 6804f45..7b77823 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
@@ -411,14 +411,29 @@ void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
/**
+ * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
+ *
+ * Setup the power management callbacks and initialize/enable the runtime-pm
+ * for the Mali GPU platform device, using the callback function. This must be
+ * called before the kbase_pm_register_access_enable() function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+int kbase_pm_runtime_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_runtime_term(struct kbase_device *kbdev);
+
+/**
* kbase_pm_register_access_enable - Enable access to GPU registers
*
* Enables access to the GPU registers before power management has powered up
* the GPU with kbase_pm_powerup().
*
- * Access to registers should be done using kbase_os_reg_read()/write() at this
- * stage, not kbase_reg_read()/write().
- *
* This results in the power management callbacks provided in the driver
* configuration to get called to turn on power and/or clocks to the GPU. See
* kbase_pm_callback_conf.
diff --git a/mali_kbase/ipa/mali_kbase_ipa.c b/mali_kbase/ipa/mali_kbase_ipa.c
index e7e2299..1450c2c 100644
--- a/mali_kbase/ipa/mali_kbase_ipa.c
+++ b/mali_kbase/ipa/mali_kbase_ipa.c
@@ -390,7 +390,7 @@ static u32 kbase_scale_dynamic_power(const u32 c, const u32 freq,
const u64 v2fc = (u64) c * (u64) v2f;
/* Range: 0 < v2fc / 1000 < 2^13 mW */
- return v2fc / 1000;
+ return div_u64(v2fc, 1000);
}
/**
@@ -419,7 +419,7 @@ u32 kbase_scale_static_power(const u32 c, const u32 voltage)
const u64 v3c_big = (u64) c * (u64) v3;
/* Range: 0 < v3c_big / 1000000 < 2^13 mW */
- return v3c_big / 1000000;
+ return div_u64(v3c_big, 1000000);
}
static struct kbase_ipa_model *get_current_model(struct kbase_device *kbdev)
@@ -455,7 +455,8 @@ static u32 get_static_power_locked(struct kbase_device *kbdev,
return power;
}
-#ifdef CONFIG_MALI_PWRSOFT_765
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static unsigned long kbase_get_static_power(struct devfreq *df,
unsigned long voltage)
#else
@@ -464,7 +465,8 @@ static unsigned long kbase_get_static_power(unsigned long voltage)
{
struct kbase_ipa_model *model;
u32 power = 0;
-#ifdef CONFIG_MALI_PWRSOFT_765
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
#else
struct kbase_device *kbdev = kbase_find_device(-1);
@@ -477,14 +479,16 @@ static unsigned long kbase_get_static_power(unsigned long voltage)
mutex_unlock(&kbdev->ipa.lock);
-#ifndef CONFIG_MALI_PWRSOFT_765
+#if !(defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
kbase_release_device(kbdev);
#endif
return power;
}
-#ifdef CONFIG_MALI_PWRSOFT_765
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static unsigned long kbase_get_dynamic_power(struct devfreq *df,
unsigned long freq,
unsigned long voltage)
@@ -496,7 +500,8 @@ static unsigned long kbase_get_dynamic_power(unsigned long freq,
struct kbase_ipa_model *model;
u32 power_coeff = 0, power = 0;
int err = 0;
-#ifdef CONFIG_MALI_PWRSOFT_765
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
#else
struct kbase_device *kbdev = kbase_find_device(-1);
@@ -517,7 +522,8 @@ static unsigned long kbase_get_dynamic_power(unsigned long freq,
mutex_unlock(&kbdev->ipa.lock);
-#ifndef CONFIG_MALI_PWRSOFT_765
+#if !(defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
kbase_release_device(kbdev);
#endif
@@ -557,7 +563,7 @@ int kbase_get_real_power(struct devfreq *df, u32 *power,
unsigned long total_time = max(status->total_time, 1ul);
u64 busy_time = min(status->busy_time, total_time);
- *power = ((u64) *power * (u64) busy_time) / total_time;
+ *power = div_u64((u64) *power * (u64) busy_time, total_time);
}
*power += get_static_power_locked(kbdev, model, voltage);
@@ -576,7 +582,8 @@ struct devfreq_cooling_power kbase_ipa_power_model_ops = {
#endif
.get_static_power = &kbase_get_static_power,
.get_dynamic_power = &kbase_get_dynamic_power,
-#ifdef CONFIG_MALI_PWRSOFT_765
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
.get_real_power = &kbase_get_real_power,
#endif
};
diff --git a/mali_kbase/ipa/mali_kbase_ipa_debugfs.c b/mali_kbase/ipa/mali_kbase_ipa_debugfs.c
index eafc140..d3ac7c3 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_debugfs.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_debugfs.c
@@ -142,6 +142,12 @@ int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
/* 'name' is stack-allocated for array elements, so copy it into
* heap-allocated storage */
param->name = kstrdup(name, GFP_KERNEL);
+
+ if (!param->name) {
+ kfree(param);
+ return -ENOMEM;
+ }
+
param->addr.voidp = addr;
param->size = size;
param->type = type;
@@ -207,6 +213,30 @@ static void kbase_ipa_model_debugfs_init(struct kbase_ipa_model *model)
}
}
+void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model,
+ const char *name, s32 val)
+{
+ struct kbase_ipa_model_param *param;
+
+ mutex_lock(&model->kbdev->ipa.lock);
+
+ list_for_each_entry(param, &model->params, link) {
+ if (!strcmp(param->name, name)) {
+ if (param->type == PARAM_TYPE_S32) {
+ *param->addr.s32p = val;
+ } else {
+ dev_err(model->kbdev->dev,
+ "Wrong type for %s parameter %s\n",
+ model->ops->name, param->name);
+ }
+ break;
+ }
+ }
+
+ mutex_unlock(&model->kbdev->ipa.lock);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_model_param_set_s32);
+
void kbase_ipa_debugfs_init(struct kbase_device *kbdev)
{
mutex_lock(&kbdev->ipa.lock);
diff --git a/mali_kbase/ipa/mali_kbase_ipa_debugfs.h b/mali_kbase/ipa/mali_kbase_ipa_debugfs.h
index ec06e20..f624de9 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_debugfs.h
+++ b/mali_kbase/ipa/mali_kbase_ipa_debugfs.h
@@ -31,6 +31,20 @@ int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
enum kbase_ipa_model_param_type type);
void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model);
+/**
+ * kbase_ipa_model_param_set_s32 - Set an integer model parameter
+ *
+ * @model: pointer to IPA model
+ * @name: name of corresponding debugfs entry
+ * @val: new value of the parameter
+ *
+ * This function is only exposed for use by unit tests running in
+ * kernel space. Normally it is expected that parameter values will
+ * instead be set via debugfs.
+ */
+void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model,
+ const char *name, s32 val);
+
#else /* CONFIG_DEBUG_FS */
static inline int kbase_ipa_model_param_add(struct kbase_ipa_model *model,
diff --git a/mali_kbase/ipa/mali_kbase_ipa_simple.c b/mali_kbase/ipa/mali_kbase_ipa_simple.c
index de6c3e6..70e08b3 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_simple.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_simple.c
@@ -46,7 +46,7 @@ static int dummy_temp;
static int kbase_simple_power_model_get_dummy_temp(
struct thermal_zone_device *tz,
- int *dummy_temp)
+ int *temp)
{
*temp = ACCESS_ONCE(dummy_temp);
return 0;
@@ -113,10 +113,10 @@ struct kbase_ipa_model_simple_data {
static u32 calculate_temp_scaling_factor(s32 ts[4], s64 t)
{
/* Range: -2^24 < t2 < 2^24 m(Deg^2) */
- const s64 t2 = (t * t) / 1000;
+ const s64 t2 = div_s64((t * t), 1000);
/* Range: -2^31 < t3 < 2^31 m(Deg^3) */
- const s64 t3 = (t * t2) / 1000;
+ const s64 t3 = div_s64((t * t2), 1000);
/*
* Sum the parts. t^[1-3] are in m(Deg^N), but the coefficients are in
@@ -129,7 +129,7 @@ static u32 calculate_temp_scaling_factor(s32 ts[4], s64 t)
+ ts[0] * 1000; /* +/- 2^41 */
/* Range: -2^60 < res_unclamped < 2^60 */
- s64 res_unclamped = res_big / 1000;
+ s64 res_unclamped = div_s64(res_big, 1000);
/* Clamp to range of 0x to 10x the static power */
return clamp(res_unclamped, (s64) 0, (s64) 10000000);
@@ -192,7 +192,7 @@ static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp)
* 0 <= static_coefficient < 2^28.
*/
coeff_big = (u64) model_data->static_coefficient * (u64) temp_scaling_factor;
- *coeffp = coeff_big / 1000000;
+ *coeffp = div_u64(coeff_big, 1000000);
return 0;
}
@@ -320,3 +320,4 @@ struct kbase_ipa_model_ops kbase_simple_ipa_model_ops = {
.get_static_coeff = &model_static_coeff,
.do_utilization_scaling_in_framework = true,
};
+KBASE_EXPORT_TEST_API(kbase_simple_ipa_model_ops);
diff --git a/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c b/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c
index d3964d0..9b9fa0e 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.c
@@ -24,6 +24,8 @@ static ktime_t dummy_time;
#ifdef ktime_get
#undef ktime_get
#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
#define ktime_get() (ACCESS_ONCE(dummy_time))
void kbase_ipa_set_dummy_time(ktime_t t)
@@ -31,6 +33,16 @@ void kbase_ipa_set_dummy_time(ktime_t t)
ACCESS_ONCE(dummy_time) = t;
}
KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time);
+#else
+#define ktime_get() (READ_ONCE(dummy_time))
+
+void kbase_ipa_set_dummy_time(ktime_t t)
+{
+ WRITE_ONCE(dummy_time, t);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time);
+
+#endif
#endif /* MALI_UNIT_TEST */
@@ -85,7 +97,7 @@ s64 kbase_ipa_sum_all_shader_cores(
/* Range: -2^54 < ret < 2^54 */
ret *= coeff;
- return ret / 1000000;
+ return div_s64(ret, 1000000);
}
s64 kbase_ipa_single_counter(
@@ -99,7 +111,7 @@ s64 kbase_ipa_single_counter(
const s64 multiplied = (s64) counter_value * (s64) coeff;
/* Range: -2^29 < return < 2^29 */
- return multiplied / 1000000;
+ return div_s64(multiplied, 1000000);
}
int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
@@ -173,7 +185,7 @@ int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
s32 coeff, group_energy;
coeff = model_data->group_values[i];
- group_energy = group->op(model_data, coeff, group->counter);
+ group_energy = group->op(model_data, coeff, group->counter_block_offset);
energy = kbase_ipa_add_saturate(energy, group_energy);
}
@@ -199,7 +211,7 @@ int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
/* Range: 2^20 < num_cycles < 2^40 mCycles */
num_cycles = (u64) current_freq * (u64) time_since_last_sample_ms;
/* Range: 2^10 < num_cycles < 2^30 Cycles */
- num_cycles /= 1000000;
+ num_cycles = div_u64(num_cycles, 1000000);
/* num_cycles should never be 0 in _normal_ usage (because we expect
* frequencies on the order of MHz and >10ms polling intervals), but
diff --git a/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.h b/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.h
index 25b36c8..d212c87 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.h
+++ b/mali_kbase/ipa/mali_kbase_ipa_vinstr_common.h
@@ -69,21 +69,22 @@ struct kbase_ipa_model_vinstr_data {
* Coefficients are interpreted as fractions where the
* denominator is 1000000.
* @op: which operation to be performed on the counter values
- * @counter: counter used to calculate energy for IPA group
+ * @counter_block_offset: block offset in bytes of the counter used to calculate energy for IPA group
*/
struct kbase_ipa_group {
char name[KBASE_IPA_MAX_GROUP_NAME_LEN + 1];
s32 default_value;
s64 (*op)(struct kbase_ipa_model_vinstr_data *, s32, u32);
- u32 counter;
+ u32 counter_block_offset;
};
-/*
+/**
* sum_all_shader_cores() - sum a counter over all cores
* @model_data pointer to model data
* @coeff model coefficient. Unity is ~2^20, so range approx
* +/- 4.0: -2^22 < coeff < 2^22
-
+ * @counter offset in bytes of the counter used to calculate energy for IPA group
+ *
* Calculate energy estimation based on hardware counter `counter'
* across all shader cores.
*
@@ -93,12 +94,13 @@ s64 kbase_ipa_sum_all_shader_cores(
struct kbase_ipa_model_vinstr_data *model_data,
s32 coeff, u32 counter);
-/*
+/**
* sum_single_counter() - sum a single counter
* @model_data pointer to model data
* @coeff model coefficient. Unity is ~2^20, so range approx
* +/- 4.0: -2^22 < coeff < 2^22
-
+ * @counter offset in bytes of the counter used to calculate energy for IPA group
+ *
* Calculate energy estimation based on hardware counter `counter'.
*
* Return: Counter value. Range: -2^34 < ret < 2^34
@@ -107,7 +109,7 @@ s64 kbase_ipa_single_counter(
struct kbase_ipa_model_vinstr_data *model_data,
s32 coeff, u32 counter);
-/*
+/**
* attach_vinstr() - attach a vinstr_buffer to an IPA model.
* @model_data pointer to model data
*
@@ -119,7 +121,7 @@ s64 kbase_ipa_single_counter(
*/
int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
-/*
+/**
* detach_vinstr() - detach a vinstr_buffer from an IPA model.
* @model_data pointer to model data
*
diff --git a/mali_kbase/ipa/mali_kbase_ipa_vinstr_g71.c b/mali_kbase/ipa/mali_kbase_ipa_vinstr_g71.c
index 81f6fdd..4e4c059 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_vinstr_g71.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_vinstr_g71.c
@@ -21,54 +21,169 @@
#include "mali_kbase_ipa_debugfs.h"
-#define JM_BASE (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define TILER_BASE (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define MMU_BASE (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define SC0_BASE (3 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-
-#define GPU_ACTIVE (JM_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 6)
-#define TILER_ACTIVE (TILER_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 45)
-#define L2_ANY_LOOKUP (MMU_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 25)
-#define FRAG_ACTIVE (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 4)
-#define EXEC_CORE_ACTIVE (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 26)
-#define EXEC_INSTR_COUNT (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 28)
-#define TEX_COORD_ISSUE (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 40)
-#define VARY_SLOT_32 (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 50)
-#define VARY_SLOT_16 (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 51)
-#define BEATS_RD_LSC (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 56)
-#define BEATS_WR_LSC (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 61)
-#define BEATS_WR_TIB (SC0_BASE + KBASE_IPA_NR_BYTES_PER_CNT * 62)
+/* Performance counter blocks base offsets */
+#define JM_BASE (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define TILER_BASE (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define MEMSYS_BASE (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define SC0_BASE_ONE_MEMSYS (3 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define SC0_BASE_TWO_MEMSYS (4 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+
+/* JM counter block offsets */
+#define JM_GPU_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 6)
+
+/* Tiler counter block offsets */
+#define TILER_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 45)
+
+/* MEMSYS counter block offsets */
+#define MEMSYS_L2_ANY_LOOKUP (KBASE_IPA_NR_BYTES_PER_CNT * 25)
+
+/* SC counter block offsets */
+#define SC_FRAG_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 4)
+#define SC_EXEC_CORE_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 26)
+#define SC_EXEC_INSTR_COUNT (KBASE_IPA_NR_BYTES_PER_CNT * 28)
+#define SC_TEX_COORD_ISSUE (KBASE_IPA_NR_BYTES_PER_CNT * 40)
+#define SC_VARY_SLOT_32 (KBASE_IPA_NR_BYTES_PER_CNT * 50)
+#define SC_VARY_SLOT_16 (KBASE_IPA_NR_BYTES_PER_CNT * 51)
+#define SC_BEATS_RD_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 56)
+#define SC_BEATS_WR_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 61)
+#define SC_BEATS_WR_TIB (KBASE_IPA_NR_BYTES_PER_CNT * 62)
+
+/** Maximum number of cores for which a single Memory System block of performance counters is present. */
+#define KBASE_G71_SINGLE_MEMSYS_MAX_NUM_CORES ((u8)4)
+
+
+/**
+ * get_jm_counter() - get performance counter offset inside the Job Manager block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the Job Manager block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g71_power_model_get_jm_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ return JM_BASE + counter_block_offset;
+}
+
+/**
+ * get_memsys_counter() - get peformance counter offset inside the Memory System block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the (first) Memory System block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g71_power_model_get_memsys_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ /* The base address of Memory System performance counters is always the same, although their number
+ * may vary based on the number of cores. For the moment it's ok to return a constant.
+ */
+ return MEMSYS_BASE + counter_block_offset;
+}
+
+/**
+ * get_sc_counter() - get performance counter offset inside the Shader Cores block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the (first) Shader Cores block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g71_power_model_get_sc_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ const u32 sc_base = model_data->kbdev->gpu_props.num_cores <= KBASE_G71_SINGLE_MEMSYS_MAX_NUM_CORES ?
+ SC0_BASE_ONE_MEMSYS :
+ SC0_BASE_TWO_MEMSYS;
+
+ return sc_base + counter_block_offset;
+}
+
+/**
+ * memsys_single_counter() - calculate energy for a single Memory System performance counter.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a single Memory System performance counter.
+ */
+static s64 kbase_g71_memsys_single_counter(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff,
+ u32 counter_block_offset)
+{
+ return kbase_ipa_single_counter(model_data, coeff,
+ kbase_g71_power_model_get_memsys_counter(model_data, counter_block_offset));
+}
+
+/**
+ * sum_all_shader_cores() - calculate energy for a Shader Cores performance counter for all cores.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a Shader Cores performance counter for all cores.
+ */
+static s64 kbase_g71_sum_all_shader_cores(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff,
+ u32 counter_block_offset)
+{
+ return kbase_ipa_sum_all_shader_cores(model_data, coeff,
+ kbase_g71_power_model_get_sc_counter(model_data, counter_block_offset));
+}
+
+/**
+ * jm_single_counter() - calculate energy for a single Job Manager performance counter.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a single Job Manager performance counter.
+ */
+static s64 kbase_g71_jm_single_counter(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff,
+ u32 counter_block_offset)
+{
+ return kbase_ipa_single_counter(model_data, coeff,
+ kbase_g71_power_model_get_jm_counter(model_data, counter_block_offset));
+}
+/** Table of IPA group definitions.
+ *
+ * For each IPA group, this table defines a function to access the given performance block counter (or counters,
+ * if the operation needs to be iterated on multiple blocks) and calculate energy estimation.
+ */
static const struct kbase_ipa_group ipa_groups_def[] = {
{
.name = "l2_access",
.default_value = 526300,
- .op = kbase_ipa_single_counter,
- .counter = L2_ANY_LOOKUP,
+ .op = kbase_g71_memsys_single_counter,
+ .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
},
{
.name = "exec_instr_count",
.default_value = 301100,
- .op = kbase_ipa_sum_all_shader_cores,
- .counter = EXEC_INSTR_COUNT,
+ .op = kbase_g71_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_COUNT,
},
{
.name = "tex_issue",
.default_value = 197400,
- .op = kbase_ipa_sum_all_shader_cores,
- .counter = TEX_COORD_ISSUE,
+ .op = kbase_g71_sum_all_shader_cores,
+ .counter_block_offset = SC_TEX_COORD_ISSUE,
},
{
.name = "tile_wb",
.default_value = -156400,
- .op = kbase_ipa_sum_all_shader_cores,
- .counter = BEATS_WR_TIB,
+ .op = kbase_g71_sum_all_shader_cores,
+ .counter_block_offset = SC_BEATS_WR_TIB,
},
{
.name = "gpu_active",
.default_value = 115800,
- .op = kbase_ipa_single_counter,
- .counter = GPU_ACTIVE,
+ .op = kbase_g71_jm_single_counter,
+ .counter_block_offset = JM_GPU_ACTIVE,
},
};
@@ -99,7 +214,7 @@ static int kbase_g71_power_model_init(struct kbase_ipa_model *model)
goto exit;
}
- model_data->scaling_factor = 15000;
+ model_data->scaling_factor = 5;
err = kbase_ipa_model_add_param_s32(model, "scale",
&model_data->scaling_factor,
1, false);
diff --git a/mali_kbase/mali_base_hwconfig_features.h b/mali_kbase/mali_base_hwconfig_features.h
index 219586d..c077461 100644
--- a/mali_kbase/mali_base_hwconfig_features.h
+++ b/mali_kbase/mali_base_hwconfig_features.h
@@ -53,6 +53,8 @@ enum base_hw_feature {
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_TLS_HASHING,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+ BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
BASE_HW_FEATURE_END
};
@@ -63,6 +65,7 @@ static const enum base_hw_feature base_hw_features_generic[] = {
static const enum base_hw_feature base_hw_features_t60x[] = {
BASE_HW_FEATURE_LD_ST_LEA_TEX,
BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_V4,
BASE_HW_FEATURE_END
};
@@ -71,6 +74,7 @@ static const enum base_hw_feature base_hw_features_t62x[] = {
BASE_HW_FEATURE_LD_ST_LEA_TEX,
BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_V4,
BASE_HW_FEATURE_END
};
@@ -82,6 +86,7 @@ static const enum base_hw_feature base_hw_features_t72x[] = {
BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
BASE_HW_FEATURE_WARPING,
BASE_HW_FEATURE_V4,
@@ -103,6 +108,7 @@ static const enum base_hw_feature base_hw_features_t76x[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_END
};
@@ -123,6 +129,7 @@ static const enum base_hw_feature base_hw_features_tFxx[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_END
};
@@ -145,6 +152,7 @@ static const enum base_hw_feature base_hw_features_t83x[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_END
};
@@ -167,6 +175,7 @@ static const enum base_hw_feature base_hw_features_t82x[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_END
};
@@ -189,6 +198,7 @@ static const enum base_hw_feature base_hw_features_tMIx[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
@@ -214,6 +224,7 @@ static const enum base_hw_feature base_hw_features_tHEx[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
@@ -241,6 +252,7 @@ static const enum base_hw_feature base_hw_features_tSIx[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
@@ -268,6 +280,7 @@ static const enum base_hw_feature base_hw_features_tDVx[] = {
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_T7XX_PAIRING_RULES,
BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
BASE_HW_FEATURE_FLUSH_REDUCTION,
BASE_HW_FEATURE_PROTECTED_MODE,
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
@@ -275,8 +288,145 @@ static const enum base_hw_feature base_hw_features_tDVx[] = {
BASE_HW_FEATURE_END
};
+static const enum base_hw_feature base_hw_features_tNOx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_TLS_HASHING,
+ BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tGOx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_TLS_HASHING,
+ BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+ BASE_HW_FEATURE_END
+};
+static const enum base_hw_feature base_hw_features_tKAx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_END
+};
+static const enum base_hw_feature base_hw_features_tTRx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_END
+};
+static const enum base_hw_feature base_hw_features_tBOx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_AARCH64_MMU,
+ BASE_HW_FEATURE_END
+};
#endif /* _BASE_HWCONFIG_FEATURES_H_ */
diff --git a/mali_kbase/mali_base_hwconfig_issues.h b/mali_kbase/mali_base_hwconfig_issues.h
index 1c5ee49..58710f6 100644
--- a/mali_kbase/mali_base_hwconfig_issues.h
+++ b/mali_kbase/mali_base_hwconfig_issues.h
@@ -1115,12 +1115,79 @@ static const enum base_hw_issue base_hw_issues_model_tDVx[] = {
BASE_HW_ISSUE_END
};
+static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_model_tNOx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_tKAx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_model_tKAx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_tBOx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
+static const enum base_hw_issue base_hw_issues_model_tBOx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_END
+};
#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/mali_kbase/mali_base_kernel.h b/mali_kbase/mali_base_kernel.h
index 6f5c68e..94d1d74 100644
--- a/mali_kbase/mali_base_kernel.h
+++ b/mali_kbase/mali_base_kernel.h
@@ -24,12 +24,6 @@
#ifndef _BASE_KERNEL_H_
#define _BASE_KERNEL_H_
-/* Support UK10_2 IOCTLS */
-#define BASE_LEGACY_UK10_2_SUPPORT 1
-
-/* Support UK10_4 IOCTLS */
-#define BASE_LEGACY_UK10_4_SUPPORT 1
-
typedef struct base_mem_handle {
struct {
u64 handle;
@@ -52,7 +46,7 @@ typedef struct base_mem_handle {
#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
-#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
#define BASE_MAX_COHERENT_GROUPS 16
@@ -1403,7 +1397,7 @@ typedef struct base_dump_cpu_gpu_counters {
* @{
*/
-#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
#define BASE_MAX_COHERENT_GROUPS 16
@@ -1599,7 +1593,7 @@ struct gpu_raw_gpu_props {
u32 js_present;
u32 js_features[GPU_MAX_JOB_SLOTS];
u32 tiler_features;
- u32 texture_features[3];
+ u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
u32 gpu_id;
@@ -1766,20 +1760,6 @@ typedef struct base_jd_replay_payload {
base_jd_core_req fragment_core_req;
} base_jd_replay_payload;
-#ifdef BASE_LEGACY_UK10_2_SUPPORT
-typedef struct base_jd_replay_payload_uk10_2 {
- u64 tiler_jc_list;
- u64 fragment_jc;
- u64 tiler_heap_free;
- u16 fragment_hierarchy_mask;
- u16 tiler_hierarchy_mask;
- u32 hierarchy_default_weight;
- u16 tiler_core_req;
- u16 fragment_core_req;
- u8 padding[4];
-} base_jd_replay_payload_uk10_2;
-#endif /* BASE_LEGACY_UK10_2_SUPPORT */
-
/**
* @brief An entry in the linked list of job chains to be replayed. This must
* be in GPU memory.
diff --git a/mali_kbase/mali_kbase.h b/mali_kbase/mali_kbase.h
index 56b364e..11113a9 100644
--- a/mali_kbase/mali_kbase.h
+++ b/mali_kbase/mali_kbase.h
@@ -45,7 +45,6 @@
#include <linux/workqueue.h>
#include "mali_base_kernel.h"
-#include <mali_kbase_uku.h>
#include <mali_kbase_linux.h>
/*
@@ -222,10 +221,6 @@ int kbase_device_trace_buffer_install(
struct kbase_context *kctx, u32 *tb, size_t size);
void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);
-/* api to be ported per OS, only need to do the raw register access */
-void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
-u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);
-
void kbasep_as_do_poke(struct work_struct *work);
/** Returns the name associated with a Mali exception code
diff --git a/mali_kbase/mali_kbase_config_defaults.h b/mali_kbase/mali_kbase_config_defaults.h
index 69079e7..aee6b0c 100644
--- a/mali_kbase/mali_kbase_config_defaults.h
+++ b/mali_kbase/mali_kbase_config_defaults.h
@@ -74,6 +74,35 @@ enum {
KBASE_AID_4 = 0x1
};
+enum {
+ /**
+ * Use unrestricted Address ID width on the AXI bus.
+ * Restricting ID width will reduce performance & bus load due to GPU.
+ */
+ KBASE_3BIT_AID_32 = 0x0,
+
+ /* Restrict GPU to 7/8 of maximum Address ID count. */
+ KBASE_3BIT_AID_28 = 0x1,
+
+ /* Restrict GPU to 3/4 of maximum Address ID count. */
+ KBASE_3BIT_AID_24 = 0x2,
+
+ /* Restrict GPU to 5/8 of maximum Address ID count. */
+ KBASE_3BIT_AID_20 = 0x3,
+
+ /* Restrict GPU to 1/2 of maximum Address ID count. */
+ KBASE_3BIT_AID_16 = 0x4,
+
+ /* Restrict GPU to 3/8 of maximum Address ID count. */
+ KBASE_3BIT_AID_12 = 0x5,
+
+ /* Restrict GPU to 1/4 of maximum Address ID count. */
+ KBASE_3BIT_AID_8 = 0x6,
+
+ /* Restrict GPU to 1/8 of maximum Address ID count. */
+ KBASE_3BIT_AID_4 = 0x7
+};
+
/**
* Default setting for read Address ID limiting on AXI bus.
*
@@ -101,6 +130,22 @@ enum {
#define DEFAULT_AWID_LIMIT KBASE_AID_32
/**
+ * Default setting for read Address ID limiting on AXI bus.
+ *
+ * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_3BIT_ARID_LIMIT KBASE_3BIT_AID_32
+
+/**
+ * Default setting for write Address ID limiting on AXI.
+ *
+ * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_3BIT_AWID_LIMIT KBASE_3BIT_AID_32
+
+/**
* Default UMP device mapping. A UMP_DEVICE_<device>_SHIFT value which
* defines which UMP device this GPU should be mapped to.
*/
diff --git a/mali_kbase/mali_kbase_context.c b/mali_kbase/mali_kbase_context.c
index ad20e61..f43db48 100644
--- a/mali_kbase/mali_kbase_context.c
+++ b/mali_kbase/mali_kbase_context.c
@@ -114,9 +114,6 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
spin_lock_init(&kctx->waiting_soft_jobs_lock);
-#ifdef CONFIG_KDS
- INIT_LIST_HEAD(&kctx->waiting_kds_resource);
-#endif
err = kbase_dma_fence_init(kctx);
if (err)
goto free_event;
@@ -296,8 +293,6 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_jd_exit(kctx);
- kbase_pm_context_idle(kbdev);
-
kbase_dma_fence_term(kctx);
mutex_lock(&kbdev->mmu_hw_mutex);
@@ -318,6 +313,8 @@ void kbase_destroy_context(struct kbase_context *kctx)
WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
vfree(kctx);
+
+ kbase_pm_context_idle(kbdev);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index 3da2b6c..a6b0ac7 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -17,7 +17,6 @@
#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
-#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_gator.h>
#include <mali_kbase_mem_linux.h>
@@ -89,21 +88,12 @@
#define MMU_IRQ_TAG 1
#define GPU_IRQ_TAG 2
-#if MALI_UNIT_TEST
-static struct kbase_exported_test_data shared_kernel_test_data;
-EXPORT_SYMBOL(shared_kernel_test_data);
-#endif /* MALI_UNIT_TEST */
-
static int kbase_dev_nr;
static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);
#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
-static inline void __compile_time_asserts(void)
-{
- CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
-}
static int kbase_api_handshake(struct kbase_context *kctx,
struct kbase_ioctl_version_check *version)
@@ -152,7 +142,9 @@ enum mali_error {
enum {
inited_mem = (1u << 0),
inited_js = (1u << 1),
- inited_pm_runtime_init = (1u << 2),
+ /* Bit number 2 was earlier assigned to the runtime-pm initialization
+ * stage (which has been merged with the backend_early stage).
+ */
#ifdef CONFIG_MALI_DEVFREQ
inited_devfreq = (1u << 3),
#endif /* CONFIG_MALI_DEVFREQ */
@@ -193,726 +185,6 @@ void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
#endif /* CONFIG_MALI_DEBUG */
-/**
- * kbase_legacy_dispatch - UKK dispatch function
- *
- * This is the dispatch function for the legacy UKK ioctl interface. No new
- * ioctls should be added to this function, see kbase_ioctl instead.
- *
- * @kctx: The kernel context structure
- * @args: Pointer to the data structure passed from/to user space
- * @args_size: Size of the data structure
- */
-static int kbase_legacy_dispatch(struct kbase_context *kctx,
- void * const args, u32 args_size)
-{
- struct kbase_device *kbdev;
- union uk_header *ukh = args;
- u32 id;
- int ret = 0;
-
- KBASE_DEBUG_ASSERT(ukh != NULL);
-
- kbdev = kctx->kbdev;
- id = ukh->id;
- ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
-
-#ifdef CONFIG_MALI_DEBUG
- wait_event(kbdev->driver_inactive_wait,
- kbdev->driver_inactive == false);
-#endif /* CONFIG_MALI_DEBUG */
-
- if (UKP_FUNC_ID_CHECK_VERSION == id) {
- struct uku_version_check_args *version_check;
- struct kbase_ioctl_version_check version;
-
- if (args_size != sizeof(struct uku_version_check_args)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- return 0;
- }
- version_check = (struct uku_version_check_args *)args;
- version.minor = version_check->minor;
- version.major = version_check->major;
-
- kbase_api_handshake(kctx, &version);
-
- version_check->minor = version.minor;
- version_check->major = version.major;
- ukh->ret = MALI_ERROR_NONE;
- return 0;
- }
-
- /* block calls until version handshake */
- if (kctx->api_version == 0)
- return -EINVAL;
-
- if (!atomic_read(&kctx->setup_complete)) {
- struct kbase_uk_set_flags *kbase_set_flags;
-
- /* setup pending, try to signal that we'll do the setup,
- * if setup was already in progress, err this call
- */
- if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
- return -EINVAL;
-
- /* if unexpected call, will stay stuck in setup mode
- * (is it the only call we accept?)
- */
- if (id != KBASE_FUNC_SET_FLAGS)
- return -EINVAL;
-
- kbase_set_flags = (struct kbase_uk_set_flags *)args;
-
- /* if not matching the expected call, stay in setup mode */
- if (sizeof(*kbase_set_flags) != args_size)
- goto bad_size;
-
- /* if bad flags, will stay stuck in setup mode */
- if (kbase_context_set_create_flags(kctx,
- kbase_set_flags->create_flags) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
-
- atomic_set(&kctx->setup_complete, 1);
- return 0;
- }
-
- /* setup complete, perform normal operation */
- switch (id) {
- case KBASE_FUNC_MEM_JIT_INIT:
- {
- struct kbase_uk_mem_jit_init *jit_init = args;
-
- if (sizeof(*jit_init) != args_size)
- goto bad_size;
-
- if (kbase_region_tracker_init_jit(kctx,
- jit_init->va_pages))
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_MEM_ALLOC:
- {
- struct kbase_uk_mem_alloc *mem = args;
- struct kbase_va_region *reg;
-
- if (sizeof(*mem) != args_size)
- goto bad_size;
-
-#if defined(CONFIG_64BIT)
- if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
- /* force SAME_VA if a 64-bit client */
- mem->flags |= BASE_MEM_SAME_VA;
- }
-#endif
-
- reg = kbase_mem_alloc(kctx, mem->va_pages,
- mem->commit_pages, mem->extent,
- &mem->flags, &mem->gpu_va);
- mem->va_alignment = 0;
-
- if (!reg)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_MEM_IMPORT: {
- struct kbase_uk_mem_import *mem_import = args;
- void __user *phandle;
-
- if (sizeof(*mem_import) != args_size)
- goto bad_size;
-#ifdef CONFIG_COMPAT
- if (kbase_ctx_flag(kctx, KCTX_COMPAT))
- phandle = compat_ptr(mem_import->phandle);
- else
-#endif
- phandle = u64_to_user_ptr(mem_import->phandle);
-
- if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
-
- if (kbase_mem_import(kctx,
- (enum base_mem_import_type)
- mem_import->type,
- phandle,
- 0,
- &mem_import->gpu_va,
- &mem_import->va_pages,
- &mem_import->flags)) {
- mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- }
- break;
- }
- case KBASE_FUNC_MEM_ALIAS: {
- struct kbase_uk_mem_alias *alias = args;
- struct base_mem_aliasing_info __user *user_ai;
- struct base_mem_aliasing_info *ai;
-
- if (sizeof(*alias) != args_size)
- goto bad_size;
-
- if (alias->nents > 2048) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (!alias->nents) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
-
-#ifdef CONFIG_COMPAT
- if (kbase_ctx_flag(kctx, KCTX_COMPAT))
- user_ai = compat_ptr(alias->ai);
- else
-#endif
- user_ai = u64_to_user_ptr(alias->ai);
-
- ai = vmalloc(sizeof(*ai) * alias->nents);
-
- if (!ai) {
- ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
- break;
- }
-
- if (copy_from_user(ai, user_ai,
- sizeof(*ai) * alias->nents)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- goto copy_failed;
- }
-
- alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
- alias->stride,
- alias->nents, ai,
- &alias->va_pages);
- if (!alias->gpu_va) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- goto no_alias;
- }
-no_alias:
-copy_failed:
- vfree(ai);
- break;
- }
- case KBASE_FUNC_MEM_COMMIT:
- {
- struct kbase_uk_mem_commit *commit = args;
- int ret;
-
- if (sizeof(*commit) != args_size)
- goto bad_size;
-
- ret = kbase_mem_commit(kctx, commit->gpu_addr,
- commit->pages);
-
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- commit->result_subcode =
- BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
-
- if (ret == 0) {
- ukh->ret = MALI_ERROR_NONE;
- commit->result_subcode =
- BASE_BACKING_THRESHOLD_OK;
- } else if (ret == -ENOMEM) {
- commit->result_subcode =
- BASE_BACKING_THRESHOLD_ERROR_OOM;
- }
-
- break;
- }
-
- case KBASE_FUNC_MEM_QUERY:
- {
- struct kbase_uk_mem_query *query = args;
-
- if (sizeof(*query) != args_size)
- goto bad_size;
-
- if (kbase_mem_query(kctx, query->gpu_addr,
- query->query, &query->value) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
- break;
- }
- break;
-
- case KBASE_FUNC_MEM_FLAGS_CHANGE:
- {
- struct kbase_uk_mem_flags_change *fc = args;
-
- if (sizeof(*fc) != args_size)
- goto bad_size;
-
- if (kbase_mem_flags_change(kctx, fc->gpu_va,
- fc->flags, fc->mask) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
-
- break;
- }
- case KBASE_FUNC_MEM_FREE:
- {
- struct kbase_uk_mem_free *mem = args;
-
- if (sizeof(*mem) != args_size)
- goto bad_size;
-
- if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
-
- case KBASE_FUNC_JOB_SUBMIT:
- {
- struct kbase_uk_job_submit *job = args;
- char __user *user_buf;
-
- if (sizeof(*job) != args_size)
- goto bad_size;
-
-#ifdef CONFIG_COMPAT
- if (kbase_ctx_flag(kctx, KCTX_COMPAT))
- user_buf = compat_ptr(job->addr);
- else
-#endif
- user_buf = u64_to_user_ptr(job->addr);
-
- if (kbase_jd_submit(kctx, user_buf,
- job->nr_atoms,
- job->stride,
- false) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
-
- case KBASE_FUNC_SYNC:
- {
- struct kbase_uk_sync_now *sn = args;
-
- if (sizeof(*sn) != args_size)
- goto bad_size;
-
- if (kbase_sync_now(kctx, &sn->sset.basep_sset) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
-
- case KBASE_FUNC_DISJOINT_QUERY:
- {
- struct kbase_uk_disjoint_query *dquery = args;
-
- if (sizeof(*dquery) != args_size)
- goto bad_size;
-
- /* Get the disjointness counter value. */
- dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
- break;
- }
-
- case KBASE_FUNC_POST_TERM:
- {
- kbase_event_close(kctx);
- break;
- }
-
- case KBASE_FUNC_HWCNT_SETUP:
- {
- struct kbase_uk_hwcnt_setup *setup = args;
-
- if (sizeof(*setup) != args_size)
- goto bad_size;
-
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
- &kctx->vinstr_cli, setup) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
-
- case KBASE_FUNC_HWCNT_DUMP:
- {
- /* args ignored */
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
- BASE_HWCNT_READER_EVENT_MANUAL) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
-
- case KBASE_FUNC_HWCNT_CLEAR:
- {
- /* args ignored */
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
-
- case KBASE_FUNC_HWCNT_READER_SETUP:
- {
- struct kbase_uk_hwcnt_reader_setup *setup = args;
-
- if (sizeof(*setup) != args_size)
- goto bad_size;
-
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
- setup) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
-
- case KBASE_FUNC_GPU_PROPS_REG_DUMP:
- {
- struct kbase_uk_gpuprops *setup = args;
-
- if (sizeof(*setup) != args_size)
- goto bad_size;
-
- if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_FIND_CPU_OFFSET:
- {
- struct kbase_uk_find_cpu_offset *find = args;
-
- if (sizeof(*find) != args_size)
- goto bad_size;
-
- if (find->gpu_addr & ~PAGE_MASK) {
- dev_warn(kbdev->dev, "kbase_legacy_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
- goto out_bad;
- }
-
- if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- } else {
- int err;
-
- err = kbasep_find_enclosing_cpu_mapping_offset(
- kctx,
- find->cpu_addr,
- find->size,
- &find->offset);
-
- if (err)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- }
- break;
- }
- case KBASE_FUNC_GET_VERSION:
- {
- struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
-
- if (sizeof(*get_version) != args_size)
- goto bad_size;
-
- /* version buffer size check is made in compile time assert */
- memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
- get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
- break;
- }
-
- case KBASE_FUNC_STREAM_CREATE:
- {
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
- struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
-
- if (sizeof(*screate) != args_size)
- goto bad_size;
-
- if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
- /* not NULL terminated */
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
-
- if (kbase_sync_fence_stream_create(screate->name,
- &screate->fd) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
-#else /* CONFIG_SYNC || CONFIG_SYNC_FILE */
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
-#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
- break;
- }
- case KBASE_FUNC_FENCE_VALIDATE:
- {
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
- struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
-
- if (sizeof(*fence_validate) != args_size)
- goto bad_size;
-
- if (kbase_sync_fence_validate(fence_validate->fd) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
-#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
- break;
- }
-
- case KBASE_FUNC_SET_TEST_DATA:
- {
-#if MALI_UNIT_TEST
- struct kbase_uk_set_test_data *set_data = args;
-
- shared_kernel_test_data = set_data->test_data;
- shared_kernel_test_data.kctx = (uintptr_t)kctx;
- shared_kernel_test_data.mm = (uintptr_t)current->mm;
- ukh->ret = MALI_ERROR_NONE;
-#endif /* MALI_UNIT_TEST */
- break;
- }
-
- case KBASE_FUNC_INJECT_ERROR:
- {
-#ifdef CONFIG_MALI_ERROR_INJECT
- unsigned long flags;
- struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
-
- /*mutex lock */
- spin_lock_irqsave(&kbdev->reg_op_lock, flags);
- if (job_atom_inject_error(&params) != 0)
- ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
- else
- ukh->ret = MALI_ERROR_NONE;
- spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
- /*mutex unlock */
-#endif /* CONFIG_MALI_ERROR_INJECT */
- break;
- }
-
- case KBASE_FUNC_MODEL_CONTROL:
- {
-#ifdef CONFIG_MALI_NO_MALI
- unsigned long flags;
- struct kbase_model_control_params params =
- ((struct kbase_uk_model_control_params *)args)->params;
-
- /*mutex lock */
- spin_lock_irqsave(&kbdev->reg_op_lock, flags);
- if (gpu_model_control(kbdev->model, &params) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
- spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
- /*mutex unlock */
-#endif /* CONFIG_MALI_NO_MALI */
- break;
- }
-
- case KBASE_FUNC_GET_PROFILING_CONTROLS:
- {
- struct kbase_uk_profiling_controls *controls =
- (struct kbase_uk_profiling_controls *)args;
- u32 i;
-
- if (sizeof(*controls) != args_size)
- goto bad_size;
-
- for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
- controls->profiling_controls[i] =
- kbdev->kbase_profiling_controls[i];
-
- break;
- }
-
- /* used only for testing purposes; these controls are to be set by gator through gator API */
- case KBASE_FUNC_SET_PROFILING_CONTROLS:
- {
- struct kbase_uk_profiling_controls *controls =
- (struct kbase_uk_profiling_controls *)args;
- u32 i;
-
- if (sizeof(*controls) != args_size)
- goto bad_size;
-
- for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
- _mali_profiling_control(i, controls->profiling_controls[i]);
-
- break;
- }
-
- case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
- {
- struct kbase_uk_debugfs_mem_profile_add *add_data =
- (struct kbase_uk_debugfs_mem_profile_add *)args;
- char *buf;
- char __user *user_buf;
-
- if (sizeof(*add_data) != args_size)
- goto bad_size;
-
- if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
- dev_err(kbdev->dev, "buffer too big\n");
- goto out_bad;
- }
-
-#ifdef CONFIG_COMPAT
- if (kbase_ctx_flag(kctx, KCTX_COMPAT))
- user_buf = compat_ptr(add_data->buf);
- else
-#endif
- user_buf = u64_to_user_ptr(add_data->buf);
-
- buf = kmalloc(add_data->len, GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(buf))
- goto out_bad;
-
- if (0 != copy_from_user(buf, user_buf, add_data->len)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- kfree(buf);
- goto out_bad;
- }
-
- if (kbasep_mem_profile_debugfs_insert(kctx, buf,
- add_data->len)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- goto out_bad;
- }
-
- break;
- }
-
-#ifdef CONFIG_MALI_NO_MALI
- case KBASE_FUNC_SET_PRFCNT_VALUES:
- {
-
- struct kbase_uk_prfcnt_values *params =
- ((struct kbase_uk_prfcnt_values *)args);
- gpu_model_set_dummy_prfcnt_sample(params->data,
- params->size);
-
- break;
- }
-#endif /* CONFIG_MALI_NO_MALI */
-#ifdef BASE_LEGACY_UK10_4_SUPPORT
- case KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4:
- {
- struct kbase_uk_tlstream_acquire_v10_4 *tlstream_acquire
- = args;
- int ret;
-
- if (sizeof(*tlstream_acquire) != args_size)
- goto bad_size;
-
- ret = kbase_tlstream_acquire(
- kctx, 0);
- if (ret < 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- tlstream_acquire->fd = ret;
- break;
- }
-#endif /* BASE_LEGACY_UK10_4_SUPPORT */
- case KBASE_FUNC_TLSTREAM_ACQUIRE:
- {
- struct kbase_uk_tlstream_acquire *tlstream_acquire =
- args;
- int ret;
-
- if (sizeof(*tlstream_acquire) != args_size)
- goto bad_size;
-
- if (tlstream_acquire->flags & ~BASE_TLSTREAM_FLAGS_MASK)
- goto out_bad;
-
- ret = kbase_tlstream_acquire(
- kctx, tlstream_acquire->flags);
- if (ret < 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- tlstream_acquire->fd = ret;
- break;
- }
- case KBASE_FUNC_TLSTREAM_FLUSH:
- {
- struct kbase_uk_tlstream_flush *tlstream_flush =
- args;
-
- if (sizeof(*tlstream_flush) != args_size)
- goto bad_size;
-
- kbase_tlstream_flush_streams();
- break;
- }
-#if MALI_UNIT_TEST
- case KBASE_FUNC_TLSTREAM_TEST:
- {
- struct kbase_uk_tlstream_test *tlstream_test = args;
-
- if (sizeof(*tlstream_test) != args_size)
- goto bad_size;
-
- kbase_tlstream_test(
- tlstream_test->tpw_count,
- tlstream_test->msg_delay,
- tlstream_test->msg_count,
- tlstream_test->aux_msg);
- break;
- }
- case KBASE_FUNC_TLSTREAM_STATS:
- {
- struct kbase_uk_tlstream_stats *tlstream_stats = args;
-
- if (sizeof(*tlstream_stats) != args_size)
- goto bad_size;
-
- kbase_tlstream_stats(
- &tlstream_stats->bytes_collected,
- &tlstream_stats->bytes_generated);
- break;
- }
-#endif /* MALI_UNIT_TEST */
-
- case KBASE_FUNC_GET_CONTEXT_ID:
- {
- struct kbase_uk_context_id *info = args;
-
- info->id = kctx->id;
- break;
- }
-
- case KBASE_FUNC_SOFT_EVENT_UPDATE:
- {
- struct kbase_uk_soft_event_update *update = args;
-
- if (sizeof(*update) != args_size)
- goto bad_size;
-
- if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
- (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
- (update->flags != 0))
- goto out_bad;
-
- if (kbase_soft_event_update(kctx, update->evt,
- update->new_status))
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
-
- break;
- }
-
- default:
- dev_err(kbdev->dev, "unknown ioctl %u\n", id);
- goto out_bad;
- }
-
- return ret;
-
- bad_size:
- dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
- out_bad:
- return -EINVAL;
-}
-
static struct kbase_device *to_kbase_device(struct device *dev)
{
return dev_get_drvdata(dev);
@@ -1192,33 +464,6 @@ static int kbase_release(struct inode *inode, struct file *filp)
return 0;
}
-#define CALL_MAX_SIZE 536
-
-static long kbase_legacy_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
- u32 size = _IOC_SIZE(cmd);
- struct kbase_context *kctx = filp->private_data;
-
- if (size > CALL_MAX_SIZE)
- return -ENOTTY;
-
- if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
- dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
- return -EFAULT;
- }
-
- if (kbase_legacy_dispatch(kctx, &msg, size) != 0)
- return -EFAULT;
-
- if (0 != copy_to_user((void __user *)arg, &msg, size)) {
- dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
- return -EFAULT;
- }
- return 0;
-}
-
static int kbase_api_set_flags(struct kbase_context *kctx,
struct kbase_ioctl_set_flags *flags)
{
@@ -1697,20 +942,6 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct kbase_device *kbdev = kctx->kbdev;
void __user *uarg = (void __user *)arg;
- /* The UK ioctl values overflow the cmd field causing the type to be
- * incremented
- */
- if (_IOC_TYPE(cmd) == LINUX_UK_BASE_MAGIC+2)
- return kbase_legacy_ioctl(filp, cmd, arg);
-
- /* The UK version check IOCTL doesn't overflow the cmd field, so is
- * handled separately here
- */
- if (cmd == _IOC(_IOC_READ|_IOC_WRITE, LINUX_UK_BASE_MAGIC,
- UKP_FUNC_ID_CHECK_VERSION,
- sizeof(struct uku_version_check_args)))
- return kbase_legacy_ioctl(filp, cmd, arg);
-
/* Only these ioctls are available until setup is complete */
switch (cmd) {
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
@@ -2139,18 +1370,6 @@ static const struct file_operations kbase_fops = {
.get_unmapped_area = kbase_get_unmapped_area,
};
-#ifndef CONFIG_MALI_NO_MALI
-void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
-{
- writel(value, kbdev->reg + offset);
-}
-
-u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
-{
- return readl(kbdev->reg + offset);
-}
-#endif /* !CONFIG_MALI_NO_MALI */
-
/**
* show_policy - Show callback for the power_policy sysfs file.
*
@@ -3170,6 +2389,8 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
.name = "Mali-G72" },
{ .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-G51" },
+ { .id = GPU_ID2_PRODUCT_TNOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TNOx" },
};
const char *product_name = "(Unknown Mali GPU)";
struct kbase_device *kbdev;
@@ -4369,11 +3590,6 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
kbdev->inited_subsys &= ~inited_mem;
}
- if (kbdev->inited_subsys & inited_pm_runtime_init) {
- kbdev->pm.callback_power_runtime_term(kbdev);
- kbdev->inited_subsys &= ~inited_pm_runtime_init;
- }
-
if (kbdev->inited_subsys & inited_ctx_sched) {
kbase_ctx_sched_term(kbdev);
kbdev->inited_subsys &= ~inited_ctx_sched;
@@ -4528,17 +3744,6 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
}
kbdev->inited_subsys |= inited_ctx_sched;
- if (kbdev->pm.callback_power_runtime_init) {
- err = kbdev->pm.callback_power_runtime_init(kbdev);
- if (err) {
- dev_err(kbdev->dev,
- "Runtime PM initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_pm_runtime_init;
- }
-
err = kbase_mem_init(kbdev);
if (err) {
dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
diff --git a/mali_kbase/mali_kbase_ctx_sched.h b/mali_kbase/mali_kbase_ctx_sched.h
index 2330d48..77d2232 100644
--- a/mali_kbase/mali_kbase_ctx_sched.h
+++ b/mali_kbase/mali_kbase_ctx_sched.h
@@ -28,7 +28,7 @@
* The initial implementation of the Context Scheduler does not schedule
* contexts. Instead it relies on the Job Scheduler to make decisions of
* when to schedule/evict contexts if address spaces are starved. In the
- * future, once an interface between the CS and JS have been devised to
+ * future, once an interface between the CS and JS has been devised to
* provide enough information about how each context is consuming GPU resources,
* those decisions can be made in the CS itself, thereby reducing duplicated
* code.
diff --git a/mali_kbase/mali_kbase_debug_mem_view.c b/mali_kbase/mali_kbase_debug_mem_view.c
index aa27156..c65b4df 100644
--- a/mali_kbase/mali_kbase_debug_mem_view.c
+++ b/mali_kbase/mali_kbase_debug_mem_view.c
@@ -299,7 +299,7 @@ void kbase_debug_mem_view_init(struct file *kctx_file)
{
struct kbase_context *kctx = kctx_file->private_data;
- debugfs_create_file("mem_view", S_IRUGO, kctx->kctx_dentry, kctx_file,
+ debugfs_create_file("mem_view", S_IRUSR, kctx->kctx_dentry, kctx_file,
&kbase_debug_mem_view_fops);
}
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index 1ab6425..09415b3 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -34,6 +34,7 @@
#include <mali_kbase_mmu_hw.h>
#include <mali_kbase_instr_defs.h>
#include <mali_kbase_pm.h>
+#include <mali_kbase_gpuprops_types.h>
#include <protected_mode_switcher.h>
#include <linux/atomic.h>
@@ -47,10 +48,6 @@
#endif
-#ifdef CONFIG_KDS
-#include <linux/kds.h>
-#endif /* CONFIG_KDS */
-
#if defined(CONFIG_SYNC)
#include <sync.h>
#else
@@ -440,11 +437,6 @@ struct kbase_jd_atom {
u64 affinity;
u64 jc;
enum kbase_atom_coreref_state coreref_state;
-#ifdef CONFIG_KDS
- struct list_head node;
- struct kds_resource_set *kds_rset;
- bool kds_dep_satisfied;
-#endif /* CONFIG_KDS */
#if defined(CONFIG_SYNC)
/* Stores either an input or output fence, depending on soft-job type */
struct sync_fence *fence;
@@ -655,9 +647,6 @@ struct kbase_jd_context {
u32 *tb;
size_t tb_wrap_offset;
-#ifdef CONFIG_KDS
- struct kds_callback kds_cb;
-#endif /* CONFIG_KDS */
#ifdef CONFIG_GPU_TRACEPOINTS
atomic_t work_id;
#endif
@@ -1420,9 +1409,6 @@ struct kbase_context {
struct list_head waiting_soft_jobs;
spinlock_t waiting_soft_jobs_lock;
-#ifdef CONFIG_KDS
- struct list_head waiting_kds_resource;
-#endif
#ifdef CONFIG_MALI_DMA_FENCE
struct {
struct list_head waiting_resource;
diff --git a/mali_kbase/mali_kbase_device.c b/mali_kbase/mali_kbase_device.c
index d635fcc..717795b 100644
--- a/mali_kbase/mali_kbase_device.c
+++ b/mali_kbase/mali_kbase_device.c
@@ -94,7 +94,6 @@ static int kbase_device_as_init(struct kbase_device *kbdev, int i)
destroy_workqueue(kbdev->as[i].pf_wq);
return -EINVAL;
}
- KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
INIT_WORK(poke_work, kbasep_as_do_poke);
hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
diff --git a/mali_kbase/mali_kbase_fence.c b/mali_kbase/mali_kbase_fence.c
index fcb3733..17b5621 100644
--- a/mali_kbase/mali_kbase_fence.c
+++ b/mali_kbase/mali_kbase_fence.c
@@ -176,8 +176,13 @@ kbase_fence_add_callback(struct kbase_jd_atom *katom,
err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb,
callback);
if (err == -ENOENT) {
- /* Fence signaled, clear the error and return */
- err = 0;
+ /* Fence signaled, get the completion result */
+ err = dma_fence_get_status(fence);
+
+ /* remap success completion to err code */
+ if (err == 1)
+ err = 0;
+
kfree(kbase_fence_cb);
} else if (err) {
kfree(kbase_fence_cb);
diff --git a/mali_kbase/mali_kbase_fence_defs.h b/mali_kbase/mali_kbase_fence_defs.h
index fa2c6df..9e027fc 100644
--- a/mali_kbase/mali_kbase_fence_defs.h
+++ b/mali_kbase/mali_kbase_fence_defs.h
@@ -39,11 +39,18 @@
#define dma_fence_is_signaled(a) fence_is_signaled(a)
#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
+#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0)
#else
#include <linux/dma-fence.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \
+ (a)->status ?: 1 \
+ : 0)
+#endif
+
#endif /* < 4.10.0 */
#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
diff --git a/mali_kbase/mali_kbase_gator_api.c b/mali_kbase/mali_kbase_gator_api.c
index 860e101..fac4d94 100644
--- a/mali_kbase/mali_kbase_gator_api.c
+++ b/mali_kbase/mali_kbase_gator_api.c
@@ -68,6 +68,10 @@ const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters)
hardware_counters = hardware_counters_mali_tSIx;
count = ARRAY_SIZE(hardware_counters_mali_tSIx);
break;
+ case GPU_ID2_PRODUCT_TNOX:
+ hardware_counters = hardware_counters_mali_tNOx;
+ count = ARRAY_SIZE(hardware_counters_mali_tNOx);
+ break;
default:
hardware_counters = NULL;
count = 0;
diff --git a/mali_kbase/mali_kbase_gator_hwcnt_names.h b/mali_kbase/mali_kbase_gator_hwcnt_names.h
index 24103e2..6fe6530 100644
--- a/mali_kbase/mali_kbase_gator_hwcnt_names.h
+++ b/mali_kbase/mali_kbase_gator_hwcnt_names.h
@@ -2162,6 +2162,8 @@ static const char * const hardware_counters_mali_t88x[] = {
#include "mali_kbase_gator_hwcnt_names_tsix.h"
+#include "mali_kbase_gator_hwcnt_names_tnox.h"
+#include "mali_kbase_gator_hwcnt_names_tkax.h"
#endif
diff --git a/mali_kbase/mali_kbase_gator_hwcnt_names_tkax.h b/mali_kbase/mali_kbase_gator_hwcnt_names_tkax.h
new file mode 100644
index 0000000..a131d45
--- /dev/null
+++ b/mali_kbase/mali_kbase_gator_hwcnt_names_tkax.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TKAX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TKAX_H_
+
+static const char * const hardware_counters_mali_tKAx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "TKAx_MESSAGES_SENT",
+ "TKAx_MESSAGES_RECEIVED",
+ "TKAx_GPU_ACTIVE",
+ "TKAx_IRQ_ACTIVE",
+ "TKAx_JS0_JOBS",
+ "TKAx_JS0_TASKS",
+ "TKAx_JS0_ACTIVE",
+ "",
+ "TKAx_JS0_WAIT_READ",
+ "TKAx_JS0_WAIT_ISSUE",
+ "TKAx_JS0_WAIT_DEPEND",
+ "TKAx_JS0_WAIT_FINISH",
+ "TKAx_JS1_JOBS",
+ "TKAx_JS1_TASKS",
+ "TKAx_JS1_ACTIVE",
+ "",
+ "TKAx_JS1_WAIT_READ",
+ "TKAx_JS1_WAIT_ISSUE",
+ "TKAx_JS1_WAIT_DEPEND",
+ "TKAx_JS1_WAIT_FINISH",
+ "TKAx_JS2_JOBS",
+ "TKAx_JS2_TASKS",
+ "TKAx_JS2_ACTIVE",
+ "",
+ "TKAx_JS2_WAIT_READ",
+ "TKAx_JS2_WAIT_ISSUE",
+ "TKAx_JS2_WAIT_DEPEND",
+ "TKAx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "TKAx_TILER_ACTIVE",
+ "TKAx_JOBS_PROCESSED",
+ "TKAx_TRIANGLES",
+ "TKAx_LINES",
+ "TKAx_POINTS",
+ "TKAx_FRONT_FACING",
+ "TKAx_BACK_FACING",
+ "TKAx_PRIM_VISIBLE",
+ "TKAx_PRIM_CULLED",
+ "TKAx_PRIM_CLIPPED",
+ "TKAx_PRIM_SAT_CULLED",
+ "TKAx_BIN_ALLOC_INIT",
+ "TKAx_BIN_ALLOC_OVERFLOW",
+ "TKAx_BUS_READ",
+ "",
+ "TKAx_BUS_WRITE",
+ "TKAx_LOADING_DESC",
+ "TKAx_IDVS_POS_SHAD_REQ",
+ "TKAx_IDVS_POS_SHAD_WAIT",
+ "TKAx_IDVS_POS_SHAD_STALL",
+ "TKAx_IDVS_POS_FIFO_FULL",
+ "TKAx_PREFETCH_STALL",
+ "TKAx_VCACHE_HIT",
+ "TKAx_VCACHE_MISS",
+ "TKAx_VCACHE_LINE_WAIT",
+ "TKAx_VFETCH_POS_READ_WAIT",
+ "TKAx_VFETCH_VERTEX_WAIT",
+ "TKAx_VFETCH_STALL",
+ "TKAx_PRIMASSY_STALL",
+ "TKAx_BBOX_GEN_STALL",
+ "TKAx_IDVS_VBU_HIT",
+ "TKAx_IDVS_VBU_MISS",
+ "TKAx_IDVS_VBU_LINE_DEALLOCATE",
+ "TKAx_IDVS_VAR_SHAD_REQ",
+ "TKAx_IDVS_VAR_SHAD_STALL",
+ "TKAx_BINNER_STALL",
+ "TKAx_ITER_STALL",
+ "TKAx_COMPRESS_MISS",
+ "TKAx_COMPRESS_STALL",
+ "TKAx_PCACHE_HIT",
+ "TKAx_PCACHE_MISS",
+ "TKAx_PCACHE_MISS_STALL",
+ "TKAx_PCACHE_EVICT_STALL",
+ "TKAx_PMGR_PTR_WR_STALL",
+ "TKAx_PMGR_PTR_RD_STALL",
+ "TKAx_PMGR_CMD_WR_STALL",
+ "TKAx_WRBUF_ACTIVE",
+ "TKAx_WRBUF_HIT",
+ "TKAx_WRBUF_MISS",
+ "TKAx_WRBUF_NO_FREE_LINE_STALL",
+ "TKAx_WRBUF_NO_AXI_ID_STALL",
+ "TKAx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "TKAx_UTLB_TRANS",
+ "TKAx_UTLB_TRANS_HIT",
+ "TKAx_UTLB_TRANS_STALL",
+ "TKAx_UTLB_TRANS_MISS_DELAY",
+ "TKAx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "TKAx_FRAG_ACTIVE",
+ "TKAx_FRAG_PRIMITIVES",
+ "TKAx_FRAG_PRIM_RAST",
+ "TKAx_FRAG_FPK_ACTIVE",
+ "TKAx_FRAG_STARVING",
+ "TKAx_FRAG_WARPS",
+ "TKAx_FRAG_PARTIAL_WARPS",
+ "TKAx_FRAG_QUADS_RAST",
+ "TKAx_FRAG_QUADS_EZS_TEST",
+ "TKAx_FRAG_QUADS_EZS_UPDATE",
+ "TKAx_FRAG_QUADS_EZS_KILL",
+ "TKAx_FRAG_LZS_TEST",
+ "TKAx_FRAG_LZS_KILL",
+ "",
+ "TKAx_FRAG_PTILES",
+ "TKAx_FRAG_TRANS_ELIM",
+ "TKAx_QUAD_FPK_KILLER",
+ "",
+ "TKAx_COMPUTE_ACTIVE",
+ "TKAx_COMPUTE_TASKS",
+ "TKAx_COMPUTE_WARPS",
+ "TKAx_COMPUTE_STARVING",
+ "TKAx_EXEC_CORE_ACTIVE",
+ "TKAx_EXEC_ACTIVE",
+ "TKAx_EXEC_INSTR_COUNT",
+ "TKAx_EXEC_INSTR_DIVERGED",
+ "TKAx_EXEC_INSTR_STARVING",
+ "TKAx_ARITH_INSTR_SINGLE_FMA",
+ "TKAx_ARITH_INSTR_DOUBLE",
+ "TKAx_ARITH_INSTR_MSG",
+ "TKAx_ARITH_INSTR_MSG_ONLY",
+ "TKAx_TEX_MSGI_NUM_QUADS",
+ "TKAx_TEX_DFCH_NUM_PASSES",
+ "TKAx_TEX_DFCH_NUM_PASSES_MISS",
+ "TKAx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+ "TKAx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+ "TKAx_TEX_TFCH_NUM_LINES_FETCHED",
+ "TKAx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+ "TKAx_TEX_TFCH_NUM_OPERATIONS",
+ "TKAx_TEX_FILT_NUM_OPERATIONS",
+ "TKAx_LS_MEM_READ_FULL",
+ "TKAx_LS_MEM_READ_SHORT",
+ "TKAx_LS_MEM_WRITE_FULL",
+ "TKAx_LS_MEM_WRITE_SHORT",
+ "TKAx_LS_MEM_ATOMIC",
+ "TKAx_VARY_INSTR",
+ "TKAx_VARY_SLOT_32",
+ "TKAx_VARY_SLOT_16",
+ "TKAx_ATTR_INSTR",
+ "TKAx_ARITH_INSTR_FP_MUL",
+ "TKAx_BEATS_RD_FTC",
+ "TKAx_BEATS_RD_FTC_EXT",
+ "TKAx_BEATS_RD_LSC",
+ "TKAx_BEATS_RD_LSC_EXT",
+ "TKAx_BEATS_RD_TEX",
+ "TKAx_BEATS_RD_TEX_EXT",
+ "TKAx_BEATS_RD_OTHER",
+ "TKAx_BEATS_WR_LSC_WB",
+ "TKAx_BEATS_WR_TIB",
+ "TKAx_BEATS_WR_LSC_OTHER",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "TKAx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "TKAx_L2_RD_MSG_IN",
+ "TKAx_L2_RD_MSG_IN_STALL",
+ "TKAx_L2_WR_MSG_IN",
+ "TKAx_L2_WR_MSG_IN_STALL",
+ "TKAx_L2_SNP_MSG_IN",
+ "TKAx_L2_SNP_MSG_IN_STALL",
+ "TKAx_L2_RD_MSG_OUT",
+ "TKAx_L2_RD_MSG_OUT_STALL",
+ "TKAx_L2_WR_MSG_OUT",
+ "TKAx_L2_ANY_LOOKUP",
+ "TKAx_L2_READ_LOOKUP",
+ "TKAx_L2_WRITE_LOOKUP",
+ "TKAx_L2_EXT_SNOOP_LOOKUP",
+ "TKAx_L2_EXT_READ",
+ "TKAx_L2_EXT_READ_NOSNP",
+ "TKAx_L2_EXT_READ_UNIQUE",
+ "TKAx_L2_EXT_READ_BEATS",
+ "TKAx_L2_EXT_AR_STALL",
+ "TKAx_L2_EXT_AR_CNT_Q1",
+ "TKAx_L2_EXT_AR_CNT_Q2",
+ "TKAx_L2_EXT_AR_CNT_Q3",
+ "TKAx_L2_EXT_RRESP_0_127",
+ "TKAx_L2_EXT_RRESP_128_191",
+ "TKAx_L2_EXT_RRESP_192_255",
+ "TKAx_L2_EXT_RRESP_256_319",
+ "TKAx_L2_EXT_RRESP_320_383",
+ "TKAx_L2_EXT_WRITE",
+ "TKAx_L2_EXT_WRITE_NOSNP_FULL",
+ "TKAx_L2_EXT_WRITE_NOSNP_PTL",
+ "TKAx_L2_EXT_WRITE_SNP_FULL",
+ "TKAx_L2_EXT_WRITE_SNP_PTL",
+ "TKAx_L2_EXT_WRITE_BEATS",
+ "TKAx_L2_EXT_W_STALL",
+ "TKAx_L2_EXT_AW_CNT_Q1",
+ "TKAx_L2_EXT_AW_CNT_Q2",
+ "TKAx_L2_EXT_AW_CNT_Q3",
+ "TKAx_L2_EXT_SNOOP",
+ "TKAx_L2_EXT_SNOOP_STALL",
+ "TKAx_L2_EXT_SNOOP_RESP_CLEAN",
+ "TKAx_L2_EXT_SNOOP_RESP_DATA",
+ "TKAx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TKAX_H_ */
diff --git a/mali_kbase/mali_kbase_gator_hwcnt_names_tnox.h b/mali_kbase/mali_kbase_gator_hwcnt_names_tnox.h
new file mode 100644
index 0000000..b6175a5
--- /dev/null
+++ b/mali_kbase/mali_kbase_gator_hwcnt_names_tnox.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TNOX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TNOX_H_
+
+static const char * const hardware_counters_mali_tNOx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "TNOx_MESSAGES_SENT",
+ "TNOx_MESSAGES_RECEIVED",
+ "TNOx_GPU_ACTIVE",
+ "TNOx_IRQ_ACTIVE",
+ "TNOx_JS0_JOBS",
+ "TNOx_JS0_TASKS",
+ "TNOx_JS0_ACTIVE",
+ "",
+ "TNOx_JS0_WAIT_READ",
+ "TNOx_JS0_WAIT_ISSUE",
+ "TNOx_JS0_WAIT_DEPEND",
+ "TNOx_JS0_WAIT_FINISH",
+ "TNOx_JS1_JOBS",
+ "TNOx_JS1_TASKS",
+ "TNOx_JS1_ACTIVE",
+ "",
+ "TNOx_JS1_WAIT_READ",
+ "TNOx_JS1_WAIT_ISSUE",
+ "TNOx_JS1_WAIT_DEPEND",
+ "TNOx_JS1_WAIT_FINISH",
+ "TNOx_JS2_JOBS",
+ "TNOx_JS2_TASKS",
+ "TNOx_JS2_ACTIVE",
+ "",
+ "TNOx_JS2_WAIT_READ",
+ "TNOx_JS2_WAIT_ISSUE",
+ "TNOx_JS2_WAIT_DEPEND",
+ "TNOx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "TNOx_TILER_ACTIVE",
+ "TNOx_JOBS_PROCESSED",
+ "TNOx_TRIANGLES",
+ "TNOx_LINES",
+ "TNOx_POINTS",
+ "TNOx_FRONT_FACING",
+ "TNOx_BACK_FACING",
+ "TNOx_PRIM_VISIBLE",
+ "TNOx_PRIM_CULLED",
+ "TNOx_PRIM_CLIPPED",
+ "TNOx_PRIM_SAT_CULLED",
+ "TNOx_BIN_ALLOC_INIT",
+ "TNOx_BIN_ALLOC_OVERFLOW",
+ "TNOx_BUS_READ",
+ "",
+ "TNOx_BUS_WRITE",
+ "TNOx_LOADING_DESC",
+ "TNOx_IDVS_POS_SHAD_REQ",
+ "TNOx_IDVS_POS_SHAD_WAIT",
+ "TNOx_IDVS_POS_SHAD_STALL",
+ "TNOx_IDVS_POS_FIFO_FULL",
+ "TNOx_PREFETCH_STALL",
+ "TNOx_VCACHE_HIT",
+ "TNOx_VCACHE_MISS",
+ "TNOx_VCACHE_LINE_WAIT",
+ "TNOx_VFETCH_POS_READ_WAIT",
+ "TNOx_VFETCH_VERTEX_WAIT",
+ "TNOx_VFETCH_STALL",
+ "TNOx_PRIMASSY_STALL",
+ "TNOx_BBOX_GEN_STALL",
+ "TNOx_IDVS_VBU_HIT",
+ "TNOx_IDVS_VBU_MISS",
+ "TNOx_IDVS_VBU_LINE_DEALLOCATE",
+ "TNOx_IDVS_VAR_SHAD_REQ",
+ "TNOx_IDVS_VAR_SHAD_STALL",
+ "TNOx_BINNER_STALL",
+ "TNOx_ITER_STALL",
+ "TNOx_COMPRESS_MISS",
+ "TNOx_COMPRESS_STALL",
+ "TNOx_PCACHE_HIT",
+ "TNOx_PCACHE_MISS",
+ "TNOx_PCACHE_MISS_STALL",
+ "TNOx_PCACHE_EVICT_STALL",
+ "TNOx_PMGR_PTR_WR_STALL",
+ "TNOx_PMGR_PTR_RD_STALL",
+ "TNOx_PMGR_CMD_WR_STALL",
+ "TNOx_WRBUF_ACTIVE",
+ "TNOx_WRBUF_HIT",
+ "TNOx_WRBUF_MISS",
+ "TNOx_WRBUF_NO_FREE_LINE_STALL",
+ "TNOx_WRBUF_NO_AXI_ID_STALL",
+ "TNOx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "TNOx_UTLB_TRANS",
+ "TNOx_UTLB_TRANS_HIT",
+ "TNOx_UTLB_TRANS_STALL",
+ "TNOx_UTLB_TRANS_MISS_DELAY",
+ "TNOx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "TNOx_FRAG_ACTIVE",
+ "TNOx_FRAG_PRIMITIVES",
+ "TNOx_FRAG_PRIM_RAST",
+ "TNOx_FRAG_FPK_ACTIVE",
+ "TNOx_FRAG_STARVING",
+ "TNOx_FRAG_WARPS",
+ "TNOx_FRAG_PARTIAL_WARPS",
+ "TNOx_FRAG_QUADS_RAST",
+ "TNOx_FRAG_QUADS_EZS_TEST",
+ "TNOx_FRAG_QUADS_EZS_UPDATE",
+ "TNOx_FRAG_QUADS_EZS_KILL",
+ "TNOx_FRAG_LZS_TEST",
+ "TNOx_FRAG_LZS_KILL",
+ "",
+ "TNOx_FRAG_PTILES",
+ "TNOx_FRAG_TRANS_ELIM",
+ "TNOx_QUAD_FPK_KILLER",
+ "",
+ "TNOx_COMPUTE_ACTIVE",
+ "TNOx_COMPUTE_TASKS",
+ "TNOx_COMPUTE_WARPS",
+ "TNOx_COMPUTE_STARVING",
+ "TNOx_EXEC_CORE_ACTIVE",
+ "TNOx_EXEC_ACTIVE",
+ "TNOx_EXEC_INSTR_COUNT",
+ "TNOx_EXEC_INSTR_DIVERGED",
+ "TNOx_EXEC_INSTR_STARVING",
+ "TNOx_ARITH_INSTR_SINGLE_FMA",
+ "TNOx_ARITH_INSTR_DOUBLE",
+ "TNOx_ARITH_INSTR_MSG",
+ "TNOx_ARITH_INSTR_MSG_ONLY",
+ "TNOx_TEX_MSGI_NUM_QUADS",
+ "TNOx_TEX_DFCH_NUM_PASSES",
+ "TNOx_TEX_DFCH_NUM_PASSES_MISS",
+ "TNOx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+ "TNOx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+ "TNOx_TEX_TFCH_NUM_LINES_FETCHED",
+ "TNOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+ "TNOx_TEX_TFCH_NUM_OPERATIONS",
+ "TNOx_TEX_FILT_NUM_OPERATIONS",
+ "TNOx_LS_MEM_READ_FULL",
+ "TNOx_LS_MEM_READ_SHORT",
+ "TNOx_LS_MEM_WRITE_FULL",
+ "TNOx_LS_MEM_WRITE_SHORT",
+ "TNOx_LS_MEM_ATOMIC",
+ "TNOx_VARY_INSTR",
+ "TNOx_VARY_SLOT_32",
+ "TNOx_VARY_SLOT_16",
+ "TNOx_ATTR_INSTR",
+ "TNOx_ARITH_INSTR_FP_MUL",
+ "TNOx_BEATS_RD_FTC",
+ "TNOx_BEATS_RD_FTC_EXT",
+ "TNOx_BEATS_RD_LSC",
+ "TNOx_BEATS_RD_LSC_EXT",
+ "TNOx_BEATS_RD_TEX",
+ "TNOx_BEATS_RD_TEX_EXT",
+ "TNOx_BEATS_RD_OTHER",
+ "TNOx_BEATS_WR_LSC_WB",
+ "TNOx_BEATS_WR_TIB",
+ "TNOx_BEATS_WR_LSC_OTHER",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "TNOx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "TNOx_L2_RD_MSG_IN",
+ "TNOx_L2_RD_MSG_IN_STALL",
+ "TNOx_L2_WR_MSG_IN",
+ "TNOx_L2_WR_MSG_IN_STALL",
+ "TNOx_L2_SNP_MSG_IN",
+ "TNOx_L2_SNP_MSG_IN_STALL",
+ "TNOx_L2_RD_MSG_OUT",
+ "TNOx_L2_RD_MSG_OUT_STALL",
+ "TNOx_L2_WR_MSG_OUT",
+ "TNOx_L2_ANY_LOOKUP",
+ "TNOx_L2_READ_LOOKUP",
+ "TNOx_L2_WRITE_LOOKUP",
+ "TNOx_L2_EXT_SNOOP_LOOKUP",
+ "TNOx_L2_EXT_READ",
+ "TNOx_L2_EXT_READ_NOSNP",
+ "TNOx_L2_EXT_READ_UNIQUE",
+ "TNOx_L2_EXT_READ_BEATS",
+ "TNOx_L2_EXT_AR_STALL",
+ "TNOx_L2_EXT_AR_CNT_Q1",
+ "TNOx_L2_EXT_AR_CNT_Q2",
+ "TNOx_L2_EXT_AR_CNT_Q3",
+ "TNOx_L2_EXT_RRESP_0_127",
+ "TNOx_L2_EXT_RRESP_128_191",
+ "TNOx_L2_EXT_RRESP_192_255",
+ "TNOx_L2_EXT_RRESP_256_319",
+ "TNOx_L2_EXT_RRESP_320_383",
+ "TNOx_L2_EXT_WRITE",
+ "TNOx_L2_EXT_WRITE_NOSNP_FULL",
+ "TNOx_L2_EXT_WRITE_NOSNP_PTL",
+ "TNOx_L2_EXT_WRITE_SNP_FULL",
+ "TNOx_L2_EXT_WRITE_SNP_PTL",
+ "TNOx_L2_EXT_WRITE_BEATS",
+ "TNOx_L2_EXT_W_STALL",
+ "TNOx_L2_EXT_AW_CNT_Q1",
+ "TNOx_L2_EXT_AW_CNT_Q2",
+ "TNOx_L2_EXT_AW_CNT_Q3",
+ "TNOx_L2_EXT_SNOOP",
+ "TNOx_L2_EXT_SNOOP_STALL",
+ "TNOx_L2_EXT_SNOOP_RESP_CLEAN",
+ "TNOx_L2_EXT_SNOOP_RESP_DATA",
+ "TNOx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TNOX_H_ */
diff --git a/mali_kbase/mali_kbase_gpu_id.h b/mali_kbase/mali_kbase_gpu_id.h
index 2d368df..32b9513 100644
--- a/mali_kbase/mali_kbase_gpu_id.h
+++ b/mali_kbase/mali_kbase_gpu_id.h
@@ -102,6 +102,11 @@
#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6u, 1)
#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7u, 0)
#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7u, 3)
+#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7u, 1)
+#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7u, 2)
+#define GPU_ID2_PRODUCT_TKAX GPU_ID2_MODEL_MAKE(8u, 0)
+#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(8u, 1)
+#define GPU_ID2_PRODUCT_TBOX GPU_ID2_MODEL_MAKE(8u, 2)
/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
#define GPU_ID_S_15DEV0 0x1
diff --git a/mali_kbase/mali_kbase_gpuprops.c b/mali_kbase/mali_kbase_gpuprops.c
index baf3c49..624d078 100644
--- a/mali_kbase/mali_kbase_gpuprops.c
+++ b/mali_kbase/mali_kbase_gpuprops.c
@@ -43,54 +43,6 @@
#define KBASE_UBFX32(value, offset, size) \
(((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
-int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props)
-{
- kbase_gpu_clk_speed_func get_gpu_speed_mhz;
- u32 gpu_speed_mhz;
- int rc = 1;
-
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL != kbase_props);
-
- /* Current GPU speed is requested from the system integrator via the GPU_SPEED_FUNC function.
- * If that function fails, or the function is not provided by the system integrator, we report the maximum
- * GPU speed as specified by GPU_FREQ_KHZ_MAX.
- */
- get_gpu_speed_mhz = (kbase_gpu_clk_speed_func) GPU_SPEED_FUNC;
- if (get_gpu_speed_mhz != NULL) {
- rc = get_gpu_speed_mhz(&gpu_speed_mhz);
-#ifdef CONFIG_MALI_DEBUG
- /* Issue a warning message when the reported GPU speed falls outside the min/max range */
- if (rc == 0) {
- u32 gpu_speed_khz = gpu_speed_mhz * 1000;
-
- if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min ||
- gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
- dev_warn(kctx->kbdev->dev, "GPU Speed is outside of min/max range (got %lu Khz, min %lu Khz, max %lu Khz)\n",
- (unsigned long)gpu_speed_khz,
- (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min,
- (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
- }
-#endif /* CONFIG_MALI_DEBUG */
- }
- if (kctx->kbdev->clock) {
- gpu_speed_mhz = clk_get_rate(kctx->kbdev->clock) / 1000000;
- rc = 0;
- }
- if (rc != 0)
- gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;
-
- kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;
-
- memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));
-
- /* Before API 8.2 they expect L3 cache info here, which was always 0 */
- if (kctx->api_version < KBASE_API_VERSION(8, 2))
- kbase_props->props.raw_props.suspend_size = 0;
-
- return 0;
-}
-
static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
{
struct mali_base_gpu_coherent_group *current_group;
@@ -195,13 +147,9 @@ static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kb
gpu_props->raw_props.l2_present =
((u64) regdump.l2_present_hi << 32) +
regdump.l2_present_lo;
-#ifdef CONFIG_MALI_CORESTACK
gpu_props->raw_props.stack_present =
((u64) regdump.stack_present_hi << 32) +
regdump.stack_present_lo;
-#else /* CONFIG_MALI_CORESTACK */
- gpu_props->raw_props.stack_present = 0;
-#endif /* CONFIG_MALI_CORESTACK */
for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
gpu_props->raw_props.js_features[i] = regdump.js_features[i];
@@ -336,6 +284,9 @@ void kbase_gpuprops_set_features(struct kbase_device *kbdev)
*/
gpu_props->raw_props.coherency_mode = regdump.coherency_features |
COHERENCY_FEATURE_BIT(COHERENCY_NONE);
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_THREAD_GROUP_SPLIT))
+ gpu_props->thread_props.max_thread_group_split = 0;
}
static struct {
@@ -357,6 +308,7 @@ static struct {
PROP(TEXTURE_FEATURES_0, core_props.texture_features[0]),
PROP(TEXTURE_FEATURES_1, core_props.texture_features[1]),
PROP(TEXTURE_FEATURES_2, core_props.texture_features[2]),
+ PROP(TEXTURE_FEATURES_3, core_props.texture_features[3]),
PROP(GPU_AVAILABLE_MEMORY_SIZE, core_props.gpu_available_memory_size),
PROP(L2_LOG2_LINE_SIZE, l2_props.log2_line_size),
@@ -404,6 +356,7 @@ static struct {
PROP(RAW_TEXTURE_FEATURES_0, raw_props.texture_features[0]),
PROP(RAW_TEXTURE_FEATURES_1, raw_props.texture_features[1]),
PROP(RAW_TEXTURE_FEATURES_2, raw_props.texture_features[2]),
+ PROP(RAW_TEXTURE_FEATURES_3, raw_props.texture_features[3]),
PROP(RAW_GPU_ID, raw_props.gpu_id),
PROP(RAW_THREAD_MAX_THREADS, raw_props.thread_max_threads),
PROP(RAW_THREAD_MAX_WORKGROUP_SIZE,
diff --git a/mali_kbase/mali_kbase_gpuprops.h b/mali_kbase/mali_kbase_gpuprops.h
index 57b3eaf..e88af25 100644
--- a/mali_kbase/mali_kbase_gpuprops.h
+++ b/mali_kbase/mali_kbase_gpuprops.h
@@ -50,18 +50,6 @@ void kbase_gpuprops_set(struct kbase_device *kbdev);
void kbase_gpuprops_set_features(struct kbase_device *kbdev);
/**
- * @brief Provide GPU properties to userside through UKU call.
- *
- * Fill the struct kbase_uk_gpuprops with values from GPU configuration registers.
- *
- * @param kctx The struct kbase_context structure
- * @param kbase_props A copy of the struct kbase_uk_gpuprops structure from userspace
- *
- * @return 0 on success. Any other value indicates failure.
- */
-int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props);
-
-/**
* kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer
* @kbdev: The kbase device
*
diff --git a/mali_kbase/mali_kbase_hw.c b/mali_kbase/mali_kbase_hw.c
index eb8368c..89342e1 100644
--- a/mali_kbase/mali_kbase_hw.c
+++ b/mali_kbase/mali_kbase_hw.c
@@ -51,6 +51,21 @@ void kbase_hw_set_features_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_TDVX:
features = base_hw_features_tDVx;
break;
+ case GPU_ID2_PRODUCT_TNOX:
+ features = base_hw_features_tNOx;
+ break;
+ case GPU_ID2_PRODUCT_TGOX:
+ features = base_hw_features_tGOx;
+ break;
+ case GPU_ID2_PRODUCT_TKAX:
+ features = base_hw_features_tKAx;
+ break;
+ case GPU_ID2_PRODUCT_TTRX:
+ features = base_hw_features_tTRx;
+ break;
+ case GPU_ID2_PRODUCT_TBOX:
+ features = base_hw_features_tBOx;
+ break;
default:
features = base_hw_features_generic;
break;
@@ -150,9 +165,25 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDVx_r0p0},
{U32_MAX, NULL} } },
+ {GPU_ID2_PRODUCT_TNOX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNOx_r0p0},
+ {U32_MAX, NULL} } },
+
+ {GPU_ID2_PRODUCT_TGOX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGOx_r0p0},
+ {U32_MAX, NULL} } },
+ {GPU_ID2_PRODUCT_TKAX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tKAx_r0p0},
+ {U32_MAX, NULL} } },
+ {GPU_ID2_PRODUCT_TTRX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0},
+ {U32_MAX, NULL} } },
+ {GPU_ID2_PRODUCT_TBOX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tBOx_r0p0},
+ {U32_MAX, NULL} } },
};
u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
@@ -370,6 +401,21 @@ int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
case GPU_ID2_PRODUCT_TDVX:
issues = base_hw_issues_model_tDVx;
break;
+ case GPU_ID2_PRODUCT_TNOX:
+ issues = base_hw_issues_model_tNOx;
+ break;
+ case GPU_ID2_PRODUCT_TGOX:
+ issues = base_hw_issues_model_tGOx;
+ break;
+ case GPU_ID2_PRODUCT_TKAX:
+ issues = base_hw_issues_model_tKAx;
+ break;
+ case GPU_ID2_PRODUCT_TTRX:
+ issues = base_hw_issues_model_tTRx;
+ break;
+ case GPU_ID2_PRODUCT_TBOX:
+ issues = base_hw_issues_model_tBOx;
+ break;
default:
dev_err(kbdev->dev,
"Unknown GPU ID %x", gpu_id);
diff --git a/mali_kbase/mali_kbase_ioctl.h b/mali_kbase/mali_kbase_ioctl.h
index e7c1dae..041c61a 100644
--- a/mali_kbase/mali_kbase_ioctl.h
+++ b/mali_kbase/mali_kbase_ioctl.h
@@ -26,6 +26,9 @@ extern "C" {
#define KBASE_IOCTL_TYPE 0x80
+#define BASE_UK_VERSION_MAJOR 11
+#define BASE_UK_VERSION_MINOR 0
+
#ifdef ANDROID
/* Android's definition of ioctl is incorrect, specifying the type argument as
* 'int'. This creates a warning when using _IOWR (as the top bit is set). Work
@@ -246,6 +249,7 @@ struct kbase_ioctl_disjoint_query {
* struct kbase_ioctl_get_ddk_version - Query the kernel version
* @version_buffer: Buffer to receive the kernel version string
* @size: Size of the buffer
+ * @padding: Padding
*
* The ioctl will return the number of bytes written into version_buffer
* (which includes a NULL byte) or a negative error code
@@ -253,6 +257,7 @@ struct kbase_ioctl_disjoint_query {
struct kbase_ioctl_get_ddk_version {
__u64 version_buffer;
__u32 size;
+ __u32 padding;
};
#define KBASE_IOCTL_GET_DDK_VERSION \
@@ -472,10 +477,12 @@ struct kbase_ioctl_fence_validate {
* struct kbase_ioctl_get_profiling_controls - Get the profiling controls
* @count: The size of @buffer in u32 words
* @buffer: The buffer to receive the profiling controls
+ * @padding: Padding
*/
struct kbase_ioctl_get_profiling_controls {
__u64 buffer;
__u32 count;
+ __u32 padding;
};
#define KBASE_IOCTL_GET_PROFILING_CONTROLS \
@@ -651,6 +658,9 @@ struct kbase_ioctl_tlstream_stats {
#define KBASE_GPUPROP_COHERENCY_GROUP_14 78
#define KBASE_GPUPROP_COHERENCY_GROUP_15 79
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81
+
#ifdef __cpluscplus
}
#endif
diff --git a/mali_kbase/mali_kbase_jd.c b/mali_kbase/mali_kbase_jd.c
index 9f9e0c4..aa545fd 100644
--- a/mali_kbase/mali_kbase_jd.c
+++ b/mali_kbase/mali_kbase_jd.c
@@ -24,7 +24,6 @@
#include <linux/compat.h>
#endif
#include <mali_kbase.h>
-#include <mali_kbase_uku.h>
#include <linux/random.h>
#include <linux/version.h>
#include <linux/ratelimit.h>
@@ -106,7 +105,7 @@ static int jd_run_atom(struct kbase_jd_atom *katom)
return kbasep_js_add_job(kctx, katom);
}
-#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
+#if defined(CONFIG_MALI_DMA_FENCE)
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
{
struct kbase_device *kbdev;
@@ -139,91 +138,8 @@ void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
}
#endif
-#ifdef CONFIG_KDS
-
-/* Add the katom to the kds waiting list.
- * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
- * The caller must hold the kbase_jd_context.lock */
-
-static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom)
-{
- struct kbase_context *kctx;
-
- KBASE_DEBUG_ASSERT(katom);
-
- kctx = katom->kctx;
-
- list_add_tail(&katom->node, &kctx->waiting_kds_resource);
-}
-
-/* Remove the katom from the kds waiting list.
- * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync.
- * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
- * The caller must hold the kbase_jd_context.lock */
-
-static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom)
-{
- KBASE_DEBUG_ASSERT(katom);
- list_del(&katom->node);
-}
-
-static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
-{
- struct kbase_jd_atom *katom;
- struct kbase_jd_context *ctx;
-
- katom = (struct kbase_jd_atom *)callback_parameter;
- KBASE_DEBUG_ASSERT(katom);
-
- ctx = &katom->kctx->jctx;
-
- /* If KDS resource has already been satisfied (e.g. due to zapping)
- * do nothing.
- */
- mutex_lock(&ctx->lock);
- if (!katom->kds_dep_satisfied) {
- katom->kds_dep_satisfied = true;
- kbase_jd_dep_clear_locked(katom);
- }
- mutex_unlock(&ctx->lock);
-}
-
-static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom)
-{
- KBASE_DEBUG_ASSERT(katom);
-
- /* Prevent job_done_nolock from being called twice on an atom when
- * there is a race between job completion and cancellation */
-
- if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
- /* Wait was cancelled - zap the atom */
- katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
- if (jd_done_nolock(katom, NULL))
- kbase_js_sched_all(katom->kctx->kbdev);
- }
-}
-#endif /* CONFIG_KDS */
-
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
{
-#ifdef CONFIG_KDS
- if (katom->kds_rset) {
- struct kbase_jd_context *jctx = &katom->kctx->jctx;
-
- /*
- * As the atom is no longer waiting, remove it from
- * the waiting list.
- */
-
- mutex_lock(&jctx->lock);
- kbase_jd_kds_waiters_remove(katom);
- mutex_unlock(&jctx->lock);
-
- /* Release the kds resource or cancel if zapping */
- kds_resource_set_release_sync(&katom->kds_rset);
- }
-#endif /* CONFIG_KDS */
-
#ifdef CONFIG_MALI_DMA_FENCE
/* Flush dma-fence workqueue to ensure that any callbacks that may have
* been queued are done before continuing.
@@ -240,12 +156,6 @@ static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
KBASE_DEBUG_ASSERT(katom);
KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
-#ifdef CONFIG_KDS
- /* Prevent the KDS resource from triggering the atom in case of zapping */
- if (katom->kds_rset)
- katom->kds_dep_satisfied = true;
-#endif /* CONFIG_KDS */
-
#ifdef CONFIG_MALI_DMA_FENCE
kbase_dma_fence_signal(katom);
#endif /* CONFIG_MALI_DMA_FENCE */
@@ -281,11 +191,6 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
{
int err_ret_val = -EINVAL;
u32 res_no;
-#ifdef CONFIG_KDS
- u32 kds_res_count = 0;
- struct kds_resource **kds_resources = NULL;
- unsigned long *kds_access_bitmap = NULL;
-#endif /* CONFIG_KDS */
#ifdef CONFIG_MALI_DMA_FENCE
struct kbase_dma_fence_resv_info info = {
.dma_fence_resv_count = 0,
@@ -332,25 +237,6 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
err_ret_val = -EINVAL;
goto early_err_out;
}
-#ifdef CONFIG_KDS
- /* assume we have to wait for all */
- KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
- kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);
-
- if (!kds_resources) {
- err_ret_val = -ENOMEM;
- goto early_err_out;
- }
-
- KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
- kds_access_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!kds_access_bitmap) {
- err_ret_val = -ENOMEM;
- goto early_err_out;
- }
-#endif /* CONFIG_KDS */
#ifdef CONFIG_MALI_DMA_FENCE
if (implicit_sync) {
@@ -401,12 +287,7 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
}
alloc = kbase_map_external_resource(katom->kctx, reg,
- current->mm
-#ifdef CONFIG_KDS
- , &kds_res_count, kds_resources,
- kds_access_bitmap, exclusive
-#endif
- );
+ current->mm);
if (!alloc) {
err_ret_val = -EINVAL;
goto failed_loop;
@@ -434,36 +315,12 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
katom->extres[res_no].alloc = alloc;
}
/* successfully parsed the extres array */
- /* drop the vm lock before we call into kds */
+ /* drop the vm lock now */
kbase_gpu_vm_unlock(katom->kctx);
/* Release the processes mmap lock */
up_read(&current->mm->mmap_sem);
-#ifdef CONFIG_KDS
- if (kds_res_count) {
- int wait_failed;
-
- /* We have resources to wait for with kds */
- katom->kds_dep_satisfied = false;
-
- wait_failed = kds_async_waitall(&katom->kds_rset,
- &katom->kctx->jctx.kds_cb, katom, NULL,
- kds_res_count, kds_access_bitmap,
- kds_resources);
-
- if (wait_failed)
- goto failed_kds_setup;
- else
- kbase_jd_kds_waiters_add(katom);
- } else {
- /* Nothing to wait for, so kds dep met */
- katom->kds_dep_satisfied = true;
- }
- kfree(kds_resources);
- kfree(kds_access_bitmap);
-#endif /* CONFIG_KDS */
-
#ifdef CONFIG_MALI_DMA_FENCE
if (implicit_sync) {
if (info.dma_fence_resv_count) {
@@ -486,24 +343,6 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
#ifdef CONFIG_MALI_DMA_FENCE
failed_dma_fence_setup:
-#ifdef CONFIG_KDS
- /* If we are here, dma_fence setup failed but KDS didn't.
- * Revert KDS setup if any.
- */
- if (kds_res_count) {
- mutex_unlock(&katom->kctx->jctx.lock);
- kds_resource_set_release_sync(&katom->kds_rset);
- mutex_lock(&katom->kctx->jctx.lock);
-
- kbase_jd_kds_waiters_remove(katom);
- katom->kds_dep_satisfied = true;
- }
-#endif /* CONFIG_KDS */
-#endif /* CONFIG_MALI_DMA_FENCE */
-#ifdef CONFIG_KDS
-failed_kds_setup:
-#endif
-#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
/* Lock the processes mmap lock */
down_read(&current->mm->mmap_sem);
@@ -526,10 +365,6 @@ failed_kds_setup:
early_err_out:
kfree(katom->extres);
katom->extres = NULL;
-#ifdef CONFIG_KDS
- kfree(kds_resources);
- kfree(kds_access_bitmap);
-#endif /* CONFIG_KDS */
#ifdef CONFIG_MALI_DMA_FENCE
if (implicit_sync) {
kfree(info.resv_objs);
@@ -559,15 +394,6 @@ static inline void jd_resolve_dep(struct list_head *out_list,
if (katom->event_code != BASE_JD_EVENT_DONE &&
(dep_type != BASE_JD_DEP_TYPE_ORDER)) {
-#ifdef CONFIG_KDS
- if (!dep_atom->kds_dep_satisfied) {
- /* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
- * do nothing. If the callback doesn't happen then kbase_jd_post_external_resources will clean up
- */
- dep_atom->kds_dep_satisfied = true;
- }
-#endif
-
#ifdef CONFIG_MALI_DMA_FENCE
kbase_dma_fence_cancel_callbacks(dep_atom);
#endif
@@ -617,10 +443,6 @@ static inline void jd_resolve_dep(struct list_head *out_list,
}
#endif /* CONFIG_MALI_DMA_FENCE */
-#ifdef CONFIG_KDS
- dep_satisfied = dep_satisfied && dep_atom->kds_dep_satisfied;
-#endif
-
if (dep_satisfied) {
dep_atom->in_jd_list = true;
list_add_tail(&dep_atom->jd_item, out_list);
@@ -756,10 +578,6 @@ static void jd_try_submitting_deps(struct list_head *out_list,
dep_satisfied = false;
}
#endif /* CONFIG_MALI_DMA_FENCE */
-#ifdef CONFIG_KDS
- dep_satisfied = dep_satisfied &&
- dep_atom->kds_dep_satisfied;
-#endif
if (dep0_valid && dep1_valid && dep_satisfied) {
dep_atom->in_jd_list = true;
@@ -1004,12 +822,6 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
katom->age = kctx->age_count++;
INIT_LIST_HEAD(&katom->jd_item);
-#ifdef CONFIG_KDS
- /* Start by assuming that the KDS dependencies are satisfied,
- * kbase_jd_pre_external_resources will correct this if there are dependencies */
- katom->kds_dep_satisfied = true;
- katom->kds_rset = NULL;
-#endif /* CONFIG_KDS */
#ifdef CONFIG_MALI_DMA_FENCE
kbase_fence_dep_count_set(katom, -1);
#endif
@@ -1227,14 +1039,6 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
ret = false;
goto out;
}
-#ifdef CONFIG_KDS
- if (!katom->kds_dep_satisfied) {
- /* Queue atom due to KDS dependency */
- ret = false;
- goto out;
- }
-#endif /* CONFIG_KDS */
-
#ifdef CONFIG_MALI_DMA_FENCE
if (kbase_fence_dep_count_read(katom) != -1) {
@@ -1322,12 +1126,6 @@ int kbase_jd_submit(struct kbase_context *kctx,
break;
}
-#ifdef BASE_LEGACY_UK10_2_SUPPORT
- if (KBASE_API_VERSION(10, 3) > kctx->api_version)
- user_atom.core_req = (u32)(user_atom.compat_core_req
- & 0x7fff);
-#endif /* BASE_LEGACY_UK10_2_SUPPORT */
-
user_addr = (void __user *)((uintptr_t) user_addr + stride);
mutex_lock(&jctx->lock);
@@ -1673,7 +1471,6 @@ void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
#endif
WARN_ON(work_pending(&katom->work));
- KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
INIT_WORK(&katom->work, kbase_jd_done_worker);
queue_work(kctx->jctx.job_done_wq, &katom->work);
}
@@ -1698,7 +1495,6 @@ void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
- KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
INIT_WORK(&katom->work, jd_cancel_worker);
queue_work(kctx->jctx.job_done_wq, &katom->work);
}
@@ -1732,24 +1528,6 @@ void kbase_jd_zap_context(struct kbase_context *kctx)
}
-#ifdef CONFIG_KDS
-
- /* For each job waiting on a kds resource, cancel the wait and force the job to
- * complete early, this is done so that we don't leave jobs outstanding waiting
- * on kds resources which may never be released when contexts are zapped, resulting
- * in a hang.
- *
- * Note that we can safely iterate over the list as the struct kbase_jd_context lock is held,
- * this prevents items being removed when calling job_done_nolock in kbase_cancel_kds_wait_job.
- */
-
- list_for_each(entry, &kctx->waiting_kds_resource) {
- katom = list_entry(entry, struct kbase_jd_atom, node);
-
- kbase_cancel_kds_wait_job(katom);
- }
-#endif
-
#ifdef CONFIG_MALI_DMA_FENCE
kbase_dma_fence_cancel_all_atoms(kctx);
#endif
@@ -1772,9 +1550,6 @@ int kbase_jd_init(struct kbase_context *kctx)
{
int i;
int mali_err = 0;
-#ifdef CONFIG_KDS
- int err;
-#endif /* CONFIG_KDS */
KBASE_DEBUG_ASSERT(kctx);
@@ -1809,24 +1584,12 @@ int kbase_jd_init(struct kbase_context *kctx)
spin_lock_init(&kctx->jctx.tb_lock);
-#ifdef CONFIG_KDS
- err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear);
- if (0 != err) {
- mali_err = -EINVAL;
- goto out2;
- }
-#endif /* CONFIG_KDS */
-
kctx->jctx.job_nr = 0;
INIT_LIST_HEAD(&kctx->completed_jobs);
atomic_set(&kctx->work_count, 0);
return 0;
-#ifdef CONFIG_KDS
- out2:
- destroy_workqueue(kctx->jctx.job_done_wq);
-#endif /* CONFIG_KDS */
out1:
return mali_err;
}
@@ -1837,9 +1600,6 @@ void kbase_jd_exit(struct kbase_context *kctx)
{
KBASE_DEBUG_ASSERT(kctx);
-#ifdef CONFIG_KDS
- kds_callback_term(&kctx->jctx.kds_cb);
-#endif /* CONFIG_KDS */
/* Work queue is emptied by this */
destroy_workqueue(kctx->jctx.job_done_wq);
}
diff --git a/mali_kbase/mali_kbase_jd_debugfs.c b/mali_kbase/mali_kbase_jd_debugfs.c
index c8b37c4..25f1ed5 100644
--- a/mali_kbase/mali_kbase_jd_debugfs.c
+++ b/mali_kbase/mali_kbase_jd_debugfs.c
@@ -24,6 +24,7 @@
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
#include <mali_kbase_sync.h>
#endif
+#include <mali_kbase_ioctl.h>
struct kbase_jd_debugfs_depinfo {
u8 id;
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index 219e8c8..a3e8248 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -1146,9 +1146,8 @@ bool kbasep_js_add_job(struct kbase_context *kctx,
if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
if (kbase_ctx_flag(kctx, KCTX_DYING)) {
/* A job got added while/after kbase_job_zap_context()
- * was called on a non-scheduled context (e.g. KDS
- * dependency resolved). Kill that job by killing the
- * context. */
+ * was called on a non-scheduled context. Kill that job
+ * by killing the context. */
kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
false);
} else if (js_kctx_info->ctx.nr_jobs == 1) {
@@ -1174,13 +1173,11 @@ void kbasep_js_remove_job(struct kbase_device *kbdev,
struct kbase_context *kctx, struct kbase_jd_atom *atom)
{
struct kbasep_js_kctx_info *js_kctx_info;
- struct kbasep_js_device_data *js_devdata;
KBASE_DEBUG_ASSERT(kbdev != NULL);
KBASE_DEBUG_ASSERT(kctx != NULL);
KBASE_DEBUG_ASSERT(atom != NULL);
- js_devdata = &kbdev->js_data;
js_kctx_info = &kctx->jctx.sched_info;
KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
@@ -1196,15 +1193,12 @@ bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
{
unsigned long flags;
struct kbasep_js_atom_retained_state katom_retained_state;
- struct kbasep_js_device_data *js_devdata;
bool attr_state_changed;
KBASE_DEBUG_ASSERT(kbdev != NULL);
KBASE_DEBUG_ASSERT(kctx != NULL);
KBASE_DEBUG_ASSERT(katom != NULL);
- js_devdata = &kbdev->js_data;
-
kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
kbasep_js_remove_job(kbdev, kctx, katom);
@@ -1227,11 +1221,9 @@ bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
struct kbase_context *kctx)
{
unsigned long flags;
- struct kbasep_js_device_data *js_devdata;
bool result;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- js_devdata = &kbdev->js_data;
mutex_lock(&kbdev->mmu_hw_mutex);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -2280,7 +2272,6 @@ void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
kbase_job_check_leave_disjoint(kctx->kbdev, katom);
- KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
INIT_WORK(&katom->work, js_return_worker);
queue_work(kctx->jctx.job_done_wq, &katom->work);
}
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 34222cb..375e484 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -1440,6 +1440,11 @@ int kbase_alloc_phy_pages_helper(
KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+ if (alloc->reg) {
+ if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
+ goto invalid_request;
+ }
+
kctx = alloc->imported.kctx;
if (nr_pages_requested == 0)
@@ -1595,6 +1600,7 @@ alloc_failed:
kbase_atomic_sub_pages(nr_pages_requested,
&kctx->kbdev->memdev.used_pages);
+invalid_request:
return -ENOMEM;
}
@@ -2594,35 +2600,9 @@ static void kbase_jd_umm_unmap(struct kbase_context *kctx,
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
-#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) \
- || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
-static void add_kds_resource(struct kds_resource *kds_res,
- struct kds_resource **kds_resources, u32 *kds_res_count,
- unsigned long *kds_access_bitmap, bool exclusive)
-{
- u32 i;
-
- for (i = 0; i < *kds_res_count; i++) {
- /* Duplicate resource, ignore */
- if (kds_resources[i] == kds_res)
- return;
- }
-
- kds_resources[*kds_res_count] = kds_res;
- if (exclusive)
- set_bit(*kds_res_count, kds_access_bitmap);
- (*kds_res_count)++;
-}
-#endif
-
struct kbase_mem_phy_alloc *kbase_map_external_resource(
struct kbase_context *kctx, struct kbase_va_region *reg,
- struct mm_struct *locked_mm
-#ifdef CONFIG_KDS
- , u32 *kds_res_count, struct kds_resource **kds_resources,
- unsigned long *kds_access_bitmap, bool exclusive
-#endif
- )
+ struct mm_struct *locked_mm)
{
int err;
@@ -2643,34 +2623,10 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
}
break;
case KBASE_MEM_TYPE_IMPORTED_UMP: {
-#if defined(CONFIG_KDS) && defined(CONFIG_UMP)
- if (kds_res_count) {
- struct kds_resource *kds_res;
-
- kds_res = ump_dd_kds_resource_get(
- reg->gpu_alloc->imported.ump_handle);
- if (kds_res)
- add_kds_resource(kds_res, kds_resources,
- kds_res_count,
- kds_access_bitmap, exclusive);
- }
-#endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
break;
}
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
-#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
- if (kds_res_count) {
- struct kds_resource *kds_res;
-
- kds_res = get_dma_buf_kds_resource(
- reg->gpu_alloc->imported.umm.dma_buf);
- if (kds_res)
- add_kds_resource(kds_res, kds_resources,
- kds_res_count,
- kds_access_bitmap, exclusive);
- }
-#endif
reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
err = kbase_jd_umm_map(kctx, reg);
@@ -2778,12 +2734,7 @@ struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
* Fill in the metadata object and acquire a reference
* for the physical resource.
*/
- meta->alloc = kbase_map_external_resource(kctx, reg, NULL
-#ifdef CONFIG_KDS
- , NULL, NULL,
- NULL, false
-#endif
- );
+ meta->alloc = kbase_map_external_resource(kctx, reg, NULL);
if (!meta->alloc)
goto fail_map;
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index 820a9be..815fab4 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -30,9 +30,6 @@
#endif
#include <linux/kref.h>
-#ifdef CONFIG_KDS
-#include <linux/kds.h>
-#endif /* CONFIG_KDS */
#ifdef CONFIG_UMP
#include <linux/ump.h>
#endif /* CONFIG_UMP */
@@ -320,9 +317,6 @@ struct kbase_va_region {
struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */
- /* non-NULL if this memory object is a kds_resource */
- struct kds_resource *kds_res;
-
/* List head used to store the region in the JIT allocation pool */
struct list_head jit_node;
};
@@ -1040,22 +1034,13 @@ void kbase_jit_term(struct kbase_context *kctx);
* @kctx: kbase context.
* @reg: The region to map.
* @locked_mm: The mm_struct which has been locked for this operation.
- * @kds_res_count: The number of KDS resources.
- * @kds_resources: Array of KDS resources.
- * @kds_access_bitmap: Access bitmap for KDS.
- * @exclusive: If the KDS resource requires exclusive access.
*
* Return: The physical allocation which backs the region on success or NULL
* on failure.
*/
struct kbase_mem_phy_alloc *kbase_map_external_resource(
struct kbase_context *kctx, struct kbase_va_region *reg,
- struct mm_struct *locked_mm
-#ifdef CONFIG_KDS
- , u32 *kds_res_count, struct kds_resource **kds_resources,
- unsigned long *kds_access_bitmap, bool exclusive
-#endif
- );
+ struct mm_struct *locked_mm);
/**
* kbase_unmap_external_resource - Unmap an external resource from the GPU.
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 842444c..37f7a6a 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -44,6 +44,7 @@
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_tlstream.h>
+#include <mali_kbase_ioctl.h>
static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
@@ -152,8 +153,11 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
goto prepare_failed;
}
- if (*flags & BASE_MEM_GROW_ON_GPF)
+ if (*flags & BASE_MEM_GROW_ON_GPF) {
reg->extent = extent;
+ if (reg->extent == 0)
+ goto invalid_extent;
+ }
else
reg->extent = 0;
@@ -235,6 +239,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
no_mmap:
no_cookie:
no_mem:
+invalid_extent:
kbase_mem_phy_alloc_put(reg->cpu_alloc);
kbase_mem_phy_alloc_put(reg->gpu_alloc);
invalid_flags:
@@ -2608,7 +2613,7 @@ void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_
page_array = kbase_get_cpu_phy_pages(reg);
for (i = 0; i < pages; i++)
- page_array[i] = as_tagged(dma_pa + (i << PAGE_SHIFT));
+ page_array[i] = as_tagged(dma_pa + ((dma_addr_t)i << PAGE_SHIFT));
reg->cpu_alloc->nents = pages;
diff --git a/mali_kbase/mali_kbase_mmu.c b/mali_kbase/mali_kbase_mmu.c
index 2dd20fc..1694779 100644
--- a/mali_kbase/mali_kbase_mmu.c
+++ b/mali_kbase/mali_kbase_mmu.c
@@ -99,14 +99,31 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
struct kbase_as *as, const char *reason_str);
-static size_t make_multiple(size_t minimum, size_t multiple)
+/**
+ * make_multiple() - Calculate the nearest integral multiple of a given number.
+ *
+ * @minimum: The number to round up.
+ * @multiple: The value whose integral multiple shall be calculated.
+ * @result: The number rounded up to the nearest integral multiple in case of success,
+ * or just the number itself in case of failure.
+ *
+ * Return: 0 in case of success, or -1 in case of failure.
+ */
+static int make_multiple(size_t minimum, size_t multiple, size_t *result)
{
- size_t remainder = minimum % multiple;
+ int err = -1;
- if (remainder == 0)
- return minimum;
+ *result = minimum;
- return minimum + multiple - remainder;
+ if (multiple > 0) {
+ size_t remainder = minimum % multiple;
+
+ if (remainder != 0)
+ *result = minimum + multiple - remainder;
+ err = 0;
+ }
+
+ return err;
}
void page_fault_worker(struct work_struct *data)
@@ -266,15 +283,13 @@ void page_fault_worker(struct work_struct *data)
goto fault_done;
}
- new_pages = make_multiple(fault_rel_pfn -
- kbase_reg_current_backed_size(region) + 1,
- region->extent);
+ err = make_multiple(fault_rel_pfn - kbase_reg_current_backed_size(region) + 1,
+ region->extent,
+ &new_pages);
+ WARN_ON(err);
/* cap to max vsize */
- if (new_pages + kbase_reg_current_backed_size(region) >
- region->nr_pages)
- new_pages = region->nr_pages -
- kbase_reg_current_backed_size(region);
+ new_pages = min(new_pages, region->nr_pages - kbase_reg_current_backed_size(region));
if (0 == new_pages) {
mutex_lock(&kbdev->mmu_hw_mutex);
@@ -2114,14 +2129,10 @@ void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_contex
* We need to switch to UNMAPPED mode - but we do this in a
* worker so that we can sleep
*/
- KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
- WARN_ON(work_pending(&as->work_busfault));
- queue_work(as->pf_wq, &as->work_busfault);
+ WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
atomic_inc(&kbdev->faults_pending);
} else {
- KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
- WARN_ON(work_pending(&as->work_pagefault));
- queue_work(as->pf_wq, &as->work_pagefault);
+ WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
atomic_inc(&kbdev->faults_pending);
}
}
diff --git a/mali_kbase/mali_kbase_replay.c b/mali_kbase/mali_kbase_replay.c
index 2f8eccf..203a065 100644
--- a/mali_kbase/mali_kbase_replay.c
+++ b/mali_kbase/mali_kbase_replay.c
@@ -772,22 +772,6 @@ static int kbasep_replay_parse_payload(struct kbase_context *kctx,
return -EINVAL;
}
-#ifdef BASE_LEGACY_UK10_2_SUPPORT
- if (KBASE_API_VERSION(10, 3) > replay_atom->kctx->api_version) {
- base_jd_replay_payload_uk10_2 *payload_uk10_2;
- u16 tiler_core_req;
- u16 fragment_core_req;
-
- payload_uk10_2 = (base_jd_replay_payload_uk10_2 *) payload;
- memcpy(&tiler_core_req, &payload_uk10_2->tiler_core_req,
- sizeof(tiler_core_req));
- memcpy(&fragment_core_req, &payload_uk10_2->fragment_core_req,
- sizeof(fragment_core_req));
- payload->tiler_core_req = (u32)(tiler_core_req & 0x7fff);
- payload->fragment_core_req = (u32)(fragment_core_req & 0x7fff);
- }
-#endif /* BASE_LEGACY_UK10_2_SUPPORT */
-
#ifdef CONFIG_MALI_DEBUG
dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index cd86b98..127ada0 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -1240,13 +1240,13 @@ static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
return;
failed_loop:
- while (--i > 0) {
- u64 gpu_addr;
-
- gpu_addr = ext_res->ext_res[i].ext_resource &
+ while (i > 0) {
+ u64 const gpu_addr = ext_res->ext_res[i - 1].ext_resource &
~BASE_EXT_RES_ACCESS_EXCLUSIVE;
kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
+
+ --i;
}
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
diff --git a/mali_kbase/mali_kbase_uku.h b/mali_kbase/mali_kbase_uku.h
deleted file mode 100644
index 2a69da7..0000000
--- a/mali_kbase/mali_kbase_uku.h
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2008-2017 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-
-
-
-
-#ifndef _KBASE_UKU_H_
-#define _KBASE_UKU_H_
-
-#include "mali_uk.h"
-#include "mali_base_kernel.h"
-
-/* This file needs to support being included from kernel and userside (which use different defines) */
-#if defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON
-#define SUPPORT_MALI_ERROR_INJECT
-#endif /* defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON */
-#if defined(CONFIG_MALI_NO_MALI)
-#define SUPPORT_MALI_NO_MALI
-#elif defined(MALI_NO_MALI)
-#if MALI_NO_MALI
-#define SUPPORT_MALI_NO_MALI
-#endif
-#endif
-
-#if defined(SUPPORT_MALI_NO_MALI) || defined(SUPPORT_MALI_ERROR_INJECT)
-#include "backend/gpu/mali_kbase_model_dummy.h"
-#endif
-
-#include "mali_kbase_gpuprops_types.h"
-
-/*
- * 10.1:
- * - Do mmap in kernel for SAME_VA memory allocations rather then
- * calling back into the kernel as a 2nd stage of the allocation request.
- *
- * 10.2:
- * - Add KBASE_FUNC_MEM_JIT_INIT which allows clients to request a custom VA
- * region for use with JIT (ignored on 32-bit platforms)
- *
- * 10.3:
- * - base_jd_core_req typedef-ed to u32 (instead of to u16)
- * - two flags added: BASE_JD_REQ_SKIP_CACHE_STAT / _END
- *
- * 10.4:
- * - Removed KBASE_FUNC_EXT_BUFFER_LOCK used only in internal tests
- *
- * 10.5:
- * - Reverted to performing mmap in user space so that tools like valgrind work.
- *
- * 10.6:
- * - Add flags input variable to KBASE_FUNC_TLSTREAM_ACQUIRE
- */
-#define BASE_UK_VERSION_MAJOR 10
-#define BASE_UK_VERSION_MINOR 6
-
-#define LINUX_UK_BASE_MAGIC 0x80
-
-struct kbase_uk_mem_alloc {
- union uk_header header;
- /* IN */
- u64 va_pages;
- u64 commit_pages;
- u64 extent;
- /* IN/OUT */
- u64 flags;
- /* OUT */
- u64 gpu_va;
- u16 va_alignment;
- u8 padding[6];
-};
-
-struct kbase_uk_mem_free {
- union uk_header header;
- /* IN */
- u64 gpu_addr;
- /* OUT */
-};
-
-struct kbase_uk_mem_alias {
- union uk_header header;
- /* IN/OUT */
- u64 flags;
- /* IN */
- u64 stride;
- u64 nents;
- u64 ai;
- /* OUT */
- u64 gpu_va;
- u64 va_pages;
-};
-
-struct kbase_uk_mem_import {
- union uk_header header;
- /* IN */
- u64 phandle;
- u32 type;
- u32 padding;
- /* IN/OUT */
- u64 flags;
- /* OUT */
- u64 gpu_va;
- u64 va_pages;
-};
-
-struct kbase_uk_mem_flags_change {
- union uk_header header;
- /* IN */
- u64 gpu_va;
- u64 flags;
- u64 mask;
-};
-
-struct kbase_uk_job_submit {
- union uk_header header;
- /* IN */
- u64 addr;
- u32 nr_atoms;
- u32 stride; /* bytes between atoms, i.e. sizeof(base_jd_atom_v2) */
- /* OUT */
-};
-
-struct kbase_uk_post_term {
- union uk_header header;
-};
-
-struct kbase_uk_sync_now {
- union uk_header header;
-
- /* IN */
- struct base_syncset sset;
-
- /* OUT */
-};
-
-struct kbase_uk_hwcnt_setup {
- union uk_header header;
-
- /* IN */
- u64 dump_buffer;
- u32 jm_bm;
- u32 shader_bm;
- u32 tiler_bm;
- u32 unused_1; /* keep for backwards compatibility */
- u32 mmu_l2_bm;
- u32 padding;
- /* OUT */
-};
-
-/**
- * struct kbase_uk_hwcnt_reader_setup - User/Kernel space data exchange structure
- * @header: UK structure header
- * @buffer_count: requested number of dumping buffers
- * @jm_bm: counters selection bitmask (JM)
- * @shader_bm: counters selection bitmask (Shader)
- * @tiler_bm: counters selection bitmask (Tiler)
- * @mmu_l2_bm: counters selection bitmask (MMU_L2)
- * @fd: dumping notification file descriptor
- *
- * This structure sets up HWC dumper/reader for this context.
- * Multiple instances can be created for single context.
- */
-struct kbase_uk_hwcnt_reader_setup {
- union uk_header header;
-
- /* IN */
- u32 buffer_count;
- u32 jm_bm;
- u32 shader_bm;
- u32 tiler_bm;
- u32 mmu_l2_bm;
-
- /* OUT */
- s32 fd;
-};
-
-struct kbase_uk_hwcnt_dump {
- union uk_header header;
-};
-
-struct kbase_uk_hwcnt_clear {
- union uk_header header;
-};
-
-struct kbase_uk_fence_validate {
- union uk_header header;
- /* IN */
- s32 fd;
- u32 padding;
- /* OUT */
-};
-
-struct kbase_uk_stream_create {
- union uk_header header;
- /* IN */
- char name[32];
- /* OUT */
- s32 fd;
- u32 padding;
-};
-
-struct kbase_uk_gpuprops {
- union uk_header header;
-
- /* IN */
- struct mali_base_gpu_props props;
- /* OUT */
-};
-
-struct kbase_uk_mem_query {
- union uk_header header;
- /* IN */
- u64 gpu_addr;
-#define KBASE_MEM_QUERY_COMMIT_SIZE 1
-#define KBASE_MEM_QUERY_VA_SIZE 2
-#define KBASE_MEM_QUERY_FLAGS 3
- u64 query;
- /* OUT */
- u64 value;
-};
-
-struct kbase_uk_mem_commit {
- union uk_header header;
- /* IN */
- u64 gpu_addr;
- u64 pages;
- /* OUT */
- u32 result_subcode;
- u32 padding;
-};
-
-struct kbase_uk_find_cpu_offset {
- union uk_header header;
- /* IN */
- u64 gpu_addr;
- u64 cpu_addr;
- u64 size;
- /* OUT */
- u64 offset;
-};
-
-#define KBASE_GET_VERSION_BUFFER_SIZE 64
-struct kbase_uk_get_ddk_version {
- union uk_header header;
- /* OUT */
- char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
- u32 version_string_size;
- u32 padding;
-};
-
-struct kbase_uk_disjoint_query {
- union uk_header header;
- /* OUT */
- u32 counter;
- u32 padding;
-};
-
-struct kbase_uk_set_flags {
- union uk_header header;
- /* IN */
- u32 create_flags;
- u32 padding;
-};
-
-#if MALI_UNIT_TEST
-#define TEST_ADDR_COUNT 4
-#define KBASE_TEST_BUFFER_SIZE 128
-struct kbase_exported_test_data {
- u64 test_addr[TEST_ADDR_COUNT]; /**< memory address */
- u32 test_addr_pages[TEST_ADDR_COUNT]; /**< memory size in pages */
- u64 kctx; /**< base context created by process */
- u64 mm; /**< pointer to process address space */
- u8 buffer1[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */
- u8 buffer2[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */
-};
-
-struct kbase_uk_set_test_data {
- union uk_header header;
- /* IN */
- struct kbase_exported_test_data test_data;
-};
-
-#endif /* MALI_UNIT_TEST */
-
-#ifdef SUPPORT_MALI_ERROR_INJECT
-struct kbase_uk_error_params {
- union uk_header header;
- /* IN */
- struct kbase_error_params params;
-};
-#endif /* SUPPORT_MALI_ERROR_INJECT */
-
-#ifdef SUPPORT_MALI_NO_MALI
-struct kbase_uk_model_control_params {
- union uk_header header;
- /* IN */
- struct kbase_model_control_params params;
-};
-#endif /* SUPPORT_MALI_NO_MALI */
-
-struct kbase_uk_profiling_controls {
- union uk_header header;
- u32 profiling_controls[FBDUMP_CONTROL_MAX];
-};
-
-struct kbase_uk_debugfs_mem_profile_add {
- union uk_header header;
- u32 len;
- u32 padding;
- u64 buf;
-};
-
-struct kbase_uk_context_id {
- union uk_header header;
- /* OUT */
- int id;
-};
-
-/**
- * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure
- * @header: UK structure header
- * @flags: timeline stream flags
- * @fd: timeline stream file descriptor
- *
- * This structure is used when performing a call to acquire kernel side timeline
- * stream file descriptor.
- */
-struct kbase_uk_tlstream_acquire {
- union uk_header header;
- /* IN */
- u32 flags;
- /* OUT */
- s32 fd;
-};
-
-/**
- * struct kbase_uk_tlstream_acquire_v10_4 - User/Kernel space data exchange
- * structure
- * @header: UK structure header
- * @fd: timeline stream file descriptor
- *
- * This structure is used when performing a call to acquire kernel side timeline
- * stream file descriptor.
- */
-struct kbase_uk_tlstream_acquire_v10_4 {
- union uk_header header;
- /* IN */
- /* OUT */
- s32 fd;
-};
-
-/**
- * struct kbase_uk_tlstream_flush - User/Kernel space data exchange structure
- * @header: UK structure header
- *
- * This structure is used when performing a call to flush kernel side
- * timeline streams.
- */
-struct kbase_uk_tlstream_flush {
- union uk_header header;
- /* IN */
- /* OUT */
-};
-
-#if MALI_UNIT_TEST
-/**
- * struct kbase_uk_tlstream_test - User/Kernel space data exchange structure
- * @header: UK structure header
- * @tpw_count: number of trace point writers in each context
- * @msg_delay: time delay between tracepoints from one writer in milliseconds
- * @msg_count: number of trace points written by one writer
- * @aux_msg: if non-zero aux messages will be included
- *
- * This structure is used when performing a call to start timeline stream test
- * embedded in kernel.
- */
-struct kbase_uk_tlstream_test {
- union uk_header header;
- /* IN */
- u32 tpw_count;
- u32 msg_delay;
- u32 msg_count;
- u32 aux_msg;
- /* OUT */
-};
-
-/**
- * struct kbase_uk_tlstream_stats - User/Kernel space data exchange structure
- * @header: UK structure header
- * @bytes_collected: number of bytes read by user
- * @bytes_generated: number of bytes generated by tracepoints
- *
- * This structure is used when performing a call to obtain timeline stream
- * statistics.
- */
-struct kbase_uk_tlstream_stats {
- union uk_header header; /**< UK structure header. */
- /* IN */
- /* OUT */
- u32 bytes_collected;
- u32 bytes_generated;
-};
-#endif /* MALI_UNIT_TEST */
-
-/**
- * struct struct kbase_uk_prfcnt_value for the KBASE_FUNC_SET_PRFCNT_VALUES ioctl
- * @header: UK structure header
- * @data: Counter samples for the dummy model
- * @size:............Size of the counter sample data
- */
-struct kbase_uk_prfcnt_values {
- union uk_header header;
- /* IN */
- u32 *data;
- u32 size;
-};
-
-/**
- * struct kbase_uk_soft_event_update - User/Kernel space data exchange structure
- * @header: UK structure header
- * @evt: the GPU address containing the event
- * @new_status: the new event status, must be either BASE_JD_SOFT_EVENT_SET or
- * BASE_JD_SOFT_EVENT_RESET
- * @flags: reserved for future uses, must be set to 0
- *
- * This structure is used to update the status of a software event. If the
- * event's status is set to BASE_JD_SOFT_EVENT_SET, any job currently waiting
- * on this event will complete.
- */
-struct kbase_uk_soft_event_update {
- union uk_header header;
- /* IN */
- u64 evt;
- u32 new_status;
- u32 flags;
-};
-
-/**
- * struct kbase_uk_mem_jit_init - User/Kernel space data exchange structure
- * @header: UK structure header
- * @va_pages: Number of virtual pages required for JIT
- *
- * This structure is used when requesting initialization of JIT.
- */
-struct kbase_uk_mem_jit_init {
- union uk_header header;
- /* IN */
- u64 va_pages;
-};
-
-enum kbase_uk_function_id {
- KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0),
- KBASE_FUNC_MEM_IMPORT = (UK_FUNC_ID + 1),
- KBASE_FUNC_MEM_COMMIT = (UK_FUNC_ID + 2),
- KBASE_FUNC_MEM_QUERY = (UK_FUNC_ID + 3),
- KBASE_FUNC_MEM_FREE = (UK_FUNC_ID + 4),
- KBASE_FUNC_MEM_FLAGS_CHANGE = (UK_FUNC_ID + 5),
- KBASE_FUNC_MEM_ALIAS = (UK_FUNC_ID + 6),
-
- /* UK_FUNC_ID + 7 not in use since BASE_LEGACY_UK6_SUPPORT dropped */
-
- KBASE_FUNC_SYNC = (UK_FUNC_ID + 8),
-
- KBASE_FUNC_POST_TERM = (UK_FUNC_ID + 9),
-
- KBASE_FUNC_HWCNT_SETUP = (UK_FUNC_ID + 10),
- KBASE_FUNC_HWCNT_DUMP = (UK_FUNC_ID + 11),
- KBASE_FUNC_HWCNT_CLEAR = (UK_FUNC_ID + 12),
-
- KBASE_FUNC_GPU_PROPS_REG_DUMP = (UK_FUNC_ID + 14),
-
- KBASE_FUNC_FIND_CPU_OFFSET = (UK_FUNC_ID + 15),
-
- KBASE_FUNC_GET_VERSION = (UK_FUNC_ID + 16),
- KBASE_FUNC_SET_FLAGS = (UK_FUNC_ID + 18),
-
- KBASE_FUNC_SET_TEST_DATA = (UK_FUNC_ID + 19),
- KBASE_FUNC_INJECT_ERROR = (UK_FUNC_ID + 20),
- KBASE_FUNC_MODEL_CONTROL = (UK_FUNC_ID + 21),
-
- /* UK_FUNC_ID + 22 not in use since BASE_LEGACY_UK8_SUPPORT dropped */
-
- KBASE_FUNC_FENCE_VALIDATE = (UK_FUNC_ID + 23),
- KBASE_FUNC_STREAM_CREATE = (UK_FUNC_ID + 24),
- KBASE_FUNC_GET_PROFILING_CONTROLS = (UK_FUNC_ID + 25),
- KBASE_FUNC_SET_PROFILING_CONTROLS = (UK_FUNC_ID + 26),
- /* to be used only for testing
- * purposes, otherwise these controls
- * are set through gator API */
-
- KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD = (UK_FUNC_ID + 27),
- KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28),
- KBASE_FUNC_DISJOINT_QUERY = (UK_FUNC_ID + 29),
-
- KBASE_FUNC_GET_CONTEXT_ID = (UK_FUNC_ID + 31),
-
- KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4 = (UK_FUNC_ID + 32),
-#if MALI_UNIT_TEST
- KBASE_FUNC_TLSTREAM_TEST = (UK_FUNC_ID + 33),
- KBASE_FUNC_TLSTREAM_STATS = (UK_FUNC_ID + 34),
-#endif /* MALI_UNIT_TEST */
- KBASE_FUNC_TLSTREAM_FLUSH = (UK_FUNC_ID + 35),
-
- KBASE_FUNC_HWCNT_READER_SETUP = (UK_FUNC_ID + 36),
-
-#ifdef SUPPORT_MALI_NO_MALI
- KBASE_FUNC_SET_PRFCNT_VALUES = (UK_FUNC_ID + 37),
-#endif
-
- KBASE_FUNC_SOFT_EVENT_UPDATE = (UK_FUNC_ID + 38),
-
- KBASE_FUNC_MEM_JIT_INIT = (UK_FUNC_ID + 39),
-
- KBASE_FUNC_TLSTREAM_ACQUIRE = (UK_FUNC_ID + 40),
-
- KBASE_FUNC_MAX
-};
-
-#endif /* _KBASE_UKU_H_ */
-
diff --git a/mali_kbase/mali_kbase_vinstr.c b/mali_kbase/mali_kbase_vinstr.c
index ed12945..c0d4292 100644
--- a/mali_kbase/mali_kbase_vinstr.c
+++ b/mali_kbase/mali_kbase_vinstr.c
@@ -33,6 +33,9 @@
#include <mali_kbase_hwcnt_reader.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_tlstream.h>
+#ifdef CONFIG_MALI_NO_MALI
+#include <backend/gpu/mali_kbase_model_dummy.h>
+#endif
/*****************************************************************************/
diff --git a/mali_kbase/mali_kbase_vinstr.h b/mali_kbase/mali_kbase_vinstr.h
index 6207d25..4dbf7ee 100644
--- a/mali_kbase/mali_kbase_vinstr.h
+++ b/mali_kbase/mali_kbase_vinstr.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -18,7 +18,6 @@
#ifndef _KBASE_VINSTR_H_
#define _KBASE_VINSTR_H_
-#include <mali_kbase.h>
#include <mali_kbase_hwcnt_reader.h>
/*****************************************************************************/
@@ -26,6 +25,29 @@
struct kbase_vinstr_context;
struct kbase_vinstr_client;
+struct kbase_uk_hwcnt_setup {
+ /* IN */
+ u64 dump_buffer;
+ u32 jm_bm;
+ u32 shader_bm;
+ u32 tiler_bm;
+ u32 unused_1; /* keep for backwards compatibility */
+ u32 mmu_l2_bm;
+ u32 padding;
+ /* OUT */
+};
+
+struct kbase_uk_hwcnt_reader_setup {
+ /* IN */
+ u32 buffer_count;
+ u32 jm_bm;
+ u32 shader_bm;
+ u32 tiler_bm;
+ u32 mmu_l2_bm;
+
+ /* OUT */
+ s32 fd;
+};
/*****************************************************************************/
/**
diff --git a/mali_kbase/mali_midg_regmap.h b/mali_kbase/mali_midg_regmap.h
index 554ed8d..59adfb8 100644
--- a/mali_kbase/mali_midg_regmap.h
+++ b/mali_kbase/mali_midg_regmap.h
@@ -92,6 +92,7 @@
#define TEXTURE_FEATURES_0 0x0B0 /* (RO) Support flags for indexed texture formats 0..31 */
#define TEXTURE_FEATURES_1 0x0B4 /* (RO) Support flags for indexed texture formats 32..63 */
#define TEXTURE_FEATURES_2 0x0B8 /* (RO) Support flags for indexed texture formats 64..95 */
+#define TEXTURE_FEATURES_3 0x0BC /* (RO) Support flags for texture order */
#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2))
@@ -557,6 +558,13 @@
#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT (12)
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS (0x7 << L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT (15)
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES (0x7 << L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT)
+
/* End L2_MMU_CONFIG register */
/* THREAD_* registers */
diff --git a/mali_kbase/platform/devicetree/mali_kbase_config_devicetree.c b/mali_kbase/platform/devicetree/mali_kbase_config_devicetree.c
index 29ccc29..299d0e7 100644
--- a/mali_kbase/platform/devicetree/mali_kbase_config_devicetree.c
+++ b/mali_kbase/platform/devicetree/mali_kbase_config_devicetree.c
@@ -30,6 +30,7 @@ struct kbase_platform_config *kbase_get_platform_config(void)
return &dummy_platform_config;
}
+#ifndef CONFIG_OF
int kbase_platform_register(void)
{
return 0;
@@ -38,3 +39,4 @@ int kbase_platform_register(void)
void kbase_platform_unregister(void)
{
}
+#endif
diff --git a/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c b/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c
index 9fe37c8..372420a 100644
--- a/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c
+++ b/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c
@@ -50,7 +50,8 @@ static void pm_callback_power_off(struct kbase_device *kbdev)
pm_runtime_put_autosuspend(kbdev->dev);
}
-int kbase_device_runtime_init(struct kbase_device *kbdev)
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_init(struct kbase_device *kbdev)
{
int ret = 0;
@@ -70,11 +71,12 @@ int kbase_device_runtime_init(struct kbase_device *kbdev)
return ret;
}
-void kbase_device_runtime_disable(struct kbase_device *kbdev)
+static void kbase_device_runtime_disable(struct kbase_device *kbdev)
{
dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
pm_runtime_disable(kbdev->dev);
}
+#endif
static int pm_callback_runtime_on(struct kbase_device *kbdev)
{
diff --git a/mali_kbase/platform/vexpress/mali_kbase_config_vexpress.c b/mali_kbase/platform/vexpress/mali_kbase_config_vexpress.c
index 15ce2bc..745884f 100644
--- a/mali_kbase/platform/vexpress/mali_kbase_config_vexpress.c
+++ b/mali_kbase/platform/vexpress/mali_kbase_config_vexpress.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,8 +24,6 @@
#include "mali_kbase_cpu_vexpress.h"
#include "mali_kbase_config_platform.h"
-#define HARD_RESET_AT_POWER_OFF 0
-
#ifndef CONFIG_OF
static struct kbase_io_resources io_resources = {
.job_irq_number = 68,
@@ -46,17 +44,6 @@ static int pm_callback_power_on(struct kbase_device *kbdev)
static void pm_callback_power_off(struct kbase_device *kbdev)
{
-#if HARD_RESET_AT_POWER_OFF
- /* Cause a GPU hard reset to test whether we have actually idled the GPU
- * and that we properly reconfigure the GPU on power up.
- * Usually this would be dangerous, but if the GPU is working correctly it should
- * be completely safe as the GPU should not be active at this point.
- * However this is disabled normally because it will most likely interfere with
- * bus logging etc.
- */
- KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
- kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
-#endif
}
struct kbase_pm_callback_conf pm_callbacks = {
diff --git a/mali_kbase/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/mali_kbase/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
index 3ff0930..5a69758 100644
--- a/mali_kbase/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
+++ b/mali_kbase/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,8 +20,6 @@
#include <mali_kbase_defs.h>
#include <mali_kbase_config.h>
-#define HARD_RESET_AT_POWER_OFF 0
-
#ifndef CONFIG_OF
static struct kbase_io_resources io_resources = {
.job_irq_number = 68,
@@ -41,17 +39,6 @@ static int pm_callback_power_on(struct kbase_device *kbdev)
static void pm_callback_power_off(struct kbase_device *kbdev)
{
-#if HARD_RESET_AT_POWER_OFF
- /* Cause a GPU hard reset to test whether we have actually idled the GPU
- * and that we properly reconfigure the GPU on power up.
- * Usually this would be dangerous, but if the GPU is working correctly it should
- * be completely safe as the GPU should not be active at this point.
- * However this is disabled normally because it will most likely interfere with
- * bus logging etc.
- */
- KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
- kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
-#endif
}
struct kbase_pm_callback_conf pm_callbacks = {
diff --git a/mali_kbase/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/mali_kbase/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
index 76ffe4a..5d8ec2d 100644
--- a/mali_kbase/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
+++ b/mali_kbase/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -23,8 +23,6 @@
#include <mali_kbase_config.h>
#include "mali_kbase_cpu_vexpress.h"
-#define HARD_RESET_AT_POWER_OFF 0
-
#ifndef CONFIG_OF
static struct kbase_io_resources io_resources = {
.job_irq_number = 75,
@@ -44,17 +42,6 @@ static int pm_callback_power_on(struct kbase_device *kbdev)
static void pm_callback_power_off(struct kbase_device *kbdev)
{
-#if HARD_RESET_AT_POWER_OFF
- /* Cause a GPU hard reset to test whether we have actually idled the GPU
- * and that we properly reconfigure the GPU on power up.
- * Usually this would be dangerous, but if the GPU is working correctly it should
- * be completely safe as the GPU should not be active at this point.
- * However this is disabled normally because it will most likely interfere with
- * bus logging etc.
- */
- KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
- kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
-#endif
}
struct kbase_pm_callback_conf pm_callbacks = {
diff --git a/mali_kbase/sconscript b/mali_kbase/sconscript
index e738dd7..f11933a 100644
--- a/mali_kbase/sconscript
+++ b/mali_kbase/sconscript
@@ -46,20 +46,12 @@ make_args = env.kernel_get_config_defines(ret_list = True) + [
'MALI_RELEASE_NAME=%s' % env['mali_release_name'],
'MALI_MOCK_TEST=%s' % mock_test,
'MALI_CUSTOMER_RELEASE=%s' % env['release'],
- 'MALI_INSTRUMENTATION_LEVEL=%s' % env['instr'],
'MALI_COVERAGE=%s' % env['coverage'],
]
kbase = env.BuildKernelModule('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src,
make_args = make_args)
-# Add a dependency on kds.ko.
-# Only necessary when KDS is not built into the kernel.
-#
-if env['os'] != 'android':
- if not env.KernelConfigEnabled("CONFIG_KDS"):
- env.Depends(kbase, '$STATIC_LIB_PATH/kds.ko')
-
# need Module.symvers from ump.ko build
if int(env['ump']) == 1:
env.Depends(kbase, '$STATIC_LIB_PATH/ump.ko')
diff --git a/mali_kbase/tests/include/kutf/kutf_helpers.h b/mali_kbase/tests/include/kutf/kutf_helpers.h
index 3f1dfc2..3667687 100644
--- a/mali_kbase/tests/include/kutf/kutf_helpers.h
+++ b/mali_kbase/tests/include/kutf/kutf_helpers.h
@@ -21,196 +21,52 @@
/* kutf_helpers.h
* Test helper functions for the kernel UTF test infrastructure.
*
- * This collection of helper functions are provided as 'stock' implementation
- * helpers for certain features of kutf. Tests can implement common/boilerplate
- * functionality using these, whilst still providing them the option of
- * implementing completely custom functions themselves to use those kutf
- * features.
+ * These functions provide methods for enqueuing/dequeuing lines of text sent
+ * by user space. They are used to implement the transfer of "userdata" from
+ * user space to kernel.
*/
#include <kutf/kutf_suite.h>
-#include <kutf/kutf_mem.h>
-#include <linux/wait.h>
/**
- * enum kutf_helper_textbuf_flag - flags for textbufs
- * @KUTF_HELPER_TEXTBUF_FLAG_DYING: Test is dying, textbuf should not allow
- * writes, nor block on empty.
- */
-enum kutf_helper_textbuf_flag {
- KUTF_HELPER_TEXTBUF_FLAG_DYING = (1u << 0),
-};
-
-/**
- * struct kutf_helper_textbuf_line - Structure representing a line of text
+ * kutf_helper_input_dequeue() - Dequeue a line sent by user space
+ * @context: KUTF context
+ * @str_size: Pointer to an integer to receive the size of the string
*
- * The string itself is stored immediately after this.
+ * If no line is available then this function will wait (interruptibly) until
+ * a line is available.
*
- * @node: List node for the textbuf's textbuf_list
- * @str_size: Length of the string buffer, including the \0 terminator
- * @str: 'Flexible array' for the string representing the line
+ * Return: The line dequeued, ERR_PTR(-EINTR) if interrupted or NULL on end
+ * of data.
*/
-struct kutf_helper_textbuf_line {
- struct list_head node;
- int str_size;
- char str[];
-};
-
-/**
- * struct kutf_helper_textbuf - Structure to representing sequential lines of
- * text
- * @lock: mutex to hold whilst accessing the structure
- * @nr_user_clients: Number of userspace clients connected via an open()
- * call
- * @mempool: mempool for allocating lines
- * @scratchpad: scratch area for receiving text of size max_line_size
- * @used_bytes: number of valid bytes in the scratchpad
- * @prev_pos: Previous position userspace has accessed
- * @prev_line_pos: Previous start of line position userspace has accessed
- * @textbuf_list: List head to store all the lines of text
- * @max_line_size: Maximum size in memory allowed for a line of text
- * @max_nr_lines: Maximum number of lines permitted in this textbuf
- * @nr_lines: Number of entries in textbuf_list
- * @flags: Flags indicating state of the textbuf, using values
- * from enum kutf_helper_textbuf_flag
- * @user_opened_wq: Waitq for when there's at least one userspace client
- * connected to the textbuf via an open() call
- * @not_full_wq: Waitq for when the textbuf can be enqueued into/can
- * consume data from userspace
- * @not_empty_wq: Waitq for when the textbuf can be dequeued from/can
- * produce data for userspace
- */
-
-struct kutf_helper_textbuf {
- struct mutex lock;
- int nr_user_clients;
- struct kutf_mempool *mempool;
- char *scratchpad;
- int used_bytes;
- loff_t prev_pos;
- loff_t prev_line_pos;
- struct list_head textbuf_list;
- int max_line_size;
- int max_nr_lines;
- int nr_lines;
- unsigned long flags;
- wait_queue_head_t user_opened_wq;
- wait_queue_head_t not_full_wq;
- wait_queue_head_t not_empty_wq;
-
-};
-
-/* stock callbacks for userspace to read from/write to the 'data' file as a
- * textbuf */
-extern struct kutf_userdata_ops kutf_helper_textbuf_userdata_ops;
+char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size);
/**
- * kutf_helper_textbuf_init() - init a textbuf for use as a 'data' file
- * consumer/producer
- * @textbuf: textbuf to initialize
- * @mempool: mempool to allocate from
- * @max_line_size: maximum line size expected to/from userspace
- * @max_nr_lines: maximum number of lines to expect to/from userspace
+ * kutf_helper_input_enqueue() - Enqueue a line sent by user space
+ * @context: KUTF context
+ * @str: The user space address of the line
+ * @size: The length in bytes of the string
*
- * Initialize a textbuf so that it can consume writes made to the 'data' file,
- * and produce reads for userspace on the 'data' file. Tests may then read the
- * lines written by userspace, or fill the buffer so it may be read back by
- * userspace.
+ * This function will use copy_from_user to copy the string out of user space.
+ * The string need not be NULL-terminated (@size should not include the NULL
+ * termination).
*
- * The caller should write the @textbuf pointer into the kutf_context's
- * userdata_producer_priv or userdata_consumer_priv member during fixture
- * creation.
+ * As a special case @str==NULL and @size==0 is valid to mark the end of input,
+ * but callers should use kutf_helper_input_enqueue_end_of_data() instead.
*
- * Usually a test will have separate textbufs for userspace to write to and
- * read from. Using the same one for both will echo back to the user what they
- * are writing.
- *
- * Lines are understood as being separated by the '\n' character, but no '\n'
- * characters will be observed by the test
- *
- * @max_line_size puts an upper bound on the size of lines in a textbuf,
- * including the \0 terminator. Lines exceeding this will be truncated,
- * effectively ignoring incoming data until the next '\n'
- *
- * Combining this with @max_nr_lines puts an upper bound on the size of the
- * file read in
- *
- * Return: 0 on success, or negative value on error.
+ * Return: 0 on success, -EFAULT if the line cannot be copied from user space,
+ * -ENOMEM if out of memory.
*/
-int kutf_helper_textbuf_init(struct kutf_helper_textbuf *textbuf,
- struct kutf_mempool *mempool, int max_line_size,
- int max_nr_lines);
+int kutf_helper_input_enqueue(struct kutf_context *context,
+ const char __user *str, size_t size);
/**
- * kutf_helper_textbuf_wait_for_user() - wait for userspace to open the 'data'
- * file
- * @textbuf: textbuf to wait on
- *
- * This can be used to synchronize with userspace so that subsequent calls to
- * kutf_helper_textbuf_dequeue() and kutf_helper_textbuf_enqueue() should
- * succeed.
- *
- * Waiting is done on a timeout.
- *
- * There is of course no guarantee that userspace will keep the file open after
- * this, but any error in the dequeue/enqueue functions afterwards can be
- * treated as such rather than "we're still waiting for userspace to begin"
- *
- * Return: 0 if waited successfully, -ETIMEDOUT if we exceeded the
- * timeout, or some other negative value if there was an
- * error during waiting.
- */
-
-int kutf_helper_textbuf_wait_for_user(struct kutf_helper_textbuf *textbuf);
-
-
-/**
- * kutf_helper_textbuf_dequeue() - dequeue a line from a textbuf
- * @textbuf: textbuf dequeue a line as a string from
- * @str_size: pointer to storage to receive the size of the string,
- * which includes the '\0' terminator, or NULL if not
- * required
- *
- * Dequeue (remove) a line from the start of the textbuf as a string, and
- * return it.
- *
- * If no lines are available, then this will block until a line has been
- * submitted. If a userspace client is not connected and there are no remaining
- * lines, then this function returns NULL instead.
- *
- * The memory for the string comes from the kutf_mempool given during
- * initialization of the textbuf, and shares the same lifetime as it.
- *
- * Return: pointer to the next line of the textbuf. NULL indicated
- * all userspace clients disconnected. An error value to be
- * checked with IS_ERR() family of functions if a signal or
- * some other error occurred
- */
-char *kutf_helper_textbuf_dequeue(struct kutf_helper_textbuf *textbuf,
- int *str_size);
-
-/**
- * kutf_helper_textbuf_enqueue() - enqueue a line to a textbuf
- * @textbuf: textbuf to enqueue a line as a string to
- * @enqueue_str: pointer to the string to enqueue to the textbuf
- * @buf_max_size: maximum size of the buffer holding @enqueue_str
- *
- * Enqueue (add) a line to the end of a textbuf as a string.
- *
- * The caller should avoid placing '\n' characters in their strings, as these
- * will not be split into multiple lines.
- *
- * A copy of the string will be made into the textbuf, so @enqueue_str can be
- * freed immediately after if.the caller wishes to do so.
- *
- * If the maximum amount of lines has been reached, then this will block until
- * a line has been removed to make space. If a userspace client is not
- * connected and there is no space available, then this function returns
- * -EBUSY.
+ * kutf_helper_input_enqueue_end_of_data() - Signal no more data is to be sent
+ * @context: KUTF context
*
- * Return: 0 on success, or negative value on error
+ * After this function has been called, kutf_helper_input_dequeue() will always
+ * return NULL.
*/
-int kutf_helper_textbuf_enqueue(struct kutf_helper_textbuf *textbuf,
- char *enqueue_str, int buf_max_size);
+void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context);
#endif /* _KERNEL_UTF_HELPERS_H_ */
diff --git a/mali_kbase/tests/include/kutf/kutf_helpers_user.h b/mali_kbase/tests/include/kutf/kutf_helpers_user.h
index 759bf71..c7b5263 100644
--- a/mali_kbase/tests/include/kutf/kutf_helpers_user.h
+++ b/mali_kbase/tests/include/kutf/kutf_helpers_user.h
@@ -80,18 +80,16 @@ enum kutf_helper_err {
};
-/* textbuf Send named NAME=value pair, u64 value
+/* Send named NAME=value pair, u64 value
*
* NAME must match [A-Z0-9_]\+ and can be up to MAX_VAL_NAME_LEN characters long
*
- * This is assuming the kernel-side test is using the 'textbuf' helpers
- *
* Any failure will be logged on the suite's current test fixture
*
* Returns 0 on success, non-zero on failure
*/
-int kutf_helper_textbuf_send_named_u64(struct kutf_context *context,
- struct kutf_helper_textbuf *textbuf, char *val_name, u64 val);
+int kutf_helper_send_named_u64(struct kutf_context *context,
+ const char *val_name, u64 val);
/* Get the maximum length of a string that can be represented as a particular
* NAME="value" pair without string-value truncation in the kernel's buffer
@@ -101,53 +99,48 @@ int kutf_helper_textbuf_send_named_u64(struct kutf_context *context,
* without having the string value truncated. Any string longer than this will
* be truncated at some point during communication to this size.
*
- * The calculation is valid both for sending strings of val_str_len to kernel,
- * and for receiving a string that was originally val_str_len from the kernel.
- *
- * It is assumed that valname is a valid name for
- * kutf_test_helpers_textbuf_send_named_str(), and no checking will be made to
+ * It is assumed that val_name is a valid name for
+ * kutf_helper_send_named_str(), and no checking will be made to
* ensure this.
*
* Returns the maximum string length that can be represented, or a negative
* value if the NAME="value" encoding itself wouldn't fit in kern_buf_sz
*/
-int kutf_helper_textbuf_max_str_len_for_kern(char *val_name, int kern_buf_sz);
+int kutf_helper_max_str_len_for_kern(const char *val_name, int kern_buf_sz);
-/* textbuf Send named NAME="str" pair
+/* Send named NAME="str" pair
*
* no escaping allowed in str. Any of the following characters will terminate
* the string: '"' '\\' '\n'
*
* NAME must match [A-Z0-9_]\+ and can be up to MAX_VAL_NAME_LEN characters long
*
- * This is assuming the kernel-side test is using the 'textbuf' helpers
- *
* Any failure will be logged on the suite's current test fixture
*
* Returns 0 on success, non-zero on failure */
-int kutf_helper_textbuf_send_named_str(struct kutf_context *context,
- struct kutf_helper_textbuf *textbuf, char *val_name,
- char *val_str);
+int kutf_helper_send_named_str(struct kutf_context *context,
+ const char *val_name, const char *val_str);
-/* textbuf Receive named NAME=value pair
+/* Receive named NAME=value pair
*
* This can receive u64 and string values - check named_val->type
*
* If you are not planning on dynamic handling of the named value's name and
- * type, then kutf_test_helpers_textbuf_receive_check_val() is more useful as a
+ * type, then kutf_helper_receive_check_val() is more useful as a
* convenience function.
*
* String members of named_val will come from memory allocated on the fixture's mempool
*
- * Returns 0 on success. Negative value on failure to receive from the 'data'
+ * Returns 0 on success. Negative value on failure to receive from the 'run'
* file, positive value indicates an enum kutf_helper_err value for correct
* reception of data but invalid parsing */
-int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_val,
- struct kutf_helper_textbuf *textbuf);
+int kutf_helper_receive_named_val(
+ struct kutf_context *context,
+ struct kutf_helper_named_val *named_val);
-/* textbuf Receive and validate NAME=value pair
+/* Receive and validate NAME=value pair
*
- * As with kutf_test_helpers_textbuf_receive_named_val, but validate that the
+ * As with kutf_helper_receive_named_val, but validate that the
* name and type are as expected, as a convenience for a common pattern found
* in tests.
*
@@ -168,9 +161,11 @@ int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_va
*
* The rationale behind this is that we'd prefer to continue the rest of the
* test with failures propagated, rather than hitting a timeout */
-int kutf_helper_textbuf_receive_check_val(struct kutf_helper_named_val *named_val,
- struct kutf_context *context, struct kutf_helper_textbuf *textbuf,
- char *expect_val_name, enum kutf_helper_valtype expect_val_type);
+int kutf_helper_receive_check_val(
+ struct kutf_helper_named_val *named_val,
+ struct kutf_context *context,
+ const char *expect_val_name,
+ enum kutf_helper_valtype expect_val_type);
/* Output a named value to kmsg */
void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val);
diff --git a/mali_kbase/tests/include/kutf/kutf_resultset.h b/mali_kbase/tests/include/kutf/kutf_resultset.h
index 1cc85f1..6787f71 100644
--- a/mali_kbase/tests/include/kutf/kutf_resultset.h
+++ b/mali_kbase/tests/include/kutf/kutf_resultset.h
@@ -42,7 +42,12 @@
* @KUTF_RESULT_FATAL: The test result failed with a fatal error.
* @KUTF_RESULT_ABORT: The test result failed due to a non-UTF
* assertion failure.
- * @KUTF_RESULT_COUNT: The current number of possible status messages.
+ * @KUTF_RESULT_USERDATA: User data is ready to be read,
+ * this is not seen outside the kernel
+ * @KUTF_RESULT_USERDATA_WAIT: Waiting for user data to be sent,
+ * this is not seen outside the kernel
+ * @KUTF_RESULT_TEST_FINISHED: The test has finished, no more results will
+ * be produced. This is not seen outside kutf
*/
enum kutf_result_status {
KUTF_RESULT_BENCHMARK = -3,
@@ -57,7 +62,9 @@ enum kutf_result_status {
KUTF_RESULT_FATAL = 5,
KUTF_RESULT_ABORT = 6,
- KUTF_RESULT_COUNT
+ KUTF_RESULT_USERDATA = 7,
+ KUTF_RESULT_USERDATA_WAIT = 8,
+ KUTF_RESULT_TEST_FINISHED = 9
};
/* The maximum size of a kutf_result_status result when
@@ -68,6 +75,9 @@ enum kutf_result_status {
#ifdef __KERNEL__
#include <kutf/kutf_mem.h>
+#include <linux/wait.h>
+
+struct kutf_context;
/**
* struct kutf_result - Represents a single test result.
@@ -82,40 +92,85 @@ struct kutf_result {
};
/**
+ * KUTF_RESULT_SET_WAITING_FOR_INPUT - Test is waiting for user data
+ *
+ * This flag is set within a struct kutf_result_set whenever the test is blocked
+ * waiting for user data. Attempts to dequeue results when this flag is set
+ * will cause a dummy %KUTF_RESULT_USERDATA_WAIT result to be produced. This
+ * is used to output a warning message and end of file.
+ */
+#define KUTF_RESULT_SET_WAITING_FOR_INPUT 1
+
+/**
+ * struct kutf_result_set - Represents a set of results.
+ * @results: List head of a struct kutf_result list for storing the results
+ * @waitq: Wait queue signalled whenever new results are added.
+ * @flags: Flags see %KUTF_RESULT_SET_WAITING_FOR_INPUT
+ */
+struct kutf_result_set {
+ struct list_head results;
+ wait_queue_head_t waitq;
+ int flags;
+};
+
+/**
* kutf_create_result_set() - Create a new result set
* to which results can be added.
*
- * Return: The created resultset.
+ * Return: The created result set.
*/
struct kutf_result_set *kutf_create_result_set(void);
/**
- * kutf_add_result() - Add a result to the end of an existing resultset.
+ * kutf_add_result() - Add a result to the end of an existing result set.
*
- * @mempool: The memory pool to allocate the result storage from.
- * @set: The resultset to add the result to.
+ * @context: The kutf context
* @status: The result status to add.
* @message: The result message to add.
+ *
+ * Return: 0 if the result is successfully added. -ENOMEM if allocation fails.
*/
-void kutf_add_result(struct kutf_mempool *mempool, struct kutf_result_set *set,
+int kutf_add_result(struct kutf_context *context,
enum kutf_result_status status, const char *message);
/**
- * kutf_remove_result() - Remove a result from the head of a resultset.
- * @set: The resultset.
+ * kutf_remove_result() - Remove a result from the head of a result set.
+ * @set: The result set.
+ *
+ * This function will block until there is a result to read. The wait is
+ * interruptible, so this function will return with an ERR_PTR if interrupted.
*
- * Return: result or NULL if there are no further results in the resultset.
+ * Return: result or ERR_PTR if interrupted
*/
struct kutf_result *kutf_remove_result(
struct kutf_result_set *set);
/**
- * kutf_destroy_result_set() - Free a previously created resultset.
+ * kutf_destroy_result_set() - Free a previously created result set.
*
* @results: The result set whose resources to free.
*/
void kutf_destroy_result_set(struct kutf_result_set *results);
+/**
+ * kutf_set_waiting_for_input() - The test is waiting for userdata
+ *
+ * @set: The result set to update
+ *
+ * Causes the result set to always have results and return a fake
+ * %KUTF_RESULT_USERDATA_WAIT result.
+ */
+void kutf_set_waiting_for_input(struct kutf_result_set *set);
+
+/**
+ * kutf_clear_waiting_for_input() - The test is no longer waiting for userdata
+ *
+ * @set: The result set to update
+ *
+ * Cancels the effect of kutf_set_waiting_for_input()
+ */
+void kutf_clear_waiting_for_input(struct kutf_result_set *set);
+
#endif /* __KERNEL__ */
#endif /* _KERNEL_UTF_RESULTSET_H_ */
diff --git a/mali_kbase/tests/include/kutf/kutf_suite.h b/mali_kbase/tests/include/kutf/kutf_suite.h
index cba2b2d..ca30e57 100644
--- a/mali_kbase/tests/include/kutf/kutf_suite.h
+++ b/mali_kbase/tests/include/kutf/kutf_suite.h
@@ -27,10 +27,17 @@
*/
#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <kutf/kutf_mem.h>
#include <kutf/kutf_resultset.h>
+/* Arbitrary maximum size to prevent user space allocating too much kernel
+ * memory
+ */
+#define KUTF_MAX_LINE_LENGTH (1024u)
+
/**
* Pseudo-flag indicating an absence of any specified test class. Note that
* tests should not be annotated with this constant as it is simply a zero
@@ -149,24 +156,42 @@ union kutf_callback_data {
};
/**
- * struct kutf_userdata_ops- Structure defining methods to exchange data
- * with userspace via the 'data' file
- * @open: Function used to notify when the 'data' file was opened
- * @release: Function used to notify when the 'data' file was closed
- * @notify_ended: Function used to notify when the test has ended.
- * @consumer: Function used to consume writes from userspace
- * @producer: Function used to produce data for userspace to read
+ * struct kutf_userdata_line - A line of user data to be returned to the user
+ * @node: struct list_head to link this into a list
+ * @str: The line of user data to return to user space
+ * @size: The number of bytes within @str
+ */
+struct kutf_userdata_line {
+ struct list_head node;
+ char *str;
+ size_t size;
+};
+
+/**
+ * KUTF_USERDATA_WARNING_OUTPUT - Flag specifying that a warning has been output
*
- * All ops can be NULL.
- */
-struct kutf_userdata_ops {
- int (*open)(void *priv);
- void (*release)(void *priv);
- void (*notify_ended)(void *priv);
- ssize_t (*consumer)(void *priv, const char __user *userbuf,
- size_t userbuf_len, loff_t *ppos);
- ssize_t (*producer)(void *priv, char __user *userbuf,
- size_t userbuf_len, loff_t *ppos);
+ * If user space reads the "run" file while the test is waiting for user data,
+ * then the framework will output a warning message and set this flag within
+ * struct kutf_userdata. A subsequent read will then simply return an end of
+ * file condition rather than outputting the warning again. The upshot of this
+ * is that simply running 'cat' on a test which requires user data will produce
+ * the warning followed by 'cat' exiting due to EOF - which is much more user
+ * friendly than blocking indefinitely waiting for user data.
+ */
+#define KUTF_USERDATA_WARNING_OUTPUT 1
+
+/**
+ * struct kutf_userdata - Structure holding user data
+ * @flags: See %KUTF_USERDATA_WARNING_OUTPUT
+ * @input_head: List of struct kutf_userdata_line containing user data
+ * to be read by the kernel space test.
+ * @input_waitq: Wait queue signalled when there is new user data to be
+ * read by the kernel space test.
+ */
+struct kutf_userdata {
+ unsigned long flags;
+ struct list_head input_head;
+ wait_queue_head_t input_waitq;
};
/**
@@ -185,13 +210,8 @@ struct kutf_userdata_ops {
* @status: The status of the currently running fixture.
* @expected_status: The expected status on exist of the currently
* running fixture.
- * @userdata_consumer_priv: Parameter to pass into kutf_userdata_ops
- * consumer function. Must not be NULL if a
- * consumer function was specified
- * @userdata_producer_priv: Parameter to pass into kutf_userdata_ops
- * producer function. Must not be NULL if a
- * producer function was specified
- * @userdata_dentry: The debugfs file for userdata exchange
+ * @work: Work item to enqueue onto the work queue to run the test
+ * @userdata: Structure containing the user data for the test to read
*/
struct kutf_context {
struct kref kref;
@@ -205,9 +225,9 @@ struct kutf_context {
struct kutf_result_set *result_set;
enum kutf_result_status status;
enum kutf_result_status expected_status;
- void *userdata_consumer_priv;
- void *userdata_producer_priv;
- struct dentry *userdata_dentry;
+
+ struct work_struct work;
+ struct kutf_userdata userdata;
};
/**
@@ -391,30 +411,6 @@ void kutf_add_test_with_filters_and_data(
unsigned int filters,
union kutf_callback_data test_data);
-/**
- * kutf_add_test_with_filters_data_and_userdata() - Add a test to a kernel test suite with filters and setup for
- * receiving data from userside
- * @suite: The suite to add the test to.
- * @id: The ID of the test.
- * @name: The name of the test.
- * @execute: Callback to the test function to run.
- * @filters: A set of filtering flags, assigning test categories.
- * @test_data: Test specific callback data, provided during the
- * running of the test in the kutf_context
- * @userdata_ops: Callbacks to use for sending and receiving data to
- * userspace. A copy of the struct kutf_userdata_ops is
- * taken. Each callback can be NULL.
- *
- */
-void kutf_add_test_with_filters_data_and_userdata(
- struct kutf_suite *suite,
- unsigned int id,
- const char *name,
- void (*execute)(struct kutf_context *context),
- unsigned int filters,
- union kutf_callback_data test_data,
- struct kutf_userdata_ops *userdata_ops);
-
/* ============================================================================
Test functions
diff --git a/mali_kbase/tests/kutf/kutf_helpers.c b/mali_kbase/tests/kutf/kutf_helpers.c
index 793d58c..bf887a5 100644
--- a/mali_kbase/tests/kutf/kutf_helpers.c
+++ b/mali_kbase/tests/kutf/kutf_helpers.c
@@ -18,8 +18,6 @@
/* Kernel UTF test helpers */
#include <kutf/kutf_helpers.h>
-/* 10s timeout for user thread to open the 'data' file once the test is started */
-#define USERDATA_WAIT_TIMEOUT_MS 10000
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
@@ -27,742 +25,100 @@
#include <linux/wait.h>
#include <linux/uaccess.h>
+static DEFINE_SPINLOCK(kutf_input_lock);
-int kutf_helper_textbuf_init(struct kutf_helper_textbuf *textbuf,
- struct kutf_mempool *mempool, int max_line_size,
- int max_nr_lines)
+static bool pending_input(struct kutf_context *context)
{
- textbuf->scratchpad = kutf_mempool_alloc(mempool, max_line_size);
+ bool input_pending;
- if (!textbuf->scratchpad)
- return -ENOMEM;
-
- mutex_init(&textbuf->lock);
- textbuf->nr_user_clients = 0;
- textbuf->mempool = mempool;
- textbuf->used_bytes = 0;
- textbuf->prev_pos = 0;
- textbuf->prev_line_pos = 0;
- INIT_LIST_HEAD(&textbuf->textbuf_list);
- textbuf->max_line_size = max_line_size;
- textbuf->max_nr_lines = max_nr_lines;
- textbuf->nr_lines = 0;
- textbuf->flags = 0ul;
- init_waitqueue_head(&textbuf->user_opened_wq);
- init_waitqueue_head(&textbuf->not_full_wq);
- init_waitqueue_head(&textbuf->not_empty_wq);
-
- return 0;
-}
-EXPORT_SYMBOL(kutf_helper_textbuf_init);
-
-/**
- * kutf_helper_textbuf_open() - Notify that userspace has opened the 'data'
- * file for a textbuf
- *
- * @priv: private pointer from a kutf_userdata_exchange, which
- * should be a pointer to a struct kutf_helper_textbuf
- *
- * Return: 0 on success, or negative value on error.
- */
-static int kutf_helper_textbuf_open(void *priv)
-{
- struct kutf_helper_textbuf *textbuf = priv;
- int ret;
-
- ret = mutex_lock_interruptible(&textbuf->lock);
- if (ret)
- return -ERESTARTSYS;
-
- ++(textbuf->nr_user_clients);
- wake_up(&textbuf->user_opened_wq);
-
- mutex_unlock(&textbuf->lock);
- return ret;
-}
-
-/**
- * kutf_helper_textbuf_release() - Notify that userspace has closed the 'data'
- * file for a textbuf
- *
- * @priv: private pointer from a kutf_userdata_exchange, which
- * should be a pointer to a struct kutf_helper_textbuf
- */
-static void kutf_helper_textbuf_release(void *priv)
-{
- struct kutf_helper_textbuf *textbuf = priv;
-
- /* Shouldn't use interruptible variants here because if a signal is
- * pending, we can't abort and restart the call */
- mutex_lock(&textbuf->lock);
-
- --(textbuf->nr_user_clients);
- if (!textbuf->nr_user_clients) {
- /* All clients disconnected, wakeup kernel-side waiters */
- wake_up(&textbuf->not_full_wq);
- wake_up(&textbuf->not_empty_wq);
- }
-
- mutex_unlock(&textbuf->lock);
-}
-
-/**
- * kutf_helper_textbuf_notify_test_ended() - Notify that the test has ended
- *
- * @priv: private pointer from a kutf_userdata_exchange, which
- * should be a pointer to a struct kutf_helper_textbuf
- *
- * After this call, userspace should be allowed to finish remaining reads but
- * not make new ones, and not be allowed to make new writes.
- */
-static void kutf_helper_textbuf_notify_test_ended(void *priv)
-{
- struct kutf_helper_textbuf *textbuf = priv;
-
- /* Shouldn't use interruptible variants here because if a signal is
- * pending, we can't abort and restart the call */
- mutex_lock(&textbuf->lock);
-
- textbuf->flags |= KUTF_HELPER_TEXTBUF_FLAG_DYING;
-
- /* Consumers waiting due to being full should wake up and abort */
- wake_up(&textbuf->not_full_wq);
- /* Producers waiting due to being empty should wake up and abort */
- wake_up(&textbuf->not_empty_wq);
-
- mutex_unlock(&textbuf->lock);
-}
-
-/* Collect text in a textbuf scratchpad up to (but excluding) specified
- * newline_off, and add it as a textbuf_line
- *
- * newline_off is permissible to be at the character after the end of the
- * scratchpad (i.e. equal to textbuf->max_line_size), for handling when the
- * line was longer than the size of the scratchpad. Nevertheless, the resulting
- * size of the line is kept at textbuf->max_line_size, including the '\0'
- * terminator. That is, the string length will be textbuf->max_line_size-1.
- *
- * Remaining characters strictly after newline_off are moved to the beginning
- * of the scratchpad, to allow space for a longer line to be collected. This
- * means the character specified at newline_off will be removed from/no longer
- * be within the valid region of the scratchpad
- *
- * Returns number of bytes the scratchpad was shortened by, or an error
- * otherwise
- */
-static size_t collect_line(struct kutf_helper_textbuf *textbuf, int newline_off)
-{
- /* '\n' terminator will be replaced as '\0' */
- int str_buf_size;
- struct kutf_helper_textbuf_line *textbuf_line;
- char *str_start;
- int bytes_remain;
- char *scratch = textbuf->scratchpad;
- int nextline_off;
-
- str_buf_size = newline_off + 1;
- if (str_buf_size > textbuf->max_line_size)
- str_buf_size = textbuf->max_line_size;
-
- /* String is stored immediately after the line */
- textbuf_line = kutf_mempool_alloc(textbuf->mempool, str_buf_size + sizeof(struct kutf_helper_textbuf_line));
- if (!textbuf_line)
- return -ENOMEM;
-
- str_start = &textbuf_line->str[0];
-
- /* Copy in string, excluding the terminating '\n' character, replacing
- * it with '\0' */
- strncpy(str_start, scratch, str_buf_size - 1);
- str_start[str_buf_size-1] = '\0';
- textbuf_line->str_size = str_buf_size;
-
- /* Append to the textbuf */
- list_add_tail(&textbuf_line->node, &textbuf->textbuf_list);
- ++(textbuf->nr_lines);
-
- /* Move the rest of the scratchpad to the start */
- nextline_off = newline_off + 1;
- if (nextline_off > textbuf->used_bytes)
- nextline_off = textbuf->used_bytes;
-
- bytes_remain = textbuf->used_bytes - nextline_off;
- memmove(scratch, scratch + nextline_off, bytes_remain);
- textbuf->used_bytes = bytes_remain;
-
- /* Wakeup anyone blocked on empty */
- wake_up(&textbuf->not_empty_wq);
-
- return nextline_off;
-}
-
-/* Buffer size for truncating a string to its newline.
- * Allocated on the stack, so keep it moderately small (within PAGE_SIZE) */
-#define TRUNCATE_BUF_SZ 512
-
-/* Discard input from a userbuf up to a newline, then collect what was in the
- * scratchpad into a new textbuf line */
-static ssize_t collect_longline_truncate(struct kutf_helper_textbuf *textbuf,
- const char __user *userbuf, size_t userbuf_len)
-{
- ssize_t bytes_processed = 0;
-
- while (userbuf_len > 0) {
- int userbuf_copy_sz = userbuf_len;
- size_t res;
- char *newline_ptr;
- char truncate_buf[TRUNCATE_BUF_SZ];
-
- if (userbuf_len > TRUNCATE_BUF_SZ)
- userbuf_copy_sz = TRUNCATE_BUF_SZ;
- else
- userbuf_copy_sz = (int)userbuf_len;
-
- /* copy what we can */
- res = copy_from_user(truncate_buf, userbuf, userbuf_copy_sz);
- if (res == userbuf_copy_sz)
- return -EFAULT;
- userbuf_copy_sz -= res;
-
- /* Search for newline in what was copied */
- newline_ptr = strnchr(truncate_buf, userbuf_copy_sz, '\n');
-
- if (newline_ptr) {
- ssize_t sres;
- /* Newline found: collect scratchpad and exit out */
- int newline_off = newline_ptr - truncate_buf;
-
- sres = collect_line(textbuf, textbuf->used_bytes);
- if (sres < 0)
- return sres;
-
- bytes_processed += newline_off + 1;
- break;
- }
-
- /* Newline not yet found: advance to the next part to copy */
- userbuf += userbuf_copy_sz;
- userbuf_len -= userbuf_copy_sz;
- bytes_processed += userbuf_copy_sz;
- }
-
- return bytes_processed;
-}
-
-/**
- * kutf_helper_textbuf_consume() - 'data' file consumer function for writing to
- * a textbuf
- * @priv: private pointer from a kutf_userdata_exchange, which
- * should be a pointer to a struct kutf_helper_textbuf to
- * write into
- * @userbuf: the userspace buffer to read from
- * @userbuf_len: size of the userspace buffer
- * @ppos: the current position in the buffer
- *
- * This consumer function is used as a write consumer for the 'data' file,
- * receiving data that has been written to the 'data' file by userspace. It
- * will read from the userspace buffer @userbuf and separates it into '\n'
- * delimited lines for the textbuf pointed to by @priv .
- *
- * If there is insufficient space in textbuf, then it will block until there is
- * space - for example, a kernel-side test calls
- * kutf_helper_textbuf_dequeue(). Since this is expected to be called in the
- * context of a syscall, the call can only be cancelled by sending an
- * appropriate signal to the userspace process.
- *
- * The current position @ppos is advanced by the number of bytes successfully
- * read.
- *
- * Return: the number of bytes read, or negative value on error.
- */
-static ssize_t kutf_helper_textbuf_consume(void *priv,
- const char __user *userbuf, size_t userbuf_len, loff_t *ppos)
-{
- struct kutf_helper_textbuf *textbuf = priv;
- int userbuf_copy_sz;
- char *next_newline_ptr;
- size_t bytes_processed = 0;
- int newdata_off;
- ssize_t ret;
-
- ret = mutex_lock_interruptible(&textbuf->lock);
- if (ret)
- return -ERESTARTSYS;
-
- /* Validate input */
- if (*ppos < 0) {
- ret = -EINVAL;
- goto out_unlock;
- }
- if (!userbuf_len) {
- ret = 0;
- goto out_unlock;
- }
-
- while (textbuf->nr_lines >= textbuf->max_nr_lines &&
- !(textbuf->flags & KUTF_HELPER_TEXTBUF_FLAG_DYING)) {
- /* Block on kernel-side dequeue making space available
- * NOTE: should also handle O_NONBLOCK */
- mutex_unlock(&textbuf->lock);
- ret = wait_event_interruptible(textbuf->not_full_wq,
- (textbuf->nr_lines < textbuf->max_nr_lines ||
- (textbuf->flags & KUTF_HELPER_TEXTBUF_FLAG_DYING)));
- if (ret)
- return -ERESTARTSYS;
- ret = mutex_lock_interruptible(&textbuf->lock);
- if (ret)
- return -ERESTARTSYS;
- }
+ spin_lock(&kutf_input_lock);
- if (textbuf->flags & KUTF_HELPER_TEXTBUF_FLAG_DYING) {
- ret = -ENODEV;
- goto out_unlock;
- }
-
- if (textbuf->prev_pos != *ppos && textbuf->used_bytes) {
- /* Seeking causes a new line to occur:
- * Truncate what data was there into a textbuf-line, and reset
- * the buffer */
- ret = collect_line(textbuf, textbuf->used_bytes);
- if (ret < 0)
- goto finish;
- } else if (textbuf->used_bytes >= (textbuf->max_line_size - 1)) {
- /* Line too long discard input until we find a '\n' */
- ret = collect_longline_truncate(textbuf, userbuf, userbuf_len);
-
- if (ret < 0)
- goto finish;
-
- /* Update userbuf with how much was processed, which may be the
- * entire buffer now */
- userbuf += ret;
- userbuf_len -= ret;
- bytes_processed += ret;
-
- /* If there's buffer remaining and we fault later (e.g. can't
- * read or OOM) ensure ppos is updated */
- *ppos += ret;
-
- /* recheck in case entire buffer processed */
- if (!userbuf_len)
- goto finish;
- }
-
- /* An extra line may've been added, ensure we don't overfill */
- if (textbuf->nr_lines >= textbuf->max_nr_lines)
- goto finish_noerr;
-
- userbuf_copy_sz = userbuf_len;
-
- /* Copy in as much as we can */
- if (userbuf_copy_sz > textbuf->max_line_size - textbuf->used_bytes)
- userbuf_copy_sz = textbuf->max_line_size - textbuf->used_bytes;
-
- ret = copy_from_user(textbuf->scratchpad + textbuf->used_bytes, userbuf, userbuf_copy_sz);
- if (ret == userbuf_copy_sz) {
- ret = -EFAULT;
- goto finish;
- }
- userbuf_copy_sz -= ret;
-
- newdata_off = textbuf->used_bytes;
- textbuf->used_bytes += userbuf_copy_sz;
-
- while (textbuf->used_bytes && textbuf->nr_lines < textbuf->max_nr_lines) {
- int new_bytes_remain = textbuf->used_bytes - newdata_off;
- /* Find a new line - only the new part should be checked */
- next_newline_ptr = strnchr(textbuf->scratchpad + newdata_off, new_bytes_remain, '\n');
-
- if (next_newline_ptr) {
- int newline_off = next_newline_ptr - textbuf->scratchpad;
-
- /* if found, collect up to it, then memmove the rest */
- /* reset positions and see if we can fill any further */
- /* repeat until run out of data or line is filled */
- ret = collect_line(textbuf, newline_off);
-
- /* If filled up or OOM, rollback the remaining new
- * data. Instead we'll try to grab it next time we're
- * called */
- if (textbuf->nr_lines >= textbuf->max_nr_lines || ret < 0)
- textbuf->used_bytes = newdata_off;
-
- if (ret < 0)
- goto finish;
-
- /* Fix up ppos etc in case we'll be ending the loop */
- *ppos += ret - newdata_off;
- bytes_processed += ret - newdata_off;
- newdata_off = 0;
- } else {
- /* there's bytes left, but no new-line, so try to fill up next time */
- *ppos += new_bytes_remain;
- bytes_processed += new_bytes_remain;
- break;
- }
- }
+ input_pending = !list_empty(&context->userdata.input_head);
-finish_noerr:
- ret = bytes_processed;
-finish:
- textbuf->prev_pos = *ppos;
-out_unlock:
- mutex_unlock(&textbuf->lock);
+ spin_unlock(&kutf_input_lock);
- return ret;
+ return input_pending;
}
-/**
- * kutf_helper_textbuf_produce() - 'data' file producer function for reading
- * from a textbuf
- * @priv: private pointer from a kutf_userdata_exchange, which
- * should be a pointer to a struct kutf_helper_textbuf to
- * read from
- * @userbuf: the userspace buffer to write to
- * @userbuf_len: size of the userspace buffer
- * @ppos: the current position in the buffer
- *
- * This producer function is used as a read producer for the 'data' file,
- * allowing userspace to read from the 'data' file. It will write to the
- * userspace buffer @userbuf, taking lines from the textbuf pointed to by
- * @priv, separating each line with '\n'.
- *
- * If there is no data in the textbuf, then it will block until some appears -
- * for example, a kernel-side test calls kutf_helper_textbuf_enqueue(). Since
- * this is expected to be called in the context of a syscall, the call can only
- * be cancelled by sending an appropriate signal to the userspace process.
- *
- * The current position @ppos is advanced by the number of bytes successfully
- * written.
- *
- * Return: the number of bytes written, or negative value on error
- */
-static ssize_t kutf_helper_textbuf_produce(void *priv, char __user *userbuf,
- size_t userbuf_len, loff_t *ppos)
+char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size)
{
- struct kutf_helper_textbuf *textbuf = priv;
- loff_t pos_offset;
- struct kutf_helper_textbuf_line *line = NULL;
- int line_start_pos;
- size_t bytes_processed = 0;
- ssize_t ret;
- int copy_length;
-
- ret = mutex_lock_interruptible(&textbuf->lock);
- if (ret)
- return -ERESTARTSYS;
-
- /* Validate input */
- if (*ppos < 0) {
- ret = -EINVAL;
- goto finish;
- }
- if (!userbuf_len) {
- ret = 0;
- goto finish;
- }
-
- /* Seeking to before the beginning of the line will have the effect of
- * resetting the position to the start of the current data, since we've
- * already discarded previous data */
- if (*ppos < textbuf->prev_line_pos)
- textbuf->prev_line_pos = *ppos;
+ struct kutf_userdata_line *line;
- while (!line) {
- int needs_wake = 0;
+ spin_lock(&kutf_input_lock);
- pos_offset = *ppos - textbuf->prev_line_pos;
- line_start_pos = 0;
-
- /* Find the line for the offset, emptying the textbuf as we go */
- while (!list_empty(&textbuf->textbuf_list)) {
- int line_end_pos;
-
- line = list_first_entry(&textbuf->textbuf_list, struct kutf_helper_textbuf_line, node);
-
- /* str_size used in line_end_pos because lines implicitly have
- * a '\n', but we count the '\0' string terminator as that */
- line_end_pos = line_start_pos + line->str_size;
-
- if (pos_offset < line_end_pos)
- break;
-
- line_start_pos += line->str_size;
- /* Only discard a line when we're sure it's finished
- * with, to avoid awkward rollback conditions if we've
- * had to block */
- list_del(&line->node);
- --(textbuf->nr_lines);
- line = NULL;
- needs_wake = 1;
- }
+ while (list_empty(&context->userdata.input_head)) {
+ int err;
- /* Update the start of the line pos for next time we're called */
- textbuf->prev_line_pos += line_start_pos;
+ kutf_set_waiting_for_input(context->result_set);
- /* If space was freed up, wake waiters */
- if (needs_wake)
- wake_up(&textbuf->not_full_wq);
-;
- if (!line) {
- /* Only check before waiting, to ensure if the test
- * does the last enqueue and immediately finishes, then
- * we'll go back round the loop to receive the line
- * instead of just dying straight away */
- if (textbuf->flags & KUTF_HELPER_TEXTBUF_FLAG_DYING) {
- /* Indicate EOF rather than an error */
- ret = 0;
- goto finish;
- }
+ spin_unlock(&kutf_input_lock);
- /* No lines found, block for new ones
- * NOTE: should also handle O_NONBLOCK */
- mutex_unlock(&textbuf->lock);
- ret = wait_event_interruptible(textbuf->not_empty_wq,
- (textbuf->nr_lines > 0 ||
- (textbuf->flags & KUTF_HELPER_TEXTBUF_FLAG_DYING)));
+ err = wait_event_interruptible(context->userdata.input_waitq,
+ pending_input(context));
- /* signals here are not restartable */
- if (ret)
- return ret;
- ret = mutex_lock_interruptible(&textbuf->lock);
- if (ret)
- return ret;
- }
+ if (err)
+ return ERR_PTR(-EINTR);
+ spin_lock(&kutf_input_lock);
}
-
- /* Find offset within the line, guaranteed to be within line->str_size */
- pos_offset -= line_start_pos;
-
- while (userbuf_len && line) {
- /* Copy at most to the end of string, excluding terminator */
- copy_length = line->str_size - 1 - pos_offset;
- if (copy_length > userbuf_len)
- copy_length = userbuf_len;
-
- if (copy_length) {
- ret = copy_to_user(userbuf, &line->str[pos_offset], copy_length);
- if (ret == copy_length) {
- ret = -EFAULT;
- goto finish;
- }
- copy_length -= ret;
-
- userbuf += copy_length;
- userbuf_len -= copy_length;
- bytes_processed += copy_length;
- *ppos += copy_length;
- if (ret)
- goto finish_noerr;
- }
-
- /* Add terminator if one was needed */
- if (userbuf_len) {
- copy_length = 1;
- ret = copy_to_user(userbuf, "\n", copy_length);
- if (ret == copy_length) {
- ret = -EFAULT;
- goto finish;
- }
- copy_length -= ret;
-
- userbuf += copy_length;
- userbuf_len -= copy_length;
- bytes_processed += copy_length;
- *ppos += copy_length;
- } else {
- /* string wasn't completely copied this time - try to
- * finish it next call */
- break;
- }
-
- /* Line Completed - only now can safely delete it */
- textbuf->prev_line_pos += line->str_size;
+ line = list_first_entry(&context->userdata.input_head,
+ struct kutf_userdata_line, node);
+ if (line->str) {
+ /*
+ * Unless it is the end-of-input marker,
+ * remove it from the list
+ */
list_del(&line->node);
- --(textbuf->nr_lines);
- line = NULL;
- /* Space freed up, wake up waiters */
- wake_up(&textbuf->not_full_wq);
-
- /* Pick the next line */
- if (!list_empty(&textbuf->textbuf_list)) {
- line = list_first_entry(&textbuf->textbuf_list, struct kutf_helper_textbuf_line, node);
- pos_offset = 0;
- }
- /* if no more lines, we've copied at least some bytes, so only
- * need to block on new lines the next time we're called */
}
-finish_noerr:
- ret = bytes_processed;
-finish:
- mutex_unlock(&textbuf->lock);
+ spin_unlock(&kutf_input_lock);
- return ret;
+ if (str_size)
+ *str_size = line->size;
+ return line->str;
}
-int kutf_helper_textbuf_wait_for_user(struct kutf_helper_textbuf *textbuf)
+int kutf_helper_input_enqueue(struct kutf_context *context,
+ const char __user *str, size_t size)
{
- int err;
- unsigned long now;
- unsigned long timeout_jiffies = msecs_to_jiffies(USERDATA_WAIT_TIMEOUT_MS);
- unsigned long time_end;
- int ret = 0;
-
- /* Mutex locking using non-interruptible variants, since a signal to
- * the user process will generally have to wait until we finish the
- * test, because we can't restart the test. The exception is where
- * we're blocked on a waitq */
- mutex_lock(&textbuf->lock);
-
- now = jiffies;
- time_end = now + timeout_jiffies;
+ struct kutf_userdata_line *line;
- while (!textbuf->nr_user_clients && time_before_eq(now, time_end)) {
- unsigned long time_to_wait = time_end - now;
- /* No users yet, block or timeout */
- mutex_unlock(&textbuf->lock);
- /* Use interruptible here - in case we block for a long time
- * and want to kill the user process */
- err = wait_event_interruptible_timeout(textbuf->user_opened_wq,
- (textbuf->nr_user_clients > 0), time_to_wait);
- /* Any error is not restartable due to how kutf runs tests */
- if (err < 0)
- return -EINTR;
- mutex_lock(&textbuf->lock);
+ line = kutf_mempool_alloc(&context->fixture_pool,
+ sizeof(*line) + size + 1);
+ if (!line)
+ return -ENOMEM;
+ if (str) {
+ unsigned long bytes_not_copied;
- now = jiffies;
+ line->size = size;
+ line->str = (void *)(line + 1);
+ bytes_not_copied = copy_from_user(line->str, str, size);
+ if (bytes_not_copied != 0)
+ return -EFAULT;
+ /* Zero terminate the string */
+ line->str[size] = '\0';
+ } else {
+ /* This is used to mark the end of input */
+ WARN_ON(size);
+ line->size = 0;
+ line->str = NULL;
}
- if (!textbuf->nr_user_clients)
- ret = -ETIMEDOUT;
-
- mutex_unlock(&textbuf->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(kutf_helper_textbuf_wait_for_user);
-char *kutf_helper_textbuf_dequeue(struct kutf_helper_textbuf *textbuf,
- int *str_size)
-{
- struct kutf_helper_textbuf_line *line;
- char *ret = NULL;
+ spin_lock(&kutf_input_lock);
- /* Mutex locking using non-interruptible variants, since a signal to
- * the user process will generally have to wait until we finish the
- * test, because we can't restart the test. The exception is where
- * we're blocked on a waitq */
- mutex_lock(&textbuf->lock);
+ list_add_tail(&line->node, &context->userdata.input_head);
- while (list_empty(&textbuf->textbuf_list)) {
- int err;
+ kutf_clear_waiting_for_input(context->result_set);
- if (!textbuf->nr_user_clients) {
- /* No user-side clients - error */
- goto out;
- }
+ spin_unlock(&kutf_input_lock);
- /* No lines found, block for new ones from user-side consumer */
- mutex_unlock(&textbuf->lock);
- /* Use interruptible here - in case we block for a long time
- * and want to kill the user process */
- err = wait_event_interruptible(textbuf->not_empty_wq,
- (textbuf->nr_lines > 0 || !textbuf->nr_user_clients));
- /* Any error is not restartable due to how kutf runs tests */
- if (err)
- return ERR_PTR(-EINTR);
- mutex_lock(&textbuf->lock);
- }
+ wake_up(&context->userdata.input_waitq);
- line = list_first_entry(&textbuf->textbuf_list, struct kutf_helper_textbuf_line, node);
- list_del(&line->node);
- --(textbuf->nr_lines);
- /* Space freed up, wake up waiters */
- wake_up(&textbuf->not_full_wq);
-
- if (str_size)
- *str_size = line->str_size;
-
- ret = &line->str[0];
-
-out:
- mutex_unlock(&textbuf->lock);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(kutf_helper_textbuf_dequeue);
-int kutf_helper_textbuf_enqueue(struct kutf_helper_textbuf *textbuf,
- char *enqueue_str, int buf_max_size)
+void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context)
{
- struct kutf_helper_textbuf_line *textbuf_line;
- int str_size = strnlen(enqueue_str, buf_max_size) + 1;
- char *str_start;
- int ret = 0;
-
- /* Mutex locking using non-interruptible variants, since a signal to
- * the user process will generally have to wait until we finish the
- * test, because we can't restart the test. The exception is where
- * we're blocked on a waitq */
- mutex_lock(&textbuf->lock);
-
- if (str_size > textbuf->max_line_size)
- str_size = textbuf->max_line_size;
-
- while (textbuf->nr_lines >= textbuf->max_nr_lines) {
- if (!textbuf->nr_user_clients) {
- /* No user-side clients - error */
- ret = -EBUSY;
- goto out;
- }
-
- /* Block on user-side producer making space available */
- mutex_unlock(&textbuf->lock);
- /* Use interruptible here - in case we block for a long time
- * and want to kill the user process */
- ret = wait_event_interruptible(textbuf->not_full_wq,
- (textbuf->nr_lines < textbuf->max_nr_lines || !textbuf->nr_user_clients));
- /* Any error is not restartable due to how kutf runs tests */
- if (ret)
- return -EINTR;
- mutex_lock(&textbuf->lock);
- }
-
- /* String is stored immediately after the line */
- textbuf_line = kutf_mempool_alloc(textbuf->mempool, str_size + sizeof(struct kutf_helper_textbuf_line));
- if (!textbuf_line) {
- ret = -ENOMEM;
- goto out;
- }
-
- str_start = &textbuf_line->str[0];
-
- /* Copy in string */
- strncpy(str_start, enqueue_str, str_size);
- /* Enforce the '\0' termination */
- str_start[str_size-1] = '\0';
- textbuf_line->str_size = str_size;
-
- /* Append to the textbuf */
- list_add_tail(&textbuf_line->node, &textbuf->textbuf_list);
- ++(textbuf->nr_lines);
-
- /* Wakeup anyone blocked on empty */
- wake_up(&textbuf->not_empty_wq);
-
-out:
- mutex_unlock(&textbuf->lock);
- return ret;
+ kutf_helper_input_enqueue(context, NULL, 0);
}
-EXPORT_SYMBOL(kutf_helper_textbuf_enqueue);
-
-
-struct kutf_userdata_ops kutf_helper_textbuf_userdata_ops = {
- .open = kutf_helper_textbuf_open,
- .release = kutf_helper_textbuf_release,
- .notify_ended = kutf_helper_textbuf_notify_test_ended,
- .consumer = kutf_helper_textbuf_consume,
- .producer = kutf_helper_textbuf_produce,
-};
-EXPORT_SYMBOL(kutf_helper_textbuf_userdata_ops);
diff --git a/mali_kbase/tests/kutf/kutf_helpers_user.c b/mali_kbase/tests/kutf/kutf_helpers_user.c
index cf3b005..9e8ab99 100644
--- a/mali_kbase/tests/kutf/kutf_helpers_user.c
+++ b/mali_kbase/tests/kutf/kutf_helpers_user.c
@@ -17,6 +17,7 @@
/* Kernel UTF test helpers that mirror those for kutf-userside */
#include <kutf/kutf_helpers_user.h>
+#include <kutf/kutf_helpers.h>
#include <kutf/kutf_utils.h>
#include <linux/err.h>
@@ -48,7 +49,7 @@ static const char *get_val_type_name(enum kutf_helper_valtype valtype)
*
* - Has between 1 and KUTF_HELPER_MAX_VAL_NAME_LEN characters before the \0 terminator
* - And, each char is in the character set [A-Z0-9_] */
-static int validate_val_name(char *val_str, int str_len)
+static int validate_val_name(const char *val_str, int str_len)
{
int i = 0;
@@ -81,24 +82,44 @@ static int validate_val_name(char *val_str, int str_len)
*
* That is, before any '\\', '\n' or '"' characters. This is so we don't have
* to escape the string */
-static int find_quoted_string_valid_len(char *str)
+static int find_quoted_string_valid_len(const char *str)
{
char *ptr;
const char *check_chars = "\\\n\"";
ptr = strpbrk(str, check_chars);
if (ptr)
- return ptr-str;
+ return (int)(ptr-str);
- return strlen(str);
+ return (int)strlen(str);
+}
+
+static int kutf_helper_userdata_enqueue(struct kutf_context *context,
+ const char *str)
+{
+ char *str_copy;
+ size_t len;
+ int err;
+
+ len = strlen(str)+1;
+
+ str_copy = kutf_mempool_alloc(&context->fixture_pool, len);
+ if (!str_copy)
+ return -ENOMEM;
+
+ strcpy(str_copy, str);
+
+ err = kutf_add_result(context, KUTF_RESULT_USERDATA, str_copy);
+
+ return err;
}
#define MAX_U64_HEX_LEN 16
/* (Name size) + ("=0x" size) + (64-bit hex value size) + (terminator) */
#define NAMED_U64_VAL_BUF_SZ (KUTF_HELPER_MAX_VAL_NAME_LEN + 3 + MAX_U64_HEX_LEN + 1)
-int kutf_helper_textbuf_send_named_u64(struct kutf_context *context,
- struct kutf_helper_textbuf *textbuf, char *val_name, u64 val)
+int kutf_helper_send_named_u64(struct kutf_context *context,
+ const char *val_name, u64 val)
{
int ret = 1;
char msgbuf[NAMED_U64_VAL_BUF_SZ];
@@ -117,9 +138,8 @@ int kutf_helper_textbuf_send_named_u64(struct kutf_context *context,
val_name, NAMED_U64_VAL_BUF_SZ, ret);
goto out_err;
}
- msgbuf[NAMED_U64_VAL_BUF_SZ-1] = '\0';
- ret = kutf_helper_textbuf_enqueue(textbuf, msgbuf, NAMED_U64_VAL_BUF_SZ);
+ ret = kutf_helper_userdata_enqueue(context, msgbuf);
if (ret) {
errmsg = kutf_dsprintf(&context->fixture_pool,
"Failed to send u64 value named '%s': send returned %d",
@@ -132,33 +152,31 @@ out_err:
kutf_test_fail(context, errmsg);
return ret;
}
-EXPORT_SYMBOL(kutf_helper_textbuf_send_named_u64);
+EXPORT_SYMBOL(kutf_helper_send_named_u64);
#define NAMED_VALUE_SEP "="
#define NAMED_STR_START_DELIM NAMED_VALUE_SEP "\""
#define NAMED_STR_END_DELIM "\""
-int kutf_helper_textbuf_max_str_len_for_kern(char *val_name,
+int kutf_helper_max_str_len_for_kern(const char *val_name,
int kern_buf_sz)
{
- int val_name_len = strlen(val_name);
- int start_delim_len = strlen(NAMED_STR_START_DELIM);
- int max_msg_len = kern_buf_sz - 1;
+ const int val_name_len = strlen(val_name);
+ const int start_delim_len = strlen(NAMED_STR_START_DELIM);
+ const int end_delim_len = strlen(NAMED_STR_END_DELIM);
+ int max_msg_len = kern_buf_sz;
int max_str_len;
- /* We do not include the end delimiter. Providing there is a line
- * ending character when sending the message, the end delimiter can be
- * truncated off safely to allow proper NAME="value" reception when
- * value's length is too long */
- max_str_len = max_msg_len - val_name_len - start_delim_len;
+ max_str_len = max_msg_len - val_name_len - start_delim_len -
+ end_delim_len;
return max_str_len;
}
-EXPORT_SYMBOL(kutf_helper_textbuf_max_str_len_for_kern);
+EXPORT_SYMBOL(kutf_helper_max_str_len_for_kern);
-int kutf_helper_textbuf_send_named_str(struct kutf_context *context,
- struct kutf_helper_textbuf *textbuf, char *val_name,
- char *val_str)
+int kutf_helper_send_named_str(struct kutf_context *context,
+ const char *val_name,
+ const char *val_str)
{
int val_str_len;
int str_buf_sz;
@@ -215,7 +233,7 @@ int kutf_helper_textbuf_send_named_str(struct kutf_context *context,
/* Terminator */
*copy_ptr = '\0';
- ret = kutf_helper_textbuf_enqueue(textbuf, str_buf, str_buf_sz);
+ ret = kutf_helper_userdata_enqueue(context, str_buf);
if (ret) {
errmsg = kutf_dsprintf(&context->fixture_pool,
@@ -232,12 +250,13 @@ out_err:
kfree(str_buf);
return ret;
}
-EXPORT_SYMBOL(kutf_helper_textbuf_send_named_str);
+EXPORT_SYMBOL(kutf_helper_send_named_str);
-int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_val,
- struct kutf_helper_textbuf *textbuf)
+int kutf_helper_receive_named_val(
+ struct kutf_context *context,
+ struct kutf_helper_named_val *named_val)
{
- int recv_sz;
+ size_t recv_sz;
char *recv_str;
char *search_ptr;
char *name_str = NULL;
@@ -246,15 +265,13 @@ int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_va
enum kutf_helper_valtype type = KUTF_HELPER_VALTYPE_INVALID;
char *strval = NULL;
u64 u64val = 0;
- int orig_recv_sz;
int err = KUTF_HELPER_ERR_INVALID_VALUE;
- recv_str = kutf_helper_textbuf_dequeue(textbuf, &recv_sz);
+ recv_str = kutf_helper_input_dequeue(context, &recv_sz);
if (!recv_str)
return -EBUSY;
else if (IS_ERR(recv_str))
return PTR_ERR(recv_str);
- orig_recv_sz = recv_sz;
/* Find the '=', grab the name and validate it */
search_ptr = strnchr(recv_str, recv_sz, NAMED_VALUE_SEP[0]);
@@ -271,7 +288,8 @@ int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_va
}
}
if (!name_str) {
- pr_err("Invalid name part for recevied string '%s'\n", recv_str);
+ pr_err("Invalid name part for received string '%s'\n",
+ recv_str);
return KUTF_HELPER_ERR_INVALID_NAME;
}
@@ -299,24 +317,6 @@ int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_va
pr_err("String value contains invalid characters in rest of received string '%s'\n", recv_str);
err = KUTF_HELPER_ERR_CHARS_AFTER_VAL;
}
- } else if (orig_recv_sz == textbuf->max_line_size) {
- /* No end-delimiter found, but the line is at
- * the max line size. Assume that before
- * truncation the line had a closing delimiter
- * anyway */
- strval_len = strlen(recv_str);
- /* Validate the string to ensure it contains no quotes */
- if (strval_len == find_quoted_string_valid_len(recv_str)) {
- strval = recv_str;
-
- /* Move to the end of the string */
- recv_str += strval_len;
- recv_sz -= strval_len;
- type = KUTF_HELPER_VALTYPE_STR;
- } else {
- pr_err("String value contains invalid characters in rest of received string '%s'\n", recv_str);
- err = KUTF_HELPER_ERR_CHARS_AFTER_VAL;
- }
} else {
pr_err("End of string delimiter not found in rest of received string '%s'\n", recv_str);
err = KUTF_HELPER_ERR_NO_END_DELIMITER;
@@ -357,8 +357,8 @@ int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_va
named_val->u.val_str = strval;
break;
default:
- pr_err("Unreachable, fix textbuf_receive_named_val\n");
- /* Coding error, report as though 'data' file failed */
+ pr_err("Unreachable, fix kutf_helper_receive_named_val\n");
+ /* Coding error, report as though 'run' file failed */
return -EINVAL;
}
@@ -367,16 +367,18 @@ int kutf_helper_textbuf_receive_named_val(struct kutf_helper_named_val *named_va
return KUTF_HELPER_ERR_NONE;
}
-EXPORT_SYMBOL(kutf_helper_textbuf_receive_named_val);
+EXPORT_SYMBOL(kutf_helper_receive_named_val);
#define DUMMY_MSG "<placeholder due to test fail>"
-int kutf_helper_textbuf_receive_check_val(struct kutf_helper_named_val *named_val,
- struct kutf_context *context, struct kutf_helper_textbuf *textbuf,
- char *expect_val_name, enum kutf_helper_valtype expect_val_type)
+int kutf_helper_receive_check_val(
+ struct kutf_helper_named_val *named_val,
+ struct kutf_context *context,
+ const char *expect_val_name,
+ enum kutf_helper_valtype expect_val_type)
{
int err;
- err = kutf_helper_textbuf_receive_named_val(named_val, textbuf);
+ err = kutf_helper_receive_named_val(context, named_val);
if (err < 0) {
const char *msg = kutf_dsprintf(&context->fixture_pool,
"Failed to receive value named '%s'",
@@ -438,7 +440,7 @@ out_fail_and_fixup:
/* But at least allow the caller to continue in the test with failures */
return 0;
}
-EXPORT_SYMBOL(kutf_helper_textbuf_receive_check_val);
+EXPORT_SYMBOL(kutf_helper_receive_check_val);
void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val)
{
diff --git a/mali_kbase/tests/kutf/kutf_resultset.c b/mali_kbase/tests/kutf/kutf_resultset.c
index 5bd0496..41645a4 100644
--- a/mali_kbase/tests/kutf/kutf_resultset.c
+++ b/mali_kbase/tests/kutf/kutf_resultset.c
@@ -20,16 +20,15 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <kutf/kutf_suite.h>
#include <kutf/kutf_resultset.h>
-/**
- * struct kutf_result_set - Represents a set of results.
- * @results: Pointer to the linked list where the results are stored.
- */
-struct kutf_result_set {
- struct list_head results;
-};
+/* Lock to protect all result structures */
+static DEFINE_SPINLOCK(kutf_result_lock);
struct kutf_result_set *kutf_create_result_set(void)
{
@@ -42,6 +41,8 @@ struct kutf_result_set *kutf_create_result_set(void)
}
INIT_LIST_HEAD(&set->results);
+ init_waitqueue_head(&set->waitq);
+ set->flags = 0;
return set;
@@ -49,11 +50,12 @@ fail_alloc:
return NULL;
}
-void kutf_add_result(struct kutf_mempool *mempool,
- struct kutf_result_set *set,
+int kutf_add_result(struct kutf_context *context,
enum kutf_result_status status,
const char *message)
{
+ struct kutf_mempool *mempool = &context->fixture_pool;
+ struct kutf_result_set *set = context->result_set;
/* Create the new result */
struct kutf_result *new_result;
@@ -62,14 +64,22 @@ void kutf_add_result(struct kutf_mempool *mempool,
new_result = kutf_mempool_alloc(mempool, sizeof(*new_result));
if (!new_result) {
pr_err("Result allocation failed\n");
- return;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&new_result->node);
new_result->status = status;
new_result->message = message;
+ spin_lock(&kutf_result_lock);
+
list_add_tail(&new_result->node, &set->results);
+
+ spin_unlock(&kutf_result_lock);
+
+ wake_up(&set->waitq);
+
+ return 0;
}
void kutf_destroy_result_set(struct kutf_result_set *set)
@@ -80,16 +90,70 @@ void kutf_destroy_result_set(struct kutf_result_set *set)
kfree(set);
}
+static bool kutf_has_result(struct kutf_result_set *set)
+{
+ bool has_result;
+
+ spin_lock(&kutf_result_lock);
+ if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT)
+ /* Pretend there are results if waiting for input */
+ has_result = true;
+ else
+ has_result = !list_empty(&set->results);
+ spin_unlock(&kutf_result_lock);
+
+ return has_result;
+}
+
struct kutf_result *kutf_remove_result(struct kutf_result_set *set)
{
- if (!list_empty(&set->results)) {
- struct kutf_result *ret;
+ struct kutf_result *result = NULL;
+ int ret;
+
+ do {
+ ret = wait_event_interruptible(set->waitq,
+ kutf_has_result(set));
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ spin_lock(&kutf_result_lock);
+
+ if (!list_empty(&set->results)) {
+ result = list_first_entry(&set->results,
+ struct kutf_result,
+ node);
+ list_del(&result->node);
+ } else if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT) {
+ /* Return a fake result */
+ static struct kutf_result waiting = {
+ .status = KUTF_RESULT_USERDATA_WAIT
+ };
+ result = &waiting;
+ }
+ /* If result == NULL then there was a race with the event
+ * being removed between the check in kutf_has_result and
+ * the lock being obtained. In this case we retry
+ */
+
+ spin_unlock(&kutf_result_lock);
+ } while (result == NULL);
+
+ return result;
+}
- ret = list_first_entry(&set->results, struct kutf_result, node);
- list_del(&ret->node);
- return ret;
- }
+void kutf_set_waiting_for_input(struct kutf_result_set *set)
+{
+ spin_lock(&kutf_result_lock);
+ set->flags |= KUTF_RESULT_SET_WAITING_FOR_INPUT;
+ spin_unlock(&kutf_result_lock);
- return NULL;
+ wake_up(&set->waitq);
}
+void kutf_clear_waiting_for_input(struct kutf_result_set *set)
+{
+ spin_lock(&kutf_result_lock);
+ set->flags &= ~KUTF_RESULT_SET_WAITING_FOR_INPUT;
+ spin_unlock(&kutf_result_lock);
+}
diff --git a/mali_kbase/tests/kutf/kutf_suite.c b/mali_kbase/tests/kutf/kutf_suite.c
index ad30cc8..4968f24 100644
--- a/mali_kbase/tests/kutf/kutf_suite.c
+++ b/mali_kbase/tests/kutf/kutf_suite.c
@@ -27,12 +27,14 @@
#include <linux/fs.h>
#include <linux/version.h>
#include <linux/atomic.h>
+#include <linux/sched.h>
#include <generated/autoconf.h>
#include <kutf/kutf_suite.h>
#include <kutf/kutf_resultset.h>
#include <kutf/kutf_utils.h>
+#include <kutf/kutf_helpers.h>
#if defined(CONFIG_DEBUG_FS)
@@ -61,8 +63,6 @@ struct kutf_application {
* @variant_list: List head to store all the variants which can run on
* this function
* @dir: debugfs directory for this test function
- * @userdata_ops: Callbacks to use for sending and receiving data to
- * userspace.
*/
struct kutf_test_function {
struct kutf_suite *suite;
@@ -73,7 +73,6 @@ struct kutf_test_function {
struct list_head node;
struct list_head variant_list;
struct dentry *dir;
- struct kutf_userdata_ops userdata_ops;
};
/**
@@ -83,17 +82,16 @@ struct kutf_test_function {
* @fixture_index: Index of this fixture
* @node: List node for variant_list
* @dir: debugfs directory for this test fixture
- * @nr_running: Current count of user-clients running this fixture
*/
struct kutf_test_fixture {
struct kutf_test_function *test_func;
unsigned int fixture_index;
struct list_head node;
struct dentry *dir;
- atomic_t nr_running;
};
-struct dentry *base_dir;
+static struct dentry *base_dir;
+static struct workqueue_struct *kutf_workq;
/**
* struct kutf_convert_table - Structure which keeps test results
@@ -252,263 +250,6 @@ static const struct file_operations kutf_debugfs_const_string_ops = {
};
/**
- * kutf_debugfs_data_open() Debugfs open callback for the "data" entry.
- * @inode: inode of the opened file
- * @file: Opened file to read from
- *
- * This function notifies the userdata callbacks that the userdata file has
- * been opened, for tracking purposes.
- *
- * It is called on both the context's userdata_consumer_priv and
- * userdata_producer_priv.
- *
- * This takes a refcount on the kutf_context
- *
- * Return: 0 on success
- */
-static int kutf_debugfs_data_open(struct inode *inode, struct file *file)
-{
- struct kutf_context *test_context = inode->i_private;
- struct kutf_test_fixture *test_fix = test_context->test_fix;
- struct kutf_test_function *test_func = test_fix->test_func;
- int err;
-
- simple_open(inode, file);
-
- /* This is not an error */
- if (!test_func->userdata_ops.open)
- goto out_no_ops;
-
- /* This is safe here - the 'data' file is only openable whilst the
- * initial refcount is still present, and the initial refcount is only
- * dropped strictly after the 'data' file is removed */
- kutf_context_get(test_context);
-
- if (test_context->userdata_consumer_priv) {
- err = test_func->userdata_ops.open(test_context->userdata_consumer_priv);
- if (err)
- goto out_consumer_fail;
- }
-
- if (test_context->userdata_producer_priv) {
- err = test_func->userdata_ops.open(test_context->userdata_producer_priv);
- if (err)
- goto out_producer_fail;
- }
-
-out_no_ops:
- return 0;
-
-out_producer_fail:
- if (test_func->userdata_ops.release && test_context->userdata_consumer_priv)
- test_func->userdata_ops.release(test_context->userdata_consumer_priv);
-out_consumer_fail:
- kutf_context_put(test_context);
-
- return err;
-}
-
-
-/**
- * kutf_debugfs_data_read() Debugfs read callback for the "data" entry.
- * @file: Opened file to read from
- * @buf: User buffer to write the data into
- * @len: Amount of data to read
- * @ppos: Offset into file to read from
- *
- * This function allows user and kernel to exchange extra data necessary for
- * the test fixture.
- *
- * The data is read from the first struct kutf_context running the fixture
- *
- * Return: Number of bytes read
- */
-static ssize_t kutf_debugfs_data_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- struct kutf_context *test_context = file->private_data;
- struct kutf_test_fixture *test_fix = test_context->test_fix;
- struct kutf_test_function *test_func = test_fix->test_func;
- ssize_t (*producer)(void *private, char __user *userbuf,
- size_t userbuf_len, loff_t *ppos);
- ssize_t count;
-
- producer = test_func->userdata_ops.producer;
- /* Can only read if there's a producer callback */
- if (!producer)
- return -ENODEV;
-
- count = producer(test_context->userdata_producer_priv, buf, len, ppos);
-
- return count;
-}
-
-/**
- * kutf_debugfs_data_write() Debugfs write callback for the "data" entry.
- * @file: Opened file to write to
- * @buf: User buffer to read the data from
- * @len: Amount of data to write
- * @ppos: Offset into file to write to
- *
- * This function allows user and kernel to exchange extra data necessary for
- * the test fixture.
- *
- * The data is added to the first struct kutf_context running the fixture
- *
- * Return: Number of bytes written
- */
-static ssize_t kutf_debugfs_data_write(struct file *file,
- const char __user *buf, size_t len, loff_t *ppos)
-{
- struct kutf_context *test_context = file->private_data;
- struct kutf_test_fixture *test_fix = test_context->test_fix;
- struct kutf_test_function *test_func = test_fix->test_func;
- ssize_t (*consumer)(void *private, const char __user *userbuf,
- size_t userbuf_len, loff_t *ppos);
- ssize_t count;
-
- consumer = test_func->userdata_ops.consumer;
- /* Can only write if there's a consumer callback */
- if (!consumer)
- return -ENODEV;
-
- count = consumer(test_context->userdata_consumer_priv, buf, len, ppos);
-
- return count;
-}
-
-
-/**
- * kutf_debugfs_data_release() - Debugfs release callback for the "data" entry.
- * @inode: File entry representation
- * @file: A specific opening of the file
- *
- * This function notifies the userdata callbacks that the userdata file has
- * been closed, for tracking purposes.
- *
- * It is called on both the context's userdata_consumer_priv and
- * userdata_producer_priv.
- *
- * It also drops the refcount on the kutf_context that was taken during
- * kutf_debugfs_data_open()
- */
-static int kutf_debugfs_data_release(struct inode *inode, struct file *file)
-{
- struct kutf_context *test_context = file->private_data;
- struct kutf_test_fixture *test_fix = test_context->test_fix;
- struct kutf_test_function *test_func = test_fix->test_func;
-
- if (!test_func->userdata_ops.release)
- return 0;
-
- if (test_context->userdata_consumer_priv)
- test_func->userdata_ops.release(test_context->userdata_consumer_priv);
- if (test_context->userdata_producer_priv)
- test_func->userdata_ops.release(test_context->userdata_producer_priv);
-
- kutf_context_put(test_context);
-
- return 0;
-}
-
-
-static const struct file_operations kutf_debugfs_data_ops = {
- .owner = THIS_MODULE,
- .open = kutf_debugfs_data_open,
- .read = kutf_debugfs_data_read,
- .write = kutf_debugfs_data_write,
- .release = kutf_debugfs_data_release,
- .llseek = default_llseek,
-};
-
-/**
- * userdata_init() - Initialize userspace data exchange for a test, if
- * specified by that test
- * @test_context: Test context
- *
- * Note that this allows new refcounts to be made on test_context by userspace
- * threads opening the 'data' file.
- *
- * Return: 0 on success, negative value corresponding to error code in failure
- * and kutf result will be set appropriately to indicate the error
- */
-static int userdata_init(struct kutf_context *test_context)
-{
- struct kutf_test_fixture *test_fix = test_context->test_fix;
- struct kutf_test_function *test_func = test_fix->test_func;
- int err = 0;
- struct dentry *userdata_dentry;
-
- /* Valid to have neither a producer or consumer, which is the case for
- * tests not requiring usersdata */
- if ((!test_func->userdata_ops.consumer) && (!test_func->userdata_ops.producer))
- return err;
-
- if (test_func->userdata_ops.consumer && !test_context->userdata_consumer_priv) {
- kutf_test_fatal(test_context,
- "incorrect test setup - userdata consumer provided without private data");
- return -EFAULT;
- }
-
- if (test_func->userdata_ops.producer && !test_context->userdata_producer_priv) {
- kutf_test_fatal(test_context,
- "incorrect test setup - userdata producer provided without private data");
- return -EFAULT;
- }
-
- userdata_dentry = debugfs_create_file("data", S_IROTH, test_fix->dir,
- test_context, &kutf_debugfs_data_ops);
-
- if (!userdata_dentry) {
- pr_err("Failed to create debugfs file \"data\" when running fixture\n");
- /* Not using Fatal (which stops other tests running),
- * nor Abort (which indicates teardown should not be done) */
- kutf_test_fail(test_context,
- "failed to create 'data' file for userside data exchange");
-
- /* Error code is discarded by caller, but consistent with other
- * debugfs_create_file failures */
- err = -EEXIST;
- } else {
- test_context->userdata_dentry = userdata_dentry;
- }
-
-
- return err;
-}
-
-/**
- * userdata_term() - Terminate userspace data exchange for a test, if specified
- * by that test
- * @test_context: Test context
- *
- * Note This also prevents new refcounts being made on @test_context by userspace
- * threads opening the 'data' file for this test. Any existing open file descriptors
- * to the 'data' file will still be safe to use by userspace.
- */
-static void userdata_term(struct kutf_context *test_context)
-{
- struct kutf_test_fixture *test_fix = test_context->test_fix;
- struct kutf_test_function *test_func = test_fix->test_func;
- void (*notify_ended)(void *priv) = test_func->userdata_ops.notify_ended;
-
- /* debugfs_remove() is safe when parameter is error or NULL */
- debugfs_remove(test_context->userdata_dentry);
-
- /* debugfs_remove() doesn't kill any currently open file descriptors on
- * this file, and such fds are still safe to use providing test_context
- * is properly refcounted */
-
- if (notify_ended) {
- if (test_context->userdata_consumer_priv)
- notify_ended(test_context->userdata_consumer_priv);
- if (test_context->userdata_producer_priv)
- notify_ended(test_context->userdata_producer_priv);
- }
-
-}
-
-/**
* kutf_add_explicit_result() - Check if an explicit result needs to be added
* @context: KUTF test context
*/
@@ -563,75 +304,75 @@ static void kutf_add_explicit_result(struct kutf_context *context)
}
}
+static void kutf_run_test(struct work_struct *data)
+{
+ struct kutf_context *test_context = container_of(data,
+ struct kutf_context, work);
+ struct kutf_suite *suite = test_context->suite;
+ struct kutf_test_function *test_func;
+
+ test_func = test_context->test_fix->test_func;
+
+ /*
+ * Call the create fixture function if required before the
+ * fixture is run
+ */
+ if (suite->create_fixture)
+ test_context->fixture = suite->create_fixture(test_context);
+
+ /* Only run the test if the fixture was created (if required) */
+ if ((suite->create_fixture && test_context->fixture) ||
+ (!suite->create_fixture)) {
+ /* Run this fixture */
+ test_func->execute(test_context);
+
+ if (suite->remove_fixture)
+ suite->remove_fixture(test_context);
+
+ kutf_add_explicit_result(test_context);
+ }
+
+ kutf_add_result(test_context, KUTF_RESULT_TEST_FINISHED, NULL);
+
+ kutf_context_put(test_context);
+}
+
/**
* kutf_debugfs_run_open() Debugfs open callback for the "run" entry.
* @inode: inode of the opened file
* @file: Opened file to read from
*
- * This function retrieves the test fixture data that is associated with the
- * opened file and works back to get the test, suite and application so
- * it can then run the test that is associated with the file entry.
+ * This function creates a KUTF context and queues it onto a workqueue to be
+ * run asynchronously. The resulting file descriptor can be used to communicate
+ * userdata to the test and to read back the results of the test execution.
*
* Return: 0 on success
*/
static int kutf_debugfs_run_open(struct inode *inode, struct file *file)
{
struct kutf_test_fixture *test_fix = inode->i_private;
- struct kutf_test_function *test_func = test_fix->test_func;
- struct kutf_suite *suite = test_func->suite;
struct kutf_context *test_context;
int err = 0;
- /* For the moment, only one user-client should be attempting to run
- * this at a time. This simplifies how we lookup the kutf_context when
- * using the 'data' file.
- * Removing this restriction would require a rewrite of the mechanism
- * of the 'data' file to pass data in, perhaps 'data' created here and
- * based upon userspace thread's pid */
- if (atomic_inc_return(&test_fix->nr_running) != 1) {
- err = -EBUSY;
- goto finish;
- }
-
test_context = kutf_create_context(test_fix);
if (!test_context) {
- err = -ENODEV;
+ err = -ENOMEM;
goto finish;
}
file->private_data = test_context;
- /*
- * Call the create fixture function if required before the
- * fixture is run
- */
- if (suite->create_fixture)
- test_context->fixture = suite->create_fixture(test_context);
-
- /* Only run the test if the fixture was created (if required) */
- if ((suite->create_fixture && test_context->fixture) ||
- (!suite->create_fixture)) {
- int late_err;
- /* Setup any userdata exchange */
- late_err = userdata_init(test_context);
-
- if (!late_err)
- /* Run this fixture */
- test_func->execute(test_context);
-
- userdata_term(test_context);
-
- if (suite->remove_fixture)
- suite->remove_fixture(test_context);
+	/* This reference is released by kutf_run_test */
+ kutf_context_get(test_context);
- kutf_add_explicit_result(test_context);
- }
+ queue_work(kutf_workq, &test_context->work);
finish:
- atomic_dec(&test_fix->nr_running);
return err;
}
+#define USERDATA_WARNING_MESSAGE "WARNING: This test requires userdata\n"
+
/**
* kutf_debugfs_run_read() - Debugfs read callback for the "run" entry.
* @file: Opened file to read from
@@ -639,8 +380,14 @@ finish:
* @len: Amount of data to read
* @ppos: Offset into file to read from
*
- * This function emits the results which where logged during the opening of
- * the file kutf_debugfs_run_open.
+ * This function emits the results of the test, blocking until they are
+ * available.
+ *
+ * If the test involves user data then this will also return user data records
+ * to user space. If the test is waiting for user data then this function will
+ * output a message (to make the likes of 'cat' display it), followed by
+ * returning 0 to mark the end of file.
+ *
* Results will be emitted one at a time, once all the results have been read
* 0 will be returned to indicate there is no more data.
*
@@ -653,68 +400,153 @@ static ssize_t kutf_debugfs_run_read(struct file *file, char __user *buf,
struct kutf_result *res;
unsigned long bytes_not_copied;
ssize_t bytes_copied = 0;
+ char *kutf_str_ptr = NULL;
+ size_t kutf_str_len = 0;
+ size_t message_len = 0;
+ char separator = ':';
+ char terminator = '\n';
- /* Note: This code assumes a result is read completely */
res = kutf_remove_result(test_context->result_set);
- if (res) {
- char *kutf_str_ptr = NULL;
- unsigned int kutf_str_len = 0;
- unsigned int message_len = 0;
- char separator = ':';
- char terminator = '\n';
-
- kutf_result_to_string(&kutf_str_ptr, res->status);
- if (kutf_str_ptr)
- kutf_str_len = strlen(kutf_str_ptr);
-
- if (res->message)
- message_len = strlen(res->message);
-
- if ((kutf_str_len + 1 + message_len + 1) > len) {
- pr_err("Not enough space in user buffer for a single result");
+
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ /*
+ * Handle 'fake' results - these results are converted to another
+ * form before being returned from the kernel
+ */
+ switch (res->status) {
+ case KUTF_RESULT_TEST_FINISHED:
+ return 0;
+ case KUTF_RESULT_USERDATA_WAIT:
+ if (test_context->userdata.flags &
+ KUTF_USERDATA_WARNING_OUTPUT) {
+ /*
+ * Warning message already output,
+ * signal end-of-file
+ */
return 0;
}
- /* First copy the result string */
- if (kutf_str_ptr) {
- bytes_not_copied = copy_to_user(&buf[0], kutf_str_ptr,
- kutf_str_len);
- bytes_copied += kutf_str_len - bytes_not_copied;
- if (bytes_not_copied)
- goto exit;
+ message_len = sizeof(USERDATA_WARNING_MESSAGE)-1;
+ if (message_len > len)
+ message_len = len;
+
+ bytes_not_copied = copy_to_user(buf,
+ USERDATA_WARNING_MESSAGE,
+ message_len);
+ if (bytes_not_copied != 0)
+ return -EFAULT;
+ test_context->userdata.flags |= KUTF_USERDATA_WARNING_OUTPUT;
+ return message_len;
+ case KUTF_RESULT_USERDATA:
+ message_len = strlen(res->message);
+ if (message_len > len-1) {
+ message_len = len-1;
+ pr_warn("User data truncated, read not long enough\n");
+ }
+ bytes_not_copied = copy_to_user(buf, res->message,
+ message_len);
+ if (bytes_not_copied != 0) {
+ pr_warn("Failed to copy data to user space buffer\n");
+ return -EFAULT;
+ }
+ /* Finally the terminator */
+ bytes_not_copied = copy_to_user(&buf[message_len],
+ &terminator, 1);
+ if (bytes_not_copied != 0) {
+ pr_warn("Failed to copy data to user space buffer\n");
+ return -EFAULT;
}
+ return message_len+1;
+ default:
+ /* Fall through - this is a test result */
+ break;
+ }
- /* Then the separator */
- bytes_not_copied = copy_to_user(&buf[bytes_copied],
- &separator, 1);
- bytes_copied += 1 - bytes_not_copied;
+ /* Note: This code assumes a result is read completely */
+ kutf_result_to_string(&kutf_str_ptr, res->status);
+ if (kutf_str_ptr)
+ kutf_str_len = strlen(kutf_str_ptr);
+
+ if (res->message)
+ message_len = strlen(res->message);
+
+ if ((kutf_str_len + 1 + message_len + 1) > len) {
+ pr_err("Not enough space in user buffer for a single result");
+ return 0;
+ }
+
+ /* First copy the result string */
+ if (kutf_str_ptr) {
+ bytes_not_copied = copy_to_user(&buf[0], kutf_str_ptr,
+ kutf_str_len);
+ bytes_copied += kutf_str_len - bytes_not_copied;
if (bytes_not_copied)
goto exit;
+ }
- /* Finally Next copy the result string */
- if (res->message) {
- bytes_not_copied = copy_to_user(&buf[bytes_copied],
- res->message, message_len);
- bytes_copied += message_len - bytes_not_copied;
- if (bytes_not_copied)
- goto exit;
- }
+ /* Then the separator */
+ bytes_not_copied = copy_to_user(&buf[bytes_copied],
+ &separator, 1);
+ bytes_copied += 1 - bytes_not_copied;
+ if (bytes_not_copied)
+ goto exit;
- /* Finally the terminator */
+	/* Next, copy the message string */
+ if (res->message) {
bytes_not_copied = copy_to_user(&buf[bytes_copied],
- &terminator, 1);
- bytes_copied += 1 - bytes_not_copied;
+ res->message, message_len);
+ bytes_copied += message_len - bytes_not_copied;
+ if (bytes_not_copied)
+ goto exit;
}
+
+ /* Finally the terminator */
+ bytes_not_copied = copy_to_user(&buf[bytes_copied],
+ &terminator, 1);
+ bytes_copied += 1 - bytes_not_copied;
+
exit:
return bytes_copied;
}
/**
+ * kutf_debugfs_run_write() - Debugfs write callback for the "run" entry.
+ * @file: Opened file to write to
+ * @buf: User buffer to read the data from
+ * @len: Amount of data to write
+ * @ppos: Offset into file to write to
+ *
+ * This function allows user and kernel to exchange extra data necessary for
+ * the test fixture.
+ *
+ * The data is added to the kutf_context associated with the opened file,
+ *
+ * Return: Number of bytes written
+ */
+static ssize_t kutf_debugfs_run_write(struct file *file,
+ const char __user *buf, size_t len, loff_t *ppos)
+{
+ int ret = 0;
+ struct kutf_context *test_context = file->private_data;
+
+ if (len > KUTF_MAX_LINE_LENGTH)
+ return -EINVAL;
+
+ ret = kutf_helper_input_enqueue(test_context, buf, len);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+/**
* kutf_debugfs_run_release() - Debugfs release callback for the "run" entry.
* @inode: File entry representation
* @file: A specific opening of the file
*
- * Release any resources that where created during the opening of the file
+ * Release any resources that were created during the opening of the file
*
* Note that resources may not be released immediately, that might only happen
* later when other users of the kutf_context release their refcount.
@@ -725,6 +557,8 @@ static int kutf_debugfs_run_release(struct inode *inode, struct file *file)
{
struct kutf_context *test_context = file->private_data;
+ kutf_helper_input_enqueue_end_of_data(test_context);
+
kutf_context_put(test_context);
return 0;
}
@@ -733,6 +567,7 @@ static const struct file_operations kutf_debugfs_run_ops = {
.owner = THIS_MODULE,
.open = kutf_debugfs_run_open,
.read = kutf_debugfs_run_read,
+ .write = kutf_debugfs_run_write,
.release = kutf_debugfs_run_release,
.llseek = default_llseek,
};
@@ -763,7 +598,6 @@ static int create_fixture_variant(struct kutf_test_function *test_func,
test_fix->test_func = test_func;
test_fix->fixture_index = fixture_index;
- atomic_set(&test_fix->nr_running, 0);
snprintf(name, sizeof(name), "%d", fixture_index);
test_fix->dir = debugfs_create_dir(name, test_func->dir);
@@ -783,8 +617,14 @@ static int create_fixture_variant(struct kutf_test_function *test_func,
goto fail_file;
}
- tmp = debugfs_create_file("run", S_IROTH, test_fix->dir, test_fix,
- &kutf_debugfs_run_ops);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ tmp = debugfs_create_file_unsafe(
+#else
+ tmp = debugfs_create_file(
+#endif
+ "run", 0600, test_fix->dir,
+ test_fix,
+ &kutf_debugfs_run_ops);
if (!tmp) {
pr_err("Failed to create debugfs file \"run\" when adding fixture\n");
/* Might not be the right error, we don't get it passed back to us */
@@ -813,14 +653,13 @@ static void kutf_remove_test_variant(struct kutf_test_fixture *test_fix)
kfree(test_fix);
}
-void kutf_add_test_with_filters_data_and_userdata(
+void kutf_add_test_with_filters_and_data(
struct kutf_suite *suite,
unsigned int id,
const char *name,
void (*execute)(struct kutf_context *context),
unsigned int filters,
- union kutf_callback_data test_data,
- struct kutf_userdata_ops *userdata_ops)
+ union kutf_callback_data test_data)
{
struct kutf_test_function *test_func;
struct dentry *tmp;
@@ -873,7 +712,6 @@ void kutf_add_test_with_filters_data_and_userdata(
test_func->suite = suite;
test_func->execute = execute;
test_func->test_data = test_data;
- memcpy(&test_func->userdata_ops, userdata_ops, sizeof(*userdata_ops));
list_add(&test_func->node, &suite->test_list);
return;
@@ -885,27 +723,6 @@ fail_dir:
fail_alloc:
return;
}
-EXPORT_SYMBOL(kutf_add_test_with_filters_data_and_userdata);
-
-void kutf_add_test_with_filters_and_data(
- struct kutf_suite *suite,
- unsigned int id,
- const char *name,
- void (*execute)(struct kutf_context *context),
- unsigned int filters,
- union kutf_callback_data test_data)
-{
- struct kutf_userdata_ops userdata_ops = {
- .open = NULL,
- .release = NULL,
- .consumer = NULL,
- .producer = NULL,
- };
-
- kutf_add_test_with_filters_data_and_userdata(suite, id, name, execute,
- filters, test_data, &userdata_ops);
-}
-
EXPORT_SYMBOL(kutf_add_test_with_filters_and_data);
void kutf_add_test_with_filters(
@@ -1150,7 +967,7 @@ static struct kutf_context *kutf_create_context(
new_context->result_set = kutf_create_result_set();
if (!new_context->result_set) {
- pr_err("Failed to create resultset");
+ pr_err("Failed to create result set");
goto fail_result_set;
}
@@ -1165,9 +982,12 @@ static struct kutf_context *kutf_create_context(
new_context->fixture_index = test_fix->fixture_index;
new_context->fixture_name = NULL;
new_context->test_data = test_fix->test_func->test_data;
- new_context->userdata_consumer_priv = NULL;
- new_context->userdata_producer_priv = NULL;
- new_context->userdata_dentry = NULL;
+
+ new_context->userdata.flags = 0;
+ INIT_LIST_HEAD(&new_context->userdata.input_head);
+ init_waitqueue_head(&new_context->userdata.input_waitq);
+
+ INIT_WORK(&new_context->work, kutf_run_test);
kref_init(&new_context->kref);
@@ -1227,8 +1047,7 @@ static void kutf_test_log_result(
context->status = new_status;
if (context->expected_status != new_status)
- kutf_add_result(&context->fixture_pool, context->result_set,
- new_status, message);
+ kutf_add_result(context, new_status, message);
}
void kutf_test_log_result_external(
@@ -1344,18 +1163,18 @@ EXPORT_SYMBOL(kutf_test_abort);
*/
static int __init init_kutf_core(void)
{
- int ret;
+ kutf_workq = alloc_workqueue("kutf workq", WQ_UNBOUND, 1);
+ if (!kutf_workq)
+ return -ENOMEM;
base_dir = debugfs_create_dir("kutf_tests", NULL);
if (!base_dir) {
- ret = -ENODEV;
- goto exit_dir;
+ destroy_workqueue(kutf_workq);
+ kutf_workq = NULL;
+ return -ENOMEM;
}
return 0;
-
-exit_dir:
- return ret;
}
/**
@@ -1366,6 +1185,9 @@ exit_dir:
static void __exit exit_kutf_core(void)
{
debugfs_remove_recursive(base_dir);
+
+ if (kutf_workq)
+ destroy_workqueue(kutf_workq);
}
#else /* defined(CONFIG_DEBUG_FS) */