author    Sidath Senanayake <sidaths@google.com> 2019-08-23 15:40:27 +0200
committer Sidath Senanayake <sidaths@google.com> 2019-08-23 15:40:27 +0200
commit    869660680efaed77cec0414161640a6f1a85d5b2 (patch)
tree      d36678f523da5f9148c465c3b5d5ef058401a409
parent    228451ed83f4840e863beff27b33ca9a460f820b (diff)
download  gpu-869660680efaed77cec0414161640a6f1a85d5b2.tar.gz
Mali Valhall DDK r20p0 KMD
Provenance: f3deff212 (collaborate/EAC/v_r20p0)
VX504X08X-BU-00000-r20p0-01rel0 - Android DDK

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: If3c1611d83966bcb25a85ad27638612c121cb545
-rw-r--r--  mali_kbase/Kbuild | 2
-rw-r--r--  mali_kbase/Kconfig | 26
-rw-r--r--  mali_kbase/Mconfig | 24
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_devfreq.c | 59
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_devfreq.h | 8
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_device_hw.c | 52
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_device_internal.h | 12
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_gpu.c | 13
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_instr_backend.c | 10
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_irq_linux.c | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_hw.c | 31
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_rb.c | 31
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c | 12
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c | 11
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h | 19
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_backend.c | 219
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_defs.h | 45
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c | 354
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_internal.h | 49
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_l2_states.h | 2
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_metrics.c | 41
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_time.c | 43
-rw-r--r--  mali_kbase/build.bp | 6
-rw-r--r--  mali_kbase/docs/Doxyfile | 132
-rw-r--r--  mali_kbase/docs/policy_operation_diagram.dot | 117
-rw-r--r--  mali_kbase/docs/policy_overview.dot | 68
-rw-r--r--  mali_kbase/ipa/mali_kbase_ipa.c | 52
-rw-r--r--  mali_kbase/ipa/mali_kbase_ipa.h | 2
-rw-r--r--  mali_kbase/ipa/mali_kbase_ipa_vinstr_g7x.c | 36
-rw-r--r--  mali_kbase/mali_base_hwconfig_features.h | 156
-rw-r--r--  mali_kbase/mali_base_hwconfig_issues.h | 902
-rw-r--r--  mali_kbase/mali_base_kernel.h | 47
-rw-r--r--  mali_kbase/mali_kbase.h | 18
-rw-r--r--  mali_kbase/mali_kbase_context.c | 1
-rw-r--r--  mali_kbase/mali_kbase_core_linux.c | 72
-rw-r--r--  mali_kbase/mali_kbase_debug_job_fault.c | 69
-rw-r--r--  mali_kbase/mali_kbase_debugfs_helper.c | 4
-rw-r--r--  mali_kbase/mali_kbase_defs.h | 24
-rw-r--r--  mali_kbase/mali_kbase_gpu_id.h | 29
-rw-r--r--  mali_kbase/mali_kbase_gpuprops.c | 45
-rw-r--r--  mali_kbase/mali_kbase_hw.c | 444
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_jm.h | 8
-rw-r--r--  mali_kbase/mali_kbase_hwaccess_pm.h | 29
-rw-r--r--  mali_kbase/mali_kbase_ioctl.h | 42
-rw-r--r--  mali_kbase/mali_kbase_jd.c | 5
-rw-r--r--  mali_kbase/mali_kbase_js.c | 14
-rw-r--r--  mali_kbase/mali_kbase_mem.c | 12
-rw-r--r--  mali_kbase/mali_kbase_mem.h | 6
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c | 140
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.h | 26
-rw-r--r--  mali_kbase/mali_kbase_mem_pool_debugfs.c | 4
-rw-r--r--  mali_kbase/mali_kbase_mipe_proto.h | 4
-rw-r--r--  mali_kbase/mali_kbase_mmu.c | 49
-rw-r--r--  mali_kbase/mali_kbase_native_mgm.c | 26
-rw-r--r--  mali_kbase/mali_kbase_softjobs.c | 6
-rw-r--r--  mali_kbase/mali_midg_regmap.h | 113
-rw-r--r--  mali_kbase/mali_midg_regmap_jm.h | 12
-rw-r--r--  mali_kbase/tests/kutf/kutf_suite.c | 10
-rw-r--r--  mali_mgm/memory_group_manager.c | 32
59 files changed, 1425 insertions, 2402 deletions
diff --git a/mali_kbase/Kbuild b/mali_kbase/Kbuild
index 75fcaba..4d0b557 100644
--- a/mali_kbase/Kbuild
+++ b/mali_kbase/Kbuild
@@ -21,7 +21,7 @@
# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r19p0-01rel0"
+MALI_RELEASE_NAME ?= "r20p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
diff --git a/mali_kbase/Kconfig b/mali_kbase/Kconfig
index 3d93aaa..ca3e29c 100644
--- a/mali_kbase/Kconfig
+++ b/mali_kbase/Kconfig
@@ -23,6 +23,7 @@
menuconfig MALI_MIDGARD
tristate "Mali Midgard series support"
select GPU_TRACEPOINTS if ANDROID
+ select DMA_SHARED_BUFFER
default n
help
Enable this option to build support for an ARM Mali Midgard GPU.
@@ -211,7 +212,7 @@ config MALI_MEMORY_FULLY_BACKED
config MALI_DMA_BUF_MAP_ON_DEMAND
bool "Map imported dma-bufs on demand"
- depends on DMA_SHARED_BUFFER && MALI_MIDGARD
+ depends on MALI_MIDGARD
default n
help
This option causes kbase to set up the GPU mapping of imported
@@ -230,6 +231,29 @@ config MALI_DMA_BUF_LEGACY_COMPAT
maintenance where MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping,
including a cache flush.
+config MALI_HW_ERRATA_1485982_NOT_AFFECTED
+ bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ This option disables the default workaround for GPU2017-1336. The
+ workaround keeps the L2 cache powered up except for powerdown and reset.
+
+ The workaround introduces a limitation that will prevent the running of
+ protected mode content on fully coherent platforms, as the switch to IO
+ coherency mode requires the L2 to be turned off.
+
+config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+ bool "Use alternative workaround for BASE_HW_ISSUE_GPU2017_1336"
+ depends on MALI_MIDGARD && MALI_EXPERT && !MALI_HW_ERRATA_1485982_NOT_AFFECTED
+ default n
+ help
+ This option uses an alternative workaround for GPU2017-1336: lowering
+ the GPU clock to a platform-specific, known-good frequency before
+ powering down the L2 cache. The clock can be specified in the device
+ tree using the property opp-mali-errata-1485982; otherwise the
+ slowest clock will be selected.
+
# Instrumentation options.
config MALI_JOB_DUMP
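As a concrete illustration of the lookup described in the help text above, the driver tags one OPP entry in the device tree with the boolean property opp-mali-errata-1485982 and takes that entry's rate as the suspend clock (see kbasep_devfreq_read_suspend_clock() in mali_kbase_devfreq.c below). A minimal sketch of that lookup, with a hypothetical helper name and showing only the devfreq ('opp-hz') path:

    #include <linux/of.h>
    #include <linux/types.h>

    /* Hypothetical helper: return the rate of an OPP node tagged with the
     * errata property, or 0 if this node is not the tagged entry.
     */
    static u64 suspend_clock_from_opp_node(const struct device_node *node)
    {
            u64 freq = 0;

            /* Only the OPP entry carrying the boolean tag is of interest */
            if (!of_property_read_bool(node, "opp-mali-errata-1485982"))
                    return 0;

            /* Read the entry's rate; the patch tries 'opp-hz-real' first
             * when kbase devfreq is not in use.
             */
            if (of_property_read_u64(node, "opp-hz", &freq))
                    return 0;

            return freq;
    }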
diff --git a/mali_kbase/Mconfig b/mali_kbase/Mconfig
index 60cb2a3..27e0d63 100644
--- a/mali_kbase/Mconfig
+++ b/mali_kbase/Mconfig
@@ -239,6 +239,30 @@ config MALI_REAL_HW
default y
default n if NO_MALI
+config MALI_HW_ERRATA_1485982_NOT_AFFECTED
+ bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ default y if PLATFORM_JUNO
+ help
+ This option disables the default workaround for GPU2017-1336. The
+ workaround keeps the L2 cache powered up except for powerdown and reset.
+
+ The workaround introduces a limitation that will prevent the running of
+ protected mode content on fully coherent platforms, as the switch to IO
+ coherency mode requires the L2 to be turned off.
+
+config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+ bool "Use alternative workaround for BASE_HW_ISSUE_GPU2017_1336"
+ depends on MALI_MIDGARD && MALI_EXPERT && !MALI_HW_ERRATA_1485982_NOT_AFFECTED
+ default n
+ help
+ This option uses an alternative workaround for GPU2017-1336: lowering
+ the GPU clock to a platform-specific, known-good frequency before
+ powering down the L2 cache. The clock can be specified in the device
+ tree using the property opp-mali-errata-1485982; otherwise the
+ slowest clock will be selected.
+
# Instrumentation options.
# config MALI_JOB_DUMP exists in the Kernel Kconfig but is configured using CINSTR_JOB_DUMP in Mconfig.
diff --git a/mali_kbase/backend/gpu/mali_kbase_devfreq.c b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
index 4e87c6a..5c17297 100644
--- a/mali_kbase/backend/gpu/mali_kbase_devfreq.c
+++ b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
@@ -208,6 +208,13 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
return 0;
}
+void kbase_devfreq_force_freq(struct kbase_device *kbdev, unsigned long freq)
+{
+ unsigned long target_freq = freq;
+
+ kbase_devfreq_target(kbdev->dev, &target_freq, 0);
+}
+
static int
kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
{
@@ -280,6 +287,20 @@ static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
dp->max_state = i;
+ /* Use the lowest clock as the suspend clock.
+ * It may be overridden by 'opp-mali-errata-1485982'.
+ */
+ if (kbdev->pm.backend.gpu_clock_slow_down_wa) {
+ freq = 0;
+ opp = dev_pm_opp_find_freq_ceil(kbdev->dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(kbdev->dev, "failed to find slowest clock");
+ return 0;
+ }
+ dev_info(kbdev->dev, "suspend clock %lu from slowest", freq);
+ kbdev->pm.backend.gpu_clock_suspend_freq = freq;
+ }
+
return 0;
}
@@ -302,6 +323,40 @@ static void kbase_devfreq_exit(struct device *dev)
kbase_devfreq_term_freq_table(kbdev);
}
+static void kbasep_devfreq_read_suspend_clock(struct kbase_device *kbdev,
+ struct device_node *node)
+{
+ u64 freq = 0;
+ int err = 0;
+
+ /* Check if this node is the opp entry having 'opp-mali-errata-1485982'
+ * to get the suspend clock, otherwise skip it.
+ */
+ if (!of_property_read_bool(node, "opp-mali-errata-1485982"))
+ return;
+
+ /* In kbase DevFreq, the clock will be read from 'opp-hz'
+ * and translated into the actual clock by opp_translate.
+ *
+ * In customer DVFS, the clock will be read from 'opp-hz-real'
+ * for clk driver. If 'opp-hz-real' does not exist,
+ * read from 'opp-hz'.
+ */
+ if (IS_ENABLED(CONFIG_MALI_DEVFREQ))
+ err = of_property_read_u64(node, "opp-hz", &freq);
+ else {
+ if (of_property_read_u64(node, "opp-hz-real", &freq))
+ err = of_property_read_u64(node, "opp-hz", &freq);
+ }
+
+ if (WARN_ON(err || !freq))
+ return;
+
+ kbdev->pm.backend.gpu_clock_suspend_freq = freq;
+ dev_info(kbdev->dev,
+ "suspend clock %llu by opp-mali-errata-1485982", freq);
+}
+
static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
{
#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF)
@@ -341,6 +396,10 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
u32 opp_volts[BASE_MAX_NR_CLOCKS_REGULATORS];
#endif
+ /* Read suspend clock from opp table */
+ if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+ kbasep_devfreq_read_suspend_clock(kbdev, node);
+
err = of_property_read_u64(node, "opp-hz", &opp_freq);
if (err) {
dev_warn(kbdev->dev, "Failed to read opp-hz property with error %d\n",
diff --git a/mali_kbase/backend/gpu/mali_kbase_devfreq.h b/mali_kbase/backend/gpu/mali_kbase_devfreq.h
index 6ffdcd8..8c976b2 100644
--- a/mali_kbase/backend/gpu/mali_kbase_devfreq.h
+++ b/mali_kbase/backend/gpu/mali_kbase_devfreq.h
@@ -28,6 +28,14 @@ int kbase_devfreq_init(struct kbase_device *kbdev);
void kbase_devfreq_term(struct kbase_device *kbdev);
/**
+ * kbase_devfreq_force_freq - Set GPU frequency on L2 power on/off.
+ * @kbdev: Device pointer
+ * @freq: GPU frequency in HZ to be set when
+ * MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE is enabled
+ */
+void kbase_devfreq_force_freq(struct kbase_device *kbdev, unsigned long freq);
+
+/**
* kbase_devfreq_enqueue_work - Enqueue a work item for suspend/resume devfreq.
* @kbdev: Device pointer
* @work_type: The type of the devfreq work item, i.e. suspend or resume
diff --git a/mali_kbase/backend/gpu/mali_kbase_device_hw.c b/mali_kbase/backend/gpu/mali_kbase_device_hw.c
index 567ebf1..c470a97 100644
--- a/mali_kbase/backend/gpu/mali_kbase_device_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_device_hw.c
@@ -27,15 +27,15 @@
#include <mali_kbase.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
-
#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_mmu_hw_direct.h>
+#include <mali_kbase_reset_gpu.h>
#if !defined(CONFIG_MALI_NO_MALI)
#ifdef CONFIG_DEBUG_FS
-
int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
{
struct kbase_io_access *old_buf;
@@ -203,21 +203,29 @@ KBASE_EXPORT_TEST_API(kbase_reg_read);
*/
static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
{
- u32 status;
- u64 address;
-
- status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS));
- address = (u64) kbase_reg_read(kbdev,
+ u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ u32 status = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_FAULTSTATUS));
+ u64 address = (u64) kbase_reg_read(kbdev,
GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
+
address |= kbase_reg_read(kbdev,
GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
- dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
- status & 0xFF,
- kbase_exception_name(kbdev, status),
+ if ((gpu_id & GPU_ID2_PRODUCT_MODEL) != GPU_ID2_PRODUCT_TULX) {
+ dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+ status,
+ kbase_exception_name(kbdev, status & 0xFF),
address);
- if (multiple)
- dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+ if (multiple)
+ dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+ }
+}
+
+static bool kbase_gpu_fault_interrupt(struct kbase_device *kbdev, int multiple)
+{
+ kbase_report_gpu_fault(kbdev, multiple);
+ return false;
}
void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev)
@@ -257,6 +265,15 @@ void kbase_gpu_start_cache_clean(struct kbase_device *kbdev)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
+void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbdev->cache_clean_queued = false;
+ kbdev->cache_clean_in_progress = false;
+ wake_up(&kbdev->cache_clean_wait);
+}
+
static void kbase_clean_caches_done(struct kbase_device *kbdev)
{
u32 irq_mask;
@@ -276,9 +293,7 @@ static void kbase_clean_caches_done(struct kbase_device *kbdev)
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
irq_mask & ~CLEAN_CACHES_COMPLETED);
- kbdev->cache_clean_in_progress = false;
-
- wake_up(&kbdev->cache_clean_wait);
+ kbase_gpu_cache_clean_wait_complete(kbdev);
}
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -300,9 +315,12 @@ void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev)
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
{
+ bool clear_gpu_fault = false;
+
KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
if (val & GPU_FAULT)
- kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);
+ clear_gpu_fault = kbase_gpu_fault_interrupt(kbdev,
+ val & MULTIPLE_GPU_FAULTS);
if (val & RESET_COMPLETED)
kbase_pm_reset_done(kbdev);
@@ -341,9 +359,11 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
* cores.
*/
if (platform_power_down_only ||
+ kbdev->pm.backend.l2_always_on ||
kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_921))
kbase_pm_power_changed(kbdev);
}
+
KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_device_internal.h b/mali_kbase/backend/gpu/mali_kbase_device_internal.h
index 7886e96..c62f1e5 100644
--- a/mali_kbase/backend/gpu/mali_kbase_device_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_device_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014,2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -77,6 +77,16 @@ void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev);
void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev);
/**
+ * kbase_gpu_cache_clean_wait_complete - Called after the cache cleaning is
+ * finished. It is also called after
+ * a GPU reset.
+ * @kbdev: Kbase device
+ *
+ * Caller must hold the hwaccess_lock.
+ */
+void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev);
+
+/**
* kbase_gpu_interrupt - GPU interrupt handler
* @kbdev: Kbase device pointer
* @val: The value of the GPU IRQ status register which triggered the call
diff --git a/mali_kbase/backend/gpu/mali_kbase_gpu.c b/mali_kbase/backend/gpu/mali_kbase_gpu.c
index 97dd09a..9745df6 100644
--- a/mali_kbase/backend/gpu/mali_kbase_gpu.c
+++ b/mali_kbase/backend/gpu/mali_kbase_gpu.c
@@ -57,14 +57,8 @@ int kbase_backend_early_init(struct kbase_device *kbdev)
if (err)
goto fail_interrupts;
- err = kbase_hwaccess_pm_early_init(kbdev);
- if (err)
- goto fail_pm;
-
return 0;
-fail_pm:
- kbase_release_interrupts(kbdev);
fail_interrupts:
kbase_pm_runtime_term(kbdev);
fail_runtime_pm:
@@ -75,7 +69,6 @@ fail_runtime_pm:
void kbase_backend_early_term(struct kbase_device *kbdev)
{
- kbase_hwaccess_pm_early_term(kbdev);
kbase_release_interrupts(kbdev);
kbase_pm_runtime_term(kbdev);
kbasep_platform_device_term(kbdev);
@@ -85,7 +78,7 @@ int kbase_backend_late_init(struct kbase_device *kbdev)
{
int err;
- err = kbase_hwaccess_pm_late_init(kbdev);
+ err = kbase_hwaccess_pm_init(kbdev);
if (err)
return err;
@@ -152,7 +145,7 @@ fail_timer:
fail_pm_powerup:
kbase_reset_gpu_term(kbdev);
fail_reset_gpu_init:
- kbase_hwaccess_pm_late_term(kbdev);
+ kbase_hwaccess_pm_term(kbdev);
return err;
}
@@ -165,5 +158,5 @@ void kbase_backend_late_term(struct kbase_device *kbdev)
kbase_backend_timer_term(kbdev);
kbase_hwaccess_pm_halt(kbdev);
kbase_reset_gpu_term(kbdev);
- kbase_hwaccess_pm_late_term(kbdev);
+ kbase_hwaccess_pm_term(kbdev);
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_instr_backend.c b/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
index 5494c49..1d18326 100644
--- a/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_instr_backend.c
@@ -71,15 +71,7 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
/* Configure */
prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
if (enable->use_secondary)
- {
- u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
- >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
-
- if (arch_v6)
- prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
- }
+ prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
prfcnt_config | PRFCNT_CONFIG_MODE_OFF);
diff --git a/mali_kbase/backend/gpu/mali_kbase_irq_linux.c b/mali_kbase/backend/gpu/mali_kbase_irq_linux.c
index 643f450..fa3d2cc 100644
--- a/mali_kbase/backend/gpu/mali_kbase_irq_linux.c
+++ b/mali_kbase/backend/gpu/mali_kbase_irq_linux.c
@@ -469,4 +469,6 @@ void kbase_synchronize_irqs(struct kbase_device *kbdev)
}
}
+KBASE_EXPORT_TEST_API(kbase_synchronize_irqs);
+
#endif /* !defined(CONFIG_MALI_NO_MALI) */
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index 794abbf..d4f96c8 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -715,35 +715,15 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
#endif
}
-void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx)
+void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx)
{
- unsigned long flags;
- struct kbase_device *kbdev;
+ struct kbase_device *kbdev = kctx->kbdev;
int i;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- kbdev = kctx->kbdev;
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
- /* Cancel any remaining running jobs for this kctx */
- mutex_lock(&kctx->jctx.lock);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-
- /* Invalidate all incomplete jobs in context to prevent resubmitting */
- for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
- struct kbase_jd_atom *katom = &kctx->jctx.atoms[i];
-
- if ((katom->status != KBASE_JD_ATOM_STATE_COMPLETED) &&
- (katom->status !=
- KBASE_JD_ATOM_STATE_HW_COMPLETED))
- katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
- }
+ lockdep_assert_held(&kbdev->hwaccess_lock);
for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
kbase_job_slot_hardstop(kctx, i, NULL);
-
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- mutex_unlock(&kctx->jctx.lock);
}
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
@@ -1197,7 +1177,8 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
/* Complete any jobs that were still on the GPU */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbdev->protected_mode = false;
- kbase_backend_reset(kbdev, &end_timestamp);
+ if (!kbdev->pm.backend.protected_entry_transition_override)
+ kbase_backend_reset(kbdev, &end_timestamp);
kbase_pm_metrics_update(kbdev, NULL);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -1300,7 +1281,7 @@ static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
/* To prevent getting incorrect registers when dumping failed job,
* skip early reset.
*/
- if (kbdev->job_fault_debug != false)
+ if (atomic_read(&kbdev->job_fault_debug) > 0)
return;
/* Check that the reset has been committed to (i.e. kbase_reset_gpu has
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
index 7cdaf98..55440b8 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
@@ -349,6 +349,10 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
kbase_pm_protected_override_disable(kbdev);
kbase_pm_update_cores_state_nolock(kbdev);
}
+ if (kbase_jd_katom_is_protected(katom) &&
+ (katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2))
+ kbase_pm_protected_entry_override_disable(kbdev);
if (!kbase_jd_katom_is_protected(katom) &&
(katom->protected_state.exit !=
KBASE_ATOM_EXIT_PROTECTED_CHECK) &&
@@ -648,6 +652,9 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
* switched to protected mode or hwcnt
* re-enabled. */
+ if (kbase_pm_protected_entry_override_enable(kbdev))
+ return -EAGAIN;
+
/*
* Not in correct mode, begin protected mode switch.
* Entering protected mode requires us to power down the L2,
@@ -657,13 +664,28 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
kbase_pm_protected_override_enable(kbdev);
- kbase_pm_update_cores_state_nolock(kbdev);
+ /*
+ * Only if the GPU reset hasn't been initiated, there is a need
+ * to invoke the state machine to explicitly power down the
+ * shader cores and L2.
+ */
+ if (!kbdev->pm.backend.protected_entry_transition_override)
+ kbase_pm_update_cores_state_nolock(kbdev);
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
/* Avoid unnecessary waiting on non-ACE platforms. */
- if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) {
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+ if (kbdev->pm.backend.l2_always_on) {
+ /*
+ * If the GPU reset hasn't completed, then L2
+ * could still be powered up.
+ */
+ if (kbase_reset_gpu_is_active(kbdev))
+ return -EAGAIN;
+ }
+
if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
/*
@@ -687,6 +709,8 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
*/
kbase_gpu_disable_coherent(kbdev);
+ kbase_pm_protected_entry_override_disable(kbdev);
+
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
/*
* Power on L2 caches; this will also result in the
@@ -780,8 +804,7 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
- if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
- kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+ if (kbdev->pm.backend.l2_state != KBASE_L2_OFF) {
/*
* The L2 is still powered, wait for all the users to
* finish with it before doing the actual reset.
diff --git a/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c b/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c
index 7bf9e4d..916916d 100644
--- a/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c
+++ b/mali_kbase/backend/gpu/mali_kbase_l2_mmu_config.c
@@ -57,13 +57,14 @@ struct l2_mmu_config_limit {
/*
* Zero represents no limit
*
- * For TBEX TTRX and TNAX:
+ * For LBEX TBEX TTRX and TNAX:
* The value represents the number of outstanding reads (6 bits) or writes (5 bits)
*
* For all other GPUs it is a fraction, see: mali_kbase_config_defaults.h
*/
static const struct l2_mmu_config_limit limits[] = {
/* GPU read write */
+ {GPU_ID2_PRODUCT_LBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
{GPU_ID2_PRODUCT_TBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
{GPU_ID2_PRODUCT_TTRX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
{GPU_ID2_PRODUCT_TNAX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
@@ -90,6 +91,7 @@ void kbase_set_mmu_quirks(struct kbase_device *kbdev)
gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
product_model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+ /* Limit the GPU bus bandwidth if the platform needs this. */
for (i = 0; i < ARRAY_SIZE(limits); i++) {
if (product_model == limits[i].product_model) {
limit = limits[i];
@@ -105,4 +107,12 @@ void kbase_set_mmu_quirks(struct kbase_device *kbdev)
(limit.write.value << limit.write.shift);
kbdev->hw_quirks_mmu = mmu_config;
+
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+ /* Allow memory configuration disparity to be ignored:
+ * we optimize the use of shared memory and thus we
+ * expect some disparity in the memory configuration.
+ */
+ kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
+ }
}
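To make the limit table concrete: kbase_set_mmu_quirks() clears the read and write fields selected by the matching row, then ORs in the row's values at the given shifts. A worked sketch using the TTRX row's masks with made-up limit values (the limits shown are illustrative only, not recommended settings):

    u32 mmu_config = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG));

    /* TTRX row: read field GENMASK(12, 7), write field GENMASK(17, 13) */
    mmu_config &= ~(GENMASK(12, 7) | GENMASK(17, 13));
    /* e.g. limit to 6 outstanding reads and 3 outstanding writes */
    mmu_config |= (6u << 7) | (3u << 13);
    kbdev->hw_quirks_mmu = mmu_config;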
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
index a70439d..77e0b78 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -77,7 +77,7 @@ static int wait_ready(struct kbase_device *kbdev,
val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
if (max_loops == 0) {
- dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
+ dev_err(kbdev->dev, "AS_ACTIVE bit stuck, might be caused by slow/unstable GPU clock or possible faulty FPGA connector\n");
return -1;
}
@@ -160,11 +160,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
as = &kbdev->as[as_no];
/* find the fault type */
- as->fault_type = (bf_bits & (1 << as_no)) ?
- KBASE_MMU_FAULT_TYPE_BUS :
- KBASE_MMU_FAULT_TYPE_PAGE;
-
- if (kbase_as_has_bus_fault(as))
+ if (bf_bits & (1 << as_no))
fault = &as->bf_data;
else
fault = &as->pf_data;
@@ -183,7 +179,6 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
fault->addr <<= 32;
fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
AS_FAULTADDRESS_LO));
-
/* Mark the fault protected or not */
fault->protected_mode = kbdev->protected_mode;
@@ -207,7 +202,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));
}
- if (kbase_as_has_bus_fault(as)) {
+ if (kbase_as_has_bus_fault(as, fault)) {
/* Mark bus fault as handled.
* Note that a bus fault is processed first in case
* where both a bus fault and page fault occur.
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
index a5bbdf5..0a3fa7e 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
+++ b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.h
@@ -39,9 +39,24 @@
*
* Process the MMU interrupt that was reported by the &kbase_device.
*
- * @kbdev: kbase context to clear the fault from.
- * @irq_stat: Value of the MMU_IRQ_STATUS register
+ * @kbdev: Pointer to the kbase device for which the interrupt happened.
+ * @irq_stat: Value of the MMU_IRQ_STATUS register.
*/
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+/**
+ * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
+ *
+ * Process the bus fault interrupt that was reported for a particular GPU
+ * address space.
+ *
+ * @kbdev: Pointer to the kbase device for which bus fault was reported.
+ * @status: Value of the GPU_FAULTSTATUS register.
+ * @as_nr: GPU address space for which the bus fault occurred.
+ *
+ * Return: zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
+ u32 status, u32 as_nr);
+
#endif /* _KBASE_MMU_HW_DIRECT_H_ */
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index d9fc761..0faf677 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -35,9 +35,11 @@
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_devfreq.h>
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
+static void kbase_pm_gpu_clock_control_worker(struct work_struct *data);
int kbase_pm_runtime_init(struct kbase_device *kbdev)
{
@@ -114,7 +116,7 @@ void kbase_pm_register_access_disable(struct kbase_device *kbdev)
kbdev->pm.backend.gpu_powered = false;
}
-int kbase_hwaccess_pm_early_init(struct kbase_device *kbdev)
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
int ret = 0;
@@ -162,6 +164,52 @@ int kbase_hwaccess_pm_early_init(struct kbase_device *kbdev)
if (kbase_pm_state_machine_init(kbdev) != 0)
goto pm_state_machine_fail;
+ kbdev->pm.backend.hwcnt_desired = false;
+ kbdev->pm.backend.hwcnt_disabled = true;
+ INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
+ kbase_pm_hwcnt_disable_worker);
+ kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+ /* At runtime, this feature can be enabled via a module parameter
+ * when insmod is executed. It then overrides all workarounds.
+ */
+ if (platform_power_down_only) {
+ kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+ kbdev->pm.backend.l2_always_on = false;
+
+ return 0;
+ }
+
+ if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
+ kbdev->pm.backend.l2_always_on = false;
+ kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+
+ return 0;
+ }
+
+ /* WA1: L2 always_on for GPUs affected by GPU2017-1336 */
+ if (!IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE)) {
+ kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336))
+ kbdev->pm.backend.l2_always_on = true;
+ else
+ kbdev->pm.backend.l2_always_on = false;
+
+ return 0;
+ }
+
+ /* WA3: Clock slow down for GPUs affected by GPU2017-1336 */
+ kbdev->pm.backend.l2_always_on = false;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
+ kbdev->pm.backend.gpu_clock_slow_down_wa = true;
+ kbdev->pm.backend.gpu_clock_suspend_freq = 0;
+ kbdev->pm.backend.gpu_clock_slow_down_desired = true;
+ kbdev->pm.backend.gpu_clock_slowed_down = false;
+ INIT_WORK(&kbdev->pm.backend.gpu_clock_control_work,
+ kbase_pm_gpu_clock_control_worker);
+ } else
+ kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+
return 0;
pm_state_machine_fail:
@@ -173,19 +221,6 @@ workq_fail:
return -EINVAL;
}
-int kbase_hwaccess_pm_late_init(struct kbase_device *kbdev)
-{
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
- kbdev->pm.backend.hwcnt_desired = false;
- kbdev->pm.backend.hwcnt_disabled = true;
- INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
- kbase_pm_hwcnt_disable_worker);
- kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
-
- return 0;
-}
-
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
lockdep_assert_held(&kbdev->pm.lock);
@@ -231,14 +266,13 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
mutex_lock(&kbdev->pm.lock);
if (!backend->poweron_required) {
- if (!platform_power_down_only) {
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- WARN_ON(backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF ||
- backend->l2_state != KBASE_L2_OFF);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ WARN_ON(backend->shaders_state !=
+ KBASE_SHADERS_OFF_CORESTACK_OFF ||
+ backend->l2_state != KBASE_L2_OFF);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* Disable interrupts and turn the clock off */
if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
@@ -284,6 +318,121 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
wake_up(&kbdev->pm.backend.poweroff_wait);
}
+static void kbase_pm_l2_clock_slow(struct kbase_device *kbdev)
+{
+#if defined(CONFIG_MALI_MIDGARD_DVFS)
+ struct clk *clk = kbdev->clocks[0];
+#endif
+
+ if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
+ return;
+
+ /* No suspend clock is specified */
+ if (WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_suspend_freq))
+ return;
+
+#if defined(CONFIG_MALI_DEVFREQ)
+
+ /* Suspend devfreq */
+ devfreq_suspend_device(kbdev->devfreq);
+
+ /* Keep the current freq to restore it upon resume */
+ kbdev->previous_frequency = kbdev->current_nominal_freq;
+
+ /* Slow down GPU clock to the suspend clock */
+ kbase_devfreq_force_freq(kbdev,
+ kbdev->pm.backend.gpu_clock_suspend_freq);
+
+#elif defined(CONFIG_MALI_MIDGARD_DVFS) /* CONFIG_MALI_DEVFREQ */
+
+ if (WARN_ON_ONCE(!clk))
+ return;
+
+ /* Stop the metrics gathering framework */
+ if (kbase_pm_metrics_is_active(kbdev))
+ kbase_pm_metrics_stop(kbdev);
+
+ /* Keep the current freq to restore it upon resume */
+ kbdev->previous_frequency = clk_get_rate(clk);
+
+ /* Slow down GPU clock to the suspend clock */
+ if (WARN_ON_ONCE(clk_set_rate(clk,
+ kbdev->pm.backend.gpu_clock_suspend_freq)))
+ dev_err(kbdev->dev, "Failed to set suspend freq\n");
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+static void kbase_pm_l2_clock_normalize(struct kbase_device *kbdev)
+{
+#if defined(CONFIG_MALI_MIDGARD_DVFS)
+ struct clk *clk = kbdev->clocks[0];
+#endif
+
+ if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
+ return;
+
+#if defined(CONFIG_MALI_DEVFREQ)
+
+ /* Restore GPU clock to the previous one */
+ kbase_devfreq_force_freq(kbdev, kbdev->previous_frequency);
+
+ /* Resume devfreq */
+ devfreq_resume_device(kbdev->devfreq);
+
+#elif defined(CONFIG_MALI_MIDGARD_DVFS) /* CONFIG_MALI_DEVFREQ */
+
+ if (WARN_ON_ONCE(!clk))
+ return;
+
+ /* Restore GPU clock */
+ if (WARN_ON_ONCE(clk_set_rate(clk, kbdev->previous_frequency)))
+ dev_err(kbdev->dev, "Failed to restore freq (%lu)\n",
+ kbdev->previous_frequency);
+
+ /* Restart the metrics gathering framework */
+ kbase_pm_metrics_start(kbdev);
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
+{
+ struct kbase_device *kbdev = container_of(data, struct kbase_device,
+ pm.backend.gpu_clock_control_work);
+ struct kbase_pm_device_data *pm = &kbdev->pm;
+ struct kbase_pm_backend_data *backend = &pm->backend;
+ unsigned long flags;
+ bool slow_down = false, normalize = false;
+
+ /* Determine if GPU clock control is required */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (!backend->gpu_clock_slowed_down &&
+ backend->gpu_clock_slow_down_desired) {
+ slow_down = true;
+ backend->gpu_clock_slowed_down = true;
+ } else if (backend->gpu_clock_slowed_down &&
+ !backend->gpu_clock_slow_down_desired) {
+ normalize = true;
+ backend->gpu_clock_slowed_down = false;
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* Control the GPU clock according to the request of the L2 state machine.
+ * The GPU clock needs to be lowered for safe L2 power down
+ * and restored to previous speed at L2 power up.
+ */
+ if (slow_down)
+ kbase_pm_l2_clock_slow(kbdev);
+ else if (normalize)
+ kbase_pm_l2_clock_normalize(kbdev);
+
+ /* Tell the L2 state machine to transition to the next state */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_update_state(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
{
struct kbase_device *kbdev = container_of(data, struct kbase_device,
@@ -375,6 +524,7 @@ void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
wait_event_killable(kbdev->pm.backend.poweroff_wait,
is_poweroff_in_progress(kbdev));
}
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_complete);
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
unsigned int flags)
@@ -443,27 +593,12 @@ void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
-void kbase_hwaccess_pm_early_term(struct kbase_device *kbdev)
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
KBASE_DEBUG_ASSERT(kbdev != NULL);
KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);
- /* Free any resources the policy allocated */
- kbase_pm_state_machine_term(kbdev);
- kbase_pm_policy_term(kbdev);
- kbase_pm_ca_term(kbdev);
-
- /* Shut down the metrics subsystem */
- kbasep_pm_metrics_term(kbdev);
-
- destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
-}
-
-void kbase_hwaccess_pm_late_term(struct kbase_device *kbdev)
-{
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
cancel_work_sync(&kbdev->pm.backend.hwcnt_disable_work);
if (kbdev->pm.backend.hwcnt_disabled) {
@@ -473,6 +608,16 @@ void kbase_hwaccess_pm_late_term(struct kbase_device *kbdev)
kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
+
+ /* Free any resources the policy allocated */
+ kbase_pm_state_machine_term(kbdev);
+ kbase_pm_policy_term(kbdev);
+ kbase_pm_ca_term(kbdev);
+
+ /* Shut down the metrics subsystem */
+ kbasep_pm_metrics_term(kbdev);
+
+ destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
}
void kbase_pm_power_changed(struct kbase_device *kbdev)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
index 42d3cb0..d7dc63a 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
@@ -73,11 +73,14 @@ enum kbase_pm_core_type {
*
* @KBASE_L2_OFF: The L2 cache and tiler are off
* @KBASE_L2_PEND_ON: The L2 cache and tiler are powering on
+ * @KBASE_L2_RESTORE_CLOCKS: The GPU clock is restored. Conditionally used.
* @KBASE_L2_ON_HWCNT_ENABLE: The L2 cache and tiler are on, and hwcnt is being
* enabled
* @KBASE_L2_ON: The L2 cache and tiler are on, and hwcnt is enabled
* @KBASE_L2_ON_HWCNT_DISABLE: The L2 cache and tiler are on, and hwcnt is being
* disabled
+ * @KBASE_L2_SLOW_DOWN_CLOCKS: The GPU clock is set to appropriate or lowest
+ * clock. Conditionally used.
* @KBASE_L2_POWER_DOWN: The L2 cache and tiler are about to be powered off
* @KBASE_L2_PEND_OFF: The L2 cache and tiler are powering off
* @KBASE_L2_RESET_WAIT: The GPU is resetting, L2 cache and tiler power state
@@ -159,8 +162,7 @@ struct kbasep_pm_metrics {
* not. Updated when the job scheduler informs us a job in submitted
* or removed from a GPU slot.
* @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
- * @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As
- * GL jobs never run on slot 2 this slot is not recorded.
+ * @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot.
* @lock: spinlock protecting the kbasep_pm_metrics_data structure
* @platform_data: pointer to data controlled by platform specific code
* @kbdev: pointer to kbase device for which metrics are collected
@@ -177,7 +179,7 @@ struct kbasep_pm_metrics_state {
ktime_t time_period_start;
bool gpu_active;
u32 active_cl_ctx[2];
- u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */
+ u32 active_gl_ctx[3];
spinlock_t lock;
void *platform_data;
@@ -291,10 +293,11 @@ union kbase_pm_policy_data {
* @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
* &struct kbase_pm_callback_conf
* @ca_cores_enabled: Cores that are currently available
- * @l2_state: The current state of the L2 cache state machine. See
- * &enum kbase_l2_core_state
- * @l2_desired: True if the L2 cache should be powered on by the L2 cache state
- * machine
+ * @l2_state: The current state of the L2 cache state machine. See
+ * &enum kbase_l2_core_state
+ * @l2_desired: True if the L2 cache should be powered on by the L2 cache state
+ * machine
+ * @l2_always_on: If true, disable powering down of l2 cache.
* @shaders_state: The current state of the shader state machine.
* @shaders_avail: This is updated by the state machine when it is in a state
* where it can handle changes to the core availability. This
@@ -308,6 +311,10 @@ union kbase_pm_policy_data {
* that the policy doesn't change its mind in the mean time).
* @in_reset: True if a GPU is resetting and normal power manager operation is
* suspended
+ * @protected_entry_transition_override : True if GPU reset is being used
+ * before entering the protected mode and so
+ * the reset handling behaviour is being
+ * overridden.
* @protected_transition_override : True if a protected mode transition is in
* progress and is overriding power manager
* behaviour.
@@ -318,6 +325,22 @@ union kbase_pm_policy_data {
* @hwcnt_disabled: True if GPU hardware counters are not enabled.
* @hwcnt_disable_work: Work item to disable GPU hardware counters, used if
* atomic disable is not possible.
+ * @gpu_clock_suspend_freq: 'opp-mali-errata-1485982' clock in opp table
+ * for safe L2 power cycle.
+ * If no opp-mali-errata-1485982 is specified,
+ * the slowest clock will be taken.
+ * @gpu_clock_slow_down_wa: If true, slow down GPU clock during L2 power cycle.
+ * @gpu_clock_slow_down_desired: True if we want a lower GPU clock
+ * for safe L2 power cycle. False if we want the GPU clock
+ * restored to its normal speed. This is updated only
+ * in the L2 state machine, kbase_pm_l2_update_state.
+ * @gpu_clock_slowed_down: During L2 power cycle,
+ * true if the GPU clock is set at a lower frequency
+ * for safe L2 power down, false if the GPU clock has been
+ * restored to its previous speed. This is updated only in the
+ * work function, kbase_pm_gpu_clock_control_worker.
+ * @gpu_clock_control_work: work item to set GPU clock during L2 power cycle
+ * using gpu_clock_control
*
* Note:
* During an IRQ, @pm_current_policy can be NULL when the policy is being
@@ -374,16 +397,24 @@ struct kbase_pm_backend_data {
enum kbase_shader_core_state shaders_state;
u64 shaders_avail;
bool l2_desired;
+ bool l2_always_on;
bool shaders_desired;
bool in_reset;
+ bool protected_entry_transition_override;
bool protected_transition_override;
int protected_l2_override;
bool hwcnt_desired;
bool hwcnt_disabled;
struct work_struct hwcnt_disable_work;
+
+ u64 gpu_clock_suspend_freq;
+ bool gpu_clock_slow_down_wa;
+ bool gpu_clock_slow_down_desired;
+ bool gpu_clock_slowed_down;
+ struct work_struct gpu_clock_control_work;
};
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index d97ec23..92d3818 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -97,6 +97,9 @@ static u64 kbase_pm_get_state(
static bool kbase_pm_is_l2_desired(struct kbase_device *kbdev)
{
+ if (kbdev->pm.backend.protected_entry_transition_override)
+ return false;
+
if (kbdev->pm.backend.protected_transition_override &&
kbdev->pm.backend.protected_l2_override)
return true;
@@ -121,6 +124,44 @@ void kbase_pm_protected_override_disable(struct kbase_device *kbdev)
kbdev->pm.backend.protected_transition_override = false;
}
+int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(!kbdev->protected_mode_transition);
+
+ if (kbdev->pm.backend.l2_always_on &&
+ (kbdev->system_coherency == COHERENCY_ACE)) {
+ WARN_ON(kbdev->pm.backend.protected_entry_transition_override);
+
+ /*
+ * If there is already a GPU reset pending then wait for it to
+ * complete before initiating a special reset for protected
+ * mode entry.
+ */
+ if (kbase_reset_gpu_silent(kbdev))
+ return -EAGAIN;
+
+ kbdev->pm.backend.protected_entry_transition_override = true;
+ }
+
+ return 0;
+}
+
+void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(!kbdev->protected_mode_transition);
+
+ if (kbdev->pm.backend.l2_always_on &&
+ (kbdev->system_coherency == COHERENCY_ACE)) {
+ WARN_ON(!kbdev->pm.backend.protected_entry_transition_override);
+
+ kbdev->pm.backend.protected_entry_transition_override = false;
+ }
+}
+
void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
@@ -487,6 +528,15 @@ static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_CONFIG), val);
}
+static void kbase_pm_control_gpu_clock(struct kbase_device *kbdev)
+{
+ struct kbase_pm_backend_data *const backend = &kbdev->pm.backend;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ queue_work(system_wq, &backend->gpu_clock_control_work);
+}
+
static const char *kbase_l2_core_state_to_string(enum kbase_l2_core_state state)
{
const char *const strings[] = {
@@ -571,7 +621,12 @@ static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
/* With the L2 enabled, we can now enable
* hardware counters.
*/
- backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
+ if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+ backend->l2_state =
+ KBASE_L2_RESTORE_CLOCKS;
+ else
+ backend->l2_state =
+ KBASE_L2_ON_HWCNT_ENABLE;
/* Now that the L2 is on, the shaders can start
* powering on if they're required. The obvious
@@ -590,6 +645,28 @@ static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
}
break;
+ case KBASE_L2_RESTORE_CLOCKS:
+ /* We always assume that only GPUs affected by
+ * BASE_HW_ISSUE_GPU2017_1336 reach this state.
+ */
+ WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);
+
+ /* If the L2 is not needed, we need to make sure that any
+ * previously issued work to restore the GPU clock is
+ * cancelled. For that, move to the
+ * KBASE_L2_SLOW_DOWN_CLOCKS state.
+ */
+ if (!kbase_pm_is_l2_desired(kbdev)) {
+ backend->l2_state = KBASE_L2_SLOW_DOWN_CLOCKS;
+ break;
+ }
+
+ backend->gpu_clock_slow_down_desired = false;
+ if (backend->gpu_clock_slowed_down)
+ kbase_pm_control_gpu_clock(kbdev);
+ else
+ backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
+ break;
+
case KBASE_L2_ON_HWCNT_ENABLE:
backend->hwcnt_desired = true;
if (backend->hwcnt_disabled) {
@@ -653,12 +730,41 @@ static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
kbase_pm_trigger_hwcnt_disable(kbdev);
}
- if (backend->hwcnt_disabled)
+ if (backend->hwcnt_disabled) {
+ if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+ backend->l2_state =
+ KBASE_L2_SLOW_DOWN_CLOCKS;
+ else
+ backend->l2_state = KBASE_L2_POWER_DOWN;
+ }
+ break;
+
+ case KBASE_L2_SLOW_DOWN_CLOCKS:
+ /* We always assume that only GPUs affected by
+ * BASE_HW_ISSUE_GPU2017_1336 reach this state.
+ */
+ WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);
+
+ /* The L2 needs to be powered up, and we need to make sure
+ * that any previously issued work to slow down the GPU
+ * clock is cancelled. For that, we move to the
+ * KBASE_L2_RESTORE_CLOCKS state.
+ */
+ if (kbase_pm_is_l2_desired(kbdev)) {
+ backend->l2_state = KBASE_L2_RESTORE_CLOCKS;
+ break;
+ }
+
+ backend->gpu_clock_slow_down_desired = true;
+ if (!backend->gpu_clock_slowed_down)
+ kbase_pm_control_gpu_clock(kbdev);
+ else
backend->l2_state = KBASE_L2_POWER_DOWN;
+
break;
case KBASE_L2_POWER_DOWN:
- if (!platform_power_down_only)
+ if (!platform_power_down_only && !backend->l2_always_on)
/* Powering off the L2 will also power off the
* tiler.
*/
@@ -681,7 +787,7 @@ static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
break;
case KBASE_L2_PEND_OFF:
- if (!platform_power_down_only) {
+ if (!platform_power_down_only && !backend->l2_always_on) {
/* We only need to check the L2 here - if the L2
* is off then the tiler is definitely also off.
*/
@@ -1244,8 +1350,15 @@ void kbase_pm_reset_complete(struct kbase_device *kbdev)
struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
unsigned long flags;
+ WARN_ON(!kbase_reset_gpu_is_active(kbdev));
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ /* As the GPU has just been reset, which results in an implicit flush
+ * of the L2 cache, we can safely mark the pending cache flush
+ * operation (if there was any) as complete and unblock the waiter.
+ * No work can be submitted whilst the GPU reset is ongoing.
+ */
+ kbase_gpu_cache_clean_wait_complete(kbdev);
backend->in_reset = false;
kbase_pm_update_state(kbdev);
@@ -1553,35 +1666,72 @@ static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+static void kbase_set_jm_quirks(struct kbase_device *kbdev, const u32 prod_id)
{
- struct device_node *np = kbdev->dev->of_node;
- const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
- GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >>
- GPU_ID_VERSION_MAJOR_SHIFT;
+ kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(JM_CONFIG));
+ if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == GPU_ID2_PRODUCT_TMIX) {
+ /* Only for tMIx */
+ u32 coherency_features;
- kbdev->hw_quirks_sc = 0;
+ coherency_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(COHERENCY_FEATURES));
+
+ /* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+ * documented for tMIx so force correct value here.
+ */
+ if (coherency_features ==
+ COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
+ kbdev->hw_quirks_jm |= (COHERENCY_ACE_LITE |
+ COHERENCY_ACE) <<
+ JM_FORCE_COHERENCY_FEATURES_SHIFT;
+ }
+ }
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
+ int default_idvs_group_size = 0xF;
+ u32 tmp;
+
+ if (of_property_read_u32(kbdev->dev->of_node,
+ "idvs-group-size", &tmp))
+ tmp = default_idvs_group_size;
+
+ if (tmp > JM_MAX_IDVS_GROUP_SIZE) {
+ dev_err(kbdev->dev,
+ "idvs-group-size of %d is too large. Maximum value is %d",
+ tmp, JM_MAX_IDVS_GROUP_SIZE);
+ tmp = default_idvs_group_size;
+ }
+
+ kbdev->hw_quirks_jm |= tmp << JM_IDVS_GROUP_SIZE_SHIFT;
+ }
+
+#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
+ if (corestack_driver_control)
+ kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL;
+}
+
+static void kbase_set_sc_quirks(struct kbase_device *kbdev, const u32 prod_id)
+{
+ kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_CONFIG));
- /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443.
- * and needed due to MIDGLES-3539. See PRLAM-11035 */
+ /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE.
+ * See PRLAM-8443 and needed due to MIDGLES-3539.
+ * See PRLAM-11035.
+ */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
- /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327.
+ /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD.
+ * See PRLAM-10327.
*/
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
- /* Enable alternative hardware counter selection if configured. */
- if (!GPU_ID_IS_NEW_FORMAT(prod_id))
- kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
-#endif
-
- /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
+ /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS.
+ * See PRLAM-10797.
+ */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
@@ -1595,104 +1745,66 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_2968_TTRX_3162))
kbdev->hw_quirks_sc |= SC_VAR_ALGORITHM;
- if (!kbdev->hw_quirks_sc)
- kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SHADER_CONFIG));
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
+ kbdev->hw_quirks_sc |= SC_TLS_HASH_ENABLE;
+}
+static void kbase_set_tiler_quirks(struct kbase_device *kbdev)
+{
kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_CONFIG));
-
+ GPU_CONTROL_REG(TILER_CONFIG));
/* Set tiler clock gate override if required */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
+}
- /* Limit the GPU bus bandwidth if the platform needs this. */
- kbase_set_mmu_quirks(kbdev);
-
- if (kbdev->system_coherency == COHERENCY_ACE) {
- /* Allow memory configuration disparity to be ignored, we
- * optimize the use of shared memory and thus we expect
- * some disparity in the memory configuration */
- kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
- }
+static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+{
+ struct device_node *np = kbdev->dev->of_node;
+ const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
kbdev->hw_quirks_jm = 0;
- /* Only for T86x/T88x-based products after r2p0 */
- if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
- u32 jm_values[4] = {0u, 0u, 0u, JM_MAX_JOB_THROTTLE_LIMIT};
-
- /* If entry not in device tree (return value of this func != 0),
- * use defaults from jm_values[]'s initializer
- */
- (void)of_property_read_u32_array(np,
- "jm_config",
- &jm_values[0],
- ARRAY_SIZE(jm_values));
-
- /* Limit throttle limit to 6 bits*/
- if (jm_values[3] > JM_MAX_JOB_THROTTLE_LIMIT) {
- dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63).");
- jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT;
- }
-
- /* Aggregate to one integer. */
- kbdev->hw_quirks_jm |= (jm_values[0] ?
- JM_TIMESTAMP_OVERRIDE : 0);
- kbdev->hw_quirks_jm |= (jm_values[1] ?
- JM_CLOCK_GATE_OVERRIDE : 0);
- kbdev->hw_quirks_jm |= (jm_values[2] ?
- JM_JOB_THROTTLE_ENABLE : 0);
- kbdev->hw_quirks_jm |= (jm_values[3] <<
- JM_JOB_THROTTLE_LIMIT_SHIFT);
-
- } else if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
- (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
- GPU_ID2_PRODUCT_TMIX)) {
- /* Only for tMIx */
- u32 coherency_features;
-
- coherency_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(COHERENCY_FEATURES));
+ kbdev->hw_quirks_sc = 0;
+ kbdev->hw_quirks_tiler = 0;
+ kbdev->hw_quirks_mmu = 0;
- /* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
- * documented for tMIx so force correct value here.
- */
- if (coherency_features ==
- COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
- kbdev->hw_quirks_jm |=
- (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
- JM_FORCE_COHERENCY_FEATURES_SHIFT;
- }
+ if (!of_property_read_u32(np, "quirks_jm",
+ &kbdev->hw_quirks_jm)) {
+ dev_info(kbdev->dev,
+ "Found quirks_jm = [0x%x] in Devicetree\n",
+ kbdev->hw_quirks_jm);
+ } else {
+ kbase_set_jm_quirks(kbdev, prod_id);
}
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
- kbdev->hw_quirks_sc |= SC_TLS_HASH_ENABLE;
-
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
- int default_idvs_group_size = 0xF;
- u32 tmp;
-
- if (of_property_read_u32(kbdev->dev->of_node,
- "idvs-group-size", &tmp))
- tmp = default_idvs_group_size;
-
- if (tmp > JM_MAX_IDVS_GROUP_SIZE) {
- dev_err(kbdev->dev,
- "idvs-group-size of %d is too large. Maximum value is %d",
- tmp, JM_MAX_IDVS_GROUP_SIZE);
- tmp = default_idvs_group_size;
- }
-
- kbdev->hw_quirks_jm |= tmp << JM_IDVS_GROUP_SIZE_SHIFT;
+ if (!of_property_read_u32(np, "quirks_sc",
+ &kbdev->hw_quirks_sc)) {
+ dev_info(kbdev->dev,
+ "Found quirks_sc = [0x%x] in Devicetree\n",
+ kbdev->hw_quirks_sc);
+ } else {
+ kbase_set_sc_quirks(kbdev, prod_id);
}
- if (!kbdev->hw_quirks_jm)
- kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(JM_CONFIG));
+ if (!of_property_read_u32(np, "quirks_tiler",
+ &kbdev->hw_quirks_tiler)) {
+ dev_info(kbdev->dev,
+ "Found quirks_tiler = [0x%x] in Devicetree\n",
+ kbdev->hw_quirks_tiler);
+ } else {
+ kbase_set_tiler_quirks(kbdev);
+ }
-#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
- if (corestack_driver_control)
- kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL;
+ if (!of_property_read_u32(np, "quirks_mmu",
+ &kbdev->hw_quirks_mmu)) {
+ dev_info(kbdev->dev,
+ "Found quirks_mmu = [0x%x] in Devicetree\n",
+ kbdev->hw_quirks_mmu);
+ } else {
+ kbase_set_mmu_quirks(kbdev);
+ }
}
static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
@@ -1705,10 +1817,8 @@ static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
kbdev->hw_quirks_mmu);
-
kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
kbdev->hw_quirks_jm);
-
}
void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
@@ -1738,6 +1848,19 @@ void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
}
}
+static void reenable_protected_mode_hwcnt(struct kbase_device *kbdev)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ kbdev->protected_mode_hwcnt_desired = true;
+ if (kbdev->protected_mode_hwcnt_disabled) {
+ kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+ kbdev->protected_mode_hwcnt_disabled = false;
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+}
+
static int kbase_pm_do_reset(struct kbase_device *kbdev)
{
struct kbasep_reset_timeout_data rtdata;
@@ -1923,16 +2046,13 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbase_pm_enable_interrupts(kbdev);
exit:
- /* Re-enable GPU hardware counters if we're resetting from protected
- * mode.
- */
- spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
- kbdev->protected_mode_hwcnt_desired = true;
- if (kbdev->protected_mode_hwcnt_disabled) {
- kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
- kbdev->protected_mode_hwcnt_disabled = false;
+ if (!kbdev->pm.backend.protected_entry_transition_override) {
+ /* Re-enable GPU hardware counters if we're resetting from
+ * protected mode.
+ */
+ reenable_protected_mode_hwcnt(kbdev);
}
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
return err;
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
index e88b3a8..6ca6a71 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -219,6 +219,11 @@ void kbase_pm_reset_done(struct kbase_device *kbdev);
* Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
* because this function will take that lock itself.
*
+ * NOTE: This may not wait until the correct state is reached if a power-off
+ * is in progress. To wait reliably for the desired state, the caller must
+ * first ensure that no power-off is in progress, for example by calling
+ * kbase_pm_wait_for_poweroff_complete().
+ *
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
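
A short usage sketch of the ordering this NOTE implies, using only the functions declared in this header:

/* Ensure no power-off is racing before waiting, per the NOTE above. */
kbase_pm_wait_for_poweroff_complete(kbdev);
kbase_pm_wait_for_desired_state(kbdev);
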
@@ -609,6 +614,48 @@ void kbase_pm_protected_override_disable(struct kbase_device *kbdev);
*/
void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override);
+/**
+ * kbase_pm_protected_entry_override_enable - Enable the protected mode entry
+ * override
+ * @kbdev: Device pointer
+ *
+ * Initiate a GPU reset and enable the protected mode entry override flag if
+ * the l2_always_on WA is enabled and the platform is fully coherent. If a GPU
+ * reset is already ongoing then the protected mode entry override flag is not
+ * enabled and the function must be called again.
+ *
+ * When the protected mode entry override flag is enabled to power down the L2
+ * via a GPU reset, the GPU reset handling behaviour changes. For example, the
+ * call to kbase_backend_reset() is skipped, HW counters are not re-enabled
+ * and the L2 isn't powered up again after the reset.
+ * This is needed only as a workaround for a HW issue where an explicit power
+ * down of the L2 causes a glitch. To enter protected mode on fully coherent
+ * platforms the L2 must be powered down to switch to IO coherency mode, so a
+ * GPU reset is used to power down the L2 and avoid the glitch. Hence, this
+ * function does nothing on systems where the glitch issue isn't present.
+ *
+ * Caller must hold hwaccess_lock. Should only be called during the transition
+ * to enter protected mode.
+ *
+ * Return: -EAGAIN if a GPU reset was required for the glitch workaround but
+ * was already ongoing, otherwise 0.
+ */
+int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_entry_override_disable - Disable the protected mode entry
+ * override
+ * @kbdev: Device pointer
+ *
+ * This shall be called once the L2 has powered down and the switch to IO
+ * coherency mode has been made. As with
+ * kbase_pm_protected_entry_override_enable(), this function does nothing on
+ * systems where the glitch issue isn't present.
+ *
+ * Caller must hold hwaccess_lock. Should only be called during the transition
+ * to enter protected mode.
+ */
+void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev);
+
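
A minimal sketch of how a caller might drive this enable/disable pair, assuming the caller already holds hwaccess_lock and that the surrounding protected-mode entry state machine retries on -EAGAIN:

/* Sketch only: the real sequencing lives in the protected-mode entry
 * state machine. Caller must hold hwaccess_lock for both calls.
 */
static int enter_protected_mode_l2_workaround(struct kbase_device *kbdev)
{
	int err = kbase_pm_protected_entry_override_enable(kbdev);

	if (err == -EAGAIN)
		return err; /* a reset was already ongoing; retry later */

	/* ... wait for the L2 to power down and IO coherency to apply ... */

	kbase_pm_protected_entry_override_disable(kbdev);
	return 0;
}
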
/* If true, the driver should explicitly control corestack power management,
* instead of relying on the Power Domain Controller.
*/
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_l2_states.h b/mali_kbase/backend/gpu/mali_kbase_pm_l2_states.h
index 94bad77..12cb051 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_l2_states.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_l2_states.h
@@ -28,9 +28,11 @@
*/
KBASEP_L2_STATE(OFF)
KBASEP_L2_STATE(PEND_ON)
+KBASEP_L2_STATE(RESTORE_CLOCKS)
KBASEP_L2_STATE(ON_HWCNT_ENABLE)
KBASEP_L2_STATE(ON)
KBASEP_L2_STATE(ON_HWCNT_DISABLE)
+KBASEP_L2_STATE(SLOW_DOWN_CLOCKS)
KBASEP_L2_STATE(POWER_DOWN)
KBASEP_L2_STATE(PEND_OFF)
KBASEP_L2_STATE(RESET_WAIT)
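
The two new states slot into an X-macro list; a sketch of how such a list typically expands into an enum plus a matching name table (the KBASE_L2_ prefix and table name here are illustrative):

/* Illustrative expansion of an X-macro state list like the one above. */
enum kbasep_l2_state {
#define KBASEP_L2_STATE(n) KBASE_L2_ ## n,
#include "mali_kbase_pm_l2_states.h"
#undef KBASEP_L2_STATE
};

static const char *const l2_state_names[] = {
#define KBASEP_L2_STATE(n) #n,
#include "mali_kbase_pm_l2_states.h"
#undef KBASEP_L2_STATE
};
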
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c b/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c
index 6b9b686..ae494b0 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -80,6 +80,7 @@ int kbasep_pm_metrics_init(struct kbase_device *kbdev)
kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[2] = 0;
kbdev->pm.backend.metrics.values.time_busy = 0;
kbdev->pm.backend.metrics.values.time_idle = 0;
@@ -90,19 +91,15 @@ int kbasep_pm_metrics_init(struct kbase_device *kbdev)
spin_lock_init(&kbdev->pm.backend.metrics.lock);
#ifdef CONFIG_MALI_MIDGARD_DVFS
- kbdev->pm.backend.metrics.timer_active = true;
hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
kbdev->pm.backend.metrics.timer.function = dvfs_callback;
- hrtimer_start(&kbdev->pm.backend.metrics.timer,
- HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
- HRTIMER_MODE_REL);
+ kbase_pm_metrics_start(kbdev);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
return 0;
}
-
KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
void kbasep_pm_metrics_term(struct kbase_device *kbdev)
@@ -148,6 +145,8 @@ static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
kbdev->pm.backend.metrics.values.busy_gl += ns_time;
if (kbdev->pm.backend.metrics.active_gl_ctx[1])
kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+ if (kbdev->pm.backend.metrics.active_gl_ctx[2])
+ kbdev->pm.backend.metrics.values.busy_gl += ns_time;
} else {
kbdev->pm.backend.metrics.values.time_idle += (u32) (ktime_to_ns(diff)
>> KBASE_PM_TIME_SHIFT);
@@ -221,6 +220,29 @@ bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
}
KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
+void kbase_pm_metrics_start(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbdev->pm.backend.metrics.timer_active = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+ hrtimer_start(&kbdev->pm.backend.metrics.timer,
+ HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
+ HRTIMER_MODE_REL);
+}
+
+void kbase_pm_metrics_stop(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbdev->pm.backend.metrics.timer_active = false;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+ hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+}
+
#endif /* CONFIG_MALI_MIDGARD_DVFS */
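
These helpers wrap the timer management that kbasep_pm_metrics_init() previously did inline; a hedged sketch of the pairing a suspend/resume path might use:

/* Sketch: pause DVFS sampling while the GPU must stay quiet, then
 * resume it. Illustrative pairing only.
 */
kbase_pm_metrics_stop(kbdev);  /* clears timer_active, cancels the hrtimer */
/* ... e.g. system suspend ... */
kbase_pm_metrics_start(kbdev); /* sets timer_active, restarts the hrtimer */
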
/**
@@ -238,6 +260,7 @@ static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[2] = 0;
kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
kbdev->pm.backend.metrics.gpu_active = false;
@@ -260,11 +283,7 @@ static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
kbdev->pm.backend.metrics.
active_cl_ctx[device_nr] = 1;
} else {
- /* Slot 2 should not be running non-compute
- * atoms */
- if (!WARN_ON(js >= 2))
- kbdev->pm.backend.metrics.
- active_gl_ctx[js] = 1;
+ kbdev->pm.backend.metrics.active_gl_ctx[js] = 1;
}
kbdev->pm.backend.metrics.gpu_active = true;
}
diff --git a/mali_kbase/backend/gpu/mali_kbase_time.c b/mali_kbase/backend/gpu/mali_kbase_time.c
index 5e1b761..0e17dc0 100644
--- a/mali_kbase/backend/gpu/mali_kbase_time.c
+++ b/mali_kbase/backend/gpu/mali_kbase_time.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016,2018-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -32,28 +32,35 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
kbase_pm_request_gpu_cycle_counter(kbdev);
- /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
- * correctly */
- do {
- hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI));
- *cycle_counter = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(CYCLE_COUNT_LO));
- hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI));
+ if (cycle_counter) {
+ /* Read hi, lo, hi to ensure a coherent u64 */
+ do {
+ hi1 = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_HI));
+ *cycle_counter = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_LO));
+ hi2 = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_HI));
+ } while (hi1 != hi2);
*cycle_counter |= (((u64) hi1) << 32);
- } while (hi1 != hi2);
+ }
- /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
- * correctly */
- do {
- hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI));
- *system_time = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TIMESTAMP_LO));
- hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI));
+ if (system_time) {
+ /* Read hi, lo, hi to ensure a coherent u64 */
+ do {
+ hi1 = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TIMESTAMP_HI));
+ *system_time = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TIMESTAMP_LO));
+ hi2 = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TIMESTAMP_HI));
+ } while (hi1 != hi2);
*system_time |= (((u64) hi1) << 32);
- } while (hi1 != hi2);
+ }
/* Record the CPU's idea of current time */
- getrawmonotonic(ts);
+ if (ts != NULL)
+ getrawmonotonic(ts);
kbase_pm_release_gpu_cycle_counter(kbdev);
}
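
The hi/lo/hi loops above guard against the low word wrapping between the two 32-bit reads; a self-contained sketch of the same idiom, with read32() standing in for kbase_reg_read():

#include <stdint.h>

/* Sketch of the hi/lo/hi idiom: re-read until the high word is stable,
 * so the combined 64-bit value is coherent. read32() is hypothetical.
 */
static uint64_t read_split_counter64(uint32_t (*read32)(int reg),
				     int reg_lo, int reg_hi)
{
	uint32_t hi1, hi2, lo;

	do {
		hi1 = read32(reg_hi);
		lo = read32(reg_lo);
		hi2 = read32(reg_hi);
	} while (hi1 != hi2);

	return ((uint64_t)hi1 << 32) | lo;
}
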
diff --git a/mali_kbase/build.bp b/mali_kbase/build.bp
index 666f500..5e6fdfc 100644
--- a/mali_kbase/build.bp
+++ b/mali_kbase/build.bp
@@ -117,6 +117,12 @@ bob_kernel_module {
mali_2mb_alloc: {
kbuild_options: ["CONFIG_MALI_2MB_ALLOC=y"],
},
+ mali_hw_errata_1485982_not_affected: {
+ kbuild_options: ["CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED=y"],
+ },
+ mali_hw_errata_1485982_use_clock_alternative: {
+ kbuild_options: ["CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE=y"],
+ },
gpu_has_csf: {
srcs: [
"csf/*.c",
diff --git a/mali_kbase/docs/Doxyfile b/mali_kbase/docs/Doxyfile
deleted file mode 100644
index 6498dcb..0000000
--- a/mali_kbase/docs/Doxyfile
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-# (C) COPYRIGHT 2011-2013, 2015, 2017 ARM Limited. All rights reserved.
-#
-# This program is free software and is provided to you under the terms of the
-# GNU General Public License version 2 as published by the Free Software
-# Foundation, and any use by you of this program is subject to the terms
-# of such GNU licence.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# SPDX-License-Identifier: GPL-2.0
-#
-#
-
-
-##############################################################################
-
-# This file contains per-module Doxygen configuration. Please do not add
-# extra settings to this file without consulting all stakeholders, as they
-# may override project-wide settings.
-#
-# Additionally, when defining aliases, macros, sections etc, use the module
-# name as a prefix e.g. gles_my_alias.
-
-##############################################################################
-
-@INCLUDE = ../../bldsys/Doxyfile_common
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT += ../../kernel/drivers/gpu/arm/midgard/
-
-##############################################################################
-# Everything below here is optional, and in most cases not required
-##############################################################################
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES +=
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS +=
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
-# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
-
-FILE_PATTERNS +=
-
-# The EXCLUDE tag can be used to specify files and/or directories that should
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-EXCLUDE += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/mali_uk.h ../../kernel/drivers/gpu/arm/midgard/Makefile
-
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS +=
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS +=
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH +=
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH +=
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH +=
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED +=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition.
-
-EXPAND_AS_DEFINED +=
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS += ../../kernel/drivers/gpu/arm/midgard/docs
-
diff --git a/mali_kbase/docs/policy_operation_diagram.dot b/mali_kbase/docs/policy_operation_diagram.dot
deleted file mode 100644
index a15b558..0000000
--- a/mali_kbase/docs/policy_operation_diagram.dot
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-digraph policy_objects_diagram {
- rankdir=LR;
- size="12,8";
- compound=true;
-
- node [ shape = box ];
-
- subgraph cluster_policy_queues {
- low_queue [ shape=record label = "LowP | {<ql>ctx_lo | ... | <qm>ctx_i | ... | <qr>ctx_hi}" ];
- queues_middle_sep [ label="" shape=plaintext width=0 height=0 ];
-
- rt_queue [ shape=record label = "RT | {<ql>ctx_lo | ... | <qm>ctx_j | ... | <qr>ctx_hi}" ];
-
- label = "Policy's Queue(s)";
- }
-
- call_enqueue [ shape=plaintext label="enqueue_ctx()" ];
-
- {
- rank=same;
- ordering=out;
- call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ];
- call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ];
-
- call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ];
- }
-
- subgraph cluster_runpool {
-
- as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ];
- as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ];
- as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ];
- as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ];
-
- label = "Policy's Run Pool";
- }
-
- {
- rank=same;
- call_jdequeue [ shape=plaintext label="dequeue_job()" ];
- sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ];
- }
-
- {
- rank=same;
- ordering=out;
- sstop [ shape=ellipse label="SS-Timer expires" ]
- jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
-
- irq [ label="IRQ" shape=ellipse ];
-
- job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ];
- }
-
- hstop [ shape=ellipse label="HS-Timer expires" ]
-
- /*
- * Edges
- */
-
- call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ];
-
- low_queue:qr -> call_dequeue:w;
- rt_queue:qr -> call_dequeue:w;
-
- call_dequeue -> as1 [lhead=cluster_runpool];
-
- as1->call_jdequeue [ltail=cluster_runpool];
- call_jdequeue->jobslots:0;
- call_jdequeue->sstop_dotfixup [ arrowhead=none];
- sstop_dotfixup->sstop [label="Spawn SS-Timer"];
- sstop->jobslots [label="SoftStop"];
- sstop->hstop [label="Spawn HS-Timer"];
- hstop->jobslots:ne [label="HardStop"];
-
-
- as3->call_ctxfinish:ne [ ltail=cluster_runpool ];
- call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ];
-
- call_ctxfinish->call_ctxdone [constraint=false];
-
- call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false];
-
-
- {
- jobslots->irq [constraint=false];
-
- irq->job_finish [constraint=false];
- }
-
- irq->as2 [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ];
-
-}
diff --git a/mali_kbase/docs/policy_overview.dot b/mali_kbase/docs/policy_overview.dot
deleted file mode 100644
index 6b87335..0000000
--- a/mali_kbase/docs/policy_overview.dot
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-digraph policy_objects_diagram {
- rankdir=LR
- size="6,6"
- compound=true;
-
- node [ shape = box ];
-
- call_enqueue [ shape=plaintext label="enqueue ctx" ];
-
-
- policy_queue [ label="Policy's Queue" ];
-
- {
- rank=same;
- runpool [ label="Policy's Run Pool" ];
-
- ctx_finish [ label="ctx finished" ];
- }
-
- {
- rank=same;
- jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
-
- job_finish [ label="Job finished" ];
- }
-
-
-
- /*
- * Edges
- */
-
- call_enqueue -> policy_queue;
-
- policy_queue->runpool [label="dequeue ctx" weight=0.1];
- runpool->policy_queue [label="requeue ctx" weight=0.1];
-
- runpool->ctx_finish [ style=dotted ];
-
- runpool->jobslots [label="dequeue job" weight=0.1];
- jobslots->runpool [label="requeue job" weight=0.1];
-
- jobslots->job_finish [ style=dotted ];
-}
diff --git a/mali_kbase/ipa/mali_kbase_ipa.c b/mali_kbase/ipa/mali_kbase_ipa.c
index 65b4edf..9b75f0d 100644
--- a/mali_kbase/ipa/mali_kbase_ipa.c
+++ b/mali_kbase/ipa/mali_kbase_ipa.c
@@ -47,7 +47,9 @@ static const struct kbase_ipa_model_ops *kbase_ipa_all_model_ops[] = {
&kbase_g52_ipa_model_ops,
&kbase_g52_r1_ipa_model_ops,
&kbase_g51_ipa_model_ops,
- &kbase_g77_ipa_model_ops
+ &kbase_g77_ipa_model_ops,
+ &kbase_tnax_ipa_model_ops,
+ &kbase_tbex_ipa_model_ops
};
int kbase_ipa_model_recalculate(struct kbase_ipa_model *model)
@@ -91,31 +93,31 @@ const char *kbase_ipa_model_name_from_id(u32 gpu_id)
const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- if (GPU_ID_IS_NEW_FORMAT(prod_id)) {
- switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) {
- case GPU_ID2_PRODUCT_TMIX:
- return "mali-g71-power-model";
- case GPU_ID2_PRODUCT_THEX:
- return "mali-g72-power-model";
- case GPU_ID2_PRODUCT_TNOX:
- return "mali-g76-power-model";
- case GPU_ID2_PRODUCT_TSIX:
- return "mali-g51-power-model";
- case GPU_ID2_PRODUCT_TGOX:
- if ((gpu_id & GPU_ID2_VERSION_MAJOR) ==
- (0 << GPU_ID2_VERSION_MAJOR_SHIFT))
- /* g52 aliased to g76 power-model's ops */
- return "mali-g52-power-model";
- else
- return "mali-g52_r1-power-model";
- case GPU_ID2_PRODUCT_TTRX:
- return "mali-g77-power-model";
- default:
- return KBASE_IPA_FALLBACK_MODEL_NAME;
- }
+ switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) {
+ case GPU_ID2_PRODUCT_TMIX:
+ return "mali-g71-power-model";
+ case GPU_ID2_PRODUCT_THEX:
+ return "mali-g72-power-model";
+ case GPU_ID2_PRODUCT_TNOX:
+ return "mali-g76-power-model";
+ case GPU_ID2_PRODUCT_TSIX:
+ return "mali-g51-power-model";
+ case GPU_ID2_PRODUCT_TGOX:
+ if ((gpu_id & GPU_ID2_VERSION_MAJOR) ==
+ (0 << GPU_ID2_VERSION_MAJOR_SHIFT))
+ /* g52 aliased to g76 power-model's ops */
+ return "mali-g52-power-model";
+ else
+ return "mali-g52_r1-power-model";
+ case GPU_ID2_PRODUCT_TNAX:
+ return "mali-tnax-power-model";
+ case GPU_ID2_PRODUCT_TTRX:
+ return "mali-g77-power-model";
+ case GPU_ID2_PRODUCT_TBEX:
+ return "mali-tbex-power-model";
+ default:
+ return KBASE_IPA_FALLBACK_MODEL_NAME;
}
-
- return KBASE_IPA_FALLBACK_MODEL_NAME;
}
KBASE_EXPORT_TEST_API(kbase_ipa_model_name_from_id);
diff --git a/mali_kbase/ipa/mali_kbase_ipa.h b/mali_kbase/ipa/mali_kbase_ipa.h
index 81cafc8..92aace9 100644
--- a/mali_kbase/ipa/mali_kbase_ipa.h
+++ b/mali_kbase/ipa/mali_kbase_ipa.h
@@ -209,6 +209,8 @@ extern const struct kbase_ipa_model_ops kbase_g52_ipa_model_ops;
extern const struct kbase_ipa_model_ops kbase_g52_r1_ipa_model_ops;
extern const struct kbase_ipa_model_ops kbase_g51_ipa_model_ops;
extern const struct kbase_ipa_model_ops kbase_g77_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_tnax_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_tbex_ipa_model_ops;
/**
* kbase_get_real_power() - get the real power consumption of the GPU
diff --git a/mali_kbase/ipa/mali_kbase_ipa_vinstr_g7x.c b/mali_kbase/ipa/mali_kbase_ipa_vinstr_g7x.c
index a3d1fae..270b75e 100644
--- a/mali_kbase/ipa/mali_kbase_ipa_vinstr_g7x.c
+++ b/mali_kbase/ipa/mali_kbase_ipa_vinstr_g7x.c
@@ -382,6 +382,39 @@ static const struct kbase_ipa_group ipa_groups_def_g77[] = {
},
};
+static const struct kbase_ipa_group ipa_groups_def_tbex[] = {
+ {
+ .name = "l2_access",
+ .default_value = 599800,
+ .op = kbase_g7x_sum_all_memsys_blocks,
+ .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+ },
+ {
+ .name = "exec_instr_msg",
+ .default_value = 1830200,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_MSG,
+ },
+ {
+ .name = "exec_instr_fma",
+ .default_value = 407300,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_FMA,
+ },
+ {
+ .name = "tex_filt_num_operations",
+ .default_value = 224500,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_TEX_FILT_NUM_OPERATIONS,
+ },
+ {
+ .name = "gpu_active",
+ .default_value = 153800,
+ .op = kbase_g7x_jm_single_counter,
+ .counter_block_offset = JM_GPU_ACTIVE,
+ },
+};
+
#define IPA_POWER_MODEL_OPS(gpu, init_token) \
const struct kbase_ipa_model_ops kbase_ ## gpu ## _ipa_model_ops = { \
@@ -415,6 +448,9 @@ STANDARD_POWER_MODEL(g76, 800);
STANDARD_POWER_MODEL(g52_r1, 1000);
STANDARD_POWER_MODEL(g51, 1000);
STANDARD_POWER_MODEL(g77, 1000);
+STANDARD_POWER_MODEL(tbex, 1000);
/* g52 is an alias of g76 (TNOX) for IPA */
ALIAS_POWER_MODEL(g52, g76);
+/* tnax is an alias of g77 (TTRX) for IPA */
+ALIAS_POWER_MODEL(tnax, g77);
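
Each IPA group pairs a hardware counter with a coefficient; a hedged sketch of the dynamic-power sum such a table implies (the fixed-point scaling is a simplifying assumption, not the driver's exact arithmetic, which also normalises by voltage and frequency):

#include <stdint.h>
#include <stddef.h>

struct ipa_group_sketch {
	const char *name;
	int64_t coeff;     /* e.g. the .default_value fields above */
	uint64_t counter;  /* accumulated HW counter delta */
};

/* Illustrative only: sum coeff * counter over all groups, then apply a
 * fixed-point scale.
 */
static int64_t ipa_dynamic_power_sketch(const struct ipa_group_sketch *g,
					size_t n, int64_t scale_shift)
{
	int64_t power = 0;
	size_t i;

	for (i = 0; i < n; i++)
		power += g[i].coeff * (int64_t)g[i].counter;

	return power >> scale_shift;
}
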
diff --git a/mali_kbase/mali_base_hwconfig_features.h b/mali_kbase/mali_base_hwconfig_features.h
index bf27180..3d24972 100644
--- a/mali_kbase/mali_base_hwconfig_features.h
+++ b/mali_kbase/mali_base_hwconfig_features.h
@@ -68,120 +68,6 @@ static const enum base_hw_feature base_hw_features_generic[] = {
BASE_HW_FEATURE_END
};
-static const enum base_hw_feature base_hw_features_t60x[] = {
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_V4,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t62x[] = {
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_V4,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t72x[] = {
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
- BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
- BASE_HW_FEATURE_WARPING,
- BASE_HW_FEATURE_V4,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t76x[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_XAFFINITY,
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_BRNDOUT_CC,
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LD_ST_TILEBUFFER,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_MRT,
- BASE_HW_FEATURE_MSAA_16X,
- BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_TEST4_DATUM_MODE,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_tFxx[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_XAFFINITY,
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_BRNDOUT_CC,
- BASE_HW_FEATURE_BRNDOUT_KILL,
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LD_ST_TILEBUFFER,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_MRT,
- BASE_HW_FEATURE_MSAA_16X,
- BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
- BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_TEST4_DATUM_MODE,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t83x[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_XAFFINITY,
- BASE_HW_FEATURE_WARPING,
- BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_BRNDOUT_CC,
- BASE_HW_FEATURE_BRNDOUT_KILL,
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LD_ST_TILEBUFFER,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_MRT,
- BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
- BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_TEST4_DATUM_MODE,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t82x[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_XAFFINITY,
- BASE_HW_FEATURE_WARPING,
- BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_BRNDOUT_CC,
- BASE_HW_FEATURE_BRNDOUT_KILL,
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LD_ST_TILEBUFFER,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_MRT,
- BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
- BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_TEST4_DATUM_MODE,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_END
-};
-
static const enum base_hw_feature base_hw_features_tMIx[] = {
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
@@ -378,6 +264,7 @@ static const enum base_hw_feature base_hw_features_tTRx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -406,6 +293,7 @@ static const enum base_hw_feature base_hw_features_tNAx[] = {
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -435,6 +323,7 @@ static const enum base_hw_feature base_hw_features_tBEx[] = {
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -462,8 +351,8 @@ static const enum base_hw_feature base_hw_features_tULx[] = {
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
- BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -493,6 +382,7 @@ static const enum base_hw_feature base_hw_features_tDUx[] = {
BASE_HW_FEATURE_AARCH64_MMU,
BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -520,8 +410,8 @@ static const enum base_hw_feature base_hw_features_tODx[] = {
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
- BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -549,7 +439,8 @@ static const enum base_hw_feature base_hw_features_tIDx[] = {
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
- BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+ BASE_HW_FEATURE_L2_CONFIG,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
@@ -577,37 +468,8 @@ static const enum base_hw_feature base_hw_features_tVAx[] = {
BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
BASE_HW_FEATURE_COHERENCY_REG,
BASE_HW_FEATURE_AARCH64_MMU,
- BASE_HW_FEATURE_IDVS_GROUP_SIZE,
BASE_HW_FEATURE_L2_CONFIG,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_tEGx[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_XAFFINITY,
- BASE_HW_FEATURE_WARPING,
- BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
- BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
- BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
- BASE_HW_FEATURE_BRNDOUT_CC,
- BASE_HW_FEATURE_BRNDOUT_KILL,
- BASE_HW_FEATURE_LD_ST_LEA_TEX,
- BASE_HW_FEATURE_LD_ST_TILEBUFFER,
- BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
- BASE_HW_FEATURE_MRT,
- BASE_HW_FEATURE_MSAA_16X,
- BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
- BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
- BASE_HW_FEATURE_T7XX_PAIRING_RULES,
- BASE_HW_FEATURE_TEST4_DATUM_MODE,
- BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
- BASE_HW_FEATURE_FLUSH_REDUCTION,
- BASE_HW_FEATURE_PROTECTED_MODE,
- BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
- BASE_HW_FEATURE_COHERENCY_REG,
- BASE_HW_FEATURE_AARCH64_MMU,
- BASE_HW_FEATURE_TLS_HASHING,
+ BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
BASE_HW_FEATURE_END
};
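
Feature arrays like those above are terminated by BASE_HW_FEATURE_END, so a feature query is a linear scan to the sentinel; a standalone sketch (the driver's real helper is kbase_hw_has_feature()):

#include <stdbool.h>

/* Illustrative scan of a BASE_HW_FEATURE_END-terminated feature array. */
static bool hw_has_feature(const enum base_hw_feature *features,
			   enum base_hw_feature feature)
{
	for (; *features != BASE_HW_FEATURE_END; features++) {
		if (*features == feature)
			return true;
	}
	return false;
}
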
diff --git a/mali_kbase/mali_base_hwconfig_issues.h b/mali_kbase/mali_base_hwconfig_issues.h
index 7964c22..7448608 100644
--- a/mali_kbase/mali_base_hwconfig_issues.h
+++ b/mali_kbase/mali_base_hwconfig_issues.h
@@ -131,6 +131,7 @@ enum base_hw_issue {
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -138,843 +139,6 @@ static const enum base_hw_issue base_hw_issues_generic[] = {
BASE_HW_ISSUE_END
};
-static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
- BASE_HW_ISSUE_6367,
- BASE_HW_ISSUE_6398,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_6787,
- BASE_HW_ISSUE_7027,
- BASE_HW_ISSUE_7144,
- BASE_HW_ISSUE_7304,
- BASE_HW_ISSUE_8073,
- BASE_HW_ISSUE_8186,
- BASE_HW_ISSUE_8215,
- BASE_HW_ISSUE_8245,
- BASE_HW_ISSUE_8250,
- BASE_HW_ISSUE_8260,
- BASE_HW_ISSUE_8280,
- BASE_HW_ISSUE_8316,
- BASE_HW_ISSUE_8381,
- BASE_HW_ISSUE_8394,
- BASE_HW_ISSUE_8401,
- BASE_HW_ISSUE_8408,
- BASE_HW_ISSUE_8443,
- BASE_HW_ISSUE_8456,
- BASE_HW_ISSUE_8564,
- BASE_HW_ISSUE_8634,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_8791,
- BASE_HW_ISSUE_8833,
- BASE_HW_ISSUE_8896,
- BASE_HW_ISSUE_8975,
- BASE_HW_ISSUE_8986,
- BASE_HW_ISSUE_8987,
- BASE_HW_ISSUE_9010,
- BASE_HW_ISSUE_9418,
- BASE_HW_ISSUE_9423,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_9510,
- BASE_HW_ISSUE_9566,
- BASE_HW_ISSUE_9630,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10969,
- BASE_HW_ISSUE_10984,
- BASE_HW_ISSUE_10995,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11035,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_3964,
- GPUCORE_1619,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
- BASE_HW_ISSUE_6367,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_6787,
- BASE_HW_ISSUE_7027,
- BASE_HW_ISSUE_7304,
- BASE_HW_ISSUE_8408,
- BASE_HW_ISSUE_8564,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_8975,
- BASE_HW_ISSUE_9010,
- BASE_HW_ISSUE_9418,
- BASE_HW_ISSUE_9423,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_9510,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10969,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11035,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
- BASE_HW_ISSUE_6367,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_6787,
- BASE_HW_ISSUE_7027,
- BASE_HW_ISSUE_7304,
- BASE_HW_ISSUE_8408,
- BASE_HW_ISSUE_8564,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_8975,
- BASE_HW_ISSUE_9010,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_9510,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11035,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10127,
- BASE_HW_ISSUE_10327,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10817,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11035,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_11056,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t72x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t76x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t60x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t62x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11042,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3964,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_tFRx_r1p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_tFRx_r2p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_tFRx[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t86x_r0p2[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t86x_r1p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t86x_r2p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3966,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t86x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_TMIX_7891,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t83x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- GPUCORE_1619,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3964,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1909,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_11054,
- BASE_HW_ISSUE_T720_1386,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- BASE_HW_ISSUE_T76X_3960,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- BASE_HW_ISSUE_TMIX_8438,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t82x[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_11051,
- BASE_HW_ISSUE_T76X_1963,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3979,
- BASE_HW_ISSUE_T83X_817,
- BASE_HW_ISSUE_TMIX_7891,
- GPUCORE_1619,
- BASE_HW_ISSUE_END
-};
-
static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_10682,
@@ -991,6 +155,7 @@ static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1010,6 +175,7 @@ static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1029,6 +195,7 @@ static const enum base_hw_issue base_hw_issues_tMIx_r0p1[] = {
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1056,6 +223,7 @@ static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1068,6 +236,7 @@ static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1080,6 +249,7 @@ static const enum base_hw_issue base_hw_issues_tHEx_r0p2[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1091,6 +261,7 @@ static const enum base_hw_issue base_hw_issues_tHEx_r0p3[] = {
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1112,6 +283,7 @@ static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TSIX_1792,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1123,6 +295,7 @@ static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TSIX_1792,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1133,6 +306,7 @@ static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = {
BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1142,6 +316,7 @@ static const enum base_hw_issue base_hw_issues_tSIx_r1p1[] = {
BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1160,6 +335,7 @@ static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = {
BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1179,6 +355,7 @@ static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TNOX_1194,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1198,6 +375,7 @@ static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TNOX_1194,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1208,6 +386,7 @@ static const enum base_hw_issue base_hw_issues_tGOx_r1p0[] = {
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TGOX_R1_1234,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
@@ -1222,30 +401,29 @@ static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
static const enum base_hw_issue base_hw_issues_tTRx_r0p1[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1253,30 +431,29 @@ static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
static const enum base_hw_issue base_hw_issues_tNAx_r0p1[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
BASE_HW_ISSUE_TTRX_3076,
BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_END
};
static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1284,7 +461,15 @@ static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_2033,
+ BASE_HW_ISSUE_TTRX_1337,
+ BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+ BASE_HW_ISSUE_TTRX_921,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tBEx_r1p0[] = {
+ BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
@@ -1295,7 +480,6 @@ static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1303,7 +487,6 @@ static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
static const enum base_hw_issue base_hw_issues_tULx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_921,
@@ -1313,7 +496,6 @@ static const enum base_hw_issue base_hw_issues_tULx_r0p0[] = {
static const enum base_hw_issue base_hw_issues_model_tULx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1321,7 +503,6 @@ static const enum base_hw_issue base_hw_issues_model_tULx[] = {
static const enum base_hw_issue base_hw_issues_tDUx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_TTRX_921,
@@ -1331,7 +512,6 @@ static const enum base_hw_issue base_hw_issues_tDUx_r0p0[] = {
static const enum base_hw_issue base_hw_issues_model_tDUx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1339,7 +519,6 @@ static const enum base_hw_issue base_hw_issues_model_tDUx[] = {
static const enum base_hw_issue base_hw_issues_tODx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1348,7 +527,6 @@ static const enum base_hw_issue base_hw_issues_tODx_r0p0[] = {
static const enum base_hw_issue base_hw_issues_model_tODx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1356,7 +534,6 @@ static const enum base_hw_issue base_hw_issues_model_tODx[] = {
static const enum base_hw_issue base_hw_issues_tIDx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1365,7 +542,6 @@ static const enum base_hw_issue base_hw_issues_tIDx_r0p0[] = {
static const enum base_hw_issue base_hw_issues_model_tIDx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1373,7 +549,6 @@ static const enum base_hw_issue base_hw_issues_model_tIDx[] = {
static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
@@ -1382,28 +557,9 @@ static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TTRX_1337,
BASE_HW_ISSUE_END
};
-static const enum base_hw_issue base_hw_issues_tEGx_r0p0[] = {
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
- BASE_HW_ISSUE_TSIX_1116,
- BASE_HW_ISSUE_TSIX_2033,
- BASE_HW_ISSUE_TTRX_921,
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_tEGx[] = {
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_TMIX_8133,
- BASE_HW_ISSUE_TSIX_1116,
- BASE_HW_ISSUE_TSIX_2033,
- BASE_HW_ISSUE_END
-};
-
#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/mali_kbase/mali_base_kernel.h b/mali_kbase/mali_base_kernel.h
index 6caf36c..a8ab408 100644
--- a/mali_kbase/mali_base_kernel.h
+++ b/mali_kbase/mali_base_kernel.h
@@ -185,9 +185,9 @@ typedef u32 base_mem_alloc_flags;
*/
#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
-/* Secure memory
+/* Protected memory
*/
-#define BASE_MEM_SECURE ((base_mem_alloc_flags)1 << 16)
+#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16)
/* Not needed physical memory
*/
@@ -225,12 +225,10 @@ typedef u32 base_mem_alloc_flags;
/*
* Bits [22:25] for group_id (0~15).
*
- * In user space, inline function base_mem_group_id_set() can be used with
- * numeric value (0~15) to generate a specific memory group ID.
- *
- * group_id is packed into in.flags of kbase_ioctl_mem_alloc to be delivered to
- * kernel space via ioctl and then kernel driver can use inline function
- * base_mem_group_id_get() to extract group_id from flags.
+ * base_mem_group_id_set() should be used to pack a memory group ID into a
+ * base_mem_alloc_flags value instead of accessing the bits directly.
+ * base_mem_group_id_get() should be used to extract the memory group ID from
+ * a base_mem_alloc_flags value.
*/
#define BASEP_MEM_GROUP_ID_SHIFT 22
#define BASE_MEM_GROUP_ID_MASK \
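For illustration, a minimal sketch (not part of this patch) of how the two helpers named above could pack and extract the group ID, assuming the 4-bit field at bits [22:25] described in the comment; the actual inline functions and mask definition live in the base memory headers and may differ in detail.

typedef u32 base_mem_alloc_flags;	/* as in mali_base_kernel.h */

#define BASEP_MEM_GROUP_ID_SHIFT 22
#define BASE_MEM_GROUP_ID_MASK \
	((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)

/* Pack a group ID (0-15) into allocation flags. */
static inline base_mem_alloc_flags base_mem_group_id_set(int id)
{
	return ((base_mem_alloc_flags)id << BASEP_MEM_GROUP_ID_SHIFT) &
		BASE_MEM_GROUP_ID_MASK;
}

/* Extract the group ID from allocation flags. */
static inline int base_mem_group_id_get(base_mem_alloc_flags flags)
{
	return (int)((flags & BASE_MEM_GROUP_ID_MASK) >>
			BASEP_MEM_GROUP_ID_SHIFT);
}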
@@ -383,7 +381,7 @@ struct base_mem_import_user_buffer {
#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
-/* reserved handles ..-48<<PAGE_SHIFT> for future special handles */
+/* reserved handles ..-47<<PAGE_SHIFT> for future special handles */
#define BASE_MEM_COOKIE_BASE (64ul << 12)
#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
BASE_MEM_COOKIE_BASE)
@@ -785,6 +783,14 @@ typedef u32 base_jd_core_req;
#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
/**
+ * Request the atom be executed on a specific job slot.
+ *
+ * When this flag is specified, it takes precedence over any existing job slot
+ * selection logic.
+ */
+#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17)
+
+/**
* These requirement bits are currently unused in base_jd_core_req
*/
#define BASEP_JD_REQ_RESERVED \
@@ -793,7 +799,8 @@ typedef u32 base_jd_core_req;
BASE_JD_REQ_EVENT_COALESCE | \
BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
- BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END))
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \
+ BASE_JD_REQ_JOB_SLOT))
/**
* Mask of all bits in base_jd_core_req that control the type of the atom.
@@ -907,7 +914,7 @@ typedef struct base_jd_atom_v2 {
base_atom_id atom_number; /**< unique number to identify the atom */
base_jd_prio prio; /**< Atom priority. Refer to @ref base_jd_prio for more details */
u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
- u8 padding[1];
+ u8 jobslot; /**< Job slot to use when BASE_JD_REQ_JOB_SLOT is specified */
base_jd_core_req core_req; /**< core requirements */
} base_jd_atom_v2;
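The new flag and the repurposed padding byte combine as follows; a minimal userspace sketch (not part of this patch), assuming an atom otherwise set up for submission via KBASE_IOCTL_JOB_SUBMIT and a caller-provided jc_gpu_va:

struct base_jd_atom_v2 atom = { 0 };

atom.jc = jc_gpu_va;		/* caller-provided job chain GPU address */
atom.atom_number = 1;
atom.core_req = BASE_JD_REQ_CS | BASE_JD_REQ_JOB_SLOT;
atom.jobslot = 2;		/* honoured only while BASE_JD_REQ_JOB_SLOT is set */

/* kbase_js_is_atom_valid() rejects the atom if jobslot >=
 * BASE_JM_MAX_NR_SLOTS; otherwise kbase_js_get_slot() returns it
 * verbatim (see the mali_kbase_js.c changes later in this patch).
 */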
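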
@@ -1763,5 +1770,23 @@ static inline int base_context_mmu_group_id_get(
#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+/**
+ * A number of bit flags are defined for requesting cpu_gpu_timeinfo. These
+ * flags are also used, where applicable, for specifying which fields
+ * are valid following the request operation.
+ */
+
+/* For monotonic (counter) timefield */
+#define BASE_TIMEINFO_MONOTONIC_FLAG (1UL << 0)
+/* For system wide timestamp */
+#define BASE_TIMEINFO_TIMESTAMP_FLAG (1UL << 1)
+/* For GPU cycle counter */
+#define BASE_TIMEINFO_CYCLE_COUNTER_FLAG (1UL << 2)
+
+#define BASE_TIMEREQUEST_ALLOWED_FLAGS (\
+ BASE_TIMEINFO_MONOTONIC_FLAG | \
+ BASE_TIMEINFO_TIMESTAMP_FLAG | \
+ BASE_TIMEINFO_CYCLE_COUNTER_FLAG)
+
#endif /* _BASE_KERNEL_H_ */
diff --git a/mali_kbase/mali_kbase.h b/mali_kbase/mali_kbase.h
index e3f209c..1ab785e 100644
--- a/mali_kbase/mali_kbase.h
+++ b/mali_kbase/mali_kbase.h
@@ -375,6 +375,24 @@ static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
}
/**
+ * kbase_pm_metrics_start - Start the utilization metrics timer
+ * @kbdev: Pointer to the kbase device for which to start the utilization
+ * metrics calculation thread.
+ *
+ * Start the timer that drives the metrics calculation and runs the custom DVFS.
+ */
+void kbase_pm_metrics_start(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_metrics_stop - Stop the utilization metrics timer
+ * @kbdev: Pointer to the kbase device for which to stop the utilization
+ * metrics calculation thread.
+ *
+ * Stop the timer that drives the metrics calculation and runs the custom DVFS.
+ */
+void kbase_pm_metrics_stop(struct kbase_device *kbdev);
+
+/**
* Return the atom's ID, as was originally supplied by userspace in
* base_jd_atom_v2::atom_number
*/
diff --git a/mali_kbase/mali_kbase_context.c b/mali_kbase/mali_kbase_context.c
index ed482e1..e72ce70 100644
--- a/mali_kbase/mali_kbase_context.c
+++ b/mali_kbase/mali_kbase_context.c
@@ -75,6 +75,7 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat,
spin_lock_init(&kctx->mm_update_lock);
kctx->process_mm = NULL;
atomic_set(&kctx->nonmapped_pages, 0);
+ atomic_set(&kctx->permanent_mapped_pages, 0);
kctx->slots_pullable = 0;
kctx->tgid = current->tgid;
kctx->pid = current->pid;
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index b2b5a08..57acbf9 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -46,6 +46,7 @@
#endif /* !MALI_CUSTOMER_RELEASE */
#include "mali_kbase_regs_history_debugfs.h"
#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_reset_gpu.h>
@@ -892,6 +893,37 @@ static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
return ret;
}
+static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
+ union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
+{
+ u32 flags = timeinfo->in.request_flags;
+ struct timespec ts;
+ u64 timestamp;
+ u64 cycle_cnt;
+
+ kbase_pm_context_active(kctx->kbdev);
+
+ kbase_backend_get_gpu_time(kctx->kbdev,
+ (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
+ (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
+ (flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
+
+ if (flags & BASE_TIMEINFO_TIMESTAMP_FLAG)
+ timeinfo->out.timestamp = timestamp;
+
+ if (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG)
+ timeinfo->out.cycle_counter = cycle_cnt;
+
+ if (flags & BASE_TIMEINFO_MONOTONIC_FLAG) {
+ timeinfo->out.sec = ts.tv_sec;
+ timeinfo->out.nsec = ts.tv_nsec;
+ }
+
+ kbase_pm_context_idle(kctx->kbdev);
+
+ return 0;
+}
+
#ifdef CONFIG_MALI_NO_MALI
static int kbase_api_hwcnt_set(struct kbase_context *kctx,
struct kbase_ioctl_hwcnt_values *values)
@@ -1534,6 +1566,12 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
kbase_api_hwcnt_clear,
kctx);
break;
+ case KBASE_IOCTL_GET_CPU_GPU_TIMEINFO:
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_GET_CPU_GPU_TIMEINFO,
+ kbase_api_get_cpu_gpu_timeinfo,
+ union kbase_ioctl_get_cpu_gpu_timeinfo,
+ kctx);
+ break;
#ifdef CONFIG_MALI_NO_MALI
case KBASE_IOCTL_HWCNT_SET:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
@@ -2509,14 +2547,6 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
unsigned id;
char *name;
} gpu_product_id_names[] = {
- { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
- { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
- { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
- { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
- { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
- { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
- { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
- { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-G71" },
{ .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
@@ -2531,13 +2561,22 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
.name = "Mali-G52" },
{ .id = GPU_ID2_PRODUCT_TTRX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-G77" },
+ { .id = GPU_ID2_PRODUCT_TBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TBEX" },
+ { .id = GPU_ID2_PRODUCT_LBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-LBEX" },
+ { .id = GPU_ID2_PRODUCT_TNAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TNAX" },
+ { .id = GPU_ID2_PRODUCT_TODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-TODX" },
+ { .id = GPU_ID2_PRODUCT_LODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-LODX" },
};
const char *product_name = "(Unknown Mali GPU)";
struct kbase_device *kbdev;
u32 gpu_id;
unsigned product_id, product_id_mask;
unsigned i;
- bool is_new_format;
kbdev = to_kbase_device(dev);
if (!kbdev)
@@ -2545,18 +2584,12 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
- product_id_mask =
- (is_new_format ?
- GPU_ID2_PRODUCT_MODEL :
- GPU_ID_VERSION_PRODUCT_ID) >>
- GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ product_id_mask = GPU_ID2_PRODUCT_MODEL >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
const struct gpu_product_id_name *p = &gpu_product_id_names[i];
- if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
- (p->id & product_id_mask) ==
+ if ((p->id & product_id_mask) ==
(product_id & product_id_mask)) {
product_name = p->name;
break;
@@ -3779,9 +3812,8 @@ static void kbase_device_coherency_init(struct kbase_device *kbdev,
* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
* documented for tMIx so force correct value here.
*/
- if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
- (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
- GPU_ID2_PRODUCT_TMIX))
+ if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
+ GPU_ID2_PRODUCT_TMIX)
if (supported_coherency_bitmap ==
COHERENCY_FEATURE_BIT(COHERENCY_ACE))
supported_coherency_bitmap |=
diff --git a/mali_kbase/mali_kbase_debug_job_fault.c b/mali_kbase/mali_kbase_debug_job_fault.c
index 4873474..dbc774d 100644
--- a/mali_kbase/mali_kbase_debug_job_fault.c
+++ b/mali_kbase/mali_kbase_debug_job_fault.c
@@ -89,11 +89,16 @@ static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx)
static int wait_for_job_fault(struct kbase_device *kbdev)
{
#if KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE && \
- KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
- if (!kbase_is_job_fault_event_pending(kbdev))
+ KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+ int ret = wait_event_interruptible_timeout(kbdev->job_fault_wq,
+ kbase_is_job_fault_event_pending(kbdev),
+ msecs_to_jiffies(2000));
+ if (ret == 0)
return -EAGAIN;
- else
+ else if (ret > 0)
return 0;
+ else
+ return ret;
#else
return wait_event_interruptible(kbdev->job_fault_wq,
kbase_is_job_fault_event_pending(kbdev));
@@ -164,24 +169,6 @@ static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)
}
-/* Remove all the failed atoms that belong to different contexts
- * Resume all the contexts that were suspend due to failed job
- */
-static void kbase_job_fault_event_cleanup(struct kbase_device *kbdev)
-{
- struct list_head *event_list = &kbdev->job_fault_event_list;
- unsigned long flags;
-
- spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
- while (!list_empty(event_list)) {
- kbase_job_fault_event_dequeue(kbdev, event_list);
- spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
- wake_up(&kbdev->job_fault_resume_wq);
- spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
- }
- spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
-}
-
static void kbase_job_fault_resume_worker(struct work_struct *data)
{
struct base_job_fault_event *event = container_of(data,
@@ -282,7 +269,7 @@ bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
if (kbase_ctx_flag(kctx, KCTX_DYING))
return false;
- if (kctx->kbdev->job_fault_debug == true) {
+ if (atomic_read(&kctx->kbdev->job_fault_debug) > 0) {
if (completion_code != BASE_JD_EVENT_DONE) {
@@ -428,12 +415,16 @@ static int debug_job_fault_open(struct inode *in, struct file *file)
{
struct kbase_device *kbdev = in->i_private;
+ if (atomic_cmpxchg(&kbdev->job_fault_debug, 0, 1) == 1) {
+ dev_warn(kbdev->dev, "debug job fault is busy, only a single client is allowed");
+ return -EBUSY;
+ }
+
seq_open(file, &ops);
((struct seq_file *)file->private_data)->private = kbdev;
dev_info(kbdev->dev, "debug job fault seq open");
- kbdev->job_fault_debug = true;
return 0;
@@ -442,15 +433,35 @@ static int debug_job_fault_open(struct inode *in, struct file *file)
static int debug_job_fault_release(struct inode *in, struct file *file)
{
struct kbase_device *kbdev = in->i_private;
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ unsigned long flags;
seq_release(in, file);
- kbdev->job_fault_debug = false;
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+
+ /* Disable job fault dumping. This will let kbase run jobs as normal,
+ * without blocking waiting for a job_fault client to read failed jobs.
+ *
+ * After this a new client may open the file, and may re-enable job
+ * fault dumping, but the job_fault_event_lock we hold here will block
+ * that from interfering until after we've completed the cleanup.
+ */
+ atomic_dec(&kbdev->job_fault_debug);
/* Clean the unprocessed job fault. After that, all the suspended
- * contexts could be rescheduled.
+ * contexts could be rescheduled. Remove all the failed atoms that
+ * belong to different contexts. Resume all the contexts that were
+ * suspended due to failed jobs.
*/
- kbase_job_fault_event_cleanup(kbdev);
+ while (!list_empty(event_list)) {
+ kbase_job_fault_event_dequeue(kbdev, event_list);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ wake_up(&kbdev->job_fault_resume_wq);
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
dev_info(kbdev->dev, "debug job fault seq close");
@@ -470,7 +481,7 @@ static const struct file_operations kbasep_debug_job_fault_fops = {
*/
void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev)
{
- debugfs_create_file("job_fault", S_IRUGO,
+ debugfs_create_file("job_fault", 0400,
kbdev->mali_debugfs_directory, kbdev,
&kbasep_debug_job_fault_fops);
}
@@ -490,7 +501,7 @@ int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
if (!kbdev->job_fault_resume_workq)
return -ENOMEM;
- kbdev->job_fault_debug = false;
+ atomic_set(&kbdev->job_fault_debug, 0);
return 0;
}
@@ -545,8 +556,6 @@ void kbase_debug_job_fault_kctx_unblock(struct kbase_context *kctx)
int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
{
- kbdev->job_fault_debug = false;
-
return 0;
}
diff --git a/mali_kbase/mali_kbase_debugfs_helper.c b/mali_kbase/mali_kbase_debugfs_helper.c
index 0df75dd..37e507b 100644
--- a/mali_kbase/mali_kbase_debugfs_helper.c
+++ b/mali_kbase/mali_kbase_debugfs_helper.c
@@ -27,8 +27,6 @@
#include "mali_kbase_debugfs_helper.h"
-#ifdef CONFIG_DEBUG_FS
-
/* Arbitrary maximum size to prevent user space allocating too much kernel
* memory
*/
@@ -183,5 +181,3 @@ int kbase_debugfs_helper_seq_read(struct seq_file *const sfile,
}
return 0;
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index afd7545..4f1c070 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -692,6 +692,7 @@ struct kbase_jd_atom {
/* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
enum base_jd_event_code event_code;
base_jd_core_req core_req;
+ u8 jobslot;
u32 ticks;
int sched_priority;
@@ -891,8 +892,6 @@ struct kbase_fault {
* and Page fault handling.
* @work_pagefault: Work item for the Page fault handling.
* @work_busfault: Work item for the Bus fault handling.
- * @fault_type: Type of fault which occured for this address space,
- * regular/unexpected Bus or Page fault.
* @pf_data: Data relating to page fault.
* @bf_data: Data relating to bus fault.
* @current_setup: Stores the MMU configuration for this address space.
@@ -911,7 +910,6 @@ struct kbase_as {
struct workqueue_struct *pf_wq;
struct work_struct work_pagefault;
struct work_struct work_busfault;
- enum kbase_mmu_fault_type fault_type;
struct kbase_fault pf_data;
struct kbase_fault bf_data;
struct kbase_mmu_setup current_setup;
@@ -948,14 +946,16 @@ struct kbase_mmu_table {
struct kbase_context *kctx;
};
-static inline int kbase_as_has_bus_fault(struct kbase_as *as)
+static inline int kbase_as_has_bus_fault(struct kbase_as *as,
+ struct kbase_fault *fault)
{
- return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
+ return (fault == &as->bf_data);
}
-static inline int kbase_as_has_page_fault(struct kbase_as *as)
+static inline int kbase_as_has_page_fault(struct kbase_as *as,
+ struct kbase_fault *fault)
{
- return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE;
+ return (fault == &as->pf_data);
}
struct kbasep_mem_device {
@@ -1422,6 +1422,9 @@ struct kbase_devfreq_queue_info {
* previously entered protected mode.
* @ipa: Top level structure for IPA, containing pointers to both
* configured & fallback models.
+ * @previous_frequency: Previous frequency of GPU clock used for
+ * BASE_HW_ISSUE_GPU2017_1336 workaround. This frequency is
+ * restored when the L2 cache is powered on.
* @job_fault_debug: Flag to control the dumping of debug data for job faults,
* set when the 'job_fault' debugfs file is opened.
* @mali_debugfs_directory: Root directory for the debugfs files created by the driver
@@ -1650,8 +1653,9 @@ struct kbase_device {
} ipa;
#endif /* CONFIG_DEVFREQ_THERMAL */
#endif /* CONFIG_MALI_DEVFREQ */
+ unsigned long previous_frequency;
- bool job_fault_debug;
+ atomic_t job_fault_debug;
#ifdef CONFIG_DEBUG_FS
struct dentry *mali_debugfs_directory;
@@ -2169,7 +2173,7 @@ struct kbase_context {
struct kbase_jd_context jctx;
atomic_t used_pages;
atomic_t nonmapped_pages;
- unsigned long permanent_mapped_pages;
+ atomic_t permanent_mapped_pages;
struct kbase_mem_pool_group mem_pools;
@@ -2347,7 +2351,7 @@ static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev)
/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
-#define KBASE_AS_INACTIVE_MAX_LOOPS 100000
+#define KBASE_AS_INACTIVE_MAX_LOOPS 100000000
/* JobDescriptorHeader - taken from the architecture specifications, the layout
* is currently identical for all GPU archs. */
diff --git a/mali_kbase/mali_kbase_gpu_id.h b/mali_kbase/mali_kbase_gpu_id.h
index 24b99f2..a38e886 100644
--- a/mali_kbase/mali_kbase_gpu_id.h
+++ b/mali_kbase/mali_kbase_gpu_id.h
@@ -32,22 +32,6 @@
#define GPU_ID_VERSION_MAJOR (0xFu << GPU_ID_VERSION_MAJOR_SHIFT)
#define GPU_ID_VERSION_PRODUCT_ID (0xFFFFu << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
-/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */
-#define GPU_ID_PI_T60X 0x6956u
-#define GPU_ID_PI_T62X 0x0620u
-#define GPU_ID_PI_T76X 0x0750u
-#define GPU_ID_PI_T72X 0x0720u
-#define GPU_ID_PI_TFRX 0x0880u
-#define GPU_ID_PI_T86X 0x0860u
-#define GPU_ID_PI_T82X 0x0820u
-#define GPU_ID_PI_T83X 0x0830u
-
-/* New GPU ID format when PRODUCT_ID is >= 0x1000 (and not 0x6956) */
-#define GPU_ID_PI_NEW_FORMAT_START 0x1000
-#define GPU_ID_IS_NEW_FORMAT(product_id) ((product_id) != GPU_ID_PI_T60X && \
- (product_id) >= \
- GPU_ID_PI_NEW_FORMAT_START)
-
#define GPU_ID2_VERSION_STATUS_SHIFT 0
#define GPU_ID2_VERSION_MINOR_SHIFT 4
#define GPU_ID2_VERSION_MAJOR_SHIFT 12
@@ -109,19 +93,16 @@
#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7, 3)
#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7, 1)
#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7, 2)
-#define GPU_ID2_PRODUCT_TEGX GPU_ID2_MODEL_MAKE(8, 3)
#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(9, 0)
#define GPU_ID2_PRODUCT_TNAX GPU_ID2_MODEL_MAKE(9, 1)
#define GPU_ID2_PRODUCT_TBEX GPU_ID2_MODEL_MAKE(9, 2)
+#define GPU_ID2_PRODUCT_LBEX GPU_ID2_MODEL_MAKE(9, 4)
#define GPU_ID2_PRODUCT_TULX GPU_ID2_MODEL_MAKE(10, 0)
#define GPU_ID2_PRODUCT_TDUX GPU_ID2_MODEL_MAKE(10, 1)
+#define GPU_ID2_PRODUCT_TODX GPU_ID2_MODEL_MAKE(10, 2)
#define GPU_ID2_PRODUCT_TIDX GPU_ID2_MODEL_MAKE(10, 3)
#define GPU_ID2_PRODUCT_TVAX GPU_ID2_MODEL_MAKE(10, 4)
-#define GPU_ID2_PRODUCT_TODX GPU_ID2_MODEL_MAKE(10, 8)
-
-/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
-#define GPU_ID_S_15DEV0 0x1
-#define GPU_ID_S_EAC 0x2
+#define GPU_ID2_PRODUCT_LODX GPU_ID2_MODEL_MAKE(10, 5)
/* Helper macro to create a GPU_ID assuming valid values for id, major,
minor, status */
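For reference, a sketch (not taken from this patch) of how the GPU_ID2_MODEL_MAKE() values above are plausibly composed: an architecture major paired with a product major in the ID2 layout; the exact shift constants are assumptions here.

#define GPU_ID2_ARCH_MAJOR_SHIFT	28
#define GPU_ID2_PRODUCT_MAJOR_SHIFT	16

#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
	((((u32)(arch_major)) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
	 (((u32)(product_major)) << GPU_ID2_PRODUCT_MAJOR_SHIFT))

/* e.g. GPU_ID2_PRODUCT_LODX == GPU_ID2_MODEL_MAKE(10, 5): arch 10, product 5 */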
@@ -131,8 +112,4 @@
(((u32)minor) << GPU_ID_VERSION_MINOR_SHIFT) | \
(((u32)status) << GPU_ID_VERSION_STATUS_SHIFT))
-/* Statically set to 0 because the HW revision cannot be seen at compile time
- * by the build system */
-#define GPU_HAS_CSF_VERSION_10_REVISION_2 (0)
-
#endif /* _KBASE_GPU_ID_H_ */
diff --git a/mali_kbase/mali_kbase_gpuprops.c b/mali_kbase/mali_kbase_gpuprops.c
index 302d383..f6b70bd 100644
--- a/mali_kbase/mali_kbase_gpuprops.c
+++ b/mali_kbase/mali_kbase_gpuprops.c
@@ -194,6 +194,8 @@ void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props)
static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
{
int i;
+ u32 gpu_id;
+ u32 product_id;
/* Populate the base_gpu_props structure */
kbase_gpuprops_update_core_props_gpu_id(gpu_props);
@@ -245,17 +247,38 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, str
gpu_props->thread_props.tls_alloc =
gpu_props->raw_props.thread_tls_alloc;
-#if GPU_HAS_CSF_VERSION_10_REVISION_2
- gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 22);
- gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 22U, 2);
- gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 8);
- gpu_props->thread_props.max_thread_group_split = 0;
-#else
- gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16);
- gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8);
- gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6);
- gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2);
-#endif
+ /* Workaround for GPU2019HW-509. MIDHARC-2364 was wrongfully applied
+ * to tDUx GPUs.
+ */
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+ product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ if ((gpu_id & GPU_ID2_PRODUCT_MODEL) == GPU_ID2_PRODUCT_TDUX) {
+ gpu_props->thread_props.max_registers =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 0U, 22);
+ gpu_props->thread_props.impl_tech =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 22U, 2);
+ gpu_props->thread_props.max_task_queue =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 24U, 8);
+ gpu_props->thread_props.max_thread_group_split = 0;
+ } else {
+ gpu_props->thread_props.max_registers =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 0U, 16);
+ gpu_props->thread_props.max_task_queue =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 16U, 8);
+ gpu_props->thread_props.max_thread_group_split =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 24U, 6);
+ gpu_props->thread_props.impl_tech =
+ KBASE_UBFX32(gpu_props->raw_props.thread_features,
+ 30U, 2);
+ }
/* If values are not specified, then use defaults */
if (gpu_props->thread_props.max_registers == 0) {
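The KBASE_UBFX32() helper used throughout this hunk is an unsigned bitfield extract; a sketch of its likely shape (the driver's actual macro may differ in casts):

/* Extract 'size' bits starting at bit 'offset' from a 32-bit value. */
#define KBASE_UBFX32(value, offset, size) \
	(((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))

/* tDUx example from above: max_registers is bits [0:21] of thread_features */
u32 max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 22);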
diff --git a/mali_kbase/mali_kbase_hw.c b/mali_kbase/mali_kbase_hw.c
index 8330698..c277c0c 100644
--- a/mali_kbase/mali_kbase_hw.c
+++ b/mali_kbase/mali_kbase_hw.c
@@ -36,92 +36,57 @@ void kbase_hw_set_features_mask(struct kbase_device *kbdev)
{
const enum base_hw_feature *features;
u32 gpu_id;
- u32 product_id;
gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
- product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- if (GPU_ID_IS_NEW_FORMAT(product_id)) {
- switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
- case GPU_ID2_PRODUCT_TMIX:
- features = base_hw_features_tMIx;
- break;
- case GPU_ID2_PRODUCT_THEX:
- features = base_hw_features_tHEx;
- break;
- case GPU_ID2_PRODUCT_TSIX:
- features = base_hw_features_tSIx;
- break;
- case GPU_ID2_PRODUCT_TDVX:
- features = base_hw_features_tDVx;
- break;
- case GPU_ID2_PRODUCT_TNOX:
- features = base_hw_features_tNOx;
- break;
- case GPU_ID2_PRODUCT_TGOX:
- features = base_hw_features_tGOx;
- break;
- case GPU_ID2_PRODUCT_TEGX:
- features = base_hw_features_tEGx;
- break;
- case GPU_ID2_PRODUCT_TTRX:
- features = base_hw_features_tTRx;
- break;
- case GPU_ID2_PRODUCT_TNAX:
- features = base_hw_features_tNAx;
- break;
- case GPU_ID2_PRODUCT_TBEX:
- features = base_hw_features_tBEx;
- break;
- case GPU_ID2_PRODUCT_TULX:
- features = base_hw_features_tULx;
- break;
- case GPU_ID2_PRODUCT_TDUX:
- features = base_hw_features_tDUx;
- break;
- case GPU_ID2_PRODUCT_TODX:
- features = base_hw_features_tODx;
- break;
- case GPU_ID2_PRODUCT_TIDX:
- features = base_hw_features_tIDx;
- break;
- case GPU_ID2_PRODUCT_TVAX:
- features = base_hw_features_tVAx;
- break;
- default:
- features = base_hw_features_generic;
- break;
- }
- } else {
- switch (product_id) {
- case GPU_ID_PI_TFRX:
- /* FALLTHROUGH */
- case GPU_ID_PI_T86X:
- features = base_hw_features_tFxx;
- break;
- case GPU_ID_PI_T83X:
- features = base_hw_features_t83x;
- break;
- case GPU_ID_PI_T82X:
- features = base_hw_features_t82x;
- break;
- case GPU_ID_PI_T76X:
- features = base_hw_features_t76x;
- break;
- case GPU_ID_PI_T72X:
- features = base_hw_features_t72x;
- break;
- case GPU_ID_PI_T62X:
- features = base_hw_features_t62x;
- break;
- case GPU_ID_PI_T60X:
- features = base_hw_features_t60x;
- break;
- default:
- features = base_hw_features_generic;
- break;
- }
+ switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+ case GPU_ID2_PRODUCT_TMIX:
+ features = base_hw_features_tMIx;
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ features = base_hw_features_tHEx;
+ break;
+ case GPU_ID2_PRODUCT_TSIX:
+ features = base_hw_features_tSIx;
+ break;
+ case GPU_ID2_PRODUCT_TDVX:
+ features = base_hw_features_tDVx;
+ break;
+ case GPU_ID2_PRODUCT_TNOX:
+ features = base_hw_features_tNOx;
+ break;
+ case GPU_ID2_PRODUCT_TGOX:
+ features = base_hw_features_tGOx;
+ break;
+ case GPU_ID2_PRODUCT_TTRX:
+ features = base_hw_features_tTRx;
+ break;
+ case GPU_ID2_PRODUCT_TNAX:
+ features = base_hw_features_tNAx;
+ break;
+ case GPU_ID2_PRODUCT_LBEX:
+ case GPU_ID2_PRODUCT_TBEX:
+ features = base_hw_features_tBEx;
+ break;
+ case GPU_ID2_PRODUCT_TULX:
+ features = base_hw_features_tULx;
+ break;
+ case GPU_ID2_PRODUCT_TDUX:
+ features = base_hw_features_tDUx;
+ break;
+ case GPU_ID2_PRODUCT_TODX:
+ case GPU_ID2_PRODUCT_LODX:
+ features = base_hw_features_tODx;
+ break;
+ case GPU_ID2_PRODUCT_TIDX:
+ features = base_hw_features_tIDx;
+ break;
+ case GPU_ID2_PRODUCT_TVAX:
+ features = base_hw_features_tVAx;
+ break;
+ default:
+ features = base_hw_features_generic;
+ break;
}
for (; *features != BASE_HW_FEATURE_END; features++)
@@ -148,9 +113,6 @@ void kbase_hw_set_features_mask(struct kbase_device *kbdev)
* Return: pointer to an array of hardware issues, terminated by
* BASE_HW_ISSUE_END.
*
- * This function can only be used on new-format GPU IDs, i.e. those for which
- * GPU_ID_IS_NEW_FORMAT evaluates as true. The GPU ID is read from the @kbdev.
- *
* In debugging versions of the driver, unknown versions of a known GPU will
* be treated as the most recent known version not later than the actual
* version. In such circumstances, the GPU ID in @kbdev will also be replaced
@@ -210,10 +172,6 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tGOx_r1p0},
{U32_MAX, NULL} } },
- {GPU_ID2_PRODUCT_TEGX,
- {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tEGx_r0p0},
- {U32_MAX, NULL} } },
-
{GPU_ID2_PRODUCT_TTRX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0},
{GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tTRx_r0p0},
@@ -230,8 +188,14 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tNAx_r0p1},
{U32_MAX, NULL} } },
+ {GPU_ID2_PRODUCT_LBEX,
+ {{GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tBEx_r1p0},
+ {U32_MAX, NULL} } },
+
{GPU_ID2_PRODUCT_TBEX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tBEx_r0p0},
+ {GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tBEx_r0p0},
+ {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tBEx_r1p0},
{U32_MAX, NULL} } },
{GPU_ID2_PRODUCT_TULX,
@@ -246,6 +210,10 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tODx_r0p0},
{U32_MAX, NULL} } },
+ {GPU_ID2_PRODUCT_LODX,
+ {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tODx_r0p0},
+ {U32_MAX, NULL} } },
+
{GPU_ID2_PRODUCT_TIDX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tIDx_r0p0},
{U32_MAX, NULL} } },
@@ -341,242 +309,94 @@ int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
{
const enum base_hw_issue *issues;
u32 gpu_id;
- u32 product_id;
u32 impl_tech;
gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
- product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
impl_tech = kbdev->gpu_props.props.thread_props.impl_tech;
if (impl_tech != IMPLEMENTATION_MODEL) {
- if (GPU_ID_IS_NEW_FORMAT(product_id)) {
- issues = kbase_hw_get_issues_for_new_id(kbdev);
- if (issues == NULL) {
- dev_err(kbdev->dev,
- "Unknown GPU ID %x", gpu_id);
- return -EINVAL;
- }
+ issues = kbase_hw_get_issues_for_new_id(kbdev);
+ if (issues == NULL) {
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
#if !MALI_CUSTOMER_RELEASE
- /* The GPU ID might have been replaced with the last
- known version of the same GPU. */
- gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ /* The GPU ID might have been replaced with the last
+ known version of the same GPU. */
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
#endif
-
- } else {
- switch (gpu_id) {
- case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0):
- issues = base_hw_issues_t60x_r0p0_15dev0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC):
- issues = base_hw_issues_t60x_r0p0_eac;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0):
- issues = base_hw_issues_t60x_r0p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0):
- issues = base_hw_issues_t62x_r0p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1):
- issues = base_hw_issues_t62x_r1p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0):
- issues = base_hw_issues_t62x_r1p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1):
- issues = base_hw_issues_t76x_r0p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1):
- issues = base_hw_issues_t76x_r0p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
- issues = base_hw_issues_t76x_r0p1_50rel0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
- issues = base_hw_issues_t76x_r0p2;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
- issues = base_hw_issues_t76x_r0p3;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
- issues = base_hw_issues_t76x_r1p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1):
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 2):
- issues = base_hw_issues_t72x_r0p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
- issues = base_hw_issues_t72x_r1p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0):
- issues = base_hw_issues_t72x_r1p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2):
- issues = base_hw_issues_tFRx_r0p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0):
- issues = base_hw_issues_tFRx_r0p2;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 8):
- issues = base_hw_issues_tFRx_r1p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0):
- issues = base_hw_issues_tFRx_r2p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0):
- issues = base_hw_issues_t86x_r0p2;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 8):
- issues = base_hw_issues_t86x_r1p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T86X, 2, 0, 0):
- issues = base_hw_issues_t86x_r2p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T83X, 0, 1, 0):
- issues = base_hw_issues_t83x_r0p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 8):
- issues = base_hw_issues_t83x_r1p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 0, 0):
- issues = base_hw_issues_t82x_r0p0;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 1, 0):
- issues = base_hw_issues_t82x_r0p1;
- break;
- case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 8):
- issues = base_hw_issues_t82x_r1p0;
- break;
- default:
- dev_err(kbdev->dev,
- "Unknown GPU ID %x", gpu_id);
- return -EINVAL;
- }
- }
} else {
/* Software model */
- if (GPU_ID_IS_NEW_FORMAT(product_id)) {
- switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
- case GPU_ID2_PRODUCT_TMIX:
- issues = base_hw_issues_model_tMIx;
- break;
- case GPU_ID2_PRODUCT_THEX:
- issues = base_hw_issues_model_tHEx;
- break;
- case GPU_ID2_PRODUCT_TSIX:
- issues = base_hw_issues_model_tSIx;
- break;
- case GPU_ID2_PRODUCT_TDVX:
- issues = base_hw_issues_model_tDVx;
- break;
- case GPU_ID2_PRODUCT_TNOX:
- issues = base_hw_issues_model_tNOx;
- break;
- case GPU_ID2_PRODUCT_TGOX:
- issues = base_hw_issues_model_tGOx;
- break;
- case GPU_ID2_PRODUCT_TEGX:
- issues = base_hw_issues_model_tEGx;
- break;
- case GPU_ID2_PRODUCT_TTRX:
- issues = base_hw_issues_model_tTRx;
- break;
- case GPU_ID2_PRODUCT_TNAX:
- issues = base_hw_issues_model_tNAx;
- break;
- case GPU_ID2_PRODUCT_TBEX:
- issues = base_hw_issues_model_tBEx;
- break;
- case GPU_ID2_PRODUCT_TULX:
- issues = base_hw_issues_model_tULx;
- break;
- case GPU_ID2_PRODUCT_TDUX:
- issues = base_hw_issues_model_tDUx;
- break;
- case GPU_ID2_PRODUCT_TODX:
- issues = base_hw_issues_model_tODx;
- break;
- case GPU_ID2_PRODUCT_TIDX:
- issues = base_hw_issues_model_tIDx;
- break;
- case GPU_ID2_PRODUCT_TVAX:
- issues = base_hw_issues_model_tVAx;
- break;
- default:
- dev_err(kbdev->dev,
- "Unknown GPU ID %x", gpu_id);
- return -EINVAL;
- }
- } else {
- switch (product_id) {
- case GPU_ID_PI_T60X:
- issues = base_hw_issues_model_t60x;
- break;
- case GPU_ID_PI_T62X:
- issues = base_hw_issues_model_t62x;
- break;
- case GPU_ID_PI_T72X:
- issues = base_hw_issues_model_t72x;
- break;
- case GPU_ID_PI_T76X:
- issues = base_hw_issues_model_t76x;
- break;
- case GPU_ID_PI_TFRX:
- issues = base_hw_issues_model_tFRx;
- break;
- case GPU_ID_PI_T86X:
- issues = base_hw_issues_model_t86x;
- break;
- case GPU_ID_PI_T83X:
- issues = base_hw_issues_model_t83x;
- break;
- case GPU_ID_PI_T82X:
- issues = base_hw_issues_model_t82x;
- break;
- default:
- dev_err(kbdev->dev, "Unknown GPU ID %x",
- gpu_id);
- return -EINVAL;
- }
+ switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+ case GPU_ID2_PRODUCT_TMIX:
+ issues = base_hw_issues_model_tMIx;
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ issues = base_hw_issues_model_tHEx;
+ break;
+ case GPU_ID2_PRODUCT_TSIX:
+ issues = base_hw_issues_model_tSIx;
+ break;
+ case GPU_ID2_PRODUCT_TDVX:
+ issues = base_hw_issues_model_tDVx;
+ break;
+ case GPU_ID2_PRODUCT_TNOX:
+ issues = base_hw_issues_model_tNOx;
+ break;
+ case GPU_ID2_PRODUCT_TGOX:
+ issues = base_hw_issues_model_tGOx;
+ break;
+ case GPU_ID2_PRODUCT_TTRX:
+ issues = base_hw_issues_model_tTRx;
+ break;
+ case GPU_ID2_PRODUCT_TNAX:
+ issues = base_hw_issues_model_tNAx;
+ break;
+ case GPU_ID2_PRODUCT_LBEX:
+ case GPU_ID2_PRODUCT_TBEX:
+ issues = base_hw_issues_model_tBEx;
+ break;
+ case GPU_ID2_PRODUCT_TULX:
+ issues = base_hw_issues_model_tULx;
+ break;
+ case GPU_ID2_PRODUCT_TDUX:
+ issues = base_hw_issues_model_tDUx;
+ break;
+ case GPU_ID2_PRODUCT_TODX:
+ case GPU_ID2_PRODUCT_LODX:
+ issues = base_hw_issues_model_tODx;
+ break;
+ case GPU_ID2_PRODUCT_TIDX:
+ issues = base_hw_issues_model_tIDx;
+ break;
+ case GPU_ID2_PRODUCT_TVAX:
+ issues = base_hw_issues_model_tVAx;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
}
}
- if (GPU_ID_IS_NEW_FORMAT(product_id)) {
- dev_info(kbdev->dev,
- "GPU identified as 0x%x arch %d.%d.%d r%dp%d status %d",
- (gpu_id & GPU_ID2_PRODUCT_MAJOR) >>
- GPU_ID2_PRODUCT_MAJOR_SHIFT,
- (gpu_id & GPU_ID2_ARCH_MAJOR) >>
- GPU_ID2_ARCH_MAJOR_SHIFT,
- (gpu_id & GPU_ID2_ARCH_MINOR) >>
- GPU_ID2_ARCH_MINOR_SHIFT,
- (gpu_id & GPU_ID2_ARCH_REV) >>
- GPU_ID2_ARCH_REV_SHIFT,
- (gpu_id & GPU_ID2_VERSION_MAJOR) >>
- GPU_ID2_VERSION_MAJOR_SHIFT,
- (gpu_id & GPU_ID2_VERSION_MINOR) >>
- GPU_ID2_VERSION_MINOR_SHIFT,
- (gpu_id & GPU_ID2_VERSION_STATUS) >>
- GPU_ID2_VERSION_STATUS_SHIFT);
- } else {
- dev_info(kbdev->dev,
- "GPU identified as 0x%04x r%dp%d status %d",
- (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
- GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- (gpu_id & GPU_ID_VERSION_MAJOR) >>
- GPU_ID_VERSION_MAJOR_SHIFT,
- (gpu_id & GPU_ID_VERSION_MINOR) >>
- GPU_ID_VERSION_MINOR_SHIFT,
- (gpu_id & GPU_ID_VERSION_STATUS) >>
- GPU_ID_VERSION_STATUS_SHIFT);
- }
+ dev_info(kbdev->dev,
+ "GPU identified as 0x%x arch %d.%d.%d r%dp%d status %d",
+ (gpu_id & GPU_ID2_PRODUCT_MAJOR) >>
+ GPU_ID2_PRODUCT_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_ARCH_MAJOR) >>
+ GPU_ID2_ARCH_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_ARCH_MINOR) >>
+ GPU_ID2_ARCH_MINOR_SHIFT,
+ (gpu_id & GPU_ID2_ARCH_REV) >>
+ GPU_ID2_ARCH_REV_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_MAJOR) >>
+ GPU_ID2_VERSION_MAJOR_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_MINOR) >>
+ GPU_ID2_VERSION_MINOR_SHIFT,
+ (gpu_id & GPU_ID2_VERSION_STATUS) >>
+ GPU_ID2_VERSION_STATUS_SHIFT);
for (; *issues != BASE_HW_ISSUE_END; issues++)
set_bit(*issues, &kbdev->hw_issues_mask[0]);
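Once populated, the bitmask is queried by workaround code elsewhere in the driver; a sketch of the usual access pattern (mali_kbase_hw.h provides a kbase_hw_has_issue() helper along these lines):

#define kbase_hw_has_issue(kbdev, issue) \
	test_bit(issue, &(kbdev)->hw_issues_mask[0])

if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
	/* e.g. save the GPU clock frequency so it can be restored
	 * when the L2 is next powered on (see previous_frequency).
	 */
}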
diff --git a/mali_kbase/mali_kbase_hwaccess_jm.h b/mali_kbase/mali_kbase_hwaccess_jm.h
index c040753..c3b60e6 100644
--- a/mali_kbase/mali_kbase_hwaccess_jm.h
+++ b/mali_kbase/mali_kbase_hwaccess_jm.h
@@ -247,14 +247,16 @@ void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
struct kbase_jd_atom *target_katom);
/**
- * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
- * running from a context
+ * kbase_backend_jm_kill_running_jobs_from_kctx - Kill all jobs that are
+ * currently running on GPU from a context
* @kctx: Context pointer
*
* This is used in response to a page fault to remove all jobs from the faulting
* context from the hardware.
+ *
+ * Caller must hold hwaccess_lock.
*/
-void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);
+void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx);
/**
* kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
diff --git a/mali_kbase/mali_kbase_hwaccess_pm.h b/mali_kbase/mali_kbase_hwaccess_pm.h
index 44c16f4..96c473a 100644
--- a/mali_kbase/mali_kbase_hwaccess_pm.h
+++ b/mali_kbase/mali_kbase_hwaccess_pm.h
@@ -48,39 +48,16 @@ struct kbase_device;
*
* Return: 0 if the power management framework was successfully initialized.
*/
-int kbase_hwaccess_pm_early_init(struct kbase_device *kbdev);
-
-/**
- * Initialize the power management framework.
- *
- * Must be called before any other power management function (except
- * @ref kbase_hwaccess_pm_early_init)
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- *
- * Return: 0 if the power management framework was successfully initialized.
- */
-int kbase_hwaccess_pm_late_init(struct kbase_device *kbdev);
-
-/**
- * Terminate the power management framework.
- *
- * No power management functions may be called after this (except
- * @ref kbase_pm_init)
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- */
-void kbase_hwaccess_pm_early_term(struct kbase_device *kbdev);
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev);
/**
* Terminate the power management framework.
*
- * No power management functions may be called after this (except
- * @ref kbase_hwaccess_pm_early_term or @ref kbase_hwaccess_pm_late_init)
+ * No power management functions may be called after this
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*/
-void kbase_hwaccess_pm_late_term(struct kbase_device *kbdev);
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev);
/**
* kbase_hwaccess_pm_powerup - Power up the GPU.
diff --git a/mali_kbase/mali_kbase_ioctl.h b/mali_kbase/mali_kbase_ioctl.h
index 525bfb6..9b138e5 100644
--- a/mali_kbase/mali_kbase_ioctl.h
+++ b/mali_kbase/mali_kbase_ioctl.h
@@ -36,7 +36,7 @@ extern "C" {
* 11.1:
* - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
* 11.2:
- * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_SECURE,
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED,
* which some user-side clients prior to 11.2 might fault if they received
* them
* 11.3:
@@ -79,9 +79,14 @@ extern "C" {
* dma-buf. Now, buffers are mapped on GPU when first imported, no longer
* requiring external resource or sticky resource tracking. UNLESS,
* CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
+ * 11.17:
+ * - Added BASE_JD_REQ_JOB_SLOT.
+ * - Reused padding field in base_jd_atom_v2 to pass job slot number.
+ * 11.18:
+ * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
*/
#define BASE_UK_VERSION_MAJOR 11
-#define BASE_UK_VERSION_MINOR 16
+#define BASE_UK_VERSION_MINOR 17
/**
* struct kbase_ioctl_version_check - Check version compatibility with kernel
@@ -703,6 +708,39 @@ struct kbase_ioctl_mem_exec_init {
_IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init)
+/**
+ * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of
+ * cpu/gpu time (counter values)
+ *
+ * @request_flags: Bit-flags indicating the requested types.
+ * @paddings: Unused; pads the input to match the size of the output struct.
+ * @sec: Integer seconds part of the monotonic time.
+ * @nsec: Fractional part of the monotonic time, in nanoseconds.
+ * @padding: Unused, for u64 alignment
+ * @timestamp: System wide timestamp (counter) value.
+ * @cycle_counter: GPU cycle counter value.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ *
+ */
+union kbase_ioctl_get_cpu_gpu_timeinfo {
+ struct {
+ __u32 request_flags;
+ __u32 paddings[7];
+ } in;
+ struct {
+ __u64 sec;
+ __u32 nsec;
+ __u32 padding;
+ __u64 timestamp;
+ __u64 cycle_counter;
+ } out;
+};
+
+#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
+ _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo)
+
/***************
* test ioctls *
***************/
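From userspace, the new ioctl might be exercised as below; a sketch assuming mali_fd came from opening the kbase device node (e.g. /dev/mali0), with error handling elided:

union kbase_ioctl_get_cpu_gpu_timeinfo info = { 0 };

info.in.request_flags = BASE_TIMEINFO_MONOTONIC_FLAG |
			BASE_TIMEINFO_TIMESTAMP_FLAG |
			BASE_TIMEINFO_CYCLE_COUNTER_FLAG;

if (ioctl(mali_fd, KBASE_IOCTL_GET_CPU_GPU_TIMEINFO, &info) == 0)
	printf("timestamp=%llu cycles=%llu mono=%llu.%09us\n",
	       (unsigned long long)info.out.timestamp,
	       (unsigned long long)info.out.cycle_counter,
	       (unsigned long long)info.out.sec,
	       info.out.nsec);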
diff --git a/mali_kbase/mali_kbase_jd.c b/mali_kbase/mali_kbase_jd.c
index c984a82..02d5976 100644
--- a/mali_kbase/mali_kbase_jd.c
+++ b/mali_kbase/mali_kbase_jd.c
@@ -22,9 +22,7 @@
-#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
@@ -284,7 +282,7 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
}
if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
- (reg->flags & KBASE_REG_SECURE)) {
+ (reg->flags & KBASE_REG_PROTECTED)) {
katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
}
@@ -737,6 +735,7 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
katom->device_nr = user_atom->device_nr;
katom->jc = user_atom->jc;
katom->core_req = user_atom->core_req;
+ katom->jobslot = user_atom->jobslot;
katom->atom_flags = 0;
katom->retry_count = 0;
katom->need_cache_flush_cores_retained = 0;
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index e2e1d17..77d9716 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -2031,12 +2031,19 @@ bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
(katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
return false;
+ if ((katom->core_req & BASE_JD_REQ_JOB_SLOT) &&
+ (katom->jobslot >= BASE_JM_MAX_NR_SLOTS))
+ return false;
+
return true;
}
static int kbase_js_get_slot(struct kbase_device *kbdev,
struct kbase_jd_atom *katom)
{
+ if (katom->core_req & BASE_JD_REQ_JOB_SLOT)
+ return katom->jobslot;
+
if (katom->core_req & BASE_JD_REQ_FS)
return 0;
@@ -2335,6 +2342,8 @@ static void js_return_worker(struct work_struct *data)
mutex_unlock(&js_devdata->queue_mutex);
katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+ WARN_ON(kbasep_js_has_atom_finished(&retained_state));
+
kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
&retained_state);
@@ -2691,7 +2700,6 @@ void kbase_js_zap_context(struct kbase_context *kctx)
struct kbase_device *kbdev = kctx->kbdev;
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
- int js;
/*
* Critical assumption: No more submission is possible outside of the
@@ -2747,6 +2755,7 @@ void kbase_js_zap_context(struct kbase_context *kctx)
*/
if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
unsigned long flags;
+ int js;
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
@@ -2812,8 +2821,7 @@ void kbase_js_zap_context(struct kbase_context *kctx)
/* Cancel any remaining running jobs for this kctx - if any.
* Submit is disallowed which takes effect immediately, so no
* more new jobs will appear after we do this. */
- for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
- kbase_job_slot_hardstop(kctx, js, NULL);
+ kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index f55d8ea..fa05f34 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -26,9 +26,7 @@
* @file mali_kbase_mem.c
* Base kernel memory APIs
*/
-#ifdef CONFIG_DMA_SHARED_BUFFER
#include <linux/dma-buf.h>
-#endif /* CONFIG_DMA_SHARED_BUFFER */
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compat.h>
@@ -2504,7 +2502,6 @@ void kbase_mem_kref_free(struct kref *kref)
case KBASE_MEM_TYPE_RAW:
/* raw pages, external cleanup */
break;
-#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM:
if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
WARN_ONCE(alloc->imported.umm.current_mapping_usage_count != 1,
@@ -2519,7 +2516,6 @@ void kbase_mem_kref_free(struct kref *kref)
alloc->imported.umm.dma_attachment);
dma_buf_put(alloc->imported.umm.dma_buf);
break;
-#endif
case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
if (alloc->imported.user_buf.mm)
mmdrop(alloc->imported.user_buf.mm);
@@ -2659,8 +2655,8 @@ bool kbase_check_import_flags(unsigned long flags)
if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
return false;
- /* Secure memory cannot be read by the CPU */
- if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD))
+ /* Protected memory cannot be read by the CPU */
+ if ((flags & BASE_MEM_PROTECTED) && (flags & BASE_MEM_PROT_CPU_RD))
return false;
return true;
@@ -3733,14 +3729,12 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
}
}
break;
-#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
err = kbase_mem_umm_map(kctx, reg);
if (err)
goto exit;
break;
}
-#endif
default:
goto exit;
}
@@ -3754,12 +3748,10 @@ void kbase_unmap_external_resource(struct kbase_context *kctx,
struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
{
switch (alloc->type) {
-#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
kbase_mem_umm_unmap(kctx, reg, alloc);
}
break;
-#endif /* CONFIG_DMA_SHARED_BUFFER */
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
alloc->imported.user_buf.current_mapping_usage_count--;
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index ca5bbee..bebf55f 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -140,14 +140,12 @@ struct kbase_mem_phy_alloc {
u8 group_id;
union {
-#if defined(CONFIG_DMA_SHARED_BUFFER)
struct {
struct dma_buf *dma_buf;
struct dma_buf_attachment *dma_attachment;
unsigned int current_mapping_usage_count;
struct sg_table *sgt;
} umm;
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
struct {
u64 stride;
size_t nents;
@@ -292,7 +290,7 @@ struct kbase_va_region {
#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16)
#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
-#define KBASE_REG_SECURE (1ul << 19)
+#define KBASE_REG_PROTECTED (1ul << 19)
#define KBASE_REG_DONT_NEED (1ul << 20)
@@ -1626,7 +1624,6 @@ static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);
-#if defined(CONFIG_DMA_SHARED_BUFFER)
/**
* kbase_mem_umm_map - Map dma-buf
* @kctx: Pointer to the kbase context
@@ -1674,6 +1671,5 @@ void kbase_mem_umm_unmap(struct kbase_context *kctx,
*/
int kbase_mem_do_sync_imported(struct kbase_context *kctx,
struct kbase_va_region *reg, enum kbase_sync_type sync_fn);
-#endif /* CONFIG_DMA_SHARED_BUFFER */
#endif /* _KBASE_MEM_H_ */
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 9e121f0..50a74ad 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -39,9 +39,7 @@
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
#include <linux/dma-attrs.h>
#endif /* LINUX_VERSION_CODE >= 3.5.0 && < 4.8.0 */
-#ifdef CONFIG_DMA_SHARED_BUFFER
#include <linux/dma-buf.h>
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
#include <linux/shrinker.h>
#include <linux/cache.h>
#include <linux/memory_group_manager.h>
@@ -51,19 +49,37 @@
#include <mali_kbase_tracepoints.h>
#include <mali_kbase_ioctl.h>
-#if KERNEL_VERSION(4, 17, 2) > LINUX_VERSION_CODE
-/* Enable workaround for ion for versions prior to v4.17.2 to avoid the potentially
+#if ((KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE) || \
+ (KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE))
+/* Enable workaround for ion for kernels prior to v5.0.0 and from v5.3.0
+ * onwards.
+ *
+ * For kernels prior to v4.12, the workaround is needed as ion lacks the
+ * cache maintenance in its begin_cpu_access and end_cpu_access methods.
+ *
+ * For kernels prior to v4.17.2, the workaround is needed to avoid the potentially
* disruptive warnings which can come if begin_cpu_access and end_cpu_access
* methods are not called in pairs.
+ * Note that some long term maintenance kernel versions (e.g. 4.9.x, 4.14.x)
+ * only require this workaround on their earlier releases. However, it is
+ * still safe to use on such releases, and doing so simplifies the version
+ * check.
*
- * dma_sync_sg_for_* calls will be made directly as a workaround.
+ * For kernels later than v4.17.2, the workaround is needed as ion can potentially
+ * end up calling dma_sync_sg_for_* for a dma-buf importer that hasn't mapped
+ * the attachment. This would result in a kernel panic as ion populates the
+ * dma_address when the attachment is mapped and kernel derives the physical
+ * address for cache maintenance from the dma_address.
+ * With some multi-threaded tests it has been seen that the same dma-buf
+ * memory gets imported twice on the Mali DDK side, so the sync against an
+ * importer with an unmapped attachment happens at the time of the 2nd
+ * import. The same problem can arise if there is another importer of the
+ * dma-buf memory.
*
- * Note that some long term maintenance kernel versions (e.g. 4.9.x, 4.14.x) only require this
- * workaround on their earlier releases. However it is still safe to use it on such releases, and
- * it simplifies the version check.
+ * The workaround can be safely disabled for kernels between v5.0.0 and
+ * v5.2.2, as none of the issues stated above are present there.
*
- * This will also address the case on kernels prior to 4.12, where ion lacks
- * the cache maintenance in begin_cpu_access and end_cpu_access methods.
+ * dma_sync_sg_for_* calls will be made directly as a workaround, using
+ * Kbase's previously mapped attachment to the dma-buf.
*/
#define KBASE_MEM_ION_SYNC_WORKAROUND
#endif
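When KBASE_MEM_ION_SYNC_WORKAROUND is defined, the driver performs the cache maintenance itself instead of going through the exporter's begin/end_cpu_access hooks. A minimal sketch of the pattern, assuming locals named attachment, sgt, dma_buf and dir taken from an already-mapped import (the field names follow the imported.umm fields visible elsewhere in this patch; the snippet is illustrative):

#ifdef KBASE_MEM_ION_SYNC_WORKAROUND
	/* Sync the previously mapped scatterlist directly, bypassing the
	 * exporter; attachment and sgt are assumed to come from the
	 * import's imported.umm fields.
	 */
	dma_sync_sg_for_cpu(attachment->dev, sgt->sgl, sgt->nents, dir);
#else
	/* Otherwise go through the exporter's cpu-access hook; the
	 * matching dma_buf_end_cpu_access() handles the sync-to-device
	 * direction.
	 */
	ret = dma_buf_begin_cpu_access(dma_buf, dir);
#endif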
@@ -139,11 +155,11 @@ static int kbase_phy_alloc_mapping_init(struct kbase_context *kctx,
return -EINVAL;
if (size > (KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES -
- kctx->permanent_mapped_pages)) {
- dev_warn(kctx->kbdev->dev, "Request for %llu more pages mem needing a permanent mapping would breach limit %lu, currently at %lu pages",
+ atomic_read(&kctx->permanent_mapped_pages))) {
+ dev_warn(kctx->kbdev->dev, "Request for %llu more pages mem needing a permanent mapping would breach limit %lu, currently at %d pages",
(u64)size,
KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES,
- kctx->permanent_mapped_pages);
+ atomic_read(&kctx->permanent_mapped_pages));
return -ENOMEM;
}
@@ -159,7 +175,7 @@ static int kbase_phy_alloc_mapping_init(struct kbase_context *kctx,
reg->flags &= ~KBASE_REG_GROWABLE;
reg->cpu_alloc->permanent_map = kern_mapping;
- kctx->permanent_mapped_pages += size;
+ atomic_add(size, &kctx->permanent_mapped_pages);
return 0;
vmap_fail:
@@ -180,8 +196,8 @@ void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
* this being reduced a second time if a separate gpu_alloc is
* freed
*/
- WARN_ON(alloc->nents > kctx->permanent_mapped_pages);
- kctx->permanent_mapped_pages -= alloc->nents;
+ WARN_ON(alloc->nents > atomic_read(&kctx->permanent_mapped_pages));
+ atomic_sub(alloc->nents, &kctx->permanent_mapped_pages);
}
void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx,
@@ -368,11 +384,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
/* mmap needed to setup VA? */
if (*flags & BASE_MEM_SAME_VA) {
- unsigned long prot = PROT_NONE;
- unsigned long va_size = va_pages << PAGE_SHIFT;
- unsigned long va_map = va_size;
unsigned long cookie, cookie_nr;
- unsigned long cpu_addr;
/* Bind to a cookie */
if (!kctx->cookies) {
@@ -390,64 +402,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE);
cookie <<= PAGE_SHIFT;
- /*
- * 10.1-10.4 UKU userland relies on the kernel to call mmap.
- * For all other versions we can just return the cookie
- */
- if (kctx->api_version < KBASE_API_VERSION(10, 1) ||
- kctx->api_version > KBASE_API_VERSION(10, 4)) {
- *gpu_va = (u64) cookie;
- kbase_gpu_vm_unlock(kctx);
- return reg;
- }
-
- kbase_va_region_alloc_get(kctx, reg);
- kbase_gpu_vm_unlock(kctx);
-
- if (*flags & BASE_MEM_PROT_CPU_RD)
- prot |= PROT_READ;
- if (*flags & BASE_MEM_PROT_CPU_WR)
- prot |= PROT_WRITE;
-
- cpu_addr = vm_mmap(kctx->filp, 0, va_map, prot,
- MAP_SHARED, cookie);
-
- kbase_gpu_vm_lock(kctx);
-
- /* Since vm lock was released, check if the region has already
- * been freed meanwhile. This could happen if User was able to
- * second guess the cookie or the CPU VA and free the region
- * through the guessed value.
- */
- if (reg->flags & KBASE_REG_VA_FREED) {
- kbase_va_region_alloc_put(kctx, reg);
- reg = NULL;
- } else if (IS_ERR_VALUE(cpu_addr)) {
- /* Once the vm lock is released, multiple scenarios can
- * arise under which the cookie could get re-assigned
- * to some other region.
- */
- if (!WARN_ON(kctx->pending_regions[cookie_nr] &&
- (kctx->pending_regions[cookie_nr] != reg))) {
- kctx->pending_regions[cookie_nr] = NULL;
- kctx->cookies |= (1UL << cookie_nr);
- }
-
- /* Region has not been freed and we can be sure that
- * User won't be able to free the region now. So we
- * can free it ourselves.
- * If the region->start_pfn isn't zero then the
- * allocation will also be unmapped from GPU side.
- */
- kbase_mem_free_region(kctx, reg);
- kbase_va_region_alloc_put(kctx, reg);
- reg = NULL;
- } else {
- kbase_va_region_alloc_put(kctx, reg);
- *gpu_va = (u64) cpu_addr;
- }
-
- kbase_gpu_vm_unlock(kctx);
+ *gpu_va = (u64) cookie;
} else /* we control the VA */ {
if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
dev_warn(dev, "Failed to map memory on GPU");
@@ -456,10 +411,9 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
}
/* return real GPU VA */
*gpu_va = reg->start_pfn << PAGE_SHIFT;
-
- kbase_gpu_vm_unlock(kctx);
}
+ kbase_gpu_vm_unlock(kctx);
return reg;
no_mmap:
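With the 10.1-10.4 compatibility path gone, a BASE_MEM_SAME_VA allocation always hands the cookie back and userspace performs the mmap itself. A hypothetical userland sketch of the resulting flow (the ioctl wrapper name is invented; using the cookie as the mmap offset mirrors the vm_mmap() call removed above):

#include <stdint.h>
#include <sys/mman.h>

/* kbase_mem_alloc_ioctl() is a hypothetical wrapper for the allocation
 * ioctl; kbase_fd is the opened kbase device. The returned cookie is
 * passed as the mmap offset, as the removed in-kernel
 * vm_mmap(kctx->filp, 0, size, prot, MAP_SHARED, cookie) did.
 */
uint64_t cookie = kbase_mem_alloc_ioctl(kbase_fd, va_pages, &flags);
void *cpu_va = mmap(NULL, va_pages << PAGE_SHIFT,
		PROT_READ | PROT_WRITE, MAP_SHARED,
		kbase_fd, (off_t)cookie);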
@@ -542,8 +496,8 @@ int kbase_mem_query(struct kbase_context *kctx,
* for compatibility reasons */
if (KBASE_REG_PF_GROW & reg->flags)
*out |= BASE_MEM_GROW_ON_GPF;
- if (KBASE_REG_SECURE & reg->flags)
- *out |= BASE_MEM_SECURE;
+ if (KBASE_REG_PROTECTED & reg->flags)
+ *out |= BASE_MEM_PROTECTED;
}
if (KBASE_REG_TILER_ALIGN_TOP & reg->flags)
*out |= BASE_MEM_TILER_ALIGN_TOP;
@@ -913,7 +867,6 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
new_flags |= real_flags;
/* Currently supporting only imported memory */
-#ifdef CONFIG_DMA_SHARED_BUFFER
if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM) {
ret = -EINVAL;
goto out_unlock;
@@ -955,10 +908,6 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
ret);
} else
WARN_ON(!reg->gpu_alloc->imported.umm.current_mapping_usage_count);
-#else
- /* Reject when dma-buf support is not enabled. */
- ret = -EINVAL;
-#endif /* CONFIG_DMA_SHARED_BUFFER */
/* If everything is good, then set the new flags on the region. */
if (!ret)
@@ -977,7 +926,6 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
struct kbase_va_region *reg, enum kbase_sync_type sync_fn)
{
int ret = -EINVAL;
-#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf *dma_buf;
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -1065,16 +1013,9 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
"Failed to sync mem region %pK at GPU VA %llx: %d\n",
reg, reg->start_pfn, ret);
-#else /* CONFIG_DMA_SHARED_BUFFER */
- CSTD_UNUSED(kctx);
- CSTD_UNUSED(reg);
- CSTD_UNUSED(sync_fn);
-#endif /* CONFIG_DMA_SHARED_BUFFER */
-
return ret;
}
-#ifdef CONFIG_DMA_SHARED_BUFFER
/**
* kbase_mem_umm_unmap_attachment - Unmap dma-buf attachment
* @kctx: Pointer to kbase context
@@ -1401,8 +1342,8 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */
- if (*flags & BASE_MEM_SECURE)
- reg->flags |= KBASE_REG_SECURE;
+ if (*flags & BASE_MEM_PROTECTED)
+ reg->flags |= KBASE_REG_PROTECTED;
if (padding)
reg->flags |= KBASE_REG_IMPORT_PAD;
@@ -1440,7 +1381,6 @@ no_alloc:
return NULL;
}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev)
{
@@ -1895,7 +1835,6 @@ int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
}
switch (type) {
-#ifdef CONFIG_DMA_SHARED_BUFFER
case BASE_MEM_IMPORT_TYPE_UMM: {
int fd;
@@ -1906,7 +1845,6 @@ int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
padding);
}
break;
-#endif /* CONFIG_DMA_SHARED_BUFFER */
case BASE_MEM_IMPORT_TYPE_USER_BUFFER: {
struct base_mem_import_user_buffer user_buffer;
void __user *uptr;
@@ -2678,7 +2616,6 @@ int kbase_context_mmap(struct kbase_context *const kctx,
goto out_unlock;
}
-#ifdef CONFIG_DMA_SHARED_BUFFER
if (KBASE_MEM_TYPE_IMPORTED_UMM ==
reg->cpu_alloc->type) {
if (0 != (vma->vm_pgoff - reg->start_pfn)) {
@@ -2692,7 +2629,6 @@ int kbase_context_mmap(struct kbase_context *const kctx,
vma, vma->vm_pgoff - reg->start_pfn);
goto out_unlock;
}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
if (reg->cpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
/* initial params check for aliased dumping map */
diff --git a/mali_kbase/mali_kbase_mem_linux.h b/mali_kbase/mali_kbase_mem_linux.h
index e34972f..02f1c3b 100644
--- a/mali_kbase/mali_kbase_mem_linux.h
+++ b/mali_kbase/mali_kbase_mem_linux.h
@@ -440,4 +440,30 @@ void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
*/
u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev);
+#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
+static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, pgprot_t pgprot)
+{
+ int err;
+
+#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
+ ((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
+ (KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
+ if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
+ return VM_FAULT_SIGBUS;
+
+ err = vm_insert_pfn(vma, addr, pfn);
+#else
+ err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
+#endif
+
+ if (unlikely(err == -ENOMEM))
+ return VM_FAULT_OOM;
+ if (unlikely(err < 0 && err != -EBUSY))
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+#endif
+
#endif /* _KBASE_MEM_LINUX_H_ */
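The shim above lets page-fault handlers be written once against the vm_fault_t-returning API that mainline gained in v4.20, with the pre-v4.20 fallbacks hidden behind the version checks. A sketch of how a fault handler would consume it (the handler and the pfn lookup helper are hypothetical):

/* Hypothetical .fault handler; my_region_pfn() is an invented helper
 * that resolves the faulting address to a physical frame number.
 */
static vm_fault_t my_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = my_region_pfn(vma, vmf->address);

	/* Compiles unchanged on pre- and post-v4.20 kernels */
	return vmf_insert_pfn_prot(vma, vmf->address, pfn,
			vma->vm_page_prot);
}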
diff --git a/mali_kbase/mali_kbase_mem_pool_debugfs.c b/mali_kbase/mali_kbase_mem_pool_debugfs.c
index 9896202..edb9cd4 100644
--- a/mali_kbase/mali_kbase_mem_pool_debugfs.c
+++ b/mali_kbase/mali_kbase_mem_pool_debugfs.c
@@ -26,8 +26,6 @@
#include "mali_kbase_mem_pool_debugfs.h"
#include "mali_kbase_debugfs_helper.h"
-#ifdef CONFIG_DEBUG_FS
-
void kbase_mem_pool_debugfs_trim(void *const array, size_t const index,
size_t const value)
{
@@ -183,5 +181,3 @@ void kbase_mem_pool_debugfs_init(struct dentry *parent,
debugfs_create_file("lp_mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
&kctx->mem_pools.large, &kbase_mem_pool_debugfs_max_size_fops);
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/mali_kbase/mali_kbase_mipe_proto.h b/mali_kbase/mali_kbase_mipe_proto.h
index fb61faa..1a0b8b4 100644
--- a/mali_kbase/mali_kbase_mipe_proto.h
+++ b/mali_kbase/mali_kbase_mipe_proto.h
@@ -24,10 +24,10 @@
#define _KBASE_MIPE_PROTO_H
#define _BITFIELD_MASK_FIELD(pos, len) \
- (((1 << len) - 1) << pos)
+ (((1u << len) - 1) << pos)
#define _BITFIELD_SET_FIELD(pos, len, value) \
- (_BITFIELD_MASK_FIELD(pos, len) & ((value) << pos))
+ (_BITFIELD_MASK_FIELD(pos, len) & (((u32) value) << pos))
#define BITFIELD_SET(field_name, value) \
_BITFIELD_SET_FIELD(field_name ## _POS, field_name ## _LEN, value)
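The change to 1u and the (u32) cast keeps the shift arithmetic unsigned: with the previous signed literal, a field whose top bit lands on bit 31 would shift a signed 1 into the sign bit, which is undefined behaviour in C. A worked example with a hypothetical 4-bit field at bits 11..8:

/* Hypothetical field definitions; only the macros come from this header */
#define PKT_FAMILY_POS 8
#define PKT_FAMILY_LEN 4

/* _BITFIELD_MASK_FIELD(8, 4)    == ((1u << 4) - 1) << 8     == 0x0F00
 * BITFIELD_SET(PKT_FAMILY, 0xB) == 0x0F00 & ((u32)0xB << 8) == 0x0B00
 */
u32 word = BITFIELD_SET(PKT_FAMILY, 0xB); /* 0x00000B00 */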
diff --git a/mali_kbase/mali_kbase_mmu.c b/mali_kbase/mali_kbase_mmu.c
index a8cd5ac..ccb63d0 100644
--- a/mali_kbase/mali_kbase_mmu.c
+++ b/mali_kbase/mali_kbase_mmu.c
@@ -115,6 +115,7 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
struct kbase_as *as, const char *reason_str,
struct kbase_fault *fault);
+
static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
struct tagged_addr *phys, size_t nr,
unsigned long flags, int group_id);
@@ -2045,23 +2046,20 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
char *mmu_dump_buffer;
u64 config[3];
size_t dump_size, size = 0;
+ struct kbase_mmu_setup as_setup;
buffer = (char *)kaddr;
mmu_dump_buffer = buffer;
- if (kctx->api_version >= KBASE_API_VERSION(8, 4)) {
- struct kbase_mmu_setup as_setup;
-
- kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu,
- &as_setup);
- config[0] = as_setup.transtab;
- config[1] = as_setup.memattr;
- config[2] = as_setup.transcfg;
- memcpy(buffer, &config, sizeof(config));
- mmu_dump_buffer += sizeof(config);
- size_left -= sizeof(config);
- size += sizeof(config);
- }
+ kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu,
+ &as_setup);
+ config[0] = as_setup.transtab;
+ config[1] = as_setup.memattr;
+ config[2] = as_setup.transcfg;
+ memcpy(buffer, &config, sizeof(config));
+ mmu_dump_buffer += sizeof(config);
+ size_left -= sizeof(config);
+ size += sizeof(config);
dump_size = kbasep_mmu_dump_level(kctx,
kctx->mmu.pgd,
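Since the pre-8.4 API split is gone, every dump now begins with the same three-word header before the page-table levels. The layout implied by the code above, as an illustrative struct (the driver writes it as a raw config[3] copy rather than through a named type):

/* Illustrative view of the start of the kbase_mmu_dump() buffer */
struct mmu_dump_header {
	u64 transtab; /* address-space translation table setup */
	u64 memattr;  /* memory attribute encoding */
	u64 transcfg; /* translation configuration */
};
/* ...followed by the per-level page table dump produced by
 * kbasep_mmu_dump_level() starting from kctx->mmu.pgd.
 */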
@@ -2115,8 +2113,9 @@ void bus_fault_worker(struct work_struct *data)
kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
- /* Grab the context that was already refcounted in kbase_mmu_interrupt().
- * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+ /* Grab the context, which was already refcounted in kbase_mmu_interrupt()
+ * when the bus fault was flagged. Therefore it cannot be scheduled out of
+ * this AS until we explicitly release it
*/
kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
if (WARN_ON(!kctx)) {
@@ -2427,11 +2426,11 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
* out/rescheduled - this will occur on releasing the context's refcount */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbasep_js_clear_submit_allowed(js_devdata, kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
* context can appear in the job slots from this point on */
- kbase_backend_jm_kill_jobs_from_kctx(kctx);
+ kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* AS transaction begin */
mutex_lock(&kbdev->mmu_hw_mutex);
@@ -2622,33 +2621,31 @@ void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
struct kbase_context *kctx, struct kbase_as *as,
struct kbase_fault *fault)
{
- struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
-
lockdep_assert_held(&kbdev->hwaccess_lock);
if (!kctx) {
dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
- kbase_as_has_bus_fault(as) ?
+ kbase_as_has_bus_fault(as, fault) ?
"Bus error" : "Page fault",
as->number, fault->addr);
/* Since no ctx was found, the MMU must be disabled. */
WARN_ON(as->current_setup.transtab);
- if (kbase_as_has_bus_fault(as)) {
+ if (kbase_as_has_bus_fault(as, fault)) {
kbase_mmu_hw_clear_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
kbase_mmu_hw_enable_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- } else if (kbase_as_has_page_fault(as)) {
+ } else if (kbase_as_has_page_fault(as, fault)) {
kbase_mmu_hw_clear_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
kbase_mmu_hw_enable_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}
- if (kbase_as_has_bus_fault(as) &&
- kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ if (kbase_as_has_bus_fault(as, fault) &&
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
bool reset_status;
/*
* Reset the GPU, like in bus_fault_worker, in case an
@@ -2664,7 +2661,9 @@ void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
return;
}
- if (kbase_as_has_bus_fault(as)) {
+ if (kbase_as_has_bus_fault(as, fault)) {
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
/*
* hw counters dumping in progress, signal the
* other thread that it failed
diff --git a/mali_kbase/mali_kbase_native_mgm.c b/mali_kbase/mali_kbase_native_mgm.c
index 022c056..38ae46e 100644
--- a/mali_kbase/mali_kbase_native_mgm.c
+++ b/mali_kbase/mali_kbase_native_mgm.c
@@ -27,32 +27,6 @@
#include <mali_kbase.h>
#include <mali_kbase_native_mgm.h>
-#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE)
-static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- int err = vm_insert_pfn(vma, addr, pfn);
-
- if (unlikely(err == -ENOMEM))
- return VM_FAULT_OOM;
- if (unlikely(err < 0 && err != -EBUSY))
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
-}
-#endif
-
-#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
-static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, pgprot_t pgprot)
-{
- if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
- return VM_FAULT_SIGBUS;
-
- return vmf_insert_pfn(vma, addr, pfn);
-}
-#endif
-
/**
* kbase_native_mgm_alloc - Native physical memory allocation method
*
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index 868e1ea..88773cc 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -24,10 +24,8 @@
#include <mali_kbase.h>
-#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
#include <asm/cacheflush.h>
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
#include <mali_kbase_sync.h>
#endif
@@ -774,9 +772,7 @@ int kbase_mem_copy_from_extres(struct kbase_context *kctx,
size_t to_copy = min(extres_size, buf_data->size);
struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
int ret = 0;
-#ifdef CONFIG_DMA_SHARED_BUFFER
size_t dma_to_copy;
-#endif
KBASE_DEBUG_ASSERT(pages != NULL);
@@ -807,7 +803,6 @@ int kbase_mem_copy_from_extres(struct kbase_context *kctx,
break;
}
break;
-#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
@@ -847,7 +842,6 @@ int kbase_mem_copy_from_extres(struct kbase_context *kctx,
DMA_FROM_DEVICE);
break;
}
-#endif
default:
ret = -EINVAL;
}
diff --git a/mali_kbase/mali_midg_regmap.h b/mali_kbase/mali_midg_regmap.h
index a4aba19..f0ec391 100644
--- a/mali_kbase/mali_midg_regmap.h
+++ b/mali_kbase/mali_midg_regmap.h
@@ -20,8 +20,8 @@
*
*/
-#ifndef _MIDGARD_REGMAP_H_
-#define _MIDGARD_REGMAP_H_
+#ifndef _MIDG_REGMAP_H_
+#define _MIDG_REGMAP_H_
#include "mali_midg_coherency.h"
#include "mali_kbase_gpu_id.h"
@@ -325,112 +325,6 @@
#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1s cache then
flush all L2 caches then issue a flush region command to all MMUs */
-#if GPU_HAS_CSF_VERSION_10_REVISION_2
-/* GPU_COMMAND codes */
-#define GPU_COMMAND_CODE_NOP 0x00 /* No operation, nothing happens */
-#define GPU_COMMAND_CODE_RESET 0x01 /* Reset the GPU */
-#define GPU_COMMAND_CODE_PRFCNT 0x02 /* Clear or sample performance counters */
-#define GPU_COMMAND_CODE_TIME 0x03 /* Configure time sources */
-#define GPU_COMMAND_CODE_FLUSH_CACHES 0x04 /* Flush caches */
-#define GPU_COMMAND_CODE_SET_PROTECTED_MODE 0x05 /* Places the GPU in protected mode */
-#define GPU_COMMAND_CODE_FINISH_HALT 0x06 /* Halt CSF */
-
-/* GPU_COMMAND_RESET payloads */
-
-/* This will leave the state of active jobs UNDEFINED, but will leave the external bus in a defined and idle state.
- * Power domains will remain powered on.
- */
-#define GPU_COMMAND_RESET_PAYLOAD_FAST_RESET 0x00
-
-/* This will leave the state of active command streams UNDEFINED, but will leave the external bus in a defined and
- * idle state.
- */
-#define GPU_COMMAND_RESET_PAYLOAD_SOFT_RESET 0x01
-
-/* This reset will leave the state of currently active streams UNDEFINED, will likely lose data, and may leave
- * the system bus in an inconsistent state. Use only as a last resort when nothing else works.
- */
-#define GPU_COMMAND_RESET_PAYLOAD_HARD_RESET 0x02
-
-/* GPU_COMMAND_PRFCNT payloads */
-#define GPU_COMMAND_PRFCNT_PAYLOAD_SAMPLE 0x01 /* Sample performance counters */
-#define GPU_COMMAND_PRFCNT_PAYLOAD_CLEAR 0x02 /* Clear performance counters */
-
-/* GPU_COMMAND_TIME payloads */
-#define GPU_COMMAND_TIME_DISABLE 0x00 /* Disable cycle counter */
-#define GPU_COMMAND_TIME_ENABLE 0x01 /* Enable cycle counter */
-
-/* GPU_COMMAND_FLUSH_CACHES payloads */
-#define GPU_COMMAND_FLUSH_PAYLOAD_NONE 0x00 /* No flush */
-#define GPU_COMMAND_FLUSH_PAYLOAD_CLEAN 0x01 /* Clean the caches */
-#define GPU_COMMAND_FLUSH_PAYLOAD_INVALIDATE 0x02 /* Invalidate the caches */
-#define GPU_COMMAND_FLUSH_PAYLOAD_CLEAN_INVALIDATE 0x03 /* Clean and invalidate the caches */
-
-/* GPU_COMMAND command + payload */
-#define GPU_COMMAND_CODE_PAYLOAD(opcode, payload) \
- ((u32)opcode || ((u32)payload << 8))
-
-/* Final GPU_COMMAND form */
-/* No operation, nothing happens */
-#define GPU_COMMAND_NOP \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_NOP, 0)
-
-/* Stop all external bus interfaces, and then reset the entire GPU. */
-#define GPU_COMMAND_SOFT_RESET \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_RESET, GPU_COMMAND_RESET_PAYLOAD_SOFT_RESET)
-
-/* Immediately reset the entire GPU. */
-#define GPU_COMMAND_HARD_RESET \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_RESET, GPU_COMMAND_RESET_PAYLOAD_HARD_RESET)
-
-/* Clear all performance counters, setting them all to zero. */
-#define GPU_COMMAND_PRFCNT_CLEAR \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_PRFCNT, GPU_COMMAND_PRFCNT_PAYLOAD_CLEAR)
-
-/* Sample all performance counters, writing them out to memory */
-#define GPU_COMMAND_PRFCNT_SAMPLE \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_PRFCNT, GPU_COMMAND_PRFCNT_PAYLOAD_SAMPLE)
-
-/* Starts the cycle counter, and system timestamp propagation */
-#define GPU_COMMAND_CYCLE_COUNT_START \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_TIME, GPU_COMMAND_TIME_ENABLE)
-
-/* Stops the cycle counter, and system timestamp propagation */
-#define GPU_COMMAND_CYCLE_COUNT_STOP \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_TIME, GPU_COMMAND_TIME_DISABLE)
-
-/* Clean all caches */
-#define GPU_COMMAND_CLEAN_CACHES \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_FLUSH_CACHES, GPU_COMMAND_FLUSH_PAYLOAD_CLEAN)
-
-/* Clean and invalidate all caches */
-#define GPU_COMMAND_CLEAN_INV_CACHES \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_FLUSH_CACHES, GPU_COMMAND_FLUSH_PAYLOAD_CLEAN_INVALIDATE)
-
-/* Places the GPU in protected mode */
-#define GPU_COMMAND_SET_PROTECTED_MODE \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_SET_PROTECTED_MODE, 0)
-
-/* Halt CSF */
-#define GPU_COMMAND_FINISH_HALT \
- GPU_COMMAND_CODE_PAYLOAD(GPU_COMMAND_CODE_FINISH_HALT, 0)
-#else
-/* GPU_COMMAND values */
-#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */
-#define GPU_COMMAND_SOFT_RESET 0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */
-#define GPU_COMMAND_HARD_RESET 0x02 /* Immediately reset the entire GPU. */
-#define GPU_COMMAND_PRFCNT_CLEAR 0x03 /* Clear all performance counters, setting them all to zero. */
-#define GPU_COMMAND_PRFCNT_SAMPLE 0x04 /* Sample all performance counters, writing them out to memory */
-#define GPU_COMMAND_CYCLE_COUNT_START 0x05 /* Starts the cycle counter, and system timestamp propagation */
-#define GPU_COMMAND_CYCLE_COUNT_STOP 0x06 /* Stops the cycle counter, and system timestamp propagation */
-#define GPU_COMMAND_CLEAN_CACHES 0x07 /* Clean all caches */
-#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */
-#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */
-#endif
-
-
-/* End Command Values */
-
/* GPU_STATUS values */
#define GPU_STATUS_PRFCNT_ACTIVE (1 << 2) /* Set if the performance counters are active. */
#define GPU_STATUS_PROTECTED_MODE_ACTIVE (1 << 7) /* Set if protected mode is active */
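One detail of the block deleted above deserves a note: GPU_COMMAND_CODE_PAYLOAD combined its operands with a logical OR (||), which would collapse every non-NOP command to the value 1 rather than packing the payload into bits 15:8. Had the CSF block been kept, the intended bitwise form would presumably be:

/* Sketch of the intended composition (assuming opcode in bits 7:0 and
 * payload shifted to bit 8, as the deleted macro's shift suggests)
 */
#define GPU_COMMAND_CODE_PAYLOAD(opcode, payload) \
	((u32)(opcode) | ((u32)(payload) << 8))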
@@ -551,4 +445,5 @@
#define L2_CONFIG_HASH_MASK (0xFFul << L2_CONFIG_HASH_SHIFT)
/* End L2_CONFIG register */
-#endif /* _MIDGARD_REGMAP_H_ */
+
+#endif /* _MIDG_REGMAP_H_ */
diff --git a/mali_kbase/mali_midg_regmap_jm.h b/mali_kbase/mali_midg_regmap_jm.h
index 69996e2..58e4d08 100644
--- a/mali_kbase/mali_midg_regmap_jm.h
+++ b/mali_kbase/mali_midg_regmap_jm.h
@@ -195,4 +195,16 @@
#define JM_IDVS_GROUP_SIZE_SHIFT (16)
#define JM_MAX_IDVS_GROUP_SIZE (0x3F)
+/* GPU_COMMAND values */
+#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */
+#define GPU_COMMAND_SOFT_RESET 0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */
+#define GPU_COMMAND_HARD_RESET 0x02 /* Immediately reset the entire GPU. */
+#define GPU_COMMAND_PRFCNT_CLEAR 0x03 /* Clear all performance counters, setting them all to zero. */
+#define GPU_COMMAND_PRFCNT_SAMPLE 0x04 /* Sample all performance counters, writing them out to memory */
+#define GPU_COMMAND_CYCLE_COUNT_START 0x05 /* Starts the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CYCLE_COUNT_STOP 0x06 /* Stops the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CLEAN_CACHES 0x07 /* Clean all caches */
+#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */
+#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */
+
#endif /* _MIDG_REGMAP_JM_H_ */
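These codes are written to the GPU_COMMAND register of the GPU control block. A short usage sketch (kbase_reg_write() and GPU_CONTROL_REG() follow their use elsewhere in this driver; the surrounding context is assumed):

/* Issue a clean+invalidate of all GPU caches; kbdev is the usual
 * struct kbase_device pointer.
 */
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
		GPU_COMMAND_CLEAN_INV_CACHES);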
diff --git a/mali_kbase/tests/kutf/kutf_suite.c b/mali_kbase/tests/kutf/kutf_suite.c
index f3a8e9b..3307c0e 100644
--- a/mali_kbase/tests/kutf/kutf_suite.c
+++ b/mali_kbase/tests/kutf/kutf_suite.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014, 2017-2018 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014, 2017-2019 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -41,8 +41,6 @@
#include <kutf/kutf_utils.h>
#include <kutf/kutf_helpers.h>
-#if defined(CONFIG_DEBUG_FS)
-
/**
* struct kutf_application - Structure which represents kutf application
* @name: The name of this test application.
@@ -1139,6 +1137,8 @@ void kutf_test_abort(struct kutf_context *context)
}
EXPORT_SYMBOL(kutf_test_abort);
+#ifdef CONFIG_DEBUG_FS
+
/**
* init_kutf_core() - Module entry point.
*
@@ -1173,7 +1173,7 @@ static void __exit exit_kutf_core(void)
destroy_workqueue(kutf_workq);
}
-#else /* defined(CONFIG_DEBUG_FS) */
+#else /* CONFIG_DEBUG_FS */
/**
* init_kutf_core() - Module entry point.
@@ -1195,7 +1195,7 @@ static int __init init_kutf_core(void)
static void __exit exit_kutf_core(void)
{
}
-#endif /* defined(CONFIG_DEBUG_FS) */
+#endif /* CONFIG_DEBUG_FS */
MODULE_LICENSE("GPL");
diff --git a/mali_mgm/memory_group_manager.c b/mali_mgm/memory_group_manager.c
index 5e0ff3a..44f848a 100644
--- a/mali_mgm/memory_group_manager.c
+++ b/mali_mgm/memory_group_manager.c
@@ -32,29 +32,29 @@
#include <linux/mm.h>
#include <linux/memory_group_manager.h>
-#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE)
-static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
+#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
+static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, pgprot_t pgprot)
{
- int err = vm_insert_pfn(vma, addr, pfn);
+ int err;
- if (unlikely(err == -ENOMEM))
- return VM_FAULT_OOM;
- if (unlikely(err < 0 && err != -EBUSY))
+#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
+ ((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
+ (KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
+ if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
return VM_FAULT_SIGBUS;
- return VM_FAULT_NOPAGE;
-}
+ err = vm_insert_pfn(vma, addr, pfn);
+#else
+ err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
#endif
-#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
-static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, pgprot_t pgprot)
-{
- if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
+ if (unlikely(err == -ENOMEM))
+ return VM_FAULT_OOM;
+ if (unlikely(err < 0 && err != -EBUSY))
return VM_FAULT_SIGBUS;
- return vmf_insert_pfn(vma, addr, pfn);
+ return VM_FAULT_NOPAGE;
}
#endif
@@ -362,7 +362,7 @@ static vm_fault_t example_mgm_vmf_insert_pfn_prot(
dev_dbg(data->dev,
"%s(mgm_dev=%p, group_id=%d, vma=%p, addr=0x%lx, pfn=0x%lx, prot=0x%llx)\n",
__func__, (void *)mgm_dev, group_id, (void *)vma, addr, pfn,
- pgprot_val(prot));
+ (unsigned long long int) pgprot_val(prot));
if (WARN_ON(group_id < 0) ||
WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS))