author     Sidath Senanayake <sidaths@google.com>  2017-05-18 14:43:17 +0200
committer  Sidath Senanayake <sidaths@google.com>  2017-05-18 14:43:17 +0200
commit     6f5ab3baed824941f168ab133469f997d4450146 (patch)
tree       95dcbadaa979f84a8d75c0919af1b85c5afd1924 /mali_kbase/backend/gpu
parent     48f3554a4abf9ce182253fb5415a1a26b0790998 (diff)
download   gpu-6f5ab3baed824941f168ab133469f997d4450146.tar.gz
Mali Bifrost DDK r6p0 KMD
Provenance: b67d8663a (collaborate/EAC/b_r6p0)
BX304L01B-BU-00000-r6p0-01rel0
BX304L06A-BU-00000-r6p0-01rel0
BX304X07X-BU-00000-r6p0-01rel0

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: I6b19113374d523be9a75a252d672731600e991d6
Diffstat (limited to 'mali_kbase/backend/gpu')
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_devfreq.c          3
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_as.c          179
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_hw.c           18
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_jm_rb.c           18
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c   72
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c       52
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_internal.h      6
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_metrics.c       4
8 files changed, 103 insertions, 249 deletions
diff --git a/mali_kbase/backend/gpu/mali_kbase_devfreq.c b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
index 574eb3e..e280322 100644
--- a/mali_kbase/backend/gpu/mali_kbase_devfreq.c
+++ b/mali_kbase/backend/gpu/mali_kbase_devfreq.c
@@ -40,6 +40,7 @@
#define dev_pm_opp_get_voltage opp_get_voltage
#define dev_pm_opp_get_opp_count opp_get_opp_count
#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#define dev_pm_opp_find_freq_floor opp_find_freq_floor
#endif /* Linux >= 3.13 */
/**
@@ -362,7 +363,7 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
kbdev->dev->of_node,
kbdev->devfreq,
- &power_model_ops);
+ &kbase_ipa_power_model_ops);
if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
err = PTR_ERR(kbdev->devfreq_cooling);
dev_err(kbdev->dev,
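
The first hunk above extends the compatibility shims for kernels older than 3.13, where the OPP API used the opp_* naming; the second switches the devfreq cooling device to the kbase IPA power model ops. A minimal sketch of the shim pattern, assuming the usual kernel-version guard around it (the exact guard is not shown in this hunk):

    #include <linux/version.h>
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
    #include <linux/pm_opp.h>
    #else
    #include <linux/opp.h>
    /* Pre-3.13 kernels expose the OPP API under the opp_* names; alias
     * the modern dev_pm_opp_* names onto them so call sites need no
     * version checks of their own. */
    #define dev_pm_opp_get_voltage opp_get_voltage
    #define dev_pm_opp_find_freq_floor opp_find_freq_floor
    #endif
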
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_as.c b/mali_kbase/backend/gpu/mali_kbase_jm_as.c
index 202dcfa..92358f2 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_as.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_as.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,6 +22,7 @@
#include <mali_kbase.h>
#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
/**
* assign_and_activate_kctx_addr_space - Assign an AS to a context
@@ -47,65 +48,20 @@ static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
struct kbase_as *current_as)
{
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
- struct kbasep_js_per_as_data *js_per_as_data;
- int as_nr = current_as->number;
lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
lockdep_assert_held(&js_devdata->runpool_mutex);
lockdep_assert_held(&kbdev->hwaccess_lock);
- js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
-
/* Attribute handling */
kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
- /* Assign addr space */
- kctx->as_nr = as_nr;
-
- /* If the GPU is currently powered, activate this address space on the
- * MMU */
- if (kbdev->pm.backend.gpu_powered)
- kbase_mmu_update(kctx);
- /* If the GPU was not powered then the MMU will be reprogrammed on the
- * next pm_context_active() */
-
/* Allow it to run jobs */
kbasep_js_set_submit_allowed(js_devdata, kctx);
- /* Book-keeping */
- js_per_as_data->kctx = kctx;
- js_per_as_data->as_busy_refcount = 0;
-
kbase_js_runpool_inc_context_count(kbdev, kctx);
}
-/**
- * release_addr_space - Release an address space
- * @kbdev: Kbase device
- * @kctx_as_nr: Address space of context to release
- * @kctx: Context being released
- *
- * Context: kbasep_js_device_data.runpool_mutex must be held
- *
- * Release an address space, making it available for being picked again.
- */
-static void release_addr_space(struct kbase_device *kbdev, int kctx_as_nr,
- struct kbase_context *kctx)
-{
- struct kbasep_js_device_data *js_devdata;
- u16 as_bit = (1u << kctx_as_nr);
-
- js_devdata = &kbdev->js_data;
- lockdep_assert_held(&js_devdata->runpool_mutex);
-
- /* The address space must not already be free */
- KBASE_DEBUG_ASSERT(!(js_devdata->as_free & as_bit));
-
- js_devdata->as_free |= as_bit;
-
- kbase_js_runpool_dec_context_count(kbdev, kctx);
-}
-
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
struct kbase_context *kctx)
{
@@ -117,10 +73,7 @@ bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
}
for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
- struct kbasep_js_per_as_data *js_per_as_data =
- &kbdev->js_data.runpool_irq.per_as_data[i];
-
- if (js_per_as_data->kctx == kctx) {
+ if (kbdev->as_to_kctx[i] == kctx) {
/* Context already has ASID - mark as active */
return true;
}
@@ -133,7 +86,6 @@ bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
struct kbase_context *kctx)
{
- struct kbasep_js_per_as_data *js_per_as_data;
int as_nr = kctx->as_nr;
if (as_nr == KBASEP_AS_NR_INVALID) {
@@ -143,100 +95,24 @@ void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
lockdep_assert_held(&kbdev->hwaccess_lock);
- js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr];
- if (js_per_as_data->as_busy_refcount != 0) {
+ if (atomic_read(&kctx->refcount) != 1) {
WARN(1, "Attempting to release active ASID\n");
return;
}
- /* Release context from address space */
- js_per_as_data->kctx = NULL;
-
kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
- /* If the GPU is currently powered, de-activate this address space on
- * the MMU */
- if (kbdev->pm.backend.gpu_powered)
- kbase_mmu_disable(kctx);
- /* If the GPU was not powered then the MMU will be reprogrammed on the
- * next pm_context_active() */
-
- release_addr_space(kbdev, as_nr, kctx);
- kctx->as_nr = KBASEP_AS_NR_INVALID;
-}
-
-void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
- struct kbase_context *kctx)
-{
-}
-
-void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
- int as_nr)
-{
- struct kbasep_js_device_data *js_devdata;
-
- js_devdata = &kbdev->js_data;
-
- lockdep_assert_held(&js_devdata->runpool_mutex);
- js_devdata->as_free |= (1 << as_nr);
+ kbase_ctx_sched_release_ctx(kctx);
+ kbase_js_runpool_dec_context_count(kbdev, kctx);
}
-/**
- * check_is_runpool_full - check whether the runpool is full for a specified
- * context
- * @kbdev: Kbase device
- * @kctx: Kbase context
- *
- * If kctx == NULL, then this makes the least restrictive check on the
- * runpool. A specific context that is supplied immediately after could fail
- * the check, even under the same conditions.
- *
- * Therefore, once a context is obtained you \b must re-check it with this
- * function, since the return value could change to false.
- *
- * Context:
- * In all cases, the caller must hold kbasep_js_device_data.runpool_mutex.
- * When kctx != NULL the caller must hold the
- * kbasep_js_kctx_info.ctx.jsctx_mutex.
- * When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but
- * it doesn't do any harm to do so).
- *
- * Return: true if the runpool is full
- */
-static bool check_is_runpool_full(struct kbase_device *kbdev,
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
struct kbase_context *kctx)
{
- struct kbasep_js_device_data *js_devdata;
- bool is_runpool_full;
-
- js_devdata = &kbdev->js_data;
- lockdep_assert_held(&js_devdata->runpool_mutex);
-
- /* Regardless of whether a context is submitting or not, can't have more
- * than there are HW address spaces */
- is_runpool_full = (bool) (js_devdata->nr_all_contexts_running >=
- kbdev->nr_hw_address_spaces);
-
- if (kctx && !kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
- lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
- /* Contexts that submit might use less of the address spaces
- * available, due to HW workarounds. In which case, the runpool
- * is also full when the number of submitting contexts exceeds
- * the number of submittable address spaces.
- *
- * Both checks must be made: can have nr_user_address_spaces ==
- * nr_hw_address spaces, and at the same time can have
- * nr_user_contexts_running < nr_all_contexts_running. */
- is_runpool_full |= (bool)
- (js_devdata->nr_user_contexts_running >=
- kbdev->nr_user_address_spaces);
- }
-
- return is_runpool_full;
}
-int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
- struct kbase_context *kctx)
+int kbase_backend_find_and_release_free_address_space(
+ struct kbase_device *kbdev, struct kbase_context *kctx)
{
struct kbasep_js_device_data *js_devdata;
struct kbasep_js_kctx_info *js_kctx_info;
@@ -249,37 +125,23 @@ int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
mutex_lock(&js_devdata->runpool_mutex);
- /* First try to find a free address space */
- if (check_is_runpool_full(kbdev, kctx))
- i = -1;
- else
- i = ffs(js_devdata->as_free) - 1;
-
- if (i >= 0 && i < kbdev->nr_hw_address_spaces) {
- js_devdata->as_free &= ~(1 << i);
-
- mutex_unlock(&js_devdata->runpool_mutex);
- mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-
- return i;
- }
-
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- /* No address space currently free, see if we can release one */
for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
- struct kbasep_js_per_as_data *js_per_as_data;
struct kbasep_js_kctx_info *as_js_kctx_info;
struct kbase_context *as_kctx;
- js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[i];
- as_kctx = js_per_as_data->kctx;
+ as_kctx = kbdev->as_to_kctx[i];
as_js_kctx_info = &as_kctx->jctx.sched_info;
/* Don't release privileged or active contexts, or contexts with
- * jobs running */
+ * jobs running.
+ * Note that a context will have at least 1 reference (which
+ * was previously taken by kbasep_js_schedule_ctx()) until
+ * descheduled.
+ */
if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) &&
- js_per_as_data->as_busy_refcount == 0) {
+ atomic_read(&as_kctx->refcount) == 1) {
if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
as_kctx)) {
WARN(1, "Failed to retain active context\n");
@@ -314,8 +176,6 @@ int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
as_kctx,
true);
- js_devdata->as_free &= ~(1 << i);
-
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
@@ -353,16 +213,15 @@ bool kbase_backend_use_ctx(struct kbase_device *kbdev,
js_devdata = &kbdev->js_data;
js_kctx_info = &kctx->jctx.sched_info;
- if (kbdev->hwaccess.active_kctx == kctx ||
- kctx->as_nr != KBASEP_AS_NR_INVALID ||
- as_nr == KBASEP_AS_NR_INVALID) {
- WARN(1, "Invalid parameters to use_ctx()\n");
+ if (kbdev->hwaccess.active_kctx == kctx) {
+ WARN(1, "Context is already scheduled in\n");
return false;
}
new_address_space = &kbdev->as[as_nr];
lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
lockdep_assert_held(&kbdev->hwaccess_lock);
assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);
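
This rework drops the per-AS bookkeeping (js_per_as_data, the as_free bitmask) in favour of the kbdev->as_to_kctx[] map and an atomic kctx->refcount owned by the new kbase_ctx_sched layer. A scheduled context keeps at least the reference taken by kbasep_js_schedule_ctx() until it is descheduled, so a refcount of exactly 1 means "holds an address space but is otherwise idle". A minimal sketch of that invariant (helper name hypothetical; types come from the kbase headers):

    #include <linux/atomic.h>
    #include <mali_kbase.h>

    /* Sketch: true if @kctx occupies an address space but only the
     * scheduler's own reference remains, i.e. it is a candidate for
     * eviction. Mirrors the atomic_read(&kctx->refcount) == 1 checks
     * in the hunks above. */
    static bool kctx_evictable(struct kbase_context *kctx)
    {
    	return kctx->as_nr != KBASEP_AS_NR_INVALID &&
    	       atomic_read(&kctx->refcount) == 1;
    }
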
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index ef7497d..a6fb097 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -29,6 +29,7 @@
#include <mali_kbase_vinstr.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
@@ -1155,7 +1156,6 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
{
unsigned long flags;
struct kbase_device *kbdev;
- int i;
ktime_t end_timestamp = ktime_get();
struct kbasep_js_device_data *js_devdata;
bool try_schedule = false;
@@ -1193,6 +1193,7 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
KBASE_RESET_GPU_NOT_PENDING);
kbase_disjoint_state_down(kbdev);
wake_up(&kbdev->hwaccess.backend.reset_wait);
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
return;
}
@@ -1265,18 +1266,9 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
mutex_lock(&js_devdata->runpool_mutex);
mutex_lock(&kbdev->mmu_hw_mutex);
- /* Reprogram the GPU's MMU */
- for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-
- if (js_devdata->runpool_irq.per_as_data[i].kctx)
- kbase_mmu_update(
- js_devdata->runpool_irq.per_as_data[i].kctx);
- else
- kbase_mmu_disable_as(kbdev, i);
-
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_ctx_sched_restore_all_as(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&kbdev->mmu_hw_mutex);
kbase_pm_enable_interrupts(kbdev);
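
The reset worker no longer reprograms each address space by hand; the loop is factored into kbase_ctx_sched_restore_all_as(), shared with the power-on path in mali_kbase_pm_driver.c below. Its body is not part of this diff, but based on the loop it replaces it plausibly looks like:

    /* Sketch of the factored-out helper, reconstructed from the loop
     * it replaces. Caller holds kbdev->hwaccess_lock (and, at both
     * call sites in this patch, kbdev->mmu_hw_mutex). */
    void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
    {
    	int i;

    	lockdep_assert_held(&kbdev->hwaccess_lock);

    	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
    		struct kbase_context *kctx = kbdev->as_to_kctx[i];

    		if (kctx)
    			kbase_mmu_update(kctx);	/* reprogram this AS */
    		else
    			kbase_mmu_disable_as(kbdev, i); /* leave it off */
    	}
    }
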
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
index 418ae1f..1e9b9e5 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
@@ -672,10 +672,9 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
(katom->protected_state.enter ==
KBASE_ATOM_ENTER_PROTECTED_IDLE_L2)) {
kbase_vinstr_resume(kbdev->vinstr_ctx);
-#ifdef CONFIG_DEVFREQ_THERMAL
+
/* Go back to configured model for IPA */
kbase_ipa_model_use_configured_locked(kbdev);
-#endif
}
@@ -784,7 +783,8 @@ static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
if (kbdev->protected_ops) {
/* Switch GPU to protected mode */
- err = kbdev->protected_ops->protected_mode_enter(kbdev);
+ err = kbdev->protected_ops->protected_mode_enable(
+ kbdev->protected_dev);
if (err)
dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
@@ -806,6 +806,8 @@ static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
if (!kbdev->protected_ops)
return -EINVAL;
+ /* The protected mode disable callback will be called as part of reset
+ */
kbase_reset_gpu_silent(kbdev);
return 0;
@@ -841,10 +843,8 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
return -EAGAIN;
}
-#ifdef CONFIG_DEVFREQ_THERMAL
/* Use generic model for IPA in protected mode */
kbase_ipa_model_use_fallback_locked(kbdev);
-#endif
/* Once reaching this point GPU must be
* switched to protected mode or vinstr
@@ -907,10 +907,9 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
kbase_gpu_dequeue_atom(kbdev, js, NULL);
kbase_jm_return_atom_to_js(kbdev, katom[idx]);
}
-#ifdef CONFIG_DEVFREQ_THERMAL
+
/* Go back to configured model for IPA */
kbase_ipa_model_use_configured_locked(kbdev);
-#endif
return -EINVAL;
}
@@ -990,10 +989,9 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
}
kbase_vinstr_resume(kbdev->vinstr_ctx);
-#ifdef CONFIG_DEVFREQ_THERMAL
+
/* Use generic model for IPA in protected mode */
kbase_ipa_model_use_fallback_locked(kbdev);
-#endif
return -EINVAL;
}
@@ -1516,6 +1514,8 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
{
KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
/* protected mode sanity checks */
KBASE_DEBUG_ASSERT_MSG(
kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev),
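
Two threads of change run through this file: the IPA model switches are no longer conditional on CONFIG_DEVFREQ_THERMAL, and protected-mode entry now goes through protected_mode_enable() on the separate protected_mode_device rather than protected_mode_enter(kbdev). A sketch of the bracket the file now maintains around protected mode (wrapper names hypothetical; the calls themselves appear in the hunks above):

    /* Sketch: on entry, power modelling falls back to the generic IPA
     * model (HW counters are unreadable in protected mode); on exit or
     * failure, vinstr is resumed and the configured model restored.
     * Both calls are now compiled unconditionally. */
    static void ipa_protected_entry(struct kbase_device *kbdev)
    {
    	kbase_ipa_model_use_fallback_locked(kbdev);
    }

    static void ipa_protected_exit(struct kbase_device *kbdev)
    {
    	kbase_vinstr_resume(kbdev->vinstr_ctx);
    	kbase_ipa_model_use_configured_locked(kbdev);
    }
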
diff --git a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
index 4e5a74f..aa1817c 100644
--- a/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -200,15 +200,15 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
KBASE_MMU_FAULT_TYPE_BUS :
KBASE_MMU_FAULT_TYPE_PAGE;
-#ifdef CONFIG_MALI_GPU_MMU_AARCH64
- as->fault_extra_addr = kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
- kctx);
- as->fault_extra_addr <<= 32;
- as->fault_extra_addr |= kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
- kctx);
-#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+ as->fault_extra_addr = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
+ kctx);
+ as->fault_extra_addr <<= 32;
+ as->fault_extra_addr |= kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
+ kctx);
+ }
if (kbase_as_has_bus_fault(as)) {
/* Mark bus fault as handled.
@@ -248,34 +248,32 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
struct kbase_mmu_setup *current_setup = &as->current_setup;
u32 transcfg = 0;
-#ifdef CONFIG_MALI_GPU_MMU_AARCH64
- transcfg = current_setup->transcfg & 0xFFFFFFFFUL;
-
- /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
- /* Clear PTW_MEMATTR bits */
- transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
- /* Enable correct PTW_MEMATTR bits */
- transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
-
- if (kbdev->system_coherency == COHERENCY_ACE) {
- /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
- /* Clear PTW_SH bits */
- transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
- /* Enable correct PTW_SH bits */
- transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
- }
-
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
- transcfg, kctx);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
- (current_setup->transcfg >> 32) & 0xFFFFFFFFUL, kctx);
-
-#else /* CONFIG_MALI_GPU_MMU_AARCH64 */
-
- if (kbdev->system_coherency == COHERENCY_ACE)
- current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+ transcfg = current_setup->transcfg & 0xFFFFFFFFUL;
+
+ /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
+ /* Clear PTW_MEMATTR bits */
+ transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
+ /* Enable correct PTW_MEMATTR bits */
+ transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
+
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+ /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
+ /* Clear PTW_SH bits */
+ transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
+ /* Enable correct PTW_SH bits */
+ transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
+ }
-#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
+ transcfg, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
+ (current_setup->transcfg >> 32) & 0xFFFFFFFFUL,
+ kctx);
+ } else {
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
+ }
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
current_setup->transtab & 0xFFFFFFFFUL, kctx);
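
Support for the AArch64 page-table format moves from a compile-time option (CONFIG_MALI_GPU_MMU_AARCH64) to a runtime feature query, so one binary can drive both LPAE and AArch64-MMU GPUs at the cost of a per-call branch. As an illustration, the FAULTEXTRA read from the first hunk, refactored into a helper (helper name hypothetical):

    /* Sketch: composing the 64-bit FAULTEXTRA address from its HI/LO
     * register pair, now gated on the hardware feature table rather
     * than on Kconfig. */
    static u64 read_fault_extra(struct kbase_device *kbdev, int as_no,
    			    struct kbase_context *kctx)
    {
    	u64 addr;

    	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
    		return 0;

    	addr = kbase_reg_read(kbdev,
    			MMU_AS_REG(as_no, AS_FAULTEXTRA_HI), kctx);
    	addr = (addr << 32) |
    	       kbase_reg_read(kbdev,
    			MMU_AS_REG(as_no, AS_FAULTEXTRA_LO), kctx);
    	return addr;
    }
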
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index ac63aa5..ed19a8a 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -32,6 +32,7 @@
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_smc.h>
#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
#include <backend/gpu/mali_kbase_cache_policy_backend.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
@@ -1025,7 +1026,6 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
bool reset_required = is_resume;
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
unsigned long flags;
- int i;
KBASE_DEBUG_ASSERT(NULL != kbdev);
lockdep_assert_held(&js_devdata->runpool_mutex);
@@ -1067,18 +1067,9 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
}
mutex_lock(&kbdev->mmu_hw_mutex);
- /* Reprogram the GPU's MMU */
- for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-
- if (js_devdata->runpool_irq.per_as_data[i].kctx)
- kbase_mmu_update(
- js_devdata->runpool_irq.per_as_data[i].kctx);
- else
- kbase_mmu_disable_as(kbdev, i);
-
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_ctx_sched_restore_all_as(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&kbdev->mmu_hw_mutex);
/* Lastly, enable the interrupts */
@@ -1359,7 +1350,7 @@ void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
}
}
-static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
+static int kbase_pm_do_reset(struct kbase_device *kbdev)
{
struct kbasep_reset_timeout_data rtdata;
@@ -1439,14 +1430,29 @@ static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
return -EINVAL;
}
-static int kbase_pm_reset_do_protected(struct kbase_device *kbdev)
+static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
{
- KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
- KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev);
+ struct kbase_device *kbdev = pdev->data;
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_SET_PROTECTED_MODE, NULL);
+ return 0;
+}
- return kbdev->protected_ops->protected_mode_reset(kbdev);
+static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
+{
+ struct kbase_device *kbdev = pdev->data;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ return kbase_pm_do_reset(kbdev);
}
+struct protected_mode_ops kbase_native_protected_ops = {
+ .protected_mode_enable = kbasep_protected_mode_enable,
+ .protected_mode_disable = kbasep_protected_mode_disable
+};
+
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
{
unsigned long irq_flags;
@@ -1490,19 +1496,17 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
/* Soft reset the GPU */
- if (kbdev->protected_mode_support &&
- kbdev->protected_ops->protected_mode_reset)
- err = kbase_pm_reset_do_protected(kbdev);
+ if (kbdev->protected_mode_support)
+ err = kbdev->protected_ops->protected_mode_disable(
+ kbdev->protected_dev);
else
- err = kbase_pm_reset_do_normal(kbdev);
+ err = kbase_pm_do_reset(kbdev);
spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
if (kbdev->protected_mode)
resume_vinstr = true;
kbdev->protected_mode = false;
-#ifdef CONFIG_DEVFREQ_THERMAL
kbase_ipa_model_use_configured_locked(kbdev);
-#endif
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
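
kbase_pm_reset_do_normal()/kbase_pm_reset_do_protected() give way to a native implementation of the generic protected_mode_ops interface: enable writes GPU_COMMAND_SET_PROTECTED_MODE, and disable performs a full soft reset, since the GPU leaves protected mode on reset. A sketch of wiring these ops up at probe time; the assignment site is an assumption, as it is not part of this diff:

    /* Sketch: installing the native ops when the platform provides no
     * dedicated protected-mode driver. pdev->data is the back-pointer
     * the callbacks above rely on. */
    static void kbase_protected_mode_wire_native(struct kbase_device *kbdev)
    {
    	kbdev->protected_dev->data = kbdev;
    	kbdev->protected_ops = &kbase_native_protected_ops;
    	kbdev->protected_mode_support = true;
    }
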
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
index 58f615d..6804f45 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -486,11 +486,11 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
*/
void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
-#ifdef CONFIG_PM_DEVFREQ
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
unsigned long *total, unsigned long *busy);
void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
-#endif
+#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
#ifdef CONFIG_MALI_MIDGARD_DVFS
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c b/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c
index 7613e1d..024248c 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_metrics.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -155,7 +155,7 @@ static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
kbdev->pm.backend.metrics.time_period_start = now;
}
-#if defined(CONFIG_PM_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
* function.
*/
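
In both PM files the utilisation helpers are now guarded by the Mali-specific options (CONFIG_MALI_DEVFREQ, CONFIG_MALI_MIDGARD_DVFS) rather than the framework-level CONFIG_PM_DEVFREQ. A usage sketch for the two helpers under the corrected guard (the caller and its context are assumptions):

    #if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
    /* Sketch: sample the busy/total counters for a DVFS decision, then
     * reset them so the next sample covers a fresh window. */
    static void dvfs_sample(struct kbase_device *kbdev)
    {
    	unsigned long total, busy;

    	kbase_pm_get_dvfs_utilisation(kbdev, &total, &busy);
    	/* ... feed busy/total into the governor ... */
    	kbase_pm_reset_dvfs_utilisation(kbdev);
    }
    #endif
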