diff options
author | Suren Baghdasaryan <surenb@google.com> | 2021-04-26 13:18:46 -0700 |
---|---|---|
committer | Suren Baghdasaryan <surenb@google.com> | 2021-05-01 00:20:43 +0000 |
commit | 3092012197aa1f3206c60dd422e9a3d12df915a4 (patch) | |
tree | d439db9b45162d289da0f8c8a5516293c780c905 | |
parent | 3881a45951c087848056cd36516465edba524eca (diff) | |
download | hikey-linaro-3092012197aa1f3206c60dd422e9a3d12df915a4.tar.gz |
ANDROID: GKI: QoS: Prevent usage of dev_pm_qos_request as pm_qos_request
pm_qos_set_value_for_cpus expects the list in pm_qos_constraints to
contain structs of type pm_qos_request. However requests from device
drivers will populate the list with dev_pm_qos_request structs.
pm_qos_set_value_for_cpus updates target_per_cpu array and since there
is no way to access target_per_cpu for device driver requests, we can
skip updating target_per_cpu for such requests. This will prevent
current issue when pm_qos_set_value_for_cpus accesses dev_pm_qos_request
structs as if they were pm_qos_request structs.
Fixes: 723feab600f7 ("ANDROID: GKI: QoS: Enhance framework to support cpu/irq specific QoS requests")
Bug: 183959482
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: Iaa8d349b3c1f9cd8357b2e7912b16aadef78165f
-rw-r--r-- | drivers/base/power/qos.c | 6 | ||||
-rw-r--r-- | include/linux/pm_qos.h | 3 | ||||
-rw-r--r-- | kernel/power/qos.c | 35 |
3 files changed, 31 insertions, 13 deletions
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 3382542b39b7..6571b8a6d992 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -143,11 +143,13 @@ static int apply_constraint(struct dev_pm_qos_request *req, value = 0; ret = pm_qos_update_target(&qos->resume_latency, - &req->data.pnode, action, value); + &req->data.pnode, action, value, + true); break; case DEV_PM_QOS_LATENCY_TOLERANCE: ret = pm_qos_update_target(&qos->latency_tolerance, - &req->data.pnode, action, value); + &req->data.pnode, action, value, + true); if (ret) { value = pm_qos_read_value(&qos->latency_tolerance); req->dev->power.set_latency_tolerance(req->dev, value); diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index e22de69dcd98..1d6f8439d67b 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -136,7 +136,8 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) } int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, - enum pm_qos_req_action action, int value); + enum pm_qos_req_action action, int value, + bool dev_req); bool pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val); diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 789dd20b6669..6ccadce69e97 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -265,12 +265,26 @@ static const struct file_operations pm_qos_debug_fops = { .release = single_release, }; -static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c) +static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c, + bool dev_req) { struct pm_qos_request *req = NULL; int cpu; s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value }; + /* + * pm_qos_set_value_for_cpus expects all c->list elements to be of type + * pm_qos_request, however requests from device will contain elements + * of type dev_pm_qos_request. 
+ * pm_qos_constraints.target_per_cpu can be accessed only for + * constraints associated with one of the pm_qos_class and present in + * pm_qos_array. Device requests are not associated with any of + * pm_qos_class, therefore their target_per_cpu cannot be accessed. We + * can safely skip updating target_per_cpu for device requests. + */ + if (dev_req) + return; + plist_for_each_entry(req, &c->list, node) { for_each_cpu(cpu, &req->cpus_affine) { switch (c->type) { @@ -304,7 +318,7 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c) * otherwise. */ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, - enum pm_qos_req_action action, int value) + enum pm_qos_req_action action, int value, bool dev_req) { unsigned long flags; int prev_value, curr_value, new_value; @@ -340,7 +354,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, curr_value = pm_qos_get_value(c); pm_qos_set_value(c, curr_value); - pm_qos_set_value_for_cpus(c); + pm_qos_set_value_for_cpus(c, dev_req); spin_unlock_irqrestore(&pm_qos_lock, flags); @@ -485,7 +499,7 @@ static void __pm_qos_update_request(struct pm_qos_request *req, if (new_value != req->node.prio) pm_qos_update_target( pm_qos_array[req->pm_qos_class]->constraints, - &req->node, PM_QOS_UPDATE_REQ, new_value); + &req->node, PM_QOS_UPDATE_REQ, new_value, false); } /** @@ -519,7 +533,7 @@ static void pm_qos_irq_release(struct kref *ref) spin_unlock_irqrestore(&pm_qos_lock, flags); pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, - c->default_value); + c->default_value, false); } static void pm_qos_irq_notify(struct irq_affinity_notify *notify, @@ -535,7 +549,8 @@ static void pm_qos_irq_notify(struct irq_affinity_notify *notify, cpumask_copy(&req->cpus_affine, mask); spin_unlock_irqrestore(&pm_qos_lock, flags); - pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio); + pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, 
req->node.prio, + false); } #endif @@ -608,7 +623,7 @@ void pm_qos_add_request(struct pm_qos_request *req, INIT_DELAYED_WORK(&req->work, pm_qos_work_fn); trace_pm_qos_add_request(pm_qos_class, value); pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints, - &req->node, PM_QOS_ADD_REQ, value); + &req->node, PM_QOS_ADD_REQ, value, false); #ifdef CONFIG_SMP if (req->type == PM_QOS_REQ_AFFINE_IRQ && @@ -623,7 +638,7 @@ void pm_qos_add_request(struct pm_qos_request *req, cpumask_setall(&req->cpus_affine); pm_qos_update_target( pm_qos_array[pm_qos_class]->constraints, - &req->node, PM_QOS_UPDATE_REQ, value); + &req->node, PM_QOS_UPDATE_REQ, value, false); } } #endif @@ -680,7 +695,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value, if (new_value != req->node.prio) pm_qos_update_target( pm_qos_array[req->pm_qos_class]->constraints, - &req->node, PM_QOS_UPDATE_REQ, new_value); + &req->node, PM_QOS_UPDATE_REQ, new_value, false); schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us)); } @@ -710,7 +725,7 @@ void pm_qos_remove_request(struct pm_qos_request *req) trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE); pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, &req->node, PM_QOS_REMOVE_REQ, - PM_QOS_DEFAULT_VALUE); + PM_QOS_DEFAULT_VALUE, false); memset(req, 0, sizeof(*req)); } EXPORT_SYMBOL_GPL(pm_qos_remove_request); |