author     Rick Yiu <rickyiu@google.com>    2022-05-16 19:18:37 +0800
committer  Rick Yiu <rickyiu@google.com>    2022-05-16 19:27:15 +0800
commit     b124a02e458cddb26325bf7ac96cc33002fc3db4 (patch)
tree       7b10756e096b8ac655bd3738ea874ee5921e3d3a
parent     a06724a9cdd666e360265a15f0d2eed529b0aa8e (diff)
download   gs-b124a02e458cddb26325bf7ac96cc33002fc3db4.tar.gz
vendor_hook: sched: Add control for reduce_prefer_idle
Add a file node to control the usage of reducing prefer idle.

Bug: 178016953
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: If817867cf4c78b471040d20f8c610d4b25859418
-rw-r--r--  drivers/soc/google/vh/kernel/sched/fair.c        | 13
-rw-r--r--  drivers/soc/google/vh/kernel/sched/procfs_node.c | 33
2 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/drivers/soc/google/vh/kernel/sched/fair.c b/drivers/soc/google/vh/kernel/sched/fair.c
index c84e21ec73f9..cbd2923dcf14 100644
--- a/drivers/soc/google/vh/kernel/sched/fair.c
+++ b/drivers/soc/google/vh/kernel/sched/fair.c
@@ -25,6 +25,7 @@ EXPORT_SYMBOL_GPL(vendor_sched_cpu_to_em_pd);
 extern unsigned int vendor_sched_uclamp_threshold;
 extern unsigned int vendor_sched_util_post_init_scale;
 extern bool vendor_sched_npi_packing;
+extern bool vendor_sched_reduce_prefer_idle;
 
 static struct vendor_group_property vg[VG_MAX];
 
@@ -260,11 +261,15 @@ static inline bool get_prefer_idle(struct task_struct *p)
 	// For group based prefer_idle vote, filter out smaller or low prio or
 	// have throttled uclamp.max settings
 	// Ignore all checks, if the prefer_idle is from per-task API.
-	return (vg[get_vendor_task_struct(p)->group].prefer_idle &&
-		task_util_est(p) >= vendor_sched_uclamp_threshold &&
-		p->prio <= DEFAULT_PRIO &&
-		uclamp_eff_value(p, UCLAMP_MAX) == SCHED_CAPACITY_SCALE) ||
+	if (vendor_sched_reduce_prefer_idle)
+		return (vg[get_vendor_task_struct(p)->group].prefer_idle &&
+			task_util_est(p) >= vendor_sched_uclamp_threshold &&
+			p->prio <= DEFAULT_PRIO &&
+			uclamp_eff_value(p, UCLAMP_MAX) == SCHED_CAPACITY_SCALE) ||
 		get_vendor_task_struct(p)->prefer_idle;
+	else
+		return vg[get_vendor_task_struct(p)->group].prefer_idle ||
+			get_vendor_task_struct(p)->prefer_idle;
 }
 
 bool get_prefer_high_cap(struct task_struct *p)
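
For illustration, here is a minimal userspace sketch of the logic this hunk leaves in get_prefer_idle(). Every type and global below is a stand-in for the kernel's vendor structures (task_util_est(), vg[], uclamp_eff_value() and so on), not the real API:

/*
 * Minimal userspace sketch of the new get_prefer_idle() behaviour.
 * All names here are stand-ins for the kernel's vendor structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_PRIO         120
#define SCHED_CAPACITY_SCALE 1024

static bool reduce_prefer_idle = true;      /* vendor_sched_reduce_prefer_idle */
static bool group_prefer_idle = true;       /* vg[group].prefer_idle */
static unsigned int uclamp_threshold = 100; /* vendor_sched_uclamp_threshold */

struct task {
	unsigned int util_est;     /* stand-in for task_util_est(p) */
	int prio;                  /* stand-in for p->prio */
	unsigned int uclamp_max;   /* stand-in for uclamp_eff_value(p, UCLAMP_MAX) */
	bool per_task_prefer_idle; /* stand-in for get_vendor_task_struct(p)->prefer_idle */
};

static bool get_prefer_idle(const struct task *p)
{
	if (reduce_prefer_idle)
		/* Honour the group vote only for tasks that are big enough,
		 * high enough priority and not throttled by uclamp.max;
		 * the per-task vote always counts. */
		return (group_prefer_idle &&
			p->util_est >= uclamp_threshold &&
			p->prio <= DEFAULT_PRIO &&
			p->uclamp_max == SCHED_CAPACITY_SCALE) ||
			p->per_task_prefer_idle;

	/* Knob off: any group or per-task vote is honoured unconditionally. */
	return group_prefer_idle || p->per_task_prefer_idle;
}

int main(void)
{
	struct task small = { .util_est = 10, .prio = 120,
			      .uclamp_max = SCHED_CAPACITY_SCALE,
			      .per_task_prefer_idle = false };

	printf("filtered:   %d\n", get_prefer_idle(&small)); /* 0: below threshold */
	reduce_prefer_idle = false;
	printf("unfiltered: %d\n", get_prefer_idle(&small)); /* 1: group vote alone */
	return 0;
}

With the knob set, a small or low-priority task no longer inherits its group's prefer_idle vote; clearing it restores the old unconditional behaviour.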
diff --git a/drivers/soc/google/vh/kernel/sched/procfs_node.c b/drivers/soc/google/vh/kernel/sched/procfs_node.c
index 17f0141632e9..cbc7048c98d7 100644
--- a/drivers/soc/google/vh/kernel/sched/procfs_node.c
+++ b/drivers/soc/google/vh/kernel/sched/procfs_node.c
@@ -24,6 +24,7 @@ DECLARE_PER_CPU(struct uclamp_stats, uclamp_stats);
 unsigned int __read_mostly vendor_sched_uclamp_threshold;
 unsigned int __read_mostly vendor_sched_util_post_init_scale = DEF_UTIL_POST_INIT_SCALE;
 bool __read_mostly vendor_sched_npi_packing = true; //non prefer idle packing
+bool __read_mostly vendor_sched_reduce_prefer_idle = true;
 
 struct proc_dir_entry *vendor_sched;
 extern unsigned int sched_capacity_margin[CPU_NUM];
@@ -958,6 +959,37 @@ static ssize_t npi_packing_store(struct file *filp,
 
 PROC_OPS_RW(npi_packing);
 
+static int reduce_prefer_idle_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%s\n", vendor_sched_reduce_prefer_idle ? "true" : "false");
+
+	return 0;
+}
+
+static ssize_t reduce_prefer_idle_store(struct file *filp, const char __user *ubuf,
+					size_t count, loff_t *pos)
+{
+	bool enable;
+	char buf[MAX_PROC_SIZE];
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, ubuf, count))
+		return -EFAULT;
+
+	buf[count] = '\0';
+
+	if (kstrtobool(buf, &enable))
+		return -EINVAL;
+
+	vendor_sched_reduce_prefer_idle = enable;
+
+	return count;
+}
+
+PROC_OPS_RW(reduce_prefer_idle);
+
 #if IS_ENABLED(CONFIG_UCLAMP_STATS)
 static int uclamp_stats_show(struct seq_file *m, void *v)
 {
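
The store handler parses input with kstrtobool(), so the node accepts 1/0, y/n, t/f, on/off or true/false (only the leading character, or the first two for on/off, is examined). A hypothetical userspace toggle follows; the /proc/vendor_sched path is an assumption based on the vendor_sched proc_dir_entry above:

/*
 * Hypothetical userspace toggle for the new node. The path assumes the
 * vendor_sched proc directory is rooted at /proc/vendor_sched.
 */
#include <stdio.h>
#include <string.h>

#define NODE "/proc/vendor_sched/reduce_prefer_idle"

int main(void)
{
	char state[16] = "";
	FILE *f;

	/* Read the current setting; the show handler prints "true" or "false". */
	f = fopen(NODE, "r");
	if (!f || !fgets(state, sizeof(state), f)) {
		perror(NODE);
		return 1;
	}
	fclose(f);
	printf("current: %s", state);

	/* Flip it; any kstrtobool()-compatible spelling would do. */
	f = fopen(NODE, "w");
	if (!f) {
		perror(NODE);
		return 1;
	}
	fputs(strncmp(state, "true", 4) == 0 ? "false" : "true", f);
	fclose(f);
	return 0;
}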
@@ -1311,6 +1343,7 @@ static struct pentry entries[] = {
 	PROC_ENTRY(util_threshold),
 	PROC_ENTRY(util_post_init_scale),
 	PROC_ENTRY(npi_packing),
+	PROC_ENTRY(reduce_prefer_idle),
 	PROC_ENTRY(dump_task),
 	// pmu limit attribute
 	PROC_ENTRY(pmu_poll_time),
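
PROC_OPS_RW() and PROC_ENTRY() are defined elsewhere in procfs_node.c and are outside this diff. As a rough, hypothetical sketch only, such macros usually pair a single_open()-based struct proc_ops with a name/ops table entry; none of the names below are the file's actual definitions:

/* Hypothetical expansion; the real macros live elsewhere in procfs_node.c. */
#define PROC_OPS_RW(name)						\
	static int name##_proc_open(struct inode *inode,		\
				    struct file *file)			\
	{								\
		return single_open(file, name##_show, NULL);		\
	}								\
	static const struct proc_ops name##_proc_ops = {		\
		.proc_open	= name##_proc_open,			\
		.proc_read	= seq_read,				\
		.proc_lseek	= seq_lseek,				\
		.proc_release	= single_release,			\
		.proc_write	= name##_store,				\
	}

struct pentry {
	const char *name;
	const struct proc_ops *fops;
};
#define PROC_ENTRY(name) { __stringify(name), &name##_proc_ops }

Under that assumption, an init-time loop over entries[] would create each node with proc_create() beneath the vendor_sched directory.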