author     android-build-team Robot <android-build-team-robot@google.com>  2017-07-18 07:53:27 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2017-07-18 07:53:27 +0000
commit     2684a6c505531426bb7d4c8a68aab91abe6af05c (patch)
tree       1636806f0c5f37c506bf5b38aa7ee3e7c4c48644 /include/linux/sched.h
parent     39b9a227eb927f4a8963f60d763bd43fa5b2bb5b (diff)
parent     cc3d2b7361cf10851f579da257fdf2f1a7bd66b4 (diff)
download   v4.4-2684a6c505531426bb7d4c8a68aab91abe6af05c.tar.gz
tags       release-request-b8734135-c578-454d-bc9f-628062b9de89-for-git_nyc-iot-release-4188548, snap-temp-L42600000083191324
Change-Id: If12f2ca34c92ce87169ba986b5b24a1b6b377a3d
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 180
1 file changed, 179 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 239b4151ad2c..ad2c304b29b8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -173,6 +173,9 @@ extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+#ifdef CONFIG_CPU_QUIET
+extern u64 nr_running_integral(unsigned int cpu);
+#endif
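
nr_running_integral() reads as a monotonic time integral of the runqueue depth. A hedged sketch of a consumer, assuming exactly that semantic: sampling the integral twice and dividing by the elapsed time gives the average nr_running over the interval. The helper name and units below are assumptions, not part of this patch.

#include <linux/math64.h>

/*
 * Hypothetical CONFIG_CPU_QUIET consumer: average runqueue depth over a
 * sampling period, assuming nr_running_integral() accumulates
 * rq->nr_running over time in the same units as the timestamps.
 */
static u64 example_avg_nr_running(unsigned int cpu, u64 prev_time,
				  u64 prev_integral, u64 now)
{
	u64 integral = nr_running_integral(cpu);

	if (now <= prev_time)
		return 0;
	return div64_u64(integral - prev_integral, now - prev_time);
}
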
extern void calc_global_load(unsigned long ticks);
@@ -314,6 +317,15 @@ extern char ___assert_task_state[1 - 2*!!(
/* Task command name length */
#define TASK_COMM_LEN 16
+enum task_event {
+ PUT_PREV_TASK = 0, /* outgoing task at a context switch */
+ PICK_NEXT_TASK = 1, /* incoming task at a context switch */
+ TASK_WAKE = 2, /* task waking from sleep */
+ TASK_MIGRATE = 3, /* task moving between CPUs */
+ TASK_UPDATE = 4, /* in-place update of window statistics */
+ IRQ_UPDATE = 5, /* interrupt handling time being accounted */
+};
+
#include <linux/spinlock.h>
/*
@@ -831,6 +843,7 @@ struct user_struct {
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
@@ -928,6 +941,14 @@ enum cpu_idle_type {
#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+struct sched_capacity_reqs {
+ unsigned long cfs;
+ unsigned long rt;
+ unsigned long dl;
+
+ unsigned long total;
+};
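
A minimal sketch of how 'total' would be maintained, assuming it simply aggregates the per-class requests; the helper below is illustrative, not part of this patch:

/* Illustrative only: fold the per-class requests into the aggregate. */
static inline void example_update_total(struct sched_capacity_reqs *scr)
{
	scr->total = scr->cfs + scr->rt + scr->dl;
}
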
+
/*
* Wake-queues are lists of tasks with a pending wakeup, whose
* callers have already marked the task as woken internally,
@@ -982,7 +1003,8 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
@@ -990,6 +1012,7 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
+#define SD_SHARE_CAP_STATES 0x8000 /* Domain members share capacity state */
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
@@ -1022,8 +1045,57 @@ struct sched_domain_attr {
extern int sched_domain_level_max;
+struct capacity_state {
+ unsigned long cap; /* compute capacity */
+ unsigned long power; /* power consumption at this compute capacity */
+};
+
+struct idle_state {
+ unsigned long power; /* power consumption in this idle state */
+};
+
+struct sched_group_energy {
+ unsigned int nr_idle_states; /* number of idle states */
+ struct idle_state *idle_states; /* ptr to idle state array */
+ unsigned int nr_cap_states; /* number of capacity states */
+ struct capacity_state *cap_states; /* ptr to capacity state array */
+};
+
+unsigned long capacity_curr_of(int cpu);
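
Platform code is expected to supply one such table per group of CPUs. A hedged example for a hypothetical cluster with two idle states and two performance points; all names and numbers below are invented for illustration:

#include <linux/kernel.h>	/* ARRAY_SIZE */

static struct idle_state example_idle_states[] = {
	{ .power = 10 },		/* core WFI */
	{ .power = 0 },			/* cluster powered down */
};

static struct capacity_state example_cap_states[] = {
	{ .cap = 512, .power = 100 },	/* low OPP */
	{ .cap = 1024, .power = 300 },	/* high OPP */
};

static struct sched_group_energy example_sge = {
	.nr_idle_states	= ARRAY_SIZE(example_idle_states),
	.idle_states	= example_idle_states,
	.nr_cap_states	= ARRAY_SIZE(example_cap_states),
	.cap_states	= example_cap_states,
};
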
+
struct sched_group;
+struct eas_stats {
+ /* select_idle_sibling() stats */
+ u64 sis_attempts;
+ u64 sis_idle;
+ u64 sis_cache_affine;
+ u64 sis_suff_cap;
+ u64 sis_idle_cpu;
+ u64 sis_count;
+
+ /* select_energy_cpu_brute() stats */
+ u64 secb_attempts;
+ u64 secb_sync;
+ u64 secb_idle_bt;
+ u64 secb_insuff_cap;
+ u64 secb_no_nrg_sav;
+ u64 secb_nrg_sav;
+ u64 secb_count;
+
+ /* find_best_target() stats */
+ u64 fbt_attempts;
+ u64 fbt_no_cpu;
+ u64 fbt_no_sd;
+ u64 fbt_pref_idle;
+ u64 fbt_count;
+
+ /* cas */
+ /* select_task_rq_fair() stats */
+ u64 cas_attempts;
+ u64 cas_count;
+};
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
@@ -1084,6 +1156,8 @@ struct sched_domain {
unsigned int ttwu_wake_remote;
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
+
+ struct eas_stats eas_stats;
#endif
#ifdef CONFIG_SCHED_DEBUG
char *name;
@@ -1120,6 +1194,8 @@ bool cpus_share_cache(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
+typedef
+const struct sched_group_energy * const(*sched_domain_energy_f)(int cpu);
#define SDTL_OVERLAP 0x01
@@ -1132,6 +1208,7 @@ struct sd_data {
struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
+ sched_domain_energy_f energy;
int flags;
int numa_level;
struct sd_data data;
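
Together with the new 'energy' member, a platform's topology table can attach an energy model to each level. A hedged sketch reusing the hypothetical example_sge table from above; the function and table names are illustrative:

/* Illustrative: hand the scheduler a per-level energy model. */
static const struct sched_group_energy * const example_energy_fn(int cpu)
{
	return &example_sge;	/* hypothetical table defined earlier */
}

static struct sched_domain_topology_level example_topology[] = {
	{ cpu_coregroup_mask, cpu_core_flags, example_energy_fn, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, NULL, example_energy_fn, SD_INIT_NAME(DIE) },
	{ NULL, },
};

A platform would then install the table with set_sched_topology(example_topology).
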
@@ -1239,6 +1316,70 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+ /* select_idle_sibling() */
+ u64 nr_wakeups_sis_attempts;
+ u64 nr_wakeups_sis_idle;
+ u64 nr_wakeups_sis_cache_affine;
+ u64 nr_wakeups_sis_suff_cap;
+ u64 nr_wakeups_sis_idle_cpu;
+ u64 nr_wakeups_sis_count;
+
+ /* select_energy_cpu_brute() */
+ u64 nr_wakeups_secb_attempts;
+ u64 nr_wakeups_secb_sync;
+ u64 nr_wakeups_secb_idle_bt;
+ u64 nr_wakeups_secb_insuff_cap;
+ u64 nr_wakeups_secb_no_nrg_sav;
+ u64 nr_wakeups_secb_nrg_sav;
+ u64 nr_wakeups_secb_count;
+
+ /* find_best_target() */
+ u64 nr_wakeups_fbt_attempts;
+ u64 nr_wakeups_fbt_no_cpu;
+ u64 nr_wakeups_fbt_no_sd;
+ u64 nr_wakeups_fbt_pref_idle;
+ u64 nr_wakeups_fbt_count;
+
+ /* cas */
+ /* select_task_rq_fair() */
+ u64 nr_wakeups_cas_attempts;
+ u64 nr_wakeups_cas_count;
+};
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+#define RAVG_HIST_SIZE_MAX 5
+
+/* ravg represents frequency scaled cpu-demand of tasks */
+struct ravg {
+ /*
+ * 'mark_start' marks the beginning of an event (task waking up, task
+ * starting to execute, task being preempted) within a window
+ *
+ * 'sum' represents how runnable a task has been within current
+ * window. It incorporates both running time and wait time and is
+ * frequency scaled.
+ *
+ * 'sum_history' keeps track of history of 'sum' seen over previous
+ * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
+ * ignored.
+ *
+ * 'demand' represents maximum sum seen over previous
+ * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
+ * demand for tasks.
+ *
+ * 'curr_window' represents task's contribution to cpu busy time
+ * statistics (rq->curr_runnable_sum) in current window
+ *
+ * 'prev_window' represents task's contribution to cpu busy time
+ * statistics (rq->prev_runnable_sum) in previous window
+ */
+ u64 mark_start;
+ u32 sum, demand;
+ u32 sum_history[RAVG_HIST_SIZE_MAX];
+ u32 curr_window, prev_window;
+ u16 active_windows;
};
#endif
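
Per the comment block above, 'demand' is the maximum windowed 'sum' seen over the recorded history. A hedged, self-contained sketch of that selection; the helper is illustrative, and it ignores the averaging policies WALT also supports:

/* Illustrative only: demand as the max of the recorded window sums. */
static u32 example_compute_demand(const struct ravg *ra, unsigned int hist_size)
{
	u32 demand = 0;
	unsigned int i;

	if (hist_size > RAVG_HIST_SIZE_MAX)
		hist_size = RAVG_HIST_SIZE_MAX;
	for (i = 0; i < hist_size; i++)
		demand = max(demand, ra->sum_history[i]);
	return demand;
}
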
@@ -1399,6 +1540,15 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_WALT
+ struct ravg ravg;
+ /*
+ * 'init_load_pct' represents the initial task load assigned to children
+ * of this task
+ */
+ u32 init_load_pct;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
@@ -1474,6 +1624,10 @@ struct task_struct {
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+#ifdef CONFIG_CGROUPS
+ /* disallow userland-initiated cgroup migration */
+ unsigned no_cgroup_migration:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
@@ -1539,6 +1693,7 @@ struct task_struct {
struct list_head cpu_timers[3];
/* process credentials */
+ const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
const struct cred __rcu *real_cred; /* objective and real subjective task
* credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
@@ -3139,6 +3294,11 @@ static inline void inc_syscw(struct task_struct *tsk)
{
tsk->ioac.syscw++;
}
+
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+ tsk->ioac.syscfs++;
+}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
@@ -3155,6 +3315,9 @@ static inline void inc_syscr(struct task_struct *tsk)
static inline void inc_syscw(struct task_struct *tsk)
{
}
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+}
#endif
#ifndef TASK_SIZE_OF
@@ -3191,4 +3354,19 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+#define SCHED_CPUFREQ_RT (1U << 0)
+#define SCHED_CPUFREQ_DL (1U << 1)
+#define SCHED_CPUFREQ_IOWAIT (1U << 2)
+
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+ void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
+};
+
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned int flags));
+void cpufreq_remove_update_util_hook(int cpu);
+#endif /* CONFIG_CPU_FREQ */
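
A governor built on these hooks registers a per-CPU callback that the scheduler invokes on utilization updates. The callback runs in scheduler context and must not sleep, so real governors defer heavy work. A hedged usage sketch; all example_* names are illustrative, and only the two hook functions and the SCHED_CPUFREQ_* flags come from this header:

/* Illustrative per-CPU governor state. */
struct example_gov_cpu {
	struct update_util_data update_util;
	int cpu;
};

static void example_util_handler(struct update_util_data *data, u64 time,
				 unsigned int flags)
{
	struct example_gov_cpu *gc =
		container_of(data, struct example_gov_cpu, update_util);

	/* Scheduler context: keep this cheap, never sleep here. */
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* e.g. note an iowait wakeup so the next pick boosts gc->cpu */
	}
}

static void example_gov_start(struct example_gov_cpu *gc, int cpu)
{
	gc->cpu = cpu;
	cpufreq_add_update_util_hook(cpu, &gc->update_util,
				     example_util_handler);
}

static void example_gov_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}
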
+
#endif