Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  32
1 file changed, 32 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ddb96553ad50..fdf8b2f92aa5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11526,6 +11526,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		goto err;
 
 	tg->shares = NICE_0_LOAD;
+	tg->latency_prio = DEFAULT_LATENCY_PRIO;
 
 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
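
This first hunk gives every new task group the default latency priority, next to its default share. The constants it relies on are defined elsewhere in the series; a minimal sketch of plausible definitions, assuming the latency-nice range mirrors the regular nice range (the exact values are an assumption, not part of this diff):

/* Assumed definitions, modeled on the regular nice/priority macros;
 * not part of this patch. */
#define LATENCY_NICE_MIN	(-20)
#define LATENCY_NICE_MAX	19
#define LATENCY_NICE_WIDTH	(LATENCY_NICE_MAX - LATENCY_NICE_MIN + 1)	/* 40 */
/* latency_nice 0 lands in the middle of the range, as nice 0 does */
#define DEFAULT_LATENCY_PRIO	(LATENCY_NICE_WIDTH / 2)			/* 20 */
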
@@ -11624,6 +11625,7 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	}
 
 	se->my_q = cfs_rq;
+	se->latency_weight = sched_latency_to_weight[tg->latency_prio];
 	/* guarantee group entities always have weight */
 	update_load_set(&se->load, NICE_0_LOAD);
 	se->parent = parent;
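
The second hunk seeds each group scheduling entity's latency_weight from a per-priority lookup table. The table's contents are outside this diff; below is a sketch of its assumed shape, plus a hypothetical helper that makes the implied nice-to-index mapping explicit (neither is part of the patch):

/* Assumed declaration: one signed weight per latency priority, with
 * negative weights for latency-sensitive levels and positive weights
 * for latency-tolerant ones. */
extern const int sched_latency_to_weight[LATENCY_NICE_WIDTH];

/* Hypothetical helper: map a user-visible latency_nice value
 * (-20..19) onto a table index (0..39). */
static inline long latency_nice_to_prio(long latency_nice)
{
	return latency_nice - LATENCY_NICE_MIN;
}
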
@@ -11754,6 +11756,36 @@ next_cpu:
return 0;
}
 
+int sched_group_set_latency(struct task_group *tg, long latency_prio)
+{
+	int i;
+
+	if (tg == &root_task_group)
+		return -EINVAL;
+
+	if (latency_prio < 0 ||
+	    latency_prio >= LATENCY_NICE_WIDTH)
+		return -EINVAL;
+
+	mutex_lock(&shares_mutex);
+
+	if (tg->latency_prio == latency_prio) {
+		mutex_unlock(&shares_mutex);
+		return 0;
+	}
+
+	tg->latency_prio = latency_prio;
+
+	for_each_possible_cpu(i) {
+		struct sched_entity *se = tg->se[i];
+
+		WRITE_ONCE(se->latency_weight, sched_latency_to_weight[latency_prio]);
+	}
+
+	mutex_unlock(&shares_mutex);
+	return 0;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
void free_fair_sched_group(struct task_group *tg) { }
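
sched_group_set_latency() rejects the root group and out-of-range priorities, then publishes the new weight to every CPU's group entity under shares_mutex; WRITE_ONCE() is used because other CPUs may read se->latency_weight without taking the lock. A sketch of how a cgroup-side handler might drive it, assuming a cpu.latency.nice file that accepts -20..19 (the handler and file names are assumptions; the cftype registration is not shown in this diff):

/* Hypothetical cgroup write handler; css_tg() extracts the task_group
 * from the cgroup state, and latency_nice_to_prio() is the sketch
 * helper above. */
static int cpu_latency_nice_write_s64(struct cgroup_subsys_state *css,
				      struct cftype *cft, s64 nice)
{
	if (nice < LATENCY_NICE_MIN || nice > LATENCY_NICE_MAX)
		return -ERANGE;

	/* shift -20..19 into the table's 0..39 index space */
	return sched_group_set_latency(css_tg(css), latency_nice_to_prio(nice));
}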