Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	28
1 file changed, 27 insertions, 1 deletion
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index cc7dd1aaf08e..b14bb2225185 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -8,6 +8,9 @@
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 
+#include "tune.h"
+
+#include "walt.h"
 
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
@@ -1324,10 +1327,13 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+	schedtune_enqueue_task(p, cpu_of(rq));
+
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags);
+	walt_inc_cumulative_runnable_avg(rq, p);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1337,8 +1343,11 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+	schedtune_dequeue_task(p, cpu_of(rq));
+
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se, flags);
+	walt_dec_cumulative_runnable_avg(rq, p);
 
 	dequeue_pushable_task(rq, p);
 }
@@ -1381,7 +1390,8 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
+		  int sibling_count_hint)
 {
 	struct task_struct *curr;
 	struct rq *rq;
@@ -1528,6 +1538,8 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	return p;
 }
 
+extern int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running);
+
 static struct task_struct *
 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -1573,6 +1585,10 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	queue_push_tasks(rq);
 
+	if (p)
+		update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), rt_rq,
+				      rq->curr->sched_class == &rt_sched_class);
+
 	return p;
 }
 
@@ -1580,6 +1596,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
+	update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);
+
 	/*
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
@@ -1847,7 +1865,9 @@ retry:
 	}
 
 	deactivate_task(rq, next_task, 0);
+	next_task->on_rq = TASK_ON_RQ_MIGRATING;
 	set_task_cpu(next_task, lowest_rq->cpu);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	activate_task(lowest_rq, next_task, 0);
 	ret = 1;
 
@@ -2119,7 +2139,9 @@ static void pull_rt_task(struct rq *this_rq)
 			resched = true;
 
 			deactivate_task(src_rq, p, 0);
+			p->on_rq = TASK_ON_RQ_MIGRATING;
 			set_task_cpu(p, this_cpu);
+			p->on_rq = TASK_ON_RQ_QUEUED;
 			activate_task(this_rq, p, 0);
 			/*
 			 * We continue with the search, just in
@@ -2299,6 +2321,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	update_curr_rt(rq);
+	update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);
 
 	watchdog(rq, p);
 
@@ -2378,6 +2401,9 @@ const struct sched_class rt_sched_class = {
 	.switched_to		= switched_to_rt,
 
 	.update_curr		= update_curr_rt,
+#ifdef CONFIG_SCHED_WALT
+	.fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
+#endif
 };
 
 #ifdef CONFIG_RT_GROUP_SCHED
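
Taken together, the hunks hook SchedTune and WALT into the RT scheduling class: tasks are reported to schedtune_enqueue_task()/schedtune_dequeue_task(), the per-runqueue cumulative runnable average is raised and lowered as RT tasks are enqueued and dequeued, and update_rt_rq_load_avg() is called from the pick/put/tick paths. The snippet below is a minimal, self-contained userspace model of the cumulative-runnable-average bookkeeping only; the toy_rq/toy_task types and the demand field are illustrative assumptions, not the kernel's walt.c implementation.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's rq / task_struct (assumed shapes). */
struct toy_rq {
	uint64_t cumulative_runnable_avg;	/* sum of demand of runnable tasks */
};

struct toy_task {
	uint64_t demand;			/* WALT-style demand estimate */
};

/* Models the intent of walt_inc_cumulative_runnable_avg(): called on enqueue. */
static void inc_cumulative_runnable_avg(struct toy_rq *rq, struct toy_task *p)
{
	rq->cumulative_runnable_avg += p->demand;
}

/* Models walt_dec_cumulative_runnable_avg(): called on dequeue. */
static void dec_cumulative_runnable_avg(struct toy_rq *rq, struct toy_task *p)
{
	rq->cumulative_runnable_avg -= p->demand;
}

/*
 * Models the idea behind .fixup_cumulative_runnable_avg: when a queued
 * task's demand estimate changes, adjust the aggregate by the delta
 * instead of dequeueing and re-enqueueing the task.
 */
static void fixup_cumulative_runnable_avg(struct toy_rq *rq,
					  struct toy_task *p,
					  uint64_t new_demand)
{
	rq->cumulative_runnable_avg += new_demand - p->demand;
	p->demand = new_demand;
}

int main(void)
{
	struct toy_rq rq = { 0 };
	struct toy_task a = { .demand = 300 }, b = { .demand = 500 };

	inc_cumulative_runnable_avg(&rq, &a);		/* enqueue a */
	inc_cumulative_runnable_avg(&rq, &b);		/* enqueue b */
	fixup_cumulative_runnable_avg(&rq, &b, 700);	/* b's window demand grew */
	dec_cumulative_runnable_avg(&rq, &a);		/* dequeue a */

	printf("cumulative runnable avg: %llu\n",
	       (unsigned long long)rq.cumulative_runnable_avg);
	return 0;
}

The wrapping of set_task_cpu() with on_rq = TASK_ON_RQ_MIGRATING / TASK_ON_RQ_QUEUED in the push and pull paths appears intended to make the CPU change visible to this kind of accounting, so a migrating task's contribution is moved from the source to the destination runqueue rather than double-counted; that reading is inferred from the diff, not stated in it.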