author | Vincent Guittot <vincent.guittot@linaro.org> | 2015-01-09 14:05:25 +0100
committer | Vincent Guittot <vincent.guittot@linaro.org> | 2015-01-09 14:05:25 +0100
commit | 4de4b2f03d6386b26a06946023764b7257fb78c5 (patch)
tree | 7f5b825f10b565ade60271683e841bf954f6b63f /kernel/sched/fair.c
parent | 7742fc42858185d5401e1c350c78b864a804e79d (diff)
parent | 5f74d508f3f5edfe91ccb7437c139928a79db4f7 (diff)
Merge branch 'sched-morten-invariance' into test-sched-packing-tasks
Conflicts:
kernel/sched/fair.c
kernel/sched/sched.h
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 74 |
1 files changed, 59 insertions, 15 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d7b01342b0b..e9d79a8b7d00 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2507,9 +2507,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 							int runnable,
 							int running)
 {
-	u64 delta, periods;
-	u32 runnable_contrib;
-	int delta_w, decayed = 0;
+	u64 delta, scaled_delta, periods;
+	u32 runnable_contrib, scaled_runnable_contrib;
+	int delta_w, scaled_delta_w, decayed = 0;
 	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
 
 	delta = now - sa->last_runnable_update;
@@ -2543,11 +2543,12 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 		 * period and accrue it.
 		 */
 		delta_w = 1024 - delta_w;
+		scaled_delta_w = (delta_w * scale_freq) >> SCHED_CAPACITY_SHIFT;
+
 		if (runnable)
-			sa->runnable_avg_sum += delta_w;
+			sa->runnable_avg_sum += scaled_delta_w;
 		if (running)
-			sa->running_avg_sum += delta_w * scale_freq
-						>> SCHED_CAPACITY_SHIFT;
+			sa->running_avg_sum += scaled_delta_w;
 		sa->avg_period += delta_w;
 
 		delta -= delta_w;
@@ -2565,20 +2566,23 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 
 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
 		runnable_contrib = __compute_runnable_contrib(periods);
+		scaled_runnable_contrib = (runnable_contrib * scale_freq)
+						>> SCHED_CAPACITY_SHIFT;
+
 		if (runnable)
-			sa->runnable_avg_sum += runnable_contrib;
+			sa->runnable_avg_sum += scaled_runnable_contrib;
 		if (running)
-			sa->running_avg_sum += runnable_contrib * scale_freq
-						>> SCHED_CAPACITY_SHIFT;
+			sa->running_avg_sum += scaled_runnable_contrib;
 		sa->avg_period += runnable_contrib;
 	}
 
 	/* Remainder of delta accrued against u_0` */
+	scaled_delta = (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;
+
 	if (runnable)
-		sa->runnable_avg_sum += delta;
+		sa->runnable_avg_sum += scaled_delta;
 	if (running)
-		sa->running_avg_sum += delta * scale_freq
-					>> SCHED_CAPACITY_SHIFT;
+		sa->running_avg_sum += scaled_delta;
 	sa->avg_period += delta;
 
 	return decayed;
@@ -2743,7 +2747,8 @@ static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
 		__update_task_entity_utilization(se);
 	else
 		se->avg.utilization_avg_contrib =
-			group_cfs_rq(se)->utilization_load_avg;
+			group_cfs_rq(se)->utilization_load_avg +
+			group_cfs_rq(se)->utilization_blocked_avg;
 
 	return se->avg.utilization_avg_contrib - old_contrib;
 }
@@ -2757,6 +2762,15 @@ static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
 		cfs_rq->blocked_load_avg = 0;
 }
 
+static inline void subtract_utilization_blocked_contrib(struct cfs_rq *cfs_rq,
+						long utilization_contrib)
+{
+	if (likely(utilization_contrib < cfs_rq->utilization_blocked_avg))
+		cfs_rq->utilization_blocked_avg -= utilization_contrib;
+	else
+		cfs_rq->utilization_blocked_avg = 0;
+}
+
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 
 /* Update a sched_entity's runnable average */
@@ -2792,6 +2806,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 		cfs_rq->utilization_load_avg += utilization_delta;
 	} else {
 		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+		subtract_utilization_blocked_contrib(cfs_rq,
+						-utilization_delta);
 	}
 }
 
@@ -2809,14 +2825,20 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 		return;
 
 	if (atomic_long_read(&cfs_rq->removed_load)) {
-		unsigned long removed_load;
+		unsigned long removed_load, removed_utilization;
 		removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
+		removed_utilization =
+			atomic_long_xchg(&cfs_rq->removed_utilization, 0);
 		subtract_blocked_load_contrib(cfs_rq, removed_load);
+		subtract_utilization_blocked_contrib(cfs_rq,
+						removed_utilization);
 	}
 
 	if (decays) {
 		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
 						      decays);
+		cfs_rq->utilization_blocked_avg =
+			decay_load(cfs_rq->utilization_blocked_avg, decays);
 		atomic64_add(decays, &cfs_rq->decay_counter);
 		cfs_rq->last_decay = now;
 	}
@@ -2863,6 +2885,8 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 	/* migrated tasks did not contribute to our blocked load */
 	if (wakeup) {
 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+		subtract_utilization_blocked_contrib(cfs_rq,
+					se->avg.utilization_avg_contrib);
 		update_entity_load_avg(se, 0);
 	}
 
@@ -2889,6 +2913,8 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 	cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
 	if (sleep) {
 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+		cfs_rq->utilization_blocked_avg +=
+					se->avg.utilization_avg_contrib;
 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
 	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
 }
@@ -4306,7 +4332,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 /* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(const int cpu)
 {
-	return cpu_rq(cpu)->cfs.runnable_load_avg;
+	return cpu_rq(cpu)->cfs.runnable_load_avg +
+			cpu_rq(cpu)->cfs.blocked_load_avg;
 }
 
 /*
@@ -4787,6 +4814,8 @@ static int get_cpu_usage(int cpu)
 	unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
+	usage += cpu_rq(cpu)->cfs.utilization_blocked_avg;
+
 	if (usage >= SCHED_LOAD_SCALE)
 		return capacity;
 
@@ -4906,6 +4935,8 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 		se->avg.decay_count = -__synchronize_entity_decay(se);
 		atomic_long_add(se->avg.load_avg_contrib,
 						&cfs_rq->removed_load);
+		atomic_long_add(se->avg.utilization_avg_contrib,
+						&cfs_rq->removed_utilization);
 	}
 
 	/* We have migrated, no longer consider this task hot */
@@ -5893,6 +5924,14 @@ static unsigned long task_h_load(struct task_struct *p)
 #else
 static inline void update_blocked_averages(int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
+	update_rq_runnable_avg(rq, rq->nr_running);
+	update_cfs_rq_blocked_load(&rq->cfs, 1);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static unsigned long task_h_load(struct task_struct *p)
@@ -7936,6 +7975,8 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	if (se->avg.decay_count) {
 		__synchronize_entity_decay(se);
 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+		subtract_utilization_blocked_contrib(cfs_rq,
+					se->avg.utilization_avg_contrib);
 	}
 #endif
 }
@@ -7995,6 +8036,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_SMP
 	atomic64_set(&cfs_rq->decay_counter, 1);
 	atomic_long_set(&cfs_rq->removed_load, 0);
+	atomic_long_set(&cfs_rq->removed_utilization, 0);
 #endif
 }
 
@@ -8047,6 +8089,8 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 		 */
 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+		cfs_rq->utilization_blocked_avg +=
+					se->avg.utilization_avg_contrib;
 #endif
 	}
 }
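The frequency-invariance hunks above all follow one pattern: the raw wall-clock delta keeps feeding avg_period, while the runnable and running sums only accrue the delta scaled by arch_scale_freq_capacity(), so time spent at half of maximum frequency counts as half as much demand. Below is a minimal userspace sketch of that accumulation step, not kernel code: the struct, the _mock helper and the sample values are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10		/* capacity expressed in units of 1024 */

/*
 * Stand-in for arch_scale_freq_capacity(): current frequency of the CPU
 * relative to its maximum, in [0..1024]. 512 models a CPU at half speed.
 */
static unsigned long arch_scale_freq_capacity_mock(int cpu)
{
	(void)cpu;
	return 512;
}

struct sched_avg_mock {
	uint64_t runnable_avg_sum;
	uint64_t running_avg_sum;
	uint64_t avg_period;
};

/*
 * Frequency-invariant accumulation, mirroring the pattern of the patch:
 * the sums accrue the scaled delta, the period accrues the raw delta.
 */
static void accumulate(struct sched_avg_mock *sa, int cpu, uint64_t delta,
		       int runnable, int running)
{
	unsigned long scale_freq = arch_scale_freq_capacity_mock(cpu);
	uint64_t scaled_delta = (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;

	if (runnable)
		sa->runnable_avg_sum += scaled_delta;
	if (running)
		sa->running_avg_sum += scaled_delta;
	sa->avg_period += delta;	/* the period itself is not scaled */
}

int main(void)
{
	struct sched_avg_mock sa = { 0, 0, 0 };

	/* 1024us of runnable+running time at half of maximum frequency */
	accumulate(&sa, 0, 1024, 1, 1);
	printf("running_avg_sum=%llu avg_period=%llu\n",
	       (unsigned long long)sa.running_avg_sum,
	       (unsigned long long)sa.avg_period);	/* prints 512 vs 1024 */
	return 0;
}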
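The other half of the merge tracks the utilization of blocked entities in cfs_rq->utilization_blocked_avg, decays it alongside blocked_load_avg, and folds it back into get_cpu_usage() and the group contribution. The new subtract_utilization_blocked_contrib() helper clamps at zero rather than underflowing, because per-entity contributions and the per-cfs_rq aggregate decay at slightly different points in time. A simplified userspace sketch of that clamped-subtraction pattern follows; the _mock names and the sample values are illustrative assumptions, not the kernel implementation.

#include <stdio.h>

/* Only the field relevant to the blocked-utilization bookkeeping. */
struct cfs_rq_mock {
	long utilization_blocked_avg;
};

/*
 * Clamped subtraction, mirroring subtract_utilization_blocked_contrib():
 * if the tracked contribution exceeds the aggregate, clamp to zero
 * instead of letting the aggregate go negative.
 */
static void subtract_utilization_blocked_contrib_mock(struct cfs_rq_mock *cfs_rq,
						       long utilization_contrib)
{
	if (utilization_contrib < cfs_rq->utilization_blocked_avg)
		cfs_rq->utilization_blocked_avg -= utilization_contrib;
	else
		cfs_rq->utilization_blocked_avg = 0;
}

int main(void)
{
	struct cfs_rq_mock cfs_rq = { .utilization_blocked_avg = 300 };

	subtract_utilization_blocked_contrib_mock(&cfs_rq, 120);	/* 300 -> 180 */
	subtract_utilization_blocked_contrib_mock(&cfs_rq, 500);	/* clamps to 0 */
	printf("utilization_blocked_avg=%ld\n", cfs_rq.utilization_blocked_avg);
	return 0;
}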