Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- kernel/sched/fair.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 908ff5118f40..e7a6bd77aefc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8791,13 +8791,18 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* detach_task() -- detach the task for the migration specified in env
*/
-static void detach_task(struct task_struct *p, struct lb_env *env)
+static void detach_task(struct task_struct *p, struct lb_env *env,
+ struct rq_flags *rf)
{
lockdep_assert_held(&env->src_rq->lock);
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+ rq_unpin_lock(env->src_rq, rf);
+ double_lock_balance(env->src_rq, env->dst_rq);
set_task_cpu(p, env->dst_cpu);
+ double_unlock_balance(env->src_rq, env->dst_rq);
+ rq_repin_lock(env->src_rq, rf);
}
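
Note the locking dance around set_task_cpu(): the caller acquired and
lockdep-pinned src_rq->lock, so before double_lock_balance() can
transiently drop that lock to honour the lock ordering, the pin has to
be released and then re-established. A minimal sketch of the ordering
logic, loosely following _double_lock_balance() in kernel/sched/sched.h
(the name double_lock_balance_sketch and the abbreviated body are
illustrative, not the kernel's exact code):

	/*
	 * Sketch: this_rq->lock is already held; take busiest->lock
	 * without deadlocking by falling back to address order when a
	 * trylock fails. Returns 1 if this_rq->lock was dropped and
	 * retaken, so the caller must revalidate anything derived
	 * under it.
	 */
	static int double_lock_balance_sketch(struct rq *this_rq,
					      struct rq *busiest)
	{
		int dropped = 0;

		if (!raw_spin_trylock(&busiest->lock)) {
			if (busiest < this_rq) {
				/* Wrong order: release, retake both in order. */
				raw_spin_unlock(&this_rq->lock);
				raw_spin_lock(&busiest->lock);
				raw_spin_lock_nested(&this_rq->lock,
						     SINGLE_DEPTH_NESTING);
				dropped = 1;
			} else {
				raw_spin_lock_nested(&busiest->lock,
						     SINGLE_DEPTH_NESTING);
			}
		}
		return dropped;
	}
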
/*
@@ -8806,7 +8811,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
*
* Returns a task if successful and NULL otherwise.
*/
-static struct task_struct *detach_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env,
+ struct rq_flags *rf)
{
struct task_struct *p, *n;
@@ -8816,7 +8822,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
if (!can_migrate_task(p, env))
continue;
- detach_task(p, env);
+ detach_task(p, env, rf);
/*
* Right now, this is only the second place where
@@ -8838,7 +8844,7 @@ static const unsigned int sched_nr_migrate_break = 32;
*
* Returns number of detached tasks if successful and 0 otherwise.
*/
-static int detach_tasks(struct lb_env *env)
+static int detach_tasks(struct lb_env *env, struct rq_flags *rf)
{
struct list_head *tasks = &env->src_rq->cfs_tasks;
struct task_struct *p;
@@ -8883,7 +8889,7 @@ static int detach_tasks(struct lb_env *env)
if ((load / 2) > env->imbalance)
goto next;
- detach_task(p, env);
+ detach_task(p, env, rf);
list_add(&p->se.group_node, &env->tasks);
detached++;
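
For orientation, the surrounding loop is a greedy pass over src_rq's
cfs_tasks list: a task is skipped when half its hierarchical load
already exceeds the remaining imbalance (e.g. with env->imbalance at
1024, a task whose task_h_load() is 3000 is passed over, since moving
it would overshoot the target). A condensed sketch of the loop, with
the loop_max/loop_break throttling and the other skip conditions
omitted:

	/* Sketch of the detach_tasks() loop body, heavily abbreviated. */
	while (!list_empty(tasks)) {
		p = list_first_entry(tasks, struct task_struct,
				     se.group_node);

		load = task_h_load(p);
		if ((load / 2) > env->imbalance)
			goto next;		/* too big: would overshoot */

		detach_task(p, env, rf);
		list_add(&p->se.group_node, &env->tasks);
		detached++;
		env->imbalance -= load;

		if (env->imbalance <= 0)
			break;
		continue;
	next:
		list_move_tail(&p->se.group_node, tasks);
	}
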
@@ -10421,7 +10427,7 @@ more_balance:
* cur_ld_moved - load moved in current iteration
* ld_moved - cumulative load moved across iterations
*/
- cur_ld_moved = detach_tasks(&env);
+ cur_ld_moved = detach_tasks(&env, &rf);
/*
* We've detached some tasks from busiest_rq. Every
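
For context on where rf originates: load_balance() takes the busiest
runqueue's lock with rq_lock_irqsave(), which pins it, and that same
rq_flags must now flow into detach_tasks() so detach_task() can unpin
around its double-lock. A condensed sketch of the call pattern,
simplified from load_balance() with the retry and abort paths omitted:

	struct rq_flags rf;

	rq_lock_irqsave(busiest, &rf);	/* acquires and pins busiest->lock */
	update_rq_clock(busiest);

	/*
	 * detach_tasks() may transiently unpin and drop busiest->lock
	 * inside detach_task() while double-locking src and dst.
	 */
	cur_ld_moved = detach_tasks(&env, &rf);

	rq_unlock(busiest, &rf);
	local_irq_restore(rf.flags);

	if (cur_ld_moved) {
		attach_tasks(&env);	/* takes env.dst_rq->lock itself */
		ld_moved += cur_ld_moved;
	}
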
@@ -10848,7 +10854,7 @@ static int active_load_balance_cpu_stop(void *data)
schedstat_inc(sd->alb_count);
update_rq_clock(busiest_rq);
- p = detach_one_task(&env);
+ p = detach_one_task(&env, &rf);
if (p) {
schedstat_inc(sd->alb_pushed);
/* Active balancing done, reset the failure counter. */
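
The single detached task is re-attached to the target runqueue only
after busiest_rq's lock has been released, so the two rq locks are
never both held across the attach. A condensed sketch of how
active_load_balance_cpu_stop() finishes, following the function's
existing out_unlock structure (simplified):

	out_unlock:
		busiest_rq->active_balance = 0;
		rq_unlock(busiest_rq, &rf);

		if (p)
			attach_one_task(target_rq, p);	/* takes target_rq->lock */

		local_irq_enable();
		return 0;
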