author		Ke Wang <ke.wang@spreadtrum.com>	2019-01-21 13:41:45 +0800
committer	Todd Kjos <tkjos@google.com>		2019-02-14 16:44:43 +0000
commit		bfc525947c5686df850efb39c81aae3eb6a62ac3 (patch)
tree		86819f19d58651143d4583df46821b5364ddb405
parent		578e66bf09f94988cb232532644c1f8ee813d16c (diff)
ANDROID: sched/walt: Fix lockdep assert issue
commit c8d50e061e38 ("ANDROID: DEBUG: Temporarily disable lockdep asserting
on update_task_ravg") is a temporary commit that disables the lockdep assert
in walt_update_task_ravg(). The root cause is that two paths enter
walt_update_task_ravg() without holding the rq lock in the pure scheduler:
one is move_queued_task(), the other is detach_task().

Fix this by making sure the rq lock is held on those two paths, as was done
in android-4.4.

The following warning is emitted when lockdep_assert_held() is enabled in
walt_update_task_ravg():

[ 0.850221] c3 WARNING: CPU: 3 PID: 28 at kernel/sched/walt.c:763 walt_update_task_ravg+0x28c/0x634
...
[ 1.379256] c3 [<ffffff8008101038>] walt_update_task_ravg+0x28c/0x634
[ 1.385587] c3 [<ffffff80081017f8>] walt_fixup_busy_time+0x108/0x404
[ 1.392006] c3 [<ffffff80080dc91c>] set_task_cpu+0x220/0x2a4
[ 1.397639] c3 [<ffffff80080e48a8>] detach_task+0x60/0x74
[ 1.402929] c3 [<ffffff80080e4c24>] active_load_balance_cpu_stop+0x2cc/0x350
[ 1.410045] c3 [<ffffff800816582c>] cpu_stopper_thread+0xb0/0x118
[ 1.416029] c3 [<ffffff80080d418c>] smpboot_thread_fn+0x214/0x258
[ 1.422100] c3 [<ffffff80080cfd40>] kthread+0x128/0x130
[ 1.427304] c3 [<ffffff8008084834>] ret_from_fork+0x10/0x18

Bug: 120440300
Change-Id: Id3da7ad0dbc9d0d316cd7365b96c3686daba0340
Signed-off-by: Ke Wang <ke.wang@spreadtrum.com>
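[Editorial note: the locking rule this patch restores can be illustrated with a
small standalone sketch. It is illustrative only; struct toy_rq and the toy_*
helpers are hypothetical userspace stand-ins for rq->lock, lockdep_assert_held()
and double_lock_balance(), not kernel APIs. The idea is the same: the per-runqueue
accounting helper asserts its lock is held, so a migration path must take both the
source and destination locks, in a fixed order, before switching a task's CPU.]

	/*
	 * Hypothetical userspace sketch of the locking rule enforced here.
	 * Build with: cc -pthread sketch.c
	 */
	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	struct toy_rq {
		pthread_mutex_t lock;
		pthread_t owner;	/* crude stand-in for lockdep ownership tracking */
		int locked;
		int nr_running;
	};

	static struct toy_rq toy_rqs[4];

	static void toy_rq_lock(struct toy_rq *rq)
	{
		pthread_mutex_lock(&rq->lock);
		rq->owner = pthread_self();
		rq->locked = 1;
	}

	static void toy_rq_unlock(struct toy_rq *rq)
	{
		rq->locked = 0;
		pthread_mutex_unlock(&rq->lock);
	}

	/* Stand-in for walt_update_task_ravg(): must run with rq->lock held. */
	static void toy_update_accounting(struct toy_rq *rq)
	{
		/* ~lockdep_assert_held(&rq->lock) */
		assert(rq->locked && pthread_equal(rq->owner, pthread_self()));
		/* window-based load accounting would go here */
	}

	/* Stand-in for double_lock_balance(): take both locks in address order. */
	static void toy_double_lock(struct toy_rq *held, struct toy_rq *other)
	{
		if (other < held) {
			toy_rq_unlock(held);
			toy_rq_lock(other);
			toy_rq_lock(held);
		} else {
			toy_rq_lock(other);
		}
	}

	/* Migration path: both locks are held while per-rq accounting runs. */
	static void toy_migrate(struct toy_rq *src, struct toy_rq *dst)
	{
		toy_rq_lock(src);
		src->nr_running--;		/* ~dequeue from the source rq */
		toy_double_lock(src, dst);	/* as move_queued_task()/detach_task() now do */
		toy_update_accounting(src);	/* assert passes: src lock held */
		toy_update_accounting(dst);	/* assert passes: dst lock held */
		dst->nr_running++;		/* ~the later enqueue on the new CPU */
		toy_rq_unlock(dst);
		toy_rq_unlock(src);
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			pthread_mutex_init(&toy_rqs[i].lock, NULL);
		toy_rqs[0].nr_running = 1;
		toy_migrate(&toy_rqs[0], &toy_rqs[3]);
		printf("rq0=%d rq3=%d\n", toy_rqs[0].nr_running, toy_rqs[3].nr_running);
		return 0;
	}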
-rw-r--r--	kernel/sched/core.c	7
-rw-r--r--	kernel/sched/fair.c	20
2 files changed, 19 insertions, 8 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 09cff398896c..c674185cdd2c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -950,12 +950,17 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 				   struct task_struct *p, int new_cpu)
 {
+	struct rq *new_rq = cpu_rq(new_cpu);
+
 	lockdep_assert_held(&rq->lock);
 
 	p->on_rq = TASK_ON_RQ_MIGRATING;
 	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
+	rq_unpin_lock(rq, rf);
+	double_lock_balance(rq, new_rq);
 	set_task_cpu(p, new_cpu);
-	rq_unlock(rq, rf);
+	double_unlock_balance(rq, new_rq);
+	raw_spin_unlock(&rq->lock);
 
 	rq = cpu_rq(new_cpu);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 908ff5118f40..e7a6bd77aefc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8791,13 +8791,18 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 /*
  * detach_task() -- detach the task for the migration specified in env
  */
-static void detach_task(struct task_struct *p, struct lb_env *env)
+static void detach_task(struct task_struct *p, struct lb_env *env,
+			struct rq_flags *rf)
 {
 	lockdep_assert_held(&env->src_rq->lock);
 
 	p->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+	rq_unpin_lock(env->src_rq, rf);
+	double_lock_balance(env->src_rq, env->dst_rq);
 	set_task_cpu(p, env->dst_cpu);
+	double_unlock_balance(env->src_rq, env->dst_rq);
+	rq_repin_lock(env->src_rq, rf);
 }
 
 /*
@@ -8806,7 +8811,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
  *
  * Returns a task if successful and NULL otherwise.
  */
-static struct task_struct *detach_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env,
+					   struct rq_flags *rf)
 {
 	struct task_struct *p, *n;
@@ -8816,7 +8822,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
 		if (!can_migrate_task(p, env))
 			continue;
 
-		detach_task(p, env);
+		detach_task(p, env, rf);
 
 		/*
 		 * Right now, this is only the second place where
@@ -8838,7 +8844,7 @@ static const unsigned int sched_nr_migrate_break = 32;
  *
  * Returns number of detached tasks if successful and 0 otherwise.
  */
-static int detach_tasks(struct lb_env *env)
+static int detach_tasks(struct lb_env *env, struct rq_flags *rf)
 {
 	struct list_head *tasks = &env->src_rq->cfs_tasks;
 	struct task_struct *p;
@@ -8883,7 +8889,7 @@ static int detach_tasks(struct lb_env *env)
 		if ((load / 2) > env->imbalance)
 			goto next;
 
-		detach_task(p, env);
+		detach_task(p, env, rf);
 		list_add(&p->se.group_node, &env->tasks);
 
 		detached++;
@@ -10421,7 +10427,7 @@ more_balance:
 		 * cur_ld_moved - load moved in current iteration
 		 * ld_moved - cumulative load moved across iterations
 		 */
-		cur_ld_moved = detach_tasks(&env);
+		cur_ld_moved = detach_tasks(&env, &rf);
 
 		/*
 		 * We've detached some tasks from busiest_rq. Every
@@ -10848,7 +10854,7 @@ static int active_load_balance_cpu_stop(void *data)
 		schedstat_inc(sd->alb_count);
 		update_rq_clock(busiest_rq);
 
-		p = detach_one_task(&env);
+		p = detach_one_task(&env, &rf);
 		if (p) {
 			schedstat_inc(sd->alb_pushed);
 			/* Active balancing done, reset the failure counter. */