path: root/kernel/sched_fair.c
author    Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>  2011-06-07 15:43:22 +0530
committer Ingo Molnar <mingo@elte.hu>                     2011-07-01 10:39:06 +0200
commit    2a46dae38087e62dd5fb08a6dadf1407717ed13c (patch)
tree      1840acef6c5003aee8744b64fec3a13002ba0abd /kernel/sched_fair.c
parent    36b2e922b5acd291051fab25bc7535274ce49532 (diff)
sched: Remove rcu_read_lock() from wake_affine()
wake_affine() is only called from one path: select_task_rq_fair(), which already has the RCU read lock held.

Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20110607101251.777.34547.stgit@IBM-009124035060.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
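For illustration, the call pattern below is a minimal kernel-style sketch (abbreviated bodies; the select_task_rq_fair() signature and surrounding details are assumptions for this sketch, not the verbatim sched_fair.c source). It shows why the removed pair is redundant: the sole caller already brackets the call in an RCU read-side critical section, and rcu_read_lock()/rcu_read_unlock() sections nest, so the inner pair added no extra protection.

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Sketch only: the real wake_affine() computes wake-affinity load heuristics. */
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	/*
	 * Dereferences RCU-protected data such as task_group(current).
	 * After this patch it relies on the caller's read-side critical
	 * section instead of taking rcu_read_lock() itself.
	 */
	return 1;
}

/* Sketch of the only caller; the sched-domain walk is omitted. */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	int new_cpu = task_cpu(p);

	rcu_read_lock();		/* outer read-side critical section */
	/* ... for_each_domain() walk eventually calls ... */
	wake_affine(NULL, p, 0);	/* already covered by the outer lock */
	rcu_read_unlock();

	return new_cpu;
}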
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  2
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 433491c2dc8..eb98f77b38e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1481,7 +1481,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
-	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1517,7 +1516,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
-	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within