sched: Fix select_idle_sibling()

Don't bother with selection when the current cpu is idle.  Recent load
balancing changes also make it no longer necessary to check wake_affine()
success before returning the selected sibling, so we now always use it.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301369.6785.36.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 8b911acdf0 (parent 21406928af)
Author:    Mike Galbraith <efault@gmx.de>
Date:      2010-03-11 17:17:16 +01:00
Committer: Ingo Molnar <mingo@elte.hu>

@@ -1439,7 +1439,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
-	int want_affine = 0;
+	int want_affine = 0, cpu_idle = !current->pid;
 	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
@@ -1497,13 +1497,15 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 			 * If there's an idle sibling in this domain, make that
 			 * the wake_affine target instead of the current cpu.
 			 */
-			if (tmp->flags & SD_SHARE_PKG_RESOURCES)
+			if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
 				target = select_idle_sibling(p, tmp, target);
 
 			if (target >= 0) {
 				if (tmp->flags & SD_WAKE_AFFINE) {
 					affine_sd = tmp;
 					want_affine = 0;
+					if (target != cpu)
+						cpu_idle = 1;
 				}
 				cpu = target;
 			}
@@ -1519,6 +1521,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 		sd = tmp;
 	}
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	if (sched_feat(LB_SHARES_UPDATE)) {
 		/*
 		 * Pick the largest domain to update shares over
@@ -1532,9 +1535,12 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 		if (tmp)
 			update_shares(tmp);
 	}
+#endif
 
-	if (affine_sd && wake_affine(affine_sd, p, sync))
-		return cpu;
+	if (affine_sd) {
+		if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+			return cpu;
+	}
 
 	while (sd) {
 		int load_idx = sd->forkexec_idx;
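For orientation, here is a minimal user-space sketch of the wake-affine fast path as it looks after this patch. The `!current->pid` test works because the idle task (swapper) is the only task with pid 0, so it flags a wakeup issued from an idle cpu. Everything below is a made-up illustration: pick_wake_cpu(), wake_affine_stub() and select_idle_sibling_stub() are hypothetical stand-ins, and the real select_task_rq_fair() additionally walks the sched-domain hierarchy and updates group shares.

	#include <stdio.h>
	
	/* Stand-in for wake_affine(): pretend the imbalance check fails. */
	static int wake_affine_stub(void)
	{
		return 0;
	}
	
	/* Stand-in for select_idle_sibling(): pretend the target itself is idle. */
	static int select_idle_sibling_stub(int target)
	{
		return target;
	}
	
	static int pick_wake_cpu(int cpu, int prev_cpu, int waker_is_idle_task)
	{
		int cpu_idle = waker_is_idle_task;	/* mirrors: cpu_idle = !current->pid */
		int target = cpu;
	
		/* Don't bother with sibling selection when the waking cpu is already idle. */
		if (!cpu_idle)
			target = select_idle_sibling_stub(target);
	
		/* A sibling other than the waking cpu was only chosen because it is idle,
		 * so treat it like an idle target from here on. */
		if (target != cpu)
			cpu_idle = 1;
		cpu = target;
	
		/* Always use an idle target (or prev_cpu) without requiring wake_affine()
		 * to succeed; otherwise fall back to the slow path. */
		if (cpu_idle || cpu == prev_cpu || wake_affine_stub())
			return cpu;
	
		return prev_cpu;	/* the real code continues with the domain walk */
	}
	
	int main(void)
	{
		printf("picked cpu %d\n", pick_wake_cpu(0, 2, 0));
		return 0;
	}

Run against the example inputs, the sketch returns cpu 0: the selected sibling is taken directly, which is exactly the behaviour change the patch describes for the wake_affine() check.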