author     Andi Kleen <ak@suse.de>          2007-10-15 17:00:15 +0200
committer  Ingo Molnar <mingo@elte.hu>      2007-10-15 17:00:15 +0200
commit     3a5e4dc12f23fb96fafd4f5d0f61e6c3070f80a5 (patch)
tree       e7c0246126f7cf169cdd167555a1db209d5b03ef /kernel
parent     8cbbe86dfcfd68ad69916164bdc838d9e09adca8 (diff)
sched: cleanup: refactor normalize_rt_tasks
Replace a particularly ugly #ifdef with an inline and a new macro. Also split up the function so it is easier to read.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c   43
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index db88b5655ac..2c6295b395a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,12 @@ unsigned long long __attribute__((weak)) sched_clock(void)
         return (unsigned long long)jiffies * (1000000000 / HZ);
 }
 
+#ifdef CONFIG_SMP
+#define is_migration_thread(p, rq) ((p) == (rq)->migration_thread)
+#else
+#define is_migration_thread(p, rq) 0
+#endif
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -6532,12 +6538,25 @@ EXPORT_SYMBOL(__might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
+static void normalize_task(struct rq *rq, struct task_struct *p)
+{
+        int on_rq;
+        update_rq_clock(rq);
+        on_rq = p->se.on_rq;
+        if (on_rq)
+                deactivate_task(rq, p, 0);
+        __setscheduler(rq, p, SCHED_NORMAL, 0);
+        if (on_rq) {
+                activate_task(rq, p, 0);
+                resched_task(rq->curr);
+        }
+}
+
 void normalize_rt_tasks(void)
 {
         struct task_struct *g, *p;
         unsigned long flags;
         struct rq *rq;
-        int on_rq;
 
         read_lock_irq(&tasklist_lock);
         do_each_thread(g, p) {
@@ -6561,26 +6580,10 @@ void normalize_rt_tasks(void)
 
                 spin_lock_irqsave(&p->pi_lock, flags);
                 rq = __task_rq_lock(p);
-#ifdef CONFIG_SMP
-                /*
-                 * Do not touch the migration thread:
-                 */
-                if (p == rq->migration_thread)
-                        goto out_unlock;
-#endif
 
-                update_rq_clock(rq);
-                on_rq = p->se.on_rq;
-                if (on_rq)
-                        deactivate_task(rq, p, 0);
-                __setscheduler(rq, p, SCHED_NORMAL, 0);
-                if (on_rq) {
-                        activate_task(rq, p, 0);
-                        resched_task(rq->curr);
-                }
-#ifdef CONFIG_SMP
- out_unlock:
-#endif
+                if (!is_migration_thread(p, rq))
+                        normalize_task(rq, p);
+
                 __task_rq_unlock(rq);
                 spin_unlock_irqrestore(&p->pi_lock, flags);
         } while_each_thread(g, p);