From 90d08ba2b9b4be4aeca6a5b5a4b09fbcde30194d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:41 +0100 Subject: sched/cputime, powerpc32: Fix stale scaled stime on context switch On context switch with powerpc32, the cputime is accumulated in the thread_info struct. So the switching-in task must move forward its start time snapshot to the current time in order to later compute the delta spent in system mode. This is what we do for the normal cputime by initializing the starttime field to the value of the previous task's starttime which got freshly updated. But we are missing the update of the scaled cputime start time. As a result we may be accounting too much scaled cputime later. Fix this by initializing the scaled cputime the same way we do for normal cputime. Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-2-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/time.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index bc2e08d415fa..ce2165089318 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -407,6 +407,7 @@ void arch_vtime_task_switch(struct task_struct *prev) struct cpu_accounting_data *acct = get_accounting(current); acct->starttime = get_accounting(prev)->starttime; + acct->startspurr = get_accounting(prev)->startspurr; acct->system_time = 0; acct->user_time = 0; } -- cgit v1.2.3 From 8388d21468e7e7656867b67ab2ec98a78c9ad799 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:42 +0100 Subject: sched/cputime, ia64: Fix incorrect start cputime assignment on task switch On task switch we must initialize the current cputime of the next task using the value of the previous task which got freshly updated. But we are confusing that with doing the opposite, which should result in incorrect cputime accounting. 
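As a rough, self-contained illustration of the snapshot-forwarding rule being fixed here (plain C with made-up names, not the actual ia64 thread_info fields): on a task switch, the task being switched in has to inherit the freshly updated timestamp of the task being switched out, so that its next system-time delta is measured from the switch point rather than from a stale snapshot.

    #include <stdint.h>

    struct acct {
        uint64_t stamp;   /* last snapshot of the cycle counter */
        uint64_t stime;   /* system time accumulated since the last flush */
    };

    /* Hypothetical switch hook; 'now' stands in for the ITC/timebase read. */
    static void vtime_switch(struct acct *prev, struct acct *next, uint64_t now)
    {
        /* close out the outgoing task up to 'now' */
        prev->stime += now - prev->stamp;
        prev->stamp = now;

        /* the incoming task starts counting from that fresh snapshot;
         * copying in the opposite direction would leave the incoming task
         * with its stale stamp, which is the bug described above */
        next->stamp = prev->stamp;
        next->stime = 0;
    }

With the assignment in that direction, the incoming task's following delta is measured from the switch, which is the behaviour the one-line ia64 change below restores.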
Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 71775b95d6cc..637e7413632c 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -83,7 +83,7 @@ void arch_vtime_task_switch(struct task_struct *prev) struct thread_info *pi = task_thread_info(prev); struct thread_info *ni = task_thread_info(current); - pi->ac_stamp = ni->ac_stamp; + ni->ac_stamp = pi->ac_stamp; ni->ac_stime = ni->ac_utime = 0; } -- cgit v1.2.3 From c31cc6a5187e8b09ccee34f81728a90f80e872e7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:43 +0100 Subject: sched/cputime: Allow accounting system time using cpustat index In order to prepare for CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y to delay cputime accounting to the tick, let's provide APIs to account system time to precise contexts: hardirq, softirq, pure system, ... Inspired-by: Martin Schwidefsky Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-4-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/kernel_stat.h | 2 ++ kernel/sched/cputime.c | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 00f776816aa3..14b63bb714d4 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -80,6 +80,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) extern void account_user_time(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t); +extern void account_system_index_time(struct task_struct *, cputime_t, + enum cpu_usage_stat); extern void account_steal_time(cputime_t); extern void account_idle_time(cputime_t); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 7700a9cba335..aad42835938c 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -176,8 +176,8 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime) * @cputime: the cpu time spent in kernel space since the last update * @index: pointer to cpustat field that has to be updated */ -static inline -void __account_system_time(struct task_struct *p, cputime_t cputime, int index) +void account_system_index_time(struct task_struct *p, + cputime_t cputime, enum cpu_usage_stat index) { /* Add system time to process. */ p->stime += cputime; @@ -213,7 +213,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, else index = CPUTIME_SYSTEM; - __account_system_time(p, cputime, index); + account_system_index_time(p, cputime, index); } /* @@ -400,7 +400,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * So, we have to handle it separately here. 
* Also, p->stime needs to be updated for ksoftirqd. */ - __account_system_time(p, cputime, CPUTIME_SOFTIRQ); + account_system_index_time(p, cputime, CPUTIME_SOFTIRQ); } else if (user_tick) { account_user_time(p, cputime); } else if (p == rq->idle) { @@ -408,7 +408,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, } else if (p->flags & PF_VCPU) { /* System time or guest time */ account_guest_time(p, cputime); } else { - __account_system_time(p, cputime, CPUTIME_SYSTEM); + account_system_index_time(p, cputime, CPUTIME_SYSTEM); } } -- cgit v1.2.3 From 1213699ab426608ff1925ab263dd6925102bb92a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:44 +0100 Subject: sched/cputime: Export account_guest_time() In order to prepare for CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y to delay cputime accounting to the tick, let's allow archs to account cputime directly to gtime. Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-5-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/kernel_stat.h | 1 + kernel/sched/cputime.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 14b63bb714d4..cfd6c0c6d4e8 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -79,6 +79,7 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) } extern void account_user_time(struct task_struct *, cputime_t); +extern void account_guest_time(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_system_index_time(struct task_struct *, cputime_t, enum cpu_usage_stat); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index aad42835938c..5813ee4a5168 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -151,7 +151,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime) * @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in virtual machine since the last update */ -static void account_guest_time(struct task_struct *p, cputime_t cputime) +void account_guest_time(struct task_struct *p, cputime_t cputime) { u64 *cpustat = kcpustat_this_cpu->cpustat; -- cgit v1.2.3 From 8c8b73c4811f2b5e458a7418dca07d2ef85c7db1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:45 +0100 Subject: sched/cputime, powerpc: Prepare accounting structure for cputime flush on tick In order to prepare for CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y to delay cputime accounting to the tick, provide finegrained accumulators to powerpc in order to store the cputime until flushing. While at it, normalize the name of several fields according to common cputime naming. 
Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-6-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/accounting.h | 14 +++++++++++--- arch/powerpc/kernel/asm-offsets.c | 8 ++++---- arch/powerpc/kernel/time.c | 31 ++++++++++++++++--------------- arch/powerpc/xmon/xmon.c | 6 +++--- 4 files changed, 34 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h index c133246df467..3abcf98ed2e0 100644 --- a/arch/powerpc/include/asm/accounting.h +++ b/arch/powerpc/include/asm/accounting.h @@ -12,9 +12,17 @@ /* Stuff for accurate time accounting */ struct cpu_accounting_data { - unsigned long user_time; /* accumulated usermode TB ticks */ - unsigned long system_time; /* accumulated system TB ticks */ - unsigned long user_time_scaled; /* accumulated usermode SPURR ticks */ + /* Accumulated cputime values to flush on ticks*/ + unsigned long utime; + unsigned long stime; + unsigned long utime_scaled; + unsigned long stime_scaled; + unsigned long gtime; + unsigned long hardirq_time; + unsigned long softirq_time; + unsigned long steal_time; + unsigned long idle_time; + /* Internal counters */ unsigned long starttime; /* TB value snapshot */ unsigned long starttime_user; /* TB value on exit to usermode */ unsigned long startspurr; /* SPURR value snapshot */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 0601e6a7297c..e505319574ca 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -252,9 +252,9 @@ int main(void) DEFINE(ACCOUNT_STARTTIME_USER, offsetof(struct paca_struct, accounting.starttime_user)); DEFINE(ACCOUNT_USER_TIME, - offsetof(struct paca_struct, accounting.user_time)); + offsetof(struct paca_struct, accounting.utime)); DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct paca_struct, accounting.system_time)); + offsetof(struct paca_struct, accounting.stime)); DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso)); @@ -265,9 +265,9 @@ int main(void) DEFINE(ACCOUNT_STARTTIME_USER, offsetof(struct thread_info, accounting.starttime_user)); DEFINE(ACCOUNT_USER_TIME, - offsetof(struct thread_info, accounting.user_time)); + offsetof(struct thread_info, accounting.utime)); DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct thread_info, accounting.system_time)); + offsetof(struct thread_info, accounting.stime)); #endif #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index ce2165089318..17a2cd1c6a75 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -271,8 +271,8 @@ void accumulate_stolen_time(void) sst = scan_dispatch_log(acct->starttime_user); ust = scan_dispatch_log(acct->starttime); - acct->system_time -= sst; - acct->user_time -= ust; + acct->stime -= sst; + acct->utime -= ust; local_paca->stolen_time += ust + sst; local_paca->soft_enabled = save_soft_enabled; @@ -281,10 +281,11 @@ void accumulate_stolen_time(void) static inline u64 calculate_stolen_time(u64 stop_tb) { u64 stolen = 0; + struct 
cpu_accounting_data *acct = &local_paca->accounting; if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) { stolen = scan_dispatch_log(stop_tb); - get_paca()->accounting.system_time -= stolen; + acct->stime -= stolen; } stolen += get_paca()->stolen_time; @@ -316,17 +317,17 @@ static unsigned long vtime_delta(struct task_struct *tsk, now = mftb(); nowscaled = read_spurr(now); - acct->system_time += now - acct->starttime; + acct->stime += now - acct->starttime; acct->starttime = now; deltascaled = nowscaled - acct->startspurr; acct->startspurr = nowscaled; *stolen = calculate_stolen_time(now); - delta = acct->system_time; - acct->system_time = 0; - udelta = acct->user_time - acct->utime_sspurr; - acct->utime_sspurr = acct->user_time; + delta = acct->stime; + acct->stime = 0; + udelta = acct->utime - acct->utime_sspurr; + acct->utime_sspurr = acct->utime; /* * Because we don't read the SPURR on every kernel entry/exit, @@ -348,7 +349,7 @@ static unsigned long vtime_delta(struct task_struct *tsk, *sys_scaled = deltascaled; } } - acct->user_time_scaled += user_scaled; + acct->utime_scaled += user_scaled; return delta; } @@ -387,10 +388,10 @@ void vtime_account_user(struct task_struct *tsk) cputime_t utime, utimescaled; struct cpu_accounting_data *acct = get_accounting(tsk); - utime = acct->user_time; - utimescaled = acct->user_time_scaled; - acct->user_time = 0; - acct->user_time_scaled = 0; + utime = acct->utime; + utimescaled = acct->utime_scaled; + acct->utime = 0; + acct->utime_scaled = 0; acct->utime_sspurr = 0; account_user_time(tsk, utime); tsk->utimescaled += utimescaled; @@ -408,8 +409,8 @@ void arch_vtime_task_switch(struct task_struct *prev) acct->starttime = get_accounting(prev)->starttime; acct->startspurr = get_accounting(prev)->startspurr; - acct->system_time = 0; - acct->user_time = 0; + acct->stime = 0; + acct->utime = 0; } #endif /* CONFIG_PPC32 */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 9c0e17cf6886..9f3b170d9e0b 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2287,9 +2287,9 @@ static void dump_one_paca(int cpu) DUMP(p, subcore_sibling_mask, "x"); #endif - DUMP(p, accounting.user_time, "llx"); - DUMP(p, accounting.system_time, "llx"); - DUMP(p, accounting.user_time_scaled, "llx"); + DUMP(p, accounting.utime, "llx"); + DUMP(p, accounting.stime, "llx"); + DUMP(p, accounting.utime_scaled, "llx"); DUMP(p, accounting.starttime, "llx"); DUMP(p, accounting.starttime_user, "llx"); DUMP(p, accounting.startspurr, "llx"); -- cgit v1.2.3 From f828c3d0aebab130a19d36336b50afa3414fa0bc Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:46 +0100 Subject: sched/cputime, powerpc: Migrate stolen_time field to the accounting structure That in order to gather all cputime accumulation to the same place. 
Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-7-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/paca.h | 1 - arch/powerpc/kernel/time.c | 6 +++--- arch/powerpc/xmon/xmon.c | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 6a6792bb39fb..708c3e592eeb 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -187,7 +187,6 @@ struct paca_struct { /* Stuff for accurate time accounting */ struct cpu_accounting_data accounting; - u64 stolen_time; /* TB ticks taken by hypervisor */ u64 dtl_ridx; /* read index in dispatch log */ struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 17a2cd1c6a75..714313ebf030 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -273,7 +273,7 @@ void accumulate_stolen_time(void) ust = scan_dispatch_log(acct->starttime); acct->stime -= sst; acct->utime -= ust; - local_paca->stolen_time += ust + sst; + acct->steal_time += ust + sst; local_paca->soft_enabled = save_soft_enabled; } @@ -288,8 +288,8 @@ static inline u64 calculate_stolen_time(u64 stop_tb) acct->stime -= stolen; } - stolen += get_paca()->stolen_time; - get_paca()->stolen_time = 0; + stolen += acct->steal_time; + acct->steal_time = 0; return stolen; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 9f3b170d9e0b..3f864c36d847 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2294,7 +2294,7 @@ static void dump_one_paca(int cpu) DUMP(p, accounting.starttime_user, "llx"); DUMP(p, accounting.startspurr, "llx"); DUMP(p, accounting.utime_sspurr, "llx"); - DUMP(p, stolen_time, "llx"); + DUMP(p, accounting.steal_time, "llx"); #undef DUMP catch_memory_errors = 0; -- cgit v1.2.3 From a19ff1a2cc9227f82e97836a8ee3e593f622eaf9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:47 +0100 Subject: sched/cputime, powerpc/vtime: Accumulate cputime and account only on tick/task switch Currently CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y accounts the cputime on any context boundary: irq entry/exit, guest entry/exit, context switch, etc... Calling functions such as account_system_time(), account_user_time() and such can be costly, especially if they are called on many fastpath such as twice per IRQ. Those functions do more than just accounting to kcpustat and task cputime. Depending on the config, some subsystems can perform unpleasant multiplications and divisions, among other things. So lets accumulate the cputime instead and delay the accounting on ticks and context switches only. 
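Before the patch itself, here is a short self-contained sketch of that accumulate-then-flush pattern (plain C, illustrative names only; the real code below works on struct cpu_accounting_data and calls the account_*_time() APIs from the flush path):

    #include <stdint.h>
    #include <stdio.h>

    enum ctx { CTX_SYS, CTX_HARDIRQ, CTX_SOFTIRQ, CTX_GUEST, CTX_MAX };

    struct acct {
        uint64_t start;            /* timestamp of the last context boundary */
        uint64_t bucket[CTX_MAX];  /* cheap per-context accumulators */
    };

    /* Called on every context boundary: nothing more than an addition. */
    static void vtime_accumulate(struct acct *a, enum ctx c, uint64_t now)
    {
        a->bucket[c] += now - a->start;
        a->start = now;
    }

    /* Called only on the tick or on task switch: this is where the costly
     * accounting happens (account_system_index_time() etc. in the kernel). */
    static void vtime_flush(struct acct *a)
    {
        static const char *const name[CTX_MAX] = {
            "system", "hardirq", "softirq", "guest"
        };

        for (int c = 0; c < CTX_MAX; c++) {
            if (a->bucket[c])
                printf("account %-7s %llu\n", name[c],
                       (unsigned long long)a->bucket[c]);
            a->bucket[c] = 0;
        }
    }

    int main(void)
    {
        struct acct a = { .start = 100 };

        vtime_accumulate(&a, CTX_HARDIRQ, 130);  /* e.g. IRQ exit */
        vtime_accumulate(&a, CTX_SYS, 200);      /* back in plain kernel context */
        vtime_flush(&a);                         /* once per tick / task switch */
        return 0;
    }

The split keeps the per-boundary path down to a couple of additions, while the potentially expensive work (kcpustat updates, scalings, divisions) is paid at most once per tick or task switch.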
Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-8-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/time.c | 120 +++++++++++++++++++++++++++++---------------- 1 file changed, 77 insertions(+), 43 deletions(-) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 714313ebf030..4255e6930ac1 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -280,17 +280,10 @@ void accumulate_stolen_time(void) static inline u64 calculate_stolen_time(u64 stop_tb) { - u64 stolen = 0; - struct cpu_accounting_data *acct = &local_paca->accounting; - - if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) { - stolen = scan_dispatch_log(stop_tb); - acct->stime -= stolen; - } + if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) + return scan_dispatch_log(stop_tb); - stolen += acct->steal_time; - acct->steal_time = 0; - return stolen; + return 0; } #else /* CONFIG_PPC_SPLPAR */ @@ -306,27 +299,26 @@ static inline u64 calculate_stolen_time(u64 stop_tb) * or soft irq state. */ static unsigned long vtime_delta(struct task_struct *tsk, - unsigned long *sys_scaled, - unsigned long *stolen) + unsigned long *stime_scaled, + unsigned long *steal_time) { unsigned long now, nowscaled, deltascaled; - unsigned long udelta, delta, user_scaled; + unsigned long stime; + unsigned long utime, utime_scaled; struct cpu_accounting_data *acct = get_accounting(tsk); WARN_ON_ONCE(!irqs_disabled()); now = mftb(); nowscaled = read_spurr(now); - acct->stime += now - acct->starttime; + stime = now - acct->starttime; acct->starttime = now; deltascaled = nowscaled - acct->startspurr; acct->startspurr = nowscaled; - *stolen = calculate_stolen_time(now); + *steal_time = calculate_stolen_time(now); - delta = acct->stime; - acct->stime = 0; - udelta = acct->utime - acct->utime_sspurr; + utime = acct->utime - acct->utime_sspurr; acct->utime_sspurr = acct->utime; /* @@ -339,39 +331,54 @@ static unsigned long vtime_delta(struct task_struct *tsk, * the user ticks get saved up in paca->user_time_scaled to be * used by account_process_tick. 
*/ - *sys_scaled = delta; - user_scaled = udelta; - if (deltascaled != delta + udelta) { - if (udelta) { - *sys_scaled = deltascaled * delta / (delta + udelta); - user_scaled = deltascaled - *sys_scaled; + *stime_scaled = stime; + utime_scaled = utime; + if (deltascaled != stime + utime) { + if (utime) { + *stime_scaled = deltascaled * stime / (stime + utime); + utime_scaled = deltascaled - *stime_scaled; } else { - *sys_scaled = deltascaled; + *stime_scaled = deltascaled; } } - acct->utime_scaled += user_scaled; + acct->utime_scaled += utime_scaled; - return delta; + return stime; } void vtime_account_system(struct task_struct *tsk) { - unsigned long delta, sys_scaled, stolen; + unsigned long stime, stime_scaled, steal_time; + struct cpu_accounting_data *acct = get_accounting(tsk); + + stime = vtime_delta(tsk, &stime_scaled, &steal_time); + + stime -= min(stime, steal_time); + acct->steal_time += steal_time; - delta = vtime_delta(tsk, &sys_scaled, &stolen); - account_system_time(tsk, 0, delta); - tsk->stimescaled += sys_scaled; - if (stolen) - account_steal_time(stolen); + if ((tsk->flags & PF_VCPU) && !irq_count()) { + acct->gtime += stime; + acct->utime_scaled += stime_scaled; + } else { + if (hardirq_count()) + acct->hardirq_time += stime; + else if (in_serving_softirq()) + acct->softirq_time += stime; + else + acct->stime += stime; + + acct->stime_scaled += stime_scaled; + } } EXPORT_SYMBOL_GPL(vtime_account_system); void vtime_account_idle(struct task_struct *tsk) { - unsigned long delta, sys_scaled, stolen; + unsigned long stime, stime_scaled, steal_time; + struct cpu_accounting_data *acct = get_accounting(tsk); - delta = vtime_delta(tsk, &sys_scaled, &stolen); - account_idle_time(delta + stolen); + stime = vtime_delta(tsk, &stime_scaled, &steal_time); + acct->idle_time += stime + steal_time; } /* @@ -385,16 +392,45 @@ void vtime_account_idle(struct task_struct *tsk) */ void vtime_account_user(struct task_struct *tsk) { - cputime_t utime, utimescaled; struct cpu_accounting_data *acct = get_accounting(tsk); - utime = acct->utime; - utimescaled = acct->utime_scaled; + if (acct->utime) + account_user_time(tsk, acct->utime); + + if (acct->utime_scaled) + tsk->utimescaled += acct->utime_scaled; + + if (acct->gtime) + account_guest_time(tsk, acct->gtime); + + if (acct->steal_time) + account_steal_time(acct->steal_time); + + if (acct->idle_time) + account_idle_time(acct->idle_time); + + if (acct->stime) + account_system_index_time(tsk, acct->stime, CPUTIME_SYSTEM); + + if (acct->stime_scaled) + tsk->stimescaled += acct->stime_scaled; + + if (acct->hardirq_time) + account_system_index_time(tsk, acct->hardirq_time, CPUTIME_IRQ); + + if (acct->softirq_time) + account_system_index_time(tsk, acct->softirq_time, CPUTIME_SOFTIRQ); + acct->utime = 0; acct->utime_scaled = 0; acct->utime_sspurr = 0; - account_user_time(tsk, utime); - tsk->utimescaled += utimescaled; + acct->gtime = 0; + acct->steal_time = 0; + acct->idle_time = 0; + acct->stime = 0; + acct->stime_scaled = 0; + acct->hardirq_time = 0; + acct->softirq_time = 0; } #ifdef CONFIG_PPC32 @@ -409,8 +445,6 @@ void arch_vtime_task_switch(struct task_struct *prev) acct->starttime = get_accounting(prev)->starttime; acct->startspurr = get_accounting(prev)->startspurr; - acct->stime = 0; - acct->utime = 0; } #endif /* CONFIG_PPC32 */ -- cgit v1.2.3 From 7dd582305d19fd178bb42ecd1666285ecfb1657a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:48 +0100 Subject: sched/cputime, ia64: Accumulate cputime and account only 
on tick/task switch Currently CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y accounts the cputime on any context boundary: irq entry/exit, guest entry/exit, context switch, etc... Calling functions such as account_system_time(), account_user_time() and such can be costly, especially if they are called on many fastpath such as twice per IRQ. Those functions do more than just accounting to kcpustat and task cputime. Depending on the config, some subsystems can perform unpleasant multiplications and divisions, among other things. So lets accumulate the cputime instead and delay the accounting on ticks and context switches only. Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-9-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/thread_info.h | 6 ++++ arch/ia64/kernel/time.c | 62 ++++++++++++++++++++++++++++--------- 2 files changed, 53 insertions(+), 15 deletions(-) diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index c7026429816b..8742d741d19a 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -27,6 +27,12 @@ struct thread_info { mm_segment_t addr_limit; /* user-level address space limit */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + __u64 utime; + __u64 stime; + __u64 gtime; + __u64 hardirq_time; + __u64 softirq_time; + __u64 idle_time; __u64 ac_stamp; __u64 ac_leave; __u64 ac_stime; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 637e7413632c..37f1b315d9b6 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -63,14 +63,39 @@ extern cputime_t cycle_to_cputime(u64 cyc); void vtime_account_user(struct task_struct *tsk) { - cputime_t delta_utime; struct thread_info *ti = task_thread_info(tsk); + cputime_t delta; - if (ti->ac_utime) { - delta_utime = cycle_to_cputime(ti->ac_utime); - account_user_time(tsk, delta_utime); - ti->ac_utime = 0; + if (ti->utime) + account_user_time(tsk, cycle_to_cputime(ti->utime)); + + if (ti->gtime) + account_guest_time(tsk, cycle_to_cputime(ti->gtime)); + + if (ti->idle_time) + account_idle_time(cycle_to_cputime(ti->idle_time)); + + if (ti->stime) { + delta = cycle_to_cputime(ti->stime); + account_system_index_time(tsk, delta, CPUTIME_SYSTEM); + } + + if (ti->hardirq_time) { + delta = cycle_to_cputime(ti->hardirq_time); + account_system_index_time(tsk, delta, CPUTIME_IRQ); + } + + if (ti->softirq_time) { + delta = cycle_to_cputime(ti->softirq_time); + account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); } + + ti->utime = 0; + ti->gtime = 0; + ti->idle_time = 0; + ti->stime = 0; + ti->hardirq_time = 0; + ti->softirq_time = 0; } /* @@ -91,18 +116,15 @@ void arch_vtime_task_switch(struct task_struct *prev) * Account time for a transition between system, hard irq or soft irq state. * Note that this function is called with interrupts enabled. 
*/ -static cputime_t vtime_delta(struct task_struct *tsk) +static __u64 vtime_delta(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); - cputime_t delta_stime; - __u64 now; + __u64 now, delta_stime; WARN_ON_ONCE(!irqs_disabled()); now = ia64_get_itc(); - - delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); - ti->ac_stime = 0; + delta_stime = now - ti->ac_stamp; ti->ac_stamp = now; return delta_stime; @@ -110,15 +132,25 @@ static cputime_t vtime_delta(struct task_struct *tsk) void vtime_account_system(struct task_struct *tsk) { - cputime_t delta = vtime_delta(tsk); - - account_system_time(tsk, 0, delta); + struct thread_info *ti = task_thread_info(tsk); + __u64 stime = vtime_delta(tsk); + + if ((tsk->flags & PF_VCPU) && !irq_count()) + ti->gtime += stime; + else if (hardirq_count()) + ti->hardirq_time += stime; + else if (in_serving_softirq()) + ti->softirq_time += stime; + else + ti->stime += stime; } EXPORT_SYMBOL_GPL(vtime_account_system); void vtime_account_idle(struct task_struct *tsk) { - account_idle_time(vtime_delta(tsk)); + struct thread_info *ti = task_thread_info(tsk); + + ti->idle_time += vtime_delta(tsk); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -- cgit v1.2.3 From b7394a5f4ce9542666cc68422c3594ea854adc2c Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 5 Jan 2017 18:11:49 +0100 Subject: sched/cputime, s390: Implement delayed accounting of system time The account_system_time() function is called with a cputime that occurred while running in the kernel. The function detects which context the CPU is currently running in and accounts the time to the correct bucket. This forces the arch code to account the cputime for hardirq and softirq immediately. Such accounting function can be costly and perform unwelcome divisions and multiplications, among others. The arch code can delay the accounting for system time. For s390 the accounting is done once per timer tick and for each task switch. Signed-off-by: Martin Schwidefsky Signed-off-by: Frederic Weisbecker [ Rebase against latest linus tree and move account_system_index_scaled(). 
] Acked-by: Thomas Gleixner Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-10-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/s390/include/asm/lowcore.h | 65 +++++++++--------- arch/s390/include/asm/processor.h | 3 + arch/s390/kernel/vtime.c | 136 ++++++++++++++++++++++++-------------- 3 files changed, 125 insertions(+), 79 deletions(-) diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 9bfad2ad6312..61261e0e95c0 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -85,53 +85,56 @@ struct lowcore { __u64 mcck_enter_timer; /* 0x02c0 */ __u64 exit_timer; /* 0x02c8 */ __u64 user_timer; /* 0x02d0 */ - __u64 system_timer; /* 0x02d8 */ - __u64 steal_timer; /* 0x02e0 */ - __u64 last_update_timer; /* 0x02e8 */ - __u64 last_update_clock; /* 0x02f0 */ - __u64 int_clock; /* 0x02f8 */ - __u64 mcck_clock; /* 0x0300 */ - __u64 clock_comparator; /* 0x0308 */ + __u64 guest_timer; /* 0x02d8 */ + __u64 system_timer; /* 0x02e0 */ + __u64 hardirq_timer; /* 0x02e8 */ + __u64 softirq_timer; /* 0x02f0 */ + __u64 steal_timer; /* 0x02f8 */ + __u64 last_update_timer; /* 0x0300 */ + __u64 last_update_clock; /* 0x0308 */ + __u64 int_clock; /* 0x0310 */ + __u64 mcck_clock; /* 0x0318 */ + __u64 clock_comparator; /* 0x0320 */ /* Current process. */ - __u64 current_task; /* 0x0310 */ - __u8 pad_0x318[0x320-0x318]; /* 0x0318 */ - __u64 kernel_stack; /* 0x0320 */ + __u64 current_task; /* 0x0328 */ + __u8 pad_0x318[0x320-0x318]; /* 0x0330 */ + __u64 kernel_stack; /* 0x0338 */ /* Interrupt, panic and restart stack. */ - __u64 async_stack; /* 0x0328 */ - __u64 panic_stack; /* 0x0330 */ - __u64 restart_stack; /* 0x0338 */ + __u64 async_stack; /* 0x0340 */ + __u64 panic_stack; /* 0x0348 */ + __u64 restart_stack; /* 0x0350 */ /* Restart function and parameter. */ - __u64 restart_fn; /* 0x0340 */ - __u64 restart_data; /* 0x0348 */ - __u64 restart_source; /* 0x0350 */ + __u64 restart_fn; /* 0x0358 */ + __u64 restart_data; /* 0x0360 */ + __u64 restart_source; /* 0x0368 */ /* Address space pointer. */ - __u64 kernel_asce; /* 0x0358 */ - __u64 user_asce; /* 0x0360 */ + __u64 kernel_asce; /* 0x0370 */ + __u64 user_asce; /* 0x0378 */ /* * The lpp and current_pid fields form a * 64-bit value that is set as program * parameter with the LPP instruction. 
*/ - __u32 lpp; /* 0x0368 */ - __u32 current_pid; /* 0x036c */ + __u32 lpp; /* 0x0380 */ + __u32 current_pid; /* 0x0384 */ /* SMP info area */ - __u32 cpu_nr; /* 0x0370 */ - __u32 softirq_pending; /* 0x0374 */ - __u64 percpu_offset; /* 0x0378 */ - __u64 vdso_per_cpu_data; /* 0x0380 */ - __u64 machine_flags; /* 0x0388 */ - __u32 preempt_count; /* 0x0390 */ - __u8 pad_0x0394[0x0398-0x0394]; /* 0x0394 */ - __u64 gmap; /* 0x0398 */ - __u32 spinlock_lockval; /* 0x03a0 */ - __u32 fpu_flags; /* 0x03a4 */ - __u8 pad_0x03a8[0x0400-0x03a8]; /* 0x03a8 */ + __u32 cpu_nr; /* 0x0388 */ + __u32 softirq_pending; /* 0x038c */ + __u64 percpu_offset; /* 0x0390 */ + __u64 vdso_per_cpu_data; /* 0x0398 */ + __u64 machine_flags; /* 0x03a0 */ + __u32 preempt_count; /* 0x03a8 */ + __u8 pad_0x03ac[0x03b0-0x03ac]; /* 0x03ac */ + __u64 gmap; /* 0x03b0 */ + __u32 spinlock_lockval; /* 0x03b8 */ + __u32 fpu_flags; /* 0x03bc */ + __u8 pad_0x03c0[0x0400-0x03c0]; /* 0x03c0 */ /* Per cpu primary space access list */ __u32 paste[16]; /* 0x0400 */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 6bca916a5ba0..977a5b6501b8 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -111,7 +111,10 @@ struct thread_struct { unsigned int acrs[NUM_ACRS]; unsigned long ksp; /* kernel stack pointer */ unsigned long user_timer; /* task cputime in user space */ + unsigned long guest_timer; /* task cputime in kvm guest */ unsigned long system_timer; /* task cputime in kernel space */ + unsigned long hardirq_timer; /* task cputime in hardirq context */ + unsigned long softirq_timer; /* task cputime in softirq context */ unsigned long sys_call_table; /* system call table address */ mm_segment_t mm_segment; unsigned long gmap_addr; /* address of last gmap fault. */ diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 1b5c5ee9fc1b..1a53e0bdc90a 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -90,14 +90,41 @@ static void update_mt_scaling(void) __this_cpu_write(mt_scaling_jiffies, jiffies_64); } +static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new) +{ + u64 delta; + + delta = new - *tsk_vtime; + *tsk_vtime = new; + return delta; +} + + +static inline u64 scale_vtime(u64 vtime) +{ + u64 mult = __this_cpu_read(mt_scaling_mult); + u64 div = __this_cpu_read(mt_scaling_div); + + if (smp_cpu_mtid) + return vtime * mult / div; + return vtime; +} + +static void account_system_index_scaled(struct task_struct *p, + cputime_t cputime, cputime_t scaled, + enum cpu_usage_stat index) +{ + p->stimescaled += scaled; + account_system_index_time(p, cputime, index); +} + /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
*/ static int do_account_vtime(struct task_struct *tsk) { - u64 timer, clock, user, system, steal; - u64 user_scaled, system_scaled; + u64 timer, clock, user, guest, system, hardirq, softirq, steal; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -110,36 +137,53 @@ static int do_account_vtime(struct task_struct *tsk) #endif : "=m" (S390_lowcore.last_update_timer), "=m" (S390_lowcore.last_update_clock)); - S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; - S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; + clock = S390_lowcore.last_update_clock - clock; + timer -= S390_lowcore.last_update_timer; + + if (hardirq_count()) + S390_lowcore.hardirq_timer += timer; + else + S390_lowcore.system_timer += timer; /* Update MT utilization calculation */ if (smp_cpu_mtid && time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) update_mt_scaling(); - user = S390_lowcore.user_timer - tsk->thread.user_timer; - S390_lowcore.steal_timer -= user; - tsk->thread.user_timer = S390_lowcore.user_timer; - - system = S390_lowcore.system_timer - tsk->thread.system_timer; - S390_lowcore.steal_timer -= system; - tsk->thread.system_timer = S390_lowcore.system_timer; - - user_scaled = user; - system_scaled = system; - /* Do MT utilization scaling */ - if (smp_cpu_mtid) { - u64 mult = __this_cpu_read(mt_scaling_mult); - u64 div = __this_cpu_read(mt_scaling_div); + /* Calculate cputime delta */ + user = update_tsk_timer(&tsk->thread.user_timer, + READ_ONCE(S390_lowcore.user_timer)); + guest = update_tsk_timer(&tsk->thread.guest_timer, + READ_ONCE(S390_lowcore.guest_timer)); + system = update_tsk_timer(&tsk->thread.system_timer, + READ_ONCE(S390_lowcore.system_timer)); + hardirq = update_tsk_timer(&tsk->thread.hardirq_timer, + READ_ONCE(S390_lowcore.hardirq_timer)); + softirq = update_tsk_timer(&tsk->thread.softirq_timer, + READ_ONCE(S390_lowcore.softirq_timer)); + S390_lowcore.steal_timer += + clock - user - guest - system - hardirq - softirq; + + /* Push account value */ + if (user) { + account_user_time(tsk, user); + tsk->utimescaled += scale_vtime(user); + } - user_scaled = (user_scaled * mult) / div; - system_scaled = (system_scaled * mult) / div; + if (guest) { + account_guest_time(tsk, guest); + tsk->utimescaled += scale_vtime(guest); } - account_user_time(tsk, user); - tsk->utimescaled += user_scaled; - account_system_time(tsk, 0, system); - tsk->stimescaled += system_scaled; + + if (system) + account_system_index_scaled(tsk, system, scale_vtime(system), + CPUTIME_SYSTEM); + if (hardirq) + account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq), + CPUTIME_IRQ); + if (softirq) + account_system_index_scaled(tsk, softirq, scale_vtime(softirq), + CPUTIME_SOFTIRQ); steal = S390_lowcore.steal_timer; if ((s64) steal > 0) { @@ -147,16 +191,22 @@ static int do_account_vtime(struct task_struct *tsk) account_steal_time(steal); } - return virt_timer_forward(user + system); + return virt_timer_forward(user + guest + system + hardirq + softirq); } void vtime_task_switch(struct task_struct *prev) { do_account_vtime(prev); prev->thread.user_timer = S390_lowcore.user_timer; + prev->thread.guest_timer = S390_lowcore.guest_timer; prev->thread.system_timer = S390_lowcore.system_timer; + prev->thread.hardirq_timer = S390_lowcore.hardirq_timer; + prev->thread.softirq_timer = S390_lowcore.softirq_timer; S390_lowcore.user_timer = current->thread.user_timer; + S390_lowcore.guest_timer = current->thread.guest_timer; S390_lowcore.system_timer = 
current->thread.system_timer; + S390_lowcore.hardirq_timer = current->thread.hardirq_timer; + S390_lowcore.softirq_timer = current->thread.softirq_timer; } /* @@ -176,32 +226,22 @@ void vtime_account_user(struct task_struct *tsk) */ void vtime_account_irq_enter(struct task_struct *tsk) { - u64 timer, system, system_scaled; + u64 timer; timer = S390_lowcore.last_update_timer; S390_lowcore.last_update_timer = get_vtimer(); - S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; - - /* Update MT utilization calculation */ - if (smp_cpu_mtid && - time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) - update_mt_scaling(); - - system = S390_lowcore.system_timer - tsk->thread.system_timer; - S390_lowcore.steal_timer -= system; - tsk->thread.system_timer = S390_lowcore.system_timer; - system_scaled = system; - /* Do MT utilization scaling */ - if (smp_cpu_mtid) { - u64 mult = __this_cpu_read(mt_scaling_mult); - u64 div = __this_cpu_read(mt_scaling_div); - - system_scaled = (system_scaled * mult) / div; - } - account_system_time(tsk, 0, system); - tsk->stimescaled += system_scaled; - - virt_timer_forward(system); + timer -= S390_lowcore.last_update_timer; + + if ((tsk->flags & PF_VCPU) && (irq_count() == 0)) + S390_lowcore.guest_timer += timer; + else if (hardirq_count()) + S390_lowcore.hardirq_timer += timer; + else if (in_serving_softirq()) + S390_lowcore.softirq_timer += timer; + else + S390_lowcore.system_timer += timer; + + virt_timer_forward(timer); } EXPORT_SYMBOL_GPL(vtime_account_irq_enter); -- cgit v1.2.3 From c8d7dabf8f91fadd265e6eb87afb201d14ea299b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Jan 2017 18:11:50 +0100 Subject: sched/cputime: Rename vtime_account_user() to vtime_flush() CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y used to accumulate user time and account it on ticks and context switches only through the vtime_account_user() function. Now this model has been generalized on the 3 archs for all kind of cputime (system, irq, ...) and all the cputime flushing happens under vtime_account_user(). So let's rename this function to better reflect its new role. 
Signed-off-by: Frederic Weisbecker Acked-by: Thomas Gleixner Acked-by: Martin Schwidefsky Cc: Benjamin Herrenschmidt Cc: Christian Borntraeger Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1483636310-6557-11-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/kernel/time.c | 2 +- arch/powerpc/kernel/time.c | 6 ++---- arch/s390/kernel/vtime.c | 2 +- include/linux/kernel_stat.h | 2 +- include/linux/vtime.h | 7 +++++-- kernel/sched/cputime.c | 4 +--- 6 files changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 37f1b315d9b6..d040f12ea9f9 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -61,7 +61,7 @@ static struct clocksource *itc_clocksource; extern cputime_t cycle_to_cputime(u64 cyc); -void vtime_account_user(struct task_struct *tsk) +void vtime_flush(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); cputime_t delta; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 4255e6930ac1..02e97305d22b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -382,15 +382,13 @@ void vtime_account_idle(struct task_struct *tsk) } /* - * Transfer the user time accumulated in the paca - * by the exception entry and exit code to the generic - * process user time records. + * Account the whole cputime accumulated in the paca * Must be called with interrupts disabled. * Assumes that vtime_account_system/idle() has been called * recently (i.e. since the last entry from usermode) so that * get_paca()->user_time_scaled is up to date. */ -void vtime_account_user(struct task_struct *tsk) +void vtime_flush(struct task_struct *tsk) { struct cpu_accounting_data *acct = get_accounting(tsk); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 1a53e0bdc90a..0a9e5d67547d 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -214,7 +214,7 @@ void vtime_task_switch(struct task_struct *prev) * accounting system time in order to correctly compute * the stolen time accounting. 
*/ -void vtime_account_user(struct task_struct *tsk) +void vtime_flush(struct task_struct *tsk) { if (do_account_vtime(tsk)) virt_timer_expire(); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index cfd6c0c6d4e8..c3e38ded2d73 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -89,7 +89,7 @@ extern void account_idle_time(cputime_t); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE static inline void account_process_tick(struct task_struct *tsk, int user) { - vtime_account_user(tsk); + vtime_flush(tsk); } #else extern void account_process_tick(struct task_struct *, int user); diff --git a/include/linux/vtime.h b/include/linux/vtime.h index aa9bfea8804a..0681fe25abeb 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev) extern void vtime_account_system(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); -extern void vtime_account_user(struct task_struct *tsk); #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline void vtime_task_switch(struct task_struct *prev) { } static inline void vtime_account_system(struct task_struct *tsk) { } -static inline void vtime_account_user(struct task_struct *tsk) { } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void arch_vtime_task_switch(struct task_struct *tsk); +extern void vtime_account_user(struct task_struct *tsk); extern void vtime_user_enter(struct task_struct *tsk); static inline void vtime_user_exit(struct task_struct *tsk) { vtime_account_user(tsk); } + extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ +static inline void vtime_account_user(struct task_struct *tsk) { } static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } @@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk) /* On hard|softirq exit we always account to hard|softirq cputime */ vtime_account_system(tsk); } +extern void vtime_flush(struct task_struct *tsk); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static inline void vtime_account_irq_enter(struct task_struct *tsk) { } static inline void vtime_account_irq_exit(struct task_struct *tsk) { } +static inline void vtime_flush(struct task_struct *tsk) { } #endif diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 5813ee4a5168..f7c14cc71d06 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -437,9 +437,7 @@ void vtime_common_task_switch(struct task_struct *prev) else vtime_account_system(prev); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - vtime_account_user(prev); -#endif + vtime_flush(prev); arch_vtime_task_switch(prev); } #endif -- cgit v1.2.3 From d8ac897137a230ec351269f6378017f2decca512 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 21 Sep 2016 14:38:10 +0100 Subject: sched/core: Add wrappers for lockdep_(un)pin_lock() In preparation for adding diagnostic checks to catch missing calls to update_rq_clock(), provide wrappers for (re)pinning and unpinning rq->lock. Because the pending diagnostic checks allow state to be maintained in rq_flags across pin contexts, swap the 'struct pin_cookie' arguments for 'struct rq_flags *'. 
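Reduced to a self-contained sketch (struct pin_cookie and the lockdep calls are stubbed out here; only the shape of the wrappers matters), the change replaces the raw cookie that every call site had to thread around with a flags structure that owns the cookie and can later carry extra per-pin-context state, such as the pending clock-update diagnostics:

    /* Stand-ins for the kernel pieces involved (illustrative only). */
    struct pin_cookie { unsigned int gen; };
    struct rq { int lock; };

    static struct pin_cookie lockdep_pin_lock(int *l)
    {
        (void)l;
        return (struct pin_cookie){ 0 };
    }
    static void lockdep_unpin_lock(int *l, struct pin_cookie c) { (void)l; (void)c; }
    static void lockdep_repin_lock(int *l, struct pin_cookie c) { (void)l; (void)c; }

    /* The wrapper owns the cookie; diagnostic fields can be added here later
     * without touching any of the scheduler call sites again. */
    struct rq_flags {
        unsigned long flags;
        struct pin_cookie cookie;
    };

    static void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
    {
        rf->cookie = lockdep_pin_lock(&rq->lock);
    }

    static void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
    {
        lockdep_unpin_lock(&rq->lock, rf->cookie);
    }

    static void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
    {
        lockdep_repin_lock(&rq->lock, rf->cookie);
    }

Call sites then pass struct rq_flags * down through pick_next_task() and the wakeup paths instead of a bare cookie, which is the mechanical substitution the hunks below perform.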
Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Byungchul Park Cc: Frederic Weisbecker Cc: Jan Kara Cc: Linus Torvalds Cc: Luca Abeni Cc: Mel Gorman Cc: Mike Galbraith Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Petr Mladek Cc: Rik van Riel Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Wanpeng Li Cc: Yuyang Du Link: http://lkml.kernel.org/r/20160921133813.31976-5-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 80 ++++++++++++++++++++++++------------------------ kernel/sched/deadline.c | 10 +++--- kernel/sched/fair.c | 6 ++-- kernel/sched/idle_task.c | 2 +- kernel/sched/rt.c | 6 ++-- kernel/sched/sched.h | 31 ++++++++++++++----- kernel/sched/stop_task.c | 2 +- 7 files changed, 76 insertions(+), 61 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c56fb57f2991..41df935180ea 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -185,7 +185,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) rq = task_rq(p); raw_spin_lock(&rq->lock); if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { - rf->cookie = lockdep_pin_lock(&rq->lock); + rq_pin_lock(rq, rf); return rq; } raw_spin_unlock(&rq->lock); @@ -225,7 +225,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) * pair with the WMB to ensure we must then also see migrating. */ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { - rf->cookie = lockdep_pin_lock(&rq->lock); + rq_pin_lock(rq, rf); return rq; } raw_spin_unlock(&rq->lock); @@ -1195,9 +1195,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, * OK, since we're going to drop the lock immediately * afterwards anyway. */ - lockdep_unpin_lock(&rq->lock, rf.cookie); + rq_unpin_lock(rq, &rf); rq = move_queued_task(rq, p, dest_cpu); - lockdep_repin_lock(&rq->lock, rf.cookie); + rq_repin_lock(rq, &rf); } out: task_rq_unlock(rq, p, &rf); @@ -1690,7 +1690,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl * Mark the task runnable and perform wakeup-preemption. */ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, - struct pin_cookie cookie) + struct rq_flags *rf) { check_preempt_curr(rq, p, wake_flags); p->state = TASK_RUNNING; @@ -1702,9 +1702,9 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, * Our task @p is fully woken up and running; so its safe to * drop the rq->lock, hereafter rq is only used for statistics. 
*/ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, rf); p->sched_class->task_woken(rq, p); - lockdep_repin_lock(&rq->lock, cookie); + rq_repin_lock(rq, rf); } if (rq->idle_stamp) { @@ -1723,7 +1723,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, static void ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, - struct pin_cookie cookie) + struct rq_flags *rf) { int en_flags = ENQUEUE_WAKEUP; @@ -1738,7 +1738,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, #endif ttwu_activate(rq, p, en_flags); - ttwu_do_wakeup(rq, p, wake_flags, cookie); + ttwu_do_wakeup(rq, p, wake_flags, rf); } /* @@ -1757,7 +1757,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags) if (task_on_rq_queued(p)) { /* check_preempt_curr() may use rq clock */ update_rq_clock(rq); - ttwu_do_wakeup(rq, p, wake_flags, rf.cookie); + ttwu_do_wakeup(rq, p, wake_flags, &rf); ret = 1; } __task_rq_unlock(rq, &rf); @@ -1770,15 +1770,15 @@ void sched_ttwu_pending(void) { struct rq *rq = this_rq(); struct llist_node *llist = llist_del_all(&rq->wake_list); - struct pin_cookie cookie; struct task_struct *p; unsigned long flags; + struct rq_flags rf; if (!llist) return; raw_spin_lock_irqsave(&rq->lock, flags); - cookie = lockdep_pin_lock(&rq->lock); + rq_pin_lock(rq, &rf); while (llist) { int wake_flags = 0; @@ -1789,10 +1789,10 @@ void sched_ttwu_pending(void) if (p->sched_remote_wakeup) wake_flags = WF_MIGRATED; - ttwu_do_activate(rq, p, wake_flags, cookie); + ttwu_do_activate(rq, p, wake_flags, &rf); } - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, &rf); raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -1881,7 +1881,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu) static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) { struct rq *rq = cpu_rq(cpu); - struct pin_cookie cookie; + struct rq_flags rf; #if defined(CONFIG_SMP) if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { @@ -1892,9 +1892,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) #endif raw_spin_lock(&rq->lock); - cookie = lockdep_pin_lock(&rq->lock); - ttwu_do_activate(rq, p, wake_flags, cookie); - lockdep_unpin_lock(&rq->lock, cookie); + rq_pin_lock(rq, &rf); + ttwu_do_activate(rq, p, wake_flags, &rf); + rq_unpin_lock(rq, &rf); raw_spin_unlock(&rq->lock); } @@ -2111,7 +2111,7 @@ out: * ensure that this_rq() is locked, @p is bound to this_rq() and not * the current task. */ -static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie) +static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) { struct rq *rq = task_rq(p); @@ -2128,11 +2128,11 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie * disabled avoiding further scheduler activity on it and we've * not yet picked a replacement task. 
*/ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, rf); raw_spin_unlock(&rq->lock); raw_spin_lock(&p->pi_lock); raw_spin_lock(&rq->lock); - lockdep_repin_lock(&rq->lock, cookie); + rq_repin_lock(rq, rf); } if (!(p->state & TASK_NORMAL)) @@ -2143,7 +2143,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie if (!task_on_rq_queued(p)) ttwu_activate(rq, p, ENQUEUE_WAKEUP); - ttwu_do_wakeup(rq, p, 0, cookie); + ttwu_do_wakeup(rq, p, 0, rf); ttwu_stat(p, smp_processor_id(), 0); out: raw_spin_unlock(&p->pi_lock); @@ -2590,9 +2590,9 @@ void wake_up_new_task(struct task_struct *p) * Nothing relies on rq->lock after this, so its fine to * drop it. */ - lockdep_unpin_lock(&rq->lock, rf.cookie); + rq_unpin_lock(rq, &rf); p->sched_class->task_woken(rq, p); - lockdep_repin_lock(&rq->lock, rf.cookie); + rq_repin_lock(rq, &rf); } #endif task_rq_unlock(rq, p, &rf); @@ -2861,7 +2861,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) */ static __always_inline struct rq * context_switch(struct rq *rq, struct task_struct *prev, - struct task_struct *next, struct pin_cookie cookie) + struct task_struct *next, struct rq_flags *rf) { struct mm_struct *mm, *oldmm; @@ -2893,7 +2893,7 @@ context_switch(struct rq *rq, struct task_struct *prev, * of the scheduler it's an obvious special-case), so we * do an early lockdep release here: */ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, rf); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); /* Here we just switch the register state and the stack. */ @@ -3257,7 +3257,7 @@ static inline void schedule_debug(struct task_struct *prev) * Pick up the highest-prio task: */ static inline struct task_struct * -pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) +pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { const struct sched_class *class = &fair_sched_class; struct task_struct *p; @@ -3268,20 +3268,20 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie */ if (likely(prev->sched_class == class && rq->nr_running == rq->cfs.h_nr_running)) { - p = fair_sched_class.pick_next_task(rq, prev, cookie); + p = fair_sched_class.pick_next_task(rq, prev, rf); if (unlikely(p == RETRY_TASK)) goto again; /* assumes fair_sched_class->next == idle_sched_class */ if (unlikely(!p)) - p = idle_sched_class.pick_next_task(rq, prev, cookie); + p = idle_sched_class.pick_next_task(rq, prev, rf); return p; } again: for_each_class(class) { - p = class->pick_next_task(rq, prev, cookie); + p = class->pick_next_task(rq, prev, rf); if (p) { if (unlikely(p == RETRY_TASK)) goto again; @@ -3335,7 +3335,7 @@ static void __sched notrace __schedule(bool preempt) { struct task_struct *prev, *next; unsigned long *switch_count; - struct pin_cookie cookie; + struct rq_flags rf; struct rq *rq; int cpu; @@ -3358,7 +3358,7 @@ static void __sched notrace __schedule(bool preempt) */ smp_mb__before_spinlock(); raw_spin_lock(&rq->lock); - cookie = lockdep_pin_lock(&rq->lock); + rq_pin_lock(rq, &rf); rq->clock_skip_update <<= 1; /* promote REQ to ACT */ @@ -3380,7 +3380,7 @@ static void __sched notrace __schedule(bool preempt) to_wakeup = wq_worker_sleeping(prev); if (to_wakeup) - try_to_wake_up_local(to_wakeup, cookie); + try_to_wake_up_local(to_wakeup, &rf); } } switch_count = &prev->nvcsw; @@ -3389,7 +3389,7 @@ static void __sched notrace __schedule(bool preempt) if (task_on_rq_queued(prev)) update_rq_clock(rq); - next = pick_next_task(rq, prev, cookie); + 
next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); clear_preempt_need_resched(); rq->clock_skip_update = 0; @@ -3400,9 +3400,9 @@ static void __sched notrace __schedule(bool preempt) ++*switch_count; trace_sched_switch(preempt, prev, next); - rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */ + rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */ } else { - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, &rf); raw_spin_unlock_irq(&rq->lock); } @@ -5521,7 +5521,7 @@ static void migrate_tasks(struct rq *dead_rq) { struct rq *rq = dead_rq; struct task_struct *next, *stop = rq->stop; - struct pin_cookie cookie; + struct rq_flags rf; int dest_cpu; /* @@ -5553,8 +5553,8 @@ static void migrate_tasks(struct rq *dead_rq) /* * pick_next_task assumes pinned rq->lock. */ - cookie = lockdep_pin_lock(&rq->lock); - next = pick_next_task(rq, &fake_task, cookie); + rq_pin_lock(rq, &rf); + next = pick_next_task(rq, &fake_task, &rf); BUG_ON(!next); next->sched_class->put_prev_task(rq, next); @@ -5567,7 +5567,7 @@ static void migrate_tasks(struct rq *dead_rq) * because !cpu_active at this point, which means load-balance * will not interfere. Also, stop-machine. */ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, &rf); raw_spin_unlock(&rq->lock); raw_spin_lock(&next->pi_lock); raw_spin_lock(&rq->lock); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 70ef2b1901e4..491ff663e1b6 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -663,9 +663,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) * Nothing relies on rq->lock after this, so its safe to drop * rq->lock. */ - lockdep_unpin_lock(&rq->lock, rf.cookie); + rq_unpin_lock(rq, &rf); push_dl_task(rq); - lockdep_repin_lock(&rq->lock, rf.cookie); + rq_repin_lock(rq, &rf); } #endif @@ -1118,7 +1118,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, } struct task_struct * -pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) +pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct sched_dl_entity *dl_se; struct task_struct *p; @@ -1133,9 +1133,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie coo * disabled avoiding further scheduler activity on it and we're * being very careful to re-start the picking loop. */ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, rf); pull_dl_task(rq); - lockdep_repin_lock(&rq->lock, cookie); + rq_repin_lock(rq, rf); /* * pull_dl_task() can drop (and re-acquire) rq->lock; this * means a stop task can slip in, in which case we need to diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6559d197e08a..490441255c56 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6213,7 +6213,7 @@ preempt: } static struct task_struct * -pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) +pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct cfs_rq *cfs_rq = &rq->cfs; struct sched_entity *se; @@ -6326,9 +6326,9 @@ idle: * further scheduler activity on it and we're being very careful to * re-start the picking loop. */ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, rf); new_tasks = idle_balance(rq); - lockdep_repin_lock(&rq->lock, cookie); + rq_repin_lock(rq, rf); /* * Because idle_balance() releases (and re-acquires) rq->lock, it is * possible for any higher priority task to appear. 
In that case we diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 5405d3feb112..0c00172db63e 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -24,7 +24,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl } static struct task_struct * -pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) +pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { put_prev_task(rq, prev); update_idle_core(rq); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 2516b8df6dbb..88254be118b0 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1523,7 +1523,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) } static struct task_struct * -pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) +pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct task_struct *p; struct rt_rq *rt_rq = &rq->rt; @@ -1535,9 +1535,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie coo * disabled avoiding further scheduler activity on it and we're * being very careful to re-start the picking loop. */ - lockdep_unpin_lock(&rq->lock, cookie); + rq_unpin_lock(rq, rf); pull_rt_task(rq); - lockdep_repin_lock(&rq->lock, cookie); + rq_repin_lock(rq, rf); /* * pull_rt_task() can drop (and re-acquire) rq->lock; this * means a dl or stop task can slip in, in which case we need diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7b34c7826ca5..98e7eee07237 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -792,6 +792,26 @@ static inline void rq_clock_skip_update(struct rq *rq, bool skip) rq->clock_skip_update &= ~RQCF_REQ_SKIP; } +struct rq_flags { + unsigned long flags; + struct pin_cookie cookie; +}; + +static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) +{ + rf->cookie = lockdep_pin_lock(&rq->lock); +} + +static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) +{ + lockdep_unpin_lock(&rq->lock, rf->cookie); +} + +static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) +{ + lockdep_repin_lock(&rq->lock, rf->cookie); +} + #ifdef CONFIG_NUMA enum numa_topology_type { NUMA_DIRECT, @@ -1245,7 +1265,7 @@ struct sched_class { */ struct task_struct * (*pick_next_task) (struct rq *rq, struct task_struct *prev, - struct pin_cookie cookie); + struct rq_flags *rf); void (*put_prev_task) (struct rq *rq, struct task_struct *p); #ifdef CONFIG_SMP @@ -1501,11 +1521,6 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } static inline void sched_avg_update(struct rq *rq) { } #endif -struct rq_flags { - unsigned long flags; - struct pin_cookie cookie; -}; - struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(rq->lock); struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) @@ -1515,7 +1530,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) { - lockdep_unpin_lock(&rq->lock, rf->cookie); + rq_unpin_lock(rq, rf); raw_spin_unlock(&rq->lock); } @@ -1524,7 +1539,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) __releases(rq->lock) __releases(p->pi_lock) { - lockdep_unpin_lock(&rq->lock, rf->cookie); + rq_unpin_lock(rq, rf); raw_spin_unlock(&rq->lock); raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); } diff --git a/kernel/sched/stop_task.c 
b/kernel/sched/stop_task.c index 604297a08b3a..9f69fb630853 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -24,7 +24,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) } static struct task_struct * -pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) +pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct task_struct *stop = rq->stop; -- cgit v1.2.3 From 92509b732baf14c59ca702307270cfaa3a585ae7 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 21 Sep 2016 14:38:11 +0100 Subject: sched/core: Reset RQCF_ACT_SKIP before unpinning rq->lock rq_clock() is called from sched_info_{depart,arrive}() after resetting RQCF_ACT_SKIP but prior to a call to update_rq_clock(). In preparation for pending patches that check whether the rq clock has been updated inside of a pin context before rq_clock() is called, move the reset of rq->clock_skip_update immediately before unpinning the rq lock. This will avoid the new warnings which check if update_rq_clock() is being actively skipped. Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Byungchul Park Cc: Frederic Weisbecker Cc: Jan Kara Cc: Linus Torvalds Cc: Luca Abeni Cc: Mel Gorman Cc: Mike Galbraith Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Petr Mladek Cc: Rik van Riel Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Wanpeng Li Cc: Yuyang Du Link: http://lkml.kernel.org/r/20160921133813.31976-6-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 41df935180ea..311460b46d68 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2887,6 +2887,9 @@ context_switch(struct rq *rq, struct task_struct *prev, prev->active_mm = NULL; rq->prev_mm = oldmm; } + + rq->clock_skip_update = 0; + /* * Since the runqueue lock will be released by the next * task (which is an invalid locking op but in the case @@ -3392,7 +3395,6 @@ static void __sched notrace __schedule(bool preempt) next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); clear_preempt_need_resched(); - rq->clock_skip_update = 0; if (likely(prev != next)) { rq->nr_switches++; @@ -3402,6 +3404,7 @@ static void __sched notrace __schedule(bool preempt) trace_sched_switch(preempt, prev, next); rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */ } else { + rq->clock_skip_update = 0; rq_unpin_lock(rq, &rf); raw_spin_unlock_irq(&rq->lock); } -- cgit v1.2.3 From 46f69fa33712ad12ccaa723e46ed5929ee93589b Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 21 Sep 2016 14:38:12 +0100 Subject: sched/fair: Push rq lock pin/unpin into idle_balance() Future patches will emit warnings if rq_clock() is called before update_rq_clock() inside a rq_pin_lock()/rq_unpin_lock() pair. Since there is only one caller of idle_balance() we can push the unpin/repin there. 
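The shape every such call site now takes, as a minimal sketch (example_drop_rq_lock() and do_unlocked_work() are illustrative only; the pin helpers are the rq_flags wrappers introduced earlier in this series):

	int do_unlocked_work(void);	/* stand-in for e.g. idle_balance()'s pull */

	static int example_drop_rq_lock(struct rq *rq, struct rq_flags *rf)
	{
		int ret;

		lockdep_assert_held(&rq->lock);

		/* Hand the pin context back before dropping the lock... */
		rq_unpin_lock(rq, rf);
		raw_spin_unlock(&rq->lock);

		ret = do_unlocked_work();

		/* ...and re-pin once the lock is reacquired. */
		raw_spin_lock(&rq->lock);
		rq_repin_lock(rq, rf);

		return ret;
	}

Pushing this pair into idle_balance() itself means the pin context travels with the rq_flags pointer instead of being open-coded at the single caller.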
Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Byungchul Park Cc: Frederic Weisbecker Cc: Jan Kara Cc: Linus Torvalds Cc: Luca Abeni Cc: Mel Gorman Cc: Mike Galbraith Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Petr Mladek Cc: Rik van Riel Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Wanpeng Li Cc: Yuyang Du Link: http://lkml.kernel.org/r/20160921133813.31976-7-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 490441255c56..faf80e10d662 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3424,7 +3424,7 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) return cfs_rq->avg.load_avg; } -static int idle_balance(struct rq *this_rq); +static int idle_balance(struct rq *this_rq, struct rq_flags *rf); #else /* CONFIG_SMP */ @@ -3453,7 +3453,7 @@ attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} -static inline int idle_balance(struct rq *rq) +static inline int idle_balance(struct rq *rq, struct rq_flags *rf) { return 0; } @@ -6320,15 +6320,8 @@ simple: return p; idle: - /* - * This is OK, because current is on_cpu, which avoids it being picked - * for load-balance and preemption/IRQs are still disabled avoiding - * further scheduler activity on it and we're being very careful to - * re-start the picking loop. - */ - rq_unpin_lock(rq, rf); - new_tasks = idle_balance(rq); - rq_repin_lock(rq, rf); + new_tasks = idle_balance(rq, rf); + /* * Because idle_balance() releases (and re-acquires) rq->lock, it is * possible for any higher priority task to appear. In that case we @@ -8297,7 +8290,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance) * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ -static int idle_balance(struct rq *this_rq) +static int idle_balance(struct rq *this_rq, struct rq_flags *rf) { unsigned long next_balance = jiffies + HZ; int this_cpu = this_rq->cpu; @@ -8311,6 +8304,14 @@ static int idle_balance(struct rq *this_rq) */ this_rq->idle_stamp = rq_clock(this_rq); + /* + * This is OK, because current is on_cpu, which avoids it being picked + * for load-balance and preemption/IRQs are still disabled avoiding + * further scheduler activity on it and we're being very careful to + * re-start the picking loop. 
+ */ + rq_unpin_lock(this_rq, rf); + if (this_rq->avg_idle < sysctl_sched_migration_cost || !this_rq->rd->overload) { rcu_read_lock(); @@ -8388,6 +8389,8 @@ out: if (pulled_task) this_rq->idle_stamp = 0; + rq_repin_lock(this_rq, rf); + return pulled_task; } -- cgit v1.2.3 From 4126bad6717336abe5d666440ae15555563ca53f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 3 Oct 2016 16:20:59 +0200 Subject: sched/core: Add missing update_rq_clock() in post_init_entity_util_avg() Address this rq-clock update bug: WARNING: CPU: 0 PID: 0 at ../kernel/sched/sched.h:797 post_init_entity_util_avg() rq->clock_update_flags < RQCF_ACT_SKIP Call Trace: __warn() post_init_entity_util_avg() wake_up_new_task() _do_fork() kernel_thread() rest_init() start_kernel() Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 + kernel/sched/fair.c | 1 + 2 files changed, 2 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 311460b46d68..9217c3221b0d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2578,6 +2578,7 @@ void wake_up_new_task(struct task_struct *p) __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); #endif rq = __task_rq_lock(p, &rf); + update_rq_clock(rq); post_init_entity_util_avg(&p->se); activate_task(rq, p, 0); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index faf80e10d662..972b67622922 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9267,6 +9267,7 @@ void online_fair_sched_group(struct task_group *tg) se = tg->se[i]; raw_spin_lock_irq(&rq->lock); + update_rq_clock(rq); attach_entity_cfs_rq(se); sync_throttle(tg, i); raw_spin_unlock_irq(&rq->lock); -- cgit v1.2.3 From 80f5c1b84baa8180c3c27b7e227429712cd967b6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 3 Oct 2016 16:28:37 +0200 Subject: sched/core: Add missing update_rq_clock() in detach_task_cfs_rq() Instead of adding the update_rq_clock() all the way at the bottom of the callstack, add one at the top, this to aid later effort to minimize update_rq_lock() calls. WARNING: CPU: 0 PID: 1 at ../kernel/sched/sched.h:797 detach_task_cfs_rq() rq->clock_update_flags < RQCF_ACT_SKIP Call Trace: dump_stack() __warn() warn_slowpath_fmt() detach_task_cfs_rq() switched_from_fair() __sched_setscheduler() _sched_setscheduler() sched_set_stop_task() cpu_stop_create() __smpboot_create_thread.part.2() smpboot_register_percpu_thread_cpumask() cpu_stop_init() do_one_initcall() ? print_cpu_info() kernel_init_freeable() ? rest_init() kernel_init() ret_from_fork() Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9217c3221b0d..b7f7f215b51b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3655,6 +3655,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) BUG_ON(prio > MAX_PRIO); rq = __task_rq_lock(p, &rf); + update_rq_clock(rq); /* * Idle task boosting is a nono in general. There is one @@ -4183,6 +4184,7 @@ recheck: * runqueue lock must be held. 
*/ rq = task_rq_lock(p, &rf); + update_rq_clock(rq); /* * Changing the policy of the stop threads its a very bad idea @@ -8435,6 +8437,7 @@ static void cpu_cgroup_fork(struct task_struct *task) rq = task_rq_lock(task, &rf); + update_rq_clock(rq); sched_change_group(task, TASK_SET_GROUP); task_rq_unlock(rq, task, &rf); -- cgit v1.2.3 From 3bed5e2166a5e433bf62162f3cd3c5174d335934 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 3 Oct 2016 16:35:32 +0200 Subject: sched/core: Add missing update_rq_clock() call for task_hot() Add the update_rq_clock() call at the top of the callstack instead of at the bottom where we find it missing, this to aid later effort to minimize the number of update_rq_lock() calls. WARNING: CPU: 30 PID: 194 at ../kernel/sched/sched.h:797 assert_clock_updated() rq->clock_update_flags < RQCF_ACT_SKIP Call Trace: dump_stack() __warn() warn_slowpath_fmt() assert_clock_updated.isra.63.part.64() can_migrate_task() load_balance() pick_next_task_fair() __schedule() schedule() worker_thread() kthread() Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 972b67622922..b3bfe3fb4e13 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8070,6 +8070,7 @@ redo: more_balance: raw_spin_lock_irqsave(&busiest->lock, flags); + update_rq_clock(busiest); /* * cur_ld_moved - load moved in current iteration @@ -8446,6 +8447,7 @@ static int active_load_balance_cpu_stop(void *data) }; schedstat_inc(sd->alb_count); + update_rq_clock(busiest_rq); p = detach_one_task(&env); if (p) { -- cgit v1.2.3 From 2fb8d36787affe26f3536c3d8ec094995a48037d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 3 Oct 2016 16:44:25 +0200 Subject: sched/core: Add missing update_rq_clock() call in set_user_nice() Address this rq-clock update bug: WARNING: CPU: 30 PID: 195 at ../kernel/sched/sched.h:797 set_next_entity() rq->clock_update_flags < RQCF_ACT_SKIP Call Trace: dump_stack() __warn() warn_slowpath_fmt() set_next_entity() ? _raw_spin_lock() set_curr_task_fair() set_user_nice.part.85() set_user_nice() create_worker() worker_thread() kthread() ret_from_fork() Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7f7f215b51b..d2338927773a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3752,6 +3752,8 @@ void set_user_nice(struct task_struct *p, long nice) * the task might be in the middle of scheduling on another CPU. */ rq = task_rq_lock(p, &rf); + update_rq_clock(rq); + /* * The RT priorities are set via sched_setscheduler(), but we still * allow the 'normal' nice value to be set - but as expected -- cgit v1.2.3 From cb42c9a3ebbbb23448c3f9a25417fae6309b1a92 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 21 Sep 2016 14:38:13 +0100 Subject: sched/core: Add debugging code to catch missing update_rq_clock() calls There's no diagnostic checks for figuring out when we've accidentally missed update_rq_clock() calls. Let's add some by piggybacking on the rq_*pin_lock() wrappers. 
The idea behind the diagnostic checks is that upon pining rq lock the rq clock should be updated, via update_rq_clock(), before anybody reads the clock with rq_clock() or rq_clock_task(). The exception to this rule is when updates have explicitly been disabled with the rq_clock_skip_update() optimisation. There are some functions that only unpin the rq lock in order to grab some other lock and avoid deadlock. In that case we don't need to update the clock again and the previous diagnostic state can be carried over in rq_repin_lock() by saving the state in the rq_flags context. Since this patch adds a new clock update flag and some already exist in rq::clock_skip_update, that field has now been renamed. An attempt has been made to keep the flag manipulation code small and fast since it's used in the heart of the __schedule() fast path. For the !CONFIG_SCHED_DEBUG case the only object code change (other than addresses) is the following change to reset RQCF_ACT_SKIP inside of __schedule(), - c7 83 38 09 00 00 00 movl $0x0,0x938(%rbx) - 00 00 00 + 83 a3 38 09 00 00 fc andl $0xfffffffc,0x938(%rbx) Suggested-by: Peter Zijlstra Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Byungchul Park Cc: Frederic Weisbecker Cc: Jan Kara Cc: Linus Torvalds Cc: Luca Abeni Cc: Mel Gorman Cc: Mike Galbraith Cc: Mike Galbraith Cc: Petr Mladek Cc: Rik van Riel Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Wanpeng Li Cc: Yuyang Du Link: http://lkml.kernel.org/r/20160921133813.31976-8-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 11 +++++--- kernel/sched/sched.h | 74 +++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 10 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d2338927773a..a129b34b8206 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -102,9 +102,12 @@ void update_rq_clock(struct rq *rq) lockdep_assert_held(&rq->lock); - if (rq->clock_skip_update & RQCF_ACT_SKIP) + if (rq->clock_update_flags & RQCF_ACT_SKIP) return; +#ifdef CONFIG_SCHED_DEBUG + rq->clock_update_flags |= RQCF_UPDATED; +#endif delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; if (delta < 0) return; @@ -2889,7 +2892,7 @@ context_switch(struct rq *rq, struct task_struct *prev, rq->prev_mm = oldmm; } - rq->clock_skip_update = 0; + rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); /* * Since the runqueue lock will be released by the next @@ -3364,7 +3367,7 @@ static void __sched notrace __schedule(bool preempt) raw_spin_lock(&rq->lock); rq_pin_lock(rq, &rf); - rq->clock_skip_update <<= 1; /* promote REQ to ACT */ + rq->clock_update_flags <<= 1; /* promote REQ to ACT */ switch_count = &prev->nivcsw; if (!preempt && prev->state) { @@ -3405,7 +3408,7 @@ static void __sched notrace __schedule(bool preempt) trace_sched_switch(preempt, prev, next); rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */ } else { - rq->clock_skip_update = 0; + rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); rq_unpin_lock(rq, &rf); raw_spin_unlock_irq(&rq->lock); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 98e7eee07237..6eeae7ebd99b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -644,7 +644,7 @@ struct rq { unsigned long next_balance; struct mm_struct *prev_mm; - unsigned int clock_skip_update; + unsigned int clock_update_flags; u64 clock; u64 clock_task; @@ -768,48 +768,110 @@ static inline u64 __rq_clock_broken(struct rq *rq) return READ_ONCE(rq->clock); } +/* + * 
rq::clock_update_flags bits + * + * %RQCF_REQ_SKIP - will request skipping of clock update on the next + * call to __schedule(). This is an optimisation to avoid + * neighbouring rq clock updates. + * + * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is + * in effect and calls to update_rq_clock() are being ignored. + * + * %RQCF_UPDATED - is a debug flag that indicates whether a call has been + * made to update_rq_clock() since the last time rq::lock was pinned. + * + * If inside of __schedule(), clock_update_flags will have been + * shifted left (a left shift is a cheap operation for the fast path + * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, + * + * if (rq-clock_update_flags >= RQCF_UPDATED) + * + * to check if %RQCF_UPADTED is set. It'll never be shifted more than + * one position though, because the next rq_unpin_lock() will shift it + * back. + */ +#define RQCF_REQ_SKIP 0x01 +#define RQCF_ACT_SKIP 0x02 +#define RQCF_UPDATED 0x04 + +static inline void assert_clock_updated(struct rq *rq) +{ + /* + * The only reason for not seeing a clock update since the + * last rq_pin_lock() is if we're currently skipping updates. + */ + SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); +} + static inline u64 rq_clock(struct rq *rq) { lockdep_assert_held(&rq->lock); + assert_clock_updated(rq); + return rq->clock; } static inline u64 rq_clock_task(struct rq *rq) { lockdep_assert_held(&rq->lock); + assert_clock_updated(rq); + return rq->clock_task; } -#define RQCF_REQ_SKIP 0x01 -#define RQCF_ACT_SKIP 0x02 - static inline void rq_clock_skip_update(struct rq *rq, bool skip) { lockdep_assert_held(&rq->lock); if (skip) - rq->clock_skip_update |= RQCF_REQ_SKIP; + rq->clock_update_flags |= RQCF_REQ_SKIP; else - rq->clock_skip_update &= ~RQCF_REQ_SKIP; + rq->clock_update_flags &= ~RQCF_REQ_SKIP; } struct rq_flags { unsigned long flags; struct pin_cookie cookie; +#ifdef CONFIG_SCHED_DEBUG + /* + * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the + * current pin context is stashed here in case it needs to be + * restored in rq_repin_lock(). + */ + unsigned int clock_update_flags; +#endif }; static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) { rf->cookie = lockdep_pin_lock(&rq->lock); + +#ifdef CONFIG_SCHED_DEBUG + rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); + rf->clock_update_flags = 0; +#endif } static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) { +#ifdef CONFIG_SCHED_DEBUG + if (rq->clock_update_flags > RQCF_ACT_SKIP) + rf->clock_update_flags = RQCF_UPDATED; +#endif + lockdep_unpin_lock(&rq->lock, rf->cookie); } static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) { lockdep_repin_lock(&rq->lock, rf->cookie); + +#ifdef CONFIG_SCHED_DEBUG + /* + * Restore the value we stashed in @rf for this pin context. + */ + rq->clock_update_flags |= rf->clock_update_flags; +#endif } #ifdef CONFIG_NUMA -- cgit v1.2.3 From 12907fbb1a691807bb0420a27126e15934cb7954 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 15 Dec 2016 11:44:28 +0100 Subject: sched/clock, clocksource: Add optional cs::mark_unstable() method PeterZ reported that we'd fail to mark the TSC unstable when the clocksource watchdog finds it unsuitable. Allow a clocksource to run a custom action when its being marked unstable and hook up the TSC unstable code. 
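A hypothetical driver-side user of the new hook ("foo" and its read routine are invented; only the .mark_unstable member is what this patch adds):

	#include <linux/clocksource.h>

	static u64 foo_cs_read(struct clocksource *cs)
	{
		return 0;	/* a real driver would read its hardware counter here */
	}

	static void foo_cs_mark_unstable(struct clocksource *cs)
	{
		/* Invoked from __clocksource_unstable() when the watchdog fires. */
		pr_info("%s: marked unstable by the clocksource watchdog\n", cs->name);
	}

	static struct clocksource foo_clocksource = {
		.name		= "foo",
		.rating		= 300,
		.read		= foo_cs_read,
		.mask		= CLOCKSOURCE_MASK(64),
		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
		.mark_unstable	= foo_cs_mark_unstable,
	};

The TSC hookup below follows exactly this pattern, using the callback to clear the stable sched_clock and irqtime paths.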
Reported-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc.c | 11 +++++++++++ include/linux/clocksource.h | 3 +++ kernel/time/clocksource.c | 4 ++++ 3 files changed, 18 insertions(+) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index be3a49ee0356..c8174c815d83 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1106,6 +1106,16 @@ static u64 read_tsc(struct clocksource *cs) return (u64)rdtsc_ordered(); } +static void tsc_cs_mark_unstable(struct clocksource *cs) +{ + if (tsc_unstable) + return; + tsc_unstable = 1; + clear_sched_clock_stable(); + disable_sched_clock_irqtime(); + pr_info("Marking TSC unstable due to clocksource watchdog\n"); +} + /* * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc() */ @@ -1118,6 +1128,7 @@ static struct clocksource clocksource_tsc = { CLOCK_SOURCE_MUST_VERIFY, .archdata = { .vclock_mode = VCLOCK_TSC }, .resume = tsc_resume, + .mark_unstable = tsc_cs_mark_unstable, }; void mark_tsc_unstable(char *reason) diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index e315d04a2fd9..cfc75848a35d 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -62,6 +62,8 @@ struct module; * @archdata: arch-specific data * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary + * @mark_unstable: Optional function to inform the clocksource driver that + * the watchdog marked the clocksource unstable * @owner: module reference, must be set by clocksource in modules * * Note: This struct is not used in hotpathes of the timekeeping code @@ -93,6 +95,7 @@ struct clocksource { unsigned long flags; void (*suspend)(struct clocksource *cs); void (*resume)(struct clocksource *cs); + void (*mark_unstable)(struct clocksource *cs); /* private: */ #ifdef CONFIG_CLOCKSOURCE_WATCHDOG diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 665985b0a89a..93621ae718d3 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -141,6 +141,10 @@ static void __clocksource_unstable(struct clocksource *cs) { cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags |= CLOCK_SOURCE_UNSTABLE; + + if (cs->mark_unstable) + cs->mark_unstable(cs); + if (finished_booting) schedule_work(&watchdog_work); } -- cgit v1.2.3 From 555570d744f8150d3fce6083f144026cd1e63627 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Dec 2016 13:21:58 +0100 Subject: sched/clock: Update static_key usage sched_clock was still using the deprecated static_key interface. 
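The modern static-branch idiom the patch converts to, shown as a generic sketch (the key and helpers here are illustrative, not the scheduler's):

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	static inline bool my_feature_enabled(void)
	{
		/* Compiles to a patched jump/nop; likely() biases code layout. */
		return static_branch_likely(&my_feature_key);
	}

	static void my_feature_set(bool on)
	{
		if (on)
			static_branch_enable(&my_feature_key);
		else
			static_branch_disable(&my_feature_key);
	}

Unlike the old static_key_slow_inc()/static_key_slow_dec() pair, enable/disable set a state rather than a reference count, which is why the "is it already stable?" guards can simply disappear in the diff below.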
Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/clock.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index e85a725e5c34..5d6dd38b449c 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -78,19 +78,17 @@ EXPORT_SYMBOL_GPL(sched_clock); __read_mostly int sched_clock_running; #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static struct static_key __sched_clock_stable = STATIC_KEY_INIT; +static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); static int __sched_clock_stable_early; int sched_clock_stable(void) { - return static_key_false(&__sched_clock_stable); + return static_branch_likely(&__sched_clock_stable); } static void __set_sched_clock_stable(void) { - if (!sched_clock_stable()) - static_key_slow_inc(&__sched_clock_stable); - + static_branch_enable(&__sched_clock_stable); tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); } @@ -109,9 +107,7 @@ void set_sched_clock_stable(void) static void __clear_sched_clock_stable(struct work_struct *work) { /* XXX worry about clock continuity */ - if (sched_clock_stable()) - static_key_slow_dec(&__sched_clock_stable); - + static_branch_disable(&__sched_clock_stable); tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); } -- cgit v1.2.3 From 9881b024b7d7671f6a014091bc96506b89081802 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Dec 2016 13:35:52 +0100 Subject: sched/clock: Delay switching sched_clock to stable Currently we switch to the stable sched_clock if we guess the TSC is usable, and then switch back to the unstable path if it turns out TSC isn't stable during SMP bringup after all. Delay switching to the stable path until after SMP bringup is complete. This way we'll avoid switching during the time we detect the worst of the TSC offences. 
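The resulting arrangement is a "guess early, commit late" scheme; in miniature (all names here are invented, the real code expresses this with sched_clock_running and __sched_clock_stable_early):

	static bool early_guess_stable;
	static int  init_phase;		/* 1 = early boot, 2 = after SMP bringup */

	static void commit_stability(bool stable)
	{
		/* flip the static key, fix up offsets, etc. */
	}

	void guess_stable(bool stable)
	{
		early_guess_stable = stable;
		if (init_phase == 2)		/* late callers take effect at once */
			commit_stability(stable);
	}

	void stability_init_late(void)
	{
		init_phase = 2;			/* from here on, guesses commit directly */
		commit_stability(early_guess_stable);
	}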
Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched.h | 5 +++++ init/main.c | 1 - kernel/sched/clock.c | 50 ++++++++++++++++++++++---------------------------- kernel/sched/core.c | 4 ++++ 4 files changed, 31 insertions(+), 29 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index ad3ec9ec61f7..94a48bb58297 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2515,6 +2515,10 @@ extern u64 sched_clock_cpu(int cpu); extern void sched_clock_init(void); #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_init_late(void) +{ +} + static inline void sched_clock_tick(void) { } @@ -2537,6 +2541,7 @@ static inline u64 local_clock(void) return sched_clock(); } #else +extern void sched_clock_init_late(void); /* * Architectures can set this to 1 if they have specified * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, diff --git a/init/main.c b/init/main.c index b0c9d6facef9..19228149386c 100644 --- a/init/main.c +++ b/init/main.c @@ -625,7 +625,6 @@ asmlinkage __visible void __init start_kernel(void) numa_policy_init(); if (late_time_init) late_time_init(); - sched_clock_init(); calibrate_delay(); pidmap_init(); anon_vma_init(); diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 5d6dd38b449c..b3466d4e0cc2 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -77,6 +77,11 @@ EXPORT_SYMBOL_GPL(sched_clock); __read_mostly int sched_clock_running; +void sched_clock_init(void) +{ + sched_clock_running = 1; +} + #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); static int __sched_clock_stable_early; @@ -96,12 +101,18 @@ void set_sched_clock_stable(void) { __sched_clock_stable_early = 1; - smp_mb(); /* matches sched_clock_init() */ - - if (!sched_clock_running) - return; + smp_mb(); /* matches sched_clock_init_late() */ - __set_sched_clock_stable(); + /* + * This really should only be called early (before + * sched_clock_init_late()) when guestimating our sched_clock() is + * solid. + * + * After that we test stability and we can negate our guess using + * clear_sched_clock_stable, possibly from a watchdog. + */ + if (WARN_ON_ONCE(sched_clock_running == 2)) + __set_sched_clock_stable(); } static void __clear_sched_clock_stable(struct work_struct *work) @@ -117,12 +128,10 @@ void clear_sched_clock_stable(void) { __sched_clock_stable_early = 0; - smp_mb(); /* matches sched_clock_init() */ - - if (!sched_clock_running) - return; + smp_mb(); /* matches sched_clock_init_late() */ - schedule_work(&sched_clock_work); + if (sched_clock_running == 2) + schedule_work(&sched_clock_work); } struct sched_clock_data { @@ -143,20 +152,9 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) return &per_cpu(sched_clock_data, cpu); } -void sched_clock_init(void) +void sched_clock_init_late(void) { - u64 ktime_now = ktime_to_ns(ktime_get()); - int cpu; - - for_each_possible_cpu(cpu) { - struct sched_clock_data *scd = cpu_sdc(cpu); - - scd->tick_raw = 0; - scd->tick_gtod = ktime_now; - scd->clock = ktime_now; - } - - sched_clock_running = 1; + sched_clock_running = 2; /* * Ensure that it is impossible to not do a static_key update. 
@@ -362,11 +360,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -void sched_clock_init(void) -{ - sched_clock_running = 1; -} - u64 sched_clock_cpu(int cpu) { if (unlikely(!sched_clock_running)) @@ -374,6 +367,7 @@ u64 sched_clock_cpu(int cpu) return sched_clock(); } + #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a129b34b8206..96a4267e6020 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7498,6 +7498,7 @@ void __init sched_init_smp(void) init_sched_dl_class(); sched_init_smt(); + sched_clock_init_late(); sched_smp_initialized = true; } @@ -7513,6 +7514,7 @@ early_initcall(migration_init); void __init sched_init_smp(void) { sched_init_granularity(); + sched_clock_init_late(); } #endif /* CONFIG_SMP */ @@ -7556,6 +7558,8 @@ void __init sched_init(void) int i, j; unsigned long alloc_size = 0, ptr; + sched_clock_init(); + for (i = 0; i < WAIT_TABLE_SIZE; i++) init_waitqueue_head(bit_wait_table + i); -- cgit v1.2.3 From 5680d8094ffa9e5cfc81afdd865027ee6417c263 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Dec 2016 13:36:17 +0100 Subject: sched/clock: Provide better clock continuity When switching between the unstable and stable variants it is currently possible that clock discontinuities occur. And while these will mostly be 'small', attempt to do better. As observed on my IVB-EP, the sched_clock() is ~1.5s ahead of the ktime_get_ns() based timeline at the point of switchover (sched_clock_init_late()) after SMP bringup. Equally, when the TSC is later found to be unstable -- typically because SMM tries to hide its SMI latencies by mucking with the TSC -- we want to avoid large jumps. Since the clocksource watchdog reports the issue after the fact we cannot exactly fix up time, but since SMI latencies are typically small (~10ns range), the discontinuity is mainly due to drift between sched_clock() and ktime_get_ns() (which on my desktop is ~79s over 24days). I dislike this patch because it adds overhead to the good case in favour of dealing with badness. But given the widespread failure of TSC stability this is worth it. Note that in case the TSC makes drastic jumps after SMP bringup we're still hosed. There's just not much we can do in that case without stupid overhead. If we were to somehow expose tsc_clocksource_reliable (which is hard because this code is also used on ia64 and parisc) we could avoid some of the newly introduced overhead. 
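Schematically, the patch maintains the invariant

	ktime_get_ns() + gtod_offset == sched_clock() + raw_offset

so that switching which side is read from does not jump. A sketch of the fix-up arithmetic (the function names are invented; the assignments mirror __set_sched_clock_stable() and __clear_sched_clock_stable() in the diff below):

	static u64 raw_offset;	/* added to sched_clock() on the stable path */
	static u64 gtod_offset;	/* added to ktime_get_ns() on the unstable path */

	/* unstable -> stable, at sched_clock_init_late() time */
	static void make_stable_continuous(u64 tick_raw, u64 tick_gtod)
	{
		raw_offset = (tick_gtod + gtod_offset) - tick_raw;
	}

	/* stable -> unstable, from the clocksource watchdog */
	static void make_unstable_continuous(u64 tick_raw, u64 tick_gtod)
	{
		gtod_offset = (tick_raw + raw_offset) - tick_gtod;
	}

Each transition recomputes its own offset from the most recent tick snapshot, so the timeline handed to readers stays (approximately) continuous in both directions.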
Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/clock.c | 99 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 34 deletions(-) diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index b3466d4e0cc2..7713b2b53f61 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -86,6 +86,30 @@ void sched_clock_init(void) static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); static int __sched_clock_stable_early; +/* + * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset + */ +static __read_mostly u64 raw_offset; +static __read_mostly u64 gtod_offset; + +struct sched_clock_data { + u64 tick_raw; + u64 tick_gtod; + u64 clock; +}; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); + +static inline struct sched_clock_data *this_scd(void) +{ + return this_cpu_ptr(&sched_clock_data); +} + +static inline struct sched_clock_data *cpu_sdc(int cpu) +{ + return &per_cpu(sched_clock_data, cpu); +} + int sched_clock_stable(void) { return static_branch_likely(&__sched_clock_stable); @@ -93,6 +117,17 @@ int sched_clock_stable(void) static void __set_sched_clock_stable(void) { + struct sched_clock_data *scd = this_scd(); + + /* + * Attempt to make the (initial) unstable->stable transition continuous. + */ + raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw); + + printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", + scd->tick_gtod, gtod_offset, + scd->tick_raw, raw_offset); + static_branch_enable(&__sched_clock_stable); tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); } @@ -117,7 +152,23 @@ void set_sched_clock_stable(void) static void __clear_sched_clock_stable(struct work_struct *work) { - /* XXX worry about clock continuity */ + struct sched_clock_data *scd = this_scd(); + + /* + * Attempt to make the stable->unstable transition continuous. + * + * Trouble is, this is typically called from the TSC watchdog + * timer, which is late per definition. This means the tick + * values can already be screwy. + * + * Still do what we can. + */ + gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod); + + printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", + scd->tick_gtod, gtod_offset, + scd->tick_raw, raw_offset); + static_branch_disable(&__sched_clock_stable); tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); } @@ -134,28 +185,9 @@ void clear_sched_clock_stable(void) schedule_work(&sched_clock_work); } -struct sched_clock_data { - u64 tick_raw; - u64 tick_gtod; - u64 clock; -}; - -static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); - -static inline struct sched_clock_data *this_scd(void) -{ - return this_cpu_ptr(&sched_clock_data); -} - -static inline struct sched_clock_data *cpu_sdc(int cpu) -{ - return &per_cpu(sched_clock_data, cpu); -} - void sched_clock_init_late(void) { sched_clock_running = 2; - /* * Ensure that it is impossible to not do a static_key update. 
* @@ -210,7 +242,7 @@ again: * scd->tick_gtod + TICK_NSEC); */ - clock = scd->tick_gtod + delta; + clock = scd->tick_gtod + gtod_offset + delta; min_clock = wrap_max(scd->tick_gtod, old_clock); max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); @@ -296,7 +328,7 @@ u64 sched_clock_cpu(int cpu) u64 clock; if (sched_clock_stable()) - return sched_clock(); + return sched_clock() + raw_offset; if (unlikely(!sched_clock_running)) return 0ull; @@ -317,23 +349,22 @@ EXPORT_SYMBOL_GPL(sched_clock_cpu); void sched_clock_tick(void) { struct sched_clock_data *scd; - u64 now, now_gtod; - - if (sched_clock_stable()) - return; - - if (unlikely(!sched_clock_running)) - return; WARN_ON_ONCE(!irqs_disabled()); + /* + * Update these values even if sched_clock_stable(), because it can + * become unstable at any point in time at which point we need some + * values to fall back on. + * + * XXX arguably we can skip this if we expose tsc_clocksource_reliable + */ scd = this_scd(); - now_gtod = ktime_to_ns(ktime_get()); - now = sched_clock(); + scd->tick_raw = sched_clock(); + scd->tick_gtod = ktime_get_ns(); - scd->tick_raw = now; - scd->tick_gtod = now_gtod; - sched_clock_local(scd); + if (!sched_clock_stable() && likely(sched_clock_running)) + sched_clock_local(scd); } /* -- cgit v1.2.3 From 59f8c2989283bbd3df9fcfb22494d84f4852e536 Mon Sep 17 00:00:00 2001 From: Tommaso Cucinotta Date: Wed, 26 Oct 2016 11:17:17 +0200 Subject: sched/deadline: Show leftover runtime and abs deadline in /proc/*/sched This patch allows for reading the current (leftover) runtime and absolute deadline of a SCHED_DEADLINE task through /proc/*/sched (entries dl.runtime and dl.deadline), while debugging/testing. Signed-off-by: Tommaso Cucinotta Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Juri Lelli Reviewed-by: Luca Abeni Acked-by: Daniel Bistrot de Oliveira Cc: Juri Lelli Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1477473437-10346-2-git-send-email-tommaso.cucinotta@sssup.it Signed-off-by: Ingo Molnar --- Documentation/scheduler/sched-deadline.txt | 6 ++++++ kernel/sched/debug.c | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/Documentation/scheduler/sched-deadline.txt b/Documentation/scheduler/sched-deadline.txt index 8e37b0ba2c9d..cbc1b46cbf70 100644 --- a/Documentation/scheduler/sched-deadline.txt +++ b/Documentation/scheduler/sched-deadline.txt @@ -408,6 +408,11 @@ CONTENTS * the new scheduling related syscalls that manipulate it, i.e., sched_setattr() and sched_getattr() are implemented. + For debugging purposes, the leftover runtime and absolute deadline of a + SCHED_DEADLINE task can be retrieved through /proc//sched (entries + dl.runtime and dl.deadline, both values in ns). A programmatic way to + retrieve these values from production code is under discussion. + 4.3 Default behavior --------------------- @@ -476,6 +481,7 @@ CONTENTS Still missing: + - programmatic way to retrieve current runtime and absolute deadline - refinements to deadline inheritance, especially regarding the possibility of retaining bandwidth isolation among non-interacting tasks. 
This is being studied from both theoretical and practical points of view, and diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index fa178b62ea79..109adc0e9cb9 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -953,6 +953,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) #endif P(policy); P(prio); + if (p->policy == SCHED_DEADLINE) { + P(dl.runtime); + P(dl.deadline); + } #undef PN_SCHEDSTAT #undef PN #undef __PN -- cgit v1.2.3 From da9647e076d440190fff4b4ee0f4b829dd6e8c4f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 21 Dec 2016 09:05:02 +0100 Subject: sched/completions: Fix complete_all() semantics Documentation/scheduler/completion.txt says this about complete_all(): "calls complete_all() to signal all current and future waiters." Which doesn't strictly match the current semantics. Currently complete_all() is equivalent to UINT_MAX/2 complete() invocations, which is distinctly less than 'all current and future waiters' (enumerable vs innumerable), although it has worked in practice. However, Dmitry had a weird case where it might matter, so change completions to use saturation semantics for complete()/complete_all(). Once done hits UINT_MAX (and complete_all() sets it there) it will never again be decremented. Requested-by: Dmitry Torokhov Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: der.herr@hofr.at Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/completion.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 8d0f35debf35..f063a25d4449 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -31,7 +31,8 @@ void complete(struct completion *x) unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); - x->done++; + if (x->done != UINT_MAX) + x->done++; __wake_up_locked(&x->wait, TASK_NORMAL, 1); spin_unlock_irqrestore(&x->wait.lock, flags); } @@ -51,7 +52,7 @@ void complete_all(struct completion *x) unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); - x->done += UINT_MAX/2; + x->done = UINT_MAX; __wake_up_locked(&x->wait, TASK_NORMAL, 0); spin_unlock_irqrestore(&x->wait.lock, flags); } @@ -79,7 +80,8 @@ do_wait_for_common(struct completion *x, if (!x->done) return timeout; } - x->done--; + if (x->done != UINT_MAX) + x->done--; return timeout ?: 1; } @@ -280,7 +282,7 @@ bool try_wait_for_completion(struct completion *x) spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; - else + else if (x->done != UINT_MAX) x->done--; spin_unlock_irqrestore(&x->wait.lock, flags); return ret; -- cgit v1.2.3 From 89ee048f3cc796db6f26906c6bef4edf0bee70fd Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Wed, 21 Dec 2016 16:50:26 +0100 Subject: sched/core: Fix group_entity's share update The update of the share of a cfs_rq is done when its load_avg is updated but before the group_entity's load_avg has been updated for the past time slot. This generates wrong load_avg accounting which can be significant when small tasks are involved in the scheduling. Let take the example of a task a that is dequeued of its task group A: root (cfs_rq) \ (se) A (cfs_rq) \ (se) a Task "a" was the only task in task group A which becomes idle when a is dequeued. 
We have the sequence: - dequeue_entity a->se - update_load_avg(a->se) - dequeue_entity_load_avg(A->cfs_rq, a->se) - update_cfs_shares(A->cfs_rq) A->cfs_rq->load.weight == 0 A->se->load.weight is updated with the new share (0 in this case) - dequeue_entity A->se - update_load_avg(A->se) but its weight is now null so the last time slot (up to a tick) will be accounted with a weight of 0 instead of its real weight during the time slot. The last time slot will be accounted as an idle one whereas it was a running one. If the running time of task a is short enough that no tick happens when it runs, all running time of group entity A->se will be accounted as idle time. Instead, we should update the share of a cfs_rq (in fact the weight of its group entity) only after having updated the load_avg of the group_entity. update_cfs_shares() now takes the sched_entity as a parameter instead of the cfs_rq, and the weight of the group_entity is updated only once its load_avg has been synced with current time. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: pjt@google.com Link: http://lkml.kernel.org/r/1482335426-7664-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 50 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b3bfe3fb4e13..2b866a279bdf 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2689,16 +2689,20 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); -static void update_cfs_shares(struct cfs_rq *cfs_rq) +static void update_cfs_shares(struct sched_entity *se) { + struct cfs_rq *cfs_rq = group_cfs_rq(se); struct task_group *tg; - struct sched_entity *se; long shares; - tg = cfs_rq->tg; - se = tg->se[cpu_of(rq_of(cfs_rq))]; - if (!se || throttled_hierarchy(cfs_rq)) + if (!cfs_rq) + return; + + if (throttled_hierarchy(cfs_rq)) return; + + tg = cfs_rq->tg; + #ifndef CONFIG_SMP if (likely(se->load.weight == tg->shares)) return; @@ -2707,8 +2711,9 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq) reweight_entity(cfs_rq_of(se), se, shares); } + #else /* CONFIG_FAIR_GROUP_SCHED */ -static inline void update_cfs_shares(struct cfs_rq *cfs_rq) +static inline void update_cfs_shares(struct sched_entity *se) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -3582,10 +3587,18 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if (renorm && !curr) se->vruntime += cfs_rq->min_vruntime; + /* + * When enqueuing a sched_entity, we must: + * - Update loads to have both entity and cfs_rq synced with now. + * - Add its load to cfs_rq->runnable_avg + * - For group_entity, update its weight to reflect the new share of + * its group cfs_rq + * - Add its new weight to cfs_rq->load.weight + */ update_load_avg(se, UPDATE_TG); enqueue_entity_load_avg(cfs_rq, se); + update_cfs_shares(se); account_entity_enqueue(cfs_rq, se); - update_cfs_shares(cfs_rq); if (flags & ENQUEUE_WAKEUP) place_entity(cfs_rq, se, 0); @@ -3657,6 +3670,15 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); + + /* + * When dequeuing a sched_entity, we must: + * - Update loads to have both entity and cfs_rq synced with now. + * - Substract its load from the cfs_rq->runnable_avg. 
+ * - Substract its previous weight from cfs_rq->load.weight. + * - For group entity, update its weight to reflect the new share + * of its group cfs_rq. + */ update_load_avg(se, UPDATE_TG); dequeue_entity_load_avg(cfs_rq, se); @@ -3681,7 +3703,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) /* return excess runtime on last dequeue */ return_cfs_rq_runtime(cfs_rq); - update_cfs_shares(cfs_rq); + update_cfs_shares(se); /* * Now advance min_vruntime if @se was the entity holding it back, @@ -3864,7 +3886,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * Ensure that runnable average is periodically updated. */ update_load_avg(curr, UPDATE_TG); - update_cfs_shares(cfs_rq); + update_cfs_shares(curr); #ifdef CONFIG_SCHED_HRTICK /* @@ -4761,7 +4783,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) break; update_load_avg(se, UPDATE_TG); - update_cfs_shares(cfs_rq); + update_cfs_shares(se); } if (!se) @@ -4820,7 +4842,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) break; update_load_avg(se, UPDATE_TG); - update_cfs_shares(cfs_rq); + update_cfs_shares(se); } if (!se) @@ -9362,8 +9384,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) /* Possible calls to update_curr() need rq clock */ update_rq_clock(rq); - for_each_sched_entity(se) - update_cfs_shares(group_cfs_rq(se)); + for_each_sched_entity(se) { + update_load_avg(se, UPDATE_TG); + update_cfs_shares(se); + } raw_spin_unlock_irqrestore(&rq->lock, flags); } -- cgit v1.2.3 From b8fd8423697b9ec729c5bb91737faad84ae19985 Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Wed, 11 Jan 2017 11:29:47 +0000 Subject: sched/fair: Explain why MIN_SHARES isn't scaled in calc_cfs_shares() Signed-off-by: Dietmar Eggemann Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Paul Turner Cc: Peter Zijlstra Cc: Samuel Thibault Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/e9a4d858-bcf3-36b9-e3a9-449953e34569@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 2b866a279bdf..274c747a01ce 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2657,6 +2657,18 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) if (tg_weight) shares /= tg_weight; + /* + * MIN_SHARES has to be unscaled here to support per-CPU partitioning + * of a group with small tg->shares value. It is a floor value which is + * assigned as a minimum load.weight to the sched_entity representing + * the group on a CPU. + * + * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 + * on an 8-core system with 8 tasks each runnable on one CPU shares has + * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In + * case no task is runnable on a CPU MIN_SHARES=2 should be returned + * instead of 0. + */ if (shares < MIN_SHARES) shares = MIN_SHARES; if (shares > tg->shares) -- cgit v1.2.3 From e33a9bba85a869b85fd0a7f16e21a5ec8977e325 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 7 Dec 2016 15:48:41 -0500 Subject: sched/core: move IO scheduling accounting from io_schedule_timeout() into scheduler For an interface to support blocking for IOs, it must call io_schedule() instead of schedule(). This makes it tedious to add IO blocking to existing interfaces as the switching between schedule() and io_schedule() is often buried deep. 
As we already have a way to mark the task as IO scheduling, this can be made easier by separating out io_schedule() into multiple steps so that IO schedule preparation can be performed before invoking a blocking interface and the actual accounting happens inside the scheduler. io_schedule_timeout() does the following three things prior to calling schedule_timeout(). 1. Mark the task as scheduling for IO. 2. Flush out plugged IOs. 3. Account the IO scheduling. done close to the actual scheduling. This patch moves #3 into the scheduler so that later patches can separate out preparation and finish steps from io_schedule(). Patch-originally-by: Peter Zijlstra Signed-off-by: Tejun Heo Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: adilger.kernel@dilger.ca Cc: akpm@linux-foundation.org Cc: axboe@kernel.dk Cc: jack@suse.com Cc: kernel-team@fb.com Cc: mingbo@fb.com Cc: tytso@mit.edu Link: http://lkml.kernel.org/r/20161207204841.GA22296@htj.duckdns.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 61 insertions(+), 7 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 96a4267e6020..9fd37169b302 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2089,11 +2089,24 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) p->sched_contributes_to_load = !!task_contributes_to_load(p); p->state = TASK_WAKING; + if (p->in_iowait) { + delayacct_blkio_end(); + atomic_dec(&task_rq(p)->nr_iowait); + } + cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); if (task_cpu(p) != cpu) { wake_flags |= WF_MIGRATED; set_task_cpu(p, cpu); } + +#else /* CONFIG_SMP */ + + if (p->in_iowait) { + delayacct_blkio_end(); + atomic_dec(&task_rq(p)->nr_iowait); + } + #endif /* CONFIG_SMP */ ttwu_queue(p, cpu, wake_flags); @@ -2143,8 +2156,13 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) trace_sched_waking(p); - if (!task_on_rq_queued(p)) + if (!task_on_rq_queued(p)) { + if (p->in_iowait) { + delayacct_blkio_end(); + atomic_dec(&rq->nr_iowait); + } ttwu_activate(rq, p, ENQUEUE_WAKEUP); + } ttwu_do_wakeup(rq, p, 0, rf); ttwu_stat(p, smp_processor_id(), 0); @@ -2956,6 +2974,36 @@ unsigned long long nr_context_switches(void) return sum; } +/* + * IO-wait accounting, and how its mostly bollocks (on SMP). + * + * The idea behind IO-wait account is to account the idle time that we could + * have spend running if it were not for IO. That is, if we were to improve the + * storage performance, we'd have a proportional reduction in IO-wait time. + * + * This all works nicely on UP, where, when a task blocks on IO, we account + * idle time as IO-wait, because if the storage were faster, it could've been + * running and we'd not be idle. + * + * This has been extended to SMP, by doing the same for each CPU. This however + * is broken. + * + * Imagine for instance the case where two tasks block on one CPU, only the one + * CPU will have IO-wait accounted, while the other has regular idle. Even + * though, if the storage were faster, both could've ran at the same time, + * utilising both CPUs. + * + * This means, that when looking globally, the current IO-wait accounting on + * SMP is a lower bound, by reason of under accounting. + * + * Worse, since the numbers are provided per CPU, they are sometimes + * interpreted per CPU, and that is nonsensical. 
A blocked task isn't strictly + * associated with any one particular CPU, it can wake to another CPU than it + * blocked on. This means the per CPU IO-wait number is meaningless. + * + * Task CPU affinities can make all that even more 'interesting'. + */ + unsigned long nr_iowait(void) { unsigned long i, sum = 0; @@ -2966,6 +3014,13 @@ unsigned long nr_iowait(void) return sum; } +/* + * Consumers of these two interfaces, like for example the cpufreq menu + * governor are using nonsensical data. Boosting frequency for a CPU that has + * IO-wait which might not even end up running the task when it does become + * runnable. + */ + unsigned long nr_iowait_cpu(int cpu) { struct rq *this = cpu_rq(cpu); @@ -3377,6 +3432,11 @@ static void __sched notrace __schedule(bool preempt) deactivate_task(rq, prev, DEQUEUE_SLEEP); prev->on_rq = 0; + if (prev->in_iowait) { + atomic_inc(&rq->nr_iowait); + delayacct_blkio_start(); + } + /* * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain @@ -5075,19 +5135,13 @@ EXPORT_SYMBOL_GPL(yield_to); long __sched io_schedule_timeout(long timeout) { int old_iowait = current->in_iowait; - struct rq *rq; long ret; current->in_iowait = 1; blk_schedule_flush_plug(current); - delayacct_blkio_start(); - rq = raw_rq(); - atomic_inc(&rq->nr_iowait); ret = schedule_timeout(timeout); current->in_iowait = old_iowait; - atomic_dec(&rq->nr_iowait); - delayacct_blkio_end(); return ret; } -- cgit v1.2.3 From 10ab56434f2f633a51e432ee8b7c29e12438e163 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Oct 2016 12:58:10 -0400 Subject: sched/core: Separate out io_schedule_prepare() and io_schedule_finish() Now that IO schedule accounting is done inside __schedule(), io_schedule() can be split into three steps - prep, schedule, and finish - where the schedule part doesn't need any special annotation. This allows marking a sleep as iowait by simply wrapping an existing blocking function with io_schedule_prepare() and io_schedule_finish(). Because task_struct->in_iowait is single bit, the caller of io_schedule_prepare() needs to record and the pass its state to io_schedule_finish() to be safe regarding nesting. While this isn't the prettiest, these functions are mostly gonna be used by core functions and we don't want to use more space for ->in_iowait. While at it, as it's simple to do now, reimplement io_schedule() without unnecessarily going through io_schedule_timeout(). 
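A sketch of how a caller can now mark an arbitrary blocking region as iowait (struct my_device and my_wait_for_device() are invented; the prepare/finish calls are the ones this patch adds):

	struct my_device;
	void my_wait_for_device(struct my_device *dev);	/* ends up in schedule() */

	static void my_wait_for_device_io(struct my_device *dev)
	{
		int token;

		token = io_schedule_prepare();	/* mark iowait, flush the block plug */
		my_wait_for_device(dev);
		io_schedule_finish(token);	/* restore the previous ->in_iowait */
	}

Because ->in_iowait is a single bit, the token returned by io_schedule_prepare() carries the previous value so nested uses restore it correctly.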
Signed-off-by: Tejun Heo Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Jens Axboe Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: adilger.kernel@dilger.ca Cc: jack@suse.com Cc: kernel-team@fb.com Cc: mingbo@fb.com Cc: tytso@mit.edu Link: http://lkml.kernel.org/r/1477673892-28940-3-git-send-email-tj@kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched.h | 8 +++----- kernel/sched/core.c | 33 ++++++++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 94a48bb58297..a8daed914eef 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -461,12 +461,10 @@ extern signed long schedule_timeout_idle(signed long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); +extern int __must_check io_schedule_prepare(void); +extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); - -static inline void io_schedule(void) -{ - io_schedule_timeout(MAX_SCHEDULE_TIMEOUT); -} +extern void io_schedule(void); void __noreturn do_task_dead(void); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9fd37169b302..49ce1cb3d320 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5128,25 +5128,48 @@ out_irq: } EXPORT_SYMBOL_GPL(yield_to); +int io_schedule_prepare(void) +{ + int old_iowait = current->in_iowait; + + current->in_iowait = 1; + blk_schedule_flush_plug(current); + + return old_iowait; +} + +void io_schedule_finish(int token) +{ + current->in_iowait = token; +} + /* * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. */ long __sched io_schedule_timeout(long timeout) { - int old_iowait = current->in_iowait; + int token; long ret; - current->in_iowait = 1; - blk_schedule_flush_plug(current); - + token = io_schedule_prepare(); ret = schedule_timeout(timeout); - current->in_iowait = old_iowait; + io_schedule_finish(token); return ret; } EXPORT_SYMBOL(io_schedule_timeout); +void io_schedule(void) +{ + int token; + + token = io_schedule_prepare(); + schedule(); + io_schedule_finish(token); +} +EXPORT_SYMBOL(io_schedule); + /** * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. -- cgit v1.2.3 From 1460cb65a10f6c7a6e3a1c76513338861a0a43b6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Oct 2016 12:58:11 -0400 Subject: locking/mutex, sched/wait: Add mutex_lock_io() We sometimes end up propagating IO blocking through mutexes; however, because there currently is no way of annotating mutex sleeps as iowait, there are cases where iowait and /proc/stat:procs_blocked report misleading numbers obscuring the actual state of the system. This patch adds mutex_lock_io() so that mutex sleeps can be marked as iowait in those cases. 
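A hypothetical user (names invented): a mutex that effectively stands in for storage work is taken with the new helper so the sleep is charged as iowait rather than plain blocked time.

	static DEFINE_MUTEX(my_journal_mutex);

	static void my_checkpoint(void)
	{
		mutex_lock_io(&my_journal_mutex);	/* sleeping here counts as iowait */
		/* ... issue and wait for the journal IO ... */
		mutex_unlock(&my_journal_mutex);
	}

The jbd2 patch further down applies exactly this to journal->j_checkpoint_mutex.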
Signed-off-by: Tejun Heo Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Jens Axboe Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: adilger.kernel@dilger.ca Cc: jack@suse.com Cc: kernel-team@fb.com Cc: mingbo@fb.com Cc: tytso@mit.edu Link: http://lkml.kernel.org/r/1477673892-28940-4-git-send-email-tj@kernel.org Signed-off-by: Ingo Molnar --- include/linux/mutex.h | 4 ++++ kernel/locking/mutex.c | 24 ++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/include/linux/mutex.h b/include/linux/mutex.h index b97870f2debd..980ba16b8749 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -156,10 +156,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass); extern int __must_check mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass); +extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); #define mutex_lock(lock) mutex_lock_nested(lock, 0) #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) +#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0) #define mutex_lock_nest_lock(lock, nest_lock) \ do { \ @@ -171,11 +173,13 @@ do { \ extern void mutex_lock(struct mutex *lock); extern int __must_check mutex_lock_interruptible(struct mutex *lock); extern int __must_check mutex_lock_killable(struct mutex *lock); +extern void mutex_lock_io(struct mutex *lock); # define mutex_lock_nested(lock, subclass) mutex_lock(lock) # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) +# define mutex_lock_nest_io(lock, nest_lock) mutex_io(lock) #endif /* diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 9b349619f431..8464a5cbab97 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -783,6 +783,20 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) } EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); +void __sched +mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) +{ + int token; + + might_sleep(); + + token = io_schedule_prepare(); + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, + subclass, NULL, _RET_IP_, NULL, 0); + io_schedule_finish(token); +} +EXPORT_SYMBOL_GPL(mutex_lock_io_nested); + static inline int ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { @@ -950,6 +964,16 @@ int __sched mutex_lock_killable(struct mutex *lock) } EXPORT_SYMBOL(mutex_lock_killable); +void __sched mutex_lock_io(struct mutex *lock) +{ + int token; + + token = io_schedule_prepare(); + mutex_lock(lock); + io_schedule_finish(token); +} +EXPORT_SYMBOL_GPL(mutex_lock_io); + static noinline void __sched __mutex_lock_slowpath(struct mutex *lock) { -- cgit v1.2.3 From 6fa7aa50b2c48400bbd045daf3a2498882eb0596 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Oct 2016 12:58:12 -0400 Subject: fs/jbd2, locking/mutex, sched/wait: Use mutex_lock_io() for journal->j_checkpoint_mutex When an ext4 fs is bogged down by a lot of metadata IOs (in the reported case, it was deletion of millions of files, but any massive amount of journal writes would do), after the journal is filled up, tasks which try to access the filesystem and aren't currently performing the journal writes end up waiting in 
__jbd2_log_wait_for_space() for journal->j_checkpoint_mutex. Because those mutex sleeps aren't marked as iowait, this condition can lead to misleadingly low iowait and /proc/stat:procs_blocked. While iowait propagation is far from strict, this condition can be triggered fairly easily and annotating these sleeps correctly helps initial diagnosis quite a bit. Use the new mutex_lock_io() for journal->j_checkpoint_mutex so that these sleeps are properly marked as iowait. Reported-by: Mingbo Wan Signed-off-by: Tejun Heo Signed-off-by: Peter Zijlstra (Intel) Cc: Andreas Dilger Cc: Andrew Morton Cc: Jan Kara Cc: Jens Axboe Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Theodore Ts'o Cc: Thomas Gleixner Cc: kernel-team@fb.com Link: http://lkml.kernel.org/r/1477673892-28940-5-git-send-email-tj@kernel.org Signed-off-by: Ingo Molnar --- fs/jbd2/commit.c | 2 +- fs/jbd2/journal.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 8c514367ba5a..b6b194ec1b4f 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -393,7 +393,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) /* Do we need to erase the effects of a prior jbd2_journal_flush? */ if (journal->j_flags & JBD2_FLUSHED) { jbd_debug(3, "super block updated\n"); - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); /* * We hold j_checkpoint_mutex so tail cannot change under us. * We don't need any special data guarantees for writing sb diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a097048ed1a3..d8a5d0a08f07 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -944,7 +944,7 @@ out: */ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) { - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); if (tid_gt(tid, journal->j_tail_sequence)) __jbd2_update_log_tail(journal, tid, block); mutex_unlock(&journal->j_checkpoint_mutex); @@ -1304,7 +1304,7 @@ static int journal_reset(journal_t *journal) journal->j_flags |= JBD2_FLUSHED; } else { /* Lock here to make assertions happy... */ - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); /* * Update log tail information. 
We use REQ_FUA since new * transaction will start reusing journal space and so we @@ -1691,7 +1691,7 @@ int jbd2_journal_destroy(journal_t *journal) spin_lock(&journal->j_list_lock); while (journal->j_checkpoint_transactions != NULL) { spin_unlock(&journal->j_list_lock); - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); err = jbd2_log_do_checkpoint(journal); mutex_unlock(&journal->j_checkpoint_mutex); /* @@ -1713,7 +1713,7 @@ int jbd2_journal_destroy(journal_t *journal) if (journal->j_sb_buffer) { if (!is_journal_aborted(journal)) { - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); write_lock(&journal->j_state_lock); journal->j_tail_sequence = @@ -1955,7 +1955,7 @@ int jbd2_journal_flush(journal_t *journal) spin_lock(&journal->j_list_lock); while (!err && journal->j_checkpoint_transactions != NULL) { spin_unlock(&journal->j_list_lock); - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); err = jbd2_log_do_checkpoint(journal); mutex_unlock(&journal->j_checkpoint_mutex); spin_lock(&journal->j_list_lock); @@ -1965,7 +1965,7 @@ int jbd2_journal_flush(journal_t *journal) if (is_journal_aborted(journal)) return -EIO; - mutex_lock(&journal->j_checkpoint_mutex); + mutex_lock_io(&journal->j_checkpoint_mutex); if (!err) { err = jbd2_cleanup_journal_tail(journal); if (err < 0) { -- cgit v1.2.3 From f21860bac05b609d71757338361d26209ff0759b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 14 Jan 2017 17:11:36 +0100 Subject: locking/mutex, sched/wait: Fix the mutex_lock_io_nested() define Mike noticed this bogosity: > > +# define mutex_lock_nest_io(lock, nest_lock) mutex_io(lock) > ^^^^^^^^^^^^^^ typo This new locking API is not used yet, so this didn't trigger in testing. Fix it. Reported-by: Mike Galbraith Cc: Tejun Heo Cc: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Jens Axboe Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: adilger.kernel@dilger.ca Cc: jack@suse.com Cc: kernel-team@fb.com Cc: mingbo@fb.com Cc: tytso@mit.edu Signed-off-by: Ingo Molnar --- include/linux/mutex.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 980ba16b8749..7fffbfcd5430 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -179,7 +179,7 @@ extern void mutex_lock_io(struct mutex *lock); # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -# define mutex_lock_nest_io(lock, nest_lock) mutex_io(lock) +# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock) #endif /* -- cgit v1.2.3 From 02cfdc95a0104fa5812d855d1e4ec687312aaa6f Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Wed, 18 Jan 2017 14:30:29 -0800 Subject: sched/x86: Remove unnecessary TBM3 check to update topology Scheduling to the max performance core is enabled by default for Turbo Boost Max Technology 3.0 capable platforms. Remove the useless sysctl_sched_itmt_enabled check before updating the sched topology to add the prioritized core scheduling flag.
Signed-off-by: Tim Chen Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Srinivas Pandruvada Cc: Thomas Gleixner Cc: bp@suse.de Cc: jolsa@redhat.com Cc: linux-acpi@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: rjw@rjwysocki.net Link: http://lkml.kernel.org/r/1484778629-4404-1-git-send-email-tim.c.chen@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/itmt.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c index cb9c1ed1d391..f73f475d0573 100644 --- a/arch/x86/kernel/itmt.c +++ b/arch/x86/kernel/itmt.c @@ -132,10 +132,8 @@ int sched_set_itmt_support(void) sysctl_sched_itmt_enabled = 1; - if (sysctl_sched_itmt_enabled) { - x86_topology_update = true; - rebuild_sched_domains(); - } + x86_topology_update = true; + rebuild_sched_domains(); mutex_unlock(&itmt_update_mutex); -- cgit v1.2.3 From acb04058de49458010c44bb35b849d45113fd668 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Jan 2017 14:36:33 +0100 Subject: sched/clock: Fix hotplug crash Mike reported that he could trigger the WARN_ON_ONCE() in set_sched_clock_stable() using hotplug. This exposed a fundamental problem with the interface: we should never mark the TSC stable if we ever find it to be unstable. Therefore set_sched_clock_stable() is a broken interface. The reason it existed is that not having it is a pain: it means all relevant architecture code needs to call clear_sched_clock_stable() where appropriate. Of the three architectures that select HAVE_UNSTABLE_SCHED_CLOCK, ia64 and parisc are trivial in that they never called set_sched_clock_stable(), so add an unconditional call to clear_sched_clock_stable() to them. For x86 the story is a lot more involved, and what this patch tries to do is ensure we preserve the status quo. So even if Cyrix or Transmeta have a usable TSC, they never called set_sched_clock_stable(), so they now get explicitly marked unstable. Reported-by: Mike Galbraith Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 9881b024b7d7 ("sched/clock: Delay switching sched_clock to stable") Link: http://lkml.kernel.org/r/20170119133633.GB6536@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- arch/ia64/kernel/setup.c | 2 ++ arch/parisc/kernel/setup.c | 2 ++ arch/x86/kernel/cpu/amd.c | 6 ++++-- arch/x86/kernel/cpu/centaur.c | 6 ++++-- arch/x86/kernel/cpu/common.c | 3 +++ arch/x86/kernel/cpu/cyrix.c | 2 ++ arch/x86/kernel/cpu/intel.c | 6 ++++-- arch/x86/kernel/cpu/transmeta.c | 3 +++ arch/x86/kernel/kvmclock.c | 2 +- include/linux/sched.h | 1 - kernel/sched/clock.c | 29 ++++++++--------------------- 11 files changed, 33 insertions(+), 29 deletions(-) diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 7ec7acc844c2..c483ece3eb84 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -619,6 +619,8 @@ setup_arch (char **cmdline_p) check_sal_cache_flush(); #endif paging_init(); + + clear_sched_clock_stable(); } /* diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 2e66a887788e..068ed3607bac 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -36,6 +36,7 @@ #undef PCI_DEBUG #include #include +#include #include #include @@ -176,6 +177,7 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; /* we use do_take_over_console() later !
*/ #endif + clear_sched_clock_stable(); } /* diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 71cae73a5076..1bb253a6ee4d 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -548,8 +548,10 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - if (!check_tsc_unstable()) - set_sched_clock_stable(); + if (check_tsc_unstable()) + clear_sched_clock_stable(); + } else { + clear_sched_clock_stable(); } /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 1661d8ec9280..2c234a6d94c4 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,5 +1,5 @@ -#include -#include + +#include #include #include @@ -104,6 +104,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif + + clear_sched_clock_stable(); } static void init_centaur(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index dc1697ca5191..3457186275a0 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -83,6 +83,7 @@ static void default_init(struct cpuinfo_x86 *c) strcpy(c->x86_model_id, "386"); } #endif + clear_sched_clock_stable(); } static const struct cpu_dev default_cpu = { @@ -1055,6 +1056,8 @@ static void identify_cpu(struct cpuinfo_x86 *c) */ if (this_cpu->c_init) this_cpu->c_init(c); + else + clear_sched_clock_stable(); /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index bd9dcd6b712d..47416f959a48 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "cpu.h" @@ -183,6 +184,7 @@ static void early_init_cyrix(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; } + clear_sched_clock_stable(); } static void init_cyrix(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fcd484d2bb03..26eaff4907b2 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -124,8 +124,10 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - if (!check_tsc_unstable()) - set_sched_clock_stable(); + if (check_tsc_unstable()) + clear_sched_clock_stable(); + } else { + clear_sched_clock_stable(); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 34178564be2a..c1ea5b999839 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -14,6 +15,8 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) if (xlvl >= 0x80860001) c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); } + + clear_sched_clock_stable(); } static void init_transmeta(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 2a5cafdf8808..542710b99f52 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -107,12 +107,12 @@ static inline void kvm_sched_clock_init(bool stable) { if (!stable) { pv_time_ops.sched_clock = kvm_clock_read; + 
clear_sched_clock_stable(); return; } kvm_sched_clock_offset = kvm_clock_read(); pv_time_ops.sched_clock = kvm_sched_clock_read; - set_sched_clock_stable(); printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n", kvm_sched_clock_offset); diff --git a/include/linux/sched.h b/include/linux/sched.h index a8daed914eef..69e6852fede1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2547,7 +2547,6 @@ extern void sched_clock_init_late(void); * is reliable after all: */ extern int sched_clock_stable(void); -extern void set_sched_clock_stable(void); extern void clear_sched_clock_stable(void); extern void sched_clock_tick(void); diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 7713b2b53f61..ad64efe41722 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -83,8 +83,15 @@ void sched_clock_init(void) } #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +/* + * We must start with !__sched_clock_stable because the unstable -> stable + * transition is accurate, while the stable -> unstable transition is not. + * + * Similarly we start with __sched_clock_stable_early, thereby assuming we + * will become stable, such that there's only a single 1 -> 0 transition. + */ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); -static int __sched_clock_stable_early; +static int __sched_clock_stable_early = 1; /* * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset @@ -132,24 +139,6 @@ static void __set_sched_clock_stable(void) tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); } -void set_sched_clock_stable(void) -{ - __sched_clock_stable_early = 1; - - smp_mb(); /* matches sched_clock_init_late() */ - - /* - * This really should only be called early (before - * sched_clock_init_late()) when guestimating our sched_clock() is - * solid. - * - * After that we test stability and we can negate our guess using - * clear_sched_clock_stable, possibly from a watchdog. - */ - if (WARN_ON_ONCE(sched_clock_running == 2)) - __set_sched_clock_stable(); -} - static void __clear_sched_clock_stable(struct work_struct *work) { struct sched_clock_data *scd = this_scd(); @@ -199,8 +188,6 @@ void sched_clock_init_late(void) if (__sched_clock_stable_early) __set_sched_clock_stable(); - else - __clear_sched_clock_stable(NULL); } /* -- cgit v1.2.3 From 3a09b8d45b3c05d49e581831de626927c37599f8 Mon Sep 17 00:00:00 2001 From: Zhou Chengming Date: Sun, 22 Jan 2017 15:22:35 +0800 Subject: sched/Documentation/sched-rt-group: Fix incorrect example I feel that the example given in the document to show the possibility of task starvation of configurable period is wrong. The example says group A and B both have 50% bandwidth, and a while (1) loop in A will run for the full period of B and can starve B's tasks. So I think the runtime of group A should be 50000us, then the period and runtime of group B should be 50000us and 25000us. 
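Worked out explicitly (editor's arithmetic, not part of the patch), the corrected numbers keep both groups at 50% bandwidth while still leaving the starvation window the example is meant to demonstrate:

	group A: runtime/period = 50000us / 100000us = 50%
	group B: runtime/period = 25000us /  50000us = 50%

	runtime of A (50000us) == period of B (50000us), so a single
	budget-exhausting burst in A spans one entire period of B.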
Signed-off-by: Zhou Chengming Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: corbet@lwn.net Cc: iamyooon@gmail.com Cc: linux-doc@vger.kernel.org Cc: lizefan@huawei.com Link: http://lkml.kernel.org/r/1485069755-44287-1-git-send-email-zhouchengming1@huawei.com Signed-off-by: Ingo Molnar --- Documentation/scheduler/sched-rt-group.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt index a03f0d944fe6..d8fce3e78457 100644 --- a/Documentation/scheduler/sched-rt-group.txt +++ b/Documentation/scheduler/sched-rt-group.txt @@ -158,11 +158,11 @@ as its prone to starvation without deadline scheduling. Consider two sibling groups A and B; both have 50% bandwidth, but A's period is twice the length of B's. -* group A: period=100000us, runtime=10000us - - this runs for 0.01s once every 0.1s +* group A: period=100000us, runtime=50000us + - this runs for 0.05s once every 0.1s -* group B: period= 50000us, runtime=10000us - - this runs for 0.01s twice every 0.1s (or once every 0.05 sec). +* group B: period= 50000us, runtime=25000us + - this runs for 0.025s twice every 0.1s (or once every 0.05 sec). This means that currently a while (1) loop in A will run for the full period of B and can starve B's tasks (assuming they are of lower priority) for a whole -- cgit v1.2.3 From 49ee576809d837442624ac18804b07943267cd57 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 19 Jan 2017 18:44:08 +0100 Subject: sched/core: Optimize pick_next_task() for idle_sched_class Steve noticed that when we switch from IDLE to SCHED_OTHER we fail to take the shortcut, even though all runnable tasks are of the fair class, because prev->sched_class != &fair_sched_class. Since I reworked the put_prev_task() stuff, we don't really care about prev->class here, so removing that condition will allow this case. This increases the likely case from 78% to 98% correct for Steve's workload. 
Reported-by: Steven Rostedt (VMware) Tested-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170119174408.GN6485@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 49ce1cb3d320..51ca21ef7ae5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3321,15 +3321,14 @@ static inline void schedule_debug(struct task_struct *prev) static inline struct task_struct * pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { - const struct sched_class *class = &fair_sched_class; + const struct sched_class *class; struct task_struct *p; /* * Optimization: we know that if all tasks are in * the fair class we can call that function directly: */ - if (likely(prev->sched_class == class && - rq->nr_running == rq->cfs.h_nr_running)) { + if (likely(rq->nr_running == rq->cfs.h_nr_running)) { p = fair_sched_class.pick_next_task(rq, prev, rf); if (unlikely(p == RETRY_TASK)) goto again; -- cgit v1.2.3 From 1b1d62254df0fe42a711eb71948f915918987790 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Jan 2017 16:05:55 +0100 Subject: sched/core: Add missing update_rq_clock() call in sched_move_task() Bug was noticed via this warning: WARNING: CPU: 6 PID: 1 at kernel/sched/sched.h:804 detach_task_cfs_rq+0x8e8/0xb80 rq->clock_update_flags < RQCF_ACT_SKIP Modules linked in: CPU: 6 PID: 1 Comm: systemd Not tainted 4.10.0-rc5-00140-g0874170baf55-dirty #1 Hardware name: Supermicro SYS-4048B-TRFT/X10QBi, BIOS 1.0 04/11/2014 Call Trace: dump_stack+0x4d/0x65 __warn+0xcb/0xf0 warn_slowpath_fmt+0x5f/0x80 detach_task_cfs_rq+0x8e8/0xb80 ? allocate_cgrp_cset_links+0x59/0x80 task_change_group_fair+0x27/0x150 sched_change_group+0x48/0xf0 sched_move_task+0x53/0x150 cpu_cgroup_attach+0x36/0x70 cgroup_taskset_migrate+0x175/0x300 cgroup_migrate+0xab/0xd0 cgroup_attach_task+0xf0/0x190 __cgroup_procs_write+0x1ed/0x2f0 cgroup_procs_write+0x14/0x20 cgroup_file_write+0x3f/0x100 kernfs_fop_write+0x104/0x180 __vfs_write+0x37/0x140 vfs_write+0xb8/0x1b0 SyS_write+0x55/0xc0 do_syscall_64+0x61/0x170 entry_SYSCALL64_slow_path+0x25/0x25 Reported-by: Ingo Molnar Reported-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 51ca21ef7ae5..7f983e83a353 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8074,6 +8074,7 @@ void sched_move_task(struct task_struct *tsk) struct rq *rq; rq = task_rq_lock(tsk, &rf); + update_rq_clock(rq); running = task_current(rq, tsk); queued = task_on_rq_queued(tsk); -- cgit v1.2.3 From 4d25b35ea3729affd37d69c78191ce6f92766e1a Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 26 Oct 2016 16:15:44 +0100 Subject: sched/fair: Restore previous rq_flags when migrating tasks in hotplug __migrate_task() can return with a different runqueue locked than the one we passed as an argument. So that we can repin the lock in migrate_tasks() (and keep the update_rq_clock() bit) we need to restore the old rq_flags before repinning. 
Note that it wouldn't be correct to change move_queued_task() to repin because of the change of runqueue and the fact that having an up-to-date clock on the initial rq doesn't mean the new rq has one too. Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f983e83a353..3b248b03ad8f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5608,7 +5608,7 @@ static void migrate_tasks(struct rq *dead_rq) { struct rq *rq = dead_rq; struct task_struct *next, *stop = rq->stop; - struct rq_flags rf; + struct rq_flags rf, old_rf; int dest_cpu; /* @@ -5669,6 +5669,13 @@ static void migrate_tasks(struct rq *dead_rq) continue; } + /* + * __migrate_task() may return with a different + * rq->lock held and a new cookie in 'rf', but we need + * to preserve rf::clock_update_flags for 'dead_rq'. + */ + old_rf = rf; + /* Find suitable destination for @next, with force if needed. */ dest_cpu = select_fallback_rq(dead_rq->cpu, next); @@ -5677,6 +5684,7 @@ static void migrate_tasks(struct rq *dead_rq) raw_spin_unlock(&rq->lock); rq = dead_rq; raw_spin_lock(&rq->lock); + rf = old_rf; } raw_spin_unlock(&next->pi_lock); } -- cgit v1.2.3 From 92c99ac829931abba33107e09358447c8ad6bd32 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 24 Jan 2017 14:11:34 -0700 Subject: sched/core: Fix &rd->rto_mask memory leak If cpudl_init() fails, the memory allocated for &rd->rto_mask needs to be freed, which is what this patch does. Signed-off-by: Mathieu Poirier Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1485292295-21298-1-git-send-email-mathieu.poirier@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3b248b03ad8f..17d1df6e2dbb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5976,7 +5976,7 @@ static int init_rootdomain(struct root_domain *rd) init_dl_bw(&rd->dl_bw); if (cpudl_init(&rd->cpudl) != 0) - goto free_dlo_mask; + goto free_rto_mask; if (cpupri_init(&rd->cpupri) != 0) goto free_rto_mask; -- cgit v1.2.3 From 4b12db939166042eb78e592bdf94ef113c14e379 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 24 Jan 2017 14:11:35 -0700 Subject: sched/core: Fix &rd->cpudl memory leak While in the process of initialising a root domain, if cpupri_init() fails, the memory allocated in cpudl_init() is not reclaimed. Adding a new goto target to clean up the previous initialisation of the root_domain's cpudl structure reclaims said memory.
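These two leak fixes restore the usual stacked-unwind error handling, sketched below with made-up allocations (struct thing and its a/b/c members are placeholders, not the real root_domain fields); each label undoes exactly what had succeeded before the failure:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct thing {
		void *a, *b, *c;
	};

	static int init_thing(struct thing *t)
	{
		t->a = kmalloc(64, GFP_KERNEL);
		if (!t->a)
			goto out;
		t->b = kmalloc(64, GFP_KERNEL);
		if (!t->b)
			goto free_a;		/* b failed: unwind a only */
		t->c = kmalloc(64, GFP_KERNEL);
		if (!t->c)
			goto free_b;		/* c failed: unwind b, then a */

		return 0;

	free_b:
		kfree(t->b);
	free_a:
		kfree(t->a);
	out:
		return -ENOMEM;
	}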
Signed-off-by: Mathieu Poirier Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1485292295-21298-2-git-send-email-mathieu.poirier@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 17d1df6e2dbb..d01f9d047397 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5979,9 +5979,11 @@ static int init_rootdomain(struct root_domain *rd) goto free_rto_mask; if (cpupri_init(&rd->cpupri) != 0) - goto free_rto_mask; + goto free_cpudl; return 0; +free_cpudl: + cpudl_cleanup(&rd->cpudl); free_rto_mask: free_cpumask_var(rd->rto_mask); free_dlo_mask: -- cgit v1.2.3 From 619bd4a71874a8fd78eb6ccf9f272c5e98bcc7b7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 24 Jan 2017 15:40:06 +0100 Subject: sched/rt: Add a missing rescheduling point Since the change in commit: fd7a4bed1835 ("sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks") ... we don't reschedule a task under certain circumstances: Let's say task-A, SCHED_OTHER, is running on CPU0 (and it may run only on CPU0) and holds a PI lock. This task is removed from the CPU because it used up its time slice and another SCHED_OTHER task is running. Task-B on CPU1 runs at RT priority and asks for the lock owned by task-A. This results in a priority boost for task-A. Task-B goes to sleep until the lock has been made available. Task-A is already runnable (but not active), so it receives no wakeup. The reality now is that task-A gets on the CPU once the scheduler decides to remove the current task despite the fact that a high-priority task is enqueued and waiting. This may take a long time. The desired behaviour is that CPU0 immediately reschedules after the priority boost which made task-A the task with the highest priority.
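For reference, the boost half of that scenario can be constructed from userspace with a priority-inheritance mutex. The reproducer below is a hypothetical sketch (build with gcc -pthread, run as root); it only sets up the task-A/task-B arrangement and does not by itself prove the missing reschedule:

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <sched.h>
	#include <unistd.h>

	static pthread_mutex_t lock;

	static void *task_b(void *arg)		/* RT waiter: boosts the lock owner */
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_mutexattr_t ma;
		pthread_attr_t ta;
		struct sched_param sp = { .sched_priority = 50 };
		cpu_set_t set;
		pthread_t tb;

		/* PI mutex so the SCHED_OTHER owner inherits the waiter's priority. */
		pthread_mutexattr_init(&ma);
		pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
		pthread_mutex_init(&lock, &ma);

		/* task-A: SCHED_OTHER, pinned to CPU0, holding the PI lock. */
		CPU_ZERO(&set);
		CPU_SET(0, &set);
		sched_setaffinity(0, sizeof(set), &set);
		pthread_mutex_lock(&lock);

		/* task-B: SCHED_FIFO, blocks on the lock and boosts task-A. */
		pthread_attr_init(&ta);
		pthread_attr_setinheritsched(&ta, PTHREAD_EXPLICIT_SCHED);
		pthread_attr_setschedpolicy(&ta, SCHED_FIFO);
		pthread_attr_setschedparam(&ta, &sp);
		pthread_create(&tb, &ta, task_b, NULL);

		sleep(1);			/* stay boosted while other CPU0 load runs */
		pthread_mutex_unlock(&lock);
		pthread_join(tb, NULL);
		return 0;
	}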
Suggested-by: Peter Zijlstra Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Thomas Gleixner Fixes: fd7a4bed1835 ("sched, rt: Convert switched_{from, to}_rt() prio_changed_rt() to balance callbacks") Link: http://lkml.kernel.org/r/20170124144006.29821-1-bigeasy@linutronix.de Signed-off-by: Ingo Molnar --- kernel/sched/deadline.c | 3 +-- kernel/sched/rt.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 491ff663e1b6..27737f34757d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1729,12 +1729,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) #ifdef CONFIG_SMP if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) queue_push_tasks(rq); -#else +#endif if (dl_task(rq->curr)) check_preempt_curr_dl(rq, p, 0); else resched_curr(rq); -#endif } } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 88254be118b0..704f2b89abf1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2198,10 +2198,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) #ifdef CONFIG_SMP if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) queue_push_tasks(rq); -#else +#endif /* CONFIG_SMP */ if (p->prio < rq->curr->prio) resched_curr(rq); -#endif /* CONFIG_SMP */ } } -- cgit v1.2.3 From 93825f2ec736f30e034ab7c9d56b42849c5b00da Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:16 +0100 Subject: jiffies: Reuse TICK_NSEC instead of NSEC_PER_JIFFY NSEC_PER_JIFFY is an ad-hoc redefinition of TICK_NSEC. Let's rather use a unique and well maintained version. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Tony Luck Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-1-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/time/jiffies.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index a4a0e478e44d..7906b3f0c41a 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -27,19 +27,8 @@ #include "timekeeping.h" -/* The Jiffies based clocksource is the lowest common - * denominator clock source which should function on - * all systems. It has the same coarse resolution as - * the timer interrupt frequency HZ and it suffers - * inaccuracies caused by missed or lost timer - * interrupts and the inability for the timer - * interrupt hardware to accuratly tick at the - * requested HZ value. It is also not recommended - * for "tick-less" systems. - */ -#define NSEC_PER_JIFFY ((NSEC_PER_SEC+HZ/2)/HZ) -/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier +/* Since jiffies uses a simple TICK_NSEC multiplier * conversion, the .shift value could be zero. However * this would make NTP adjustments impossible as they are * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to @@ -47,8 +36,8 @@ * amount, and give ntp adjustments in units of 1/2^8 * * The value 8 is somewhat carefully chosen, as anything - * larger can result in overflows. NSEC_PER_JIFFY grows as - * HZ shrinks, so values greater than 8 overflow 32bits when + * larger can result in overflows. TICK_NSEC grows as HZ + * shrinks, so values greater than 8 overflow 32bits when * HZ=100. 
*/ #if HZ < 34 @@ -64,12 +53,23 @@ static u64 jiffies_read(struct clocksource *cs) return (u64) jiffies; } +/* + * The Jiffies based clocksource is the lowest common + * denominator clock source which should function on + * all systems. It has the same coarse resolution as + * the timer interrupt frequency HZ and it suffers + * inaccuracies caused by missed or lost timer + * interrupts and the inability for the timer + * interrupt hardware to accuratly tick at the + * requested HZ value. It is also not recommended + * for "tick-less" systems. + */ static struct clocksource clocksource_jiffies = { .name = "jiffies", .rating = 1, /* lowest valid rating*/ .read = jiffies_read, .mask = CLOCKSOURCE_MASK(32), - .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ + .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */ .shift = JIFFIES_SHIFT, .max_cycles = 10, }; @@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second) shift_hz += cycles_per_tick/2; do_div(shift_hz, cycles_per_tick); /* Calculate nsec_per_tick using shift_hz */ - nsec_per_tick = (u64)NSEC_PER_SEC << 8; + nsec_per_tick = (u64)TICK_NSEC << 8; nsec_per_tick += (u32)shift_hz/2; do_div(nsec_per_tick, (u32)shift_hz); -- cgit v1.2.3 From 07e5f5e353aaa61696c8353d87050994a0c4648a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:17 +0100 Subject: time: Introduce jiffies64_to_nsecs() This will be needed for the cputime_t to nsec conversion. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Tony Luck Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-2-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/jiffies.h | 2 ++ kernel/time/time.c | 10 ++++++++++ kernel/time/timeconst.bc | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 589d14e970ad..624215cebee5 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j) return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; } +extern u64 jiffies64_to_nsecs(u64 j); + extern unsigned long __msecs_to_jiffies(const unsigned int m); #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) /* diff --git a/kernel/time/time.c b/kernel/time/time.c index a3a9a8a029dc..25bdd2504571 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -702,6 +702,16 @@ u64 nsec_to_clock_t(u64 x) #endif } +u64 jiffies64_to_nsecs(u64 j) +{ +#if !(NSEC_PER_SEC % HZ) + return (NSEC_PER_SEC / HZ) * j; +# else + return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN); +#endif +} +EXPORT_SYMBOL(jiffies64_to_nsecs); + /** * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 * diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc index c48688904f9f..f83bbb81600b 100644 --- a/kernel/time/timeconst.bc +++ b/kernel/time/timeconst.bc @@ -98,6 +98,12 @@ define timeconst(hz) { print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n" print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n" print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n" + + cd=gcd(hz,1000000000) + print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n" + print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n" + print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n" + print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n" print "\n" print "#endif /* KERNEL_TIMECONST_H */\n" -- cgit v1.2.3 From 
ba03ce822db234f8acb559de4a317a5c1f95c029 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:18 +0100 Subject: sched/cputime: Remove the unused INIT_CPUTIME macro It's a leftover from removed code. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Tony Luck Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 69e6852fede1..5f60aed37701 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -620,13 +620,6 @@ struct task_cputime { #define prof_exp stime #define sched_exp sum_exec_runtime -#define INIT_CPUTIME \ - (struct task_cputime) { \ - .utime = 0, \ - .stime = 0, \ - .sum_exec_runtime = 0, \ - } - /* * This is the atomic variant of task_cputime, which can be used for * storing and updating task_cputime statistics without locking. -- cgit v1.2.3 From 7fb1327ee9b92fca27662f9b9d60c7c3376d6c69 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:19 +0100 Subject: sched/cputime: Convert kcpustat to nsecs Kernel CPU stats are stored in cputime_t which is an architecture defined type, and hence a bit opaque and requiring accessors and mutators for any operation. Converting them to nsecs simplifies the code and is one step toward the removal of cputime_t in the core code. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Tony Luck Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-4-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/s390/appldata/appldata_os.c | 16 ++++----- drivers/cpufreq/cpufreq.c | 6 ++-- drivers/cpufreq/cpufreq_governor.c | 2 +- drivers/cpufreq/cpufreq_stats.c | 1 - drivers/macintosh/rack-meter.c | 2 +- fs/proc/stat.c | 68 +++++++++++++++++++------------------- fs/proc/uptime.c | 7 ++-- kernel/sched/cpuacct.c | 2 +- kernel/sched/cputime.c | 22 ++++++------ 9 files changed, 61 insertions(+), 65 deletions(-) diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 69b23b25ac34..08b9e942a262 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -113,21 +113,21 @@ static void appldata_get_os_data(void *data) j = 0; for_each_online_cpu(i) { os_data->os_cpu[j].per_cpu_user = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]); os_data->os_cpu[j].per_cpu_nice = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]); os_data->os_cpu[j].per_cpu_system = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]); os_data->os_cpu[j].per_cpu_idle = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]); os_data->os_cpu[j].per_cpu_irq = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]); os_data->os_cpu[j].per_cpu_softirq = - 
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]); os_data->os_cpu[j].per_cpu_iowait = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]); os_data->os_cpu[j].per_cpu_steal = - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]); + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]); os_data->os_cpu[j].cpu_id = i; j++; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cc475eff90b3..3e9b319a2e79 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -132,7 +132,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) u64 cur_wall_time; u64 busy_time; - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + cur_wall_time = jiffies64_to_nsecs(get_jiffies_64()); busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; @@ -143,9 +143,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) idle_time = cur_wall_time - busy_time; if (wall) - *wall = cputime_to_usecs(cur_wall_time); + *wall = div_u64(cur_wall_time, NSEC_PER_USEC); - return cputime_to_usecs(idle_time); + return div_u64(idle_time, NSEC_PER_USEC); } u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 0196467280bd..631bd2c86c5e 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) if (ignore_nice) { u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice); + idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC); j_cdbs->prev_cpu_nice = cur_nice; } diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index ac284e66839c..17048bbec287 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -13,7 +13,6 @@ #include #include #include -#include static DEFINE_SPINLOCK(cpufreq_stats_lock); diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 775527135b93..c114594136d4 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -91,7 +91,7 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu) if (rackmeter_ignore_nice) retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - return retval; + return nsecs_to_cputime64(retval); } static void rackmeter_setup_i2s(struct rackmeter *rm) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index d700c42b3572..44475a44cbf1 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -21,23 +21,23 @@ #ifdef arch_idle_time -static cputime64_t get_idle_time(int cpu) +static u64 get_idle_time(int cpu) { - cputime64_t idle; + u64 idle; idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) - idle += arch_idle_time(cpu); + idle += cputime_to_nsecs(arch_idle_time(cpu)); return idle; } -static cputime64_t get_iowait_time(int cpu) +static u64 get_iowait_time(int cpu) { - cputime64_t iowait; + u64 iowait; iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; if (cpu_online(cpu) && nr_iowait_cpu(cpu)) - iowait += arch_idle_time(cpu); + iowait += cputime_to_nsecs(arch_idle_time(cpu)); return iowait; } @@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int cpu) static u64 get_idle_time(int cpu) { - u64 
idle, idle_time = -1ULL; + u64 idle, idle_usecs = -1ULL; if (cpu_online(cpu)) - idle_time = get_cpu_idle_time_us(cpu, NULL); + idle_usecs = get_cpu_idle_time_us(cpu, NULL); - if (idle_time == -1ULL) + if (idle_usecs == -1ULL) /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; else - idle = usecs_to_cputime64(idle_time); + idle = idle_usecs * NSEC_PER_USEC; return idle; } static u64 get_iowait_time(int cpu) { - u64 iowait, iowait_time = -1ULL; + u64 iowait, iowait_usecs = -1ULL; if (cpu_online(cpu)) - iowait_time = get_cpu_iowait_time_us(cpu, NULL); + iowait_usecs = get_cpu_iowait_time_us(cpu, NULL); - if (iowait_time == -1ULL) + if (iowait_usecs == -1ULL) /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; else - iowait = usecs_to_cputime64(iowait_time); + iowait = iowait_usecs * NSEC_PER_USEC; return iowait; } @@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p, void *v) } sum += arch_irq_stat(); - seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); + seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); seq_putc(p, '\n'); for_each_online_cpu(i) { @@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p, void *v) guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); + 
seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); seq_putc(p, '\n'); } seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 33de567c25af..7981c4ffe787 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -5,23 +5,20 @@ #include #include #include -#include static int uptime_proc_show(struct seq_file *m, void *v) { struct timespec uptime; struct timespec idle; - u64 idletime; u64 nsec; u32 rem; int i; - idletime = 0; + nsec = 0; for_each_possible_cpu(i) - idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; + nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; get_monotonic_boottime(&uptime); - nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; seq_printf(m, "%lu.%02lu %lu.%02lu\n", diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 9add206b5608..f95ab29a45d0 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -297,7 +297,7 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) { seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[stat], - (long long)cputime64_to_clock_t(val[stat])); + (long long)nsec_to_clock_t(val[stat])); } return 0; diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index f7c14cc71d06..61e270926e94 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -75,9 +75,9 @@ static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime) u64 *cpustat = kcpustat_this_cpu->cpustat; cputime_t irq_cputime; - irq_cputime = nsecs_to_cputime64(irqtime) - cpustat[idx]; + irq_cputime = nsecs_to_cputime64(irqtime - cpustat[idx]); irq_cputime = min(irq_cputime, maxtime); - cpustat[idx] += irq_cputime; + cpustat[idx] += cputime_to_nsecs(irq_cputime); return irq_cputime; } @@ -140,7 +140,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime) index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; /* Add user time to cpustat. */ - task_group_account_field(p, index, (__force u64) cputime); + task_group_account_field(p, index, cputime_to_nsecs(cputime)); /* Account for user time used */ acct_account_cputime(p); @@ -162,11 +162,11 @@ void account_guest_time(struct task_struct *p, cputime_t cputime) /* Add guest time to cpustat. */ if (task_nice(p) > 0) { - cpustat[CPUTIME_NICE] += (__force u64) cputime; - cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; + cpustat[CPUTIME_NICE] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_GUEST_NICE] += cputime_to_nsecs(cputime); } else { - cpustat[CPUTIME_USER] += (__force u64) cputime; - cpustat[CPUTIME_GUEST] += (__force u64) cputime; + cpustat[CPUTIME_USER] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_GUEST] += cputime_to_nsecs(cputime); } } @@ -184,7 +184,7 @@ void account_system_index_time(struct task_struct *p, account_group_system_time(p, cputime); /* Add system time to cpustat. 
*/ - task_group_account_field(p, index, (__force u64) cputime); + task_group_account_field(p, index, cputime_to_nsecs(cputime)); /* Account for system time used */ acct_account_cputime(p); @@ -224,7 +224,7 @@ void account_steal_time(cputime_t cputime) { u64 *cpustat = kcpustat_this_cpu->cpustat; - cpustat[CPUTIME_STEAL] += (__force u64) cputime; + cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime); } /* @@ -237,9 +237,9 @@ void account_idle_time(cputime_t cputime) struct rq *rq = this_rq(); if (atomic_read(&rq->nr_iowait) > 0) - cpustat[CPUTIME_IOWAIT] += (__force u64) cputime; + cpustat[CPUTIME_IOWAIT] += cputime_to_nsecs(cputime); else - cpustat[CPUTIME_IDLE] += (__force u64) cputime; + cpustat[CPUTIME_IDLE] += cputime_to_nsecs(cputime); } /* -- cgit v1.2.3 From 564b733c899f4e12a64946658960fce80cad0b05 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:20 +0100 Subject: macintosh/rack-meter: Convert cputime64_t use to u64 cputime_t is going to be removed and replaced by nsecs units, so convert the drivers/macintosh/rack-meter.c use to u64.. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Tony Luck Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-5-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- drivers/macintosh/rack-meter.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index c114594136d4..e199fd6c71ce 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -52,8 +52,8 @@ struct rackmeter_dma { struct rackmeter_cpu { struct delayed_work sniffer; struct rackmeter *rm; - cputime64_t prev_wall; - cputime64_t prev_idle; + u64 prev_wall; + u64 prev_idle; int zero; } ____cacheline_aligned; @@ -81,7 +81,7 @@ static int rackmeter_ignore_nice; /* This is copied from cpufreq_ondemand, maybe we should put it in * a common header somewhere */ -static inline cputime64_t get_cpu_idle_time(unsigned int cpu) +static inline u64 get_cpu_idle_time(unsigned int cpu) { u64 retval; @@ -91,7 +91,7 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu) if (rackmeter_ignore_nice) retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - return nsecs_to_cputime64(retval); + return retval; } static void rackmeter_setup_i2s(struct rackmeter *rm) @@ -217,23 +217,23 @@ static void rackmeter_do_timer(struct work_struct *work) container_of(work, struct rackmeter_cpu, sniffer.work); struct rackmeter *rm = rcpu->rm; unsigned int cpu = smp_processor_id(); - cputime64_t cur_jiffies, total_idle_ticks; - unsigned int total_ticks, idle_ticks; + u64 cur_nsecs, total_idle_nsecs; + u64 total_nsecs, idle_nsecs; int i, offset, load, cumm, pause; - cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); - total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall); - rcpu->prev_wall = cur_jiffies; + cur_nsecs = jiffies64_to_nsecs(get_jiffies_64()); + total_nsecs = cur_nsecs - rcpu->prev_wall; + rcpu->prev_wall = cur_nsecs; - total_idle_ticks = get_cpu_idle_time(cpu); - idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle); - idle_ticks = min(idle_ticks, total_ticks); - rcpu->prev_idle = total_idle_ticks; + total_idle_nsecs = get_cpu_idle_time(cpu); + idle_nsecs = total_idle_nsecs - rcpu->prev_idle; + idle_nsecs = min(idle_nsecs, total_nsecs); 
+ rcpu->prev_idle = total_idle_nsecs; /* We do a very dumb calculation to update the LEDs for now, * we'll do better once we have actual PWM implemented */ - load = (9 * (total_ticks - idle_ticks)) / total_ticks; + load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs); offset = cpu << 3; cumm = 0; @@ -278,7 +278,7 @@ static void rackmeter_init_cpu_sniffer(struct rackmeter *rm) continue; rcpu = &rm->cpu[cpu]; rcpu->prev_idle = get_cpu_idle_time(cpu); - rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64()); + rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64()); schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer, msecs_to_jiffies(CPU_SAMPLING_RATE)); } -- cgit v1.2.3 From 16a6d9be90373fb0b521850cd0185a4d460dd152 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:21 +0100 Subject: sched/cputime: Convert guest time accounting to nsecs (u64) cputime_t is being obsolete and replaced by nsecs units in order to make internal timestamps less opaque and more granular. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Tony Luck Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-6-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- fs/proc/array.c | 6 +++--- include/linux/sched.h | 10 +++++----- kernel/sched/cputime.c | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/fs/proc/array.c b/fs/proc/array.c index 51a4213afa2e..25b54cf0c042 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -402,7 +402,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, unsigned long cmin_flt = 0, cmaj_flt = 0; unsigned long min_flt = 0, maj_flt = 0; cputime_t cutime, cstime, utime, stime; - cputime_t cgtime, gtime; + u64 cgtime, gtime; unsigned long rsslim = 0; char tcomm[sizeof(task->comm)]; unsigned long flags; @@ -542,8 +542,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, seq_put_decimal_ull(m, " ", task->rt_priority); seq_put_decimal_ull(m, " ", task->policy); seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task)); - seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime)); - seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime)); + seq_put_decimal_ull(m, " ", nsec_to_clock_t(gtime)); + seq_put_decimal_ll(m, " ", nsec_to_clock_t(cgtime)); if (mm && permitted) { seq_put_decimal_ull(m, " ", mm->start_data); diff --git a/include/linux/sched.h b/include/linux/sched.h index 5f60aed37701..252ff25983c8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -774,8 +774,8 @@ struct signal_struct { */ seqlock_t stats_lock; cputime_t utime, stime, cutime, cstime; - cputime_t gtime; - cputime_t cgtime; + u64 gtime; + u64 cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; @@ -1658,7 +1658,7 @@ struct task_struct { #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME cputime_t utimescaled, stimescaled; #endif - cputime_t gtime; + u64 gtime; struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqcount_t vtime_seqcount; @@ -2254,7 +2254,7 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime); -extern cputime_t task_gtime(struct task_struct *t); +extern u64 task_gtime(struct 
task_struct *t); #else static inline void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime) @@ -2263,7 +2263,7 @@ static inline void task_cputime(struct task_struct *t, *stime = t->stime; } -static inline cputime_t task_gtime(struct task_struct *t) +static inline u64 task_gtime(struct task_struct *t) { return t->gtime; } diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 61e270926e94..8bcd98e2b821 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -158,7 +158,7 @@ void account_guest_time(struct task_struct *p, cputime_t cputime) /* Add guest time to process. */ p->utime += cputime; account_group_user_time(p, cputime); - p->gtime += cputime; + p->gtime += cputime_to_nsecs(cputime); /* Add guest time to cpustat. */ if (task_nice(p) > 0) { @@ -824,10 +824,10 @@ void vtime_init_idle(struct task_struct *t, int cpu) local_irq_restore(flags); } -cputime_t task_gtime(struct task_struct *t) +u64 task_gtime(struct task_struct *t) { unsigned int seq; - cputime_t gtime; + u64 gtime; if (!vtime_accounting_enabled()) return t->gtime; @@ -837,7 +837,7 @@ cputime_t task_gtime(struct task_struct *t) gtime = t->gtime; if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU) - gtime += vtime_delta(t); + gtime += cputime_to_nsecs(vtime_delta(t)); } while (read_seqcount_retry(&t->vtime_seqcount, seq)); -- cgit v1.2.3 From a1cecf2ba78e0a6de00ff99df34b662728535aa5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:22 +0100 Subject: sched/cputime: Introduce special task_cputime_t() API to return old-typed cputime This API returns a task's cputime in cputime_t in order to ease the conversion of cputime internals to use nsecs units instead. Blindly converting all cputime readers to use this API now will later let us convert more smoothly and step by step all these places to use the new nsec based cputime. 
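The bridging idea can be sketched in isolation: readers are first moved onto a task_cputime_t() wrapper, so that once the underlying fields later switch to nanoseconds the wrapper becomes the only place that still converts back to the legacy unit. Below is a minimal standalone sketch of that eventual shape; the cputime_t stand-in, the HZ value and the helper bodies are assumptions made up for illustration, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cputime_t;              /* stand-in for the legacy opaque type */
#define HZ 100                           /* assumed tick rate for the example   */
#define NSEC_PER_SEC 1000000000ULL

struct task { uint64_t utime, stime; };  /* later phase: stored in nanoseconds */

static cputime_t nsecs_to_cputime(uint64_t ns)
{
    return ns / (NSEC_PER_SEC / HZ);     /* tick-granular legacy value */
}

/* Native accessor: hands out nanoseconds. */
static void task_cputime(const struct task *t, uint64_t *ut, uint64_t *st)
{
    *ut = t->utime;
    *st = t->stime;
}

/* Transition shim: unconverted readers keep seeing cputime_t. */
static void task_cputime_t(const struct task *t, cputime_t *ut, cputime_t *st)
{
    uint64_t u, s;

    task_cputime(t, &u, &s);
    *ut = nsecs_to_cputime(u);
    *st = nsecs_to_cputime(s);
}

int main(void)
{
    struct task t = { .utime = 1500000000ULL, .stime = 250000000ULL };
    cputime_t ut, st;

    task_cputime_t(&t, &ut, &st);
    printf("utime=%llu stime=%llu legacy ticks\n",
           (unsigned long long)ut, (unsigned long long)st);
    return 0;
}

In the patch itself the wrapper simply forwards for now; the conversion body only appears once the storage actually changes type.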
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-7-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/alpha/kernel/osf_sys.c | 2 +- arch/x86/kernel/apm_32.c | 2 +- drivers/isdn/mISDN/stack.c | 2 +- fs/binfmt_elf.c | 6 +++--- fs/binfmt_elf_fdpic.c | 6 +++--- include/linux/sched.h | 32 ++++++++++++++++++++++++++--- kernel/acct.c | 2 +- kernel/delayacct.c | 4 ++-- kernel/signal.c | 4 ++-- kernel/time/itimer.c | 2 +- kernel/time/posix-cpu-timers.c | 46 +++++++++++++++++++++--------------------- kernel/tsacct.c | 6 +++--- 12 files changed, 70 insertions(+), 44 deletions(-) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 54d8616644e2..0f92438d736b 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1154,7 +1154,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: - task_cputime(current, &utime, &stime); + task_cputime_t(current, &utime, &stime); utime_jiffies = cputime_to_jiffies(utime); stime_jiffies = cputime_to_jiffies(stime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 45d44c173cf9..89c84fcdd3c0 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -913,7 +913,7 @@ static int apm_cpu_idle(struct cpuidle_device *dev, unsigned int bucket; recalc: - task_cputime(current, &utime, &stime); + task_cputime_t(current, &utime, &stime); if (jiffies_since_last_check > IDLE_CALC_LIMIT) { use_apm_idle = 0; } else if (jiffies_since_last_check > idle_period) { diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 9cb4b621fbc3..0a3661767531 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -306,7 +306,7 @@ mISDNStackd(void *data) "msg %d sleep %d stopped\n", dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, st->stopped_cnt); - task_cputime(st->thread, &utime, &stime); + task_cputime_t(st->thread, &utime, &stime); printk(KERN_DEBUG "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", dev_name(&st->dev->dev), utime, stime); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 422370293cfd..68b915650cae 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1421,19 +1421,19 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { - struct task_cputime cputime; + struct task_cputime_t cputime; /* * This is the record for the group leader. It shows the * group-wide total, not its individual thread total. 
*/ - thread_group_cputime(p, &cputime); + thread_group_cputime_t(p, &cputime); cputime_to_timeval(cputime.utime, &prstatus->pr_utime); cputime_to_timeval(cputime.stime, &prstatus->pr_stime); } else { cputime_t utime, stime; - task_cputime(p, &utime, &stime); + task_cputime_t(p, &utime, &stime); cputime_to_timeval(utime, &prstatus->pr_utime); cputime_to_timeval(stime, &prstatus->pr_stime); } diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index d2e36f82c35d..6ccd9df7247a 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1342,19 +1342,19 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { - struct task_cputime cputime; + struct task_cputime_t cputime; /* * This is the record for the group leader. It shows the * group-wide total, not its individual thread total. */ - thread_group_cputime(p, &cputime); + thread_group_cputime_t(p, &cputime); cputime_to_timeval(cputime.utime, &prstatus->pr_utime); cputime_to_timeval(cputime.stime, &prstatus->pr_stime); } else { cputime_t utime, stime; - task_cputime(p, &utime, &stime); + task_cputime_t(p, &utime, &stime); cputime_to_timeval(utime, &prstatus->pr_utime); cputime_to_timeval(stime, &prstatus->pr_stime); } diff --git a/include/linux/sched.h b/include/linux/sched.h index 252ff25983c8..9cc722f77799 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -615,6 +615,13 @@ struct task_cputime { unsigned long long sum_exec_runtime; }; +/* Temporary type to ease cputime_t to nsecs conversion */ +struct task_cputime_t { + cputime_t utime; + cputime_t stime; + unsigned long long sum_exec_runtime; +}; + /* Alternate field names when used to cache expirations. */ #define virt_exp utime #define prof_exp stime @@ -748,7 +755,7 @@ struct signal_struct { struct thread_group_cputimer cputimer; /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; + struct task_cputime_t cputime_expires; #ifdef CONFIG_NO_HZ_FULL atomic_t tick_dep_mask; @@ -1682,7 +1689,7 @@ struct task_struct { /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ unsigned long min_flt, maj_flt; - struct task_cputime cputime_expires; + struct task_cputime_t cputime_expires; struct list_head cpu_timers[3]; /* process credentials */ @@ -2286,6 +2293,19 @@ static inline void task_cputime_scaled(struct task_struct *t, } #endif +static inline void task_cputime_t(struct task_struct *t, + cputime_t *utime, cputime_t *stime) +{ + task_cputime(t, utime, stime); +} + +static inline void task_cputime_t_scaled(struct task_struct *t, + cputime_t *utimescaled, + cputime_t *stimescaled) +{ + task_cputime_scaled(t, utimescaled, stimescaled); +} + extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); @@ -3499,7 +3519,13 @@ static __always_inline bool need_resched(void) * Thread group CPU time accounting. */ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times); + +static inline void thread_group_cputime_t(struct task_struct *tsk, + struct task_cputime_t *times) +{ + thread_group_cputime(tsk, (struct task_cputime *)times); +} /* * Reevaluate whether the task has signals pending delivery. 
diff --git a/kernel/acct.c b/kernel/acct.c index 74963d192c5d..b9b190a8eecf 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -559,7 +559,7 @@ void acct_collect(long exitcode, int group_dead) pacct->ac_flag |= ACORE; if (current->flags & PF_SIGNALED) pacct->ac_flag |= AXSIG; - task_cputime(current, &utime, &stime); + task_cputime_t(current, &utime, &stime); pacct->ac_utime += utime; pacct->ac_stime += stime; pacct->ac_minflt += current->min_flt; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 435c14a45118..228640f2b3d2 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -87,12 +87,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) unsigned long flags, t1; s64 tmp; - task_cputime(tsk, &utime, &stime); + task_cputime_t(tsk, &utime, &stime); tmp = (s64)d->cpu_run_real_total; tmp += cputime_to_nsecs(utime + stime); d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; - task_cputime_scaled(tsk, &utimescaled, &stimescaled); + task_cputime_t_scaled(tsk, &utimescaled, &stimescaled); tmp = (s64)d->cpu_scaled_run_real_total; tmp += cputime_to_nsecs(utimescaled + stimescaled); d->cpu_scaled_run_real_total = diff --git a/kernel/signal.c b/kernel/signal.c index 3603d93a1968..218048a837ea 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1619,7 +1619,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) task_uid(tsk)); rcu_read_unlock(); - task_cputime(tsk, &utime, &stime); + task_cputime_t(tsk, &utime, &stime); info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime); info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime); @@ -1704,7 +1704,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); rcu_read_unlock(); - task_cputime(tsk, &utime, &stime); + task_cputime_t(tsk, &utime, &stime); info.si_utime = cputime_to_clock_t(utime); info.si_stime = cputime_to_clock_t(stime); diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index 8c89143f9ebf..f2d5097bcb6d 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -53,7 +53,7 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, cval = it->expires; cinterval = it->incr; if (cval) { - struct task_cputime cputime; + struct task_cputime_t cputime; cputime_t t; thread_group_cputimer(tsk, &cputime); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index e9e8c10f0d9a..d53ff711a2a8 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -115,7 +115,7 @@ static void bump_cpu_timer(struct k_itimer *timer, * Checks @cputime to see if all fields are zero. Returns true if all fields * are zero, false if any field is nonzero. 
*/ -static inline int task_cputime_zero(const struct task_cputime *cputime) +static inline int task_cputime_zero(const struct task_cputime_t *cputime) { if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) return 1; @@ -126,7 +126,7 @@ static inline unsigned long long prof_ticks(struct task_struct *p) { cputime_t utime, stime; - task_cputime(p, &utime, &stime); + task_cputime_t(p, &utime, &stime); return cputime_to_expires(utime + stime); } @@ -134,7 +134,7 @@ static inline unsigned long long virt_ticks(struct task_struct *p) { cputime_t utime, stime; - task_cputime(p, &utime, &stime); + task_cputime_t(p, &utime, &stime); return cputime_to_expires(utime); } @@ -210,7 +210,7 @@ retry: } } -static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum) +static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum) { __update_gt_cputime(&cputime_atomic->utime, sum->utime); __update_gt_cputime(&cputime_atomic->stime, sum->stime); @@ -218,7 +218,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct } /* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */ -static inline void sample_cputime_atomic(struct task_cputime *times, +static inline void sample_cputime_atomic(struct task_cputime_t *times, struct task_cputime_atomic *atomic_times) { times->utime = atomic64_read(&atomic_times->utime); @@ -226,10 +226,10 @@ static inline void sample_cputime_atomic(struct task_cputime *times, times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime); } -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; - struct task_cputime sum; + struct task_cputime_t sum; /* Check if cputimer isn't running. This is accessed without locking. */ if (!READ_ONCE(cputimer->running)) { @@ -238,7 +238,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) * values through the TIMER_ABSTIME flag, therefore we have * to synchronize the timer to the clock every time we start it. 
*/ - thread_group_cputime(tsk, &sum); + thread_group_cputime_t(tsk, &sum); update_gt_cputime(&cputimer->cputime_atomic, &sum); /* @@ -262,21 +262,21 @@ static int cpu_clock_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) { - struct task_cputime cputime; + struct task_cputime_t cputime; switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: - thread_group_cputime(p, &cputime); + thread_group_cputime_t(p, &cputime); *sample = cputime_to_expires(cputime.utime + cputime.stime); break; case CPUCLOCK_VIRT: - thread_group_cputime(p, &cputime); + thread_group_cputime_t(p, &cputime); *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: - thread_group_cputime(p, &cputime); + thread_group_cputime_t(p, &cputime); *sample = cputime.sum_exec_runtime; break; } @@ -466,7 +466,7 @@ static void arm_timer(struct k_itimer *timer) { struct task_struct *p = timer->it.cpu.task; struct list_head *head, *listpos; - struct task_cputime *cputime_expires; + struct task_cputime_t *cputime_expires; struct cpu_timer_list *const nt = &timer->it.cpu; struct cpu_timer_list *next; @@ -562,7 +562,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) { - struct task_cputime cputime; + struct task_cputime_t cputime; thread_group_cputimer(p, &cputime); switch (CPUCLOCK_WHICH(which_clock)) { @@ -761,7 +761,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) /* * Protect against sighand release/switch in exit/exec and * also make timer sampling safe if it ends up calling - * thread_group_cputime(). + * thread_group_cputime_t(). */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { @@ -826,7 +826,7 @@ static void check_thread_timers(struct task_struct *tsk, { struct list_head *timers = tsk->cpu_timers; struct signal_struct *const sig = tsk->signal; - struct task_cputime *tsk_expires = &tsk->cputime_expires; + struct task_cputime_t *tsk_expires = &tsk->cputime_expires; unsigned long long expires; unsigned long soft; @@ -934,7 +934,7 @@ static void check_process_timers(struct task_struct *tsk, unsigned long long utime, ptime, virt_expires, prof_expires; unsigned long long sum_sched_runtime, sched_expires; struct list_head *timers = sig->cpu_timers; - struct task_cputime cputime; + struct task_cputime_t cputime; unsigned long soft; /* @@ -1037,7 +1037,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) } else { /* * Protect arm_timer() and timer sampling in case of call to - * thread_group_cputime(). + * thread_group_cputime_t(). */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { @@ -1080,8 +1080,8 @@ out: * Returns true if any field of the former is greater than the corresponding * field of the latter if the latter field is set. Otherwise returns false. 
*/ -static inline int task_cputime_expired(const struct task_cputime *sample, - const struct task_cputime *expires) +static inline int task_cputime_expired(const struct task_cputime_t *sample, + const struct task_cputime_t *expires) { if (expires->utime && sample->utime >= expires->utime) return 1; @@ -1108,9 +1108,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk) struct signal_struct *sig; if (!task_cputime_zero(&tsk->cputime_expires)) { - struct task_cputime task_sample; + struct task_cputime_t task_sample; - task_cputime(tsk, &task_sample.utime, &task_sample.stime); + task_cputime_t(tsk, &task_sample.utime, &task_sample.stime); task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) return 1; @@ -1133,7 +1133,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk) */ if (READ_ONCE(sig->cputimer.running) && !READ_ONCE(sig->cputimer.checking_timer)) { - struct task_cputime group_sample; + struct task_cputime_t group_sample; sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic); diff --git a/kernel/tsacct.c b/kernel/tsacct.c index f8e26ab963ed..040d0a64d0d1 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -66,11 +66,11 @@ void bacct_add_tsk(struct user_namespace *user_ns, task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; rcu_read_unlock(); - task_cputime(tsk, &utime, &stime); + task_cputime_t(tsk, &utime, &stime); stats->ac_utime = cputime_to_usecs(utime); stats->ac_stime = cputime_to_usecs(stime); - task_cputime_scaled(tsk, &utimescaled, &stimescaled); + task_cputime_t_scaled(tsk, &utimescaled, &stimescaled); stats->ac_utimescaled = cputime_to_usecs(utimescaled); stats->ac_stimescaled = cputime_to_usecs(stimescaled); @@ -159,7 +159,7 @@ void acct_update_integrals(struct task_struct *tsk) unsigned long flags; local_irq_save(flags); - task_cputime(tsk, &utime, &stime); + task_cputime_t(tsk, &utime, &stime); __acct_update_integrals(tsk, utime, stime); local_irq_restore(flags); } -- cgit v1.2.3 From 5613fda9a503cd6137b120298902a34a1386b2c1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:23 +0100 Subject: sched/cputime: Convert task/group cputime to nsecs Now that most cputime readers use the transition API which return the task cputime in old style cputime_t, we can safely store the cputime in nsecs. This will eventually make cputime statistics less opaque and more granular. Back and forth convertions between cputime_t and nsecs in order to deal with cputime_t random granularity won't be needed anymore. 
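To make the granularity point concrete, here is a small standalone comparison of tick-granular accumulation versus nanosecond accumulation. The HZ value, the helpers and the sample delta are assumptions chosen for illustration and do not reproduce the kernel's code path.

#include <stdint.h>
#include <stdio.h>

#define HZ 100
#define NSEC_PER_SEC 1000000000ULL
#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)

static uint64_t to_ticks(uint64_t ns)    { return ns / NSEC_PER_TICK; }
static uint64_t to_nsecs(uint64_t ticks) { return ticks * NSEC_PER_TICK; }

int main(void)
{
    uint64_t delta = 3 * NSEC_PER_TICK + 1234567;  /* one accounted slice */
    uint64_t ticks_total = 0, nsecs_total = 0;

    for (int i = 0; i < 1000; i++) {
        ticks_total += to_ticks(delta);   /* sub-tick remainder dropped */
        nsecs_total += delta;             /* full precision kept        */
    }

    printf("tick-granular: %llu ns, nanosecond: %llu ns\n",
           (unsigned long long)to_nsecs(ticks_total),
           (unsigned long long)nsecs_total);
    return 0;
}

Keeping the fields in nanoseconds also means readers no longer have to round-trip through whatever unit cputime_t happens to wrap on a given configuration.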
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-8-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/alpha/kernel/osf_sys.c | 4 ++-- arch/powerpc/kernel/time.c | 4 ++-- arch/s390/kernel/vtime.c | 6 ++--- arch/x86/kvm/hyperv.c | 5 +++-- fs/binfmt_elf.c | 11 +++++++-- fs/binfmt_elf_fdpic.c | 4 ++-- fs/proc/array.c | 10 ++++----- include/linux/sched.h | 55 ++++++++++++++++++++++++++++----------------- kernel/exit.c | 4 ++-- kernel/sched/cputime.c | 35 ++++++++++++++--------------- kernel/signal.c | 4 ++-- kernel/sys.c | 16 ++++++------- 12 files changed, 89 insertions(+), 69 deletions(-) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 0f92438d736b..82ccb43b795b 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1163,8 +1163,8 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: - utime_jiffies = cputime_to_jiffies(current->signal->cutime); - stime_jiffies = cputime_to_jiffies(current->signal->cstime); + utime_jiffies = nsecs_to_jiffies(current->signal->cutime); + stime_jiffies = nsecs_to_jiffies(current->signal->cstime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 02e97305d22b..3cca82e065c9 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -396,7 +396,7 @@ void vtime_flush(struct task_struct *tsk) account_user_time(tsk, acct->utime); if (acct->utime_scaled) - tsk->utimescaled += acct->utime_scaled; + tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); if (acct->gtime) account_guest_time(tsk, acct->gtime); @@ -411,7 +411,7 @@ void vtime_flush(struct task_struct *tsk) account_system_index_time(tsk, acct->stime, CPUTIME_SYSTEM); if (acct->stime_scaled) - tsk->stimescaled += acct->stime_scaled; + tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled); if (acct->hardirq_time) account_system_index_time(tsk, acct->hardirq_time, CPUTIME_IRQ); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 0a9e5d67547d..f2fc27491604 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -114,7 +114,7 @@ static void account_system_index_scaled(struct task_struct *p, cputime_t cputime, cputime_t scaled, enum cpu_usage_stat index) { - p->stimescaled += scaled; + p->stimescaled += cputime_to_nsecs(scaled); account_system_index_time(p, cputime, index); } @@ -167,12 +167,12 @@ static int do_account_vtime(struct task_struct *tsk) /* Push account value */ if (user) { account_user_time(tsk, user); - tsk->utimescaled += scale_vtime(user); + tsk->utimescaled += cputime_to_nsecs(scale_vtime(user)); } if (guest) { account_guest_time(tsk, guest); - tsk->utimescaled += scale_vtime(guest); + tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest)); } if (system) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 1572c35b4f1a..2ecd7dab4631 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -964,10 +964,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, /* Calculate cpu time spent by current task in 100ns 
units */ static u64 current_task_runtime_100ns(void) { - cputime_t utime, stime; + u64 utime, stime; task_cputime_adjusted(current, &utime, &stime); - return div_u64(cputime_to_nsecs(utime + stime), 100); + + return div_u64(utime + stime, 100); } static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 68b915650cae..6d451936a858 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1411,6 +1411,8 @@ static void fill_note(struct memelfnote *note, const char *name, int type, static void fill_prstatus(struct elf_prstatus *prstatus, struct task_struct *p, long signr) { + struct timeval tv; + prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; prstatus->pr_sigpend = p->pending.signal.sig[0]; prstatus->pr_sighold = p->blocked.sig[0]; @@ -1437,8 +1439,13 @@ static void fill_prstatus(struct elf_prstatus *prstatus, cputime_to_timeval(utime, &prstatus->pr_utime); cputime_to_timeval(stime, &prstatus->pr_stime); } - cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); - cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); + tv = ns_to_timeval(p->signal->cutime); + prstatus->pr_cutime.tv_sec = tv.tv_sec; + prstatus->pr_cutime.tv_usec = tv.tv_usec; + + tv = ns_to_timeval(p->signal->cstime); + prstatus->pr_cstime.tv_sec = tv.tv_sec; + prstatus->pr_cstime.tv_usec = tv.tv_usec; } static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 6ccd9df7247a..e1f373460257 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1358,8 +1358,8 @@ static void fill_prstatus(struct elf_prstatus *prstatus, cputime_to_timeval(utime, &prstatus->pr_utime); cputime_to_timeval(stime, &prstatus->pr_stime); } - cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); - cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); + prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); + prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap; prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap; diff --git a/fs/proc/array.c b/fs/proc/array.c index 25b54cf0c042..fe12b519d09b 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -401,7 +401,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, unsigned long long start_time; unsigned long cmin_flt = 0, cmaj_flt = 0; unsigned long min_flt = 0, maj_flt = 0; - cputime_t cutime, cstime, utime, stime; + u64 cutime, cstime, utime, stime; u64 cgtime, gtime; unsigned long rsslim = 0; char tcomm[sizeof(task->comm)]; @@ -497,10 +497,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, seq_put_decimal_ull(m, " ", cmin_flt); seq_put_decimal_ull(m, " ", maj_flt); seq_put_decimal_ull(m, " ", cmaj_flt); - seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime)); - seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime)); - seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime)); - seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime)); + seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime)); + seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime)); + seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime)); + seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime)); seq_put_decimal_ll(m, " ", priority); seq_put_decimal_ll(m, " ", nice); seq_put_decimal_ll(m, " ", num_threads); diff --git a/include/linux/sched.h b/include/linux/sched.h index 9cc722f77799..b7ccc54b35cc 100644 --- 
a/include/linux/sched.h +++ b/include/linux/sched.h @@ -585,8 +585,8 @@ struct cpu_itimer { */ struct prev_cputime { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - cputime_t utime; - cputime_t stime; + u64 utime; + u64 stime; raw_spinlock_t lock; #endif }; @@ -601,8 +601,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev) /** * struct task_cputime - collected CPU time counts - * @utime: time spent in user mode, in &cputime_t units - * @stime: time spent in kernel mode, in &cputime_t units + * @utime: time spent in user mode, in nanoseconds + * @stime: time spent in kernel mode, in nanoseconds * @sum_exec_runtime: total time spent on the CPU, in nanoseconds * * This structure groups together three kinds of CPU time that are tracked for @@ -610,8 +610,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev) * these counts together and treat all three of them in parallel. */ struct task_cputime { - cputime_t utime; - cputime_t stime; + u64 utime; + u64 stime; unsigned long long sum_exec_runtime; }; @@ -780,7 +780,7 @@ struct signal_struct { * in __exit_signal, except for the group leader. */ seqlock_t stats_lock; - cputime_t utime, stime, cutime, cstime; + u64 utime, stime, cutime, cstime; u64 gtime; u64 cgtime; struct prev_cputime prev_cputime; @@ -1661,9 +1661,9 @@ struct task_struct { int __user *set_child_tid; /* CLONE_CHILD_SETTID */ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - cputime_t utime, stime; + u64 utime, stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - cputime_t utimescaled, stimescaled; + u64 utimescaled, stimescaled; #endif u64 gtime; struct prev_cputime prev_cputime; @@ -2260,11 +2260,11 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, - cputime_t *utime, cputime_t *stime); + u64 *utime, u64 *stime); extern u64 task_gtime(struct task_struct *t); #else static inline void task_cputime(struct task_struct *t, - cputime_t *utime, cputime_t *stime) + u64 *utime, u64 *stime) { *utime = t->utime; *stime = t->stime; @@ -2278,16 +2278,16 @@ static inline u64 task_gtime(struct task_struct *t) #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME static inline void task_cputime_scaled(struct task_struct *t, - cputime_t *utimescaled, - cputime_t *stimescaled) + u64 *utimescaled, + u64 *stimescaled) { *utimescaled = t->utimescaled; *stimescaled = t->stimescaled; } #else static inline void task_cputime_scaled(struct task_struct *t, - cputime_t *utimescaled, - cputime_t *stimescaled) + u64 *utimescaled, + u64 *stimescaled) { task_cputime(t, utimescaled, stimescaled); } @@ -2296,18 +2296,26 @@ static inline void task_cputime_scaled(struct task_struct *t, static inline void task_cputime_t(struct task_struct *t, cputime_t *utime, cputime_t *stime) { - task_cputime(t, utime, stime); + u64 ut, st; + + task_cputime(t, &ut, &st); + *utime = nsecs_to_cputime(ut); + *stime = nsecs_to_cputime(st); } static inline void task_cputime_t_scaled(struct task_struct *t, cputime_t *utimescaled, cputime_t *stimescaled) { - task_cputime_scaled(t, utimescaled, stimescaled); + u64 ut, st; + + task_cputime_scaled(t, &ut, &st); + *utimescaled = nsecs_to_cputime(ut); + *stimescaled = nsecs_to_cputime(st); } -extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); -extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); +extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void 
thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); /* * Per process flags @@ -3522,9 +3530,14 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times); static inline void thread_group_cputime_t(struct task_struct *tsk, - struct task_cputime_t *times) + struct task_cputime_t *cputime) { - thread_group_cputime(tsk, (struct task_cputime *)times); + struct task_cputime times; + + thread_group_cputime(tsk, ×); + cputime->utime = nsecs_to_cputime(times.utime); + cputime->stime = nsecs_to_cputime(times.stime); + cputime->sum_exec_runtime = times.sum_exec_runtime; } /* diff --git a/kernel/exit.c b/kernel/exit.c index 8f14b866f9f6..8e5e21338b3a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -86,7 +86,7 @@ static void __exit_signal(struct task_struct *tsk) bool group_dead = thread_group_leader(tsk); struct sighand_struct *sighand; struct tty_struct *uninitialized_var(tty); - cputime_t utime, stime; + u64 utime, stime; sighand = rcu_dereference_check(tsk->sighand, lockdep_tasklist_lock_is_held()); @@ -1091,7 +1091,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) struct signal_struct *sig = p->signal; struct signal_struct *psig = current->signal; unsigned long maxrss; - cputime_t tgutime, tgstime; + u64 tgutime, tgstime; /* * The resource counters for the group leader are in its diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 8bcd98e2b821..0bdef50d88bc 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -134,7 +134,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime) int index; /* Add user time to process. */ - p->utime += cputime; + p->utime += cputime_to_nsecs(cputime); account_group_user_time(p, cputime); index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; @@ -156,7 +156,7 @@ void account_guest_time(struct task_struct *p, cputime_t cputime) u64 *cpustat = kcpustat_this_cpu->cpustat; /* Add guest time to process. */ - p->utime += cputime; + p->utime += cputime_to_nsecs(cputime); account_group_user_time(p, cputime); p->gtime += cputime_to_nsecs(cputime); @@ -180,7 +180,7 @@ void account_system_index_time(struct task_struct *p, cputime_t cputime, enum cpu_usage_stat index) { /* Add system time to process. */ - p->stime += cputime; + p->stime += cputime_to_nsecs(cputime); account_group_system_time(p, cputime); /* Add system time to cpustat. 
*/ @@ -315,7 +315,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t) void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) { struct signal_struct *sig = tsk->signal; - cputime_t utime, stime; + u64 utime, stime; struct task_struct *t; unsigned int seq, nextseq; unsigned long flags; @@ -465,14 +465,14 @@ void vtime_account_irq_enter(struct task_struct *tsk) EXPORT_SYMBOL_GPL(vtime_account_irq_enter); #endif /* __ARCH_HAS_VTIME_ACCOUNT */ -void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) +void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) { *ut = p->utime; *st = p->stime; } EXPORT_SYMBOL_GPL(task_cputime_adjusted); -void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) +void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) { struct task_cputime cputime; @@ -543,7 +543,7 @@ void account_idle_ticks(unsigned long ticks) * Perform (stime * rtime) / total, but avoid multiplication overflow by * loosing precision when the numbers are big. */ -static cputime_t scale_stime(u64 stime, u64 rtime, u64 total) +static u64 scale_stime(u64 stime, u64 rtime, u64 total) { u64 scaled; @@ -580,7 +580,7 @@ drop_precision: * followed by a 64/32->64 divide. */ scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total); - return (__force cputime_t) scaled; + return scaled; } /* @@ -605,14 +605,14 @@ drop_precision: */ static void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, - cputime_t *ut, cputime_t *st) + u64 *ut, u64 *st) { - cputime_t rtime, stime, utime; + u64 rtime, stime, utime; unsigned long flags; /* Serialize concurrent callers such that we can honour our guarantees */ raw_spin_lock_irqsave(&prev->lock, flags); - rtime = nsecs_to_cputime(curr->sum_exec_runtime); + rtime = curr->sum_exec_runtime; /* * This is possible under two circumstances: @@ -643,8 +643,7 @@ static void cputime_adjust(struct task_cputime *curr, goto update; } - stime = scale_stime((__force u64)stime, (__force u64)rtime, - (__force u64)(stime + utime)); + stime = scale_stime(stime, rtime, stime + utime); update: /* @@ -677,7 +676,7 @@ out: raw_spin_unlock_irqrestore(&prev->lock, flags); } -void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) +void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) { struct task_cputime cputime = { .sum_exec_runtime = p->se.sum_exec_runtime, @@ -688,7 +687,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) } EXPORT_SYMBOL_GPL(task_cputime_adjusted); -void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) +void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) { struct task_cputime cputime; @@ -849,9 +848,9 @@ u64 task_gtime(struct task_struct *t) * add up the pending nohz execution time since the last * cputime snapshot. 
*/ -void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime) +void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { - cputime_t delta; + u64 delta; unsigned int seq; if (!vtime_accounting_enabled()) { @@ -870,7 +869,7 @@ void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime) if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t)) continue; - delta = vtime_delta(t); + delta = cputime_to_nsecs(vtime_delta(t)); /* * Task runs either in user or kernel space, add pending nohz time to diff --git a/kernel/signal.c b/kernel/signal.c index 218048a837ea..b63522193076 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1620,8 +1620,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig) rcu_read_unlock(); task_cputime_t(tsk, &utime, &stime); - info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime); - info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime); + info.si_utime = cputime_to_clock_t(utime + nsecs_to_cputime(tsk->signal->utime)); + info.si_stime = cputime_to_clock_t(stime + nsecs_to_cputime(tsk->signal->stime)); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) diff --git a/kernel/sys.c b/kernel/sys.c index 842914ef7de4..7d4a9a6df956 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -881,15 +881,15 @@ SYSCALL_DEFINE0(getegid) void do_sys_times(struct tms *tms) { - cputime_t tgutime, tgstime, cutime, cstime; + u64 tgutime, tgstime, cutime, cstime; thread_group_cputime_adjusted(current, &tgutime, &tgstime); cutime = current->signal->cutime; cstime = current->signal->cstime; - tms->tms_utime = cputime_to_clock_t(tgutime); - tms->tms_stime = cputime_to_clock_t(tgstime); - tms->tms_cutime = cputime_to_clock_t(cutime); - tms->tms_cstime = cputime_to_clock_t(cstime); + tms->tms_utime = nsec_to_clock_t(tgutime); + tms->tms_stime = nsec_to_clock_t(tgstime); + tms->tms_cutime = nsec_to_clock_t(cutime); + tms->tms_cstime = nsec_to_clock_t(cstime); } SYSCALL_DEFINE1(times, struct tms __user *, tbuf) @@ -1544,7 +1544,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) { struct task_struct *t; unsigned long flags; - cputime_t tgutime, tgstime, utime, stime; + u64 tgutime, tgstime, utime, stime; unsigned long maxrss = 0; memset((char *)r, 0, sizeof (*r)); @@ -1600,8 +1600,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) unlock_task_sighand(p, &flags); out: - cputime_to_timeval(utime, &r->ru_utime); - cputime_to_timeval(stime, &r->ru_stime); + r->ru_utime = ns_to_timeval(utime); + r->ru_stime = ns_to_timeval(stime); if (who != RUSAGE_CHILDREN) { struct mm_struct *mm = get_task_mm(p); -- cgit v1.2.3 From dc9b77b56c2e78f18139bddc844bc484fc4281ba Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:24 +0100 Subject: alpha: Convert obsolete cputime_t to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. 
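For the getrusage path changed below, the conversion chain becomes nanoseconds to jiffies to timeval32. A rough standalone walk through that chain follows; the HZ value and the simplified helper bodies are assumptions, not the alpha kernel's exact implementations.

#include <stdint.h>
#include <stdio.h>

#define HZ 1024                        /* assumed tick rate for the example */
#define NSEC_PER_SEC 1000000000ULL

struct timeval32 { int32_t tv_sec, tv_usec; };

static unsigned long nsecs_to_jiffies(uint64_t ns)
{
    return ns / (NSEC_PER_SEC / HZ);
}

static void jiffies_to_timeval32(unsigned long j, struct timeval32 *tv)
{
    tv->tv_sec  = j / HZ;
    tv->tv_usec = (j % HZ) * (1000000L / HZ);
}

int main(void)
{
    uint64_t utime_ns = 2500000000ULL;   /* 2.5 s of user time */
    struct timeval32 tv;

    jiffies_to_timeval32(nsecs_to_jiffies(utime_ns), &tv);
    printf("ru_utime = %d s %d us\n", (int)tv.tv_sec, (int)tv.tv_usec);
    return 0;
}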
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-9-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/alpha/kernel/osf_sys.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 82ccb43b795b..9d27a7d333dc 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1145,7 +1145,7 @@ struct rusage32 { SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; - cputime_t utime, stime; + u64 utime, stime; unsigned long utime_jiffies, stime_jiffies; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) @@ -1154,9 +1154,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: - task_cputime_t(current, &utime, &stime); - utime_jiffies = cputime_to_jiffies(utime); - stime_jiffies = cputime_to_jiffies(stime); + task_cputime(current, &utime, &stime); + utime_jiffies = nsecs_to_jiffies(utime); + stime_jiffies = nsecs_to_jiffies(stime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->min_flt; -- cgit v1.2.3 From f7dcd63de44219fb5e9a36fc2c0ca23ddd79d01c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:25 +0100 Subject: x86: Convert obsolete cputime type to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. 
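In the apm_32.c hunks below the idle percentage ends up computed from a nanosecond stime delta converted to jiffies. Here is a small worked version of that arithmetic; the HZ value, the sample numbers and the simplified nsecs_to_jiffies() are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000ULL

static unsigned int nsecs_to_jiffies(uint64_t ns)
{
    return ns / (NSEC_PER_SEC / HZ);
}

int main(void)
{
    uint64_t last_stime = 10000000000ULL;        /* 10 s            */
    uint64_t stime      = 10080000000ULL;        /* 10.08 s         */
    unsigned int jiffies_since_last_check = 50;  /* 200 ms at HZ=250 */

    /* Same shape as the apm_cpu_idle() calculation after the patch. */
    unsigned int idle_percentage = nsecs_to_jiffies(stime - last_stime);
    idle_percentage *= 100;
    idle_percentage /= jiffies_since_last_check;

    printf("idle_percentage = %u%%\n", idle_percentage);
    return 0;
}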
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-10-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/apm_32.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 89c84fcdd3c0..4a7080c84a5a 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -905,21 +905,21 @@ static int apm_cpu_idle(struct cpuidle_device *dev, { static int use_apm_idle; /* = 0 */ static unsigned int last_jiffies; /* = 0 */ - static unsigned int last_stime; /* = 0 */ - cputime_t stime, utime; + static u64 last_stime; /* = 0 */ + u64 stime, utime; int apm_idle_done = 0; unsigned int jiffies_since_last_check = jiffies - last_jiffies; unsigned int bucket; recalc: - task_cputime_t(current, &utime, &stime); + task_cputime(current, &utime, &stime); if (jiffies_since_last_check > IDLE_CALC_LIMIT) { use_apm_idle = 0; } else if (jiffies_since_last_check > idle_period) { unsigned int idle_percentage; - idle_percentage = cputime_to_jiffies(stime - last_stime); + idle_percentage = nsecs_to_jiffies(stime - last_stime); idle_percentage *= 100; idle_percentage /= jiffies_since_last_check; use_apm_idle = (idle_percentage > idle_threshold); -- cgit v1.2.3 From c0e7a5000d4da3d64b2eeabfb7e9f327626f6ade Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:26 +0100 Subject: isdn: Convert obsolete cputime type to nsecs Not sure if MISDN stats are ABI but it displays task cputime in cputime_t raw value regardless of what type cputime_t wraps which could be either jiffies, nsecs, usecs, or whatever random time unit. Plus it wrongly assumes that cputime_t is long. Given that this dump is broken anyway, lets just display the nanosec value and stick with that. 
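The format-string point is easy to trip over: once the values are u64, printing them through %ld reads the wrong number of bytes on 32-bit builds, hence the switch to %llu in the hunk below. A tiny standalone version of the corrected print, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t utime = 1234567890ULL;  /* ~1.23 s of user time, in ns   */
    uint64_t stime = 98765432ULL;    /* ~0.10 s of system time, in ns */

    /* u64 printed with an unsigned 64-bit conversion, not %ld. */
    printf("mISDNStackd daemon utime(%llu) stime(%llu)\n",
           (unsigned long long)utime, (unsigned long long)stime);
    return 0;
}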
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-11-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- drivers/isdn/mISDN/stack.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 0a3661767531..b324474c0c12 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -203,7 +203,7 @@ mISDNStackd(void *data) { struct mISDNstack *st = data; #ifdef MISDN_MSG_STATS - cputime_t utime, stime; + u64 utime, stime; #endif int err = 0; @@ -306,9 +306,9 @@ mISDNStackd(void *data) "msg %d sleep %d stopped\n", dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, st->stopped_cnt); - task_cputime_t(st->thread, &utime, &stime); + task_cputime(st->thread, &utime, &stime); printk(KERN_DEBUG - "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", + "mISDNStackd daemon for %s utime(%llu) stime(%llu)\n", dev_name(&st->dev->dev), utime, stime); printk(KERN_DEBUG "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", -- cgit v1.2.3 From cd19c364b313c179410fcac8376330964cc9bfd9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:27 +0100 Subject: fs/binfmt: Convert obsolete cputime type to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-12-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/mips/kernel/binfmt_elfn32.c | 12 ++---------- arch/mips/kernel/binfmt_elfo32.c | 12 ++---------- arch/parisc/kernel/binfmt_elf32.c | 11 ++--------- fs/binfmt_elf.c | 26 ++++++++++---------------- fs/binfmt_elf_fdpic.c | 16 ++++++++-------- fs/compat_binfmt_elf.c | 18 ++---------------- include/linux/compat.h | 20 +++++++++++++++++++- 7 files changed, 45 insertions(+), 70 deletions(-) diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 9c7f3e136d50..4a2ff3953b99 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -99,15 +99,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 -#undef cputime_to_timeval -#define cputime_to_timeval cputime_to_compat_timeval -static __inline__ void -cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) -{ - unsigned long jiffies = cputime_to_jiffies(cputime); - - value->tv_usec = (jiffies % HZ) * (1000000L / HZ); - value->tv_sec = jiffies / HZ; -} +#undef ns_to_timeval +#define ns_to_timeval ns_to_compat_timeval #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 1ab34322dd97..3916404e7fd1 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -102,15 +102,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 -#undef 
cputime_to_timeval -#define cputime_to_timeval cputime_to_compat_timeval -static __inline__ void -cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) -{ - unsigned long jiffies = cputime_to_jiffies(cputime); - - value->tv_usec = (jiffies % HZ) * (1000000L / HZ); - value->tv_sec = jiffies / HZ; -} +#undef ns_to_timeval +#define ns_to_timeval ns_to_compat_timeval #include "../../../fs/binfmt_elf.c" diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c index 00dc66f9c2ba..f2adcf33f8f2 100644 --- a/arch/parisc/kernel/binfmt_elf32.c +++ b/arch/parisc/kernel/binfmt_elf32.c @@ -91,14 +91,7 @@ struct elf_prpsinfo32 current->thread.map_base = DEFAULT_MAP_BASE32; \ current->thread.task_size = DEFAULT_TASK_SIZE32 \ -#undef cputime_to_timeval -#define cputime_to_timeval cputime_to_compat_timeval -static __inline__ void -cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) -{ - unsigned long jiffies = cputime_to_jiffies(cputime); - value->tv_usec = (jiffies % HZ) * (1000000L / HZ); - value->tv_sec = jiffies / HZ; -} +#undef ns_to_timeval +#define ns_to_timeval ns_to_compat_timeval #include "../../../fs/binfmt_elf.c" diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 6d451936a858..e7bf01373bc4 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1411,8 +1411,6 @@ static void fill_note(struct memelfnote *note, const char *name, int type, static void fill_prstatus(struct elf_prstatus *prstatus, struct task_struct *p, long signr) { - struct timeval tv; - prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; prstatus->pr_sigpend = p->pending.signal.sig[0]; prstatus->pr_sighold = p->blocked.sig[0]; @@ -1423,29 +1421,25 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { - struct task_cputime_t cputime; + struct task_cputime cputime; /* * This is the record for the group leader. It shows the * group-wide total, not its individual thread total. 
*/ - thread_group_cputime_t(p, &cputime); - cputime_to_timeval(cputime.utime, &prstatus->pr_utime); - cputime_to_timeval(cputime.stime, &prstatus->pr_stime); + thread_group_cputime(p, &cputime); + prstatus->pr_utime = ns_to_timeval(cputime.utime); + prstatus->pr_stime = ns_to_timeval(cputime.stime); } else { - cputime_t utime, stime; + u64 utime, stime; - task_cputime_t(p, &utime, &stime); - cputime_to_timeval(utime, &prstatus->pr_utime); - cputime_to_timeval(stime, &prstatus->pr_stime); + task_cputime(p, &utime, &stime); + prstatus->pr_utime = ns_to_timeval(utime); + prstatus->pr_stime = ns_to_timeval(stime); } - tv = ns_to_timeval(p->signal->cutime); - prstatus->pr_cutime.tv_sec = tv.tv_sec; - prstatus->pr_cutime.tv_usec = tv.tv_usec; - tv = ns_to_timeval(p->signal->cstime); - prstatus->pr_cstime.tv_sec = tv.tv_sec; - prstatus->pr_cstime.tv_usec = tv.tv_usec; + prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); + prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); } static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index e1f373460257..ffca4bbc3d63 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1342,21 +1342,21 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { - struct task_cputime_t cputime; + struct task_cputime cputime; /* * This is the record for the group leader. It shows the * group-wide total, not its individual thread total. */ - thread_group_cputime_t(p, &cputime); - cputime_to_timeval(cputime.utime, &prstatus->pr_utime); - cputime_to_timeval(cputime.stime, &prstatus->pr_stime); + thread_group_cputime(p, &cputime); + prstatus->pr_utime = ns_to_timeval(cputime.utime); + prstatus->pr_stime = ns_to_timeval(cputime.stime); } else { - cputime_t utime, stime; + u64 utime, stime; - task_cputime_t(p, &utime, &stime); - cputime_to_timeval(utime, &prstatus->pr_utime); - cputime_to_timeval(stime, &prstatus->pr_stime); + task_cputime(p, &utime, &stime); + prstatus->pr_utime = ns_to_timeval(utime); + prstatus->pr_stime = ns_to_timeval(stime); } prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c index 4d24d17bcfc1..504b3c3539dc 100644 --- a/fs/compat_binfmt_elf.c +++ b/fs/compat_binfmt_elf.c @@ -51,22 +51,8 @@ #define elf_prstatus compat_elf_prstatus #define elf_prpsinfo compat_elf_prpsinfo -/* - * Compat version of cputime_to_compat_timeval, perhaps this - * should be an inline in . - */ -static void cputime_to_compat_timeval(const cputime_t cputime, - struct compat_timeval *value) -{ - struct timeval tv; - cputime_to_timeval(cputime, &tv); - value->tv_sec = tv.tv_sec; - value->tv_usec = tv.tv_usec; -} - -#undef cputime_to_timeval -#define cputime_to_timeval cputime_to_compat_timeval - +#undef ns_to_timeval +#define ns_to_timeval ns_to_compat_timeval /* * To use this file, asm/elf.h must define compat_elf_check_arch. 
diff --git a/include/linux/compat.h b/include/linux/compat.h index 63609398ef9f..9e40be522793 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, static inline bool in_compat_syscall(void) { return is_compat_task(); } #endif -#else +/** + * ns_to_compat_timeval - Compat version of ns_to_timeval + * @nsec: the nanoseconds value to be converted + * + * Returns the compat_timeval representation of the nsec parameter. + */ +static inline struct compat_timeval ns_to_compat_timeval(s64 nsec) +{ + struct timeval tv; + struct compat_timeval ctv; + + tv = ns_to_timeval(nsec); + ctv.tv_sec = tv.tv_sec; + ctv.tv_usec = tv.tv_usec; + + return ctv; +} + +#else /* !CONFIG_COMPAT */ #define is_compat_task() (0) static inline bool in_compat_syscall(void) { return false; } -- cgit v1.2.3 From d4bc42af73bf297f7182ed978b19850553242195 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:28 +0100 Subject: acct: Convert obsolete cputime type to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-13-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 +- kernel/acct.c | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index b7ccc54b35cc..75fc773c158d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -563,7 +563,7 @@ struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; - cputime_t ac_utime, ac_stime; + u64 ac_utime, ac_stime; unsigned long ac_minflt, ac_majflt; }; diff --git a/kernel/acct.c b/kernel/acct.c index b9b190a8eecf..ca9cb55b5855 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -453,8 +453,8 @@ static void fill_ac(acct_t *ac) spin_lock_irq(¤t->sighand->siglock); tty = current->signal->tty; /* Safe as we hold the siglock */ ac->ac_tty = tty ? 
old_encode_dev(tty_devnum(tty)) : 0; - ac->ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime))); - ac->ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime))); + ac->ac_utime = encode_comp_t(nsec_to_AHZ(pacct->ac_utime)); + ac->ac_stime = encode_comp_t(nsec_to_AHZ(pacct->ac_stime)); ac->ac_flag = pacct->ac_flag; ac->ac_mem = encode_comp_t(pacct->ac_mem); ac->ac_minflt = encode_comp_t(pacct->ac_minflt); @@ -530,7 +530,7 @@ out: void acct_collect(long exitcode, int group_dead) { struct pacct_struct *pacct = ¤t->signal->pacct; - cputime_t utime, stime; + u64 utime, stime; unsigned long vsize = 0; if (group_dead && current->mm) { @@ -559,7 +559,8 @@ void acct_collect(long exitcode, int group_dead) pacct->ac_flag |= ACORE; if (current->flags & PF_SIGNALED) pacct->ac_flag |= AXSIG; - task_cputime_t(current, &utime, &stime); + + task_cputime(current, &utime, &stime); pacct->ac_utime += utime; pacct->ac_stime += stime; pacct->ac_minflt += current->min_flt; -- cgit v1.2.3 From dbf3da1c2d14a6b0b9bac76a3f9912aff48487e7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:29 +0100 Subject: delaycct: Convert obsolete cputime type to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-14-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/delayacct.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 228640f2b3d2..660549656991 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -82,19 +82,19 @@ void __delayacct_blkio_end(void) int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { - cputime_t utime, stime, stimescaled, utimescaled; + u64 utime, stime, stimescaled, utimescaled; unsigned long long t2, t3; unsigned long flags, t1; s64 tmp; - task_cputime_t(tsk, &utime, &stime); + task_cputime(tsk, &utime, &stime); tmp = (s64)d->cpu_run_real_total; - tmp += cputime_to_nsecs(utime + stime); + tmp += utime + stime; d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; - task_cputime_t_scaled(tsk, &utimescaled, &stimescaled); + task_cputime_scaled(tsk, &utimescaled, &stimescaled); tmp = (s64)d->cpu_scaled_run_real_total; - tmp += cputime_to_nsecs(utimescaled + stimescaled); + tmp += utimescaled + stimescaled; d->cpu_scaled_run_real_total = (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; -- cgit v1.2.3 From 605dc2b31a2ab235570107cb650036b41e741165 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:30 +0100 Subject: tsacct: Convert obsolete cputime type to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. 
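The taskstats hunks below replace cputime_to_usecs() with a division by NSEC_PER_USEC through div_u64(), which the kernel uses because full 64-bit division is expensive on 32-bit targets. A minimal userspace equivalent of that conversion, where an ordinary division stands in for div_u64():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
    uint64_t utime_ns = 1500000000ULL;   /* 1.5 s of user time    */
    uint64_t stime_ns = 2500000ULL;      /* 2.5 ms of system time */

    /* ac_utime/ac_stime are reported in microseconds. */
    uint64_t ac_utime = utime_ns / NSEC_PER_USEC;
    uint64_t ac_stime = stime_ns / NSEC_PER_USEC;

    printf("ac_utime=%llu us ac_stime=%llu us\n",
           (unsigned long long)ac_utime, (unsigned long long)ac_stime);
    return 0;
}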
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-15-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 +- kernel/tsacct.c | 27 ++++++++++++--------------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 75fc773c158d..dc44366128d8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1815,7 +1815,7 @@ struct task_struct { #if defined(CONFIG_TASK_XACCT) u64 acct_rss_mem1; /* accumulated rss usage */ u64 acct_vm_mem1; /* accumulated virtual memory usage */ - cputime_t acct_timexpd; /* stime + utime since last update */ + u64 acct_timexpd; /* stime + utime since last update */ #endif #ifdef CONFIG_CPUSETS nodemask_t mems_allowed; /* Protected by alloc_lock */ diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 040d0a64d0d1..5c21f0535056 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -31,7 +31,7 @@ void bacct_add_tsk(struct user_namespace *user_ns, struct taskstats *stats, struct task_struct *tsk) { const struct cred *tcred; - cputime_t utime, stime, utimescaled, stimescaled; + u64 utime, stime, utimescaled, stimescaled; u64 delta; BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); @@ -66,13 +66,13 @@ void bacct_add_tsk(struct user_namespace *user_ns, task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; rcu_read_unlock(); - task_cputime_t(tsk, &utime, &stime); - stats->ac_utime = cputime_to_usecs(utime); - stats->ac_stime = cputime_to_usecs(stime); + task_cputime(tsk, &utime, &stime); + stats->ac_utime = div_u64(utime, NSEC_PER_USEC); + stats->ac_stime = div_u64(stime, NSEC_PER_USEC); - task_cputime_t_scaled(tsk, &utimescaled, &stimescaled); - stats->ac_utimescaled = cputime_to_usecs(utimescaled); - stats->ac_stimescaled = cputime_to_usecs(stimescaled); + task_cputime_scaled(tsk, &utimescaled, &stimescaled); + stats->ac_utimescaled = div_u64(utimescaled, NSEC_PER_USEC); + stats->ac_stimescaled = div_u64(stimescaled, NSEC_PER_USEC); stats->ac_minflt = tsk->min_flt; stats->ac_majflt = tsk->maj_flt; @@ -123,18 +123,15 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) #undef MB static void __acct_update_integrals(struct task_struct *tsk, - cputime_t utime, cputime_t stime) + u64 utime, u64 stime) { - cputime_t time, dtime; - u64 delta; + u64 time, delta; if (!likely(tsk->mm)) return; time = stime + utime; - dtime = time - tsk->acct_timexpd; - /* Avoid division: cputime_t is often in nanoseconds already. 
*/ - delta = cputime_to_nsecs(dtime); + delta = time - tsk->acct_timexpd; if (delta < TICK_NSEC) return; @@ -155,11 +152,11 @@ static void __acct_update_integrals(struct task_struct *tsk, */ void acct_update_integrals(struct task_struct *tsk) { - cputime_t utime, stime; + u64 utime, stime; unsigned long flags; local_irq_save(flags); - task_cputime_t(tsk, &utime, &stime); + task_cputime(tsk, &utime, &stime); __acct_update_integrals(tsk, utime, stime); local_irq_restore(flags); } -- cgit v1.2.3 From bde8285e5cf78417afda79f53bbc26ef801a27e7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:31 +0100 Subject: signal: Convert obsolete cputime type to nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-16-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/signal.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index b63522193076..13f9def8b24a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1581,7 +1581,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) unsigned long flags; struct sighand_struct *psig; bool autoreap = false; - cputime_t utime, stime; + u64 utime, stime; BUG_ON(sig == -1); @@ -1619,9 +1619,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig) task_uid(tsk)); rcu_read_unlock(); - task_cputime_t(tsk, &utime, &stime); - info.si_utime = cputime_to_clock_t(utime + nsecs_to_cputime(tsk->signal->utime)); - info.si_stime = cputime_to_clock_t(stime + nsecs_to_cputime(tsk->signal->stime)); + task_cputime(tsk, &utime, &stime); + info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); + info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) @@ -1685,7 +1685,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, unsigned long flags; struct task_struct *parent; struct sighand_struct *sighand; - cputime_t utime, stime; + u64 utime, stime; if (for_ptracer) { parent = tsk->parent; @@ -1704,9 +1704,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); rcu_read_unlock(); - task_cputime_t(tsk, &utime, &stime); - info.si_utime = cputime_to_clock_t(utime); - info.si_stime = cputime_to_clock_t(stime); + task_cputime(tsk, &utime, &stime); + info.si_utime = nsec_to_clock_t(utime); + info.si_stime = nsec_to_clock_t(stime); info.si_code = why; switch (why) { -- cgit v1.2.3 From a499a5a14dbd1d0315a96fc62a8798059325e9e6 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:32 +0100 Subject: sched/cputime: Increment kcpustat directly on irqtime account The irqtime is accounted in nsecs and stored in cpu_irq_time.hardirq_time and cpu_irq_time.softirq_time. Once the accumulated amount reaches a new jiffy, this one gets accounted to the kcpustat. This was necessary when kcpustat was stored in cputime_t, which could at worst have jiffies granularity. But now kcpustat is stored in nsecs so this whole discretization game with temporary irqtime storage has become unnecessary.
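As an illustration of the scheme this change moves to, the standalone sketch below models hardirq deltas, already in nanoseconds, being added straight to a cpustat array while a tick_delta field tracks what the tick path still has to consume. The driver code and sample values are invented for demonstration and are not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

enum { CPUTIME_IRQ, CPUTIME_SOFTIRQ, NR_STATS };

struct irqtime {
        uint64_t tick_delta;              /* ns not yet consumed by the tick */
};

static uint64_t cpustat[NR_STATS];
static struct irqtime irqtime;

/* Account @delta ns of hardirq time: no jiffy-sized batching needed. */
static void account_hardirq(uint64_t delta)
{
        cpustat[CPUTIME_IRQ] += delta;
        irqtime.tick_delta += delta;
}

int main(void)
{
        account_hardirq(250000);          /* 250 us */
        account_hardirq(750000);          /* 750 us */
        printf("IRQ time: %llu ns (tick_delta %llu ns)\n",
               (unsigned long long)cpustat[CPUTIME_IRQ],
               (unsigned long long)irqtime.tick_delta);
        return 0;
}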
We can now directly account the irqtime to the kcpustat. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-17-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/cputime.c | 50 +++++++++++++++++--------------------------------- kernel/sched/sched.h | 7 ++++--- 2 files changed, 21 insertions(+), 36 deletions(-) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 0bdef50d88bc..bee6c97b1e83 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -44,6 +44,7 @@ void disable_sched_clock_irqtime(void) void irqtime_account_irq(struct task_struct *curr) { struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); + u64 *cpustat = kcpustat_this_cpu->cpustat; s64 delta; int cpu; @@ -61,49 +62,35 @@ void irqtime_account_irq(struct task_struct *curr) * in that case, so as not to confuse scheduler with a special task * that do not consume any time, but still wants to run. */ - if (hardirq_count()) - irqtime->hardirq_time += delta; - else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) - irqtime->softirq_time += delta; + if (hardirq_count()) { + cpustat[CPUTIME_IRQ] += delta; + irqtime->tick_delta += delta; + } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) { + cpustat[CPUTIME_SOFTIRQ] += delta; + irqtime->tick_delta += delta; + } u64_stats_update_end(&irqtime->sync); } EXPORT_SYMBOL_GPL(irqtime_account_irq); -static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime) +static cputime_t irqtime_tick_accounted(cputime_t maxtime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; - cputime_t irq_cputime; - - irq_cputime = nsecs_to_cputime64(irqtime - cpustat[idx]); - irq_cputime = min(irq_cputime, maxtime); - cpustat[idx] += cputime_to_nsecs(irq_cputime); - - return irq_cputime; -} + struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); + cputime_t delta; -static cputime_t irqtime_account_hi_update(cputime_t maxtime) -{ - return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time), - CPUTIME_IRQ, maxtime); -} + delta = nsecs_to_cputime(irqtime->tick_delta); + delta = min(delta, maxtime); + irqtime->tick_delta -= cputime_to_nsecs(delta); -static cputime_t irqtime_account_si_update(cputime_t maxtime) -{ - return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time), - CPUTIME_SOFTIRQ, maxtime); + return delta; } #else /* CONFIG_IRQ_TIME_ACCOUNTING */ #define sched_clock_irqtime (0) -static cputime_t irqtime_account_hi_update(cputime_t dummy) -{ - return 0; -} - -static cputime_t irqtime_account_si_update(cputime_t dummy) +static cputime_t irqtime_tick_accounted(cputime_t dummy) { return 0; } @@ -280,10 +267,7 @@ static inline cputime_t account_other_time(cputime_t max) accounted = steal_account_process_time(max); if (accounted < max) - accounted += irqtime_account_hi_update(max - accounted); - - if (accounted < max) - accounted += irqtime_account_si_update(max - accounted); + accounted += irqtime_tick_accounted(max - accounted); return accounted; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6eeae7ebd99b..8ff5cc539e8a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -1827,8 +1828,7 @@ static inline void 
nohz_balance_exit_idle(unsigned int cpu) { } #ifdef CONFIG_IRQ_TIME_ACCOUNTING struct irqtime { - u64 hardirq_time; - u64 softirq_time; + u64 tick_delta; u64 irq_start_time; struct u64_stats_sync sync; }; @@ -1838,12 +1838,13 @@ DECLARE_PER_CPU(struct irqtime, cpu_irqtime); static inline u64 irq_time_read(int cpu) { struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); + u64 *cpustat = kcpustat_cpu(cpu).cpustat; unsigned int seq; u64 total; do { seq = __u64_stats_fetch_begin(&irqtime->sync); - total = irqtime->softirq_time + irqtime->hardirq_time; + total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ]; } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); return total; -- cgit v1.2.3 From 715eb7a9243a058a0722aa2f6ba703ede9113e76 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:33 +0100 Subject: timers/posix-timers: Use TICK_NSEC instead of a dynamically ad-hoc calculated version Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-18-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/time/posix-cpu-timers.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index d53ff711a2a8..8349e02b1c0c 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -890,8 +890,6 @@ static inline void stop_process_timers(struct signal_struct *sig) tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER); } -static u32 onecputick; - static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, unsigned long long *expires, unsigned long long cur_time, int signo) @@ -903,9 +901,9 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, if (it->incr) { it->expires += it->incr; it->error += it->incr_error; - if (it->error >= onecputick) { + if (it->error >= TICK_NSEC) { it->expires -= cputime_one_jiffy; - it->error -= onecputick; + it->error -= TICK_NSEC; } } else { it->expires = 0; @@ -1476,15 +1474,10 @@ static __init int init_posix_cpu_timers(void) .clock_get = thread_cpu_clock_get, .timer_create = thread_cpu_timer_create, }; - struct timespec ts; posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process); posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread); - cputime_to_timespec(cputime_one_jiffy, &ts); - onecputick = ts.tv_nsec; - WARN_ON(ts.tv_sec != 0); - return 0; } __initcall(init_posix_cpu_timers); -- cgit v1.2.3 From ebd7e7fc4bc63be5eaf9da903b8060b02dd711ea Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:34 +0100 Subject: timers/posix-timers: Convert internals to use nsecs Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. Also convert posix-cpu-timers to use nsec based internal counters to simplify it. 
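A minimal userspace sketch of the resulting representation is shown below: expirations become plain u64 nanoseconds, so a timespec converts with one multiply and an add, and rearming is ordinary integer arithmetic. timespec_to_ns() here mirrors the kernel helper of the same name, while the reduced cpu_timer_list and the sample values are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

struct cpu_timer_list {
        uint64_t expires;   /* absolute expiry, ns of CPU time */
        uint64_t incr;      /* reload interval, ns */
};

static uint64_t timespec_to_ns(const struct timespec *ts)
{
        return (uint64_t)ts->tv_sec * NSEC_PER_SEC + (uint64_t)ts->tv_nsec;
}

int main(void)
{
        struct timespec value = { .tv_sec = 2, .tv_nsec = 500000000 };
        struct cpu_timer_list t = {
                .expires = timespec_to_ns(&value),
                .incr = 10 * NSEC_PER_SEC,
        };
        uint64_t now = 3 * NSEC_PER_SEC;  /* sampled CPU clock, ns */

        if (now >= t.expires)             /* timer fired; rearm by incr */
                t.expires += t.incr;
        printf("next expiry: %llu ns\n", (unsigned long long)t.expires);
        return 0;
}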
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-19-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/posix-timers.h | 12 +-- include/linux/sched.h | 6 +- kernel/fork.c | 2 +- kernel/sched/cputime.c | 6 +- kernel/sched/stats.h | 4 +- kernel/time/itimer.c | 6 +- kernel/time/posix-cpu-timers.c | 210 +++++++++++++++++------------------------ 7 files changed, 100 insertions(+), 146 deletions(-) diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 62d44c176071..890de52362d0 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -8,19 +8,9 @@ #include -static inline unsigned long long cputime_to_expires(cputime_t expires) -{ - return (__force unsigned long long)expires; -} - -static inline cputime_t expires_to_cputime(unsigned long long expires) -{ - return (__force cputime_t)expires; -} - struct cpu_timer_list { struct list_head entry; - unsigned long long expires, incr; + u64 expires, incr; struct task_struct *task; int firing; }; diff --git a/include/linux/sched.h b/include/linux/sched.h index dc44366128d8..baa6a2834e0f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -755,7 +755,7 @@ struct signal_struct { struct thread_group_cputimer cputimer; /* Earliest-expiration cache. */ - struct task_cputime_t cputime_expires; + struct task_cputime cputime_expires; #ifdef CONFIG_NO_HZ_FULL atomic_t tick_dep_mask; @@ -1689,7 +1689,7 @@ struct task_struct { /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ unsigned long min_flt, maj_flt; - struct task_cputime_t cputime_expires; + struct task_cputime cputime_expires; struct list_head cpu_timers[3]; /* process credentials */ @@ -3527,7 +3527,7 @@ static __always_inline bool need_resched(void) * Thread group CPU time accounting. */ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); static inline void thread_group_cputime_t(struct task_struct *tsk, struct task_cputime_t *cputime) diff --git a/kernel/fork.c b/kernel/fork.c index 11c5c8ab827c..09992ff2f8fa 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1313,7 +1313,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (cpu_limit != RLIM_INFINITY) { - sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); + sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC; sig->cputimer.running = true; } diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index bee6c97b1e83..f7b9624c7df0 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime) /* Add user time to process. */ p->utime += cputime_to_nsecs(cputime); - account_group_user_time(p, cputime); + account_group_user_time(p, cputime_to_nsecs(cputime)); index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; @@ -144,7 +144,7 @@ void account_guest_time(struct task_struct *p, cputime_t cputime) /* Add guest time to process. 
*/ p->utime += cputime_to_nsecs(cputime); - account_group_user_time(p, cputime); + account_group_user_time(p, cputime_to_nsecs(cputime)); p->gtime += cputime_to_nsecs(cputime); /* Add guest time to cpustat. */ @@ -168,7 +168,7 @@ void account_system_index_time(struct task_struct *p, { /* Add system time to process. */ p->stime += cputime_to_nsecs(cputime); - account_group_system_time(p, cputime); + account_group_system_time(p, cputime_to_nsecs(cputime)); /* Add system time to cpustat. */ task_group_account_field(p, index, cputime_to_nsecs(cputime)); diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 34659a853505..9788478a66d4 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -216,7 +216,7 @@ static inline bool cputimer_running(struct task_struct *tsk) * running CPU and update the utime field there. */ static inline void account_group_user_time(struct task_struct *tsk, - cputime_t cputime) + u64 cputime) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; @@ -237,7 +237,7 @@ static inline void account_group_user_time(struct task_struct *tsk, * running CPU and update the stime field there. */ static inline void account_group_system_time(struct task_struct *tsk, - cputime_t cputime) + u64 cputime) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index f2d5097bcb6d..bb01ff445ce9 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -53,15 +53,15 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, cval = it->expires; cinterval = it->incr; if (cval) { - struct task_cputime_t cputime; + struct task_cputime cputime; cputime_t t; thread_group_cputimer(tsk, &cputime); if (clock_id == CPUCLOCK_PROF) - t = cputime.utime + cputime.stime; + t = nsecs_to_cputime(cputime.utime + cputime.stime); else /* CPUCLOCK_VIRT */ - t = cputime.utime; + t = nsecs_to_cputime(cputime.utime); if (cval < t) /* about to fire */ diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 8349e02b1c0c..45be3cec0dc2 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -50,39 +50,14 @@ static int check_clock(const clockid_t which_clock) return error; } -static inline unsigned long long -timespec_to_sample(const clockid_t which_clock, const struct timespec *tp) -{ - unsigned long long ret; - - ret = 0; /* high half always zero when .cpu used */ - if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { - ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec; - } else { - ret = cputime_to_expires(timespec_to_cputime(tp)); - } - return ret; -} - -static void sample_to_timespec(const clockid_t which_clock, - unsigned long long expires, - struct timespec *tp) -{ - if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) - *tp = ns_to_timespec(expires); - else - cputime_to_timespec((__force cputime_t)expires, tp); -} - /* * Update expiry time from increment, and increase overrun count, * given the current clock sample. */ -static void bump_cpu_timer(struct k_itimer *timer, - unsigned long long now) +static void bump_cpu_timer(struct k_itimer *timer, u64 now) { int i; - unsigned long long delta, incr; + u64 delta, incr; if (timer->it.cpu.incr == 0) return; @@ -115,28 +90,28 @@ static void bump_cpu_timer(struct k_itimer *timer, * Checks @cputime to see if all fields are zero. Returns true if all fields * are zero, false if any field is nonzero. 
*/ -static inline int task_cputime_zero(const struct task_cputime_t *cputime) +static inline int task_cputime_zero(const struct task_cputime *cputime) { if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) return 1; return 0; } -static inline unsigned long long prof_ticks(struct task_struct *p) +static inline u64 prof_ticks(struct task_struct *p) { - cputime_t utime, stime; + u64 utime, stime; - task_cputime_t(p, &utime, &stime); + task_cputime(p, &utime, &stime); - return cputime_to_expires(utime + stime); + return utime + stime; } -static inline unsigned long long virt_ticks(struct task_struct *p) +static inline u64 virt_ticks(struct task_struct *p) { - cputime_t utime, stime; + u64 utime, stime; - task_cputime_t(p, &utime, &stime); + task_cputime(p, &utime, &stime); - return cputime_to_expires(utime); + return utime; } static int @@ -176,8 +151,8 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp) /* * Sample a per-thread clock for the given task. */ -static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, - unsigned long long *sample) +static int cpu_clock_sample(const clockid_t which_clock, + struct task_struct *p, u64 *sample) { switch (CPUCLOCK_WHICH(which_clock)) { default: @@ -210,7 +185,7 @@ retry: } } -static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum) +static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum) { __update_gt_cputime(&cputime_atomic->utime, sum->utime); __update_gt_cputime(&cputime_atomic->stime, sum->stime); @@ -218,7 +193,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct } /* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */ -static inline void sample_cputime_atomic(struct task_cputime_t *times, +static inline void sample_cputime_atomic(struct task_cputime *times, struct task_cputime_atomic *atomic_times) { times->utime = atomic64_read(&atomic_times->utime); @@ -226,10 +201,10 @@ static inline void sample_cputime_atomic(struct task_cputime_t *times, times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime); } -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times) +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; - struct task_cputime_t sum; + struct task_cputime sum; /* Check if cputimer isn't running. This is accessed without locking. */ if (!READ_ONCE(cputimer->running)) { @@ -238,7 +213,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times * values through the TIMER_ABSTIME flag, therefore we have * to synchronize the timer to the clock every time we start it. 
*/ - thread_group_cputime_t(tsk, &sum); + thread_group_cputime(tsk, &sum); update_gt_cputime(&cputimer->cputime_atomic, &sum); /* @@ -260,23 +235,23 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times */ static int cpu_clock_sample_group(const clockid_t which_clock, struct task_struct *p, - unsigned long long *sample) + u64 *sample) { - struct task_cputime_t cputime; + struct task_cputime cputime; switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: - thread_group_cputime_t(p, &cputime); - *sample = cputime_to_expires(cputime.utime + cputime.stime); + thread_group_cputime(p, &cputime); + *sample = cputime.utime + cputime.stime; break; case CPUCLOCK_VIRT: - thread_group_cputime_t(p, &cputime); - *sample = cputime_to_expires(cputime.utime); + thread_group_cputime(p, &cputime); + *sample = cputime.utime; break; case CPUCLOCK_SCHED: - thread_group_cputime_t(p, &cputime); + thread_group_cputime(p, &cputime); *sample = cputime.sum_exec_runtime; break; } @@ -288,7 +263,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk, struct timespec *tp) { int err = -EINVAL; - unsigned long long rtn; + u64 rtn; if (CPUCLOCK_PERTHREAD(which_clock)) { if (same_thread_group(tsk, current)) @@ -299,7 +274,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk, } if (!err) - sample_to_timespec(which_clock, rtn, tp); + *tp = ns_to_timespec(rtn); return err; } @@ -453,7 +428,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk) cleanup_timers(tsk->signal->cpu_timers); } -static inline int expires_gt(cputime_t expires, cputime_t new_exp) +static inline int expires_gt(u64 expires, u64 new_exp) { return expires == 0 || expires > new_exp; } @@ -466,7 +441,7 @@ static void arm_timer(struct k_itimer *timer) { struct task_struct *p = timer->it.cpu.task; struct list_head *head, *listpos; - struct task_cputime_t *cputime_expires; + struct task_cputime *cputime_expires; struct cpu_timer_list *const nt = &timer->it.cpu; struct cpu_timer_list *next; @@ -488,7 +463,7 @@ static void arm_timer(struct k_itimer *timer) list_add(&nt->entry, listpos); if (listpos == head) { - unsigned long long exp = nt->expires; + u64 exp = nt->expires; /* * We are the new earliest-expiring POSIX 1.b timer, hence @@ -499,16 +474,15 @@ static void arm_timer(struct k_itimer *timer) switch (CPUCLOCK_WHICH(timer->it_clock)) { case CPUCLOCK_PROF: - if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp))) - cputime_expires->prof_exp = expires_to_cputime(exp); + if (expires_gt(cputime_expires->prof_exp, exp)) + cputime_expires->prof_exp = exp; break; case CPUCLOCK_VIRT: - if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp))) - cputime_expires->virt_exp = expires_to_cputime(exp); + if (expires_gt(cputime_expires->virt_exp, exp)) + cputime_expires->virt_exp = exp; break; case CPUCLOCK_SCHED: - if (cputime_expires->sched_exp == 0 || - cputime_expires->sched_exp > exp) + if (expires_gt(cputime_expires->sched_exp, exp)) cputime_expires->sched_exp = exp; break; } @@ -559,20 +533,19 @@ static void cpu_timer_fire(struct k_itimer *timer) * traversal. 
*/ static int cpu_timer_sample_group(const clockid_t which_clock, - struct task_struct *p, - unsigned long long *sample) + struct task_struct *p, u64 *sample) { - struct task_cputime_t cputime; + struct task_cputime cputime; thread_group_cputimer(p, &cputime); switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: - *sample = cputime_to_expires(cputime.utime + cputime.stime); + *sample = cputime.utime + cputime.stime; break; case CPUCLOCK_VIRT: - *sample = cputime_to_expires(cputime.utime); + *sample = cputime.utime; break; case CPUCLOCK_SCHED: *sample = cputime.sum_exec_runtime; @@ -593,12 +566,12 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, unsigned long flags; struct sighand_struct *sighand; struct task_struct *p = timer->it.cpu.task; - unsigned long long old_expires, new_expires, old_incr, val; + u64 old_expires, new_expires, old_incr, val; int ret; WARN_ON_ONCE(p == NULL); - new_expires = timespec_to_sample(timer->it_clock, &new->it_value); + new_expires = timespec_to_ns(&new->it_value); /* * Protect against sighand release/switch in exit/exec and p->cpu_timers @@ -659,9 +632,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, bump_cpu_timer(timer, val); if (val < timer->it.cpu.expires) { old_expires = timer->it.cpu.expires - val; - sample_to_timespec(timer->it_clock, - old_expires, - &old->it_value); + old->it_value = ns_to_timespec(old_expires); } else { old->it_value.tv_nsec = 1; old->it_value.tv_sec = 0; @@ -699,8 +670,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, * Install the new reload setting, and * set up the signal and overrun bookkeeping. */ - timer->it.cpu.incr = timespec_to_sample(timer->it_clock, - &new->it_interval); + timer->it.cpu.incr = timespec_to_ns(&new->it_interval); /* * This acts as a modification timestamp for the timer, @@ -723,17 +693,15 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, ret = 0; out: - if (old) { - sample_to_timespec(timer->it_clock, - old_incr, &old->it_interval); - } + if (old) + old->it_interval = ns_to_timespec(old_incr); return ret; } static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) { - unsigned long long now; + u64 now; struct task_struct *p = timer->it.cpu.task; WARN_ON_ONCE(p == NULL); @@ -741,8 +709,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) /* * Easy part: convert the reload time. */ - sample_to_timespec(timer->it_clock, - timer->it.cpu.incr, &itp->it_interval); + itp->it_interval = ns_to_timespec(timer->it.cpu.incr); if (timer->it.cpu.expires == 0) { /* Timer not armed at all. */ itp->it_value.tv_sec = itp->it_value.tv_nsec = 0; @@ -761,7 +728,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) /* * Protect against sighand release/switch in exit/exec and * also make timer sampling safe if it ends up calling - * thread_group_cputime_t(). + * thread_group_cputime(). */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { @@ -771,8 +738,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) * Call the timer disarmed, nothing else to do. 
*/ timer->it.cpu.expires = 0; - sample_to_timespec(timer->it_clock, timer->it.cpu.expires, - &itp->it_value); + itp->it_value = ns_to_timespec(timer->it.cpu.expires); return; } else { cpu_timer_sample_group(timer->it_clock, p, &now); @@ -781,9 +747,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) } if (now < timer->it.cpu.expires) { - sample_to_timespec(timer->it_clock, - timer->it.cpu.expires - now, - &itp->it_value); + itp->it_value = ns_to_timespec(timer->it.cpu.expires - now); } else { /* * The timer should have expired already, but the firing @@ -826,8 +790,8 @@ static void check_thread_timers(struct task_struct *tsk, { struct list_head *timers = tsk->cpu_timers; struct signal_struct *const sig = tsk->signal; - struct task_cputime_t *tsk_expires = &tsk->cputime_expires; - unsigned long long expires; + struct task_cputime *tsk_expires = &tsk->cputime_expires; + u64 expires; unsigned long soft; /* @@ -838,10 +802,10 @@ static void check_thread_timers(struct task_struct *tsk, return; expires = check_timers_list(timers, firing, prof_ticks(tsk)); - tsk_expires->prof_exp = expires_to_cputime(expires); + tsk_expires->prof_exp = expires; expires = check_timers_list(++timers, firing, virt_ticks(tsk)); - tsk_expires->virt_exp = expires_to_cputime(expires); + tsk_expires->virt_exp = expires; tsk_expires->sched_exp = check_timers_list(++timers, firing, tsk->se.sum_exec_runtime); @@ -891,13 +855,12 @@ static inline void stop_process_timers(struct signal_struct *sig) } static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, - unsigned long long *expires, - unsigned long long cur_time, int signo) + u64 *expires, u64 cur_time, int signo) { if (!it->expires) return; - if (cur_time >= it->expires) { + if (cur_time >= cputime_to_nsecs(it->expires)) { if (it->incr) { it->expires += it->incr; it->error += it->incr_error; @@ -915,8 +878,8 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } - if (it->expires && (!*expires || it->expires < *expires)) { - *expires = it->expires; + if (it->expires && (!*expires || cputime_to_nsecs(it->expires) < *expires)) { + *expires = cputime_to_nsecs(it->expires); } } @@ -929,10 +892,10 @@ static void check_process_timers(struct task_struct *tsk, struct list_head *firing) { struct signal_struct *const sig = tsk->signal; - unsigned long long utime, ptime, virt_expires, prof_expires; - unsigned long long sum_sched_runtime, sched_expires; + u64 utime, ptime, virt_expires, prof_expires; + u64 sum_sched_runtime, sched_expires; struct list_head *timers = sig->cpu_timers; - struct task_cputime_t cputime; + struct task_cputime cputime; unsigned long soft; /* @@ -952,8 +915,8 @@ static void check_process_timers(struct task_struct *tsk, * Collect the current process totals. 
*/ thread_group_cputimer(tsk, &cputime); - utime = cputime_to_expires(cputime.utime); - ptime = utime + cputime_to_expires(cputime.stime); + utime = cputime.utime; + ptime = utime + cputime.stime; sum_sched_runtime = cputime.sum_exec_runtime; prof_expires = check_timers_list(timers, firing, ptime); @@ -969,10 +932,10 @@ static void check_process_timers(struct task_struct *tsk, SIGVTALRM); soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (soft != RLIM_INFINITY) { - unsigned long psecs = cputime_to_secs(ptime); + unsigned long psecs = div_u64(ptime, NSEC_PER_SEC); unsigned long hard = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max); - cputime_t x; + u64 x; if (psecs >= hard) { /* * At the hard limit, we just die. @@ -991,14 +954,13 @@ static void check_process_timers(struct task_struct *tsk, sig->rlim[RLIMIT_CPU].rlim_cur = soft; } } - x = secs_to_cputime(soft); - if (!prof_expires || x < prof_expires) { + x = soft * NSEC_PER_SEC; + if (!prof_expires || x < prof_expires) prof_expires = x; - } } - sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires); - sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires); + sig->cputime_expires.prof_exp = prof_expires; + sig->cputime_expires.virt_exp = virt_expires; sig->cputime_expires.sched_exp = sched_expires; if (task_cputime_zero(&sig->cputime_expires)) stop_process_timers(sig); @@ -1015,7 +977,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) struct sighand_struct *sighand; unsigned long flags; struct task_struct *p = timer->it.cpu.task; - unsigned long long now; + u64 now; WARN_ON_ONCE(p == NULL); @@ -1035,7 +997,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) } else { /* * Protect arm_timer() and timer sampling in case of call to - * thread_group_cputime_t(). + * thread_group_cputime(). */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { @@ -1078,8 +1040,8 @@ out: * Returns true if any field of the former is greater than the corresponding * field of the latter if the latter field is set. Otherwise returns false. 
*/ -static inline int task_cputime_expired(const struct task_cputime_t *sample, - const struct task_cputime_t *expires) +static inline int task_cputime_expired(const struct task_cputime *sample, + const struct task_cputime *expires) { if (expires->utime && sample->utime >= expires->utime) return 1; @@ -1106,9 +1068,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk) struct signal_struct *sig; if (!task_cputime_zero(&tsk->cputime_expires)) { - struct task_cputime_t task_sample; + struct task_cputime task_sample; - task_cputime_t(tsk, &task_sample.utime, &task_sample.stime); + task_cputime(tsk, &task_sample.utime, &task_sample.stime); task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) return 1; @@ -1131,7 +1093,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk) */ if (READ_ONCE(sig->cputimer.running) && !READ_ONCE(sig->cputimer.checking_timer)) { - struct task_cputime_t group_sample; + struct task_cputime group_sample; sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic); @@ -1214,7 +1176,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, cputime_t *newval, cputime_t *oldval) { - unsigned long long now; + u64 now, new; WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); cpu_timer_sample_group(clock_idx, tsk, &now); @@ -1226,31 +1188,33 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, * it to be absolute. */ if (*oldval) { - if (*oldval <= now) { + if (cputime_to_nsecs(*oldval) <= now) { /* Just about to fire. */ *oldval = cputime_one_jiffy; } else { - *oldval -= now; + *oldval -= nsecs_to_cputime(now); } } if (!*newval) return; - *newval += now; + *newval += nsecs_to_cputime(now); } + new = cputime_to_nsecs(*newval); + /* * Update expiration cache if we are the earliest timer, or eventually * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire. */ switch (clock_idx) { case CPUCLOCK_PROF: - if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval)) - tsk->signal->cputime_expires.prof_exp = *newval; + if (expires_gt(tsk->signal->cputime_expires.prof_exp, new)) + tsk->signal->cputime_expires.prof_exp = new; break; case CPUCLOCK_VIRT: - if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval)) - tsk->signal->cputime_expires.virt_exp = *newval; + if (expires_gt(tsk->signal->cputime_expires.virt_exp, new)) + tsk->signal->cputime_expires.virt_exp = new; break; } @@ -1308,7 +1272,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, /* * We were interrupted by a signal. */ - sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); + *rqtp = ns_to_timespec(timer.it.cpu.expires); error = posix_cpu_timer_set(&timer, 0, &zero_it, it); if (!error) { /* -- cgit v1.2.3 From 858cf3a8c59968e7c5f7c1a1192459a0d52d1ab4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:35 +0100 Subject: timers/itimer: Convert internal cputime_t units to nsec Use the new nsec based cputime accessors as part of the whole cputime conversion from cputime_t to nsecs. Also convert itimers to use nsec based internal counters. This simplifies it and removes the whole game with error/inc_error which served to deal with cputime_t random granularity. 
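The standalone sketch below contrasts the two reload schemes: the old jiffy-granular path, which had to accumulate and repay rounding error, and the new nanosecond path, where a reload is a single addition. The helper functions, the assumed 100 Hz tick and the sample interval are illustrative simplifications, not the original code:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 10000000ULL   /* assumed 100 Hz tick */

/* New scheme: expires/incr are ns, reload is exact. */
static uint64_t reload_ns(uint64_t expires, uint64_t incr)
{
        return expires + incr;
}

/* Old scheme (simplified): incr was rounded up to ticks, the lost
 * nanoseconds accumulated in *error and were repaid a tick at a time. */
static uint64_t reload_ticks(uint64_t expires_ticks, uint64_t incr_ns,
                             uint64_t *error)
{
        uint64_t incr_ticks = (incr_ns + TICK_NSEC - 1) / TICK_NSEC;

        *error += incr_ticks * TICK_NSEC - incr_ns;
        expires_ticks += incr_ticks;
        if (*error >= TICK_NSEC) {
                expires_ticks -= 1;
                *error -= TICK_NSEC;
        }
        return expires_ticks;
}

int main(void)
{
        uint64_t err = 0;
        uint64_t ticks = reload_ticks(100, 25000000ULL, &err); /* 25ms */

        printf("old: %llu ticks, error %llu ns\n",
               (unsigned long long)ticks, (unsigned long long)err);
        printf("new: %llu ns\n",
               (unsigned long long)reload_ns(1000000000ULL, 25000000ULL));
        return 0;
}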
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-20-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/posix-timers.h | 2 +- include/linux/sched.h | 6 ++-- include/trace/events/timer.h | 26 ++++++++--------- kernel/time/itimer.c | 64 +++++++++++++++--------------------------- kernel/time/posix-cpu-timers.c | 43 +++++++++++----------------- 5 files changed, 55 insertions(+), 86 deletions(-) diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 890de52362d0..64aa189efe21 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -119,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, - cputime_t *newval, cputime_t *oldval); + u64 *newval, u64 *oldval); long clock_nanosleep_restart(struct restart_block *restart_block); diff --git a/include/linux/sched.h b/include/linux/sched.h index baa6a2834e0f..268fdd713089 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -568,10 +568,8 @@ struct pacct_struct { }; struct cpu_itimer { - cputime_t expires; - cputime_t incr; - u32 error; - u32 incr_error; + u64 expires; + u64 incr; }; /** diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1448637616d6..1bca99dbb98f 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -269,17 +269,17 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel, TRACE_EVENT(itimer_state, TP_PROTO(int which, const struct itimerval *const value, - cputime_t expires), + unsigned long long expires), TP_ARGS(which, value, expires), TP_STRUCT__entry( - __field( int, which ) - __field( cputime_t, expires ) - __field( long, value_sec ) - __field( long, value_usec ) - __field( long, interval_sec ) - __field( long, interval_usec ) + __field( int, which ) + __field( unsigned long long, expires ) + __field( long, value_sec ) + __field( long, value_usec ) + __field( long, interval_sec ) + __field( long, interval_usec ) ), TP_fast_assign( @@ -292,7 +292,7 @@ TRACE_EVENT(itimer_state, ), TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld", - __entry->which, (unsigned long long)__entry->expires, + __entry->which, __entry->expires, __entry->value_sec, __entry->value_usec, __entry->interval_sec, __entry->interval_usec) ); @@ -305,14 +305,14 @@ TRACE_EVENT(itimer_state, */ TRACE_EVENT(itimer_expire, - TP_PROTO(int which, struct pid *pid, cputime_t now), + TP_PROTO(int which, struct pid *pid, unsigned long long now), TP_ARGS(which, pid, now), TP_STRUCT__entry( - __field( int , which ) - __field( pid_t, pid ) - __field( cputime_t, now ) + __field( int , which ) + __field( pid_t, pid ) + __field( unsigned long long, now ) ), TP_fast_assign( @@ -322,7 +322,7 @@ TRACE_EVENT(itimer_expire, ), TP_printk("which=%d pid=%d now=%llu", __entry->which, - (int) __entry->pid, (unsigned long long)__entry->now) + (int) __entry->pid, __entry->now) ); #ifdef CONFIG_NO_HZ_COMMON diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index bb01ff445ce9..a95f13c31464 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -45,35 
+45,35 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer) static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, struct itimerval *const value) { - cputime_t cval, cinterval; + u64 val, interval; struct cpu_itimer *it = &tsk->signal->it[clock_id]; spin_lock_irq(&tsk->sighand->siglock); - cval = it->expires; - cinterval = it->incr; - if (cval) { + val = it->expires; + interval = it->incr; + if (val) { struct task_cputime cputime; - cputime_t t; + u64 t; thread_group_cputimer(tsk, &cputime); if (clock_id == CPUCLOCK_PROF) - t = nsecs_to_cputime(cputime.utime + cputime.stime); + t = cputime.utime + cputime.stime; else /* CPUCLOCK_VIRT */ - t = nsecs_to_cputime(cputime.utime); + t = cputime.utime; - if (cval < t) + if (val < t) /* about to fire */ - cval = cputime_one_jiffy; + val = TICK_NSEC; else - cval = cval - t; + val -= t; } spin_unlock_irq(&tsk->sighand->siglock); - cputime_to_timeval(cval, &value->it_value); - cputime_to_timeval(cinterval, &value->it_interval); + value->it_value = ns_to_timeval(val); + value->it_interval = ns_to_timeval(interval); } int do_getitimer(int which, struct itimerval *value) @@ -129,55 +129,35 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } -static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns) -{ - struct timespec ts; - s64 cpu_ns; - - cputime_to_timespec(ct, &ts); - cpu_ns = timespec_to_ns(&ts); - - return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns; -} - static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, const struct itimerval *const value, struct itimerval *const ovalue) { - cputime_t cval, nval, cinterval, ninterval; - s64 ns_ninterval, ns_nval; - u32 error, incr_error; + u64 oval, nval, ointerval, ninterval; struct cpu_itimer *it = &tsk->signal->it[clock_id]; - nval = timeval_to_cputime(&value->it_value); - ns_nval = timeval_to_ns(&value->it_value); - ninterval = timeval_to_cputime(&value->it_interval); - ns_ninterval = timeval_to_ns(&value->it_interval); - - error = cputime_sub_ns(nval, ns_nval); - incr_error = cputime_sub_ns(ninterval, ns_ninterval); + nval = timeval_to_ns(&value->it_value); + ninterval = timeval_to_ns(&value->it_interval); spin_lock_irq(&tsk->sighand->siglock); - cval = it->expires; - cinterval = it->incr; - if (cval || nval) { + oval = it->expires; + ointerval = it->incr; + if (oval || nval) { if (nval > 0) - nval += cputime_one_jiffy; - set_process_cpu_timer(tsk, clock_id, &nval, &cval); + nval += TICK_NSEC; + set_process_cpu_timer(tsk, clock_id, &nval, &oval); } it->expires = nval; it->incr = ninterval; - it->error = error; - it->incr_error = incr_error; trace_itimer_state(clock_id == CPUCLOCK_VIRT ? 
ITIMER_VIRTUAL : ITIMER_PROF, value, nval); spin_unlock_irq(&tsk->sighand->siglock); if (ovalue) { - cputime_to_timeval(cval, &ovalue->it_value); - cputime_to_timeval(cinterval, &ovalue->it_interval); + ovalue->it_value = ns_to_timeval(oval); + ovalue->it_interval = ns_to_timeval(ointerval); } } diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 45be3cec0dc2..b4377a5e4269 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -20,10 +20,10 @@ */ void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) { - cputime_t cputime = secs_to_cputime(rlim_new); + u64 nsecs = rlim_new * NSEC_PER_SEC; spin_lock_irq(&task->sighand->siglock); - set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL); + set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL); spin_unlock_irq(&task->sighand->siglock); } @@ -860,17 +860,11 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, if (!it->expires) return; - if (cur_time >= cputime_to_nsecs(it->expires)) { - if (it->incr) { + if (cur_time >= it->expires) { + if (it->incr) it->expires += it->incr; - it->error += it->incr_error; - if (it->error >= TICK_NSEC) { - it->expires -= cputime_one_jiffy; - it->error -= TICK_NSEC; - } - } else { + else it->expires = 0; - } trace_itimer_expire(signo == SIGPROF ? ITIMER_PROF : ITIMER_VIRTUAL, @@ -878,9 +872,8 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } - if (it->expires && (!*expires || cputime_to_nsecs(it->expires) < *expires)) { - *expires = cputime_to_nsecs(it->expires); - } + if (it->expires && (!*expires || it->expires < *expires)) + *expires = it->expires; } /* @@ -1174,9 +1167,9 @@ void run_posix_cpu_timers(struct task_struct *tsk) * The tsk->sighand->siglock must be held by the caller. */ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, - cputime_t *newval, cputime_t *oldval) + u64 *newval, u64 *oldval) { - u64 now, new; + u64 now; WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); cpu_timer_sample_group(clock_idx, tsk, &now); @@ -1188,33 +1181,31 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, * it to be absolute. */ if (*oldval) { - if (cputime_to_nsecs(*oldval) <= now) { + if (*oldval <= now) { /* Just about to fire. */ - *oldval = cputime_one_jiffy; + *oldval = TICK_NSEC; } else { - *oldval -= nsecs_to_cputime(now); + *oldval -= now; } } if (!*newval) return; - *newval += nsecs_to_cputime(now); + *newval += now; } - new = cputime_to_nsecs(*newval); - /* * Update expiration cache if we are the earliest timer, or eventually * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire. 
*/ switch (clock_idx) { case CPUCLOCK_PROF: - if (expires_gt(tsk->signal->cputime_expires.prof_exp, new)) - tsk->signal->cputime_expires.prof_exp = new; + if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval)) + tsk->signal->cputime_expires.prof_exp = *newval; break; case CPUCLOCK_VIRT: - if (expires_gt(tsk->signal->cputime_expires.virt_exp, new)) - tsk->signal->cputime_expires.virt_exp = new; + if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval)) + tsk->signal->cputime_expires.virt_exp = *newval; break; } -- cgit v1.2.3 From 71ea47b197de9a53bc3747a8b1c667df9c6a4c68 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:36 +0100 Subject: sched/cputime: Remove temporary cputime_t accessors Now that the whole cputime conversion to nsec units is complete, we can remove the compatibility accessors. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-21-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 40 ---------------------------------------- 1 file changed, 40 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 268fdd713089..d17645402767 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -29,7 +29,6 @@ struct sched_param { #include #include -#include #include #include @@ -613,13 +612,6 @@ struct task_cputime { unsigned long long sum_exec_runtime; }; -/* Temporary type to ease cputime_t to nsecs conversion */ -struct task_cputime_t { - cputime_t utime; - cputime_t stime; - unsigned long long sum_exec_runtime; -}; - /* Alternate field names when used to cache expirations. */ #define virt_exp utime #define prof_exp stime @@ -2291,27 +2283,6 @@ static inline void task_cputime_scaled(struct task_struct *t, } #endif -static inline void task_cputime_t(struct task_struct *t, - cputime_t *utime, cputime_t *stime) -{ - u64 ut, st; - - task_cputime(t, &ut, &st); - *utime = nsecs_to_cputime(ut); - *stime = nsecs_to_cputime(st); -} - -static inline void task_cputime_t_scaled(struct task_struct *t, - cputime_t *utimescaled, - cputime_t *stimescaled) -{ - u64 ut, st; - - task_cputime_scaled(t, &ut, &st); - *utimescaled = nsecs_to_cputime(ut); - *stimescaled = nsecs_to_cputime(st); -} - extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); @@ -3527,17 +3498,6 @@ static __always_inline bool need_resched(void) void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); -static inline void thread_group_cputime_t(struct task_struct *tsk, - struct task_cputime_t *cputime) -{ - struct task_cputime times; - - thread_group_cputime(tsk, &times); - cputime->utime = nsecs_to_cputime(times.utime); - cputime->stime = nsecs_to_cputime(times.stime); - cputime->sum_exec_runtime = times.sum_exec_runtime; -} - /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so.
-- cgit v1.2.3 From 23244a5c8003d4154161a8289a7d3783b0237c08 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:37 +0100 Subject: sched/cputime: Push time to account_user_time() in nsecs This is one more step toward converting cputime accounting to pure nsecs. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-22-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/kernel/time.c | 2 +- arch/powerpc/kernel/time.c | 2 +- arch/s390/kernel/vtime.c | 2 +- include/linux/kernel_stat.h | 2 +- kernel/sched/cputime.c | 42 ++++++++++++++++++++++++------------------ 5 files changed, 28 insertions(+), 22 deletions(-) diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index d040f12ea9f9..ed98b26047c2 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -67,7 +67,7 @@ void vtime_flush(struct task_struct *tsk) cputime_t delta; if (ti->utime) - account_user_time(tsk, cycle_to_cputime(ti->utime)); + account_user_time(tsk, cputime_to_nsecs(cycle_to_cputime(ti->utime))); if (ti->gtime) account_guest_time(tsk, cycle_to_cputime(ti->gtime)); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 3cca82e065c9..c3931d816190 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -393,7 +393,7 @@ void vtime_flush(struct task_struct *tsk) struct cpu_accounting_data *acct = get_accounting(tsk); if (acct->utime) - account_user_time(tsk, acct->utime); + account_user_time(tsk, cputime_to_nsecs(acct->utime)); if (acct->utime_scaled) tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index f2fc27491604..ca206d122302 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -166,7 +166,7 @@ static int do_account_vtime(struct task_struct *tsk) /* Push account value */ if (user) { - account_user_time(tsk, user); + account_user_time(tsk, cputime_to_nsecs(user)); tsk->utimescaled += cputime_to_nsecs(scale_vtime(user)); } diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index c3e38ded2d73..b716001ac23e 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -78,7 +78,7 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -extern void account_user_time(struct task_struct *, cputime_t); +extern void account_user_time(struct task_struct *, u64); extern void account_guest_time(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_system_index_time(struct task_struct *, cputime_t, diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index f7b9624c7df0..55d31c35833a 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -116,18 +116,18 @@ static inline void task_group_account_field(struct task_struct *p, int index, * @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in user space since the last update */ -void account_user_time(struct task_struct *p, cputime_t cputime) +void account_user_time(struct task_struct *p, u64 cputime) { int index; /* Add user time to process. 
*/ - p->utime += cputime_to_nsecs(cputime); - account_group_user_time(p, cputime_to_nsecs(cputime)); + p->utime += cputime; + account_group_user_time(p, cputime); index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; /* Add user time to cpustat. */ - task_group_account_field(p, index, cputime_to_nsecs(cputime)); + task_group_account_field(p, index, cputime); /* Account for user time used */ acct_account_cputime(p); @@ -363,8 +363,9 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) static void irqtime_account_process_tick(struct task_struct *p, int user_tick, struct rq *rq, int ticks) { - u64 cputime = (__force u64) cputime_one_jiffy * ticks; + u64 old_cputime = (__force u64) cputime_one_jiffy * ticks; cputime_t other; + u64 cputime; /* * When returning from idle, many ticks can get accounted at @@ -374,9 +375,11 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * other time can exceed ticks occasionally. */ other = account_other_time(ULONG_MAX); - if (other >= cputime) + if (other >= old_cputime) return; - cputime -= other; + + old_cputime -= other; + cputime = cputime_to_nsecs(old_cputime); if (this_cpu_ksoftirqd() == p) { /* @@ -384,15 +387,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * So, we have to handle it separately here. * Also, p->stime needs to be updated for ksoftirqd. */ - account_system_index_time(p, cputime, CPUTIME_SOFTIRQ); + account_system_index_time(p, old_cputime, CPUTIME_SOFTIRQ); } else if (user_tick) { account_user_time(p, cputime); } else if (p == rq->idle) { - account_idle_time(cputime); + account_idle_time(old_cputime); } else if (p->flags & PF_VCPU) { /* System time or guest time */ - account_guest_time(p, cputime); + + account_guest_time(p, old_cputime); } else { - account_system_index_time(p, cputime, CPUTIME_SYSTEM); + account_system_index_time(p, old_cputime, CPUTIME_SYSTEM); } } @@ -473,7 +477,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) */ void account_process_tick(struct task_struct *p, int user_tick) { - cputime_t cputime, steal; + cputime_t old_cputime, steal; + u64 cputime; struct rq *rq = this_rq(); if (vtime_accounting_cpu_enabled()) @@ -484,20 +489,21 @@ void account_process_tick(struct task_struct *p, int user_tick) return; } - cputime = cputime_one_jiffy; + old_cputime = cputime_one_jiffy; steal = steal_account_process_time(ULONG_MAX); - if (steal >= cputime) + if (steal >= old_cputime) return; - cputime -= steal; + old_cputime -= steal; + cputime = cputime_to_nsecs(old_cputime); if (user_tick) account_user_time(p, cputime); else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) - account_system_time(p, HARDIRQ_OFFSET, cputime); + account_system_time(p, HARDIRQ_OFFSET, old_cputime); else - account_idle_time(cputime); + account_idle_time(old_cputime); } /* @@ -736,7 +742,7 @@ void vtime_account_user(struct task_struct *tsk) tsk->vtime_snap_whence = VTIME_SYS; if (vtime_delta(tsk)) { delta_cpu = get_vtime_delta(tsk); - account_user_time(tsk, delta_cpu); + account_user_time(tsk, cputime_to_nsecs(delta_cpu)); } write_seqcount_end(&tsk->vtime_seqcount); } -- cgit v1.2.3 From be9095ed4fb3cf69e9fdf64e28ff6b5bd0ec7215 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:38 +0100 Subject: sched/cputime: Push time to account_steal_time() in nsecs This is one more step toward converting cputime accounting to pure nsecs. 
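A minimal sketch of the call shape after this change is given below, using an invented stand-in for the per-rq prev_steal_time bookkeeping and an assumed hypervisor steal clock value; only the nanosecond arithmetic is meant to match the patch:

#include <stdint.h>
#include <stdio.h>

enum { CPUTIME_STEAL, NR_STATS };

static uint64_t cpustat[NR_STATS];
static uint64_t prev_steal_time;     /* ns already accounted */

/* Nanosecond-based interface, as after this series. */
static void account_steal_time(uint64_t cputime_ns)
{
        cpustat[CPUTIME_STEAL] += cputime_ns;
}

int main(void)
{
        uint64_t steal_clock = 7000000ULL;        /* ns reported by hypervisor */
        uint64_t delta = steal_clock - prev_steal_time;

        account_steal_time(delta);
        prev_steal_time += delta;
        printf("steal: %llu ns\n", (unsigned long long)cpustat[CPUTIME_STEAL]);
        return 0;
}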
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-23-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/time.c | 2 +- arch/s390/kernel/vtime.c | 2 +- include/linux/kernel_stat.h | 2 +- kernel/sched/cputime.c | 11 ++++++----- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index c3931d816190..53e5982edacf 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -402,7 +402,7 @@ void vtime_flush(struct task_struct *tsk) account_guest_time(tsk, acct->gtime); if (acct->steal_time) - account_steal_time(acct->steal_time); + account_steal_time(cputime_to_nsecs(acct->steal_time)); if (acct->idle_time) account_idle_time(acct->idle_time); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index ca206d122302..1e7023c6ef23 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -188,7 +188,7 @@ static int do_account_vtime(struct task_struct *tsk) steal = S390_lowcore.steal_timer; if ((s64) steal > 0) { S390_lowcore.steal_timer = 0; - account_steal_time(steal); + account_steal_time(cputime_to_nsecs(steal)); } return virt_timer_forward(user + guest + system + hardirq + softirq); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index b716001ac23e..1d55d10abf9d 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -83,7 +83,7 @@ extern void account_guest_time(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_system_index_time(struct task_struct *, cputime_t, enum cpu_usage_stat); -extern void account_steal_time(cputime_t); +extern void account_steal_time(u64); extern void account_idle_time(cputime_t); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 55d31c35833a..9a8028760930 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -207,11 +207,11 @@ void account_system_time(struct task_struct *p, int hardirq_offset, * Account for involuntary wait time. 
* @cputime: the cpu time spent in involuntary wait */ -void account_steal_time(cputime_t cputime) +void account_steal_time(u64 cputime) { u64 *cpustat = kcpustat_this_cpu->cpustat; - cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_STEAL] += cputime; } /* @@ -239,14 +239,15 @@ static __always_inline cputime_t steal_account_process_time(cputime_t maxtime) #ifdef CONFIG_PARAVIRT if (static_key_false(&paravirt_steal_enabled)) { cputime_t steal_cputime; - u64 steal; + u64 steal, rounded; steal = paravirt_steal_clock(smp_processor_id()); steal -= this_rq()->prev_steal_time; steal_cputime = min(nsecs_to_cputime(steal), maxtime); - account_steal_time(steal_cputime); - this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime); + rounded = cputime_to_nsecs(steal_cputime); + account_steal_time(rounded); + this_rq()->prev_steal_time += rounded; return steal_cputime; } -- cgit v1.2.3 From 18b43a9bd7ae91185e398dd983fb4fffb9e81b3a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:39 +0100 Subject: sched/cputime: Push time to account_idle_time() in nsecs This is one more step toward converting cputime accounting to pure nsecs. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-24-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/kernel/time.c | 2 +- arch/powerpc/kernel/time.c | 2 +- arch/s390/kernel/idle.c | 2 +- include/linux/kernel_stat.h | 2 +- kernel/sched/cputime.c | 18 +++++++++--------- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index ed98b26047c2..5dc801d97790 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -73,7 +73,7 @@ void vtime_flush(struct task_struct *tsk) account_guest_time(tsk, cycle_to_cputime(ti->gtime)); if (ti->idle_time) - account_idle_time(cycle_to_cputime(ti->idle_time)); + account_idle_time(cputime_to_nsecs(cycle_to_cputime(ti->idle_time))); if (ti->stime) { delta = cycle_to_cputime(ti->stime); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 53e5982edacf..739897a10fd3 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -405,7 +405,7 @@ void vtime_flush(struct task_struct *tsk) account_steal_time(cputime_to_nsecs(acct->steal_time)); if (acct->idle_time) - account_idle_time(acct->idle_time); + account_idle_time(cputime_to_nsecs(acct->idle_time)); if (acct->stime) account_system_index_time(tsk, acct->stime, CPUTIME_SYSTEM); diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index 7a55c29b0b33..99f1d8185ae2 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -43,7 +43,7 @@ void enabled_wait(void) idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; idle->idle_time += idle_time; idle->idle_count++; - account_idle_time(idle_time); + account_idle_time(cputime_to_nsecs(idle_time)); write_seqcount_end(&idle->seqcount); } NOKPROBE_SYMBOL(enabled_wait); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 1d55d10abf9d..e1cd8970e096 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -84,7 +84,7 @@ extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_system_index_time(struct
task_struct *, cputime_t, enum cpu_usage_stat); extern void account_steal_time(u64); -extern void account_idle_time(cputime_t); +extern void account_idle_time(u64); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE static inline void account_process_tick(struct task_struct *tsk, int user) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 9a8028760930..fd5375f956fe 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -218,15 +218,15 @@ void account_steal_time(u64 cputime) * Account for idle time. * @cputime: the cpu time spent in idle wait */ -void account_idle_time(cputime_t cputime) +void account_idle_time(u64 cputime) { u64 *cpustat = kcpustat_this_cpu->cpustat; struct rq *rq = this_rq(); if (atomic_read(&rq->nr_iowait) > 0) - cpustat[CPUTIME_IOWAIT] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_IOWAIT] += cputime; else - cpustat[CPUTIME_IDLE] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_IDLE] += cputime; } /* @@ -392,7 +392,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, } else if (user_tick) { account_user_time(p, cputime); } else if (p == rq->idle) { - account_idle_time(old_cputime); + account_idle_time(cputime); } else if (p->flags & PF_VCPU) { /* System time or guest time */ account_guest_time(p, old_cputime); @@ -504,7 +504,7 @@ void account_process_tick(struct task_struct *p, int user_tick) else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) account_system_time(p, HARDIRQ_OFFSET, old_cputime); else - account_idle_time(old_cputime); + account_idle_time(cputime); } /* @@ -513,15 +513,15 @@ void account_process_tick(struct task_struct *p, int user_tick) */ void account_idle_ticks(unsigned long ticks) { - cputime_t cputime, steal; + u64 cputime, steal; if (sched_clock_irqtime) { irqtime_account_idle_ticks(ticks); return; } - cputime = jiffies_to_cputime(ticks); - steal = steal_account_process_time(ULONG_MAX); + cputime = ticks * TICK_NSEC; + steal = cputime_to_nsecs(steal_account_process_time(ULONG_MAX)); if (steal >= cputime) return; @@ -787,7 +787,7 @@ void vtime_account_idle(struct task_struct *tsk) { cputime_t delta_cpu = get_vtime_delta(tsk); - account_idle_time(delta_cpu); + account_idle_time(cputime_to_nsecs(delta_cpu)); } void arch_vtime_task_switch(struct task_struct *prev) -- cgit v1.2.3 From fb8b049c988f1ff460b063b8a41ea9a3c79921c2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:40 +0100 Subject: sched/cputime: Push time to account_system_time() in nsecs This is one more step toward converting cputime accounting to pure nsecs. 
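The common shape of these call-site changes is that the core accounting functions now take plain u64 nanoseconds, so any remaining cputime_t value is converted exactly once, at the call boundary, instead of deep inside kernel/sched/cputime.c. A minimal user-space sketch of that boundary follows; the cputime_t typedef, the tick rate and the account_system_time_ns() helper are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for an arch cputime unit: assume 4096 ticks per microsecond. */
typedef uint64_t cputime_t;
#define TICKS_PER_USEC 4096ULL

static uint64_t cputime_to_nsecs(cputime_t ct)
{
        return ct * 1000ULL / TICKS_PER_USEC;   /* arch ticks -> nanoseconds */
}

/* Accounting side: deals in pure nanoseconds, no cputime_t knowledge. */
static uint64_t cpustat_system_ns;

static void account_system_time_ns(uint64_t nsecs)
{
        cpustat_system_ns += nsecs;
}

int main(void)
{
        cputime_t delta = 8192;                            /* 2us worth of arch ticks */

        account_system_time_ns(cputime_to_nsecs(delta));   /* convert once, at the boundary */
        printf("system: %llu ns\n", (unsigned long long)cpustat_system_ns);
        return 0;
}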
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-25-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/kernel/time.c | 11 ++++++----- arch/powerpc/kernel/time.c | 15 ++++++++------- arch/s390/kernel/idle.c | 2 +- arch/s390/kernel/vtime.c | 6 +++--- include/linux/kernel_stat.h | 7 +++---- kernel/sched/cputime.c | 39 +++++++++++++++++++-------------------- 6 files changed, 40 insertions(+), 40 deletions(-) diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 5dc801d97790..f15bca4776a7 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -64,29 +65,29 @@ extern cputime_t cycle_to_cputime(u64 cyc); void vtime_flush(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); - cputime_t delta; + u64 delta; if (ti->utime) account_user_time(tsk, cputime_to_nsecs(cycle_to_cputime(ti->utime))); if (ti->gtime) - account_guest_time(tsk, cycle_to_cputime(ti->gtime)); + account_guest_time(tsk, cputime_to_nsecs(cycle_to_cputime(ti->gtime))); if (ti->idle_time) account_idle_time(cputime_to_nsecs(cycle_to_cputime(ti->idle_time))); if (ti->stime) { - delta = cycle_to_cputime(ti->stime); + delta = cputime_to_nsecs(cycle_to_cputime(ti->stime)); account_system_index_time(tsk, delta, CPUTIME_SYSTEM); } if (ti->hardirq_time) { - delta = cycle_to_cputime(ti->hardirq_time); + delta = cputime_to_nsecs(cycle_to_cputime(ti->hardirq_time)); account_system_index_time(tsk, delta, CPUTIME_IRQ); } if (ti->softirq_time) { - delta = cycle_to_cputime(ti->softirq_time); + delta = cputime_to_nsecs(cycle_to_cputime(ti->softirq_time)); account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 739897a10fd3..01f53bfe100b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include @@ -72,7 +73,6 @@ #include #include #include -#include #include /* powerpc clocksource/clockevent code */ @@ -399,7 +399,7 @@ void vtime_flush(struct task_struct *tsk) tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); if (acct->gtime) - account_guest_time(tsk, acct->gtime); + account_guest_time(tsk, cputime_to_nsecs(acct->gtime)); if (acct->steal_time) account_steal_time(cputime_to_nsecs(acct->steal_time)); @@ -408,16 +408,17 @@ void vtime_flush(struct task_struct *tsk) account_idle_time(cputime_to_nsecs(acct->idle_time)); if (acct->stime) - account_system_index_time(tsk, acct->stime, CPUTIME_SYSTEM); - + account_system_index_time(tsk, cputime_to_nsecs(acct->stime), + CPUTIME_SYSTEM); if (acct->stime_scaled) tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled); if (acct->hardirq_time) - account_system_index_time(tsk, acct->hardirq_time, CPUTIME_IRQ); - + account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time), + CPUTIME_IRQ); if (acct->softirq_time) - account_system_index_time(tsk, acct->softirq_time, CPUTIME_SOFTIRQ); + account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time), + CPUTIME_SOFTIRQ); acct->utime = 0; acct->utime_scaled = 0; diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index 99f1d8185ae2..5c0e08e761c3 100644 
--- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include "entry.h" diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 1e7023c6ef23..b4a3e9e06ef2 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -6,13 +6,13 @@ */ #include +#include #include #include #include #include #include -#include #include #include #include @@ -115,7 +115,7 @@ static void account_system_index_scaled(struct task_struct *p, enum cpu_usage_stat index) { p->stimescaled += cputime_to_nsecs(scaled); - account_system_index_time(p, cputime, index); + account_system_index_time(p, cputime_to_nsecs(cputime), index); } /* @@ -171,7 +171,7 @@ static int do_account_vtime(struct task_struct *tsk) } if (guest) { - account_guest_time(tsk, guest); + account_guest_time(tsk, cputime_to_nsecs(guest)); tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest)); } diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index e1cd8970e096..66be8b6beceb 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -9,7 +9,6 @@ #include #include #include -#include /* * 'kernel_stat.h' contains the definitions needed for doing @@ -79,9 +78,9 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) } extern void account_user_time(struct task_struct *, u64); -extern void account_guest_time(struct task_struct *, cputime_t); -extern void account_system_time(struct task_struct *, int, cputime_t); -extern void account_system_index_time(struct task_struct *, cputime_t, +extern void account_guest_time(struct task_struct *, u64); +extern void account_system_time(struct task_struct *, int, u64); +extern void account_system_index_time(struct task_struct *, u64, enum cpu_usage_stat); extern void account_steal_time(u64); extern void account_idle_time(u64); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index fd5375f956fe..d28e9c53727c 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "sched.h" #ifdef CONFIG_PARAVIRT #include @@ -138,22 +139,22 @@ void account_user_time(struct task_struct *p, u64 cputime) * @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in virtual machine since the last update */ -void account_guest_time(struct task_struct *p, cputime_t cputime) +void account_guest_time(struct task_struct *p, u64 cputime) { u64 *cpustat = kcpustat_this_cpu->cpustat; /* Add guest time to process. */ - p->utime += cputime_to_nsecs(cputime); - account_group_user_time(p, cputime_to_nsecs(cputime)); - p->gtime += cputime_to_nsecs(cputime); + p->utime += cputime; + account_group_user_time(p, cputime); + p->gtime += cputime; /* Add guest time to cpustat. 
*/ if (task_nice(p) > 0) { - cpustat[CPUTIME_NICE] += cputime_to_nsecs(cputime); - cpustat[CPUTIME_GUEST_NICE] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_NICE] += cputime; + cpustat[CPUTIME_GUEST_NICE] += cputime; } else { - cpustat[CPUTIME_USER] += cputime_to_nsecs(cputime); - cpustat[CPUTIME_GUEST] += cputime_to_nsecs(cputime); + cpustat[CPUTIME_USER] += cputime; + cpustat[CPUTIME_GUEST] += cputime; } } @@ -164,14 +165,14 @@ void account_guest_time(struct task_struct *p, cputime_t cputime) * @index: pointer to cpustat field that has to be updated */ void account_system_index_time(struct task_struct *p, - cputime_t cputime, enum cpu_usage_stat index) + u64 cputime, enum cpu_usage_stat index) { /* Add system time to process. */ - p->stime += cputime_to_nsecs(cputime); - account_group_system_time(p, cputime_to_nsecs(cputime)); + p->stime += cputime; + account_group_system_time(p, cputime); /* Add system time to cpustat. */ - task_group_account_field(p, index, cputime_to_nsecs(cputime)); + task_group_account_field(p, index, cputime); /* Account for system time used */ acct_account_cputime(p); @@ -183,8 +184,7 @@ void account_system_index_time(struct task_struct *p, * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in kernel space since the last update */ -void account_system_time(struct task_struct *p, int hardirq_offset, - cputime_t cputime) +void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime) { int index; @@ -388,16 +388,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * So, we have to handle it separately here. * Also, p->stime needs to be updated for ksoftirqd. */ - account_system_index_time(p, old_cputime, CPUTIME_SOFTIRQ); + account_system_index_time(p, cputime, CPUTIME_SOFTIRQ); } else if (user_tick) { account_user_time(p, cputime); } else if (p == rq->idle) { account_idle_time(cputime); } else if (p->flags & PF_VCPU) { /* System time or guest time */ - - account_guest_time(p, old_cputime); + account_guest_time(p, cputime); } else { - account_system_index_time(p, old_cputime, CPUTIME_SYSTEM); + account_system_index_time(p, cputime, CPUTIME_SYSTEM); } } @@ -502,7 +501,7 @@ void account_process_tick(struct task_struct *p, int user_tick) if (user_tick) account_user_time(p, cputime); else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) - account_system_time(p, HARDIRQ_OFFSET, old_cputime); + account_system_time(p, HARDIRQ_OFFSET, cputime); else account_idle_time(cputime); } @@ -722,7 +721,7 @@ static void __vtime_account_system(struct task_struct *tsk) { cputime_t delta_cpu = get_vtime_delta(tsk); - account_system_time(tsk, irq_count(), delta_cpu); + account_system_time(tsk, irq_count(), cputime_to_nsecs(delta_cpu)); } void vtime_account_system(struct task_struct *tsk) -- cgit v1.2.3 From 2b1f967d80e8e5d7361f0e1654c842869570f573 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:41 +0100 Subject: sched/cputime: Complete nsec conversion of tick based accounting This is the final step toward tick based cputime conversion. Now that the whole cputime accounting engine accounts in nsecs, we can convert the very source of the cputime to account in nsecs. 
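With cputime_one_jiffy gone from this path, the tick accounting reduces to plain nanosecond arithmetic: one tick is worth NSEC_PER_SEC / HZ nanoseconds (TICK_NSEC), and whatever steal, irq or softirq time was already accounted for the same interval is subtracted before the remainder is charged to the task. A rough user-space sketch of that subtraction, with HZ and the already-accounted value chosen arbitrarily for illustration:

#include <stdint.h>
#include <stdio.h>

#define HZ             250ULL                 /* illustrative config value */
#define NSEC_PER_SEC   1000000000ULL
#define TICK_NSEC      (NSEC_PER_SEC / HZ)    /* 4,000,000 ns per tick here */

/* Charge one tick, minus whatever was already accounted as "other" time. */
static uint64_t account_one_tick(uint64_t other_ns)
{
        uint64_t cputime = TICK_NSEC;

        if (other_ns >= cputime)
                return 0;          /* the whole tick was steal/irq/softirq */
        return cputime - other_ns; /* remainder goes to user or system time */
}

int main(void)
{
        /* 1.5 ms of the 4 ms tick was already accounted elsewhere. */
        printf("charged: %llu ns\n", (unsigned long long)account_one_tick(1500000));
        return 0;
}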
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-26-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/cputime.c | 52 +++++++++++++++++++++----------------------------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index d28e9c53727c..f3a56bfa745f 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -75,14 +75,13 @@ void irqtime_account_irq(struct task_struct *curr) } EXPORT_SYMBOL_GPL(irqtime_account_irq); -static cputime_t irqtime_tick_accounted(cputime_t maxtime) +static u64 irqtime_tick_accounted(u64 maxtime) { struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); - cputime_t delta; + u64 delta; - delta = nsecs_to_cputime(irqtime->tick_delta); - delta = min(delta, maxtime); - irqtime->tick_delta -= cputime_to_nsecs(delta); + delta = min(irqtime->tick_delta, maxtime); + irqtime->tick_delta -= delta; return delta; } @@ -91,7 +90,7 @@ static cputime_t irqtime_tick_accounted(cputime_t maxtime) #define sched_clock_irqtime (0) -static cputime_t irqtime_tick_accounted(cputime_t dummy) +static u64 irqtime_tick_accounted(u64 dummy) { return 0; } @@ -234,22 +233,19 @@ void account_idle_time(u64 cputime) * ticks are not redelivered later. Due to that, this function may on * occasion account more time than the calling functions think elapsed. */ -static __always_inline cputime_t steal_account_process_time(cputime_t maxtime) +static __always_inline u64 steal_account_process_time(u64 maxtime) { #ifdef CONFIG_PARAVIRT if (static_key_false(&paravirt_steal_enabled)) { - cputime_t steal_cputime; - u64 steal, rounded; + u64 steal; steal = paravirt_steal_clock(smp_processor_id()); steal -= this_rq()->prev_steal_time; + steal = min(steal, maxtime); + account_steal_time(steal); + this_rq()->prev_steal_time += steal; - steal_cputime = min(nsecs_to_cputime(steal), maxtime); - rounded = cputime_to_nsecs(steal_cputime); - account_steal_time(rounded); - this_rq()->prev_steal_time += rounded; - - return steal_cputime; + return steal; } #endif return 0; @@ -258,9 +254,9 @@ static __always_inline cputime_t steal_account_process_time(cputime_t maxtime) /* * Account how much elapsed time was spent in steal, irq, or softirq time. */ -static inline cputime_t account_other_time(cputime_t max) +static inline u64 account_other_time(u64 max) { - cputime_t accounted; + u64 accounted; /* Shall be converted to a lockdep-enabled lightweight check */ WARN_ON_ONCE(!irqs_disabled()); @@ -364,9 +360,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) static void irqtime_account_process_tick(struct task_struct *p, int user_tick, struct rq *rq, int ticks) { - u64 old_cputime = (__force u64) cputime_one_jiffy * ticks; - cputime_t other; - u64 cputime; + u64 other, cputime = TICK_NSEC * ticks; /* * When returning from idle, many ticks can get accounted at @@ -376,11 +370,10 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * other time can exceed ticks occasionally.
*/ other = account_other_time(ULONG_MAX); - if (other >= old_cputime) + if (other >= cputime) return; - old_cputime -= other; - cputime = cputime_to_nsecs(old_cputime); + cputime -= other; if (this_cpu_ksoftirqd() == p) { /* @@ -477,8 +470,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) */ void account_process_tick(struct task_struct *p, int user_tick) { - cputime_t old_cputime, steal; - u64 cputime; + u64 cputime, steal; struct rq *rq = this_rq(); if (vtime_accounting_cpu_enabled()) @@ -489,14 +481,13 @@ void account_process_tick(struct task_struct *p, int user_tick) return; } - old_cputime = cputime_one_jiffy; + cputime = TICK_NSEC; steal = steal_account_process_time(ULONG_MAX); - if (steal >= old_cputime) + if (steal >= cputime) return; - old_cputime -= steal; - cputime = cputime_to_nsecs(old_cputime); + cputime -= steal; if (user_tick) account_user_time(p, cputime); @@ -520,7 +511,7 @@ void account_idle_ticks(unsigned long ticks) } cputime = ticks * TICK_NSEC; - steal = cputime_to_nsecs(steal_account_process_time(ULONG_MAX)); + steal = steal_account_process_time(ULONG_MAX); if (steal >= cputime) return; @@ -741,6 +732,7 @@ void vtime_account_user(struct task_struct *tsk) write_seqcount_begin(&tsk->vtime_seqcount); tsk->vtime_snap_whence = VTIME_SYS; if (vtime_delta(tsk)) { + u64 nsecs; delta_cpu = get_vtime_delta(tsk); account_user_time(tsk, cputime_to_nsecs(delta_cpu)); } -- cgit v1.2.3 From bfce1d6006f383fbb55a89580af37819a77195b7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:42 +0100 Subject: sched/cputime, vtime: Return nsecs instead of cputime_t to account Turn the full dynticks cputime clock source to return nsec while keeping its very internals jiffies based for performance reasons. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-27-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/cputime.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index f3a56bfa745f..2ecec3a4f1ee 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -678,20 +678,20 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -static cputime_t vtime_delta(struct task_struct *tsk) +static u64 vtime_delta(struct task_struct *tsk) { unsigned long now = READ_ONCE(jiffies); if (time_before(now, (unsigned long)tsk->vtime_snap)) return 0; - return jiffies_to_cputime(now - tsk->vtime_snap); + return jiffies_to_nsecs(now - tsk->vtime_snap); } -static cputime_t get_vtime_delta(struct task_struct *tsk) +static u64 get_vtime_delta(struct task_struct *tsk) { unsigned long now = READ_ONCE(jiffies); - cputime_t delta, other; + u64 delta, other; /* * Unlike tick based timing, vtime based timing never has lost @@ -700,7 +700,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk) * elapsed time. Limit account_other_time to prevent rounding * errors from causing elapsed vtime to go negative. 
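In other words, the clock source stays jiffies-granular internally; only what it hands to the accounting code changes, the delta becoming (jiffies_now - vtime_snap) * (NSEC_PER_SEC / HZ) nanoseconds. A small sketch of that conversion, assuming HZ=1000 and ignoring the jiffies wraparound that the kernel handles with time_before():

#include <stdint.h>
#include <stdio.h>

#define HZ           1000ULL               /* assumed tick rate */
#define NSEC_PER_SEC 1000000000ULL

/* Nanoseconds elapsed between two jiffies snapshots (jiffies-based internally). */
static uint64_t vtime_delta_ns(uint64_t jiffies_now, uint64_t vtime_snap)
{
        if (jiffies_now < vtime_snap)      /* not yet past the snapshot */
                return 0;
        return (jiffies_now - vtime_snap) * (NSEC_PER_SEC / HZ);
}

int main(void)
{
        /* 3 ticks after the snapshot -> 3,000,000 ns at HZ=1000 */
        printf("%llu ns\n", (unsigned long long)vtime_delta_ns(100003, 100000));
        return 0;
}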
*/ - delta = jiffies_to_cputime(now - tsk->vtime_snap); + delta = jiffies_to_nsecs(now - tsk->vtime_snap); other = account_other_time(delta); WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); tsk->vtime_snap = now; @@ -710,9 +710,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk) static void __vtime_account_system(struct task_struct *tsk) { - cputime_t delta_cpu = get_vtime_delta(tsk); - - account_system_time(tsk, irq_count(), cputime_to_nsecs(delta_cpu)); + account_system_time(tsk, irq_count(), get_vtime_delta(tsk)); } void vtime_account_system(struct task_struct *tsk) @@ -727,15 +725,10 @@ void vtime_account_system(struct task_struct *tsk) void vtime_account_user(struct task_struct *tsk) { - cputime_t delta_cpu; - write_seqcount_begin(&tsk->vtime_seqcount); tsk->vtime_snap_whence = VTIME_SYS; - if (vtime_delta(tsk)) { - u64 nsecs; - delta_cpu = get_vtime_delta(tsk); - account_user_time(tsk, cputime_to_nsecs(delta_cpu)); - } + if (vtime_delta(tsk)) + account_user_time(tsk, get_vtime_delta(tsk)); write_seqcount_end(&tsk->vtime_seqcount); } @@ -776,9 +769,7 @@ EXPORT_SYMBOL_GPL(vtime_guest_exit); void vtime_account_idle(struct task_struct *tsk) { - cputime_t delta_cpu = get_vtime_delta(tsk); - - account_idle_time(cputime_to_nsecs(delta_cpu)); + account_idle_time(get_vtime_delta(tsk)); } void arch_vtime_task_switch(struct task_struct *prev) @@ -818,7 +809,7 @@ u64 task_gtime(struct task_struct *t) gtime = t->gtime; if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU) - gtime += cputime_to_nsecs(vtime_delta(t)); + gtime += vtime_delta(t); } while (read_seqcount_retry(&t->vtime_seqcount, seq)); @@ -851,7 +842,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t)) continue; - delta = cputime_to_nsecs(vtime_delta(t)); + delta = vtime_delta(t); /* * Task runs either in user or kernel space, add pending nohz time to -- cgit v1.2.3 From f22d6df0b23e66b6d9058e2570402606a6ad069a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:43 +0100 Subject: sched/cputime: Remove jiffies based cputime This cputime_t implementation is now unused, we can remove it. 
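For context on what is deleted below: this representation stored cputime as whole jiffies, so cputime_to_nsecs()/nsecs_to_cputime() round-tripped every value through tick granularity. A tiny sketch of that loss, with HZ=100 picked only for illustration:

#include <stdint.h>
#include <stdio.h>

#define HZ           100ULL                 /* illustrative; 1 jiffy = 10 ms */
#define NSEC_PER_SEC 1000000000ULL

static uint64_t nsecs_to_jiffies(uint64_t ns)  { return ns / (NSEC_PER_SEC / HZ); }
static uint64_t jiffies_to_nsecs(uint64_t jif) { return jif * (NSEC_PER_SEC / HZ); }

int main(void)
{
        uint64_t ns = 12345678;   /* 12.3 ms of CPU time */

        /* Round-tripping through the jiffies representation drops the sub-tick part. */
        printf("%llu ns -> %llu ns\n",
               (unsigned long long)ns,
               (unsigned long long)jiffies_to_nsecs(nsecs_to_jiffies(ns)));
        return 0;
}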
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-28-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/asm-generic/cputime.h | 4 -- include/asm-generic/cputime_jiffies.h | 75 ----------------------------------- 2 files changed, 79 deletions(-) delete mode 100644 include/asm-generic/cputime_jiffies.h diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 51969436b8b8..8a624b454ba3 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -4,10 +4,6 @@ #include #include -#ifndef CONFIG_VIRT_CPU_ACCOUNTING -# include -#endif - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # include #endif diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h deleted file mode 100644 index 6bb8cd45f53b..000000000000 --- a/include/asm-generic/cputime_jiffies.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H -#define _ASM_GENERIC_CPUTIME_JIFFIES_H - -typedef unsigned long __nocast cputime_t; - -#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new) - -#define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) -#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) - -typedef u64 __nocast cputime64_t; - -#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) -#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) - - -/* - * Convert nanoseconds <-> cputime - */ -#define cputime_to_nsecs(__ct) \ - jiffies_to_nsecs(cputime_to_jiffies(__ct)) -#define nsecs_to_cputime64(__nsec) \ - jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec)) -#define nsecs_to_cputime(__nsec) \ - jiffies_to_cputime(nsecs_to_jiffies(__nsec)) - - -/* - * Convert cputime to microseconds and back. - */ -#define cputime_to_usecs(__ct) \ - jiffies_to_usecs(cputime_to_jiffies(__ct)) -#define usecs_to_cputime(__usec) \ - jiffies_to_cputime(usecs_to_jiffies(__usec)) -#define usecs_to_cputime64(__usec) \ - jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) - -/* - * Convert cputime to seconds and back. - */ -#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) -#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) - -/* - * Convert cputime to timespec and back. - */ -#define timespec_to_cputime(__val) \ - jiffies_to_cputime(timespec_to_jiffies(__val)) -#define cputime_to_timespec(__ct,__val) \ - jiffies_to_timespec(cputime_to_jiffies(__ct),__val) - -/* - * Convert cputime to timeval and back. - */ -#define timeval_to_cputime(__val) \ - jiffies_to_cputime(timeval_to_jiffies(__val)) -#define cputime_to_timeval(__ct,__val) \ - jiffies_to_timeval(cputime_to_jiffies(__ct),__val) - -/* - * Convert cputime to clock and back. - */ -#define cputime_to_clock_t(__ct) \ - jiffies_to_clock_t(cputime_to_jiffies(__ct)) -#define clock_t_to_cputime(__x) \ - jiffies_to_cputime(clock_t_to_jiffies(__x)) - -/* - * Convert cputime64 to clock. 
- */ -#define cputime64_to_clock_t(__ct) \ - jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) - -#endif -- cgit v1.2.3 From 4c562529f1ded4801f572e6dfcb464e598ef8f15 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:44 +0100 Subject: ia64, sched/cputime: Move the nsecs based cputime headers to the last arch using it Initially, nsec based cputime_t implementation belonged to IA64. It got exported later for CONFIG_VIRT_CPU_ACCOUNTING_GEN but now it is again only used by IA64. So let's move it back there. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-29-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/cputime.h | 108 +++++++++++++++++++++++++++++++- include/asm-generic/cputime.h | 4 -- include/asm-generic/cputime_nsecs.h | 121 ------------------------------------ 3 files changed, 106 insertions(+), 127 deletions(-) delete mode 100644 include/asm-generic/cputime_nsecs.h diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index e2d3f5baf265..fee773c039b7 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -20,9 +20,113 @@ #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE # include + #else -# include -# include + +#include +#include + +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; + +#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) + +#define cputime_one_jiffy jiffies_to_cputime(1) + +#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) +#define cputime_div_rem(__ct, divisor, remainder) \ + div_u64_rem((__force u64)__ct, divisor, remainder); + +/* + * Convert cputime <-> jiffies (HZ) + */ +#define cputime_to_jiffies(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define jiffies_to_cputime(__jif) \ + (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define jiffies64_to_cputime64(__jif) \ + (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) + + +/* + * Convert cputime <-> nanoseconds + */ +#define cputime_to_nsecs(__ct) \ + (__force u64)(__ct) +#define nsecs_to_cputime(__nsecs) \ + (__force cputime_t)(__nsecs) +#define nsecs_to_cputime64(__nsecs) \ + (__force cputime64_t)(__nsecs) + + +/* + * Convert cputime <-> microseconds + */ +#define cputime_to_usecs(__ct) \ + cputime_div(__ct, NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) \ + (__force cputime_t)((__usecs) * NSEC_PER_USEC) +#define usecs_to_cputime64(__usecs) \ + (__force cputime64_t)((__usecs) * NSEC_PER_USEC) + +/* + * Convert cputime <-> seconds + */ +#define cputime_to_secs(__ct) \ + cputime_div(__ct, NSEC_PER_SEC) +#define secs_to_cputime(__secs) \ + (__force cputime_t)((__secs) * NSEC_PER_SEC) + +/* + * Convert cputime <-> timespec (nsec) + */ +static inline cputime_t timespec_to_cputime(const struct timespec *val) +{ + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; +} +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_nsec = rem; +} + +/* + * Convert cputime <-> timeval (msec) + */ +static inline cputime_t timeval_to_cputime(const struct 
timeval *val) +{ + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + + val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; +} +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_usec = rem / NSEC_PER_USEC; +} + +/* + * Convert cputime <-> clock (USER_HZ) + */ +#define cputime_to_clock_t(__ct) \ + cputime_div(__ct, (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) \ + (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) + +/* + * Convert cputime64 to clock. + */ +#define cputime64_to_clock_t(__ct) \ + cputime_to_clock_t((__force cputime_t)__ct) + + extern void arch_vtime_task_switch(struct task_struct *tsk); #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 8a624b454ba3..358e54777b56 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -4,8 +4,4 @@ #include #include -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -# include -#endif - #endif diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h deleted file mode 100644 index 4e3b18e559b1..000000000000 --- a/include/asm-generic/cputime_nsecs.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Definitions for measuring cputime in nsecs resolution. - * - * Based on - * - * Copyright (C) 2007 FUJITSU LIMITED - * Copyright (C) 2007 Hidetoshi Seto - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#ifndef _ASM_GENERIC_CPUTIME_NSECS_H -#define _ASM_GENERIC_CPUTIME_NSECS_H - -#include - -typedef u64 __nocast cputime_t; -typedef u64 __nocast cputime64_t; - -#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) - -#define cputime_one_jiffy jiffies_to_cputime(1) - -#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) -#define cputime_div_rem(__ct, divisor, remainder) \ - div_u64_rem((__force u64)__ct, divisor, remainder); - -/* - * Convert cputime <-> jiffies (HZ) - */ -#define cputime_to_jiffies(__ct) \ - cputime_div(__ct, NSEC_PER_SEC / HZ) -#define jiffies_to_cputime(__jif) \ - (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) -#define cputime64_to_jiffies64(__ct) \ - cputime_div(__ct, NSEC_PER_SEC / HZ) -#define jiffies64_to_cputime64(__jif) \ - (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) - - -/* - * Convert cputime <-> nanoseconds - */ -#define cputime_to_nsecs(__ct) \ - (__force u64)(__ct) -#define nsecs_to_cputime(__nsecs) \ - (__force cputime_t)(__nsecs) -#define nsecs_to_cputime64(__nsecs) \ - (__force cputime64_t)(__nsecs) - - -/* - * Convert cputime <-> microseconds - */ -#define cputime_to_usecs(__ct) \ - cputime_div(__ct, NSEC_PER_USEC) -#define usecs_to_cputime(__usecs) \ - (__force cputime_t)((__usecs) * NSEC_PER_USEC) -#define usecs_to_cputime64(__usecs) \ - (__force cputime64_t)((__usecs) * NSEC_PER_USEC) - -/* - * Convert cputime <-> seconds - */ -#define cputime_to_secs(__ct) \ - cputime_div(__ct, NSEC_PER_SEC) -#define secs_to_cputime(__secs) \ - (__force cputime_t)((__secs) * NSEC_PER_SEC) - -/* - * Convert cputime <-> timespec (nsec) - */ -static inline cputime_t timespec_to_cputime(const struct timespec *val) -{ - u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; - return (__force cputime_t) ret; -} -static inline void 
cputime_to_timespec(const cputime_t ct, struct timespec *val) -{ - u32 rem; - - val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); - val->tv_nsec = rem; -} - -/* - * Convert cputime <-> timeval (msec) - */ -static inline cputime_t timeval_to_cputime(const struct timeval *val) -{ - u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + - val->tv_usec * NSEC_PER_USEC; - return (__force cputime_t) ret; -} -static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) -{ - u32 rem; - - val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); - val->tv_usec = rem / NSEC_PER_USEC; -} - -/* - * Convert cputime <-> clock (USER_HZ) - */ -#define cputime_to_clock_t(__ct) \ - cputime_div(__ct, (NSEC_PER_SEC / USER_HZ)) -#define clock_t_to_cputime(__x) \ - (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) - -/* - * Convert cputime64 to clock. - */ -#define cputime64_to_clock_t(__ct) \ - cputime_to_clock_t((__force cputime_t)__ct) - -#endif -- cgit v1.2.3 From e2339a4caa5e83a3da5e76880d81e8a23e97aa16 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:45 +0100 Subject: ia64: Convert vtime to use nsec units directly There is no need anymore for this cputime_t midlayer. Let's use nsec units directly. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-30-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/kernel/head.S | 4 ++-- arch/ia64/kernel/time.c | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index c9b5e942f671..3204fddc439c 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -1031,7 +1031,7 @@ GLOBAL_ENTRY(ia64_native_sched_clock) END(ia64_native_sched_clock) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -GLOBAL_ENTRY(cycle_to_cputime) +GLOBAL_ENTRY(cycle_to_nsec) alloc r16=ar.pfs,1,0,0,0 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 ;; @@ -1047,7 +1047,7 @@ GLOBAL_ENTRY(cycle_to_cputime) ;; shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT br.ret.sptk.many rp -END(cycle_to_cputime) +END(cycle_to_nsec) #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_IA64_BRL_EMU diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index f15bca4776a7..faa116822c4c 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -60,7 +60,7 @@ static struct clocksource *itc_clocksource; #include -extern cputime_t cycle_to_cputime(u64 cyc); +extern u64 cycle_to_nsec(u64 cyc); void vtime_flush(struct task_struct *tsk) { @@ -68,26 +68,26 @@ void vtime_flush(struct task_struct *tsk) u64 delta; if (ti->utime) - account_user_time(tsk, cputime_to_nsecs(cycle_to_cputime(ti->utime))); + account_user_time(tsk, cycle_to_nsec(ti->utime)); if (ti->gtime) - account_guest_time(tsk, cputime_to_nsecs(cycle_to_cputime(ti->gtime))); + account_guest_time(tsk, cycle_to_nsec(ti->gtime)); if (ti->idle_time) - account_idle_time(cputime_to_nsecs(cycle_to_cputime(ti->idle_time))); + account_idle_time(cycle_to_nsec(ti->idle_time)); if (ti->stime) { - delta = cputime_to_nsecs(cycle_to_cputime(ti->stime)); + delta = cycle_to_nsec(ti->stime); account_system_index_time(tsk, delta, CPUTIME_SYSTEM); } if (ti->hardirq_time) { - delta = 
cputime_to_nsecs(cycle_to_cputime(ti->hardirq_time)); + delta = cycle_to_nsec(ti->hardirq_time); account_system_index_time(tsk, delta, CPUTIME_IRQ); } if (ti->softirq_time) { - delta = cputime_to_nsecs(cycle_to_cputime(ti->softirq_time)); + delta = cycle_to_nsec(ti->softirq_time); account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); } -- cgit v1.2.3 From c833d82df73483c1716243bcd9cb5b6fef94621f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:46 +0100 Subject: ia64, sched/cputime: Remove unused cputime definitions This nsec based cputime_t implementation isn't used anymore. We can remove it. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-31-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/cputime.h | 106 ---------------------------------- 1 file changed, 106 deletions(-) diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index fee773c039b7..44bcffc5681c 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -20,113 +20,7 @@ #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE # include - #else - -#include -#include - -typedef u64 __nocast cputime_t; -typedef u64 __nocast cputime64_t; - -#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) - -#define cputime_one_jiffy jiffies_to_cputime(1) - -#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) -#define cputime_div_rem(__ct, divisor, remainder) \ - div_u64_rem((__force u64)__ct, divisor, remainder); - -/* - * Convert cputime <-> jiffies (HZ) - */ -#define cputime_to_jiffies(__ct) \ - cputime_div(__ct, NSEC_PER_SEC / HZ) -#define jiffies_to_cputime(__jif) \ - (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) -#define cputime64_to_jiffies64(__ct) \ - cputime_div(__ct, NSEC_PER_SEC / HZ) -#define jiffies64_to_cputime64(__jif) \ - (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) - - -/* - * Convert cputime <-> nanoseconds - */ -#define cputime_to_nsecs(__ct) \ - (__force u64)(__ct) -#define nsecs_to_cputime(__nsecs) \ - (__force cputime_t)(__nsecs) -#define nsecs_to_cputime64(__nsecs) \ - (__force cputime64_t)(__nsecs) - - -/* - * Convert cputime <-> microseconds - */ -#define cputime_to_usecs(__ct) \ - cputime_div(__ct, NSEC_PER_USEC) -#define usecs_to_cputime(__usecs) \ - (__force cputime_t)((__usecs) * NSEC_PER_USEC) -#define usecs_to_cputime64(__usecs) \ - (__force cputime64_t)((__usecs) * NSEC_PER_USEC) - -/* - * Convert cputime <-> seconds - */ -#define cputime_to_secs(__ct) \ - cputime_div(__ct, NSEC_PER_SEC) -#define secs_to_cputime(__secs) \ - (__force cputime_t)((__secs) * NSEC_PER_SEC) - -/* - * Convert cputime <-> timespec (nsec) - */ -static inline cputime_t timespec_to_cputime(const struct timespec *val) -{ - u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; - return (__force cputime_t) ret; -} -static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) -{ - u32 rem; - - val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); - val->tv_nsec = rem; -} - -/* - * Convert cputime <-> timeval (msec) - */ -static inline cputime_t timeval_to_cputime(const struct timeval *val) -{ - u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + - val->tv_usec * NSEC_PER_USEC; - return
(__force cputime_t) ret; -} -static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) -{ - u32 rem; - - val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); - val->tv_usec = rem / NSEC_PER_USEC; -} - -/* - * Convert cputime <-> clock (USER_HZ) - */ -#define cputime_to_clock_t(__ct) \ - cputime_div(__ct, (NSEC_PER_SEC / USER_HZ)) -#define clock_t_to_cputime(__x) \ - (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) - -/* - * Convert cputime64 to clock. - */ -#define cputime64_to_clock_t(__ct) \ - cputime_to_clock_t((__force cputime_t)__ct) - - extern void arch_vtime_task_switch(struct task_struct *tsk); #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -- cgit v1.2.3 From 42b425b3360a20ed08e0d735623f389bdf5e4500 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:47 +0100 Subject: s390, sched/cputime: Make arch_cpu_idle_time() to return nsecs This way we don't need to deal with cputime_t details from the core code. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-32-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/s390/include/asm/cputime.h | 2 +- arch/s390/kernel/idle.c | 5 +++-- fs/proc/stat.c | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 221b454c734a..9a944815e913 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -141,7 +141,7 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime) return clock; } -cputime64_t arch_cpu_idle_time(int cpu); +u64 arch_cpu_idle_time(int cpu); #define arch_idle_time(cpu) arch_cpu_idle_time(cpu) diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index 5c0e08e761c3..d3bf69ef42cf 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -84,7 +84,7 @@ static ssize_t show_idle_time(struct device *dev, } DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); -cputime64_t arch_cpu_idle_time(int cpu) +u64 arch_cpu_idle_time(int cpu) { struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); unsigned long long now, idle_enter, idle_exit; @@ -96,7 +96,8 @@ cputime64_t arch_cpu_idle_time(int cpu) idle_enter = ACCESS_ONCE(idle->clock_idle_enter); idle_exit = ACCESS_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); - return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; + + return cputime_to_nsecs(idle_enter ? 
((idle_exit ?: now) - idle_enter) : 0); } void arch_cpu_idle_enter(void) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 44475a44cbf1..e47c3e8c4dfe 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -27,7 +27,7 @@ static u64 get_idle_time(int cpu) idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) - idle += cputime_to_nsecs(arch_idle_time(cpu)); + idle += arch_idle_time(cpu); return idle; } @@ -37,7 +37,7 @@ static u64 get_iowait_time(int cpu) iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; if (cpu_online(cpu) && nr_iowait_cpu(cpu)) - iowait += cputime_to_nsecs(arch_idle_time(cpu)); + iowait += arch_idle_time(cpu); return iowait; } -- cgit v1.2.3 From e7f340ca9c0709508e6f590ac64f341058df241d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:48 +0100 Subject: powerpc, sched/cputime: Remove unused cputime definitions Since the core doesn't deal with cputime_t anymore, most of these APIs have been left unused. Lets remove these. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-33-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/cputime.h | 173 ------------------------------------- arch/powerpc/kernel/time.c | 20 +---- 2 files changed, 2 insertions(+), 191 deletions(-) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index aa2e6a34b872..6ec0ba6f1a61 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -18,9 +18,6 @@ #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include -#ifdef __KERNEL__ -static inline void setup_cputime_one_jiffy(void) { } -#endif #else #include @@ -36,65 +33,6 @@ typedef u64 __nocast cputime64_t; #define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new) #ifdef __KERNEL__ - -/* - * One jiffy in timebase units computed during initialization - */ -extern cputime_t cputime_one_jiffy; - -/* - * Convert cputime <-> jiffies - */ -extern u64 __cputime_jiffies_factor; - -static inline unsigned long cputime_to_jiffies(const cputime_t ct) -{ - return mulhdu((__force u64) ct, __cputime_jiffies_factor); -} - -static inline cputime_t jiffies_to_cputime(const unsigned long jif) -{ - u64 ct; - unsigned long sec; - - /* have to be a little careful about overflow */ - ct = jif % HZ; - sec = jif / HZ; - if (ct) { - ct *= tb_ticks_per_sec; - do_div(ct, HZ); - } - if (sec) - ct += (cputime_t) sec * tb_ticks_per_sec; - return (__force cputime_t) ct; -} - -static inline void setup_cputime_one_jiffy(void) -{ - cputime_one_jiffy = jiffies_to_cputime(1); -} - -static inline cputime64_t jiffies64_to_cputime64(const u64 jif) -{ - u64 ct; - u64 sec = jif; - - /* have to be a little careful about overflow */ - ct = do_div(sec, HZ); - if (ct) { - ct *= tb_ticks_per_sec; - do_div(ct, HZ); - } - if (sec) - ct += (u64) sec * tb_ticks_per_sec; - return (__force cputime64_t) ct; -} - -static inline u64 cputime64_to_jiffies64(const cputime_t ct) -{ - return mulhdu((__force u64) ct, __cputime_jiffies_factor); -} - /* * Convert cputime <-> microseconds */ @@ -105,117 +43,6 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct) return mulhdu((__force u64) ct, __cputime_usec_factor); } -static inline cputime_t 
usecs_to_cputime(const unsigned long us) -{ - u64 ct; - unsigned long sec; - - /* have to be a little careful about overflow */ - ct = us % 1000000; - sec = us / 1000000; - if (ct) { - ct *= tb_ticks_per_sec; - do_div(ct, 1000000); - } - if (sec) - ct += (cputime_t) sec * tb_ticks_per_sec; - return (__force cputime_t) ct; -} - -#define usecs_to_cputime64(us) usecs_to_cputime(us) - -/* - * Convert cputime <-> seconds - */ -extern u64 __cputime_sec_factor; - -static inline unsigned long cputime_to_secs(const cputime_t ct) -{ - return mulhdu((__force u64) ct, __cputime_sec_factor); -} - -static inline cputime_t secs_to_cputime(const unsigned long sec) -{ - return (__force cputime_t)((u64) sec * tb_ticks_per_sec); -} - -/* - * Convert cputime <-> timespec - */ -static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) -{ - u64 x = (__force u64) ct; - unsigned int frac; - - frac = do_div(x, tb_ticks_per_sec); - p->tv_sec = x; - x = (u64) frac * 1000000000; - do_div(x, tb_ticks_per_sec); - p->tv_nsec = x; -} - -static inline cputime_t timespec_to_cputime(const struct timespec *p) -{ - u64 ct; - - ct = (u64) p->tv_nsec * tb_ticks_per_sec; - do_div(ct, 1000000000); - return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); -} - -/* - * Convert cputime <-> timeval - */ -static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) -{ - u64 x = (__force u64) ct; - unsigned int frac; - - frac = do_div(x, tb_ticks_per_sec); - p->tv_sec = x; - x = (u64) frac * 1000000; - do_div(x, tb_ticks_per_sec); - p->tv_usec = x; -} - -static inline cputime_t timeval_to_cputime(const struct timeval *p) -{ - u64 ct; - - ct = (u64) p->tv_usec * tb_ticks_per_sec; - do_div(ct, 1000000); - return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); -} - -/* - * Convert cputime <-> clock_t (units of 1/USER_HZ seconds) - */ -extern u64 __cputime_clockt_factor; - -static inline unsigned long cputime_to_clock_t(const cputime_t ct) -{ - return mulhdu((__force u64) ct, __cputime_clockt_factor); -} - -static inline cputime_t clock_t_to_cputime(const unsigned long clk) -{ - u64 ct; - unsigned long sec; - - /* have to be a little careful about overflow */ - ct = clk % USER_HZ; - sec = clk / USER_HZ; - if (ct) { - ct *= tb_ticks_per_sec; - do_div(ct, USER_HZ); - } - if (sec) - ct += (u64) sec * tb_ticks_per_sec; - return (__force cputime_t) ct; -} - -#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) - /* * PPC64 uses PACA which is task independent for storing accounting data while * PPC32 uses struct thread_info, therefore at task switch the accounting data diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 01f53bfe100b..14e485525e31 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -152,20 +152,11 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE /* - * Factors for converting from cputime_t (timebase ticks) to - * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). - * These are all stored as 0.64 fixed-point binary fractions. + * Factor for converting from cputime_t (timebase ticks) to + * microseconds. This is stored as 0.64 fixed-point binary fraction. 
*/ -u64 __cputime_jiffies_factor; -EXPORT_SYMBOL(__cputime_jiffies_factor); u64 __cputime_usec_factor; EXPORT_SYMBOL(__cputime_usec_factor); -u64 __cputime_sec_factor; -EXPORT_SYMBOL(__cputime_sec_factor); -u64 __cputime_clockt_factor; -EXPORT_SYMBOL(__cputime_clockt_factor); - -cputime_t cputime_one_jiffy; #ifdef CONFIG_PPC_SPLPAR void (*dtl_consumer)(struct dtl_entry *, u64); @@ -181,14 +172,8 @@ static void calc_cputime_factors(void) { struct div_result res; - div128_by_32(HZ, 0, tb_ticks_per_sec, &res); - __cputime_jiffies_factor = res.result_low; div128_by_32(1000000, 0, tb_ticks_per_sec, &res); __cputime_usec_factor = res.result_low; - div128_by_32(1, 0, tb_ticks_per_sec, &res); - __cputime_sec_factor = res.result_low; - div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); - __cputime_clockt_factor = res.result_low; } /* @@ -1053,7 +1038,6 @@ void __init time_init(void) tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; calc_cputime_factors(); - setup_cputime_one_jiffy(); /* * Compute scale factor for sched_clock. -- cgit v1.2.3 From ea3a96d0b4d223fc5e70e06281356762613487bc Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:49 +0100 Subject: s390, sched/cputime: Remove unused cputime definitions Since the core doesn't deal with cputime_t anymore, most of these APIs have been left unused. Lets remove these. Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-34-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/s390/include/asm/cputime.h | 107 ---------------------------------------- 1 file changed, 107 deletions(-) diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 9a944815e913..d1c407ddf703 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -25,33 +25,6 @@ static inline unsigned long __div(unsigned long long n, unsigned long base) return n / base; } -#define cputime_one_jiffy jiffies_to_cputime(1) - -/* - * Convert cputime to jiffies and back. - */ -static inline unsigned long cputime_to_jiffies(const cputime_t cputime) -{ - return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ); -} - -static inline cputime_t jiffies_to_cputime(const unsigned int jif) -{ - return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ)); -} - -static inline u64 cputime64_to_jiffies64(cputime64_t cputime) -{ - unsigned long long jif = (__force unsigned long long) cputime; - do_div(jif, CPUTIME_PER_SEC / HZ); - return jif; -} - -static inline cputime64_t jiffies64_to_cputime64(const u64 jif) -{ - return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ)); -} - /* * Convert cputime to microseconds and back. */ @@ -60,86 +33,6 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime) return (__force unsigned long long) cputime >> 12; } -static inline cputime_t usecs_to_cputime(const unsigned int m) -{ - return (__force cputime_t)(m * CPUTIME_PER_USEC); -} - -#define usecs_to_cputime64(m) usecs_to_cputime(m) - -/* - * Convert cputime to milliseconds and back. 
- */ -static inline unsigned int cputime_to_secs(const cputime_t cputime) -{ - return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1; -} - -static inline cputime_t secs_to_cputime(const unsigned int s) -{ - return (__force cputime_t)(s * CPUTIME_PER_SEC); -} - -/* - * Convert cputime to timespec and back. - */ -static inline cputime_t timespec_to_cputime(const struct timespec *value) -{ - unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC; - return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC)); -} - -static inline void cputime_to_timespec(const cputime_t cputime, - struct timespec *value) -{ - unsigned long long __cputime = (__force unsigned long long) cputime; - value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; - value->tv_sec = __cputime / CPUTIME_PER_SEC; -} - -/* - * Convert cputime to timeval and back. - * Since cputime and timeval have the same resolution (microseconds) - * this is easy. - */ -static inline cputime_t timeval_to_cputime(const struct timeval *value) -{ - unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC; - return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC); -} - -static inline void cputime_to_timeval(const cputime_t cputime, - struct timeval *value) -{ - unsigned long long __cputime = (__force unsigned long long) cputime; - value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; - value->tv_sec = __cputime / CPUTIME_PER_SEC; -} - -/* - * Convert cputime to clock and back. - */ -static inline clock_t cputime_to_clock_t(cputime_t cputime) -{ - unsigned long long clock = (__force unsigned long long) cputime; - do_div(clock, CPUTIME_PER_SEC / USER_HZ); - return clock; -} - -static inline cputime_t clock_t_to_cputime(unsigned long x) -{ - return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ)); -} - -/* - * Convert cputime64 to clock. - */ -static inline clock_t cputime64_to_clock_t(cputime64_t cputime) -{ - unsigned long long clock = (__force unsigned long long) cputime; - do_div(clock, CPUTIME_PER_SEC / USER_HZ); - return clock; -} u64 arch_cpu_idle_time(int cpu); -- cgit v1.2.3 From 934ad473552a48d303a54279518aa19cf674567d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:50 +0100 Subject: sched/cputime: Remove unused nsec_to_cputime() It's unused now. 
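The removed fallback went through microseconds, nsecs_to_cputime(ns) == usecs_to_cputime(ns / NSEC_PER_USEC), so every conversion silently dropped the sub-microsecond remainder; with the core now accounting in nanoseconds there is no caller left to take that hit. A toy illustration follows; the microsecond-granular cputime_t here is an assumption for the example, not any particular architecture's unit.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Toy stand-in: a cputime unit with microsecond granularity. */
typedef uint64_t cputime_t;

static cputime_t usecs_to_cputime(uint64_t us) { return us; }

/* The removed generic fallback: go through usecs, losing sub-usec remainders. */
static cputime_t nsecs_to_cputime(uint64_t ns)
{
        return usecs_to_cputime(ns / NSEC_PER_USEC);
}

int main(void)
{
        printf("%llu\n", (unsigned long long)nsecs_to_cputime(1999)); /* -> 1, not ~2 */
        return 0;
}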
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-35-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- include/linux/cputime.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/include/linux/cputime.h b/include/linux/cputime.h index f2eb2ee535ca..a257d6690621 100644 --- a/include/linux/cputime.h +++ b/include/linux/cputime.h @@ -8,9 +8,4 @@ (cputime_to_usecs(__ct) * NSEC_PER_USEC) #endif -#ifndef nsecs_to_cputime -# define nsecs_to_cputime(__nsecs) \ - usecs_to_cputime((__nsecs) / NSEC_PER_USEC) -#endif - #endif /* __LINUX_CPUTIME_H */ -- cgit v1.2.3 From b672592f022152155fde7db99aafbcf04a2c3ba5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:51 +0100 Subject: sched/cputime: Remove generic asm headers cputime_t is now only used by two architectures: * powerpc (when CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y) * s390 And since the core doesn't use it anymore, we don't need any arch support from the others. So we can remove their stub implementations. A final cleanup would be to provide an efficient pure arch implementation of cputime_to_nsec() for s390 and powerpc and finally remove include/linux/cputime.h . Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-36-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/alpha/include/asm/Kbuild | 1 - arch/arc/include/asm/Kbuild | 1 - arch/arm/include/asm/Kbuild | 1 - arch/arm64/include/asm/Kbuild | 1 - arch/avr32/include/asm/Kbuild | 1 - arch/blackfin/include/asm/Kbuild | 1 - arch/c6x/include/asm/Kbuild | 1 - arch/cris/include/asm/Kbuild | 1 - arch/frv/include/asm/Kbuild | 1 - arch/h8300/include/asm/Kbuild | 1 - arch/hexagon/include/asm/Kbuild | 1 - arch/ia64/include/asm/cputime.h | 4 +--- arch/m32r/include/asm/Kbuild | 1 - arch/m68k/include/asm/Kbuild | 1 - arch/metag/include/asm/Kbuild | 1 - arch/microblaze/include/asm/Kbuild | 1 - arch/mips/include/asm/Kbuild | 1 - arch/mn10300/include/asm/Kbuild | 1 - arch/nios2/include/asm/Kbuild | 1 - arch/openrisc/include/asm/Kbuild | 1 - arch/parisc/include/asm/Kbuild | 1 - arch/powerpc/include/asm/cputime.h | 4 +--- arch/score/include/asm/Kbuild | 1 - arch/sh/include/asm/Kbuild | 1 - arch/sparc/include/asm/Kbuild | 1 - arch/tile/include/asm/Kbuild | 1 - arch/um/include/asm/Kbuild | 1 - arch/unicore32/include/asm/Kbuild | 1 - arch/x86/include/asm/Kbuild | 1 - arch/xtensa/include/asm/Kbuild | 1 - include/asm-generic/cputime.h | 7 ------- include/linux/cputime.h | 2 ++ 32 files changed, 4 insertions(+), 41 deletions(-) delete mode 100644 include/asm-generic/cputime.h diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index bf8475ce85ee..baa152b9348e 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h -generic-y += cputime.h generic-y += exec.h generic-y += export.h generic-y += irq_work.h diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index c332604606dd..63a04013d05a 100644 --- 
a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += auxvec.h generic-y += bitsperlong.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index efb21757d41f..b14e8c7d71bd 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += bitsperlong.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += early_ioremap.h generic-y += emergency-restart.h diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 8365a84c2640..a12f1afc95a3 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += delay.h generic-y += div64.h generic-y += dma.h diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 241b9b9729d8..3d7ef2c17a7c 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += clkdev.h -generic-y += cputime.h generic-y += delay.h generic-y += device.h generic-y += div64.h diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 2fb67b59d188..d6fa60b158be 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += auxvec.h generic-y += bitsperlong.h generic-y += bugs.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 64465e7e2245..4e9f57433f3a 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -5,7 +5,6 @@ generic-y += barrier.h generic-y += bitsperlong.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 1778805f6380..9f19e19bff9d 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -4,7 +4,6 @@ generic-y += barrier.h generic-y += bitsperlong.h generic-y += clkdev.h generic-y += cmpxchg.h -generic-y += cputime.h generic-y += device.h generic-y += div64.h generic-y += errno.h diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 1fa084cf1a43..0f5b0d5d313c 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += clkdev.h -generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 373cb23301e3..5efd0c87f3c0 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -5,7 +5,6 @@ generic-y += bugs.h generic-y += cacheflush.h generic-y += checksum.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += delay.h generic-y += device.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index db8ddabc6bd2..a43a7c90e4af 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += barrier.h generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/ia64/include/asm/cputime.h 
b/arch/ia64/include/asm/cputime.h index 44bcffc5681c..3d665c0627a8 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -18,9 +18,7 @@ #ifndef __IA64_CPUTIME_H #define __IA64_CPUTIME_H -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -# include -#else +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE extern void arch_vtime_task_switch(struct task_struct *tsk); #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 860e440611c9..652100b64a71 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += clkdev.h -generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += kvm_para.h diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 1f2e5d31cb24..6c76d6c24b3d 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += barrier.h generic-y += bitsperlong.h generic-y += clkdev.h -generic-y += cputime.h generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 167150c701d1..d3731f0db73b 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += auxvec.h generic-y += bitsperlong.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += dma.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index b0ae88c9fed9..6275eb051801 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += barrier.h generic-y += clkdev.h -generic-y += cputime.h generic-y += device.h generic-y += exec.h generic-y += irq_work.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 3269b742a75e..994b1c4392be 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,7 +1,6 @@ # MIPS headers generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += dma-contiguous.h generic-y += emergency-restart.h diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 1c8dd0f5cd5d..97f64c723a0c 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += barrier.h generic-y += clkdev.h -generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index d63330e88379..35b0e883761a 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += bitsperlong.h generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 2832f031fb11..ef8d1ccc3e45 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += checksum.h generic-y += clkdev.h generic-y += cmpxchg-local.h generic-y += cmpxchg.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 91f53c07f410..4e179d770d69 100644 --- a/arch/parisc/include/asm/Kbuild +++ 
b/arch/parisc/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += auxvec.h generic-y += barrier.h generic-y += clkdev.h -generic-y += cputime.h generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 6ec0ba6f1a61..99b541865d8d 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -16,9 +16,7 @@ #ifndef __POWERPC_CPUTIME_H #define __POWERPC_CPUTIME_H -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include -#else +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include #include diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index a05218ff3fe4..51970bb6c4fe 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -4,7 +4,6 @@ header-y += generic-y += barrier.h generic-y += clkdev.h -generic-y += cputime.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 751c3373a92c..cf2a75063b53 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += bitsperlong.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += delay.h generic-y += div64.h diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 0569bfac4afb..e9e837bc3158 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h -generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 2d1f5638974c..51a339feceac 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -4,7 +4,6 @@ header-y += ../arch/ generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 052f7f6d0551..90c281cd7e1d 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += barrier.h generic-y += bug.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += delay.h generic-y += device.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 256c45b3ae34..5d51ade89f4c 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -4,7 +4,6 @@ generic-y += auxvec.h generic-y += bitsperlong.h generic-y += bugs.h generic-y += clkdev.h -generic-y += cputime.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 2b892e2313a9..5d6a53fd7521 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -7,7 +7,6 @@ generated-y += unistd_64_x32.h generated-y += xen-hypercalls.h generic-y += clkdev.h -generic-y += cputime.h generic-y += dma-contiguous.h generic-y += early_ioremap.h generic-y += mcs_spinlock.h diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index b7fbaa56b51a..9e9760b20be5 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += bitsperlong.h generic-y += bug.h generic-y += clkdev.h -generic-y += cputime.h generic-y += div64.h generic-y += dma-contiguous.h generic-y += emergency-restart.h diff --git 
a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h deleted file mode 100644 index 358e54777b56..000000000000 --- a/include/asm-generic/cputime.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_GENERIC_CPUTIME_H -#define _ASM_GENERIC_CPUTIME_H - -#include -#include - -#endif diff --git a/include/linux/cputime.h b/include/linux/cputime.h index a257d6690621..a691dc4ddc13 100644 --- a/include/linux/cputime.h +++ b/include/linux/cputime.h @@ -1,6 +1,7 @@ #ifndef __LINUX_CPUTIME_H #define __LINUX_CPUTIME_H +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include #ifndef cputime_to_nsecs @@ -8,4 +9,5 @@ (cputime_to_usecs(__ct) * NSEC_PER_USEC) #endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #endif /* __LINUX_CPUTIME_H */ -- cgit v1.2.3 From 733ce725aa4bfa9063be053bfe7f4597d76f0dd1 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 27 Jan 2017 12:38:16 -0500 Subject: sched/clock: Add dummy clear_sched_clock_stable() stub function In commit: acb04058de494 ("sched/clock: Fix hotplug crash") the PARISC code gained a call to this function. However the prototype for it is within a CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y #ifdef/#endif. That, combined with this: arch/parisc/Kconfig: select HAVE_UNSTABLE_SCHED_CLOCK if SMP means that PARISC can have it either enabled or disabled, resulting in the following build fail: arch/parisc/kernel/setup.c:180:2: error: implicit declaration of function 'clear_sched_clock_stable' [-Werror=implicit-function-declaration] Add a no-op stub for the non-SMP case to prevent this. Signed-off-by: Paul Gortmaker Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: acb04058de49 ("sched/clock: Fix hotplug crash") Link: http://lkml.kernel.org/r/20170127173816.22733-1-paul.gortmaker@windriver.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index d17645402767..e2ed46d3ed71 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2511,6 +2511,10 @@ static inline void sched_clock_tick(void) { } +static inline void clear_sched_clock_stable(void) +{ +} + static inline void sched_clock_idle_sleep_event(void) { } -- cgit v1.2.3 From 975e155ed8732cb81f55c021c441ae662dd040b5 Mon Sep 17 00:00:00 2001 From: Shile Zhang Date: Sat, 28 Jan 2017 22:00:49 +0800 Subject: sched/rt: Show the 'sched_rr_timeslice' SCHED_RR timeslice tuning knob in milliseconds We added the 'sched_rr_timeslice_ms' SCHED_RR tuning knob in this commit: ce0dbbbb30ae ("sched/rt: Add a tuning knob to allow changing SCHED_RR timeslice") ... which name suggests to users that it's in milliseconds, while in reality it's being set in milliseconds but the result is shown in jiffies. This is obviously confusing when HZ is not 1000, it makes it appear like the value set failed, such as HZ=100: root# echo 100 > /proc/sys/kernel/sched_rr_timeslice_ms root# cat /proc/sys/kernel/sched_rr_timeslice_ms 10 Fix this to be milliseconds all around. 
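To make the before/after visible, here is a minimal userspace sketch of the conversion involved (illustrative only, not kernel code: it assumes HZ=100 and uses a simplified msecs_to_jiffies() without the kernel's rounding):

    #include <stdio.h>

    #define HZ            100
    #define MSEC_PER_SEC  1000
    #define RR_TIMESLICE  (100 * HZ / 1000)  /* default timeslice: 100 msecs, in jiffies */

    /* Internal value, kept in jiffies: */
    static int sched_rr_timeslice = RR_TIMESLICE;
    /* Value exposed via the sysctl, kept in milliseconds: */
    static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;

    /* Simplified stand-in for the kernel helper (no round-up): */
    static int msecs_to_jiffies(int msecs)
    {
            return msecs * HZ / MSEC_PER_SEC;
    }

    int main(void)
    {
            /* echo 100 > /proc/sys/kernel/sched_rr_timeslice_ms */
            sysctl_sched_rr_timeslice = 100;
            sched_rr_timeslice = sysctl_sched_rr_timeslice <= 0 ?
                    RR_TIMESLICE : msecs_to_jiffies(sysctl_sched_rr_timeslice);

            /* Before the fix the sysctl read back the internal jiffies value: */
            printf("old read-back: %d\n", sched_rr_timeslice);         /* 10  */
            /* After the fix it reads back the milliseconds value: */
            printf("new read-back: %d\n", sysctl_sched_rr_timeslice);  /* 100 */

            return 0;
    }

That is the split the patch below makes: /proc/sys/kernel/sched_rr_timeslice_ms now operates on sysctl_sched_rr_timeslice (milliseconds), and only the internal sched_rr_timeslice is converted to jiffies in sched_rr_handler().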
Signed-off-by: Shile Zhang Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1485612049-20923-1-git-send-email-shile.zhang@nokia.com Signed-off-by: Ingo Molnar --- include/linux/sched/sysctl.h | 1 + kernel/sched/core.c | 5 +++-- kernel/sched/rt.c | 1 + kernel/sysctl.c | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 441145351301..49308e142aae 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice; extern unsigned int sysctl_sched_autogroup_enabled; #endif +extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; extern int sched_rr_handler(struct ctl_table *table, int write, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d01f9d047397..10e18faaa632 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8471,8 +8471,9 @@ int sched_rr_handler(struct ctl_table *table, int write, /* make sure that internally we keep jiffies */ /* also, writing zero resets timeslice to default */ if (!ret && write) { - sched_rr_timeslice = sched_rr_timeslice <= 0 ? - RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); + sched_rr_timeslice = + sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : + msecs_to_jiffies(sysctl_sched_rr_timeslice); } mutex_unlock(&mutex); return ret; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 704f2b89abf1..4101f9d1aa40 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -9,6 +9,7 @@ #include int sched_rr_timeslice = RR_TIMESLICE; +int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1aea594a54db..bb260ceb3718 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -416,7 +416,7 @@ static struct ctl_table kern_table[] = { }, { .procname = "sched_rr_timeslice_ms", - .data = &sched_rr_timeslice, + .data = &sysctl_sched_rr_timeslice, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rr_handler, -- cgit v1.2.3 From d1ccc66df8bfe30ee2533ceef40633d65154b43d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 1 Feb 2017 11:46:42 +0100 Subject: sched/core: Clean up comments Refresh the comments in the core scheduler code: - Capitalize sentences consistently - Capitalize 'CPU' consistently - ... and other small details. Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 358 ++++++++++++++++++++++++++-------------------------- 1 file changed, 182 insertions(+), 176 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 10e18faaa632..87cf7ba82bd5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1,31 +1,10 @@ /* * kernel/sched/core.c * - * Kernel scheduler and related syscalls + * Core kernel scheduler code and related syscalls * * Copyright (C) 1991-2002 Linus Torvalds - * - * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and - * make semaphores SMP safe - * 1998-11-19 Implemented schedule_timeout() and related stuff - * by Andrea Arcangeli - * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: - * hybrid priority-list and round-robin design with - * an array-switch method of distributing timeslices - * and per-CPU runqueues. 
Cleanups and useful suggestions - * by Davide Libenzi, preemptible kernel bits by Robert Love. - * 2003-09-03 Interactivity tuning by Con Kolivas. - * 2004-04-02 Scheduler domains code by Nick Piggin - * 2007-04-15 Work begun on replacing all interactivity tuning with a - * fair scheduling design by Con Kolivas. - * 2007-05-05 Load balancing (smp-nice) and other improvements - * by Peter Williams - * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith - * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri - * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, - * Thomas Gleixner, Mike Kravetz */ - #include #include #include @@ -143,7 +122,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; /* - * period over which we measure -rt task cpu usage in us. + * period over which we measure -rt task CPU usage in us. * default: 1s */ unsigned int sysctl_sched_rt_period = 1000000; @@ -156,7 +135,7 @@ __read_mostly int scheduler_running; */ int sysctl_sched_rt_runtime = 950000; -/* cpus with isolated domains */ +/* CPUs with isolated domains */ cpumask_var_t cpu_isolated_map; /* @@ -224,7 +203,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) * If we observe the old cpu in task_rq_lock, the acquire of * the old rq->lock will fully serialize against the stores. * - * If we observe the new cpu in task_rq_lock, the acquire will + * If we observe the new CPU in task_rq_lock, the acquire will * pair with the WMB to ensure we must then also see migrating. */ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { @@ -461,7 +440,7 @@ void wake_up_q(struct wake_q_head *head) task = container_of(node, struct task_struct, wake_q); BUG_ON(!task); - /* task can safely be re-inserted now */ + /* Task can safely be re-inserted now: */ node = node->next; task->wake_q.next = NULL; @@ -519,12 +498,12 @@ void resched_cpu(int cpu) #ifdef CONFIG_SMP #ifdef CONFIG_NO_HZ_COMMON /* - * In the semi idle case, use the nearest busy cpu for migrating timers - * from an idle cpu. This is good for power-savings. + * In the semi idle case, use the nearest busy CPU for migrating timers + * from an idle CPU. This is good for power-savings. * * We don't do similar optimization for completely idle system, as - * selecting an idle cpu will add more delays to the timers than intended - * (as that cpu's timer base may not be uptodate wrt jiffies etc). + * selecting an idle CPU will add more delays to the timers than intended + * (as that CPU's timer base may not be uptodate wrt jiffies etc). */ int get_nohz_timer_target(void) { @@ -553,6 +532,7 @@ unlock: rcu_read_unlock(); return cpu; } + /* * When add_timer_on() enqueues a timer into the timer wheel of an * idle CPU then this timer might expire before the next timer event @@ -1021,7 +1001,7 @@ struct migration_arg { }; /* - * Move (not current) task off this cpu, onto dest cpu. We're doing + * Move (not current) task off this CPU, onto the destination CPU. We're doing * this because either it can't run here any more (set_cpus_allowed() * away from this CPU, or CPU going down), or because we're * attempting to rebalance this task on exec (sched_exec). @@ -1055,8 +1035,8 @@ static int migration_cpu_stop(void *data) struct rq *rq = this_rq(); /* - * The original target cpu might have gone down and we might - * be on another cpu but it doesn't matter. 
+ * The original target CPU might have gone down and we might + * be on another CPU but it doesn't matter. */ local_irq_disable(); /* @@ -1174,7 +1154,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, if (p->flags & PF_KTHREAD) { /* * For kernel threads that do indeed end up on online && - * !active we want to ensure they are strict per-cpu threads. + * !active we want to ensure they are strict per-CPU threads. */ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && !cpumask_intersects(new_mask, cpu_active_mask) && @@ -1279,7 +1259,7 @@ static void __migrate_swap_task(struct task_struct *p, int cpu) /* * Task isn't running anymore; make it appear like we migrated * it before it went to sleep. This means on wakeup we make the - * previous cpu our target instead of where it really is. + * previous CPU our target instead of where it really is. */ p->wake_cpu = cpu; } @@ -1511,12 +1491,12 @@ EXPORT_SYMBOL_GPL(kick_process); * * - on cpu-up we allow per-cpu kthreads on the online && !active cpu, * see __set_cpus_allowed_ptr(). At this point the newly online - * cpu isn't yet part of the sched domains, and balancing will not + * CPU isn't yet part of the sched domains, and balancing will not * see it. * - * - on cpu-down we clear cpu_active() to mask the sched domains and + * - on CPU-down we clear cpu_active() to mask the sched domains and * avoid the load balancer to place new tasks on the to be removed - * cpu. Existing tasks will remain running there and will be taken + * CPU. Existing tasks will remain running there and will be taken * off. * * This means that fallback selection must not select !active CPUs. @@ -1532,9 +1512,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p) int dest_cpu; /* - * If the node that the cpu is on has been offlined, cpu_to_node() - * will return -1. There is no cpu on the node, and we should - * select the cpu on the other node. + * If the node that the CPU is on has been offlined, cpu_to_node() + * will return -1. There is no CPU on the node, and we should + * select the CPU on the other node. */ if (nid != -1) { nodemask = cpumask_of_node(nid); @@ -1566,7 +1546,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) state = possible; break; } - /* fall-through */ + /* Fall-through */ case possible: do_set_cpus_allowed(p, cpu_possible_mask); state = fail; @@ -1610,7 +1590,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) /* * In order not to call set_task_cpu() on a blocking task we need * to rely on ttwu() to place the task on a valid ->cpus_allowed - * cpu. + * CPU. * * Since this is common to all placement strategies, this lives here. 
* @@ -1684,7 +1664,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl activate_task(rq, p, en_flags); p->on_rq = TASK_ON_RQ_QUEUED; - /* if a worker is waking up, notify workqueue */ + /* If a worker is waking up, notify the workqueue: */ if (p->flags & PF_WQ_WORKER) wq_worker_waking_up(p, cpu_of(rq)); } @@ -1867,7 +1847,7 @@ void wake_up_if_idle(int cpu) raw_spin_lock_irqsave(&rq->lock, flags); if (is_idle_task(rq->curr)) smp_send_reschedule(cpu); - /* Else cpu is not in idle, do nothing here */ + /* Else CPU is not idle, do nothing here: */ raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -1888,7 +1868,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) #if defined(CONFIG_SMP) if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { - sched_clock_cpu(cpu); /* sync clocks x-cpu */ + sched_clock_cpu(cpu); /* Sync clocks across CPUs */ ttwu_queue_remote(p, cpu, wake_flags); return; } @@ -1907,8 +1887,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) * MIGRATION * * The basic program-order guarantee on SMP systems is that when a task [t] - * migrates, all its activity on its old cpu [c0] happens-before any subsequent - * execution on its new cpu [c1]. + * migrates, all its activity on its old CPU [c0] happens-before any subsequent + * execution on its new CPU [c1]. * * For migration (of runnable tasks) this is provided by the following means: * @@ -1919,7 +1899,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) * * Transitivity guarantees that B happens after A and C after B. * Note: we only require RCpc transitivity. - * Note: the cpu doing B need not be c0 or c1 + * Note: the CPU doing B need not be c0 or c1 * * Example: * @@ -2027,7 +2007,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) trace_sched_waking(p); - success = 1; /* we're going to change ->state */ + /* We're going to change ->state: */ + success = 1; cpu = task_cpu(p); /* @@ -2076,7 +2057,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) smp_rmb(); /* - * If the owning (remote) cpu is still in the middle of schedule() with + * If the owning (remote) CPU is still in the middle of schedule() with * this task as prev, wait until its done referencing the task. * * Pairs with the smp_store_release() in finish_lock_switch(). @@ -2448,7 +2429,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) */ raw_spin_lock_irqsave(&p->pi_lock, flags); /* - * We're setting the cpu for the first time, we don't migrate, + * We're setting the CPU for the first time, we don't migrate, * so use __set_task_cpu(). */ __set_task_cpu(p, cpu); @@ -2591,7 +2572,7 @@ void wake_up_new_task(struct task_struct *p) /* * Fork balancing, do it here and not earlier because: * - cpus_allowed can change in the fork path - * - any previously selected cpu might disappear through hotplug + * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, * as we're not fully set-up yet. @@ -2945,7 +2926,7 @@ unsigned long nr_running(void) } /* - * Check if only the current task is running on the cpu. + * Check if only the current task is running on the CPU. 
* * Caution: this function does not check that the caller has disabled * preemption, thus the result might have a time-of-check-to-time-of-use @@ -3104,8 +3085,8 @@ unsigned long long task_sched_runtime(struct task_struct *p) * So we have a optimization chance when the task's delta_exec is 0. * Reading ->on_cpu is racy, but this is ok. * - * If we race with it leaving cpu, we'll take a lock. So we're correct. - * If we race with it entering cpu, unaccounted time is 0. This is + * If we race with it leaving CPU, we'll take a lock. So we're correct. + * If we race with it entering CPU, unaccounted time is 0. This is * indistinguishable from the read occurring a few cycles earlier. * If we see ->on_cpu without ->on_rq, the task is leaving, and has * been accounted, so we're correct here as well. @@ -3333,7 +3314,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (unlikely(p == RETRY_TASK)) goto again; - /* assumes fair_sched_class->next == idle_sched_class */ + /* Assumes fair_sched_class->next == idle_sched_class */ if (unlikely(!p)) p = idle_sched_class.pick_next_task(rq, prev, rf); @@ -3350,7 +3331,8 @@ again: } } - BUG(); /* the idle class will always have a runnable task */ + /* The idle class should always have a runnable task: */ + BUG(); } /* @@ -3421,7 +3403,8 @@ static void __sched notrace __schedule(bool preempt) raw_spin_lock(&rq->lock); rq_pin_lock(rq, &rf); - rq->clock_update_flags <<= 1; /* promote REQ to ACT */ + /* Promote REQ to ACT */ + rq->clock_update_flags <<= 1; switch_count = &prev->nivcsw; if (!preempt && prev->state) { @@ -3465,7 +3448,9 @@ static void __sched notrace __schedule(bool preempt) ++*switch_count; trace_sched_switch(preempt, prev, next); - rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */ + + /* Also unlocks the rq: */ + rq = context_switch(rq, prev, next, &rf); } else { rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); rq_unpin_lock(rq, &rf); @@ -3492,14 +3477,18 @@ void __noreturn do_task_dead(void) smp_mb(); raw_spin_unlock_wait(¤t->pi_lock); - /* causes final put_task_struct in finish_task_switch(). */ + /* Causes final put_task_struct in finish_task_switch(): */ __set_current_state(TASK_DEAD); - current->flags |= PF_NOFREEZE; /* tell freezer to ignore us */ + + /* Tell freezer to ignore us: */ + current->flags |= PF_NOFREEZE; + __schedule(false); BUG(); - /* Avoid "noreturn function does return". */ + + /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ for (;;) - cpu_relax(); /* For when BUG is null */ + cpu_relax(); } static inline void sched_submit_work(struct task_struct *tsk) @@ -3792,7 +3781,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) check_class_changed(rq, p, prev_class, oldprio); out_unlock: - preempt_disable(); /* avoid rq from going away on us */ + /* Avoid rq from going away on us: */ + preempt_disable(); __task_rq_unlock(rq, &rf); balance_callback(rq); @@ -3862,7 +3852,7 @@ EXPORT_SYMBOL(set_user_nice); */ int can_nice(const struct task_struct *p, const int nice) { - /* convert nice value [19,-20] to rlimit style value [1,40] */ + /* Convert nice value [19,-20] to rlimit style value [1,40]: */ int nice_rlim = nice_to_rlimit(nice); return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || @@ -3918,7 +3908,7 @@ int task_prio(const struct task_struct *p) } /** - * idle_cpu - is a given cpu idle currently? + * idle_cpu - is a given CPU idle currently? * @cpu: the processor in question. * * Return: 1 if the CPU is currently idle. 0 otherwise. 
@@ -3942,10 +3932,10 @@ int idle_cpu(int cpu) } /** - * idle_task - return the idle task for a given cpu. + * idle_task - return the idle task for a given CPU. * @cpu: the processor in question. * - * Return: The idle task for the cpu @cpu. + * Return: The idle task for the CPU @cpu. */ struct task_struct *idle_task(int cpu) { @@ -4111,7 +4101,7 @@ __checkparam_dl(const struct sched_attr *attr) } /* - * check the target process has a UID that matches the current process's + * Check the target process has a UID that matches the current process's: */ static bool check_same_owner(struct task_struct *p) { @@ -4126,8 +4116,7 @@ static bool check_same_owner(struct task_struct *p) return match; } -static bool dl_param_changed(struct task_struct *p, - const struct sched_attr *attr) +static bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) { struct sched_dl_entity *dl_se = &p->dl; @@ -4154,10 +4143,10 @@ static int __sched_setscheduler(struct task_struct *p, int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; struct rq *rq; - /* may grab non-irq protected spin_locks */ + /* May grab non-irq protected spin_locks: */ BUG_ON(in_interrupt()); recheck: - /* double check policy once rq lock held */ + /* Double check policy once rq lock held: */ if (policy < 0) { reset_on_fork = p->sched_reset_on_fork; policy = oldpolicy = p->policy; @@ -4197,11 +4186,11 @@ recheck: unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); - /* can't set/change the rt policy */ + /* Can't set/change the rt policy: */ if (policy != p->policy && !rlim_rtprio) return -EPERM; - /* can't increase priority */ + /* Can't increase priority: */ if (attr->sched_priority > p->rt_priority && attr->sched_priority > rlim_rtprio) return -EPERM; @@ -4225,11 +4214,11 @@ recheck: return -EPERM; } - /* can't change other user's priorities */ + /* Can't change other user's priorities: */ if (!check_same_owner(p)) return -EPERM; - /* Normal users shall not reset the sched_reset_on_fork flag */ + /* Normal users shall not reset the sched_reset_on_fork flag: */ if (p->sched_reset_on_fork && !reset_on_fork) return -EPERM; } @@ -4241,7 +4230,7 @@ recheck: } /* - * make sure no PI-waiters arrive (or leave) while we are + * Make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: * * To be able to change p->policy safely, the appropriate @@ -4251,7 +4240,7 @@ recheck: update_rq_clock(rq); /* - * Changing the policy of the stop threads its a very bad idea + * Changing the policy of the stop threads its a very bad idea: */ if (p == rq->stop) { task_rq_unlock(rq, p, &rf); @@ -4307,7 +4296,7 @@ change: #endif } - /* recheck policy now with rq lock held */ + /* Re-check policy now with rq lock held: */ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; task_rq_unlock(rq, p, &rf); @@ -4364,15 +4353,15 @@ change: set_curr_task(rq, p); check_class_changed(rq, p, prev_class, oldprio); - preempt_disable(); /* avoid rq from going away on us */ + + /* Avoid rq from going away on us: */ + preempt_disable(); task_rq_unlock(rq, p, &rf); if (pi) rt_mutex_adjust_pi(p); - /* - * Run balance callbacks after we've adjusted the PI chain. - */ + /* Run balance callbacks after we've adjusted the PI chain: */ balance_callback(rq); preempt_enable(); @@ -4465,8 +4454,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) /* * Mimics kernel/events/core.c perf_copy_attr(). 
*/ -static int sched_copy_attr(struct sched_attr __user *uattr, - struct sched_attr *attr) +static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) { u32 size; int ret; @@ -4474,19 +4462,19 @@ static int sched_copy_attr(struct sched_attr __user *uattr, if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) return -EFAULT; - /* - * zero the full structure, so that a short copy will be nice. - */ + /* Zero the full structure, so that a short copy will be nice: */ memset(attr, 0, sizeof(*attr)); ret = get_user(size, &uattr->size); if (ret) return ret; - if (size > PAGE_SIZE) /* silly large */ + /* Bail out on silly large: */ + if (size > PAGE_SIZE) goto err_size; - if (!size) /* abi compat */ + /* ABI compatibility quirk: */ + if (!size) size = SCHED_ATTR_SIZE_VER0; if (size < SCHED_ATTR_SIZE_VER0) @@ -4521,7 +4509,7 @@ static int sched_copy_attr(struct sched_attr __user *uattr, return -EFAULT; /* - * XXX: do we want to be lenient like existing syscalls; or do we want + * XXX: Do we want to be lenient like existing syscalls; or do we want * to be strict and return an error on out-of-bounds values? */ attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); @@ -4541,10 +4529,8 @@ err_size: * * Return: 0 on success. An error code otherwise. */ -SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, - struct sched_param __user *, param) +SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) { - /* negative values for policy are not valid */ if (policy < 0) return -EINVAL; @@ -4854,10 +4840,10 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, } /** - * sys_sched_setaffinity - set the cpu affinity of a process + * sys_sched_setaffinity - set the CPU affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr - * @user_mask_ptr: user-space pointer to the new cpu mask + * @user_mask_ptr: user-space pointer to the new CPU mask * * Return: 0 on success. An error code otherwise. */ @@ -4905,10 +4891,10 @@ out_unlock: } /** - * sys_sched_getaffinity - get the cpu affinity of a process + * sys_sched_getaffinity - get the CPU affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr - * @user_mask_ptr: user-space pointer to hold the current cpu mask + * @user_mask_ptr: user-space pointer to hold the current CPU mask * * Return: size of CPU mask copied to user_mask_ptr on success. An * error code otherwise. @@ -5036,7 +5022,7 @@ EXPORT_SYMBOL(__cond_resched_softirq); * Typical broken usage is: * * while (!event) - * yield(); + * yield(); * * where one assumes that yield() will let 'the other' process run that will * make event true. If the current task is a SCHED_FIFO task that will never @@ -5351,7 +5337,7 @@ void init_idle_bootup_task(struct task_struct *idle) /** * init_idle - set up an idle thread for a given CPU * @idle: task in question - * @cpu: cpu the idle task belongs to + * @cpu: CPU the idle task belongs to * * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. @@ -5382,7 +5368,7 @@ void init_idle(struct task_struct *idle, int cpu) #endif /* * We're having a chicken and egg problem, even though we are - * holding rq->lock, the cpu isn't yet set to this cpu so the + * holding rq->lock, the CPU isn't yet set to this CPU so the * lockdep check in task_group() will fail. * * Similar case to sched_fork(). 
/ Alternatively we could @@ -5447,7 +5433,7 @@ int task_can_attach(struct task_struct *p, /* * Kthreads which disallow setaffinity shouldn't be moved - * to a new cpuset; we don't want to change their cpu + * to a new cpuset; we don't want to change their CPU * affinity and isolating such threads by their set of * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for @@ -5548,7 +5534,7 @@ void sched_setnuma(struct task_struct *p, int nid) #ifdef CONFIG_HOTPLUG_CPU /* - * Ensures that the idle task is using init_mm right before its cpu goes + * Ensure that the idle task is using init_mm right before its CPU goes * offline. */ void idle_task_exit(void) @@ -5632,13 +5618,13 @@ static void migrate_tasks(struct rq *dead_rq) for (;;) { /* * There's this thread running, bail when that's the only - * remaining thread. + * remaining thread: */ if (rq->nr_running == 1) break; /* - * pick_next_task assumes pinned rq->lock. + * pick_next_task() assumes pinned rq->lock: */ rq_pin_lock(rq, &rf); next = pick_next_task(rq, &fake_task, &rf); @@ -5730,7 +5716,8 @@ static void set_cpu_rq_start_time(unsigned int cpu) rq->age_stamp = sched_clock_cpu(cpu); } -static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ +/* Protected by sched_domains_mutex: */ +static cpumask_var_t sched_domains_tmpmask; #ifdef CONFIG_SCHED_DEBUG @@ -5997,7 +5984,7 @@ out: } /* - * By default the system creates a single root-domain with all cpus as + * By default the system creates a single root-domain with all CPUs as * members (mimicking the global state we have today). */ struct root_domain def_root_domain; @@ -6083,9 +6070,9 @@ static void destroy_sched_domains(struct sched_domain *sd) * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this * allows us to avoid some pointer chasing select_idle_sibling(). * - * Also keep a unique ID per domain (we use the first cpu number in + * Also keep a unique ID per domain (we use the first CPU number in * the cpumask of the domain), this allows us to quickly tell if - * two cpus are in the same cache domain, see cpus_share_cache(). + * two CPUs are in the same cache domain, see cpus_share_cache(). */ DEFINE_PER_CPU(struct sched_domain *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); @@ -6170,7 +6157,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) update_top_cache_domain(cpu); } -/* Setup the mask of cpus configured for isolated domains */ +/* Setup the mask of CPUs configured for isolated domains */ static int __init isolated_cpu_setup(char *str) { int ret; @@ -6207,8 +6194,7 @@ enum s_alloc { * * In that case build_sched_domains() will have terminated the iteration early * and our sibling sd spans will be empty. Domains should always include the - * cpu they're built on, so check that. - * + * CPU they're built on, so check that. */ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) { @@ -6227,7 +6213,7 @@ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) } /* - * Return the canonical balance cpu for this group, this is the first cpu + * Return the canonical balance CPU for this group, this is the first CPU * of this group that's also in the iteration mask. */ int group_balance_cpu(struct sched_group *sg) @@ -6287,7 +6273,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) /* * Make sure the first group of this domain contains the - * canonical balance cpu. 
Otherwise the sched_domain iteration + * canonical balance CPU. Otherwise the sched_domain iteration * breaks. See update_sg_lb_stats(). */ if ((!groups && cpumask_test_cpu(cpu, sg_span)) || @@ -6322,7 +6308,9 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) if (sg) { *sg = *per_cpu_ptr(sdd->sg, cpu); (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); - atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ + + /* For claim_allocations: */ + atomic_set(&(*sg)->sgc->ref, 1); } return cpu; @@ -6456,10 +6444,10 @@ static void set_domain_attribute(struct sched_domain *sd, } else request = attr->relax_domain_level; if (request < sd->level) { - /* turn off idle balance on this domain */ + /* Turn off idle balance on this domain: */ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); } else { - /* turn on idle balance on this domain */ + /* Turn on idle balance on this domain: */ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); } } @@ -6473,18 +6461,21 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, switch (what) { case sa_rootdomain: if (!atomic_read(&d->rd->refcount)) - free_rootdomain(&d->rd->rcu); /* fall through */ + free_rootdomain(&d->rd->rcu); + /* Fall through */ case sa_sd: - free_percpu(d->sd); /* fall through */ + free_percpu(d->sd); + /* Fall through */ case sa_sd_storage: - __sdt_free(cpu_map); /* fall through */ + __sdt_free(cpu_map); + /* Fall through */ case sa_none: break; } } -static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, - const struct cpumask *cpu_map) +static enum s_alloc +__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) { memset(d, 0, sizeof(*d)); @@ -6883,7 +6874,7 @@ static void sched_init_numa(void) /* * Now for each level, construct a mask per node which contains all - * cpus of nodes that are that many hops away from us. + * CPUs of nodes that are that many hops away from us. */ for (i = 0; i < level; i++) { sched_domains_numa_masks[i] = @@ -7103,11 +7094,11 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, } /* - * Build sched domains for a given set of cpus and attach the sched domains - * to the individual cpus + * Build sched domains for a given set of CPUs and attach the sched domains + * to the individual CPUs */ -static int build_sched_domains(const struct cpumask *cpu_map, - struct sched_domain_attr *attr) +static int +build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) { enum s_alloc alloc_state; struct sched_domain *sd; @@ -7119,7 +7110,7 @@ static int build_sched_domains(const struct cpumask *cpu_map, if (alloc_state != sa_rootdomain) goto error; - /* Set up domains for cpus specified by the cpu_map. 
*/ + /* Set up domains for CPUs specified by the cpu_map: */ for_each_cpu(i, cpu_map) { struct sched_domain_topology_level *tl; @@ -7185,21 +7176,25 @@ error: return ret; } -static cpumask_var_t *doms_cur; /* current sched domains */ -static int ndoms_cur; /* number of sched domains in 'doms_cur' */ -static struct sched_domain_attr *dattr_cur; - /* attribues of custom domains in 'doms_cur' */ +/* Current sched domains: */ +static cpumask_var_t *doms_cur; + +/* Number of sched domains in 'doms_cur': */ +static int ndoms_cur; + +/* Attribues of custom domains in 'doms_cur' */ +static struct sched_domain_attr *dattr_cur; /* - * Special case: If a kmalloc of a doms_cur partition (array of + * Special case: If a kmalloc() of a doms_cur partition (array of * cpumask) fails, then fallback to a single sched domain, * as determined by the single cpumask fallback_doms. */ -static cpumask_var_t fallback_doms; +static cpumask_var_t fallback_doms; /* * arch_update_cpu_topology lets virtualized architectures update the - * cpu core maps. It is supposed to return 1 if the topology changed + * CPU core maps. It is supposed to return 1 if the topology changed * or 0 if it stayed the same. */ int __weak arch_update_cpu_topology(void) @@ -7234,7 +7229,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. - * For now this just excludes isolated cpus, but could be used to + * For now this just excludes isolated CPUs, but could be used to * exclude other special cases in the future. */ static int init_sched_domains(const struct cpumask *cpu_map) @@ -7254,8 +7249,8 @@ static int init_sched_domains(const struct cpumask *cpu_map) } /* - * Detach sched domains from a group of cpus specified in cpu_map - * These cpus will now be attached to the NULL domain + * Detach sched domains from a group of CPUs specified in cpu_map + * These CPUs will now be attached to the NULL domain */ static void detach_destroy_domains(const struct cpumask *cpu_map) { @@ -7273,7 +7268,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, { struct sched_domain_attr tmp; - /* fast path */ + /* Fast path: */ if (!new && !cur) return 1; @@ -7317,22 +7312,22 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], mutex_lock(&sched_domains_mutex); - /* always unregister in case we don't destroy any domains */ + /* Always unregister in case we don't destroy any domains: */ unregister_sched_domain_sysctl(); - /* Let architecture update cpu core mappings. */ + /* Let the architecture update CPU core mappings: */ new_topology = arch_update_cpu_topology(); n = doms_new ? ndoms_new : 0; - /* Destroy deleted domains */ + /* Destroy deleted domains: */ for (i = 0; i < ndoms_cur; i++) { for (j = 0; j < n && !new_topology; j++) { if (cpumask_equal(doms_cur[i], doms_new[j]) && dattrs_equal(dattr_cur, i, dattr_new, j)) goto match1; } - /* no match - a current sched domain not in new doms_new[] */ + /* No match - a current sched domain not in new doms_new[] */ detach_destroy_domains(doms_cur[i]); match1: ; @@ -7346,23 +7341,24 @@ match1: WARN_ON_ONCE(dattr_new); } - /* Build new domains */ + /* Build new domains: */ for (i = 0; i < ndoms_new; i++) { for (j = 0; j < n && !new_topology; j++) { if (cpumask_equal(doms_new[i], doms_cur[j]) && dattrs_equal(dattr_new, i, dattr_cur, j)) goto match2; } - /* no match - add a new doms_new */ + /* No match - add a new doms_new */ build_sched_domains(doms_new[i], dattr_new ? 
dattr_new + i : NULL); match2: ; } - /* Remember the new sched domains */ + /* Remember the new sched domains: */ if (doms_cur != &fallback_doms) free_sched_domains(doms_cur, ndoms_cur); - kfree(dattr_cur); /* kfree(NULL) is safe */ + + kfree(dattr_cur); doms_cur = doms_new; dattr_cur = dattr_new; ndoms_cur = ndoms_new; @@ -7372,7 +7368,10 @@ match2: mutex_unlock(&sched_domains_mutex); } -static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ +/* + * used to mark begin/end of suspend/resume: + */ +static int num_cpus_frozen; /* * Update cpusets according to cpu_active mask. If cpusets are @@ -7449,7 +7448,7 @@ int sched_cpu_activate(unsigned int cpu) * Put the rq online, if not already. This happens: * * 1) In the early boot process, because we build the real domains - * after all cpus have been brought up. + * after all CPUs have been brought up. * * 2) At runtime, if cpuset_cpu_active() fails to rebuild the * domains. @@ -7564,7 +7563,7 @@ void __init sched_init_smp(void) /* * There's no userspace yet to cause hotplug operations; hence all the - * cpu masks are stable and all blatant races in the below code cannot + * CPU masks are stable and all blatant races in the below code cannot * happen. */ mutex_lock(&sched_domains_mutex); @@ -7684,10 +7683,8 @@ void __init sched_init(void) } #endif /* CONFIG_CPUMASK_OFFSTACK */ - init_rt_bandwidth(&def_rt_bandwidth, - global_rt_period(), global_rt_runtime()); - init_dl_bandwidth(&def_dl_bandwidth, - global_rt_period(), global_rt_runtime()); + init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); + init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); #ifdef CONFIG_SMP init_defrootdomain(); @@ -7723,18 +7720,18 @@ void __init sched_init(void) INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; /* - * How much cpu bandwidth does root_task_group get? + * How much CPU bandwidth does root_task_group get? * * In case of task-groups formed thr' the cgroup filesystem, it - * gets 100% of the cpu resources in the system. This overall - * system cpu resource is divided among the tasks of + * gets 100% of the CPU resources in the system. This overall + * system CPU resource is divided among the tasks of * root_task_group and its child task-groups in a fair manner, * based on each entity's (task or task-group's) weight * (se->load.weight). * * In other words, if root_task_group has 10 tasks of weight * 1024) and two child groups A0 and A1 (of weight 1024 each), - * then A0's share of the cpu resource is: + * then A0's share of the CPU resource is: * * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% * @@ -7843,10 +7840,14 @@ EXPORT_SYMBOL(__might_sleep); void ___might_sleep(const char *file, int line, int preempt_offset) { - static unsigned long prev_jiffy; /* ratelimiting */ + /* Ratelimiting timestamp: */ + static unsigned long prev_jiffy; + unsigned long preempt_disable_ip; - rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ + /* WARN_ON_ONCE() by default, no rate limit required: */ + rcu_sleep_check(); + if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && !is_idle_task(current)) || system_state != SYSTEM_RUNNING || oops_in_progress) @@ -7855,7 +7856,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset) return; prev_jiffy = jiffies; - /* Save this before calling printk(), since that will clobber it */ + /* Save this before calling printk(), since that will clobber it: */ preempt_disable_ip = get_preempt_disable_ip(current); printk(KERN_ERR @@ -7934,7 +7935,7 @@ void normalize_rt_tasks(void) */ /** - * curr_task - return the current task for a given cpu. + * curr_task - return the current task for a given CPU. * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! @@ -7950,13 +7951,13 @@ struct task_struct *curr_task(int cpu) #ifdef CONFIG_IA64 /** - * set_curr_task - set the current task for a given cpu. + * set_curr_task - set the current task for a given CPU. * @cpu: the processor in question. * @p: the task pointer to set. * * Description: This function must only be used when non-maskable interrupts * are serviced on a separate stack. It allows the architecture to switch the - * notion of the current task on a cpu in a non-blocking manner. This function + * notion of the current task on a CPU in a non-blocking manner. This function * must be called with all CPU's synchronized, and interrupts disabled, the * and caller must save the original value of the current task (see * curr_task() above) and restore that value before reenabling interrupts and @@ -8012,7 +8013,8 @@ void sched_online_group(struct task_group *tg, struct task_group *parent) spin_lock_irqsave(&task_group_lock, flags); list_add_rcu(&tg->list, &task_groups); - WARN_ON(!parent); /* root should already exist */ + /* Root should already exist: */ + WARN_ON(!parent); tg->parent = parent; INIT_LIST_HEAD(&tg->children); @@ -8025,13 +8027,13 @@ void sched_online_group(struct task_group *tg, struct task_group *parent) /* rcu callback to free various structures associated with a task group */ static void sched_free_group_rcu(struct rcu_head *rhp) { - /* now it should be safe to free those cfs_rqs */ + /* Now it should be safe to free those cfs_rqs: */ sched_free_group(container_of(rhp, struct task_group, rcu)); } void sched_destroy_group(struct task_group *tg) { - /* wait for possible concurrent references to cfs_rqs complete */ + /* Wait for possible concurrent references to cfs_rqs complete: */ call_rcu(&tg->rcu, sched_free_group_rcu); } @@ -8039,7 +8041,7 @@ void sched_offline_group(struct task_group *tg) { unsigned long flags; - /* end participation in shares distribution */ + /* End participation in shares distribution: */ unregister_fair_sched_group(tg); spin_lock_irqsave(&task_group_lock, flags); @@ -8468,8 +8470,10 @@ int sched_rr_handler(struct ctl_table *table, int write, mutex_lock(&mutex); ret = proc_dointvec(table, write, buffer, lenp, ppos); - /* make sure that internally we keep jiffies */ - /* also, writing zero resets timeslice to default */ + /* + * Make sure that internally we keep jiffies. + * Also, writing zero resets the timeslice to default: + */ if (!ret && write) { sched_rr_timeslice = sysctl_sched_rr_timeslice <= 0 ? 
RR_TIMESLICE : @@ -8654,9 +8658,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) cfs_b->quota = quota; __refill_cfs_bandwidth_runtime(cfs_b); - /* restart the period timer (if active) to handle new period expiry */ + + /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) start_cfs_bandwidth(cfs_b); + raw_spin_unlock_irq(&cfs_b->lock); for_each_online_cpu(i) { @@ -8794,8 +8800,8 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data) parent_quota = parent_b->hierarchical_quota; /* - * ensure max(child_quota) <= parent_quota, inherit when no - * limit is set + * Ensure max(child_quota) <= parent_quota, inherit when no + * limit is set: */ if (quota == RUNTIME_INF) quota = parent_quota; @@ -8904,7 +8910,7 @@ static struct cftype cpu_files[] = { .write_u64 = cpu_rt_period_write_uint, }, #endif - { } /* terminate */ + { } /* Terminate */ }; struct cgroup_subsys cpu_cgrp_subsys = { -- cgit v1.2.3 From 4025819d328cd0efc53ee22e01b800631944b7f3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 1 Feb 2017 11:58:42 +0100 Subject: delayacct: Include include/linux/delayacct.h relies on 'struct taskstats' but does not include the header that defines it. This worked so far because files that included also happened to include other headers that included uapi/linux/taskstats.h. Fix it. Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/delayacct.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6cee17c22313..00e60f79a9cc 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -17,6 +17,7 @@ #ifndef _LINUX_DELAYACCT_H #define _LINUX_DELAYACCT_H +#include #include #include -- cgit v1.2.3 From 535b9552bb81eebe112ee7bd34ee498857b0c26b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 1 Feb 2017 12:29:21 +0100 Subject: sched/rq_clock: Consolidate the ordering of the rq_clock methods update_rq_clock_task() and update_rq_clock() we unnecessarily spread across core.c, requiring an extra prototype line. Move them next to each other and in the proper order. Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 153 ++++++++++++++++++++++++++-------------------------- 1 file changed, 78 insertions(+), 75 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 87cf7ba82bd5..a400190792b9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -73,27 +73,6 @@ DEFINE_MUTEX(sched_domains_mutex); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -static void update_rq_clock_task(struct rq *rq, s64 delta); - -void update_rq_clock(struct rq *rq) -{ - s64 delta; - - lockdep_assert_held(&rq->lock); - - if (rq->clock_update_flags & RQCF_ACT_SKIP) - return; - -#ifdef CONFIG_SCHED_DEBUG - rq->clock_update_flags |= RQCF_UPDATED; -#endif - delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; - if (delta < 0) - return; - rq->clock += delta; - update_rq_clock_task(rq, delta); -} - /* * Debugging: various feature bits */ @@ -218,6 +197,84 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) } } +/* + * RQ-clock updating methods: + */ + +static void update_rq_clock_task(struct rq *rq, s64 delta) +{ +/* + * In theory, the compile should just see 0 here, and optimize out the call + * to sched_rt_avg_update. But I don't trust it... 
+ */ +#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) + s64 steal = 0, irq_delta = 0; +#endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; + + /* + * Since irq_time is only updated on {soft,}irq_exit, we might run into + * this case when a previous update_rq_clock() happened inside a + * {soft,}irq region. + * + * When this happens, we stop ->clock_task and only update the + * prev_irq_time stamp to account for the part that fit, so that a next + * update will consume the rest. This ensures ->clock_task is + * monotonic. + * + * It does however cause some slight miss-attribution of {soft,}irq + * time, a more accurate solution would be to update the irq_time using + * the current rq->clock timestamp, except that would require using + * atomic ops. + */ + if (irq_delta > delta) + irq_delta = delta; + + rq->prev_irq_time += irq_delta; + delta -= irq_delta; +#endif +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + if (static_key_false((¶virt_steal_rq_enabled))) { + steal = paravirt_steal_clock(cpu_of(rq)); + steal -= rq->prev_steal_time_rq; + + if (unlikely(steal > delta)) + steal = delta; + + rq->prev_steal_time_rq += steal; + delta -= steal; + } +#endif + + rq->clock_task += delta; + +#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) + if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) + sched_rt_avg_update(rq, irq_delta + steal); +#endif +} + +void update_rq_clock(struct rq *rq) +{ + s64 delta; + + lockdep_assert_held(&rq->lock); + + if (rq->clock_update_flags & RQCF_ACT_SKIP) + return; + +#ifdef CONFIG_SCHED_DEBUG + rq->clock_update_flags |= RQCF_UPDATED; +#endif + delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; + if (delta < 0) + return; + rq->clock += delta; + update_rq_clock_task(rq, delta); +} + + #ifdef CONFIG_SCHED_HRTICK /* * Use HR-timers to deliver accurate preemption points. @@ -767,60 +824,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task(rq, p, flags); } -static void update_rq_clock_task(struct rq *rq, s64 delta) -{ -/* - * In theory, the compile should just see 0 here, and optimize out the call - * to sched_rt_avg_update. But I don't trust it... - */ -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) - s64 steal = 0, irq_delta = 0; -#endif -#ifdef CONFIG_IRQ_TIME_ACCOUNTING - irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; - - /* - * Since irq_time is only updated on {soft,}irq_exit, we might run into - * this case when a previous update_rq_clock() happened inside a - * {soft,}irq region. - * - * When this happens, we stop ->clock_task and only update the - * prev_irq_time stamp to account for the part that fit, so that a next - * update will consume the rest. This ensures ->clock_task is - * monotonic. - * - * It does however cause some slight miss-attribution of {soft,}irq - * time, a more accurate solution would be to update the irq_time using - * the current rq->clock timestamp, except that would require using - * atomic ops. 
- */ - if (irq_delta > delta) - irq_delta = delta; - - rq->prev_irq_time += irq_delta; - delta -= irq_delta; -#endif -#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - if (static_key_false((¶virt_steal_rq_enabled))) { - steal = paravirt_steal_clock(cpu_of(rq)); - steal -= rq->prev_steal_time_rq; - - if (unlikely(steal > delta)) - steal = delta; - - rq->prev_steal_time_rq += steal; - delta -= steal; - } -#endif - - rq->clock_task += delta; - -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) - if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) - sched_rt_avg_update(rq, irq_delta + steal); -#endif -} - void sched_set_stop_task(int cpu, struct task_struct *stop) { struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; -- cgit v1.2.3 From 004172bdad644327dc7a6543186b9d7b529ee944 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 1 Feb 2017 12:01:04 +0100 Subject: sched/core: Remove unnecessary #include headers Over the years sched/core.c accumulated over 50 #include lines, 40 of which are superfluous. (!) Removing them decreases the preprocessed .c file (.i) size noticeably: triton:~/tip> wc -l kernel/sched/core.i Before: 76387 kernel/sched/core.i After: 75896 kernel/sched/core.i Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 59 +++++++++-------------------------------------------- 1 file changed, 10 insertions(+), 49 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a400190792b9..1cea6c61fb01 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5,63 +5,24 @@ * * Copyright (C) 1991-2002 Linus Torvalds */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include #include -#include -#include + +#include +#include +#include +#include +#include #include -#include +#include +#include +#include #include #include -#include -#ifdef CONFIG_PARAVIRT -#include -#endif #include "sched.h" #include "../workqueue_internal.h" -- cgit v1.2.3 From f2cb13609d5397cdd747f3ed6fb651233851717d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 1 Feb 2017 13:10:18 +0100 Subject: sched/topology: Split out scheduler topology code from core.c into topology.c Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/Makefile | 2 +- kernel/sched/core.c | 1659 +---------------------------------------------- kernel/sched/sched.h | 23 +- kernel/sched/topology.c | 1658 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1684 insertions(+), 1658 deletions(-) create mode 100644 kernel/sched/topology.c diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 5e59b832ae2b..130ce8ac725b 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -18,7 +18,7 @@ endif obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o obj-y += wait.o swait.o completion.o idle.o -obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o +obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o 
obj-$(CONFIG_SCHEDSTATS) += stats.o obj-$(CONFIG_SCHED_DEBUG) += debug.o diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1cea6c61fb01..e4aa470ed454 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -31,7 +31,6 @@ #define CREATE_TRACE_POINTS #include -DEFINE_MUTEX(sched_domains_mutex); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); /* @@ -5446,7 +5445,7 @@ out: #ifdef CONFIG_SMP -static bool sched_smp_initialized __read_mostly; +bool sched_smp_initialized __read_mostly; #ifdef CONFIG_NUMA_BALANCING /* Migrate current task p to target_cpu */ @@ -5643,7 +5642,7 @@ static void migrate_tasks(struct rq *dead_rq) } #endif /* CONFIG_HOTPLUG_CPU */ -static void set_rq_online(struct rq *rq) +void set_rq_online(struct rq *rq) { if (!rq->online) { const struct sched_class *class; @@ -5658,7 +5657,7 @@ static void set_rq_online(struct rq *rq) } } -static void set_rq_offline(struct rq *rq) +void set_rq_offline(struct rq *rq) { if (rq->online) { const struct sched_class *class; @@ -5680,1658 +5679,6 @@ static void set_cpu_rq_start_time(unsigned int cpu) rq->age_stamp = sched_clock_cpu(cpu); } -/* Protected by sched_domains_mutex: */ -static cpumask_var_t sched_domains_tmpmask; - -#ifdef CONFIG_SCHED_DEBUG - -static __read_mostly int sched_debug_enabled; - -static int __init sched_debug_setup(char *str) -{ - sched_debug_enabled = 1; - - return 0; -} -early_param("sched_debug", sched_debug_setup); - -static inline bool sched_debug(void) -{ - return sched_debug_enabled; -} - -static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, - struct cpumask *groupmask) -{ - struct sched_group *group = sd->groups; - - cpumask_clear(groupmask); - - printk(KERN_DEBUG "%*s domain %d: ", level, "", level); - - if (!(sd->flags & SD_LOAD_BALANCE)) { - printk("does not load-balance\n"); - if (sd->parent) - printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" - " has parent"); - return -1; - } - - printk(KERN_CONT "span %*pbl level %s\n", - cpumask_pr_args(sched_domain_span(sd)), sd->name); - - if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { - printk(KERN_ERR "ERROR: domain->span does not contain " - "CPU%d\n", cpu); - } - if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { - printk(KERN_ERR "ERROR: domain->groups does not contain" - " CPU%d\n", cpu); - } - - printk(KERN_DEBUG "%*s groups:", level + 1, ""); - do { - if (!group) { - printk("\n"); - printk(KERN_ERR "ERROR: group is NULL\n"); - break; - } - - if (!cpumask_weight(sched_group_cpus(group))) { - printk(KERN_CONT "\n"); - printk(KERN_ERR "ERROR: empty group\n"); - break; - } - - if (!(sd->flags & SD_OVERLAP) && - cpumask_intersects(groupmask, sched_group_cpus(group))) { - printk(KERN_CONT "\n"); - printk(KERN_ERR "ERROR: repeated CPUs\n"); - break; - } - - cpumask_or(groupmask, groupmask, sched_group_cpus(group)); - - printk(KERN_CONT " %*pbl", - cpumask_pr_args(sched_group_cpus(group))); - if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { - printk(KERN_CONT " (cpu_capacity = %lu)", - group->sgc->capacity); - } - - group = group->next; - } while (group != sd->groups); - printk(KERN_CONT "\n"); - - if (!cpumask_equal(sched_domain_span(sd), groupmask)) - printk(KERN_ERR "ERROR: groups don't span domain->span\n"); - - if (sd->parent && - !cpumask_subset(groupmask, sched_domain_span(sd->parent))) - printk(KERN_ERR "ERROR: parent span is not a superset " - "of domain->span\n"); - return 0; -} - -static void sched_domain_debug(struct sched_domain *sd, int cpu) -{ - int level = 0; - - if (!sched_debug_enabled) - 
return; - - if (!sd) { - printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); - return; - } - - printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); - - for (;;) { - if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) - break; - level++; - sd = sd->parent; - if (!sd) - break; - } -} -#else /* !CONFIG_SCHED_DEBUG */ - -# define sched_debug_enabled 0 -# define sched_domain_debug(sd, cpu) do { } while (0) -static inline bool sched_debug(void) -{ - return false; -} -#endif /* CONFIG_SCHED_DEBUG */ - -static int sd_degenerate(struct sched_domain *sd) -{ - if (cpumask_weight(sched_domain_span(sd)) == 1) - return 1; - - /* Following flags need at least 2 groups */ - if (sd->flags & (SD_LOAD_BALANCE | - SD_BALANCE_NEWIDLE | - SD_BALANCE_FORK | - SD_BALANCE_EXEC | - SD_SHARE_CPUCAPACITY | - SD_ASYM_CPUCAPACITY | - SD_SHARE_PKG_RESOURCES | - SD_SHARE_POWERDOMAIN)) { - if (sd->groups != sd->groups->next) - return 0; - } - - /* Following flags don't use groups */ - if (sd->flags & (SD_WAKE_AFFINE)) - return 0; - - return 1; -} - -static int -sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) -{ - unsigned long cflags = sd->flags, pflags = parent->flags; - - if (sd_degenerate(parent)) - return 1; - - if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) - return 0; - - /* Flags needing groups don't count if only 1 group in parent */ - if (parent->groups == parent->groups->next) { - pflags &= ~(SD_LOAD_BALANCE | - SD_BALANCE_NEWIDLE | - SD_BALANCE_FORK | - SD_BALANCE_EXEC | - SD_ASYM_CPUCAPACITY | - SD_SHARE_CPUCAPACITY | - SD_SHARE_PKG_RESOURCES | - SD_PREFER_SIBLING | - SD_SHARE_POWERDOMAIN); - if (nr_node_ids == 1) - pflags &= ~SD_SERIALIZE; - } - if (~cflags & pflags) - return 0; - - return 1; -} - -static void free_rootdomain(struct rcu_head *rcu) -{ - struct root_domain *rd = container_of(rcu, struct root_domain, rcu); - - cpupri_cleanup(&rd->cpupri); - cpudl_cleanup(&rd->cpudl); - free_cpumask_var(rd->dlo_mask); - free_cpumask_var(rd->rto_mask); - free_cpumask_var(rd->online); - free_cpumask_var(rd->span); - kfree(rd); -} - -static void rq_attach_root(struct rq *rq, struct root_domain *rd) -{ - struct root_domain *old_rd = NULL; - unsigned long flags; - - raw_spin_lock_irqsave(&rq->lock, flags); - - if (rq->rd) { - old_rd = rq->rd; - - if (cpumask_test_cpu(rq->cpu, old_rd->online)) - set_rq_offline(rq); - - cpumask_clear_cpu(rq->cpu, old_rd->span); - - /* - * If we dont want to free the old_rd yet then - * set old_rd to NULL to skip the freeing later - * in this function: - */ - if (!atomic_dec_and_test(&old_rd->refcount)) - old_rd = NULL; - } - - atomic_inc(&rd->refcount); - rq->rd = rd; - - cpumask_set_cpu(rq->cpu, rd->span); - if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) - set_rq_online(rq); - - raw_spin_unlock_irqrestore(&rq->lock, flags); - - if (old_rd) - call_rcu_sched(&old_rd->rcu, free_rootdomain); -} - -static int init_rootdomain(struct root_domain *rd) -{ - memset(rd, 0, sizeof(*rd)); - - if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) - goto out; - if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) - goto free_span; - if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) - goto free_online; - if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) - goto free_dlo_mask; - - init_dl_bw(&rd->dl_bw); - if (cpudl_init(&rd->cpudl) != 0) - goto free_rto_mask; - - if (cpupri_init(&rd->cpupri) != 0) - goto free_cpudl; - return 0; - -free_cpudl: - cpudl_cleanup(&rd->cpudl); -free_rto_mask: - free_cpumask_var(rd->rto_mask); 
-free_dlo_mask: - free_cpumask_var(rd->dlo_mask); -free_online: - free_cpumask_var(rd->online); -free_span: - free_cpumask_var(rd->span); -out: - return -ENOMEM; -} - -/* - * By default the system creates a single root-domain with all CPUs as - * members (mimicking the global state we have today). - */ -struct root_domain def_root_domain; - -static void init_defrootdomain(void) -{ - init_rootdomain(&def_root_domain); - - atomic_set(&def_root_domain.refcount, 1); -} - -static struct root_domain *alloc_rootdomain(void) -{ - struct root_domain *rd; - - rd = kmalloc(sizeof(*rd), GFP_KERNEL); - if (!rd) - return NULL; - - if (init_rootdomain(rd) != 0) { - kfree(rd); - return NULL; - } - - return rd; -} - -static void free_sched_groups(struct sched_group *sg, int free_sgc) -{ - struct sched_group *tmp, *first; - - if (!sg) - return; - - first = sg; - do { - tmp = sg->next; - - if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) - kfree(sg->sgc); - - kfree(sg); - sg = tmp; - } while (sg != first); -} - -static void destroy_sched_domain(struct sched_domain *sd) -{ - /* - * If its an overlapping domain it has private groups, iterate and - * nuke them all. - */ - if (sd->flags & SD_OVERLAP) { - free_sched_groups(sd->groups, 1); - } else if (atomic_dec_and_test(&sd->groups->ref)) { - kfree(sd->groups->sgc); - kfree(sd->groups); - } - if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) - kfree(sd->shared); - kfree(sd); -} - -static void destroy_sched_domains_rcu(struct rcu_head *rcu) -{ - struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); - - while (sd) { - struct sched_domain *parent = sd->parent; - destroy_sched_domain(sd); - sd = parent; - } -} - -static void destroy_sched_domains(struct sched_domain *sd) -{ - if (sd) - call_rcu(&sd->rcu, destroy_sched_domains_rcu); -} - -/* - * Keep a special pointer to the highest sched_domain that has - * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this - * allows us to avoid some pointer chasing select_idle_sibling(). - * - * Also keep a unique ID per domain (we use the first CPU number in - * the cpumask of the domain), this allows us to quickly tell if - * two CPUs are in the same cache domain, see cpus_share_cache(). - */ -DEFINE_PER_CPU(struct sched_domain *, sd_llc); -DEFINE_PER_CPU(int, sd_llc_size); -DEFINE_PER_CPU(int, sd_llc_id); -DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); -DEFINE_PER_CPU(struct sched_domain *, sd_numa); -DEFINE_PER_CPU(struct sched_domain *, sd_asym); - -static void update_top_cache_domain(int cpu) -{ - struct sched_domain_shared *sds = NULL; - struct sched_domain *sd; - int id = cpu; - int size = 1; - - sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); - if (sd) { - id = cpumask_first(sched_domain_span(sd)); - size = cpumask_weight(sched_domain_span(sd)); - sds = sd->shared; - } - - rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); - per_cpu(sd_llc_size, cpu) = size; - per_cpu(sd_llc_id, cpu) = id; - rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); - - sd = lowest_flag_domain(cpu, SD_NUMA); - rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); - - sd = highest_flag_domain(cpu, SD_ASYM_PACKING); - rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); -} - -/* - * Attach the domain 'sd' to 'cpu' as its base domain. Callers must - * hold the hotplug lock. - */ -static void -cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) -{ - struct rq *rq = cpu_rq(cpu); - struct sched_domain *tmp; - - /* Remove the sched domains which do not contribute to scheduling. 
*/ - for (tmp = sd; tmp; ) { - struct sched_domain *parent = tmp->parent; - if (!parent) - break; - - if (sd_parent_degenerate(tmp, parent)) { - tmp->parent = parent->parent; - if (parent->parent) - parent->parent->child = tmp; - /* - * Transfer SD_PREFER_SIBLING down in case of a - * degenerate parent; the spans match for this - * so the property transfers. - */ - if (parent->flags & SD_PREFER_SIBLING) - tmp->flags |= SD_PREFER_SIBLING; - destroy_sched_domain(parent); - } else - tmp = tmp->parent; - } - - if (sd && sd_degenerate(sd)) { - tmp = sd; - sd = sd->parent; - destroy_sched_domain(tmp); - if (sd) - sd->child = NULL; - } - - sched_domain_debug(sd, cpu); - - rq_attach_root(rq, rd); - tmp = rq->sd; - rcu_assign_pointer(rq->sd, sd); - destroy_sched_domains(tmp); - - update_top_cache_domain(cpu); -} - -/* Setup the mask of CPUs configured for isolated domains */ -static int __init isolated_cpu_setup(char *str) -{ - int ret; - - alloc_bootmem_cpumask_var(&cpu_isolated_map); - ret = cpulist_parse(str, cpu_isolated_map); - if (ret) { - pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids); - return 0; - } - return 1; -} -__setup("isolcpus=", isolated_cpu_setup); - -struct s_data { - struct sched_domain ** __percpu sd; - struct root_domain *rd; -}; - -enum s_alloc { - sa_rootdomain, - sa_sd, - sa_sd_storage, - sa_none, -}; - -/* - * Build an iteration mask that can exclude certain CPUs from the upwards - * domain traversal. - * - * Asymmetric node setups can result in situations where the domain tree is of - * unequal depth, make sure to skip domains that already cover the entire - * range. - * - * In that case build_sched_domains() will have terminated the iteration early - * and our sibling sd spans will be empty. Domains should always include the - * CPU they're built on, so check that. - */ -static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) -{ - const struct cpumask *span = sched_domain_span(sd); - struct sd_data *sdd = sd->private; - struct sched_domain *sibling; - int i; - - for_each_cpu(i, span) { - sibling = *per_cpu_ptr(sdd->sd, i); - if (!cpumask_test_cpu(i, sched_domain_span(sibling))) - continue; - - cpumask_set_cpu(i, sched_group_mask(sg)); - } -} - -/* - * Return the canonical balance CPU for this group, this is the first CPU - * of this group that's also in the iteration mask. - */ -int group_balance_cpu(struct sched_group *sg) -{ - return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); -} - -static int -build_overlap_sched_groups(struct sched_domain *sd, int cpu) -{ - struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; - const struct cpumask *span = sched_domain_span(sd); - struct cpumask *covered = sched_domains_tmpmask; - struct sd_data *sdd = sd->private; - struct sched_domain *sibling; - int i; - - cpumask_clear(covered); - - for_each_cpu(i, span) { - struct cpumask *sg_span; - - if (cpumask_test_cpu(i, covered)) - continue; - - sibling = *per_cpu_ptr(sdd->sd, i); - - /* See the comment near build_group_mask(). 
*/ - if (!cpumask_test_cpu(i, sched_domain_span(sibling))) - continue; - - sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), - GFP_KERNEL, cpu_to_node(cpu)); - - if (!sg) - goto fail; - - sg_span = sched_group_cpus(sg); - if (sibling->child) - cpumask_copy(sg_span, sched_domain_span(sibling->child)); - else - cpumask_set_cpu(i, sg_span); - - cpumask_or(covered, covered, sg_span); - - sg->sgc = *per_cpu_ptr(sdd->sgc, i); - if (atomic_inc_return(&sg->sgc->ref) == 1) - build_group_mask(sd, sg); - - /* - * Initialize sgc->capacity such that even if we mess up the - * domains and no possible iteration will get us here, we won't - * die on a /0 trap. - */ - sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); - sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; - - /* - * Make sure the first group of this domain contains the - * canonical balance CPU. Otherwise the sched_domain iteration - * breaks. See update_sg_lb_stats(). - */ - if ((!groups && cpumask_test_cpu(cpu, sg_span)) || - group_balance_cpu(sg) == cpu) - groups = sg; - - if (!first) - first = sg; - if (last) - last->next = sg; - last = sg; - last->next = first; - } - sd->groups = groups; - - return 0; - -fail: - free_sched_groups(first, 0); - - return -ENOMEM; -} - -static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) -{ - struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); - struct sched_domain *child = sd->child; - - if (child) - cpu = cpumask_first(sched_domain_span(child)); - - if (sg) { - *sg = *per_cpu_ptr(sdd->sg, cpu); - (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); - - /* For claim_allocations: */ - atomic_set(&(*sg)->sgc->ref, 1); - } - - return cpu; -} - -/* - * build_sched_groups will build a circular linked list of the groups - * covered by the given span, and will set each group's ->cpumask correctly, - * and ->cpu_capacity to 0. - * - * Assumes the sched_domain tree is fully constructed - */ -static int -build_sched_groups(struct sched_domain *sd, int cpu) -{ - struct sched_group *first = NULL, *last = NULL; - struct sd_data *sdd = sd->private; - const struct cpumask *span = sched_domain_span(sd); - struct cpumask *covered; - int i; - - get_group(cpu, sdd, &sd->groups); - atomic_inc(&sd->groups->ref); - - if (cpu != cpumask_first(span)) - return 0; - - lockdep_assert_held(&sched_domains_mutex); - covered = sched_domains_tmpmask; - - cpumask_clear(covered); - - for_each_cpu(i, span) { - struct sched_group *sg; - int group, j; - - if (cpumask_test_cpu(i, covered)) - continue; - - group = get_group(i, sdd, &sg); - cpumask_setall(sched_group_mask(sg)); - - for_each_cpu(j, span) { - if (get_group(j, sdd, NULL) != group) - continue; - - cpumask_set_cpu(j, covered); - cpumask_set_cpu(j, sched_group_cpus(sg)); - } - - if (!first) - first = sg; - if (last) - last->next = sg; - last = sg; - } - last->next = first; - - return 0; -} - -/* - * Initialize sched groups cpu_capacity. - * - * cpu_capacity indicates the capacity of sched group, which is used while - * distributing the load between different sched groups in a sched domain. - * Typically cpu_capacity for all the groups in a sched domain will be same - * unless there are asymmetries in the topology. If there are asymmetries, - * group having more cpu_capacity will pickup more load compared to the - * group having less cpu_capacity. 
- */ -static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) -{ - struct sched_group *sg = sd->groups; - - WARN_ON(!sg); - - do { - int cpu, max_cpu = -1; - - sg->group_weight = cpumask_weight(sched_group_cpus(sg)); - - if (!(sd->flags & SD_ASYM_PACKING)) - goto next; - - for_each_cpu(cpu, sched_group_cpus(sg)) { - if (max_cpu < 0) - max_cpu = cpu; - else if (sched_asym_prefer(cpu, max_cpu)) - max_cpu = cpu; - } - sg->asym_prefer_cpu = max_cpu; - -next: - sg = sg->next; - } while (sg != sd->groups); - - if (cpu != group_balance_cpu(sg)) - return; - - update_group_capacity(sd, cpu); -} - -/* - * Initializers for schedule domains - * Non-inlined to reduce accumulated stack pressure in build_sched_domains() - */ - -static int default_relax_domain_level = -1; -int sched_domain_level_max; - -static int __init setup_relax_domain_level(char *str) -{ - if (kstrtoint(str, 0, &default_relax_domain_level)) - pr_warn("Unable to set relax_domain_level\n"); - - return 1; -} -__setup("relax_domain_level=", setup_relax_domain_level); - -static void set_domain_attribute(struct sched_domain *sd, - struct sched_domain_attr *attr) -{ - int request; - - if (!attr || attr->relax_domain_level < 0) { - if (default_relax_domain_level < 0) - return; - else - request = default_relax_domain_level; - } else - request = attr->relax_domain_level; - if (request < sd->level) { - /* Turn off idle balance on this domain: */ - sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); - } else { - /* Turn on idle balance on this domain: */ - sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); - } -} - -static void __sdt_free(const struct cpumask *cpu_map); -static int __sdt_alloc(const struct cpumask *cpu_map); - -static void __free_domain_allocs(struct s_data *d, enum s_alloc what, - const struct cpumask *cpu_map) -{ - switch (what) { - case sa_rootdomain: - if (!atomic_read(&d->rd->refcount)) - free_rootdomain(&d->rd->rcu); - /* Fall through */ - case sa_sd: - free_percpu(d->sd); - /* Fall through */ - case sa_sd_storage: - __sdt_free(cpu_map); - /* Fall through */ - case sa_none: - break; - } -} - -static enum s_alloc -__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) -{ - memset(d, 0, sizeof(*d)); - - if (__sdt_alloc(cpu_map)) - return sa_sd_storage; - d->sd = alloc_percpu(struct sched_domain *); - if (!d->sd) - return sa_sd_storage; - d->rd = alloc_rootdomain(); - if (!d->rd) - return sa_sd; - return sa_rootdomain; -} - -/* - * NULL the sd_data elements we've used to build the sched_domain and - * sched_group structure so that the subsequent __free_domain_allocs() - * will not free the data we're using. - */ -static void claim_allocations(int cpu, struct sched_domain *sd) -{ - struct sd_data *sdd = sd->private; - - WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); - *per_cpu_ptr(sdd->sd, cpu) = NULL; - - if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) - *per_cpu_ptr(sdd->sds, cpu) = NULL; - - if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) - *per_cpu_ptr(sdd->sg, cpu) = NULL; - - if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) - *per_cpu_ptr(sdd->sgc, cpu) = NULL; -} - -#ifdef CONFIG_NUMA -static int sched_domains_numa_levels; -enum numa_topology_type sched_numa_topology_type; -static int *sched_domains_numa_distance; -int sched_max_numa_distance; -static struct cpumask ***sched_domains_numa_masks; -static int sched_domains_curr_level; -#endif - -/* - * SD_flags allowed in topology descriptions. 
- * - * These flags are purely descriptive of the topology and do not prescribe - * behaviour. Behaviour is artificial and mapped in the below sd_init() - * function: - * - * SD_SHARE_CPUCAPACITY - describes SMT topologies - * SD_SHARE_PKG_RESOURCES - describes shared caches - * SD_NUMA - describes NUMA topologies - * SD_SHARE_POWERDOMAIN - describes shared power domain - * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies - * - * Odd one out, which beside describing the topology has a quirk also - * prescribes the desired behaviour that goes along with it: - * - * SD_ASYM_PACKING - describes SMT quirks - */ -#define TOPOLOGY_SD_FLAGS \ - (SD_SHARE_CPUCAPACITY | \ - SD_SHARE_PKG_RESOURCES | \ - SD_NUMA | \ - SD_ASYM_PACKING | \ - SD_ASYM_CPUCAPACITY | \ - SD_SHARE_POWERDOMAIN) - -static struct sched_domain * -sd_init(struct sched_domain_topology_level *tl, - const struct cpumask *cpu_map, - struct sched_domain *child, int cpu) -{ - struct sd_data *sdd = &tl->data; - struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); - int sd_id, sd_weight, sd_flags = 0; - -#ifdef CONFIG_NUMA - /* - * Ugly hack to pass state to sd_numa_mask()... - */ - sched_domains_curr_level = tl->numa_level; -#endif - - sd_weight = cpumask_weight(tl->mask(cpu)); - - if (tl->sd_flags) - sd_flags = (*tl->sd_flags)(); - if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, - "wrong sd_flags in topology description\n")) - sd_flags &= ~TOPOLOGY_SD_FLAGS; - - *sd = (struct sched_domain){ - .min_interval = sd_weight, - .max_interval = 2*sd_weight, - .busy_factor = 32, - .imbalance_pct = 125, - - .cache_nice_tries = 0, - .busy_idx = 0, - .idle_idx = 0, - .newidle_idx = 0, - .wake_idx = 0, - .forkexec_idx = 0, - - .flags = 1*SD_LOAD_BALANCE - | 1*SD_BALANCE_NEWIDLE - | 1*SD_BALANCE_EXEC - | 1*SD_BALANCE_FORK - | 0*SD_BALANCE_WAKE - | 1*SD_WAKE_AFFINE - | 0*SD_SHARE_CPUCAPACITY - | 0*SD_SHARE_PKG_RESOURCES - | 0*SD_SERIALIZE - | 0*SD_PREFER_SIBLING - | 0*SD_NUMA - | sd_flags - , - - .last_balance = jiffies, - .balance_interval = sd_weight, - .smt_gain = 0, - .max_newidle_lb_cost = 0, - .next_decay_max_lb_cost = jiffies, - .child = child, -#ifdef CONFIG_SCHED_DEBUG - .name = tl->name, -#endif - }; - - cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); - sd_id = cpumask_first(sched_domain_span(sd)); - - /* - * Convert topological properties into behaviour. - */ - - if (sd->flags & SD_ASYM_CPUCAPACITY) { - struct sched_domain *t = sd; - - for_each_lower_domain(t) - t->flags |= SD_BALANCE_WAKE; - } - - if (sd->flags & SD_SHARE_CPUCAPACITY) { - sd->flags |= SD_PREFER_SIBLING; - sd->imbalance_pct = 110; - sd->smt_gain = 1178; /* ~15% */ - - } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { - sd->imbalance_pct = 117; - sd->cache_nice_tries = 1; - sd->busy_idx = 2; - -#ifdef CONFIG_NUMA - } else if (sd->flags & SD_NUMA) { - sd->cache_nice_tries = 2; - sd->busy_idx = 3; - sd->idle_idx = 2; - - sd->flags |= SD_SERIALIZE; - if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { - sd->flags &= ~(SD_BALANCE_EXEC | - SD_BALANCE_FORK | - SD_WAKE_AFFINE); - } - -#endif - } else { - sd->flags |= SD_PREFER_SIBLING; - sd->cache_nice_tries = 1; - sd->busy_idx = 2; - sd->idle_idx = 1; - } - - /* - * For all levels sharing cache; connect a sched_domain_shared - * instance. - */ - if (sd->flags & SD_SHARE_PKG_RESOURCES) { - sd->shared = *per_cpu_ptr(sdd->sds, sd_id); - atomic_inc(&sd->shared->ref); - atomic_set(&sd->shared->nr_busy_cpus, sd_weight); - } - - sd->private = sdd; - - return sd; -} - -/* - * Topology list, bottom-up. 
- */ -static struct sched_domain_topology_level default_topology[] = { -#ifdef CONFIG_SCHED_SMT - { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, -#endif -#ifdef CONFIG_SCHED_MC - { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, -#endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; - -static struct sched_domain_topology_level *sched_domain_topology = - default_topology; - -#define for_each_sd_topology(tl) \ - for (tl = sched_domain_topology; tl->mask; tl++) - -void set_sched_topology(struct sched_domain_topology_level *tl) -{ - if (WARN_ON_ONCE(sched_smp_initialized)) - return; - - sched_domain_topology = tl; -} - -#ifdef CONFIG_NUMA - -static const struct cpumask *sd_numa_mask(int cpu) -{ - return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; -} - -static void sched_numa_warn(const char *str) -{ - static int done = false; - int i,j; - - if (done) - return; - - done = true; - - printk(KERN_WARNING "ERROR: %s\n\n", str); - - for (i = 0; i < nr_node_ids; i++) { - printk(KERN_WARNING " "); - for (j = 0; j < nr_node_ids; j++) - printk(KERN_CONT "%02d ", node_distance(i,j)); - printk(KERN_CONT "\n"); - } - printk(KERN_WARNING "\n"); -} - -bool find_numa_distance(int distance) -{ - int i; - - if (distance == node_distance(0, 0)) - return true; - - for (i = 0; i < sched_domains_numa_levels; i++) { - if (sched_domains_numa_distance[i] == distance) - return true; - } - - return false; -} - -/* - * A system can have three types of NUMA topology: - * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system - * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes - * NUMA_BACKPLANE: nodes can reach other nodes through a backplane - * - * The difference between a glueless mesh topology and a backplane - * topology lies in whether communication between not directly - * connected nodes goes through intermediary nodes (where programs - * could run), or through backplane controllers. This affects - * placement of programs. - * - * The type of topology can be discerned with the following tests: - * - If the maximum distance between any nodes is 1 hop, the system - * is directly connected. - * - If for two nodes A and B, located N > 1 hops away from each other, - * there is an intermediary node C, which is < N hops away from both - * nodes A and B, the system is a glueless mesh. - */ -static void init_numa_topology_type(void) -{ - int a, b, c, n; - - n = sched_max_numa_distance; - - if (sched_domains_numa_levels <= 1) { - sched_numa_topology_type = NUMA_DIRECT; - return; - } - - for_each_online_node(a) { - for_each_online_node(b) { - /* Find two nodes furthest removed from each other. */ - if (node_distance(a, b) < n) - continue; - - /* Is there an intermediary node between a and b? */ - for_each_online_node(c) { - if (node_distance(a, c) < n && - node_distance(b, c) < n) { - sched_numa_topology_type = - NUMA_GLUELESS_MESH; - return; - } - } - - sched_numa_topology_type = NUMA_BACKPLANE; - return; - } - } -} - -static void sched_init_numa(void) -{ - int next_distance, curr_distance = node_distance(0, 0); - struct sched_domain_topology_level *tl; - int level = 0; - int i, j, k; - - sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); - if (!sched_domains_numa_distance) - return; - - /* - * O(nr_nodes^2) deduplicating selection sort -- in order to find the - * unique distances in the node_distance() table. 
- * - * Assumes node_distance(0,j) includes all distances in - * node_distance(i,j) in order to avoid cubic time. - */ - next_distance = curr_distance; - for (i = 0; i < nr_node_ids; i++) { - for (j = 0; j < nr_node_ids; j++) { - for (k = 0; k < nr_node_ids; k++) { - int distance = node_distance(i, k); - - if (distance > curr_distance && - (distance < next_distance || - next_distance == curr_distance)) - next_distance = distance; - - /* - * While not a strong assumption it would be nice to know - * about cases where if node A is connected to B, B is not - * equally connected to A. - */ - if (sched_debug() && node_distance(k, i) != distance) - sched_numa_warn("Node-distance not symmetric"); - - if (sched_debug() && i && !find_numa_distance(distance)) - sched_numa_warn("Node-0 not representative"); - } - if (next_distance != curr_distance) { - sched_domains_numa_distance[level++] = next_distance; - sched_domains_numa_levels = level; - curr_distance = next_distance; - } else break; - } - - /* - * In case of sched_debug() we verify the above assumption. - */ - if (!sched_debug()) - break; - } - - if (!level) - return; - - /* - * 'level' contains the number of unique distances, excluding the - * identity distance node_distance(i,i). - * - * The sched_domains_numa_distance[] array includes the actual distance - * numbers. - */ - - /* - * Here, we should temporarily reset sched_domains_numa_levels to 0. - * If it fails to allocate memory for array sched_domains_numa_masks[][], - * the array will contain less then 'level' members. This could be - * dangerous when we use it to iterate array sched_domains_numa_masks[][] - * in other functions. - * - * We reset it to 'level' at the end of this function. - */ - sched_domains_numa_levels = 0; - - sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); - if (!sched_domains_numa_masks) - return; - - /* - * Now for each level, construct a mask per node which contains all - * CPUs of nodes that are that many hops away from us. - */ - for (i = 0; i < level; i++) { - sched_domains_numa_masks[i] = - kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); - if (!sched_domains_numa_masks[i]) - return; - - for (j = 0; j < nr_node_ids; j++) { - struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); - if (!mask) - return; - - sched_domains_numa_masks[i][j] = mask; - - for_each_node(k) { - if (node_distance(j, k) > sched_domains_numa_distance[i]) - continue; - - cpumask_or(mask, mask, cpumask_of_node(k)); - } - } - } - - /* Compute default topology size */ - for (i = 0; sched_domain_topology[i].mask; i++); - - tl = kzalloc((i + level + 1) * - sizeof(struct sched_domain_topology_level), GFP_KERNEL); - if (!tl) - return; - - /* - * Copy the default topology bits.. - */ - for (i = 0; sched_domain_topology[i].mask; i++) - tl[i] = sched_domain_topology[i]; - - /* - * .. and append 'j' levels of NUMA goodness. 
- */ - for (j = 0; j < level; i++, j++) { - tl[i] = (struct sched_domain_topology_level){ - .mask = sd_numa_mask, - .sd_flags = cpu_numa_flags, - .flags = SDTL_OVERLAP, - .numa_level = j, - SD_INIT_NAME(NUMA) - }; - } - - sched_domain_topology = tl; - - sched_domains_numa_levels = level; - sched_max_numa_distance = sched_domains_numa_distance[level - 1]; - - init_numa_topology_type(); -} - -static void sched_domains_numa_masks_set(unsigned int cpu) -{ - int node = cpu_to_node(cpu); - int i, j; - - for (i = 0; i < sched_domains_numa_levels; i++) { - for (j = 0; j < nr_node_ids; j++) { - if (node_distance(j, node) <= sched_domains_numa_distance[i]) - cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); - } - } -} - -static void sched_domains_numa_masks_clear(unsigned int cpu) -{ - int i, j; - - for (i = 0; i < sched_domains_numa_levels; i++) { - for (j = 0; j < nr_node_ids; j++) - cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); - } -} - -#else -static inline void sched_init_numa(void) { } -static void sched_domains_numa_masks_set(unsigned int cpu) { } -static void sched_domains_numa_masks_clear(unsigned int cpu) { } -#endif /* CONFIG_NUMA */ - -static int __sdt_alloc(const struct cpumask *cpu_map) -{ - struct sched_domain_topology_level *tl; - int j; - - for_each_sd_topology(tl) { - struct sd_data *sdd = &tl->data; - - sdd->sd = alloc_percpu(struct sched_domain *); - if (!sdd->sd) - return -ENOMEM; - - sdd->sds = alloc_percpu(struct sched_domain_shared *); - if (!sdd->sds) - return -ENOMEM; - - sdd->sg = alloc_percpu(struct sched_group *); - if (!sdd->sg) - return -ENOMEM; - - sdd->sgc = alloc_percpu(struct sched_group_capacity *); - if (!sdd->sgc) - return -ENOMEM; - - for_each_cpu(j, cpu_map) { - struct sched_domain *sd; - struct sched_domain_shared *sds; - struct sched_group *sg; - struct sched_group_capacity *sgc; - - sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), - GFP_KERNEL, cpu_to_node(j)); - if (!sd) - return -ENOMEM; - - *per_cpu_ptr(sdd->sd, j) = sd; - - sds = kzalloc_node(sizeof(struct sched_domain_shared), - GFP_KERNEL, cpu_to_node(j)); - if (!sds) - return -ENOMEM; - - *per_cpu_ptr(sdd->sds, j) = sds; - - sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), - GFP_KERNEL, cpu_to_node(j)); - if (!sg) - return -ENOMEM; - - sg->next = sg; - - *per_cpu_ptr(sdd->sg, j) = sg; - - sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), - GFP_KERNEL, cpu_to_node(j)); - if (!sgc) - return -ENOMEM; - - *per_cpu_ptr(sdd->sgc, j) = sgc; - } - } - - return 0; -} - -static void __sdt_free(const struct cpumask *cpu_map) -{ - struct sched_domain_topology_level *tl; - int j; - - for_each_sd_topology(tl) { - struct sd_data *sdd = &tl->data; - - for_each_cpu(j, cpu_map) { - struct sched_domain *sd; - - if (sdd->sd) { - sd = *per_cpu_ptr(sdd->sd, j); - if (sd && (sd->flags & SD_OVERLAP)) - free_sched_groups(sd->groups, 0); - kfree(*per_cpu_ptr(sdd->sd, j)); - } - - if (sdd->sds) - kfree(*per_cpu_ptr(sdd->sds, j)); - if (sdd->sg) - kfree(*per_cpu_ptr(sdd->sg, j)); - if (sdd->sgc) - kfree(*per_cpu_ptr(sdd->sgc, j)); - } - free_percpu(sdd->sd); - sdd->sd = NULL; - free_percpu(sdd->sds); - sdd->sds = NULL; - free_percpu(sdd->sg); - sdd->sg = NULL; - free_percpu(sdd->sgc); - sdd->sgc = NULL; - } -} - -struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, - const struct cpumask *cpu_map, struct sched_domain_attr *attr, - struct sched_domain *child, int cpu) -{ - struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); - - 
if (child) { - sd->level = child->level + 1; - sched_domain_level_max = max(sched_domain_level_max, sd->level); - child->parent = sd; - - if (!cpumask_subset(sched_domain_span(child), - sched_domain_span(sd))) { - pr_err("BUG: arch topology borken\n"); -#ifdef CONFIG_SCHED_DEBUG - pr_err(" the %s domain not a subset of the %s domain\n", - child->name, sd->name); -#endif - /* Fixup, ensure @sd has at least @child cpus. */ - cpumask_or(sched_domain_span(sd), - sched_domain_span(sd), - sched_domain_span(child)); - } - - } - set_domain_attribute(sd, attr); - - return sd; -} - -/* - * Build sched domains for a given set of CPUs and attach the sched domains - * to the individual CPUs - */ -static int -build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) -{ - enum s_alloc alloc_state; - struct sched_domain *sd; - struct s_data d; - struct rq *rq = NULL; - int i, ret = -ENOMEM; - - alloc_state = __visit_domain_allocation_hell(&d, cpu_map); - if (alloc_state != sa_rootdomain) - goto error; - - /* Set up domains for CPUs specified by the cpu_map: */ - for_each_cpu(i, cpu_map) { - struct sched_domain_topology_level *tl; - - sd = NULL; - for_each_sd_topology(tl) { - sd = build_sched_domain(tl, cpu_map, attr, sd, i); - if (tl == sched_domain_topology) - *per_cpu_ptr(d.sd, i) = sd; - if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) - sd->flags |= SD_OVERLAP; - if (cpumask_equal(cpu_map, sched_domain_span(sd))) - break; - } - } - - /* Build the groups for the domains */ - for_each_cpu(i, cpu_map) { - for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { - sd->span_weight = cpumask_weight(sched_domain_span(sd)); - if (sd->flags & SD_OVERLAP) { - if (build_overlap_sched_groups(sd, i)) - goto error; - } else { - if (build_sched_groups(sd, i)) - goto error; - } - } - } - - /* Calculate CPU capacity for physical packages and nodes */ - for (i = nr_cpumask_bits-1; i >= 0; i--) { - if (!cpumask_test_cpu(i, cpu_map)) - continue; - - for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { - claim_allocations(i, sd); - init_sched_groups_capacity(i, sd); - } - } - - /* Attach the domains */ - rcu_read_lock(); - for_each_cpu(i, cpu_map) { - rq = cpu_rq(i); - sd = *per_cpu_ptr(d.sd, i); - - /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */ - if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) - WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); - - cpu_attach_domain(sd, d.rd, i); - } - rcu_read_unlock(); - - if (rq && sched_debug_enabled) { - pr_info("span: %*pbl (max cpu_capacity = %lu)\n", - cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); - } - - ret = 0; -error: - __free_domain_allocs(&d, alloc_state, cpu_map); - return ret; -} - -/* Current sched domains: */ -static cpumask_var_t *doms_cur; - -/* Number of sched domains in 'doms_cur': */ -static int ndoms_cur; - -/* Attribues of custom domains in 'doms_cur' */ -static struct sched_domain_attr *dattr_cur; - -/* - * Special case: If a kmalloc() of a doms_cur partition (array of - * cpumask) fails, then fallback to a single sched domain, - * as determined by the single cpumask fallback_doms. - */ -static cpumask_var_t fallback_doms; - -/* - * arch_update_cpu_topology lets virtualized architectures update the - * CPU core maps. It is supposed to return 1 if the topology changed - * or 0 if it stayed the same. 
- */ -int __weak arch_update_cpu_topology(void) -{ - return 0; -} - -cpumask_var_t *alloc_sched_domains(unsigned int ndoms) -{ - int i; - cpumask_var_t *doms; - - doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); - if (!doms) - return NULL; - for (i = 0; i < ndoms; i++) { - if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { - free_sched_domains(doms, i); - return NULL; - } - } - return doms; -} - -void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) -{ - unsigned int i; - for (i = 0; i < ndoms; i++) - free_cpumask_var(doms[i]); - kfree(doms); -} - -/* - * Set up scheduler domains and groups. Callers must hold the hotplug lock. - * For now this just excludes isolated CPUs, but could be used to - * exclude other special cases in the future. - */ -static int init_sched_domains(const struct cpumask *cpu_map) -{ - int err; - - arch_update_cpu_topology(); - ndoms_cur = 1; - doms_cur = alloc_sched_domains(ndoms_cur); - if (!doms_cur) - doms_cur = &fallback_doms; - cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); - err = build_sched_domains(doms_cur[0], NULL); - register_sched_domain_sysctl(); - - return err; -} - -/* - * Detach sched domains from a group of CPUs specified in cpu_map - * These CPUs will now be attached to the NULL domain - */ -static void detach_destroy_domains(const struct cpumask *cpu_map) -{ - int i; - - rcu_read_lock(); - for_each_cpu(i, cpu_map) - cpu_attach_domain(NULL, &def_root_domain, i); - rcu_read_unlock(); -} - -/* handle null as "default" */ -static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, - struct sched_domain_attr *new, int idx_new) -{ - struct sched_domain_attr tmp; - - /* Fast path: */ - if (!new && !cur) - return 1; - - tmp = SD_ATTR_INIT; - return !memcmp(cur ? (cur + idx_cur) : &tmp, - new ? (new + idx_new) : &tmp, - sizeof(struct sched_domain_attr)); -} - -/* - * Partition sched domains as specified by the 'ndoms_new' - * cpumasks in the array doms_new[] of cpumasks. This compares - * doms_new[] to the current sched domain partitioning, doms_cur[]. - * It destroys each deleted domain and builds each new domain. - * - * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. - * The masks don't intersect (don't overlap.) We should setup one - * sched domain for each mask. CPUs not in any of the cpumasks will - * not be load balanced. If the same cpumask appears both in the - * current 'doms_cur' domains and in the new 'doms_new', we can leave - * it as it is. - * - * The passed in 'doms_new' should be allocated using - * alloc_sched_domains. This routine takes ownership of it and will - * free_sched_domains it when done with it. If the caller failed the - * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, - * and partition_sched_domains() will fallback to the single partition - * 'fallback_doms', it also forces the domains to be rebuilt. - * - * If doms_new == NULL it will be replaced with cpu_online_mask. - * ndoms_new == 0 is a special case for destroying existing domains, - * and it will not create the default domain. - * - * Call with hotplug lock held - */ -void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ - int i, j, n; - int new_topology; - - mutex_lock(&sched_domains_mutex); - - /* Always unregister in case we don't destroy any domains: */ - unregister_sched_domain_sysctl(); - - /* Let the architecture update CPU core mappings: */ - new_topology = arch_update_cpu_topology(); - - n = doms_new ? 
ndoms_new : 0; - - /* Destroy deleted domains: */ - for (i = 0; i < ndoms_cur; i++) { - for (j = 0; j < n && !new_topology; j++) { - if (cpumask_equal(doms_cur[i], doms_new[j]) - && dattrs_equal(dattr_cur, i, dattr_new, j)) - goto match1; - } - /* No match - a current sched domain not in new doms_new[] */ - detach_destroy_domains(doms_cur[i]); -match1: - ; - } - - n = ndoms_cur; - if (doms_new == NULL) { - n = 0; - doms_new = &fallback_doms; - cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); - WARN_ON_ONCE(dattr_new); - } - - /* Build new domains: */ - for (i = 0; i < ndoms_new; i++) { - for (j = 0; j < n && !new_topology; j++) { - if (cpumask_equal(doms_new[i], doms_cur[j]) - && dattrs_equal(dattr_new, i, dattr_cur, j)) - goto match2; - } - /* No match - add a new doms_new */ - build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); -match2: - ; - } - - /* Remember the new sched domains: */ - if (doms_cur != &fallback_doms) - free_sched_domains(doms_cur, ndoms_cur); - - kfree(dattr_cur); - doms_cur = doms_new; - dattr_cur = dattr_new; - ndoms_cur = ndoms_new; - - register_sched_domain_sysctl(); - - mutex_unlock(&sched_domains_mutex); -} - /* * used to mark begin/end of suspend/resume: */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 8ff5cc539e8a..17ed94b9b413 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -223,7 +223,7 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; } -extern struct mutex sched_domains_mutex; +extern void init_dl_bw(struct dl_bw *dl_b); #ifdef CONFIG_CGROUP_SCHED @@ -584,6 +584,13 @@ struct root_domain { }; extern struct root_domain def_root_domain; +extern struct mutex sched_domains_mutex; +extern cpumask_var_t fallback_doms; +extern cpumask_var_t sched_domains_tmpmask; + +extern void init_defrootdomain(void); +extern int init_sched_domains(const struct cpumask *cpu_map); +extern void rq_attach_root(struct rq *rq, struct root_domain *rd); #endif /* CONFIG_SMP */ @@ -886,6 +893,16 @@ extern int sched_max_numa_distance; extern bool find_numa_distance(int distance); #endif +#ifdef CONFIG_NUMA +extern void sched_init_numa(void); +extern void sched_domains_numa_masks_set(unsigned int cpu); +extern void sched_domains_numa_masks_clear(unsigned int cpu); +#else +static inline void sched_init_numa(void) { } +static inline void sched_domains_numa_masks_set(unsigned int cpu) { } +static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } +#endif + #ifdef CONFIG_NUMA_BALANCING /* The regions in numa_faults array from task_struct */ enum numa_faults_stats { @@ -1752,6 +1769,10 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __release(rq2->lock); } +extern void set_rq_online (struct rq *rq); +extern void set_rq_offline(struct rq *rq); +extern bool sched_smp_initialized; + #else /* CONFIG_SMP */ /* diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c new file mode 100644 index 000000000000..1b0b4fb12837 --- /dev/null +++ b/kernel/sched/topology.c @@ -0,0 +1,1658 @@ +/* + * Scheduler topology setup/handling methods + */ +#include +#include + +#include "sched.h" + +DEFINE_MUTEX(sched_domains_mutex); + +/* Protected by sched_domains_mutex: */ +cpumask_var_t sched_domains_tmpmask; + +#ifdef CONFIG_SCHED_DEBUG + +static __read_mostly int sched_debug_enabled; + +static int __init sched_debug_setup(char *str) +{ + sched_debug_enabled = 1; + + return 0; +} +early_param("sched_debug", sched_debug_setup); + 
+static inline bool sched_debug(void) +{ + return sched_debug_enabled; +} + +static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, + struct cpumask *groupmask) +{ + struct sched_group *group = sd->groups; + + cpumask_clear(groupmask); + + printk(KERN_DEBUG "%*s domain %d: ", level, "", level); + + if (!(sd->flags & SD_LOAD_BALANCE)) { + printk("does not load-balance\n"); + if (sd->parent) + printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" + " has parent"); + return -1; + } + + printk(KERN_CONT "span %*pbl level %s\n", + cpumask_pr_args(sched_domain_span(sd)), sd->name); + + if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { + printk(KERN_ERR "ERROR: domain->span does not contain " + "CPU%d\n", cpu); + } + if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { + printk(KERN_ERR "ERROR: domain->groups does not contain" + " CPU%d\n", cpu); + } + + printk(KERN_DEBUG "%*s groups:", level + 1, ""); + do { + if (!group) { + printk("\n"); + printk(KERN_ERR "ERROR: group is NULL\n"); + break; + } + + if (!cpumask_weight(sched_group_cpus(group))) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: empty group\n"); + break; + } + + if (!(sd->flags & SD_OVERLAP) && + cpumask_intersects(groupmask, sched_group_cpus(group))) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: repeated CPUs\n"); + break; + } + + cpumask_or(groupmask, groupmask, sched_group_cpus(group)); + + printk(KERN_CONT " %*pbl", + cpumask_pr_args(sched_group_cpus(group))); + if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { + printk(KERN_CONT " (cpu_capacity = %lu)", + group->sgc->capacity); + } + + group = group->next; + } while (group != sd->groups); + printk(KERN_CONT "\n"); + + if (!cpumask_equal(sched_domain_span(sd), groupmask)) + printk(KERN_ERR "ERROR: groups don't span domain->span\n"); + + if (sd->parent && + !cpumask_subset(groupmask, sched_domain_span(sd->parent))) + printk(KERN_ERR "ERROR: parent span is not a superset " + "of domain->span\n"); + return 0; +} + +static void sched_domain_debug(struct sched_domain *sd, int cpu) +{ + int level = 0; + + if (!sched_debug_enabled) + return; + + if (!sd) { + printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); + return; + } + + printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + + for (;;) { + if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) + break; + level++; + sd = sd->parent; + if (!sd) + break; + } +} +#else /* !CONFIG_SCHED_DEBUG */ + +# define sched_debug_enabled 0 +# define sched_domain_debug(sd, cpu) do { } while (0) +static inline bool sched_debug(void) +{ + return false; +} +#endif /* CONFIG_SCHED_DEBUG */ + +static int sd_degenerate(struct sched_domain *sd) +{ + if (cpumask_weight(sched_domain_span(sd)) == 1) + return 1; + + /* Following flags need at least 2 groups */ + if (sd->flags & (SD_LOAD_BALANCE | + SD_BALANCE_NEWIDLE | + SD_BALANCE_FORK | + SD_BALANCE_EXEC | + SD_SHARE_CPUCAPACITY | + SD_ASYM_CPUCAPACITY | + SD_SHARE_PKG_RESOURCES | + SD_SHARE_POWERDOMAIN)) { + if (sd->groups != sd->groups->next) + return 0; + } + + /* Following flags don't use groups */ + if (sd->flags & (SD_WAKE_AFFINE)) + return 0; + + return 1; +} + +static int +sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) +{ + unsigned long cflags = sd->flags, pflags = parent->flags; + + if (sd_degenerate(parent)) + return 1; + + if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) + return 0; + + /* Flags needing groups don't count if only 1 group in parent */ + if (parent->groups == 
parent->groups->next) { + pflags &= ~(SD_LOAD_BALANCE | + SD_BALANCE_NEWIDLE | + SD_BALANCE_FORK | + SD_BALANCE_EXEC | + SD_ASYM_CPUCAPACITY | + SD_SHARE_CPUCAPACITY | + SD_SHARE_PKG_RESOURCES | + SD_PREFER_SIBLING | + SD_SHARE_POWERDOMAIN); + if (nr_node_ids == 1) + pflags &= ~SD_SERIALIZE; + } + if (~cflags & pflags) + return 0; + + return 1; +} + +static void free_rootdomain(struct rcu_head *rcu) +{ + struct root_domain *rd = container_of(rcu, struct root_domain, rcu); + + cpupri_cleanup(&rd->cpupri); + cpudl_cleanup(&rd->cpudl); + free_cpumask_var(rd->dlo_mask); + free_cpumask_var(rd->rto_mask); + free_cpumask_var(rd->online); + free_cpumask_var(rd->span); + kfree(rd); +} + +void rq_attach_root(struct rq *rq, struct root_domain *rd) +{ + struct root_domain *old_rd = NULL; + unsigned long flags; + + raw_spin_lock_irqsave(&rq->lock, flags); + + if (rq->rd) { + old_rd = rq->rd; + + if (cpumask_test_cpu(rq->cpu, old_rd->online)) + set_rq_offline(rq); + + cpumask_clear_cpu(rq->cpu, old_rd->span); + + /* + * If we dont want to free the old_rd yet then + * set old_rd to NULL to skip the freeing later + * in this function: + */ + if (!atomic_dec_and_test(&old_rd->refcount)) + old_rd = NULL; + } + + atomic_inc(&rd->refcount); + rq->rd = rd; + + cpumask_set_cpu(rq->cpu, rd->span); + if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) + set_rq_online(rq); + + raw_spin_unlock_irqrestore(&rq->lock, flags); + + if (old_rd) + call_rcu_sched(&old_rd->rcu, free_rootdomain); +} + +static int init_rootdomain(struct root_domain *rd) +{ + memset(rd, 0, sizeof(*rd)); + + if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) + goto out; + if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) + goto free_span; + if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) + goto free_online; + if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) + goto free_dlo_mask; + + init_dl_bw(&rd->dl_bw); + if (cpudl_init(&rd->cpudl) != 0) + goto free_rto_mask; + + if (cpupri_init(&rd->cpupri) != 0) + goto free_cpudl; + return 0; + +free_cpudl: + cpudl_cleanup(&rd->cpudl); +free_rto_mask: + free_cpumask_var(rd->rto_mask); +free_dlo_mask: + free_cpumask_var(rd->dlo_mask); +free_online: + free_cpumask_var(rd->online); +free_span: + free_cpumask_var(rd->span); +out: + return -ENOMEM; +} + +/* + * By default the system creates a single root-domain with all CPUs as + * members (mimicking the global state we have today). + */ +struct root_domain def_root_domain; + +void init_defrootdomain(void) +{ + init_rootdomain(&def_root_domain); + + atomic_set(&def_root_domain.refcount, 1); +} + +static struct root_domain *alloc_rootdomain(void) +{ + struct root_domain *rd; + + rd = kmalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return NULL; + + if (init_rootdomain(rd) != 0) { + kfree(rd); + return NULL; + } + + return rd; +} + +static void free_sched_groups(struct sched_group *sg, int free_sgc) +{ + struct sched_group *tmp, *first; + + if (!sg) + return; + + first = sg; + do { + tmp = sg->next; + + if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) + kfree(sg->sgc); + + kfree(sg); + sg = tmp; + } while (sg != first); +} + +static void destroy_sched_domain(struct sched_domain *sd) +{ + /* + * If its an overlapping domain it has private groups, iterate and + * nuke them all. 
+ */ + if (sd->flags & SD_OVERLAP) { + free_sched_groups(sd->groups, 1); + } else if (atomic_dec_and_test(&sd->groups->ref)) { + kfree(sd->groups->sgc); + kfree(sd->groups); + } + if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) + kfree(sd->shared); + kfree(sd); +} + +static void destroy_sched_domains_rcu(struct rcu_head *rcu) +{ + struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); + + while (sd) { + struct sched_domain *parent = sd->parent; + destroy_sched_domain(sd); + sd = parent; + } +} + +static void destroy_sched_domains(struct sched_domain *sd) +{ + if (sd) + call_rcu(&sd->rcu, destroy_sched_domains_rcu); +} + +/* + * Keep a special pointer to the highest sched_domain that has + * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this + * allows us to avoid some pointer chasing select_idle_sibling(). + * + * Also keep a unique ID per domain (we use the first CPU number in + * the cpumask of the domain), this allows us to quickly tell if + * two CPUs are in the same cache domain, see cpus_share_cache(). + */ +DEFINE_PER_CPU(struct sched_domain *, sd_llc); +DEFINE_PER_CPU(int, sd_llc_size); +DEFINE_PER_CPU(int, sd_llc_id); +DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); +DEFINE_PER_CPU(struct sched_domain *, sd_numa); +DEFINE_PER_CPU(struct sched_domain *, sd_asym); + +static void update_top_cache_domain(int cpu) +{ + struct sched_domain_shared *sds = NULL; + struct sched_domain *sd; + int id = cpu; + int size = 1; + + sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); + if (sd) { + id = cpumask_first(sched_domain_span(sd)); + size = cpumask_weight(sched_domain_span(sd)); + sds = sd->shared; + } + + rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); + per_cpu(sd_llc_size, cpu) = size; + per_cpu(sd_llc_id, cpu) = id; + rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); + + sd = lowest_flag_domain(cpu, SD_NUMA); + rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); + + sd = highest_flag_domain(cpu, SD_ASYM_PACKING); + rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); +} + +/* + * Attach the domain 'sd' to 'cpu' as its base domain. Callers must + * hold the hotplug lock. + */ +static void +cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) +{ + struct rq *rq = cpu_rq(cpu); + struct sched_domain *tmp; + + /* Remove the sched domains which do not contribute to scheduling. */ + for (tmp = sd; tmp; ) { + struct sched_domain *parent = tmp->parent; + if (!parent) + break; + + if (sd_parent_degenerate(tmp, parent)) { + tmp->parent = parent->parent; + if (parent->parent) + parent->parent->child = tmp; + /* + * Transfer SD_PREFER_SIBLING down in case of a + * degenerate parent; the spans match for this + * so the property transfers. 
+ */ + if (parent->flags & SD_PREFER_SIBLING) + tmp->flags |= SD_PREFER_SIBLING; + destroy_sched_domain(parent); + } else + tmp = tmp->parent; + } + + if (sd && sd_degenerate(sd)) { + tmp = sd; + sd = sd->parent; + destroy_sched_domain(tmp); + if (sd) + sd->child = NULL; + } + + sched_domain_debug(sd, cpu); + + rq_attach_root(rq, rd); + tmp = rq->sd; + rcu_assign_pointer(rq->sd, sd); + destroy_sched_domains(tmp); + + update_top_cache_domain(cpu); +} + +/* Setup the mask of CPUs configured for isolated domains */ +static int __init isolated_cpu_setup(char *str) +{ + int ret; + + alloc_bootmem_cpumask_var(&cpu_isolated_map); + ret = cpulist_parse(str, cpu_isolated_map); + if (ret) { + pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids); + return 0; + } + return 1; +} +__setup("isolcpus=", isolated_cpu_setup); + +struct s_data { + struct sched_domain ** __percpu sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain, + sa_sd, + sa_sd_storage, + sa_none, +}; + +/* + * Build an iteration mask that can exclude certain CPUs from the upwards + * domain traversal. + * + * Asymmetric node setups can result in situations where the domain tree is of + * unequal depth, make sure to skip domains that already cover the entire + * range. + * + * In that case build_sched_domains() will have terminated the iteration early + * and our sibling sd spans will be empty. Domains should always include the + * CPU they're built on, so check that. + */ +static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) +{ + const struct cpumask *span = sched_domain_span(sd); + struct sd_data *sdd = sd->private; + struct sched_domain *sibling; + int i; + + for_each_cpu(i, span) { + sibling = *per_cpu_ptr(sdd->sd, i); + if (!cpumask_test_cpu(i, sched_domain_span(sibling))) + continue; + + cpumask_set_cpu(i, sched_group_mask(sg)); + } +} + +/* + * Return the canonical balance CPU for this group, this is the first CPU + * of this group that's also in the iteration mask. + */ +int group_balance_cpu(struct sched_group *sg) +{ + return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); +} + +static int +build_overlap_sched_groups(struct sched_domain *sd, int cpu) +{ + struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; + const struct cpumask *span = sched_domain_span(sd); + struct cpumask *covered = sched_domains_tmpmask; + struct sd_data *sdd = sd->private; + struct sched_domain *sibling; + int i; + + cpumask_clear(covered); + + for_each_cpu(i, span) { + struct cpumask *sg_span; + + if (cpumask_test_cpu(i, covered)) + continue; + + sibling = *per_cpu_ptr(sdd->sd, i); + + /* See the comment near build_group_mask(). */ + if (!cpumask_test_cpu(i, sched_domain_span(sibling))) + continue; + + sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), + GFP_KERNEL, cpu_to_node(cpu)); + + if (!sg) + goto fail; + + sg_span = sched_group_cpus(sg); + if (sibling->child) + cpumask_copy(sg_span, sched_domain_span(sibling->child)); + else + cpumask_set_cpu(i, sg_span); + + cpumask_or(covered, covered, sg_span); + + sg->sgc = *per_cpu_ptr(sdd->sgc, i); + if (atomic_inc_return(&sg->sgc->ref) == 1) + build_group_mask(sd, sg); + + /* + * Initialize sgc->capacity such that even if we mess up the + * domains and no possible iteration will get us here, we won't + * die on a /0 trap. 
+ */ + sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); + sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; + + /* + * Make sure the first group of this domain contains the + * canonical balance CPU. Otherwise the sched_domain iteration + * breaks. See update_sg_lb_stats(). + */ + if ((!groups && cpumask_test_cpu(cpu, sg_span)) || + group_balance_cpu(sg) == cpu) + groups = sg; + + if (!first) + first = sg; + if (last) + last->next = sg; + last = sg; + last->next = first; + } + sd->groups = groups; + + return 0; + +fail: + free_sched_groups(first, 0); + + return -ENOMEM; +} + +static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) +{ + struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); + struct sched_domain *child = sd->child; + + if (child) + cpu = cpumask_first(sched_domain_span(child)); + + if (sg) { + *sg = *per_cpu_ptr(sdd->sg, cpu); + (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); + + /* For claim_allocations: */ + atomic_set(&(*sg)->sgc->ref, 1); + } + + return cpu; +} + +/* + * build_sched_groups will build a circular linked list of the groups + * covered by the given span, and will set each group's ->cpumask correctly, + * and ->cpu_capacity to 0. + * + * Assumes the sched_domain tree is fully constructed + */ +static int +build_sched_groups(struct sched_domain *sd, int cpu) +{ + struct sched_group *first = NULL, *last = NULL; + struct sd_data *sdd = sd->private; + const struct cpumask *span = sched_domain_span(sd); + struct cpumask *covered; + int i; + + get_group(cpu, sdd, &sd->groups); + atomic_inc(&sd->groups->ref); + + if (cpu != cpumask_first(span)) + return 0; + + lockdep_assert_held(&sched_domains_mutex); + covered = sched_domains_tmpmask; + + cpumask_clear(covered); + + for_each_cpu(i, span) { + struct sched_group *sg; + int group, j; + + if (cpumask_test_cpu(i, covered)) + continue; + + group = get_group(i, sdd, &sg); + cpumask_setall(sched_group_mask(sg)); + + for_each_cpu(j, span) { + if (get_group(j, sdd, NULL) != group) + continue; + + cpumask_set_cpu(j, covered); + cpumask_set_cpu(j, sched_group_cpus(sg)); + } + + if (!first) + first = sg; + if (last) + last->next = sg; + last = sg; + } + last->next = first; + + return 0; +} + +/* + * Initialize sched groups cpu_capacity. + * + * cpu_capacity indicates the capacity of sched group, which is used while + * distributing the load between different sched groups in a sched domain. + * Typically cpu_capacity for all the groups in a sched domain will be same + * unless there are asymmetries in the topology. If there are asymmetries, + * group having more cpu_capacity will pickup more load compared to the + * group having less cpu_capacity. 
+ */ +static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) +{ + struct sched_group *sg = sd->groups; + + WARN_ON(!sg); + + do { + int cpu, max_cpu = -1; + + sg->group_weight = cpumask_weight(sched_group_cpus(sg)); + + if (!(sd->flags & SD_ASYM_PACKING)) + goto next; + + for_each_cpu(cpu, sched_group_cpus(sg)) { + if (max_cpu < 0) + max_cpu = cpu; + else if (sched_asym_prefer(cpu, max_cpu)) + max_cpu = cpu; + } + sg->asym_prefer_cpu = max_cpu; + +next: + sg = sg->next; + } while (sg != sd->groups); + + if (cpu != group_balance_cpu(sg)) + return; + + update_group_capacity(sd, cpu); +} + +/* + * Initializers for schedule domains + * Non-inlined to reduce accumulated stack pressure in build_sched_domains() + */ + +static int default_relax_domain_level = -1; +int sched_domain_level_max; + +static int __init setup_relax_domain_level(char *str) +{ + if (kstrtoint(str, 0, &default_relax_domain_level)) + pr_warn("Unable to set relax_domain_level\n"); + + return 1; +} +__setup("relax_domain_level=", setup_relax_domain_level); + +static void set_domain_attribute(struct sched_domain *sd, + struct sched_domain_attr *attr) +{ + int request; + + if (!attr || attr->relax_domain_level < 0) { + if (default_relax_domain_level < 0) + return; + else + request = default_relax_domain_level; + } else + request = attr->relax_domain_level; + if (request < sd->level) { + /* Turn off idle balance on this domain: */ + sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); + } else { + /* Turn on idle balance on this domain: */ + sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); + } +} + +static void __sdt_free(const struct cpumask *cpu_map); +static int __sdt_alloc(const struct cpumask *cpu_map); + +static void __free_domain_allocs(struct s_data *d, enum s_alloc what, + const struct cpumask *cpu_map) +{ + switch (what) { + case sa_rootdomain: + if (!atomic_read(&d->rd->refcount)) + free_rootdomain(&d->rd->rcu); + /* Fall through */ + case sa_sd: + free_percpu(d->sd); + /* Fall through */ + case sa_sd_storage: + __sdt_free(cpu_map); + /* Fall through */ + case sa_none: + break; + } +} + +static enum s_alloc +__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) +{ + memset(d, 0, sizeof(*d)); + + if (__sdt_alloc(cpu_map)) + return sa_sd_storage; + d->sd = alloc_percpu(struct sched_domain *); + if (!d->sd) + return sa_sd_storage; + d->rd = alloc_rootdomain(); + if (!d->rd) + return sa_sd; + return sa_rootdomain; +} + +/* + * NULL the sd_data elements we've used to build the sched_domain and + * sched_group structure so that the subsequent __free_domain_allocs() + * will not free the data we're using. + */ +static void claim_allocations(int cpu, struct sched_domain *sd) +{ + struct sd_data *sdd = sd->private; + + WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); + *per_cpu_ptr(sdd->sd, cpu) = NULL; + + if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) + *per_cpu_ptr(sdd->sds, cpu) = NULL; + + if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) + *per_cpu_ptr(sdd->sg, cpu) = NULL; + + if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) + *per_cpu_ptr(sdd->sgc, cpu) = NULL; +} + +#ifdef CONFIG_NUMA +static int sched_domains_numa_levels; +enum numa_topology_type sched_numa_topology_type; +static int *sched_domains_numa_distance; +int sched_max_numa_distance; +static struct cpumask ***sched_domains_numa_masks; +static int sched_domains_curr_level; +#endif + +/* + * SD_flags allowed in topology descriptions. 
+ * + * These flags are purely descriptive of the topology and do not prescribe + * behaviour. Behaviour is artificial and mapped in the below sd_init() + * function: + * + * SD_SHARE_CPUCAPACITY - describes SMT topologies + * SD_SHARE_PKG_RESOURCES - describes shared caches + * SD_NUMA - describes NUMA topologies + * SD_SHARE_POWERDOMAIN - describes shared power domain + * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies + * + * Odd one out, which beside describing the topology has a quirk also + * prescribes the desired behaviour that goes along with it: + * + * SD_ASYM_PACKING - describes SMT quirks + */ +#define TOPOLOGY_SD_FLAGS \ + (SD_SHARE_CPUCAPACITY | \ + SD_SHARE_PKG_RESOURCES | \ + SD_NUMA | \ + SD_ASYM_PACKING | \ + SD_ASYM_CPUCAPACITY | \ + SD_SHARE_POWERDOMAIN) + +static struct sched_domain * +sd_init(struct sched_domain_topology_level *tl, + const struct cpumask *cpu_map, + struct sched_domain *child, int cpu) +{ + struct sd_data *sdd = &tl->data; + struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); + int sd_id, sd_weight, sd_flags = 0; + +#ifdef CONFIG_NUMA + /* + * Ugly hack to pass state to sd_numa_mask()... + */ + sched_domains_curr_level = tl->numa_level; +#endif + + sd_weight = cpumask_weight(tl->mask(cpu)); + + if (tl->sd_flags) + sd_flags = (*tl->sd_flags)(); + if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, + "wrong sd_flags in topology description\n")) + sd_flags &= ~TOPOLOGY_SD_FLAGS; + + *sd = (struct sched_domain){ + .min_interval = sd_weight, + .max_interval = 2*sd_weight, + .busy_factor = 32, + .imbalance_pct = 125, + + .cache_nice_tries = 0, + .busy_idx = 0, + .idle_idx = 0, + .newidle_idx = 0, + .wake_idx = 0, + .forkexec_idx = 0, + + .flags = 1*SD_LOAD_BALANCE + | 1*SD_BALANCE_NEWIDLE + | 1*SD_BALANCE_EXEC + | 1*SD_BALANCE_FORK + | 0*SD_BALANCE_WAKE + | 1*SD_WAKE_AFFINE + | 0*SD_SHARE_CPUCAPACITY + | 0*SD_SHARE_PKG_RESOURCES + | 0*SD_SERIALIZE + | 0*SD_PREFER_SIBLING + | 0*SD_NUMA + | sd_flags + , + + .last_balance = jiffies, + .balance_interval = sd_weight, + .smt_gain = 0, + .max_newidle_lb_cost = 0, + .next_decay_max_lb_cost = jiffies, + .child = child, +#ifdef CONFIG_SCHED_DEBUG + .name = tl->name, +#endif + }; + + cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); + sd_id = cpumask_first(sched_domain_span(sd)); + + /* + * Convert topological properties into behaviour. + */ + + if (sd->flags & SD_ASYM_CPUCAPACITY) { + struct sched_domain *t = sd; + + for_each_lower_domain(t) + t->flags |= SD_BALANCE_WAKE; + } + + if (sd->flags & SD_SHARE_CPUCAPACITY) { + sd->flags |= SD_PREFER_SIBLING; + sd->imbalance_pct = 110; + sd->smt_gain = 1178; /* ~15% */ + + } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { + sd->imbalance_pct = 117; + sd->cache_nice_tries = 1; + sd->busy_idx = 2; + +#ifdef CONFIG_NUMA + } else if (sd->flags & SD_NUMA) { + sd->cache_nice_tries = 2; + sd->busy_idx = 3; + sd->idle_idx = 2; + + sd->flags |= SD_SERIALIZE; + if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { + sd->flags &= ~(SD_BALANCE_EXEC | + SD_BALANCE_FORK | + SD_WAKE_AFFINE); + } + +#endif + } else { + sd->flags |= SD_PREFER_SIBLING; + sd->cache_nice_tries = 1; + sd->busy_idx = 2; + sd->idle_idx = 1; + } + + /* + * For all levels sharing cache; connect a sched_domain_shared + * instance. + */ + if (sd->flags & SD_SHARE_PKG_RESOURCES) { + sd->shared = *per_cpu_ptr(sdd->sds, sd_id); + atomic_inc(&sd->shared->ref); + atomic_set(&sd->shared->nr_busy_cpus, sd_weight); + } + + sd->private = sdd; + + return sd; +} + +/* + * Topology list, bottom-up. 
+ */ +static struct sched_domain_topology_level default_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, +#endif +#ifdef CONFIG_SCHED_MC + { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, +#endif + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + +static struct sched_domain_topology_level *sched_domain_topology = + default_topology; + +#define for_each_sd_topology(tl) \ + for (tl = sched_domain_topology; tl->mask; tl++) + +void set_sched_topology(struct sched_domain_topology_level *tl) +{ + if (WARN_ON_ONCE(sched_smp_initialized)) + return; + + sched_domain_topology = tl; +} + +#ifdef CONFIG_NUMA + +static const struct cpumask *sd_numa_mask(int cpu) +{ + return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; +} + +static void sched_numa_warn(const char *str) +{ + static int done = false; + int i,j; + + if (done) + return; + + done = true; + + printk(KERN_WARNING "ERROR: %s\n\n", str); + + for (i = 0; i < nr_node_ids; i++) { + printk(KERN_WARNING " "); + for (j = 0; j < nr_node_ids; j++) + printk(KERN_CONT "%02d ", node_distance(i,j)); + printk(KERN_CONT "\n"); + } + printk(KERN_WARNING "\n"); +} + +bool find_numa_distance(int distance) +{ + int i; + + if (distance == node_distance(0, 0)) + return true; + + for (i = 0; i < sched_domains_numa_levels; i++) { + if (sched_domains_numa_distance[i] == distance) + return true; + } + + return false; +} + +/* + * A system can have three types of NUMA topology: + * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system + * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes + * NUMA_BACKPLANE: nodes can reach other nodes through a backplane + * + * The difference between a glueless mesh topology and a backplane + * topology lies in whether communication between not directly + * connected nodes goes through intermediary nodes (where programs + * could run), or through backplane controllers. This affects + * placement of programs. + * + * The type of topology can be discerned with the following tests: + * - If the maximum distance between any nodes is 1 hop, the system + * is directly connected. + * - If for two nodes A and B, located N > 1 hops away from each other, + * there is an intermediary node C, which is < N hops away from both + * nodes A and B, the system is a glueless mesh. + */ +static void init_numa_topology_type(void) +{ + int a, b, c, n; + + n = sched_max_numa_distance; + + if (sched_domains_numa_levels <= 1) { + sched_numa_topology_type = NUMA_DIRECT; + return; + } + + for_each_online_node(a) { + for_each_online_node(b) { + /* Find two nodes furthest removed from each other. */ + if (node_distance(a, b) < n) + continue; + + /* Is there an intermediary node between a and b? */ + for_each_online_node(c) { + if (node_distance(a, c) < n && + node_distance(b, c) < n) { + sched_numa_topology_type = + NUMA_GLUELESS_MESH; + return; + } + } + + sched_numa_topology_type = NUMA_BACKPLANE; + return; + } + } +} + +void sched_init_numa(void) +{ + int next_distance, curr_distance = node_distance(0, 0); + struct sched_domain_topology_level *tl; + int level = 0; + int i, j, k; + + sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); + if (!sched_domains_numa_distance) + return; + + /* + * O(nr_nodes^2) deduplicating selection sort -- in order to find the + * unique distances in the node_distance() table. + * + * Assumes node_distance(0,j) includes all distances in + * node_distance(i,j) in order to avoid cubic time. 
+ */ + next_distance = curr_distance; + for (i = 0; i < nr_node_ids; i++) { + for (j = 0; j < nr_node_ids; j++) { + for (k = 0; k < nr_node_ids; k++) { + int distance = node_distance(i, k); + + if (distance > curr_distance && + (distance < next_distance || + next_distance == curr_distance)) + next_distance = distance; + + /* + * While not a strong assumption it would be nice to know + * about cases where if node A is connected to B, B is not + * equally connected to A. + */ + if (sched_debug() && node_distance(k, i) != distance) + sched_numa_warn("Node-distance not symmetric"); + + if (sched_debug() && i && !find_numa_distance(distance)) + sched_numa_warn("Node-0 not representative"); + } + if (next_distance != curr_distance) { + sched_domains_numa_distance[level++] = next_distance; + sched_domains_numa_levels = level; + curr_distance = next_distance; + } else break; + } + + /* + * In case of sched_debug() we verify the above assumption. + */ + if (!sched_debug()) + break; + } + + if (!level) + return; + + /* + * 'level' contains the number of unique distances, excluding the + * identity distance node_distance(i,i). + * + * The sched_domains_numa_distance[] array includes the actual distance + * numbers. + */ + + /* + * Here, we should temporarily reset sched_domains_numa_levels to 0. + * If it fails to allocate memory for array sched_domains_numa_masks[][], + * the array will contain less then 'level' members. This could be + * dangerous when we use it to iterate array sched_domains_numa_masks[][] + * in other functions. + * + * We reset it to 'level' at the end of this function. + */ + sched_domains_numa_levels = 0; + + sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); + if (!sched_domains_numa_masks) + return; + + /* + * Now for each level, construct a mask per node which contains all + * CPUs of nodes that are that many hops away from us. + */ + for (i = 0; i < level; i++) { + sched_domains_numa_masks[i] = + kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); + if (!sched_domains_numa_masks[i]) + return; + + for (j = 0; j < nr_node_ids; j++) { + struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); + if (!mask) + return; + + sched_domains_numa_masks[i][j] = mask; + + for_each_node(k) { + if (node_distance(j, k) > sched_domains_numa_distance[i]) + continue; + + cpumask_or(mask, mask, cpumask_of_node(k)); + } + } + } + + /* Compute default topology size */ + for (i = 0; sched_domain_topology[i].mask; i++); + + tl = kzalloc((i + level + 1) * + sizeof(struct sched_domain_topology_level), GFP_KERNEL); + if (!tl) + return; + + /* + * Copy the default topology bits.. + */ + for (i = 0; sched_domain_topology[i].mask; i++) + tl[i] = sched_domain_topology[i]; + + /* + * .. and append 'j' levels of NUMA goodness. 
+ */ + for (j = 0; j < level; i++, j++) { + tl[i] = (struct sched_domain_topology_level){ + .mask = sd_numa_mask, + .sd_flags = cpu_numa_flags, + .flags = SDTL_OVERLAP, + .numa_level = j, + SD_INIT_NAME(NUMA) + }; + } + + sched_domain_topology = tl; + + sched_domains_numa_levels = level; + sched_max_numa_distance = sched_domains_numa_distance[level - 1]; + + init_numa_topology_type(); +} + +void sched_domains_numa_masks_set(unsigned int cpu) +{ + int node = cpu_to_node(cpu); + int i, j; + + for (i = 0; i < sched_domains_numa_levels; i++) { + for (j = 0; j < nr_node_ids; j++) { + if (node_distance(j, node) <= sched_domains_numa_distance[i]) + cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); + } + } +} + +void sched_domains_numa_masks_clear(unsigned int cpu) +{ + int i, j; + + for (i = 0; i < sched_domains_numa_levels; i++) { + for (j = 0; j < nr_node_ids; j++) + cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); + } +} + +#endif /* CONFIG_NUMA */ + +static int __sdt_alloc(const struct cpumask *cpu_map) +{ + struct sched_domain_topology_level *tl; + int j; + + for_each_sd_topology(tl) { + struct sd_data *sdd = &tl->data; + + sdd->sd = alloc_percpu(struct sched_domain *); + if (!sdd->sd) + return -ENOMEM; + + sdd->sds = alloc_percpu(struct sched_domain_shared *); + if (!sdd->sds) + return -ENOMEM; + + sdd->sg = alloc_percpu(struct sched_group *); + if (!sdd->sg) + return -ENOMEM; + + sdd->sgc = alloc_percpu(struct sched_group_capacity *); + if (!sdd->sgc) + return -ENOMEM; + + for_each_cpu(j, cpu_map) { + struct sched_domain *sd; + struct sched_domain_shared *sds; + struct sched_group *sg; + struct sched_group_capacity *sgc; + + sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), + GFP_KERNEL, cpu_to_node(j)); + if (!sd) + return -ENOMEM; + + *per_cpu_ptr(sdd->sd, j) = sd; + + sds = kzalloc_node(sizeof(struct sched_domain_shared), + GFP_KERNEL, cpu_to_node(j)); + if (!sds) + return -ENOMEM; + + *per_cpu_ptr(sdd->sds, j) = sds; + + sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), + GFP_KERNEL, cpu_to_node(j)); + if (!sg) + return -ENOMEM; + + sg->next = sg; + + *per_cpu_ptr(sdd->sg, j) = sg; + + sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), + GFP_KERNEL, cpu_to_node(j)); + if (!sgc) + return -ENOMEM; + + *per_cpu_ptr(sdd->sgc, j) = sgc; + } + } + + return 0; +} + +static void __sdt_free(const struct cpumask *cpu_map) +{ + struct sched_domain_topology_level *tl; + int j; + + for_each_sd_topology(tl) { + struct sd_data *sdd = &tl->data; + + for_each_cpu(j, cpu_map) { + struct sched_domain *sd; + + if (sdd->sd) { + sd = *per_cpu_ptr(sdd->sd, j); + if (sd && (sd->flags & SD_OVERLAP)) + free_sched_groups(sd->groups, 0); + kfree(*per_cpu_ptr(sdd->sd, j)); + } + + if (sdd->sds) + kfree(*per_cpu_ptr(sdd->sds, j)); + if (sdd->sg) + kfree(*per_cpu_ptr(sdd->sg, j)); + if (sdd->sgc) + kfree(*per_cpu_ptr(sdd->sgc, j)); + } + free_percpu(sdd->sd); + sdd->sd = NULL; + free_percpu(sdd->sds); + sdd->sds = NULL; + free_percpu(sdd->sg); + sdd->sg = NULL; + free_percpu(sdd->sgc); + sdd->sgc = NULL; + } +} + +struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *child, int cpu) +{ + struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); + + if (child) { + sd->level = child->level + 1; + sched_domain_level_max = max(sched_domain_level_max, sd->level); + child->parent = sd; + + if (!cpumask_subset(sched_domain_span(child), + 
sched_domain_span(sd))) { + pr_err("BUG: arch topology borken\n"); +#ifdef CONFIG_SCHED_DEBUG + pr_err(" the %s domain not a subset of the %s domain\n", + child->name, sd->name); +#endif + /* Fixup, ensure @sd has at least @child cpus. */ + cpumask_or(sched_domain_span(sd), + sched_domain_span(sd), + sched_domain_span(child)); + } + + } + set_domain_attribute(sd, attr); + + return sd; +} + +/* + * Build sched domains for a given set of CPUs and attach the sched domains + * to the individual CPUs + */ +static int +build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) +{ + enum s_alloc alloc_state; + struct sched_domain *sd; + struct s_data d; + struct rq *rq = NULL; + int i, ret = -ENOMEM; + + alloc_state = __visit_domain_allocation_hell(&d, cpu_map); + if (alloc_state != sa_rootdomain) + goto error; + + /* Set up domains for CPUs specified by the cpu_map: */ + for_each_cpu(i, cpu_map) { + struct sched_domain_topology_level *tl; + + sd = NULL; + for_each_sd_topology(tl) { + sd = build_sched_domain(tl, cpu_map, attr, sd, i); + if (tl == sched_domain_topology) + *per_cpu_ptr(d.sd, i) = sd; + if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) + sd->flags |= SD_OVERLAP; + if (cpumask_equal(cpu_map, sched_domain_span(sd))) + break; + } + } + + /* Build the groups for the domains */ + for_each_cpu(i, cpu_map) { + for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { + sd->span_weight = cpumask_weight(sched_domain_span(sd)); + if (sd->flags & SD_OVERLAP) { + if (build_overlap_sched_groups(sd, i)) + goto error; + } else { + if (build_sched_groups(sd, i)) + goto error; + } + } + } + + /* Calculate CPU capacity for physical packages and nodes */ + for (i = nr_cpumask_bits-1; i >= 0; i--) { + if (!cpumask_test_cpu(i, cpu_map)) + continue; + + for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { + claim_allocations(i, sd); + init_sched_groups_capacity(i, sd); + } + } + + /* Attach the domains */ + rcu_read_lock(); + for_each_cpu(i, cpu_map) { + rq = cpu_rq(i); + sd = *per_cpu_ptr(d.sd, i); + + /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */ + if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) + WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); + + cpu_attach_domain(sd, d.rd, i); + } + rcu_read_unlock(); + + if (rq && sched_debug_enabled) { + pr_info("span: %*pbl (max cpu_capacity = %lu)\n", + cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); + } + + ret = 0; +error: + __free_domain_allocs(&d, alloc_state, cpu_map); + return ret; +} + +/* Current sched domains: */ +static cpumask_var_t *doms_cur; + +/* Number of sched domains in 'doms_cur': */ +static int ndoms_cur; + +/* Attribues of custom domains in 'doms_cur' */ +static struct sched_domain_attr *dattr_cur; + +/* + * Special case: If a kmalloc() of a doms_cur partition (array of + * cpumask) fails, then fallback to a single sched domain, + * as determined by the single cpumask fallback_doms. + */ +cpumask_var_t fallback_doms; + +/* + * arch_update_cpu_topology lets virtualized architectures update the + * CPU core maps. It is supposed to return 1 if the topology changed + * or 0 if it stayed the same. 
+ */ +int __weak arch_update_cpu_topology(void) +{ + return 0; +} + +cpumask_var_t *alloc_sched_domains(unsigned int ndoms) +{ + int i; + cpumask_var_t *doms; + + doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); + if (!doms) + return NULL; + for (i = 0; i < ndoms; i++) { + if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { + free_sched_domains(doms, i); + return NULL; + } + } + return doms; +} + +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) +{ + unsigned int i; + for (i = 0; i < ndoms; i++) + free_cpumask_var(doms[i]); + kfree(doms); +} + +/* + * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * For now this just excludes isolated CPUs, but could be used to + * exclude other special cases in the future. + */ +int init_sched_domains(const struct cpumask *cpu_map) +{ + int err; + + arch_update_cpu_topology(); + ndoms_cur = 1; + doms_cur = alloc_sched_domains(ndoms_cur); + if (!doms_cur) + doms_cur = &fallback_doms; + cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); + err = build_sched_domains(doms_cur[0], NULL); + register_sched_domain_sysctl(); + + return err; +} + +/* + * Detach sched domains from a group of CPUs specified in cpu_map + * These CPUs will now be attached to the NULL domain + */ +static void detach_destroy_domains(const struct cpumask *cpu_map) +{ + int i; + + rcu_read_lock(); + for_each_cpu(i, cpu_map) + cpu_attach_domain(NULL, &def_root_domain, i); + rcu_read_unlock(); +} + +/* handle null as "default" */ +static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, + struct sched_domain_attr *new, int idx_new) +{ + struct sched_domain_attr tmp; + + /* Fast path: */ + if (!new && !cur) + return 1; + + tmp = SD_ATTR_INIT; + return !memcmp(cur ? (cur + idx_cur) : &tmp, + new ? (new + idx_new) : &tmp, + sizeof(struct sched_domain_attr)); +} + +/* + * Partition sched domains as specified by the 'ndoms_new' + * cpumasks in the array doms_new[] of cpumasks. This compares + * doms_new[] to the current sched domain partitioning, doms_cur[]. + * It destroys each deleted domain and builds each new domain. + * + * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. + * The masks don't intersect (don't overlap.) We should setup one + * sched domain for each mask. CPUs not in any of the cpumasks will + * not be load balanced. If the same cpumask appears both in the + * current 'doms_cur' domains and in the new 'doms_new', we can leave + * it as it is. + * + * The passed in 'doms_new' should be allocated using + * alloc_sched_domains. This routine takes ownership of it and will + * free_sched_domains it when done with it. If the caller failed the + * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, + * and partition_sched_domains() will fallback to the single partition + * 'fallback_doms', it also forces the domains to be rebuilt. + * + * If doms_new == NULL it will be replaced with cpu_online_mask. + * ndoms_new == 0 is a special case for destroying existing domains, + * and it will not create the default domain. + * + * Call with hotplug lock held + */ +void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ + int i, j, n; + int new_topology; + + mutex_lock(&sched_domains_mutex); + + /* Always unregister in case we don't destroy any domains: */ + unregister_sched_domain_sysctl(); + + /* Let the architecture update CPU core mappings: */ + new_topology = arch_update_cpu_topology(); + + n = doms_new ? 
ndoms_new : 0; + + /* Destroy deleted domains: */ + for (i = 0; i < ndoms_cur; i++) { + for (j = 0; j < n && !new_topology; j++) { + if (cpumask_equal(doms_cur[i], doms_new[j]) + && dattrs_equal(dattr_cur, i, dattr_new, j)) + goto match1; + } + /* No match - a current sched domain not in new doms_new[] */ + detach_destroy_domains(doms_cur[i]); +match1: + ; + } + + n = ndoms_cur; + if (doms_new == NULL) { + n = 0; + doms_new = &fallback_doms; + cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); + WARN_ON_ONCE(dattr_new); + } + + /* Build new domains: */ + for (i = 0; i < ndoms_new; i++) { + for (j = 0; j < n && !new_topology; j++) { + if (cpumask_equal(doms_new[i], doms_cur[j]) + && dattrs_equal(dattr_new, i, dattr_cur, j)) + goto match2; + } + /* No match - add a new doms_new */ + build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); +match2: + ; + } + + /* Remember the new sched domains: */ + if (doms_cur != &fallback_doms) + free_sched_domains(doms_cur, ndoms_cur); + + kfree(dattr_cur); + doms_cur = doms_new; + dattr_cur = dattr_new; + ndoms_cur = ndoms_new; + + register_sched_domain_sysctl(); + + mutex_unlock(&sched_domains_mutex); +} + -- cgit v1.2.3 From 1051408f7ecdcd1baf86f5dd5fdc44740be3b23d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 1 Feb 2017 18:42:41 +0100 Subject: sched/autogroup: Rename auto_group.[ch] to autogroup.[ch] The names are all 'autogroup', not 'auto_group' - so rename the kernel/sched/auto_group.[ch] to match the existing nomenclature. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Thomas Gleixner Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/Makefile | 2 +- kernel/sched/auto_group.c | 271 ---------------------------------------------- kernel/sched/auto_group.h | 64 ----------- kernel/sched/autogroup.c | 271 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched/autogroup.h | 64 +++++++++++ kernel/sched/sched.h | 2 +- 6 files changed, 337 insertions(+), 337 deletions(-) delete mode 100644 kernel/sched/auto_group.c delete mode 100644 kernel/sched/auto_group.h create mode 100644 kernel/sched/autogroup.c create mode 100644 kernel/sched/autogroup.h diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 130ce8ac725b..89ab6758667b 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -19,7 +19,7 @@ obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o obj-y += wait.o swait.o completion.o idle.o obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o -obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o +obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o obj-$(CONFIG_SCHEDSTATS) += stats.o obj-$(CONFIG_SCHED_DEBUG) += debug.o obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c deleted file mode 100644 index da39489d2d80..000000000000 --- a/kernel/sched/auto_group.c +++ /dev/null @@ -1,271 +0,0 @@ -#include "sched.h" - -#include -#include -#include -#include -#include -#include - -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; -static struct autogroup autogroup_default; -static atomic_t autogroup_seq_nr; - -void __init autogroup_init(struct task_struct *init_task) -{ - autogroup_default.tg = &root_task_group; - kref_init(&autogroup_default.kref); - init_rwsem(&autogroup_default.lock); - init_task->signal->autogroup = &autogroup_default; -} - -void autogroup_free(struct task_group *tg) -{ - kfree(tg->autogroup); -} - -static inline void autogroup_destroy(struct 
kref *kref) -{ - struct autogroup *ag = container_of(kref, struct autogroup, kref); - -#ifdef CONFIG_RT_GROUP_SCHED - /* We've redirected RT tasks to the root task group... */ - ag->tg->rt_se = NULL; - ag->tg->rt_rq = NULL; -#endif - sched_offline_group(ag->tg); - sched_destroy_group(ag->tg); -} - -static inline void autogroup_kref_put(struct autogroup *ag) -{ - kref_put(&ag->kref, autogroup_destroy); -} - -static inline struct autogroup *autogroup_kref_get(struct autogroup *ag) -{ - kref_get(&ag->kref); - return ag; -} - -static inline struct autogroup *autogroup_task_get(struct task_struct *p) -{ - struct autogroup *ag; - unsigned long flags; - - if (!lock_task_sighand(p, &flags)) - return autogroup_kref_get(&autogroup_default); - - ag = autogroup_kref_get(p->signal->autogroup); - unlock_task_sighand(p, &flags); - - return ag; -} - -static inline struct autogroup *autogroup_create(void) -{ - struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL); - struct task_group *tg; - - if (!ag) - goto out_fail; - - tg = sched_create_group(&root_task_group); - - if (IS_ERR(tg)) - goto out_free; - - kref_init(&ag->kref); - init_rwsem(&ag->lock); - ag->id = atomic_inc_return(&autogroup_seq_nr); - ag->tg = tg; -#ifdef CONFIG_RT_GROUP_SCHED - /* - * Autogroup RT tasks are redirected to the root task group - * so we don't have to move tasks around upon policy change, - * or flail around trying to allocate bandwidth on the fly. - * A bandwidth exception in __sched_setscheduler() allows - * the policy change to proceed. - */ - free_rt_sched_group(tg); - tg->rt_se = root_task_group.rt_se; - tg->rt_rq = root_task_group.rt_rq; -#endif - tg->autogroup = ag; - - sched_online_group(tg, &root_task_group); - return ag; - -out_free: - kfree(ag); -out_fail: - if (printk_ratelimit()) { - printk(KERN_WARNING "autogroup_create: %s failure.\n", - ag ? "sched_create_group()" : "kmalloc()"); - } - - return autogroup_kref_get(&autogroup_default); -} - -bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) -{ - if (tg != &root_task_group) - return false; - /* - * If we race with autogroup_move_group() the caller can use the old - * value of signal->autogroup but in this case sched_move_task() will - * be called again before autogroup_kref_put(). - * - * However, there is no way sched_autogroup_exit_task() could tell us - * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. - */ - if (p->flags & PF_EXITING) - return false; - - return true; -} - -void sched_autogroup_exit_task(struct task_struct *p) -{ - /* - * We are going to call exit_notify() and autogroup_move_group() can't - * see this thread after that: we can no longer use signal->autogroup. - * See the PF_EXITING check in task_wants_autogroup(). - */ - sched_move_task(p); -} - -static void -autogroup_move_group(struct task_struct *p, struct autogroup *ag) -{ - struct autogroup *prev; - struct task_struct *t; - unsigned long flags; - - BUG_ON(!lock_task_sighand(p, &flags)); - - prev = p->signal->autogroup; - if (prev == ag) { - unlock_task_sighand(p, &flags); - return; - } - - p->signal->autogroup = autogroup_kref_get(ag); - /* - * We can't avoid sched_move_task() after we changed signal->autogroup, - * this process can already run with task_group() == prev->tg or we can - * race with cgroup code which can read autogroup = prev under rq->lock. 
- * In the latter case for_each_thread() can not miss a migrating thread, - * cpu_cgroup_attach() must not be possible after cgroup_exit() and it - * can't be removed from thread list, we hold ->siglock. - * - * If an exiting thread was already removed from thread list we rely on - * sched_autogroup_exit_task(). - */ - for_each_thread(p, t) - sched_move_task(t); - - unlock_task_sighand(p, &flags); - autogroup_kref_put(prev); -} - -/* Allocates GFP_KERNEL, cannot be called under any spinlock */ -void sched_autogroup_create_attach(struct task_struct *p) -{ - struct autogroup *ag = autogroup_create(); - - autogroup_move_group(p, ag); - /* drop extra reference added by autogroup_create() */ - autogroup_kref_put(ag); -} -EXPORT_SYMBOL(sched_autogroup_create_attach); - -/* Cannot be called under siglock. Currently has no users */ -void sched_autogroup_detach(struct task_struct *p) -{ - autogroup_move_group(p, &autogroup_default); -} -EXPORT_SYMBOL(sched_autogroup_detach); - -void sched_autogroup_fork(struct signal_struct *sig) -{ - sig->autogroup = autogroup_task_get(current); -} - -void sched_autogroup_exit(struct signal_struct *sig) -{ - autogroup_kref_put(sig->autogroup); -} - -static int __init setup_autogroup(char *str) -{ - sysctl_sched_autogroup_enabled = 0; - - return 1; -} - -__setup("noautogroup", setup_autogroup); - -#ifdef CONFIG_PROC_FS - -int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) -{ - static unsigned long next = INITIAL_JIFFIES; - struct autogroup *ag; - unsigned long shares; - int err; - - if (nice < MIN_NICE || nice > MAX_NICE) - return -EINVAL; - - err = security_task_setnice(current, nice); - if (err) - return err; - - if (nice < 0 && !can_nice(current, nice)) - return -EPERM; - - /* this is a heavy operation taking global locks.. */ - if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next)) - return -EAGAIN; - - next = HZ / 10 + jiffies; - ag = autogroup_task_get(p); - shares = scale_load(sched_prio_to_weight[nice + 20]); - - down_write(&ag->lock); - err = sched_group_set_shares(ag->tg, shares); - if (!err) - ag->nice = nice; - up_write(&ag->lock); - - autogroup_kref_put(ag); - - return err; -} - -void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) -{ - struct autogroup *ag = autogroup_task_get(p); - - if (!task_group_is_autogroup(ag->tg)) - goto out; - - down_read(&ag->lock); - seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice); - up_read(&ag->lock); - -out: - autogroup_kref_put(ag); -} -#endif /* CONFIG_PROC_FS */ - -#ifdef CONFIG_SCHED_DEBUG -int autogroup_path(struct task_group *tg, char *buf, int buflen) -{ - if (!task_group_is_autogroup(tg)) - return 0; - - return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); -} -#endif /* CONFIG_SCHED_DEBUG */ diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h deleted file mode 100644 index 890c95f2587a..000000000000 --- a/kernel/sched/auto_group.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifdef CONFIG_SCHED_AUTOGROUP - -#include -#include - -struct autogroup { - /* - * reference doesn't mean how many thread attach to this - * autogroup now. It just stands for the number of task - * could use this autogroup. 
- */ - struct kref kref; - struct task_group *tg; - struct rw_semaphore lock; - unsigned long id; - int nice; -}; - -extern void autogroup_init(struct task_struct *init_task); -extern void autogroup_free(struct task_group *tg); - -static inline bool task_group_is_autogroup(struct task_group *tg) -{ - return !!tg->autogroup; -} - -extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg); - -static inline struct task_group * -autogroup_task_group(struct task_struct *p, struct task_group *tg) -{ - int enabled = READ_ONCE(sysctl_sched_autogroup_enabled); - - if (enabled && task_wants_autogroup(p, tg)) - return p->signal->autogroup->tg; - - return tg; -} - -extern int autogroup_path(struct task_group *tg, char *buf, int buflen); - -#else /* !CONFIG_SCHED_AUTOGROUP */ - -static inline void autogroup_init(struct task_struct *init_task) { } -static inline void autogroup_free(struct task_group *tg) { } -static inline bool task_group_is_autogroup(struct task_group *tg) -{ - return 0; -} - -static inline struct task_group * -autogroup_task_group(struct task_struct *p, struct task_group *tg) -{ - return tg; -} - -#ifdef CONFIG_SCHED_DEBUG -static inline int autogroup_path(struct task_group *tg, char *buf, int buflen) -{ - return 0; -} -#endif - -#endif /* CONFIG_SCHED_AUTOGROUP */ diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c new file mode 100644 index 000000000000..da39489d2d80 --- /dev/null +++ b/kernel/sched/autogroup.c @@ -0,0 +1,271 @@ +#include "sched.h" + +#include +#include +#include +#include +#include +#include + +unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; +static struct autogroup autogroup_default; +static atomic_t autogroup_seq_nr; + +void __init autogroup_init(struct task_struct *init_task) +{ + autogroup_default.tg = &root_task_group; + kref_init(&autogroup_default.kref); + init_rwsem(&autogroup_default.lock); + init_task->signal->autogroup = &autogroup_default; +} + +void autogroup_free(struct task_group *tg) +{ + kfree(tg->autogroup); +} + +static inline void autogroup_destroy(struct kref *kref) +{ + struct autogroup *ag = container_of(kref, struct autogroup, kref); + +#ifdef CONFIG_RT_GROUP_SCHED + /* We've redirected RT tasks to the root task group... */ + ag->tg->rt_se = NULL; + ag->tg->rt_rq = NULL; +#endif + sched_offline_group(ag->tg); + sched_destroy_group(ag->tg); +} + +static inline void autogroup_kref_put(struct autogroup *ag) +{ + kref_put(&ag->kref, autogroup_destroy); +} + +static inline struct autogroup *autogroup_kref_get(struct autogroup *ag) +{ + kref_get(&ag->kref); + return ag; +} + +static inline struct autogroup *autogroup_task_get(struct task_struct *p) +{ + struct autogroup *ag; + unsigned long flags; + + if (!lock_task_sighand(p, &flags)) + return autogroup_kref_get(&autogroup_default); + + ag = autogroup_kref_get(p->signal->autogroup); + unlock_task_sighand(p, &flags); + + return ag; +} + +static inline struct autogroup *autogroup_create(void) +{ + struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL); + struct task_group *tg; + + if (!ag) + goto out_fail; + + tg = sched_create_group(&root_task_group); + + if (IS_ERR(tg)) + goto out_free; + + kref_init(&ag->kref); + init_rwsem(&ag->lock); + ag->id = atomic_inc_return(&autogroup_seq_nr); + ag->tg = tg; +#ifdef CONFIG_RT_GROUP_SCHED + /* + * Autogroup RT tasks are redirected to the root task group + * so we don't have to move tasks around upon policy change, + * or flail around trying to allocate bandwidth on the fly. 
+ * A bandwidth exception in __sched_setscheduler() allows + * the policy change to proceed. + */ + free_rt_sched_group(tg); + tg->rt_se = root_task_group.rt_se; + tg->rt_rq = root_task_group.rt_rq; +#endif + tg->autogroup = ag; + + sched_online_group(tg, &root_task_group); + return ag; + +out_free: + kfree(ag); +out_fail: + if (printk_ratelimit()) { + printk(KERN_WARNING "autogroup_create: %s failure.\n", + ag ? "sched_create_group()" : "kmalloc()"); + } + + return autogroup_kref_get(&autogroup_default); +} + +bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) +{ + if (tg != &root_task_group) + return false; + /* + * If we race with autogroup_move_group() the caller can use the old + * value of signal->autogroup but in this case sched_move_task() will + * be called again before autogroup_kref_put(). + * + * However, there is no way sched_autogroup_exit_task() could tell us + * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. + */ + if (p->flags & PF_EXITING) + return false; + + return true; +} + +void sched_autogroup_exit_task(struct task_struct *p) +{ + /* + * We are going to call exit_notify() and autogroup_move_group() can't + * see this thread after that: we can no longer use signal->autogroup. + * See the PF_EXITING check in task_wants_autogroup(). + */ + sched_move_task(p); +} + +static void +autogroup_move_group(struct task_struct *p, struct autogroup *ag) +{ + struct autogroup *prev; + struct task_struct *t; + unsigned long flags; + + BUG_ON(!lock_task_sighand(p, &flags)); + + prev = p->signal->autogroup; + if (prev == ag) { + unlock_task_sighand(p, &flags); + return; + } + + p->signal->autogroup = autogroup_kref_get(ag); + /* + * We can't avoid sched_move_task() after we changed signal->autogroup, + * this process can already run with task_group() == prev->tg or we can + * race with cgroup code which can read autogroup = prev under rq->lock. + * In the latter case for_each_thread() can not miss a migrating thread, + * cpu_cgroup_attach() must not be possible after cgroup_exit() and it + * can't be removed from thread list, we hold ->siglock. + * + * If an exiting thread was already removed from thread list we rely on + * sched_autogroup_exit_task(). + */ + for_each_thread(p, t) + sched_move_task(t); + + unlock_task_sighand(p, &flags); + autogroup_kref_put(prev); +} + +/* Allocates GFP_KERNEL, cannot be called under any spinlock */ +void sched_autogroup_create_attach(struct task_struct *p) +{ + struct autogroup *ag = autogroup_create(); + + autogroup_move_group(p, ag); + /* drop extra reference added by autogroup_create() */ + autogroup_kref_put(ag); +} +EXPORT_SYMBOL(sched_autogroup_create_attach); + +/* Cannot be called under siglock. 
Currently has no users */ +void sched_autogroup_detach(struct task_struct *p) +{ + autogroup_move_group(p, &autogroup_default); +} +EXPORT_SYMBOL(sched_autogroup_detach); + +void sched_autogroup_fork(struct signal_struct *sig) +{ + sig->autogroup = autogroup_task_get(current); +} + +void sched_autogroup_exit(struct signal_struct *sig) +{ + autogroup_kref_put(sig->autogroup); +} + +static int __init setup_autogroup(char *str) +{ + sysctl_sched_autogroup_enabled = 0; + + return 1; +} + +__setup("noautogroup", setup_autogroup); + +#ifdef CONFIG_PROC_FS + +int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) +{ + static unsigned long next = INITIAL_JIFFIES; + struct autogroup *ag; + unsigned long shares; + int err; + + if (nice < MIN_NICE || nice > MAX_NICE) + return -EINVAL; + + err = security_task_setnice(current, nice); + if (err) + return err; + + if (nice < 0 && !can_nice(current, nice)) + return -EPERM; + + /* this is a heavy operation taking global locks.. */ + if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next)) + return -EAGAIN; + + next = HZ / 10 + jiffies; + ag = autogroup_task_get(p); + shares = scale_load(sched_prio_to_weight[nice + 20]); + + down_write(&ag->lock); + err = sched_group_set_shares(ag->tg, shares); + if (!err) + ag->nice = nice; + up_write(&ag->lock); + + autogroup_kref_put(ag); + + return err; +} + +void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) +{ + struct autogroup *ag = autogroup_task_get(p); + + if (!task_group_is_autogroup(ag->tg)) + goto out; + + down_read(&ag->lock); + seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice); + up_read(&ag->lock); + +out: + autogroup_kref_put(ag); +} +#endif /* CONFIG_PROC_FS */ + +#ifdef CONFIG_SCHED_DEBUG +int autogroup_path(struct task_group *tg, char *buf, int buflen) +{ + if (!task_group_is_autogroup(tg)) + return 0; + + return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); +} +#endif /* CONFIG_SCHED_DEBUG */ diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h new file mode 100644 index 000000000000..890c95f2587a --- /dev/null +++ b/kernel/sched/autogroup.h @@ -0,0 +1,64 @@ +#ifdef CONFIG_SCHED_AUTOGROUP + +#include +#include + +struct autogroup { + /* + * reference doesn't mean how many thread attach to this + * autogroup now. It just stands for the number of task + * could use this autogroup. 
+ */ + struct kref kref; + struct task_group *tg; + struct rw_semaphore lock; + unsigned long id; + int nice; +}; + +extern void autogroup_init(struct task_struct *init_task); +extern void autogroup_free(struct task_group *tg); + +static inline bool task_group_is_autogroup(struct task_group *tg) +{ + return !!tg->autogroup; +} + +extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg); + +static inline struct task_group * +autogroup_task_group(struct task_struct *p, struct task_group *tg) +{ + int enabled = READ_ONCE(sysctl_sched_autogroup_enabled); + + if (enabled && task_wants_autogroup(p, tg)) + return p->signal->autogroup->tg; + + return tg; +} + +extern int autogroup_path(struct task_group *tg, char *buf, int buflen); + +#else /* !CONFIG_SCHED_AUTOGROUP */ + +static inline void autogroup_init(struct task_struct *init_task) { } +static inline void autogroup_free(struct task_group *tg) { } +static inline bool task_group_is_autogroup(struct task_group *tg) +{ + return 0; +} + +static inline struct task_group * +autogroup_task_group(struct task_struct *p, struct task_group *tg) +{ + return tg; +} + +#ifdef CONFIG_SCHED_DEBUG +static inline int autogroup_path(struct task_group *tg, char *buf, int buflen) +{ + return 0; +} +#endif + +#endif /* CONFIG_SCHED_AUTOGROUP */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 17ed94b9b413..71b10a9b73cf 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1069,7 +1069,7 @@ static inline void sched_ttwu_pending(void) { } #endif /* CONFIG_SMP */ #include "stats.h" -#include "auto_group.h" +#include "autogroup.h" #ifdef CONFIG_CGROUP_SCHED -- cgit v1.2.3 From bb3bac2ca9a3a5b7fa601781adf70167a0449d75 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 6 Feb 2017 11:04:26 -0500 Subject: sched/core: Remove unlikely() annotation from sched_move_task() The check for 'running' in sched_move_task() has an unlikely() around it. That is, it is unlikely that the task being moved is running. That use to be true. But with a couple of recent updates, it is now likely that the task will be running. The first change came from ea86cb4b7621 ("sched/cgroup: Fix cpu_cgroup_fork() handling") that moved around the use case of sched_move_task() in do_fork() where the call is now done after the task is woken (hence it is running). The second change came from 8e5bfa8c1f84 ("sched/autogroup: Do not use autogroup->tg in zombie threads") where sched_move_task() is called by the exit path, by the task that is exiting. Hence it too is running. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Link: http://lkml.kernel.org/r/20170206110426.27ca6426@gandalf.local.home Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e4aa470ed454..34e2291a9a6c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6404,14 +6404,14 @@ void sched_move_task(struct task_struct *tsk) if (queued) dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE); - if (unlikely(running)) + if (running) put_prev_task(rq, tsk); sched_change_group(tsk, TASK_MOVE_GROUP); if (queued) enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE); - if (unlikely(running)) + if (running) set_curr_task(rq, tsk); task_rq_unlock(rq, tsk, &rf); -- cgit v1.2.3
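
Editorial note, not part of the patch series above: the unlikely() annotation removed in the last commit is, in the kernel, a thin wrapper around the GCC/Clang builtin __builtin_expect() (see include/linux/compiler.h). The stand-alone user-space sketch below mirrors those wrapper macros to show what the hint actually does and why a stale hint is harmful; the process() function, the loop and its numbers are invented purely for illustration and do not appear anywhere in the kernel source.

	/*
	 * Minimal sketch of likely()/unlikely(), assuming a GCC- or
	 * Clang-compatible compiler that provides __builtin_expect().
	 * The macro bodies mirror the kernel's definitions; everything
	 * else here is a made-up example.
	 */
	#include <stdio.h>

	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	/*
	 * The hint only tells the compiler which side of the branch to
	 * lay out as the fall-through (hot) path.  If the predicate is in
	 * fact mostly true -- as 'running' now is in sched_move_task() --
	 * marking it unlikely() pushes the common case onto the cold,
	 * out-of-line path, which is why the commit above drops the hint
	 * and keeps a plain "if (running)".
	 */
	static int process(int running)
	{
		if (unlikely(running))	/* wrong hint: 'running' is usually true */
			return 1;	/* common case laid out as the cold path */
		return 0;
	}

	int main(void)
	{
		int hits = 0;
		int i;

		/* Synthetic workload: the "unlikely" branch is taken every time. */
		for (i = 0; i < 1000; i++)
			hits += process(1);

		printf("branch taken %d times despite the unlikely() hint\n", hits);
		return 0;
	}

Note that the patch simply removes the hint rather than inverting it to likely(); presumably, with no strong static bias either way, leaving the branch unannotated and letting the hardware predictor decide is the safer choice.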