From 20125c872a3f129cef7fdec2b7681da98502a55d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 24 Jun 2021 11:41:17 +0200
Subject: x86/xen: Make save_fl() noinstr

vmlinux.o: warning: objtool: pv_ops[30]: native_save_fl
vmlinux.o: warning: objtool: pv_ops[30]: __raw_callee_save_xen_save_fl
vmlinux.o: warning: objtool: pv_ops[30]: xen_save_fl_direct
vmlinux.o: warning: objtool: lockdep_hardirqs_off()+0x73: call to pv_ops[30]() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Juergen Gross
Link: https://lore.kernel.org/r/20210624095148.749712274@infradead.org
---
 arch/x86/include/asm/paravirt.h |  7 +++++--
 arch/x86/kernel/irqflags.S      |  2 ++
 arch/x86/xen/irq.c              |  4 ++--
 arch/x86/xen/xen-asm.S          | 32 ++++++++++++++++----------------
 4 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 34da790ac429..cebec95a7124 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -653,10 +653,10 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
  * functions.
  */
 #define PV_THUNK_NAME(func) "__raw_callee_save_" #func
-#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
+#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
 	extern typeof(func) __raw_callee_save_##func;			\
 									\
-	asm(".pushsection .text;"					\
+	asm(".pushsection " section ", \"ax\";"				\
 	    ".globl " PV_THUNK_NAME(func) ";"				\
 	    ".type " PV_THUNK_NAME(func) ", @function;"			\
 	    PV_THUNK_NAME(func) ":"					\
@@ -669,6 +669,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
 	    ".popsection")
 
+#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
+	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")
+
 /* Get a reference to a callee-save function */
 #define PV_CALLEE_SAVE(func)						\
 	((struct paravirt_callee_save) { __raw_callee_save_##func })
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
index 8ef35063964b..760e1f293093 100644
--- a/arch/x86/kernel/irqflags.S
+++ b/arch/x86/kernel/irqflags.S
@@ -7,9 +7,11 @@
 /*
  * unsigned long native_save_fl(void)
  */
+.pushsection .noinstr.text, "ax"
 SYM_FUNC_START(native_save_fl)
 	pushf
 	pop %_ASM_AX
 	ret
 SYM_FUNC_END(native_save_fl)
+.popsection
 EXPORT_SYMBOL(native_save_fl)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index dfa091d79c2e..9c71f43ba303 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -24,7 +24,7 @@ void xen_force_evtchn_callback(void)
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-asmlinkage __visible unsigned long xen_save_fl(void)
+asmlinkage __visible noinstr unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
@@ -40,7 +40,7 @@ asmlinkage __visible unsigned long xen_save_fl(void)
 	 */
 	return (-flags) & X86_EFLAGS_IF;
 }
-PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
+__PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl, ".noinstr.text");
 
 asmlinkage __visible void xen_irq_disable(void)
 {
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index aef4a1e8f33f..0883e39fee2e 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -57,22 +57,6 @@ SYM_FUNC_START(xen_irq_disable_direct)
 	ret
 SYM_FUNC_END(xen_irq_disable_direct)
 
-/*
- * (xen_)save_fl is used to get the current interrupt enable status.
- * Callers expect the status to be in X86_EFLAGS_IF, and other bits
- * may be set in the return value. We take advantage of this by
- * making sure that X86_EFLAGS_IF has the right value (and other bits
- * in that byte are 0), but other bits in the return value are
- * undefined. We need to toggle the state of the bit, because Xen and
- * x86 use opposite senses (mask vs enable).
- */
-SYM_FUNC_START(xen_save_fl_direct)
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	setz %ah
-	addb %ah, %ah
-	ret
-SYM_FUNC_END(xen_save_fl_direct)
-
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
@@ -103,6 +87,22 @@ SYM_FUNC_START(check_events)
 SYM_FUNC_END(check_events)
 
 .pushsection .noinstr.text, "ax"
+/*
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value. We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined. We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
+ */
+SYM_FUNC_START(xen_save_fl_direct)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+	setz %ah
+	addb %ah, %ah
+	ret
+SYM_FUNC_END(xen_save_fl_direct)
+
 SYM_FUNC_START(xen_read_cr2)
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
--
cgit v1.2.3
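Not part of the patch above: a minimal usage sketch of the new two-argument thunk macro, for readers following along. The function example_pv_op() below is hypothetical; only the macro names, the noinstr annotation and the ".noinstr.text" section name are taken from the patch itself.

#include <asm/paravirt.h>	/* __PV_CALLEE_SAVE_REGS_THUNK() from this patch */

/* Hypothetical callee-save pv op that can be reached from .noinstr.text,
 * so both the C function and its register-saving thunk must stay out of
 * instrumented text, mirroring what the patch does for xen_save_fl().
 */
asmlinkage __visible noinstr unsigned long example_pv_op(void)
{
	return 0;
}

/* Emit the generated thunk into .noinstr.text instead of the default .text: */
__PV_CALLEE_SAVE_REGS_THUNK(example_pv_op, ".noinstr.text");

Existing one-argument PV_CALLEE_SAVE_REGS_THUNK() users are unaffected, since that macro now simply expands to __PV_CALLEE_SAVE_REGS_THUNK(func, ".text").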