author      Jane Malalane <jane.malalane@citrix.com>    2022-07-29 08:04:16 +0100
committer   Juergen Gross <jgross@suse.com>             2022-08-12 11:28:21 +0200
commit      b1c3497e604ddccea5af4071831ed0e4680fb35e (patch)
tree        108b8bd765f1e0496592013bb988ac329ca19de9 /drivers/xen
parent      251e90e7e346a23742b90e2c4db19d322e071d99 (diff)
x86/xen: Add support for HVMOP_set_evtchn_upcall_vector
Implement support for the HVMOP_set_evtchn_upcall_vector hypercall in
order to set the per-vCPU event channel vector callback on Linux and
use it in preference to HVM_PARAM_CALLBACK_IRQ.
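
For reference, the per-vCPU registration amounts to one hypercall per
CPU, as in the xen_set_upcall_vector() helper added in the diff below
(a condensed sketch; the operation struct and hypercall wrapper are the
existing Xen interfaces):

	xen_hvm_evtchn_upcall_vector_t op = {
		.vector = HYPERVISOR_CALLBACK_VECTOR,
		.vcpu = per_cpu(xen_vcpu_id, cpu),
	};
	/* Ask Xen to deliver this vCPU's event channel upcalls on the vector. */
	rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);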
If the per-vCPU vector setup is successful on the BSP, use this method
for the APs. If not, fall back to the global vector-type callback.
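
In outline, the boot-time selection behaves as in this condensed sketch
of xen_init_setup_upcall_vector() from the diff below (leaf 4 of the
Xen CPUID space advertises XEN_HVM_CPUID_UPCALL_VECTOR):

	if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
	    !xen_set_upcall_vector(0))          /* probe on the BSP */
		xen_percpu_upcall = true;       /* APs register the same way */
	else if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_setup_callback_vector();    /* global vector-type callback */
	else
		xen_have_vector_callback = false;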
Also register callback_irq at per-vCPU event channel setup to trick
the toolstack into thinking the domain is enlightened.
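
Concretely, that registration is a dummy callback_via value written
once from the BSP, since no global callback is actually in use in this
mode (see xen_set_upcall_vector() in the diff below):

	/* Trick the toolstack into treating the domain as enlightened. */
	if (!cpu)
		rc = xen_set_callback_via(1);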
Suggested-by: "Roger Pau Monné" <roger.pau@citrix.com>
Signed-off-by: Jane Malalane <jane.malalane@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/20220729070416.23306-1-jane.malalane@citrix.com
Signed-off-by: Juergen Gross <jgross@suse.com>
Diffstat (limited to 'drivers/xen')
-rw-r--r--   drivers/xen/events/events_base.c | 53
1 file changed, 47 insertions, 6 deletions
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 46d9295d9a6e..206d4b466e44 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -45,6 +45,7 @@
 #include <asm/irq.h>
 #include <asm/io_apic.h>
 #include <asm/i8259.h>
+#include <asm/xen/cpuid.h>
 #include <asm/xen/pci.h>
 #endif
 #include <asm/sync_bitops.h>
@@ -2183,6 +2184,7 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
 	.irq_ack = ack_dynirq,
 };
 
+#ifdef CONFIG_X86
 #ifdef CONFIG_XEN_PVHVM
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
@@ -2195,11 +2197,48 @@ void xen_setup_callback_vector(void)
 	callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
 	if (xen_set_callback_via(callback_via)) {
 		pr_err("Request for Xen HVM callback vector failed\n");
-		xen_have_vector_callback = 0;
+		xen_have_vector_callback = false;
 	}
 }
 
+/*
+ * Setup per-vCPU vector-type callbacks. If this setup is unavailable,
+ * fallback to the global vector-type callback.
+ */
+static __init void xen_init_setup_upcall_vector(void)
+{
+	if (!xen_have_vector_callback)
+		return;
+
+	if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
+	    !xen_set_upcall_vector(0))
+		xen_percpu_upcall = true;
+	else if (xen_feature(XENFEAT_hvm_callback_vector))
+		xen_setup_callback_vector();
+	else
+		xen_have_vector_callback = false;
+}
+
+int xen_set_upcall_vector(unsigned int cpu)
+{
+	int rc;
+	xen_hvm_evtchn_upcall_vector_t op = {
+		.vector = HYPERVISOR_CALLBACK_VECTOR,
+		.vcpu = per_cpu(xen_vcpu_id, cpu),
+	};
+
+	rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
+	if (rc)
+		return rc;
+
+	/* Trick toolstack to think we are enlightened. */
+	if (!cpu)
+		rc = xen_set_callback_via(1);
+
+	return rc;
+}
+
 static __init void xen_alloc_callback_vector(void)
 {
 	if (!xen_have_vector_callback)
@@ -2210,8 +2249,11 @@ static __init void xen_alloc_callback_vector(void)
 }
 #else
 void xen_setup_callback_vector(void) {}
+static inline void xen_init_setup_upcall_vector(void) {}
+int xen_set_upcall_vector(unsigned int cpu) {}
 static inline void xen_alloc_callback_vector(void) {}
-#endif
+#endif /* CONFIG_XEN_PVHVM */
+#endif /* CONFIG_X86 */
 
 bool xen_fifo_events = true;
 module_param_named(fifo_events, xen_fifo_events, bool, 0);
@@ -2271,10 +2313,9 @@ void __init xen_init_IRQ(void)
 		if (xen_initial_domain())
 			pci_xen_initial_domain();
 	}
-	if (xen_feature(XENFEAT_hvm_callback_vector)) {
-		xen_setup_callback_vector();
-		xen_alloc_callback_vector();
-	}
+	xen_init_setup_upcall_vector();
+	xen_alloc_callback_vector();
+
 	if (xen_hvm_domain()) {
 		native_init_IRQ();