From 93ee37c2a6a97e5d358af2d8f0510817b6e75679 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 11 Apr 2018 17:54:20 +0100
Subject: thread_info: Add update_thread_flag() helpers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There are a number of bits of code sprinkled around the kernel to
set a thread flag if a certain condition is true, and clear it
otherwise.

To help make those call sites terser and less cumbersome, this patch
adds a new family of thread flag manipulators

	update*_thread_flag([...,] flag, cond)

which do the equivalent of:

	if (cond)
		set*_thread_flag([...,] flag);
	else
		clear*_thread_flag([...,] flag);

Signed-off-by: Dave Martin
Reviewed-by: Alex Bennée
Acked-by: Steven Rostedt (VMware)
Acked-by: Marc Zyngier
Acked-by: Catalin Marinas
Acked-by: Peter Zijlstra (Intel)
Cc: Ingo Molnar
Cc: Oleg Nesterov
Signed-off-by: Marc Zyngier
---
 include/linux/sched.h       |  6 ++++++
 include/linux/thread_info.h | 11 +++++++++++
 2 files changed, 17 insertions(+)

(limited to 'include')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index b3d697f3b573..c2c305199721 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1578,6 +1578,12 @@ static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
 	clear_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
+static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
+					  bool value)
+{
+	update_ti_thread_flag(task_thread_info(tsk), flag, value);
+}
+
 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
 {
 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index cf2862bd134a..8d8821b3689a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -60,6 +60,15 @@ static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
 	clear_bit(flag, (unsigned long *)&ti->flags);
 }
 
+static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
+					 bool value)
+{
+	if (value)
+		set_ti_thread_flag(ti, flag);
+	else
+		clear_ti_thread_flag(ti, flag);
+}
+
 static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
 {
 	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
@@ -79,6 +88,8 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 	set_ti_thread_flag(current_thread_info(), flag)
 #define clear_thread_flag(flag) \
 	clear_ti_thread_flag(current_thread_info(), flag)
+#define update_thread_flag(flag, value) \
+	update_ti_thread_flag(current_thread_info(), flag, value)
 #define test_and_set_thread_flag(flag) \
 	test_and_set_ti_thread_flag(current_thread_info(), flag)
 #define test_and_clear_thread_flag(flag) \
--
cgit v1.2.3

From bd2a6394fd2d3ea528d4b9c67f829e35f1f5d5dd Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Fri, 23 Feb 2018 17:23:57 +0100
Subject: KVM: arm/arm64: Introduce kvm_arch_vcpu_run_pid_change
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

KVM/ARM differs from other architectures in having to maintain an
additional virtual address space from that of the host and the guest,
because we split the execution of KVM across both EL1 and EL2.

This results in a need to explicitly map data structures into EL2
(hyp) which are accessed from the hyp code.
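Such mappings are created with the existing create_hyp_mappings()
helper; a minimal sketch (illustrative only, with "ptr" and "size" as
placeholders for the structure being mapped):

	/* Sketch: make [ptr, ptr + size) visible in the EL2 VA space */
	int err = create_hyp_mappings(ptr, ptr + size, PAGE_HYP);
	if (err)
		return err;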
As we are about to be more clever with our FPSIMD handling on arm64,
which stores data in the task struct and uses thread_info flags, we
will have to map parts of the currently executing task struct into
the EL2 virtual address space.

However, we don't want to do this on every KVM_RUN, because it is a
fairly expensive operation to walk the page tables, and the common
execution mode is to map a single thread to a VCPU.

By introducing a hook that architectures can select with
HAVE_KVM_VCPU_RUN_PID_CHANGE, we do not introduce overhead for other
architectures, but have a simple way to only map the data we need
when required for arm64.

This patch introduces the framework only, and wires it up in the
arm/arm64 KVM common code.

No functional change.

Signed-off-by: Christoffer Dall
Signed-off-by: Dave Martin
Reviewed-by: Marc Zyngier
Reviewed-by: Alex Bennée
Signed-off-by: Marc Zyngier
---
 include/linux/kvm_host.h | 9 +++++++++
 virt/kvm/Kconfig         | 3 +++
 virt/kvm/kvm_main.c      | 7 ++++++-
 3 files changed, 18 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6930c63126c7..4268ace60bf1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1276,4 +1276,13 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 		unsigned long start, unsigned long end);
 
+#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+
 #endif
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index cca7e065a075..72143cfaf6ec 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -54,3 +54,6 @@ config HAVE_KVM_IRQ_BYPASS
 
 config HAVE_KVM_VCPU_ASYNC_IOCTL
 	bool
+
+config HAVE_KVM_VCPU_RUN_PID_CHANGE
+	bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c7b2e927f699..c32e2407713d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2550,8 +2550,13 @@ static long kvm_vcpu_ioctl(struct file *filp,
 	oldpid = rcu_access_pointer(vcpu->pid);
 	if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
 		/* The thread running this VCPU changed. */
-		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+		struct pid *newpid;
 
+		r = kvm_arch_vcpu_run_pid_change(vcpu);
+		if (r)
+			break;
+
+		newpid = get_task_pid(current, PIDTYPE_PID);
 		rcu_assign_pointer(vcpu->pid, newpid);
 		if (oldpid)
 			synchronize_rcu();
--
cgit v1.2.3

From dbd9733ab6742dfb7d4f6c49e7d412d4e792d8be Mon Sep 17 00:00:00 2001
From: Eric Auger
Date: Tue, 22 May 2018 09:55:08 +0200
Subject: KVM: arm/arm64: Replace the single rdist region by a list

At the moment KVM supports a single rdist region. We want to support
several separate rdist regions, so let's introduce a list of them.
This patch currently only cares about a single entry in this list, as
the functionality to register several redist regions is not yet
there. So this only translates the existing code into something
functionally similar using that new data structure.

The redistributor region handle is stored in the vgic_cpu structure
to allow later computation of the TYPER last bit.
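With the list in place, a redistributor's MMIO frames are derived
from its region instead of from a single global base; an illustrative
condensation (not part of the patch text) of what
vgic_register_redist_iodev() computes below:

	/* Sketch: rdreg is the first entry of the vgic_dist rd_regions list */
	gpa_t rd_base = rdreg->base +
			rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
	gpa_t sgi_base = rd_base + SZ_64K;	/* RD frame, then SGI frame */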
Signed-off-by: Eric Auger
Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 include/kvm/arm_vgic.h              | 14 ++++++++++----
 virt/kvm/arm/vgic/vgic-init.c       | 16 ++++++++++++++--
 virt/kvm/arm/vgic/vgic-kvm-device.c | 13 +++++++++++--
 virt/kvm/arm/vgic/vgic-mmio-v3.c    | 38 +++++++++++++++++++++++++++++--------
 virt/kvm/arm/vgic/vgic-v3.c         | 20 +++++++++++--------
 5 files changed, 77 insertions(+), 24 deletions(-)

(limited to 'include')

diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index e7efe12a81bd..90e489f685ae 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -201,6 +201,14 @@ struct vgic_its {
 
 struct vgic_state_iter;
 
+struct vgic_redist_region {
+	u32 index;
+	gpa_t base;
+	u32 count; /* number of redistributors or 0 if single region */
+	u32 free_index; /* index of the next free redistributor */
+	struct list_head list;
+};
+
 struct vgic_dist {
 	bool			in_kernel;
 	bool			ready;
@@ -220,10 +228,7 @@ struct vgic_dist {
 		/* either a GICv2 CPU interface */
 		gpa_t		vgic_cpu_base;
 		/* or a number of GICv3 redistributor regions */
-		struct {
-			gpa_t	vgic_redist_base;
-			gpa_t	vgic_redist_free_offset;
-		};
+		struct list_head rd_regions;
 	};
 
 	/* distributor enabled */
@@ -311,6 +316,7 @@ struct vgic_cpu {
 	 */
 	struct vgic_io_device	rd_iodev;
 	struct vgic_io_device	sgi_iodev;
+	struct vgic_redist_region *rdreg;
 
 	/* Contains the attributes and gpa of the LPI pending tables. */
 	u64 pendbaser;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 9a5aed7eecfd..8901b2d8fca1 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -167,8 +167,11 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	kvm->arch.vgic.vgic_model = type;
 
 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
-	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
-	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
+
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
+		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+	else
+		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
 	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
@@ -303,6 +306,7 @@ out:
 static void kvm_vgic_dist_destroy(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_redist_region *rdreg, *next;
 
 	dist->ready = false;
 	dist->initialized = false;
@@ -311,6 +315,14 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 	dist->spis = NULL;
 	dist->nr_spis = 0;
 
+	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
+			list_del(&rdreg->list);
+			kfree(rdreg);
+		}
+		INIT_LIST_HEAD(&dist->rd_regions);
+	}
+
 	if (vgic_supports_direct_msis(kvm))
 		vgic_v4_teardown(kvm);
 }
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 10ae6f394b71..76ab3691f7fe 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -66,6 +66,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 	int r = 0;
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	phys_addr_t *addr_ptr, alignment;
+	u64 undef_value = VGIC_ADDR_UNDEF;
 
 	mutex_lock(&kvm->lock);
 	switch (type) {
@@ -84,7 +85,9 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 		addr_ptr = &vgic->vgic_dist_base;
 		alignment = SZ_64K;
 		break;
-	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
+		struct vgic_redist_region *rdreg;
+
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
 		if (r)
 			break;
@@ -92,8 +95,14 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 			r = vgic_v3_set_redist_base(kvm, *addr);
 			goto out;
 		}
-		addr_ptr = &vgic->vgic_redist_base;
+		rdreg = list_first_entry(&vgic->rd_regions,
+					 struct vgic_redist_region, list);
+		if (!rdreg)
+			addr_ptr = &undef_value;
+		else
+			addr_ptr = &rdreg->base;
 		break;
+	}
 	default:
 		r = -ENODEV;
 	}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 671fe81f8e1d..d1aab183a1cc 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -580,8 +580,10 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct vgic_dist *vgic = &kvm->arch.vgic;
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
 	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
+	struct vgic_redist_region *rdreg;
 	gpa_t rd_base, sgi_base;
 	int ret;
 
@@ -591,13 +593,17 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	 * function for all VCPUs when the base address is set. Just return
 	 * without doing any work for now.
 	 */
-	if (IS_VGIC_ADDR_UNDEF(vgic->vgic_redist_base))
+	rdreg = list_first_entry(&vgic->rd_regions,
+				 struct vgic_redist_region, list);
+	if (!rdreg)
 		return 0;
 
 	if (!vgic_v3_check_base(kvm))
 		return -EINVAL;
 
-	rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
+	vgic_cpu->rdreg = rdreg;
+
+	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
 	sgi_base = rd_base + SZ_64K;
 
 	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
@@ -631,7 +637,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	vgic->vgic_redist_free_offset += 2 * SZ_64K;
+	rdreg->free_index++;
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return ret;
@@ -673,19 +679,31 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
 {
 	struct vgic_dist *vgic = &kvm->arch.vgic;
+	struct vgic_redist_region *rdreg;
 	int ret;
 
 	/* vgic_check_ioaddr makes sure we don't do this twice */
-	ret = vgic_check_ioaddr(kvm, &vgic->vgic_redist_base, addr, SZ_64K);
+	if (!list_empty(&vgic->rd_regions))
+		return -EINVAL;
+
+	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
+	if (!rdreg)
+		return -ENOMEM;
+
+	rdreg->base = VGIC_ADDR_UNDEF;
+
+	ret = vgic_check_ioaddr(kvm, &rdreg->base, addr, SZ_64K);
 	if (ret)
-		return ret;
+		goto out;
 
-	vgic->vgic_redist_base = addr;
+	rdreg->base = addr;
 	if (!vgic_v3_check_base(kvm)) {
-		vgic->vgic_redist_base = VGIC_ADDR_UNDEF;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
+	list_add(&rdreg->list, &vgic->rd_regions);
+
 	/*
 	 * Register iodevs for each existing VCPU.  Adding more VCPUs
 	 * afterwards will register the iodevs when needed.
@@ -695,6 +713,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
 		return ret;
 
 	return 0;
+
+out:
+	kfree(rdreg);
+	return ret;
 }
 
 int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index c7423f3768e5..56e6e903d998 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -427,6 +427,9 @@ bool vgic_v3_check_base(struct kvm *kvm)
 {
 	struct vgic_dist *d = &kvm->arch.vgic;
 	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;
+	struct vgic_redist_region *rdreg =
+		list_first_entry(&d->rd_regions,
+				 struct vgic_redist_region, list);
 
 	redist_size *= atomic_read(&kvm->online_vcpus);
 
@@ -434,18 +437,17 @@ bool vgic_v3_check_base(struct kvm *kvm)
 	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
 		return false;
 
-	if (!IS_VGIC_ADDR_UNDEF(d->vgic_redist_base) &&
-	    d->vgic_redist_base + redist_size < d->vgic_redist_base)
+	if (rdreg && (rdreg->base + redist_size < rdreg->base))
 		return false;
 
 	/* Both base addresses must be set to check if they overlap */
-	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) ||
-	    IS_VGIC_ADDR_UNDEF(d->vgic_redist_base))
+	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) || !rdreg)
 		return true;
 
-	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
+	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= rdreg->base)
 		return true;
-	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
+
+	if (rdreg->base + redist_size <= d->vgic_dist_base)
 		return true;
 
 	return false;
@@ -455,12 +457,14 @@ int vgic_v3_map_resources(struct kvm *kvm)
 {
 	int ret = 0;
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_redist_region *rdreg =
+		list_first_entry(&dist->rd_regions,
+				 struct vgic_redist_region, list);
 
 	if (vgic_ready(kvm))
 		goto out;
 
-	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
-	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || !rdreg) {
 		kvm_err("Need to set vgic distributor addresses first\n");
 		ret = -ENXIO;
 		goto out;
--
cgit v1.2.3

From 5ec17fbac6713be82b90f54d5a31251803fd8de5 Mon Sep 17 00:00:00 2001
From: Eric Auger
Date: Tue, 22 May 2018 09:55:13 +0200
Subject: KVM: arm/arm64: Remove kvm_vgic_vcpu_early_init

kvm_vgic_vcpu_early_init() gets called after kvm_vgic_vcpu_init(),
which is confusing. The call path is as follows:

kvm_vm_ioctl_create_vcpu
|_ kvm_arch_vcpu_create
   |_ kvm_vcpu_init
      |_ kvm_arch_vcpu_init
         |_ kvm_vgic_vcpu_init
|_ kvm_arch_vcpu_postcreate
   |_ kvm_vgic_vcpu_early_init

Static initialization currently done in kvm_vgic_vcpu_early_init()
can be moved to kvm_vgic_vcpu_init(). So let's move the code and
remove kvm_vgic_vcpu_early_init(). kvm_arch_vcpu_postcreate() then
does nothing.
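After the move, the creation path reduces to the following (sketch of
the resulting call tree):

kvm_vm_ioctl_create_vcpu
|_ kvm_arch_vcpu_create
   |_ kvm_vcpu_init
      |_ kvm_arch_vcpu_init
         |_ kvm_vgic_vcpu_init   /* all static per-VCPU setup */
|_ kvm_arch_vcpu_postcreate      /* now empty */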
Signed-off-by: Eric Auger
Signed-off-by: Marc Zyngier
---
 include/kvm/arm_vgic.h        |  1 -
 virt/kvm/arm/arm.c            |  1 -
 virt/kvm/arm/vgic/vgic-init.c | 80 ++++++++++++++++++++-----------------------
 3 files changed, 37 insertions(+), 45 deletions(-)

(limited to 'include')

diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 90e489f685ae..08ccbe37dcda 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -338,7 +338,6 @@ void kvm_vgic_early_init(struct kvm *kvm);
 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
 int kvm_vgic_create(struct kvm *kvm, u32 type);
 void kvm_vgic_destroy(struct kvm *kvm);
-void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_hyp_init(void);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 39e777155e7c..126b98fbf9ba 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -292,7 +292,6 @@ out:
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	kvm_vgic_vcpu_early_init(vcpu);
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 8901b2d8fca1..272af9704952 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -44,7 +44,7 @@
  *
  * CPU Interface:
  *
- * - kvm_vgic_vcpu_early_init(): initialization of static data that
+ * - kvm_vgic_vcpu_init(): initialization of static data that
  *   doesn't depend on any sizing information or emulation type. No
  *   allocation is allowed there.
  */
@@ -67,46 +67,6 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	spin_lock_init(&dist->lpi_list_lock);
 }
 
-/**
- * kvm_vgic_vcpu_early_init() - Initialize static VGIC VCPU data structures
- * @vcpu: The VCPU whose VGIC data structures whould be initialized
- *
- * Only do initialization, but do not actually enable the VGIC CPU interface
- * yet.
- */
-void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
-{
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	int i;
-
-	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-	spin_lock_init(&vgic_cpu->ap_list_lock);
-
-	/*
-	 * Enable and configure all SGIs to be edge-triggered and
-	 * configure all PPIs as level-triggered.
-	 */
-	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
-		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-
-		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
-		irq->intid = i;
-		irq->vcpu = NULL;
-		irq->target_vcpu = vcpu;
-		irq->targets = 1U << vcpu->vcpu_id;
-		kref_init(&irq->refcount);
-		if (vgic_irq_is_sgi(i)) {
-			/* SGIs */
-			irq->enabled = 1;
-			irq->config = VGIC_CONFIG_EDGE;
-		} else {
-			/* PPIs */
-			irq->config = VGIC_CONFIG_LEVEL;
-		}
-	}
-}
-
 /* CREATION */
 
 /**
@@ -224,13 +184,47 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 }
 
 /**
- * kvm_vgic_vcpu_init() - Register VCPU-specific KVM iodevs
+ * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
+ * structures and register VCPU-specific KVM iodevs
+ *
  * @vcpu: pointer to the VCPU being created and initialized
+ *
+ * Only do initialization, but do not actually enable the
+ * VGIC CPU interface
 */
 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	int ret = 0;
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int ret = 0;
+	int i;
+
+	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+	spin_lock_init(&vgic_cpu->ap_list_lock);
+
+	/*
+	 * Enable and configure all SGIs to be edge-triggered and
+	 * configure all PPIs as level-triggered.
+	 */
+	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+
+		INIT_LIST_HEAD(&irq->ap_list);
+		spin_lock_init(&irq->irq_lock);
+		irq->intid = i;
+		irq->vcpu = NULL;
+		irq->target_vcpu = vcpu;
+		irq->targets = 1U << vcpu->vcpu_id;
+		kref_init(&irq->refcount);
+		if (vgic_irq_is_sgi(i)) {
+			/* SGIs */
+			irq->enabled = 1;
+			irq->config = VGIC_CONFIG_EDGE;
+		} else {
+			/* PPIs */
+			irq->config = VGIC_CONFIG_LEVEL;
+		}
+	}
 
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return 0;
--
cgit v1.2.3

From e25028c8ded011d19f9a11164807507c94febc01 Mon Sep 17 00:00:00 2001
From: Eric Auger
Date: Tue, 22 May 2018 09:55:18 +0200
Subject: KVM: arm/arm64: Bump VGIC_V3_MAX_CPUS to 512

Now that HW with more physical CPUs is looming, let's raise the
number of vcpus supported with a vgic v3.

Signed-off-by: Eric Auger
Acked-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 include/kvm/arm_vgic.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 08ccbe37dcda..cfdd2484cc42 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -28,7 +28,7 @@
 
 #include
 
-#define VGIC_V3_MAX_CPUS	255
+#define VGIC_V3_MAX_CPUS	512
 #define VGIC_V2_MAX_CPUS	8
 #define VGIC_NR_IRQS_LEGACY	256
 #define VGIC_NR_SGIS		16
--
cgit v1.2.3

From 7053df4edb3ae3ae15c316fe49122c0b3936e9dd Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Wed, 16 May 2018 17:21:28 +0200
Subject: KVM: introduce kvm_make_vcpus_request_mask() API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The Hyper-V style PV TLB flush hypercalls implementation will use
this API. To avoid memory allocation in the CONFIG_CPUMASK_OFFSTACK
case, add a cpumask_var_t argument.
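For illustration (not part of this patch), a caller selects the
target VCPUs with a bitmap and passes a preallocated cpumask:

	/*
	 * Sketch: kick only the VCPUs set in vcpu_bitmap. "vcpu_bitmap"
	 * and "tmp" are placeholders for a bitmap and a cpumask the
	 * caller already owns.
	 */
	called = kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH,
					     vcpu_bitmap, tmp);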
Signed-off-by: Vitaly Kuznetsov
Signed-off-by: Radim Krčmář
---
 include/linux/kvm_host.h |  3 +++
 virt/kvm/kvm_main.c      | 34 ++++++++++++++++++++++++++--------
 2 files changed, 29 insertions(+), 8 deletions(-)

(limited to 'include')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d6e79c59e68..14e710d639c7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -730,6 +730,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
+
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
 
 long kvm_arch_dev_ioctl(struct file *filp,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c7b2e927f699..b125d94307d2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -203,29 +203,47 @@ static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
 	return true;
 }
 
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
 {
 	int i, cpu, me;
-	cpumask_var_t cpus;
-	bool called;
 	struct kvm_vcpu *vcpu;
-
-	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
+	bool called;
 
 	me = get_cpu();
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!test_bit(i, vcpu_bitmap))
+			continue;
+
 		kvm_make_request(req, vcpu);
 		cpu = vcpu->cpu;
 
 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
 			continue;
 
-		if (cpus != NULL && cpu != -1 && cpu != me &&
+		if (tmp != NULL && cpu != -1 && cpu != me &&
 		    kvm_request_needs_ipi(vcpu, req))
-			__cpumask_set_cpu(cpu, cpus);
+			__cpumask_set_cpu(cpu, tmp);
 	}
-	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
+
+	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
 	put_cpu();
+
+	return called;
+}
+
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
+{
+	cpumask_var_t cpus;
+	bool called;
+	static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]
+		= {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX};
+
+	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
+
+	called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus);
+
 	free_cpumask_var(cpus);
+
 	return called;
 }
--
cgit v1.2.3

From c1aea9196ef4f6b64a8ef7e62a933f7e4164aed9 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Wed, 16 May 2018 17:21:31 +0200
Subject: KVM: x86: hyperv: declare KVM_CAP_HYPERV_TLBFLUSH capability
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We need a new capability to indicate support for the newly added
HvFlushVirtualAddress{List,Space}{,Ex} hypercalls. Upon seeing this
capability, userspace is supposed to announce PV TLB flush features
by setting the appropriate CPUID bits (if needed).

Signed-off-by: Vitaly Kuznetsov
Signed-off-by: Radim Krčmář
---
 Documentation/virtual/kvm/api.txt | 9 +++++++++
 arch/x86/kvm/x86.c                | 1 +
 include/uapi/linux/kvm.h          | 1 +
 3 files changed, 11 insertions(+)

(limited to 'include')

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 758bf403a169..c563da4244da 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4603,3 +4603,12 @@ Architectures: s390
 This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
+
+8.14 KVM_CAP_HYPERV_TLBFLUSH
+
+Architectures: x86
+
+This capability indicates that KVM supports paravirtualized Hyper-V TLB Flush
+hypercalls:
+HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx,
+HvFlushVirtualAddressList, HvFlushVirtualAddressListEx.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b7bf9ac9b6d1..22bd20fedd6d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2871,6 +2871,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_HYPERV_SYNIC2:
 	case KVM_CAP_HYPERV_VP_INDEX:
 	case KVM_CAP_HYPERV_EVENTFD:
+	case KVM_CAP_HYPERV_TLBFLUSH:
 	case KVM_CAP_PCI_SEGMENT:
 	case KVM_CAP_DEBUGREGS:
 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b02c41e53d56..b252ceb3965c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
 
 #ifdef KVM_CAP_IRQ_ROUTING
--
cgit v1.2.3

From 1499fa809e9e6713952ef84a7e9d51606881681f Mon Sep 17 00:00:00 2001
From: Souptick Joarder
Date: Thu, 19 Apr 2018 00:49:58 +0530
Subject: kvm: Change return type to vm_fault_t

Use the new return type vm_fault_t for the fault handler. For now,
this is just documenting that the function returns a VM_FAULT value
rather than an errno. Once all instances are converted, vm_fault_t
will become a distinct type.

commit 1c8f422059ae ("mm: change return type to vm_fault_t")

Signed-off-by: Souptick Joarder
Reviewed-by: Matthew Wilcox
Signed-off-by: Paolo Bonzini
---
 arch/mips/kvm/mips.c       | 2 +-
 arch/powerpc/kvm/powerpc.c | 2 +-
 arch/s390/kvm/kvm-s390.c   | 2 +-
 arch/x86/kvm/x86.c         | 2 +-
 include/linux/kvm_host.h   | 2 +-
 virt/kvm/arm/arm.c         | 2 +-
 virt/kvm/kvm_main.c        | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'include')

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 2549fdd27ee1..03e0e0f189cc 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1076,7 +1076,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return -ENOIOCTLCMD;
 }
 
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4e387647b5af..3764d000872e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1830,7 +1830,7 @@ out:
 	return r;
 }
 
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e521f7699032..7142508ca6e1 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3993,7 +3993,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	return r;
 }
 
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 #ifdef CONFIG_KVM_S390_UCONTROL
 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 22bd20fedd6d..1d3dfc2c941d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3966,7 +3966,7 @@ out_nofree:
 	return r;
 }
 
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b81769a5a2b7..bca28637bcd4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -739,7 +739,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg);
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 126b98fbf9ba..3d92214c5038 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -165,7 +165,7 @@ int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c5f6a552e486..8938c9e553df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2358,7 +2358,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
 
-static int kvm_vcpu_fault(struct vm_fault *vmf)
+static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
 {
 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
 	struct page *page;
--
cgit v1.2.3

From d1e5b0e98ea27b4f17871dc4e8ea4b0447e35221 Mon Sep 17 00:00:00 2001
From: Marc Orr
Date: Tue, 15 May 2018 04:37:37 -0700
Subject: kvm: Make VM ioctl do valloc for some archs

The kvm struct has been bloating. For example, it's tens of kilobytes
for x86, which turns out to be a large amount of memory to allocate
contiguously via kzalloc. Thus, this patch does the following:

1. Uses architecture-specific routines to allocate the kvm struct via
   vzalloc for x86.
2. Switches arm to __KVM_HAVE_ARCH_VM_ALLOC so that it can use
   vzalloc when has_vhe() is true.

Other architectures continue to default to kzalloc, as they have a
dependency on kzalloc or have a small-enough struct kvm.
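The override pattern, condensed from the arm/arm64 hunks below
(illustrative only; the real hunks split this across asm/kvm_host.h
and virt/kvm/arm/arm.c):

	#define __KVM_HAVE_ARCH_VM_ALLOC	/* in asm/kvm_host.h */

	struct kvm *kvm_arch_alloc_vm(void)
	{
		/* vzalloc only when has_vhe(), as described above */
		return has_vhe() ? vzalloc(sizeof(struct kvm))
				 : kzalloc(sizeof(struct kvm), GFP_KERNEL);
	}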
Signed-off-by: Marc Orr
Reviewed-by: Marc Zyngier
Signed-off-by: Paolo Bonzini
---
 arch/arm/include/asm/kvm_host.h   |  4 ++++
 arch/arm64/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/svm.c                |  4 ++--
 arch/x86/kvm/vmx.c                |  4 ++--
 include/linux/kvm_host.h          |  5 +++++
 virt/kvm/arm/arm.c                | 15 +++++++++++++++
 6 files changed, 32 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f079a2039c8a..4b12f32f540c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -324,4 +324,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
 
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a4ca202ff3f2..c923d3e17ba3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -482,4 +482,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index de21d5c5168b..d9305f1723f5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1852,13 +1852,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
 
 static struct kvm *svm_vm_alloc(void)
 {
-	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
+	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
 
 	return &kvm_svm->kvm;
 }
 
 static void svm_vm_free(struct kvm *kvm)
 {
-	kfree(to_kvm_svm(kvm));
+	vfree(to_kvm_svm(kvm));
 }
 
 static void sev_vm_destroy(struct kvm *kvm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e50beb76d846..d205e9246f99 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10139,13 +10139,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {
-	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
+	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
 
 	return &kvm_vmx->kvm;
 }
 
 static void vmx_vm_free(struct kvm *kvm)
 {
-	kfree(to_kvm_vmx(kvm));
+	vfree(to_kvm_vmx(kvm));
 }
 
 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bca28637bcd4..4ee7bc548a83 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -811,6 +812,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+/*
+ * All architectures that want to use vzalloc currently also
+ * need their own kvm_arch_alloc_vm implementation.
+ */
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
 	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 3d92214c5038..72be779cffe2 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -251,6 +251,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
+struct kvm *kvm_arch_alloc_vm(void)
+{
+	if (!has_vhe())
+		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+
+	return vzalloc(sizeof(struct kvm));
+}
+
+void kvm_arch_free_vm(struct kvm *kvm)
+{
+	if (!has_vhe())
+		kfree(kvm);
+	else
+		vfree(kvm);
+}
+
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
--
cgit v1.2.3

From 766d3571d8e50d3a73b77043dc632226f9e6b389 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Fri, 8 Jun 2018 02:19:53 +0300
Subject: kvm: fix typo in flag name

KVM_X86_DISABLE_EXITS_HTL really refers to exiting on halt.
Obviously a typo: it should be named KVM_X86_DISABLE_EXITS_HLT.

Fixes: caa057a2cad ("KVM: X86: Provide a capability to disable HLT intercepts")
Cc: stable@vger.kernel.org
Signed-off-by: Michael S. Tsirkin
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/x86.c             | 4 ++--
 include/uapi/linux/kvm.h       | 4 ++--
 tools/include/uapi/linux/kvm.h | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'include')

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 439fb0c7dbc0..06dd4cdb2ca8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2899,7 +2899,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = KVM_CLOCK_TSC_STABLE;
 		break;
 	case KVM_CAP_X86_DISABLE_EXITS:
-		r |= KVM_X86_DISABLE_EXITS_HTL | KVM_X86_DISABLE_EXITS_PAUSE;
+		r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
 		if(kvm_can_mwait_in_guest())
 			r |= KVM_X86_DISABLE_EXITS_MWAIT;
 		break;
@@ -4253,7 +4253,7 @@ split_irqchip_unlock:
 		if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
 			kvm_can_mwait_in_guest())
 			kvm->arch.mwait_in_guest = true;
-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HTL)
+		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
 			kvm->arch.hlt_in_guest = true;
 		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
 			kvm->arch.pause_in_guest = true;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b252ceb3965c..b6270a3b38e9 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -677,10 +677,10 @@ struct kvm_ioeventfd {
 };
 
 #define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
+#define KVM_X86_DISABLE_EXITS_HLT            (1 << 1)
 #define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
 #define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
-                                              KVM_X86_DISABLE_EXITS_HTL | \
+                                              KVM_X86_DISABLE_EXITS_HLT | \
                                               KVM_X86_DISABLE_EXITS_PAUSE)
 
 /* for KVM_ENABLE_CAP */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index b02c41e53d56..39e364c70caf 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -677,10 +677,10 @@ struct kvm_ioeventfd {
 };
 
 #define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
+#define KVM_X86_DISABLE_EXITS_HLT            (1 << 1)
 #define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
 #define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
-                                              KVM_X86_DISABLE_EXITS_HTL | \
+                                              KVM_X86_DISABLE_EXITS_HLT | \
                                               KVM_X86_DISABLE_EXITS_PAUSE)
 
 /* for KVM_ENABLE_CAP */
--
cgit v1.2.3
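For completeness, a userspace sketch of enabling the renamed flag
(illustrative only; "vm_fd" is a placeholder for an open VM file
descriptor, and error handling is omitted):

	/* KVM_CAP_X86_DISABLE_EXITS is enabled on the VM fd */
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_DISABLE_EXITS,
		.args[0] = KVM_X86_DISABLE_EXITS_HLT,	/* was ..._HTL */
	};

	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);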