Diffstat (limited to 'arch/x86/hyperv')
-rw-r--r--  arch/x86/hyperv/Makefile  |   1
-rw-r--r--  arch/x86/hyperv/hv_apic.c |  12
-rw-r--r--  arch/x86/hyperv/hv_init.c |  18
-rw-r--r--  arch/x86/hyperv/hv_vtl.c  | 227
-rw-r--r--  arch/x86/hyperv/ivm.c     | 148
-rw-r--r--  arch/x86/hyperv/mmu.c     |  11
6 files changed, 336 insertions(+), 81 deletions(-)
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index 5d2de10809ae..3a1548054b48 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := hv_init.o mmu.o nested.o irqdomain.o ivm.o
obj-$(CONFIG_X86_64) += hv_apic.o hv_proc.o
+obj-$(CONFIG_HYPERV_VTL_MODE) += hv_vtl.o
ifdef CONFIG_X86_64
obj-$(CONFIG_PARAVIRT_SPINLOCKS) += hv_spinlock.o
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index fb8b2c088681..1fbda2f94184 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -96,6 +96,11 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
wrmsr(HV_X64_MSR_EOI, val, 0);
}
+static bool cpu_is_self(int cpu)
+{
+ return cpu == smp_processor_id();
+}
+
/*
* IPI implementation on Hyper-V.
*/
@@ -128,10 +133,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
*/
if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- if (exclude_self)
- nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
- else
- nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+
+ nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
+ exclude_self ? cpu_is_self : NULL);
/*
* 'nr_bank <= 0' means some CPUs in cpumask can't be
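
The hv_apic.c hunk above folds the two conversion calls into a single cpumask_to_vpset_skip(), which takes an optional per-CPU predicate: a NULL predicate keeps every CPU (the old !exclude_self path), while cpu_is_self() drops the sender. A minimal user-space sketch of that optional-callback pattern; collect_cpus() and is_cpu2() are hypothetical stand-ins, not kernel APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Walk a CPU set and drop any CPU for which the optional predicate
     * returns true; a NULL predicate keeps everything. */
    static int collect_cpus(const bool *mask, int ncpus, int *out,
                            bool (*skip)(int cpu))
    {
            int n = 0;

            for (int cpu = 0; cpu < ncpus; cpu++) {
                    if (!mask[cpu])
                            continue;
                    if (skip && skip(cpu))
                            continue;
                    out[n++] = cpu;
            }
            return n;
    }

    static bool is_cpu2(int cpu)
    {
            return cpu == 2;        /* plays the role of cpu_is_self() */
    }

    int main(void)
    {
            bool mask[4] = { true, false, true, true };
            int out[4];
            int n = collect_cpus(mask, 4, out, is_cpu2);

            for (int i = 0; i < n; i++)
                    printf("cpu %d\n", out[i]);     /* prints 0 and 3 */
            return 0;
    }
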
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 41ef036ebb7b..a5f9474f08e1 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -29,7 +29,6 @@
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
-#include <linux/swiotlb.h>
int hyperv_init_cpuhp;
u64 hv_current_partition_id = ~0ull;
@@ -64,7 +63,10 @@ static int hyperv_init_ghcb(void)
* memory boundary and map it here.
*/
rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
- ghcb_va = memremap(ghcb_gpa, HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+
+ /* Mask out vTOM bit. ioremap_cache() maps decrypted */
+ ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
+ ghcb_va = (void *)ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
if (!ghcb_va)
return -ENOMEM;
@@ -218,7 +220,7 @@ static int hv_cpu_die(unsigned int cpu)
if (hv_ghcb_pg) {
ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
if (*ghcb_va)
- memunmap(*ghcb_va);
+ iounmap(*ghcb_va);
*ghcb_va = NULL;
}
@@ -504,16 +506,6 @@ void __init hyperv_init(void)
/* Query the VMs extended capability once, so that it can be cached. */
hv_query_ext_cap(0);
-#ifdef CONFIG_SWIOTLB
- /*
- * Swiotlb bounce buffer needs to be mapped in extra address
- * space. Map function doesn't work in the early place and so
- * call swiotlb_update_mem_attributes() here.
- */
- if (hv_is_isolation_supported())
- swiotlb_update_mem_attributes();
-#endif
-
return;
clean_guest_os_id:
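
Context for the hv_init.c hunk: in a vTOM isolation VM, the bit at ms_hyperv.shared_gpa_boundary marks a guest physical address as shared with the host, so the GHCB address read from MSR_AMD64_SEV_ES_GHCB carries that bit and must be masked off before mapping. A small sketch of the arithmetic, assuming a boundary at bit 47 purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values only; the kernel reads the real
             * boundary from ms_hyperv.shared_gpa_boundary. */
            uint64_t shared_gpa_boundary = 1ULL << 47;
            uint64_t ghcb_gpa = (1ULL << 47) | 0x7f123000;

            /* Mask out the vTOM bit, as hyperv_init_ghcb() does;
             * ioremap_cache() then maps the page decrypted. */
            uint64_t mapped = ghcb_gpa & ~shared_gpa_boundary;

            printf("map %#llx instead of %#llx\n",
                   (unsigned long long)mapped,
                   (unsigned long long)ghcb_gpa);
            return 0;
    }
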
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
new file mode 100644
index 000000000000..1ba5d3b99b16
--- /dev/null
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Author:
+ * Saurabh Sengar <ssengar@microsoft.com>
+ */
+
+#include <asm/apic.h>
+#include <asm/boot.h>
+#include <asm/desc.h>
+#include <asm/i8259.h>
+#include <asm/mshyperv.h>
+#include <asm/realmode.h>
+
+extern struct boot_params boot_params;
+static struct real_mode_header hv_vtl_real_mode_header;
+
+void __init hv_vtl_init_platform(void)
+{
+ pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
+
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+ x86_init.timers.timer_init = x86_init_noop;
+
+ x86_platform.get_wallclock = get_rtc_noop;
+ x86_platform.set_wallclock = set_rtc_noop;
+ x86_platform.get_nmi_reason = hv_get_nmi_reason;
+
+ x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
+ x86_platform.legacy.rtc = 0;
+ x86_platform.legacy.warm_reset = 0;
+ x86_platform.legacy.reserve_bios_regions = 0;
+ x86_platform.legacy.devices.pnpbios = 0;
+}
+
+static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
+{
+ return ((u64)desc->base3 << 32) | ((u64)desc->base2 << 24) |
+ (desc->base1 << 16) | desc->base0;
+}
+
+static inline u32 hv_vtl_system_desc_limit(struct ldttss_desc *desc)
+{
+ return ((u32)desc->limit1 << 16) | (u32)desc->limit0;
+}
+
+typedef void (*secondary_startup_64_fn)(void*, void*);
+static void hv_vtl_ap_entry(void)
+{
+ ((secondary_startup_64_fn)secondary_startup_64)(&boot_params, &boot_params);
+}
+
+static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
+{
+ u64 status;
+ int ret = 0;
+ struct hv_enable_vp_vtl *input;
+ unsigned long irq_flags;
+
+ struct desc_ptr gdt_ptr;
+ struct desc_ptr idt_ptr;
+
+ struct ldttss_desc *tss;
+ struct ldttss_desc *ldt;
+ struct desc_struct *gdt;
+
+ u64 rsp = current->thread.sp;
+ u64 rip = (u64)&hv_vtl_ap_entry;
+
+ native_store_gdt(&gdt_ptr);
+ store_idt(&idt_ptr);
+
+ gdt = (struct desc_struct *)((void *)(gdt_ptr.address));
+ tss = (struct ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+ ldt = (struct ldttss_desc *)(gdt + GDT_ENTRY_LDT);
+
+ local_irq_save(irq_flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->vp_index = target_vp_index;
+ input->target_vtl.target_vtl = HV_VTL_MGMT;
+
+ /*
+ * The x86_64 Linux kernel follows the 16-bit -> 32-bit -> 64-bit
+ * mode transition sequence after waking up an AP with SIPI whose
+ * vector points to the 16-bit AP startup trampoline code. Here in
+ * VTL2, we can't perform that sequence as the AP has to start in
+ * the 64-bit mode.
+ *
+ * To make this happen, we tell the hypervisor to load a valid 64-bit
+ * context (most of which is just magic numbers from the CPU manual)
+ * so that AP jumps right to the 64-bit entry of the kernel, and the
+ * control registers are loaded with values that let the AP fetch the
+ * code and data and carry on with work it gets assigned.
+ */
+
+ input->vp_context.rip = rip;
+ input->vp_context.rsp = rsp;
+ input->vp_context.rflags = 0x0000000000000002;
+ input->vp_context.efer = __rdmsr(MSR_EFER);
+ input->vp_context.cr0 = native_read_cr0();
+ input->vp_context.cr3 = __native_read_cr3();
+ input->vp_context.cr4 = native_read_cr4();
+ input->vp_context.msr_cr_pat = __rdmsr(MSR_IA32_CR_PAT);
+ input->vp_context.idtr.limit = idt_ptr.size;
+ input->vp_context.idtr.base = idt_ptr.address;
+ input->vp_context.gdtr.limit = gdt_ptr.size;
+ input->vp_context.gdtr.base = gdt_ptr.address;
+
+ /* Non-system desc (64bit), long, code, present */
+ input->vp_context.cs.selector = __KERNEL_CS;
+ input->vp_context.cs.base = 0;
+ input->vp_context.cs.limit = 0xffffffff;
+ input->vp_context.cs.attributes = 0xa09b;
+ /* Non-system desc (64bit), data, present, granularity, default */
+ input->vp_context.ss.selector = __KERNEL_DS;
+ input->vp_context.ss.base = 0;
+ input->vp_context.ss.limit = 0xffffffff;
+ input->vp_context.ss.attributes = 0xc093;
+
+ /* System desc (128bit), present, LDT */
+ input->vp_context.ldtr.selector = GDT_ENTRY_LDT * 8;
+ input->vp_context.ldtr.base = hv_vtl_system_desc_base(ldt);
+ input->vp_context.ldtr.limit = hv_vtl_system_desc_limit(ldt);
+ input->vp_context.ldtr.attributes = 0x82;
+
+ /* System desc (128bit), present, TSS: 0x8b = busy, 0x89 = available */
+ input->vp_context.tr.selector = GDT_ENTRY_TSS * 8;
+ input->vp_context.tr.base = hv_vtl_system_desc_base(tss);
+ input->vp_context.tr.limit = hv_vtl_system_desc_limit(tss);
+ input->vp_context.tr.attributes = 0x8b;
+
+ status = hv_do_hypercall(HVCALL_ENABLE_VP_VTL, input, NULL);
+
+ if (!hv_result_success(status) &&
+ hv_result(status) != HV_STATUS_VTL_ALREADY_ENABLED) {
+ pr_err("HVCALL_ENABLE_VP_VTL failed for VP : %d ! [Err: %#llx\n]",
+ target_vp_index, status);
+ ret = -EINVAL;
+ goto free_lock;
+ }
+
+ status = hv_do_hypercall(HVCALL_START_VP, input, NULL);
+
+ if (!hv_result_success(status)) {
+ pr_err("HVCALL_START_VP failed for VP : %d ! [Err: %#llx]\n",
+ target_vp_index, status);
+ ret = -EINVAL;
+ }
+
+free_lock:
+ local_irq_restore(irq_flags);
+
+ return ret;
+}
+
+static int hv_vtl_apicid_to_vp_id(u32 apic_id)
+{
+ u64 control;
+ u64 status;
+ unsigned long irq_flags;
+ struct hv_get_vp_from_apic_id_in *input;
+ u32 *output, ret;
+
+ local_irq_save(irq_flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->apic_ids[0] = apic_id;
+
+ output = (u32 *)input;
+
+ control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
+ status = hv_do_hypercall(control, input, output);
+ ret = output[0];
+
+ local_irq_restore(irq_flags);
+
+ if (!hv_result_success(status)) {
+ pr_err("failed to get vp id from apic id %d, status %#llx\n",
+ apic_id, status);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
+{
+ int vp_id;
+
+ pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
+ vp_id = hv_vtl_apicid_to_vp_id(apicid);
+
+ if (vp_id < 0) {
+ pr_err("Couldn't find CPU with APIC ID %d\n", apicid);
+ return -EINVAL;
+ }
+ if (vp_id > ms_hyperv.max_vp_index) {
+ pr_err("Invalid CPU id %d for APIC ID %d\n", vp_id, apicid);
+ return -EINVAL;
+ }
+
+ return hv_vtl_bringup_vcpu(vp_id, start_eip);
+}
+
+static int __init hv_vtl_early_init(void)
+{
+ /*
+ * `cpu_feature_enabled` returns the runtime feature support,
+ * and here is the earliest it can be used.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_XSAVE))
+ panic("XSAVE has to be disabled as it is not supported by this module.\n"
+ "Please add 'noxsave' to the kernel command line.\n");
+
+ real_mode_header = &hv_vtl_real_mode_header;
+ apic->wakeup_secondary_cpu_64 = hv_vtl_wakeup_secondary_cpu;
+
+ return 0;
+}
+early_initcall(hv_vtl_early_init);
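
The "magic numbers" loaded into the VP context above follow the standard x86 segment access-rights layout: type in bits 3:0, then S, DPL and P in the rest of the low byte, and AVL, L (long mode), D/B and G in bits 15:12. A throwaway decoder makes the 0xa09b (64-bit code), 0xc093 (read/write data) and 0x8b (busy TSS) values checkable:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the 16-bit attribute format used in hv_vtl_bringup_vcpu(). */
    static void decode(const char *name, uint16_t attr)
    {
            printf("%s: type=%#x S=%d DPL=%d P=%d AVL=%d L=%d D/B=%d G=%d\n",
                   name, attr & 0xf, (attr >> 4) & 1, (attr >> 5) & 3,
                   (attr >> 7) & 1, (attr >> 12) & 1, (attr >> 13) & 1,
                   (attr >> 14) & 1, (attr >> 15) & 1);
    }

    int main(void)
    {
            decode("cs 0xa09b", 0xa09b);    /* code, L=1, G=1, P=1 */
            decode("ss 0xc093", 0xc093);    /* data RW, D/B=1, G=1 */
            decode("tr 0x008b", 0x008b);    /* busy 64-bit TSS */
            return 0;
    }
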
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 4f79dc76042d..cc92388b7a99 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -13,6 +13,8 @@
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
+#include <asm/coco.h>
+#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
@@ -233,41 +235,6 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
-#endif
-
-enum hv_isolation_type hv_get_isolation_type(void)
-{
- if (!(ms_hyperv.priv_high & HV_ISOLATION))
- return HV_ISOLATION_TYPE_NONE;
- return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
-}
-EXPORT_SYMBOL_GPL(hv_get_isolation_type);
-
-/*
- * hv_is_isolation_supported - Check system runs in the Hyper-V
- * isolation VM.
- */
-bool hv_is_isolation_supported(void)
-{
- if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
- return false;
-
- if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
- return false;
-
- return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
-}
-
-DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
-
-/*
- * hv_isolation_type_snp - Check system runs in the AMD SEV-SNP based
- * isolation VM.
- */
-bool hv_isolation_type_snp(void)
-{
- return static_branch_unlikely(&isolation_type_snp);
-}
/*
* hv_mark_gpa_visibility - Set pages visible to host via hvcall.
@@ -320,27 +287,25 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
}
/*
- * hv_set_mem_host_visibility - Set specified memory visible to host.
+ * hv_vtom_set_host_visibility - Set specified memory visible to host.
*
* In Isolation VM, all guest memory is encrypted from host and guest
* needs to set memory visible to host via hvcall before sharing memory
* with host. This function works as wrap of hv_mark_gpa_visibility()
* with memory base and size.
*/
-int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visible)
+static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
- enum hv_mem_host_visibility visibility = visible ?
- VMBUS_PAGE_VISIBLE_READ_WRITE : VMBUS_PAGE_NOT_VISIBLE;
+ enum hv_mem_host_visibility visibility = enc ?
+ VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
u64 *pfn_array;
int ret = 0;
+ bool result = true;
int i, pfn;
- if (!hv_is_isolation_supported() || !hv_hypercall_pg)
- return 0;
-
pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!pfn_array)
- return -ENOMEM;
+ return false;
for (i = 0, pfn = 0; i < pagecount; i++) {
pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
@@ -349,41 +314,98 @@ int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visibl
if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
ret = hv_mark_gpa_visibility(pfn, pfn_array,
visibility);
- if (ret)
+ if (ret) {
+ result = false;
goto err_free_pfn_array;
+ }
pfn = 0;
}
}
err_free_pfn_array:
kfree(pfn_array);
- return ret;
+ return result;
}
-/*
- * hv_map_memory - map memory to extra space in the AMD SEV-SNP Isolation VM.
- */
-void *hv_map_memory(void *addr, unsigned long size)
+static bool hv_vtom_tlb_flush_required(bool private)
{
- unsigned long *pfns = kcalloc(size / PAGE_SIZE,
- sizeof(unsigned long), GFP_KERNEL);
- void *vaddr;
- int i;
+ return true;
+}
+
+static bool hv_vtom_cache_flush_required(void)
+{
+ return false;
+}
- if (!pfns)
- return NULL;
+static bool hv_is_private_mmio(u64 addr)
+{
+ /*
+ * Hyper-V always provides a single IO-APIC in a guest VM.
+ * When a paravisor is used, it is emulated by the paravisor
+ * in the guest context and must be mapped private.
+ */
+ if (addr >= HV_IOAPIC_BASE_ADDRESS &&
+ addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
+ return true;
+
+ /* Same with a vTPM */
+ if (addr >= VTPM_BASE_ADDRESS &&
+ addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
+ return true;
+
+ return false;
+}
+
+void __init hv_vtom_init(void)
+{
+ /*
+ * By design, a VM using vTOM doesn't see the SEV setting,
+ * so SEV initialization is bypassed and sev_status isn't set.
+ * Set it here to indicate a vTOM VM.
+ */
+ sev_status = MSR_AMD64_SNP_VTOM;
+ cc_set_vendor(CC_VENDOR_AMD);
+ cc_set_mask(ms_hyperv.shared_gpa_boundary);
+ physical_mask &= ms_hyperv.shared_gpa_boundary - 1;
+
+ x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
+ x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
+ x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
+ x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
+}
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+enum hv_isolation_type hv_get_isolation_type(void)
+{
+ if (!(ms_hyperv.priv_high & HV_ISOLATION))
+ return HV_ISOLATION_TYPE_NONE;
+ return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
+}
+EXPORT_SYMBOL_GPL(hv_get_isolation_type);
- for (i = 0; i < size / PAGE_SIZE; i++)
- pfns[i] = vmalloc_to_pfn(addr + i * PAGE_SIZE) +
- (ms_hyperv.shared_gpa_boundary >> PAGE_SHIFT);
+/*
+ * hv_is_isolation_supported - Check system runs in the Hyper-V
+ * isolation VM.
+ */
+bool hv_is_isolation_supported(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return false;
- vaddr = vmap_pfn(pfns, size / PAGE_SIZE, PAGE_KERNEL_IO);
- kfree(pfns);
+ if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
+ return false;
- return vaddr;
+ return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}
-void hv_unmap_memory(void *addr)
+DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
+
+/*
+ * hv_isolation_type_snp - Check system runs in the AMD SEV-SNP based
+ * isolation VM.
+ */
+bool hv_isolation_type_snp(void)
{
- vunmap(addr);
+ return static_branch_unlikely(&isolation_type_snp);
}
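
The reworked hv_vtom_set_host_visibility() keeps the existing batching scheme: PFNs accumulate in a page-sized array and one hypercall covers each full batch of HV_MAX_MODIFY_GPA_REP_COUNT entries (or whatever remains on the last page). A self-contained sketch of that chunking loop; mark_visibility() is a hypothetical stand-in for hv_mark_gpa_visibility():

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PER_CALL 4  /* stands in for HV_MAX_MODIFY_GPA_REP_COUNT */

    static int mark_visibility(int count, const unsigned long *pfns)
    {
            printf("hypercall for %d pfns starting at %lu\n",
                   count, pfns[0]);
            return 0;       /* 0 == success, as with the real hvcall */
    }

    /* Accumulate PFNs and flush on a full batch or on the last page,
     * mirroring the loop in hv_vtom_set_host_visibility(). */
    static bool set_visibility(unsigned long first_pfn, int pagecount)
    {
            unsigned long batch[MAX_PER_CALL];
            int n = 0;

            for (int i = 0; i < pagecount; i++) {
                    batch[n++] = first_pfn + i;
                    if (n == MAX_PER_CALL || i == pagecount - 1) {
                            if (mark_visibility(n, batch))
                                    return false;
                            n = 0;
                    }
            }
            return true;
    }

    int main(void)
    {
            return set_visibility(100, 10) ? 0 : 1; /* 4 + 4 + 2 pfns */
    }
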
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 0ad2378fe6ad..8460bd35e10c 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -52,6 +52,11 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
return gva_n - offset;
}
+static bool cpu_is_lazy(int cpu)
+{
+ return per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
+}
+
static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
@@ -60,6 +65,7 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
struct hv_tlb_flush *flush;
u64 status;
unsigned long flags;
+ bool do_lazy = !info->freed_tables;
trace_hyperv_mmu_flush_tlb_multi(cpus, info);
@@ -112,6 +118,8 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
goto do_ex_hypercall;
for_each_cpu(cpu, cpus) {
+ if (do_lazy && cpu_is_lazy(cpu))
+ continue;
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL) {
local_irq_restore(flags);
@@ -198,7 +206,8 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->hv_vp_set.valid_bank_mask = 0;
flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
+ nr_bank = cpumask_to_vpset_skip(&flush->hv_vp_set, cpus,
+ info->freed_tables ? NULL : cpu_is_lazy);
if (nr_bank < 0)
return HV_STATUS_INVALID_PARAMETER;
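
The mmu.c hunks apply the same optional-predicate idea to the flush path: a CPU running in lazy TLB mode will switch to a fresh page table before touching user mappings again, so it can be skipped, unless the flush is for freed page tables, which even lazy CPUs must not keep cached. A tiny model of that decision:

    #include <stdbool.h>
    #include <stdio.h>

    /* Models the do_lazy/cpu_is_lazy() check in hyperv_flush_tlb_multi(). */
    static bool must_flush(bool cpu_is_lazy, bool freed_tables)
    {
            return freed_tables || !cpu_is_lazy;
    }

    int main(void)
    {
            printf("lazy, no freed tables: %d\n", must_flush(true, false));
            printf("lazy, freed tables:    %d\n", must_flush(true, true));
            printf("active cpu:            %d\n", must_flush(false, false));
            return 0;
    }
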