Diffstat (limited to 'arch/s390/include')
-rw-r--r--   arch/s390/include/asm/ipl.h       |  8
-rw-r--r--   arch/s390/include/asm/kvm_host.h  | 34
-rw-r--r--   arch/s390/include/asm/pgalloc.h   |  8
-rw-r--r--   arch/s390/include/asm/pgtable.h   | 72
-rw-r--r--   arch/s390/include/asm/tlb.h       |  2
-rw-r--r--   arch/s390/include/uapi/asm/kvm.h  | 10
6 files changed, 79 insertions, 55 deletions
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2fcccc0c997c..c81661e756a0 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -17,12 +17,12 @@
 #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
 			      sizeof(struct ipl_block_fcp))
 
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8)
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
 
 #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
 			      sizeof(struct ipl_block_ccw))
 
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8)
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
 
 #define IPL_MAX_SUPPORTED_VERSION (0)
 
@@ -38,10 +38,11 @@ struct ipl_list_hdr {
 	u8  pbt;
 	u8  flags;
 	u16 reserved2;
+	u8  loadparm[8];
 } __attribute__((packed));
 
 struct ipl_block_fcp {
-	u8  reserved1[313-1];
+	u8  reserved1[305-1];
 	u8  opt;
 	u8  reserved2[3];
 	u16 reserved3;
@@ -62,7 +63,6 @@ struct ipl_block_fcp {
 				 offsetof(struct ipl_block_fcp, scp_data)))
 
 struct ipl_block_ccw {
-	u8  load_parm[8];
 	u8  reserved1[84];
 	u8  reserved2[2];
 	u16 devno;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 773bef7614d8..2175f911a73a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -13,8 +13,11 @@
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+
+#include <linux/types.h>
 
 #include <linux/hrtimer.h>
 #include <linux/interrupt.h>
+#include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
 #include <asm/debug.h>
@@ -154,7 +157,9 @@ struct kvm_s390_sie_block {
 	__u8	armid;			/* 0x00e3 */
 	__u8	reservede4[4];		/* 0x00e4 */
 	__u64	tecmc;			/* 0x00e8 */
-	__u8	reservedf0[16];		/* 0x00f0 */
+	__u8	reservedf0[12];		/* 0x00f0 */
+#define CRYCB_FORMAT1 0x00000001
+	__u32	crycbd;			/* 0x00fc */
 	__u64	gcr[16];		/* 0x0100 */
 	__u64	gbea;			/* 0x0180 */
 	__u8	reserved188[24];	/* 0x0188 */
@@ -187,6 +192,7 @@ struct kvm_vcpu_stat {
 	u32 exit_stop_request;
 	u32 exit_validity;
 	u32 exit_instruction;
+	u32 halt_wakeup;
 	u32 instruction_lctl;
 	u32 instruction_lctlg;
 	u32 instruction_stctl;
@@ -407,6 +413,15 @@ struct s390_io_adapter {
 #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
 #define MAX_S390_ADAPTER_MAPS 256
 
+struct kvm_s390_crypto {
+	struct kvm_s390_crypto_cb *crycb;
+	__u32 crycbd;
+};
+
+struct kvm_s390_crypto_cb {
+	__u8	reserved00[128];	/* 0x0000 */
+};
+
 struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
@@ -420,6 +435,7 @@ struct kvm_arch{
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
 	spinlock_t start_stop_lock;
+	struct kvm_s390_crypto crypto;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
@@ -431,8 +447,6 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 }
 
 #define ASYNC_PF_PER_VCPU	64
-struct kvm_vcpu;
-struct kvm_async_pf;
 struct kvm_arch_async_pf {
 	unsigned long pfault_token;
 };
@@ -450,4 +464,18 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_check_processor_compat(void *rtn) {}
+static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *slot) {}
+
 #endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9e18a61d3df3..d39a31c3cdf2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -18,9 +18,9 @@
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
+unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 
 void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
 			    bool init_skey);
@@ -145,8 +145,8 @@ static inline void pmd_populate(struct mm_struct *mm,
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5efb2fe186e7..b7054356cc98 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
+#include <linux/radix-tree.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 
@@ -789,82 +790,67 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 /**
  * struct gmap_struct - guest address space
+ * @crst_list: list of all crst tables used in the guest address space
  * @mm: pointer to the parent mm_struct
+ * @guest_to_host: radix tree with guest to host address translation
+ * @host_to_guest: radix tree with pointer to segment table entries
+ * @guest_table_lock: spinlock to protect all entries in the guest page table
  * @table: pointer to the page directory
  * @asce: address space control element for gmap page table
- * @crst_list: list of all crst tables used in the guest address space
  * @pfault_enabled: defines if pfaults are applicable for the guest
  */
 struct gmap {
 	struct list_head list;
+	struct list_head crst_list;
 	struct mm_struct *mm;
+	struct radix_tree_root guest_to_host;
+	struct radix_tree_root host_to_guest;
+	spinlock_t guest_table_lock;
 	unsigned long *table;
 	unsigned long asce;
+	unsigned long asce_end;
 	void *private;
-	struct list_head crst_list;
 	bool pfault_enabled;
 };
 
 /**
- * struct gmap_rmap - reverse mapping for segment table entries
- * @gmap: pointer to the gmap_struct
- * @entry: pointer to a segment table entry
- * @vmaddr: virtual address in the guest address space
- */
-struct gmap_rmap {
-	struct list_head list;
-	struct gmap *gmap;
-	unsigned long *entry;
-	unsigned long vmaddr;
-};
-
-/**
- * struct gmap_pgtable - gmap information attached to a page table
- * @vmaddr: address of the 1MB segment in the process virtual memory
- * @mapper: list of segment table entries mapping a page table
- */
-struct gmap_pgtable {
-	unsigned long vmaddr;
-	struct list_head mapper;
-};
-
-/**
  * struct gmap_notifier - notify function block for page invalidation
  * @notifier_call: address of callback function
  */
 struct gmap_notifier {
 	struct list_head list;
-	void (*notifier_call)(struct gmap *gmap, unsigned long address);
+	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
 };
 
-struct gmap *gmap_alloc(struct mm_struct *mm);
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
 void gmap_free(struct gmap *gmap);
 void gmap_enable(struct gmap *gmap);
 void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long len);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
-unsigned long __gmap_translate(unsigned long address, struct gmap *);
-unsigned long gmap_translate(unsigned long address, struct gmap *);
-unsigned long __gmap_fault(unsigned long address, struct gmap *);
-unsigned long gmap_fault(unsigned long address, struct gmap *);
-void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
-void __gmap_zap(unsigned long address, struct gmap *);
+unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
+unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
+int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
+void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
+void __gmap_zap(struct gmap *, unsigned long gaddr);
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, pte_t *);
+void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
 
 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
+					unsigned long addr,
 					pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
 	if (pgste_val(pgste) & PGSTE_IN_BIT) {
 		pgste_val(pgste) &= ~PGSTE_IN_BIT;
-		gmap_do_ipte_notify(mm, ptep);
+		gmap_do_ipte_notify(mm, addr, ptep);
 	}
 #endif
 	return pgste;
@@ -1110,7 +1096,7 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
 	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 	pte = *ptep;
 	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
-		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
 		__ptep_ipte(addr, ptep);
 		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 			pte_val(pte) |= _PAGE_PROTECT;
@@ -1132,7 +1118,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
 	}
 
 	oldpte = pte = *ptep;
@@ -1179,7 +1165,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1203,7 +1189,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_ipte_notify(mm, ptep, pgste);
+		pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1240,7 +1226,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1274,7 +1260,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
 	if (!full && mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1299,7 +1285,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	if (pte_write(pte)) {
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
-			pgste = pgste_ipte_notify(mm, ptep, pgste);
+			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 		}
 
 		ptep_flush_lazy(mm, address, ptep);
@@ -1325,7 +1311,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 		return 0;
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
 	}
 
 	ptep_flush_direct(vma->vm_mm, address, ptep);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index a25f09fbaf36..572c59949004 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -105,7 +105,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	page_table_free_rcu(tlb, (unsigned long *) pte);
+	page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }
 
 /*
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 0fc26430a1e5..48eda3ab4944 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -111,12 +111,22 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_GPRS   (1UL << 1)
 #define KVM_SYNC_ACRS   (1UL << 2)
 #define KVM_SYNC_CRS    (1UL << 3)
+#define KVM_SYNC_ARCH0  (1UL << 4)
+#define KVM_SYNC_PFAULT (1UL << 5)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
 	__u64 gprs[16];	/* general purpose registers */
 	__u32 acrs[16];	/* access registers */
 	__u64 crs[16];	/* control registers */
+	__u64 todpr;	/* tod programmable register [ARCH0] */
+	__u64 cputm;	/* cpu timer [ARCH0] */
+	__u64 ckc;	/* clock comparator [ARCH0] */
+	__u64 pp;	/* program parameter [ARCH0] */
+	__u64 gbea;	/* guest breaking-event address [ARCH0] */
+	__u64 pft;	/* pfault token [PFAULT] */
+	__u64 pfs;	/* pfault select [PFAULT] */
+	__u64 pfc;	/* pfault compare [PFAULT] */
 };
 
 #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)