Diffstat (limited to 'include')
-rw-r--r--include/asm-x86/Kbuild1
-rw-r--r--include/asm-x86/kvm.h191
-rw-r--r--include/asm-x86/kvm_host.h611
-rw-r--r--include/asm-x86/kvm_para.h105
-rw-r--r--include/asm-x86/kvm_x86_emulate.h186
-rw-r--r--include/asm-x86/lguest.h2
-rw-r--r--include/asm-x86/lguest_hcall.h6
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/kvm.h203
-rw-r--r--include/linux/kvm_host.h299
-rw-r--r--include/linux/kvm_para.h82
-rw-r--r--include/linux/kvm_types.h54
-rw-r--r--include/linux/selinux.h45
-rw-r--r--include/net/netlabel.h99
-rw-r--r--include/scsi/scsi.h20
-rw-r--r--include/scsi/scsi_cmnd.h59
-rw-r--r--include/scsi/scsi_eh.h9
-rw-r--r--include/scsi/scsi_host.h17
20 files changed, 1714 insertions, 282 deletions
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index e6189b229143..3c6f0f80e827 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -3,6 +3,7 @@ include include/asm-generic/Kbuild.asm
header-y += boot.h
header-y += bootparam.h
header-y += debugreg.h
+header-y += kvm.h
header-y += ldt.h
header-y += msr-index.h
header-y += prctl.h
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
new file mode 100644
index 000000000000..7a71120426a3
--- /dev/null
+++ b/include/asm-x86/kvm.h
@@ -0,0 +1,191 @@
+#ifndef __LINUX_KVM_X86_H
+#define __LINUX_KVM_X86_H
+
+/*
+ * KVM x86 specific structures and definitions
+ *
+ */
+
+#include <asm/types.h>
+#include <linux/ioctl.h>
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+struct kvm_memory_alias {
+ __u32 slot; /* this has a different namespace than memory slots */
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size;
+ __u64 target_phys_addr;
+};
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+ __u8 last_irr; /* edge detection */
+ __u8 irr; /* interrupt request register */
+ __u8 imr; /* interrupt mask register */
+ __u8 isr; /* interrupt service register */
+ __u8 priority_add; /* highest irq priority */
+ __u8 irq_base;
+ __u8 read_reg_select;
+ __u8 poll;
+ __u8 special_mask;
+ __u8 init_state;
+ __u8 auto_eoi;
+ __u8 rotate_on_auto_eoi;
+ __u8 special_fully_nested_mode;
+ __u8 init4; /* true if 4 byte init */
+ __u8 elcr; /* PIIX edge/trigger selection */
+ __u8 elcr_mask;
+};
+
+#define KVM_IOAPIC_NUM_PINS 24
+struct kvm_ioapic_state {
+ __u64 base_address;
+ __u32 ioregsel;
+ __u32 id;
+ __u32 irr;
+ __u32 pad;
+ union {
+ __u64 bits;
+ struct {
+ __u8 vector;
+ __u8 delivery_mode:3;
+ __u8 dest_mode:1;
+ __u8 delivery_status:1;
+ __u8 polarity:1;
+ __u8 remote_irr:1;
+ __u8 trig_mode:1;
+ __u8 mask:1;
+ __u8 reserve:7;
+ __u8 reserved[4];
+ __u8 dest_id;
+ } fields;
+ } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER 0
+#define KVM_IRQCHIP_PIC_SLAVE 1
+#define KVM_IRQCHIP_IOAPIC 2
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 rax, rbx, rcx, rdx;
+ __u64 rsi, rdi, rsp, rbp;
+ __u64 r8, r9, r10, r11;
+ __u64 r12, r13, r14, r15;
+ __u64 rip, rflags;
+};
+
+/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
+#define KVM_APIC_REG_SIZE 0x400
+struct kvm_lapic_state {
+ char regs[KVM_APIC_REG_SIZE];
+};
+
+struct kvm_segment {
+ __u64 base;
+ __u32 limit;
+ __u16 selector;
+ __u8 type;
+ __u8 present, dpl, db, s, l, g, avl;
+ __u8 unusable;
+ __u8 padding;
+};
+
+struct kvm_dtable {
+ __u64 base;
+ __u16 limit;
+ __u16 padding[3];
+};
+
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+ /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+ __u8 fpr[8][16];
+ __u16 fcw;
+ __u16 fsw;
+ __u8 ftwx; /* in fxsave format */
+ __u8 pad1;
+ __u16 last_opcode;
+ __u64 last_ip;
+ __u64 last_dp;
+ __u8 xmm[16][16];
+ __u32 mxcsr;
+ __u32 pad2;
+};
+
+struct kvm_msr_entry {
+ __u32 index;
+ __u32 reserved;
+ __u64 data;
+};
+
+/* for KVM_GET_MSRS and KVM_SET_MSRS */
+struct kvm_msrs {
+ __u32 nmsrs; /* number of msrs in entries */
+ __u32 pad;
+
+ struct kvm_msr_entry entries[0];
+};
+
+/* for KVM_GET_MSR_INDEX_LIST */
+struct kvm_msr_list {
+ __u32 nmsrs; /* number of msrs in entries */
+ __u32 indices[0];
+};
+
+
+struct kvm_cpuid_entry {
+ __u32 function;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding;
+};
+
+/* for KVM_SET_CPUID */
+struct kvm_cpuid {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry entries[0];
+};
+
+struct kvm_cpuid_entry2 {
+ __u32 function;
+ __u32 index;
+ __u32 flags;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding[3];
+};
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
+#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
+#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
+
+/* for KVM_SET_CPUID2 */
+struct kvm_cpuid2 {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry2 entries[0];
+};
+
+#endif
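
The regs/sregs/fpu layouts above are the payloads of the vcpu state ioctls defined later in linux/kvm.h. As a minimal userspace sketch (vcpu_fd is hypothetical and assumed to come from KVM_CREATE_VCPU; error handling trimmed), reading the register file looks like:

/* Sketch only: dump the guest instruction pointer via KVM_GET_REGS. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void dump_rip(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
		perror("KVM_GET_REGS");
		return;
	}
	printf("rip=0x%llx rflags=0x%llx\n",
	       (unsigned long long)regs.rip,
	       (unsigned long long)regs.rflags);
}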
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
new file mode 100644
index 000000000000..4702b04b979a
--- /dev/null
+++ b/include/asm-x86/kvm_host.h
@@ -0,0 +1,611 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * This header defines architecture specific interfaces, x86 version
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef ASM_KVM_HOST_H
+#define ASM_KVM_HOST_H
+
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/desc.h>
+
+#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
+#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
+#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
+
+#define KVM_GUEST_CR0_MASK \
+ (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
+ | X86_CR0_NW | X86_CR0_CD)
+#define KVM_VM_CR0_ALWAYS_ON \
+ (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
+ | X86_CR0_MP)
+#define KVM_GUEST_CR4_MASK \
+ (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
+#define INVALID_PAGE (~(hpa_t)0)
+#define UNMAPPED_GVA (~(gpa_t)0)
+
+#define DE_VECTOR 0
+#define UD_VECTOR 6
+#define NM_VECTOR 7
+#define DF_VECTOR 8
+#define TS_VECTOR 10
+#define NP_VECTOR 11
+#define SS_VECTOR 12
+#define GP_VECTOR 13
+#define PF_VECTOR 14
+
+#define SELECTOR_TI_MASK (1 << 2)
+#define SELECTOR_RPL_MASK 0x03
+
+#define IOPL_SHIFT 12
+
+#define KVM_ALIAS_SLOTS 4
+
+#define KVM_PERMILLE_MMU_PAGES 20
+#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MIN_FREE_MMU_PAGES 5
+#define KVM_REFILL_PAGES 25
+#define KVM_MAX_CPUID_ENTRIES 40
+
+extern spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
+struct kvm_vcpu;
+struct kvm;
+
+enum {
+ VCPU_REGS_RAX = 0,
+ VCPU_REGS_RCX = 1,
+ VCPU_REGS_RDX = 2,
+ VCPU_REGS_RBX = 3,
+ VCPU_REGS_RSP = 4,
+ VCPU_REGS_RBP = 5,
+ VCPU_REGS_RSI = 6,
+ VCPU_REGS_RDI = 7,
+#ifdef CONFIG_X86_64
+ VCPU_REGS_R8 = 8,
+ VCPU_REGS_R9 = 9,
+ VCPU_REGS_R10 = 10,
+ VCPU_REGS_R11 = 11,
+ VCPU_REGS_R12 = 12,
+ VCPU_REGS_R13 = 13,
+ VCPU_REGS_R14 = 14,
+ VCPU_REGS_R15 = 15,
+#endif
+ NR_VCPU_REGS
+};
+
+enum {
+ VCPU_SREG_CS,
+ VCPU_SREG_DS,
+ VCPU_SREG_ES,
+ VCPU_SREG_FS,
+ VCPU_SREG_GS,
+ VCPU_SREG_SS,
+ VCPU_SREG_TR,
+ VCPU_SREG_LDTR,
+};
+
+#include <asm/kvm_x86_emulate.h>
+
+#define KVM_NR_MEM_OBJS 40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
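
To make the preallocation invariant concrete: by the time a fault is handled, each cache already holds enough objects, so consumption is a simple pop. A sketch modeled on the MMU's internal helper (not something this header exports):

/* Illustrative: pop one preallocated object; the topup step (not shown)
 * guarantees nobjs > 0 before fault handling begins. */
static void *example_mmu_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);
	return mc->objects[--mc->nobjs];
}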
+
+#define NR_PTE_CHAIN_ENTRIES 5
+
+struct kvm_pte_chain {
+ u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
+ struct hlist_node link;
+};
+
+/*
+ * kvm_mmu_page_role, below, is defined as:
+ *
+ * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
+ * bits 4:7 - page table level for this shadow (1-4)
+ * bits 8:9 - page table quadrant for 2-level guests
+ * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
+ * bits 17:19 - common access permissions for all ptes in this shadow page
+ */
+union kvm_mmu_page_role {
+ unsigned word;
+ struct {
+ unsigned glevels : 4;
+ unsigned level : 4;
+ unsigned quadrant : 2;
+ unsigned pad_for_nice_hex_output : 6;
+ unsigned metaphysical : 1;
+ unsigned access : 3;
+ };
+};
+
+struct kvm_mmu_page {
+ struct list_head link;
+ struct hlist_node hash_link;
+
+ /*
+ * The following two entries are used to key the shadow page in the
+ * hash table.
+ */
+ gfn_t gfn;
+ union kvm_mmu_page_role role;
+
+ u64 *spt;
+ /* hold the gfn of each spte inside spt */
+ gfn_t *gfns;
+ unsigned long slot_bitmap; /* One bit set per slot which has memory
+ * in this shadow page.
+ */
+ int multimapped; /* More than one parent_pte? */
+ int root_count; /* Currently serving as active root */
+ union {
+ u64 *parent_pte; /* !multimapped */
+ struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
+ };
+};
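
The packed role word plus the gfn is what keys a shadow page in the hash table mentioned above; a hypothetical comparison helper makes the idea concrete:

/* Sketch: two lookups refer to the same shadow page iff both the guest
 * frame number and the packed role word match. */
static inline int example_mmu_page_matches(struct kvm_mmu_page *sp, gfn_t gfn,
					   union kvm_mmu_page_role role)
{
	return sp->gfn == gfn && sp->role.word == role.word;
}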
+
+/*
+ * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
+ * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
+ * mode.
+ */
+struct kvm_mmu {
+ void (*new_cr3)(struct kvm_vcpu *vcpu);
+ int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+ void (*free)(struct kvm_vcpu *vcpu);
+ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+ void (*prefetch_page)(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page);
+ hpa_t root_hpa;
+ int root_level;
+ int shadow_root_level;
+
+ u64 *pae_root;
+};
+
+struct kvm_vcpu_arch {
+ u64 host_tsc;
+ int interrupt_window_open;
+ unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
+ DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
+ unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
+ unsigned long rip; /* needs vcpu_load_rsp_rip() */
+
+ unsigned long cr0;
+ unsigned long cr2;
+ unsigned long cr3;
+ unsigned long cr4;
+ unsigned long cr8;
+ u64 pdptrs[4]; /* pae */
+ u64 shadow_efer;
+ u64 apic_base;
+ struct kvm_lapic *apic; /* kernel irqchip context */
+#define VCPU_MP_STATE_RUNNABLE 0
+#define VCPU_MP_STATE_UNINITIALIZED 1
+#define VCPU_MP_STATE_INIT_RECEIVED 2
+#define VCPU_MP_STATE_SIPI_RECEIVED 3
+#define VCPU_MP_STATE_HALTED 4
+ int mp_state;
+ int sipi_vector;
+ u64 ia32_misc_enable_msr;
+ bool tpr_access_reporting;
+
+ struct kvm_mmu mmu;
+
+ struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+ struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+ struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memory_cache mmu_page_header_cache;
+
+ gfn_t last_pt_write_gfn;
+ int last_pt_write_count;
+ u64 *last_pte_updated;
+
+ struct {
+ gfn_t gfn; /* presumed gfn during guest pte update */
+ struct page *page; /* page corresponding to that gfn */
+ } update_pte;
+
+ struct i387_fxsave_struct host_fx_image;
+ struct i387_fxsave_struct guest_fx_image;
+
+ gva_t mmio_fault_cr2;
+ struct kvm_pio_request pio;
+ void *pio_data;
+
+ struct kvm_queued_exception {
+ bool pending;
+ bool has_error_code;
+ u8 nr;
+ u32 error_code;
+ } exception;
+
+ struct {
+ int active;
+ u8 save_iopl;
+ struct kvm_save_segment {
+ u16 selector;
+ unsigned long base;
+ u32 limit;
+ u32 ar;
+ } tr, es, ds, fs, gs;
+ } rmode;
+ int halt_request; /* real mode on Intel only */
+
+ int cpuid_nent;
+ struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+ /* emulate context */
+
+ struct x86_emulate_ctxt emulate_ctxt;
+};
+
+struct kvm_mem_alias {
+ gfn_t base_gfn;
+ unsigned long npages;
+ gfn_t target_gfn;
+};
+
+struct kvm_arch {
+ int naliases;
+ struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+
+ unsigned int n_free_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_alloc_mmu_pages;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ /*
+ * Hash table of struct kvm_mmu_page.
+ */
+ struct list_head active_mmu_pages;
+ struct kvm_pic *vpic;
+ struct kvm_ioapic *vioapic;
+
+ int round_robin_prev_vcpu;
+ unsigned int tss_addr;
+ struct page *apic_access_page;
+};
+
+struct kvm_vm_stat {
+ u32 mmu_shadow_zapped;
+ u32 mmu_pte_write;
+ u32 mmu_pte_updated;
+ u32 mmu_pde_zapped;
+ u32 mmu_flooded;
+ u32 mmu_recycled;
+ u32 mmu_cache_miss;
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 pf_fixed;
+ u32 pf_guest;
+ u32 tlb_flush;
+ u32 invlpg;
+
+ u32 exits;
+ u32 io_exits;
+ u32 mmio_exits;
+ u32 signal_exits;
+ u32 irq_window_exits;
+ u32 halt_exits;
+ u32 halt_wakeup;
+ u32 request_irq_exits;
+ u32 irq_exits;
+ u32 host_state_reload;
+ u32 efer_reload;
+ u32 fpu_reload;
+ u32 insn_emulation;
+ u32 insn_emulation_fail;
+};
+
+struct descriptor_table {
+ u16 limit;
+ unsigned long base;
+} __attribute__((packed));
+
+struct kvm_x86_ops {
+ int (*cpu_has_kvm_support)(void); /* __init */
+ int (*disabled_by_bios)(void); /* __init */
+ void (*hardware_enable)(void *dummy); /* __init */
+ void (*hardware_disable)(void *dummy);
+ void (*check_processor_compatibility)(void *rtn);
+ int (*hardware_setup)(void); /* __init */
+ void (*hardware_unsetup)(void); /* __exit */
+ bool (*cpu_has_accelerated_tpr)(void);
+
+ /* Create, but do not attach this VCPU */
+ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+ int (*vcpu_reset)(struct kvm_vcpu *vcpu);
+
+ void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*vcpu_decache)(struct kvm_vcpu *vcpu);
+
+ int (*set_guest_debug)(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg);
+ void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
+ int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+ int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+ u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
+ void (*get_segment)(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg);
+ void (*set_segment)(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg);
+ void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+ void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
+ void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
+ void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
+ void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+ void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+ void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+ void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+ void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+ unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
+ void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
+ int *exception);
+ void (*cache_regs)(struct kvm_vcpu *vcpu);
+ void (*decache_regs)(struct kvm_vcpu *vcpu);
+ unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+ void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+
+ void (*tlb_flush)(struct kvm_vcpu *vcpu);
+
+ void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
+ int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+ void (*patch_hypercall)(struct kvm_vcpu *vcpu,
+ unsigned char *hypercall_addr);
+ int (*get_irq)(struct kvm_vcpu *vcpu);
+ void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+ void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
+ bool has_error_code, u32 error_code);
+ bool (*exception_injected)(struct kvm_vcpu *vcpu);
+ void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
+ void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
+ struct kvm_run *run);
+
+ int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+};
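
Each hardware backend (vmx or svm) fills in one of these tables and hands it to the core through kvm_init()'s opaque argument. A sketch of the shape, with hypothetical stub callbacks standing in for what vmx.c/svm.c implement:

/* Illustrative only: field names are real, implementations are stubs. */
static int example_cpu_has_kvm_support(void) { return 1; }
static int example_hardware_setup(void) { return 0; }

static struct kvm_x86_ops example_x86_ops = {
	.cpu_has_kvm_support = example_cpu_has_kvm_support,
	.hardware_setup      = example_hardware_setup,
	/* remaining callbacks omitted in this sketch */
};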
+
+extern struct kvm_x86_ops *kvm_x86_ops;
+
+int kvm_mmu_module_init(void);
+void kvm_mmu_module_exit(void);
+
+void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
+int kvm_mmu_create(struct kvm_vcpu *vcpu);
+int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+
+int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_zap_all(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+};
+
+#define EMULTYPE_NO_DECODE (1 << 0)
+#define EMULTYPE_TRAP_UD (1 << 1)
+int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long cr2, u16 error_code, int emulation_type);
+void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
+void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
+void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
+void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
+ unsigned long *rflags);
+
+unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
+void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
+ unsigned long *rflags);
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+
+struct x86_emulate_ctxt;
+
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+ int size, unsigned port);
+int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+ int size, unsigned long count, int down,
+ gva_t address, int rep, unsigned port);
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
+int emulate_clts(struct kvm_vcpu *vcpu);
+int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long *dest);
+int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long value);
+
+void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+unsigned long get_cr8(struct kvm_vcpu *vcpu);
+void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+
+void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
+ u32 error_code);
+
+void fx_init(struct kvm_vcpu *vcpu);
+
+int emulator_read_std(unsigned long addr,
+ void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
+int emulator_write_emulated(unsigned long addr,
+ const void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
+
+unsigned long segment_base(u16 selector);
+
+void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *new, int bytes);
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
+
+int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
+
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+int complete_pio(struct kvm_vcpu *vcpu);
+
+static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+{
+ struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
+
+ return (struct kvm_mmu_page *)page_private(page);
+}
+
+static inline u16 read_fs(void)
+{
+ u16 seg;
+ asm("mov %%fs, %0" : "=g"(seg));
+ return seg;
+}
+
+static inline u16 read_gs(void)
+{
+ u16 seg;
+ asm("mov %%gs, %0" : "=g"(seg));
+ return seg;
+}
+
+static inline u16 read_ldt(void)
+{
+ u16 ldt;
+ asm("sldt %0" : "=g"(ldt));
+ return ldt;
+}
+
+static inline void load_fs(u16 sel)
+{
+ asm("mov %0, %%fs" : : "rm"(sel));
+}
+
+static inline void load_gs(u16 sel)
+{
+ asm("mov %0, %%gs" : : "rm"(sel));
+}
+
+#ifndef load_ldt
+static inline void load_ldt(u16 sel)
+{
+ asm("lldt %0" : : "rm"(sel));
+}
+#endif
+
+static inline void get_idt(struct descriptor_table *table)
+{
+ asm("sidt %0" : "=m"(*table));
+}
+
+static inline void get_gdt(struct descriptor_table *table)
+{
+ asm("sgdt %0" : "=m"(*table));
+}
+
+static inline unsigned long read_tr_base(void)
+{
+ u16 tr;
+ asm("str %0" : "=g"(tr));
+ return segment_base(tr);
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long read_msr(unsigned long msr)
+{
+ u64 value;
+
+ rdmsrl(msr, value);
+ return value;
+}
+#endif
+
+static inline void fx_save(struct i387_fxsave_struct *image)
+{
+ asm("fxsave (%0)":: "r" (image));
+}
+
+static inline void fx_restore(struct i387_fxsave_struct *image)
+{
+ asm("fxrstor (%0)":: "r" (image));
+}
+
+static inline void fpu_init(void)
+{
+ asm("finit");
+}
+
+static inline u32 get_rdx_init_val(void)
+{
+ return 0x600; /* P6 family */
+}
+
+static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+{
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+}
+
+#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
+#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
+#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
+#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
+#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
+#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
+#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
+#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
+
+#define MSR_IA32_TIME_STAMP_COUNTER 0x010
+
+#define TSS_IOPB_BASE_OFFSET 0x66
+#define TSS_BASE_SIZE 0x68
+#define TSS_IOPB_SIZE (65536 / 8)
+#define TSS_REDIRECTION_SIZE (256 / 8)
+#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+
+#endif
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
new file mode 100644
index 000000000000..c6f3fd8d8c53
--- /dev/null
+++ b/include/asm-x86/kvm_para.h
@@ -0,0 +1,105 @@
+#ifndef __X86_KVM_PARA_H
+#define __X86_KVM_PARA_H
+
+/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
+ * should be used to determine whether a VM is running under KVM.
+ */
+#define KVM_CPUID_SIGNATURE 0x40000000
+
+/* This CPUID returns a feature bitmap in eax. Before enabling a particular
+ * paravirtualization, the appropriate feature bit should be checked.
+ */
+#define KVM_CPUID_FEATURES 0x40000001
+
+#ifdef __KERNEL__
+#include <asm/processor.h>
+
+/* This instruction is vmcall. On non-VT architectures, it will generate a
+ * trap that we will then rewrite to the appropriate instruction.
+ */
+#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+
+/* For KVM hypercalls, a three-byte sequence of either the vmcall or the
+ * vmmcall instruction. The hypervisor may replace it with something else
+ * but only these instructions are guaranteed to be supported.
+ *
+ * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
+ * The hypercall number should be placed in rax and the return value will be
+ * placed in rax. No other registers will be clobbered unless explicitly
+ * noted by the particular hypercall.
+ */
+
+static inline long kvm_hypercall0(unsigned int nr)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr));
+ return ret;
+}
+
+static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1));
+ return ret;
+}
+
+static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2));
+ return ret;
+}
+
+static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
+ return ret;
+}
+
+static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
+ return ret;
+}
+
+static inline int kvm_para_available(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char signature[13];
+
+ cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
+ memcpy(signature + 0, &ebx, 4);
+ memcpy(signature + 4, &ecx, 4);
+ memcpy(signature + 8, &edx, 4);
+ signature[12] = 0;
+
+ if (strcmp(signature, "KVMKVMKVM") == 0)
+ return 1;
+
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return cpuid_eax(KVM_CPUID_FEATURES);
+}
+
+#endif
+
+#endif
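
Putting the pieces together, a guest probes for KVM before issuing any hypercall. A sketch using the only hypercall number this series introduces (KVM_HC_VAPIC_POLL_IRQ, defined in linux/kvm_para.h below):

/* Guest-side sketch: probe via CPUID signature, then issue a
 * zero-argument hypercall. Assumes <linux/kvm_para.h> is included. */
static void example_vapic_poll(void)
{
	if (!kvm_para_available())
		return;
	kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
}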
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
new file mode 100644
index 000000000000..7db91b9bdcd4
--- /dev/null
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -0,0 +1,186 @@
+/******************************************************************************
+ * x86_emulate.h
+ *
+ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
+ *
+ * Copyright (c) 2005 Keir Fraser
+ *
+ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
+ */
+
+#ifndef __X86_EMULATE_H__
+#define __X86_EMULATE_H__
+
+struct x86_emulate_ctxt;
+
+/*
+ * x86_emulate_ops:
+ *
+ * These operations represent the instruction emulator's interface to memory.
+ * There are two categories of operation: those that act on ordinary memory
+ * regions (*_std), and those that act on memory regions known to require
+ * special treatment or emulation (*_emulated).
+ *
+ * The emulator assumes that an instruction accesses only one 'emulated memory'
+ * location, that this location is the given linear faulting address (cr2), and
+ * that this is one of the instruction's data operands. Instruction fetches and
+ * stack operations are assumed never to access emulated memory. The emulator
+ * automatically deduces which operand of a string-move operation is accessing
+ * emulated memory, and assumes that the other operand accesses normal memory.
+ *
+ * NOTES:
+ * 1. The emulator isn't very smart about emulated vs. standard memory.
+ * 'Emulated memory' access addresses should be checked for sanity.
+ * 'Normal memory' accesses may fault, and the caller must arrange to
+ * detect and handle reentrancy into the emulator via recursive faults.
+ * Accesses may be unaligned and may cross page boundaries.
+ * 2. If the access fails (cannot emulate, or a standard access faults) then
+ * it is up to the memop to propagate the fault to the guest VM via
+ * some out-of-band mechanism, unknown to the emulator. The memop signals
+ * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
+ * then immediately bail.
+ * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
+ * cmpxchg8b_emulated needs to support 8-byte accesses.
+ * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
+ */
+/* Access completed successfully: continue emulation as normal. */
+#define X86EMUL_CONTINUE 0
+/* Access is unhandleable: bail from emulation and return error to caller. */
+#define X86EMUL_UNHANDLEABLE 1
+/* Terminate emulation but return success to the caller. */
+#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
+#define X86EMUL_RETRY_INSTR 2 /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED 2 /* cmpxchg did not see expected value */
+struct x86_emulate_ops {
+ /*
+ * read_std: Read bytes of standard (non-emulated/special) memory.
+ * Used for instruction fetch, stack operations, and others.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*read_std)(unsigned long addr, void *val,
+ unsigned int bytes, struct kvm_vcpu *vcpu);
+
+ /*
+ * read_emulated: Read bytes from emulated/special memory area.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*read_emulated) (unsigned long addr,
+ void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
+
+ /*
+ * write_emulated: Write bytes to emulated/special memory area.
+ * @addr: [IN ] Linear address to which to write.
+ * @val: [IN ] Value to write to memory (low-order bytes used as
+ * required).
+ * @bytes: [IN ] Number of bytes to write to memory.
+ */
+ int (*write_emulated) (unsigned long addr,
+ const void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
+
+ /*
+ * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
+ * emulated/special memory area.
+ * @addr: [IN ] Linear address to access.
+ * @old: [IN ] Value expected to be current at @addr.
+ * @new: [IN ] Value to write to @addr.
+ * @bytes: [IN ] Number of bytes to access using CMPXCHG.
+ */
+ int (*cmpxchg_emulated) (unsigned long addr,
+ const void *old,
+ const void *new,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
+
+};
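
The KVM core supplies these callbacks when it invokes the emulator. A sketch of the wiring, using only the two helpers this series declares in kvm_host.h (the other two callbacks are provided by the core's x86.c and are left unset here):

/* Sketch: populating the ops table from the declarations in kvm_host.h. */
static struct x86_emulate_ops example_emulate_ops = {
	.read_std       = emulator_read_std,
	.write_emulated = emulator_write_emulated,
	/* .read_emulated / .cmpxchg_emulated: supplied by the KVM core. */
};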
+
+/* Type, address-of, and value of an instruction's operand. */
+struct operand {
+ enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
+ unsigned int bytes;
+ unsigned long val, orig_val, *ptr;
+};
+
+struct fetch_cache {
+ u8 data[15];
+ unsigned long start;
+ unsigned long end;
+};
+
+struct decode_cache {
+ u8 twobyte;
+ u8 b;
+ u8 lock_prefix;
+ u8 rep_prefix;
+ u8 op_bytes;
+ u8 ad_bytes;
+ u8 rex_prefix;
+ struct operand src;
+ struct operand dst;
+ unsigned long *override_base;
+ unsigned int d;
+ unsigned long regs[NR_VCPU_REGS];
+ unsigned long eip;
+ /* modrm */
+ u8 modrm;
+ u8 modrm_mod;
+ u8 modrm_reg;
+ u8 modrm_rm;
+ u8 use_modrm_ea;
+ unsigned long modrm_ea;
+ unsigned long modrm_val;
+ struct fetch_cache fetch;
+};
+
+struct x86_emulate_ctxt {
+ /* Register state before/after emulation. */
+ struct kvm_vcpu *vcpu;
+
+ /* Guest EFLAGS, read and updated by the emulator. */
+ unsigned long eflags;
+
+ /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+ int mode;
+
+ unsigned long cs_base;
+ unsigned long ds_base;
+ unsigned long es_base;
+ unsigned long ss_base;
+ unsigned long gs_base;
+ unsigned long fs_base;
+
+ /* decode cache */
+
+ struct decode_cache decode;
+};
+
+/* Repeat String Operation Prefix */
+#define REPE_PREFIX 1
+#define REPNE_PREFIX 2
+
+/* Execution mode, passed to the emulator. */
+#define X86EMUL_MODE_REAL 0 /* Real mode. */
+#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
+#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
+#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
+
+/* Host execution mode. */
+#if defined(__i386__)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
+#elif defined(CONFIG_X86_64)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
+#endif
+
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops);
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops);
+
+#endif /* __X86_EMULATE_H__ */
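
The split into x86_decode_insn() and x86_emulate_insn() implies a two-phase flow: decode first, then execute, with the same ctxt/ops pair throughout. A caller sketch (return-value handling simplified; nonzero treated as failure):

/* Sketch: the two-phase decode/execute flow the declarations imply. */
static int example_emulate_one(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	int rc;

	rc = x86_decode_insn(ctxt, ops);
	if (rc == 0)
		rc = x86_emulate_insn(ctxt, ops);
	return rc;
}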
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index 1c8367a692f6..4d9367b72976 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -56,7 +56,7 @@ struct lguest_ro_state
struct desc_struct guest_gdt[GDT_ENTRIES];
};
-struct lguest_arch
+struct lg_cpu_arch
{
/* The GDT entries copied into lguest_ro_state when running. */
struct desc_struct gdt[GDT_ENTRIES];
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index 2091779e91fb..758b9a5d4539 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -4,7 +4,7 @@
#define LHCALL_FLUSH_ASYNC 0
#define LHCALL_LGUEST_INIT 1
-#define LHCALL_CRASH 2
+#define LHCALL_SHUTDOWN 2
#define LHCALL_LOAD_GDT 3
#define LHCALL_NEW_PGTABLE 4
#define LHCALL_FLUSH_TLB 5
@@ -20,6 +20,10 @@
#define LGUEST_TRAP_ENTRY 0x1F
+/* Argument number 3 to LHCALL_SHUTDOWN */
+#define LGUEST_SHUTDOWN_POWEROFF 1
+#define LGUEST_SHUTDOWN_RESTART 2
+
#ifndef __ASSEMBLY__
#include <asm/hw_irq.h>
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 27b9350052b4..85b2482cc736 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -100,7 +100,6 @@ header-y += iso_fs.h
header-y += ixjuser.h
header-y += jffs2.h
header-y += keyctl.h
-header-y += kvm.h
header-y += limits.h
header-y += lock_dlm_plock.h
header-y += magic.h
@@ -256,6 +255,7 @@ unifdef-y += kd.h
unifdef-y += kernelcapi.h
unifdef-y += kernel.h
unifdef-y += keyboard.h
+unifdef-$(CONFIG_HAVE_KVM) += kvm.h
unifdef-y += llc.h
unifdef-y += loop.h
unifdef-y += lp.h
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c68781692838..bdd6f5de5fc4 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -115,6 +115,8 @@
#define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Not used */
#define AUDIT_MAC_IPSEC_DELSPD 1414 /* Not used */
#define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */
+#define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */
+#define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */
#define AUDIT_FIRST_KERN_ANOM_MSG 1700
#define AUDIT_LAST_KERN_ANOM_MSG 1799
diff --git a/include/linux/device.h b/include/linux/device.h
index 1880208964d6..db375be333c7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -84,6 +84,9 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
struct device *bus_find_device(struct bus_type *bus, struct device *start,
void *data,
int (*match)(struct device *dev, void *data));
+struct device *bus_find_device_by_name(struct bus_type *bus,
+ struct device *start,
+ const char *name);
int __must_check bus_for_each_drv(struct bus_type *bus,
struct device_driver *start, void *data,
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 057a7f34ee36..4de4fd2d8607 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -9,12 +9,10 @@
#include <asm/types.h>
#include <linux/ioctl.h>
+#include <asm/kvm.h>
#define KVM_API_VERSION 12
-/* Architectural interrupt line count. */
-#define KVM_NR_INTERRUPTS 256
-
/* for KVM_CREATE_MEMORY_REGION */
struct kvm_memory_region {
__u32 slot;
@@ -23,17 +21,19 @@ struct kvm_memory_region {
__u64 memory_size; /* bytes */
};
-/* for kvm_memory_region::flags */
-#define KVM_MEM_LOG_DIRTY_PAGES 1UL
-
-struct kvm_memory_alias {
- __u32 slot; /* this has a different namespace than memory slots */
+/* for KVM_SET_USER_MEMORY_REGION */
+struct kvm_userspace_memory_region {
+ __u32 slot;
__u32 flags;
__u64 guest_phys_addr;
- __u64 memory_size;
- __u64 target_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* start of the userspace allocated memory */
};
+/* for kvm_memory_region::flags */
+#define KVM_MEM_LOG_DIRTY_PAGES 1UL
+
+
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
/*
@@ -45,62 +45,18 @@ struct kvm_irq_level {
__u32 level;
};
-/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
-struct kvm_pic_state {
- __u8 last_irr; /* edge detection */
- __u8 irr; /* interrupt request register */
- __u8 imr; /* interrupt mask register */
- __u8 isr; /* interrupt service register */
- __u8 priority_add; /* highest irq priority */
- __u8 irq_base;
- __u8 read_reg_select;
- __u8 poll;
- __u8 special_mask;
- __u8 init_state;
- __u8 auto_eoi;
- __u8 rotate_on_auto_eoi;
- __u8 special_fully_nested_mode;
- __u8 init4; /* true if 4 byte init */
- __u8 elcr; /* PIIX edge/trigger selection */
- __u8 elcr_mask;
-};
-
-#define KVM_IOAPIC_NUM_PINS 24
-struct kvm_ioapic_state {
- __u64 base_address;
- __u32 ioregsel;
- __u32 id;
- __u32 irr;
- __u32 pad;
- union {
- __u64 bits;
- struct {
- __u8 vector;
- __u8 delivery_mode:3;
- __u8 dest_mode:1;
- __u8 delivery_status:1;
- __u8 polarity:1;
- __u8 remote_irr:1;
- __u8 trig_mode:1;
- __u8 mask:1;
- __u8 reserve:7;
- __u8 reserved[4];
- __u8 dest_id;
- } fields;
- } redirtbl[KVM_IOAPIC_NUM_PINS];
-};
-
-#define KVM_IRQCHIP_PIC_MASTER 0
-#define KVM_IRQCHIP_PIC_SLAVE 1
-#define KVM_IRQCHIP_IOAPIC 2
struct kvm_irqchip {
__u32 chip_id;
__u32 pad;
union {
char dummy[512]; /* reserving space */
+#ifdef CONFIG_X86
struct kvm_pic_state pic;
+#endif
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
struct kvm_ioapic_state ioapic;
+#endif
} chip;
};
@@ -116,6 +72,7 @@ struct kvm_irqchip {
#define KVM_EXIT_FAIL_ENTRY 9
#define KVM_EXIT_INTR 10
#define KVM_EXIT_SET_TPR 11
+#define KVM_EXIT_TPR_ACCESS 12
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -174,90 +131,17 @@ struct kvm_run {
__u32 longmode;
__u32 pad;
} hypercall;
+ /* KVM_EXIT_TPR_ACCESS */
+ struct {
+ __u64 rip;
+ __u32 is_write;
+ __u32 pad;
+ } tpr_access;
/* Fix the size of the union. */
char padding[256];
};
};
-/* for KVM_GET_REGS and KVM_SET_REGS */
-struct kvm_regs {
- /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
- __u64 rax, rbx, rcx, rdx;
- __u64 rsi, rdi, rsp, rbp;
- __u64 r8, r9, r10, r11;
- __u64 r12, r13, r14, r15;
- __u64 rip, rflags;
-};
-
-/* for KVM_GET_FPU and KVM_SET_FPU */
-struct kvm_fpu {
- __u8 fpr[8][16];
- __u16 fcw;
- __u16 fsw;
- __u8 ftwx; /* in fxsave format */
- __u8 pad1;
- __u16 last_opcode;
- __u64 last_ip;
- __u64 last_dp;
- __u8 xmm[16][16];
- __u32 mxcsr;
- __u32 pad2;
-};
-
-/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
-#define KVM_APIC_REG_SIZE 0x400
-struct kvm_lapic_state {
- char regs[KVM_APIC_REG_SIZE];
-};
-
-struct kvm_segment {
- __u64 base;
- __u32 limit;
- __u16 selector;
- __u8 type;
- __u8 present, dpl, db, s, l, g, avl;
- __u8 unusable;
- __u8 padding;
-};
-
-struct kvm_dtable {
- __u64 base;
- __u16 limit;
- __u16 padding[3];
-};
-
-/* for KVM_GET_SREGS and KVM_SET_SREGS */
-struct kvm_sregs {
- /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
- struct kvm_segment cs, ds, es, fs, gs, ss;
- struct kvm_segment tr, ldt;
- struct kvm_dtable gdt, idt;
- __u64 cr0, cr2, cr3, cr4, cr8;
- __u64 efer;
- __u64 apic_base;
- __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
-};
-
-struct kvm_msr_entry {
- __u32 index;
- __u32 reserved;
- __u64 data;
-};
-
-/* for KVM_GET_MSRS and KVM_SET_MSRS */
-struct kvm_msrs {
- __u32 nmsrs; /* number of msrs in entries */
- __u32 pad;
-
- struct kvm_msr_entry entries[0];
-};
-
-/* for KVM_GET_MSR_INDEX_LIST */
-struct kvm_msr_list {
- __u32 nmsrs; /* number of msrs in entries */
- __u32 indices[0];
-};
-
/* for KVM_TRANSLATE */
struct kvm_translation {
/* in */
@@ -302,28 +186,24 @@ struct kvm_dirty_log {
};
};
-struct kvm_cpuid_entry {
- __u32 function;
- __u32 eax;
- __u32 ebx;
- __u32 ecx;
- __u32 edx;
- __u32 padding;
-};
-
-/* for KVM_SET_CPUID */
-struct kvm_cpuid {
- __u32 nent;
- __u32 padding;
- struct kvm_cpuid_entry entries[0];
-};
-
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
__u8 sigset[0];
};
+/* for KVM_TPR_ACCESS_REPORTING */
+struct kvm_tpr_access_ctl {
+ __u32 enabled;
+ __u32 flags;
+ __u32 reserved[8];
+};
+
+/* for KVM_SET_VAPIC_ADDR */
+struct kvm_vapic_addr {
+ __u64 vapic_addr;
+};
+
#define KVMIO 0xAE
/*
@@ -347,11 +227,21 @@ struct kvm_signal_mask {
*/
#define KVM_CAP_IRQCHIP 0
#define KVM_CAP_HLT 1
+#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
+#define KVM_CAP_USER_MEMORY 3
+#define KVM_CAP_SET_TSS_ADDR 4
+#define KVM_CAP_EXT_CPUID 5
+#define KVM_CAP_VAPIC 6
/*
* ioctls for VM fds
*/
#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
+#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
+#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\
+ struct kvm_userspace_memory_region)
+#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
@@ -359,6 +249,7 @@ struct kvm_signal_mask {
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
+#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x48, struct kvm_cpuid2)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
@@ -384,5 +275,11 @@ struct kvm_signal_mask {
#define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu)
#define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state)
#define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state)
+#define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2)
+#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
+/* Available with KVM_CAP_VAPIC */
+#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
+/* Available with KVM_CAP_VAPIC */
+#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
#endif
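
As a usage sketch for the new vapic ioctls (vcpu_fd is hypothetical; a real caller would first confirm KVM_CAP_VAPIC via KVM_CHECK_EXTENSION):

/* Userspace sketch: ask the kernel to report guest TPR accesses. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void example_enable_tpr_reporting(int vcpu_fd)
{
	struct kvm_tpr_access_ctl ctl = { .enabled = 1 };

	if (ioctl(vcpu_fd, KVM_TPR_ACCESS_REPORTING, &ctl) < 0)
		perror("KVM_TPR_ACCESS_REPORTING");
}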
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
new file mode 100644
index 000000000000..ea4764b0a2f4
--- /dev/null
+++ b/include/linux/kvm_host.h
@@ -0,0 +1,299 @@
+#ifndef __KVM_HOST_H
+#define __KVM_HOST_H
+
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/hardirq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/preempt.h>
+#include <asm/signal.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+#include <linux/kvm_types.h>
+
+#include <asm/kvm_host.h>
+
+#define KVM_MAX_VCPUS 4
+#define KVM_MEMORY_SLOTS 8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_PIO_PAGE_OFFSET 1
+
+/*
+ * vcpu->requests bit members
+ */
+#define KVM_REQ_TLB_FLUSH 0
+#define KVM_REQ_MIGRATE_TIMER 1
+#define KVM_REQ_REPORT_TPR_ACCESS 2
+
+struct kvm_vcpu;
+extern struct kmem_cache *kvm_vcpu_cache;
+
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we don't expect many devices to register (famous last words :),
+ * so until then it will suffice. At least it's abstracted so we can change
+ * it in one place.
+ */
+struct kvm_io_bus {
+ int dev_count;
+#define NR_IOBUS_DEVS 6
+ struct kvm_io_device *devs[NR_IOBUS_DEVS];
+};
+
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+ struct kvm_io_device *dev);
+
+struct kvm_vcpu {
+ struct kvm *kvm;
+ struct preempt_notifier preempt_notifier;
+ int vcpu_id;
+ struct mutex mutex;
+ int cpu;
+ struct kvm_run *run;
+ int guest_mode;
+ unsigned long requests;
+ struct kvm_guest_debug guest_debug;
+ int fpu_active;
+ int guest_fpu_loaded;
+ wait_queue_head_t wq;
+ int sigset_active;
+ sigset_t sigset;
+ struct kvm_vcpu_stat stat;
+
+#ifdef CONFIG_HAS_IOMEM
+ int mmio_needed;
+ int mmio_read_completed;
+ int mmio_is_write;
+ int mmio_size;
+ unsigned char mmio_data[8];
+ gpa_t mmio_phys_addr;
+#endif
+
+ struct kvm_vcpu_arch arch;
+};
+
+struct kvm_memory_slot {
+ gfn_t base_gfn;
+ unsigned long npages;
+ unsigned long flags;
+ unsigned long *rmap;
+ unsigned long *dirty_bitmap;
+ unsigned long userspace_addr;
+ int user_alloc;
+};
+
+struct kvm {
+ struct mutex lock; /* protects the vcpus array and APIC accesses */
+ spinlock_t mmu_lock;
+ struct mm_struct *mm; /* userspace tied to this vm */
+ int nmemslots;
+ struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+ KVM_PRIVATE_MEM_SLOTS];
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ struct list_head vm_list;
+ struct file *filp;
+ struct kvm_io_bus mmio_bus;
+ struct kvm_io_bus pio_bus;
+ struct kvm_vm_stat stat;
+ struct kvm_arch arch;
+};
+
+/* The guest did something we don't support. */
+#define pr_unimpl(vcpu, fmt, ...) \
+ do { \
+ if (printk_ratelimit()) \
+ printk(KERN_ERR "kvm: %i: cpu%i " fmt, \
+ current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
+ } while (0)
+
+#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
+#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+
+int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
+void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_put(struct kvm_vcpu *vcpu);
+
+void decache_vcpus_on_cpu(int cpu);
+
+
+int kvm_init(void *opaque, unsigned int vcpu_size,
+ struct module *module);
+void kvm_exit(void);
+
+#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
+#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
+static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
+
+extern struct page *bad_page;
+
+int is_error_page(struct page *page);
+int kvm_is_error_hva(unsigned long addr);
+int kvm_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc);
+int __kvm_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc);
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc);
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+ int len);
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
+ int offset, int len);
+int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ unsigned long len);
+int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+
+void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_resched(struct kvm_vcpu *vcpu);
+void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_flush_remote_tlbs(struct kvm *kvm);
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+
+int kvm_dev_ioctl_check_extension(long ext);
+
+int kvm_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log, int *is_dirty);
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log);
+
+int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc);
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+void kvm_arch_destroy_vm(struct kvm *kvm);
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr);
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+
+int kvm_arch_init(void *opaque);
+void kvm_arch_exit(void);
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_arch_hardware_enable(void *garbage);
+void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+
+void kvm_free_physmem(struct kvm *kvm);
+
+struct kvm *kvm_arch_create_vm(void);
+void kvm_arch_destroy_vm(struct kvm *kvm);
+
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+static inline void kvm_guest_enter(void)
+{
+ account_system_vtime(current);
+ current->flags |= PF_VCPU;
+}
+
+static inline void kvm_guest_exit(void)
+{
+ account_system_vtime(current);
+ current->flags &= ~PF_VCPU;
+}
+
+static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ return slot - kvm->memslots;
+}
+
+static inline gpa_t gfn_to_gpa(gfn_t gfn)
+{
+ return (gpa_t)gfn << PAGE_SHIFT;
+}
+
+static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+{
+ set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
+}
+
+enum kvm_stat_kind {
+ KVM_STAT_VM,
+ KVM_STAT_VCPU,
+};
+
+struct kvm_stats_debugfs_item {
+ const char *name;
+ int offset;
+ enum kvm_stat_kind kind;
+ struct dentry *dentry;
+};
+extern struct kvm_stats_debugfs_item debugfs_entries[];
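+
Each architecture defines and terminates this table itself. An illustrative fragment (entries modeled on the x86 stat structs above; offsets resolved with offsetof into the matching stat struct for each kind):

/* Sketch: per-arch stats table consumed by the generic debugfs code. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed",    offsetof(struct kvm_vcpu_stat, pf_fixed),    KVM_STAT_VCPU },
	{ "mmu_flooded", offsetof(struct kvm_vm_stat,   mmu_flooded), KVM_STAT_VM },
	{ NULL }
};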
+
+#endif
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 3b292565a693..5497aac0d2f8 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -2,72 +2,30 @@
#define __LINUX_KVM_PARA_H
/*
- * Guest OS interface for KVM paravirtualization
- *
- * Note: this interface is totally experimental, and is certain to change
- * as we make progress.
+ * This header file provides a method for making a hypercall to the host.
+ * Architectures should define:
+ * - kvm_hypercall0, kvm_hypercall1...
+ * - kvm_arch_para_features
+ * - kvm_para_available
*/
-/*
- * Per-VCPU descriptor area shared between guest and host. Writable to
- * both guest and host. Registered with the host by the guest when
- * a guest acknowledges paravirtual mode.
- *
- * NOTE: all addresses are guest-physical addresses (gpa), to make it
- * easier for the hypervisor to map between the various addresses.
- */
-struct kvm_vcpu_para_state {
- /*
- * API version information for compatibility. If there's any support
- * mismatch (too old host trying to execute too new guest) then
- * the host will deny entry into paravirtual mode. Any other
- * combination (new host + old guest and new host + new guest)
- * is supposed to work - new host versions will support all old
- * guest API versions.
- */
- u32 guest_version;
- u32 host_version;
- u32 size;
- u32 ret;
-
- /*
- * The address of the vm exit instruction (VMCALL or VMMCALL),
- * which the host will patch according to the CPU model the
- * VM runs on:
- */
- u64 hypercall_gpa;
-
-} __attribute__ ((aligned(PAGE_SIZE)));
-
-#define KVM_PARA_API_VERSION 1
-
-/*
- * This is used for an RDMSR's ECX parameter to probe for a KVM host.
- * Hopefully no CPU vendor will use up this number. This is placed well
- * out of way of the typical space occupied by CPU vendors' MSR indices,
- * and we think (or at least hope) it wont be occupied in the future
- * either.
- */
-#define MSR_KVM_API_MAGIC 0x87655678
+/* Return values for hypercalls */
+#define KVM_ENOSYS 1000
-#define KVM_EINVAL 1
+#define KVM_HC_VAPIC_POLL_IRQ 1
/*
- * Hypercall calling convention:
- *
- * Each hypercall may have 0-6 parameters.
- *
- * 64-bit hypercall index is in RAX, goes from 0 to __NR_hypercalls-1
- *
- * 64-bit parameters 1-6 are in the standard gcc x86_64 calling convention
- * order: RDI, RSI, RDX, RCX, R8, R9.
- *
- * 32-bit index is EBX, parameters are: EAX, ECX, EDX, ESI, EDI, EBP.
- * (the first 3 are according to the gcc regparm calling convention)
- *
- * No registers are clobbered by the hypercall, except that the
- * return value is in RAX.
+ * Hypercalls are architecture specific; the definitions live in <asm/kvm_para.h>.
*/
-#define __NR_hypercalls 0
+#include <asm/kvm_para.h>
+
+#ifdef __KERNEL__
+static inline int kvm_para_has_feature(unsigned int feature)
+{
+ if (kvm_arch_para_features() & (1UL << feature))
+ return 1;
+ return 0;
+}
+#endif /* __KERNEL__ */
+#endif /* __LINUX_KVM_PARA_H */
-#endif
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
new file mode 100644
index 000000000000..1c4e46decb22
--- /dev/null
+++ b/include/linux/kvm_types.h
@@ -0,0 +1,54 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __KVM_TYPES_H__
+#define __KVM_TYPES_H__
+
+#include <asm/types.h>
+
+/*
+ * Address types:
+ *
+ * gva - guest virtual address
+ * gpa - guest physical address
+ * gfn - guest frame number
+ * hva - host virtual address
+ * hpa - host physical address
+ * hfn - host frame number
+ */
+
+typedef unsigned long gva_t;
+typedef u64 gpa_t;
+typedef unsigned long gfn_t;
+
+typedef unsigned long hva_t;
+typedef u64 hpa_t;
+typedef unsigned long hfn_t;
+
+struct kvm_pio_request {
+ unsigned long count;
+ int cur_count;
+ struct page *guest_pages[2];
+ unsigned guest_page_offset;
+ int in;
+ int port;
+ int size;
+ int string;
+ int down;
+ int rep;
+};
+
+#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
index 6080f73fc85f..8c2cc4c02526 100644
--- a/include/linux/selinux.h
+++ b/include/linux/selinux.h
@@ -120,16 +120,35 @@ void selinux_get_task_sid(struct task_struct *tsk, u32 *sid);
int selinux_string_to_sid(char *str, u32 *sid);
/**
- * selinux_relabel_packet_permission - check permission to relabel a packet
- * @sid: ID value to be applied to network packet (via SECMARK, most likely)
+ * selinux_secmark_relabel_packet_permission - secmark permission check
+ * @sid: SECMARK ID value to be applied to network packet
*
- * Returns 0 if the current task is allowed to label packets with the
- * supplied security ID. Note that it is implicit that the packet is always
- * being relabeled from the default unlabled value, and that the access
- * control decision is made in the AVC.
+ * Returns 0 if the current task is allowed to set the SECMARK label of
+ * packets with the supplied security ID. Note that it is implicit that
+ * the packet is always being relabeled from the default unlabeled value,
+ * and that the access control decision is made in the AVC.
*/
-int selinux_relabel_packet_permission(u32 sid);
+int selinux_secmark_relabel_packet_permission(u32 sid);
+/**
+ * selinux_secmark_refcount_inc - increments the secmark use counter
+ *
+ * SELinux keeps track of the current SECMARK targets in use so it knows
+ * when to apply SECMARK label access checks to network packets. This
+ * function increments this reference count to indicate that a new SECMARK
+ * target has been configured.
+ */
+void selinux_secmark_refcount_inc(void);
+
+/**
+ * selinux_secmark_refcount_dec - decrements the secmark use counter
+ *
+ * SELinux keeps track of the current SECMARK targets in use so it knows
+ * when to apply SECMARK label access checks to network packets. This
+ * function decrements this reference count to indicate that one of the
+ * existing SECMARK targets has been removed/flushed.
+ */
+void selinux_secmark_refcount_dec(void);
#else
static inline int selinux_audit_rule_init(u32 field, u32 op,
@@ -184,11 +203,21 @@ static inline int selinux_string_to_sid(const char *str, u32 *sid)
return 0;
}
-static inline int selinux_relabel_packet_permission(u32 sid)
+static inline int selinux_secmark_relabel_packet_permission(u32 sid)
{
return 0;
}
+static inline void selinux_secmark_refcount_inc(void)
+{
+ return;
+}
+
+static inline void selinux_secmark_refcount_dec(void)
+{
+ return;
+}
+
#endif /* CONFIG_SECURITY_SELINUX */
#endif /* _LINUX_SELINUX_H */
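A sketch of the intended calling pattern, modeled on how a SECMARK packet-tagging target might use the hooks above; the function names and error handling here are illustrative:

#include <linux/selinux.h>

static int example_secmark_target_setup(u32 secid)
{
	int err;

	err = selinux_secmark_relabel_packet_permission(secid);
	if (err)
		return err;			/* task may not apply this label */
	selinux_secmark_refcount_inc();		/* one more SECMARK target active */
	return 0;
}

static void example_secmark_target_teardown(void)
{
	selinux_secmark_refcount_dec();		/* target removed or flushed */
}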
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 2e5b2f6f9fa0..b3213c7c5309 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -67,7 +67,11 @@
* NetLabel NETLINK protocol
*/
-#define NETLBL_PROTO_VERSION 1
+/* NetLabel NETLINK protocol version
+ * 1: initial version
+ * 2: added static labels for unlabeled connections
+ */
+#define NETLBL_PROTO_VERSION 2
/* NetLabel NETLINK types/families */
#define NETLBL_NLTYPE_NONE 0
@@ -105,17 +109,49 @@ struct netlbl_dom_map;
/* Domain mapping operations */
int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
-/* LSM security attributes */
+/*
+ * LSM security attributes
+ */
+
+/**
+ * struct netlbl_lsm_cache - NetLabel LSM security attribute cache
+ * @refcount: atomic reference counter
+ * @free: LSM supplied function to free the cache data
+ * @data: LSM supplied cache data
+ *
+ * Description:
+ * This structure is provided for LSMs which wish to make use of the NetLabel
+ * caching mechanism to store LSM specific data/attributes in the NetLabel
+ * cache. If the LSM has to perform a lot of translation from the NetLabel
+ * security attributes into its own internal representation then the cache
+ * mechanism can provide a way to eliminate some or all of that translation
+ * overhead on a cache hit.
+ *
+ */
struct netlbl_lsm_cache {
atomic_t refcount;
void (*free) (const void *data);
void *data;
};
-/* The catmap bitmap field MUST be a power of two in length and large
+
+/**
+ * struct netlbl_lsm_secattr_catmap - NetLabel LSM secattr category bitmap
+ * @startbit: the value of the lowest order bit in the bitmap
+ * @bitmap: the category bitmap
+ * @next: pointer to the next bitmap "node" or NULL
+ *
+ * Description:
+ * This structure is used to represent category bitmaps. Due to the large
+ * number of categories supported by most labeling protocols it is not
+ * practical to transfer a full bitmap internally so NetLabel adopts a sparse
+ * bitmap structure modeled after SELinux's ebitmap structure.
+ * The catmap bitmap field MUST be a power of two in length and large
* enough to hold at least 240 bits. Special care (i.e. check the code!)
* should be used when changing these values as the LSM implementation
* probably has functions which rely on the sizes of these types to speed
- * processing. */
+ * processing.
+ *
+ */
#define NETLBL_CATMAP_MAPTYPE u64
#define NETLBL_CATMAP_MAPCNT 4
#define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
@@ -127,22 +163,48 @@ struct netlbl_lsm_secattr_catmap {
NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
struct netlbl_lsm_secattr_catmap *next;
};
+
+/**
+ * struct netlbl_lsm_secattr - NetLabel LSM security attributes
+ * @flags: indicate which attributes are contained in this structure
+ * @type: indicate the NLTYPE of the attributes
+ * @domain: the NetLabel LSM domain
+ * @cache: NetLabel LSM specific cache
+ * @attr.mls: MLS sensitivity label
+ * @attr.mls.cat: MLS category bitmap
+ * @attr.mls.lvl: MLS sensitivity level
+ * @attr.secid: LSM specific secid token
+ *
+ * Description:
+ * This structure is used to pass security attributes between NetLabel and the
+ * LSM modules. The flags field is used to specify which fields within the
+ * struct are valid; valid flag values can be created by bitwise OR'ing the
+ * NETLBL_SECATTR_* defines. The domain field is typically set by the LSM to
+ * specify domain specific configuration settings and is not usually used by
+ * NetLabel itself when returning security attributes to the LSM.
+ *
+ */
#define NETLBL_SECATTR_NONE 0x00000000
#define NETLBL_SECATTR_DOMAIN 0x00000001
#define NETLBL_SECATTR_CACHE 0x00000002
#define NETLBL_SECATTR_MLS_LVL 0x00000004
#define NETLBL_SECATTR_MLS_CAT 0x00000008
+#define NETLBL_SECATTR_SECID 0x00000010
#define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \
- NETLBL_SECATTR_MLS_CAT)
+ NETLBL_SECATTR_MLS_CAT | \
+ NETLBL_SECATTR_SECID)
struct netlbl_lsm_secattr {
u32 flags;
-
+ u32 type;
char *domain;
-
- u32 mls_lvl;
- struct netlbl_lsm_secattr_catmap *mls_cat;
-
struct netlbl_lsm_cache *cache;
+ union {
+ struct {
+ struct netlbl_lsm_secattr_catmap *cat;
+ u32 lvl;
+ } mls;
+ u32 secid;
+ } attr;
};
/*
@@ -231,10 +293,7 @@ static inline void netlbl_secattr_catmap_free(
*/
static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
{
- secattr->flags = 0;
- secattr->domain = NULL;
- secattr->mls_cat = NULL;
- secattr->cache = NULL;
+ memset(secattr, 0, sizeof(*secattr));
}
/**
@@ -248,11 +307,11 @@ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
*/
static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
{
- if (secattr->cache)
- netlbl_secattr_cache_free(secattr->cache);
kfree(secattr->domain);
- if (secattr->mls_cat)
- netlbl_secattr_catmap_free(secattr->mls_cat);
+ if (secattr->flags & NETLBL_SECATTR_CACHE)
+ netlbl_secattr_cache_free(secattr->cache);
+ if (secattr->flags & NETLBL_SECATTR_MLS_CAT)
+ netlbl_secattr_catmap_free(secattr->attr.mls.cat);
}
/**
@@ -300,7 +359,7 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
gfp_t flags);
/*
- * LSM protocol operations
+ * LSM protocol operations (NetLabel LSM/kernel API)
*/
int netlbl_enabled(void);
int netlbl_sock_setattr(struct sock *sk,
@@ -308,6 +367,7 @@ int netlbl_sock_setattr(struct sock *sk,
int netlbl_sock_getattr(struct sock *sk,
struct netlbl_lsm_secattr *secattr);
int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ u16 family,
struct netlbl_lsm_secattr *secattr);
void netlbl_skbuff_err(struct sk_buff *skb, int error);
@@ -360,6 +420,7 @@ static inline int netlbl_sock_getattr(struct sock *sk,
return -ENOSYS;
}
static inline int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ u16 family,
struct netlbl_lsm_secattr *secattr)
{
return -ENOSYS;
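A sketch of how an LSM might fill in the restructured secattr before handing it to netlbl_sock_setattr(); the sensitivity level and the choice of fields are illustrative assumptions:

#include <net/netlabel.h>

static int example_label_socket(struct sock *sk)
{
	struct netlbl_lsm_secattr secattr;
	int ret;

	netlbl_secattr_init(&secattr);		/* now a single memset() */
	secattr.domain = NULL;			/* use the default domain mapping */
	secattr.attr.mls.lvl = 3;		/* illustrative sensitivity level */
	secattr.flags |= NETLBL_SECATTR_MLS_LVL;

	ret = netlbl_sock_setattr(sk, &secattr);
	netlbl_secattr_destroy(&secattr);	/* frees only the flagged fields */
	return ret;
}

Note that netlbl_secattr_destroy() now consults the flags before freeing, so a zeroed secattr with stale pointers in unused union members is safe to destroy.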
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 702fcfeb37f1..82251575a9b4 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,6 +11,25 @@
#include <linux/types.h>
/*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation. We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
+ * minimum value is 32.
+ */
+#define SCSI_MAX_SG_SEGMENTS 128
+
+/*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+ * is totally arbitrary; a setting of 2048 allows I/Os of at least 8 MB.
+ */
+#ifdef ARCH_HAS_SG_CHAIN
+#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048
+#else
+#define SCSI_MAX_SG_CHAIN_SEGMENTS SCSI_MAX_SG_SEGMENTS
+#endif
+
+/*
* SCSI command lengths
*/
@@ -83,6 +102,7 @@ extern const unsigned char scsi_command_size[8];
#define READ_TOC 0x43
#define LOG_SELECT 0x4c
#define LOG_SENSE 0x4d
+#define XDWRITEREAD_10 0x53
#define MODE_SELECT_10 0x55
#define RESERVE_10 0x56
#define RELEASE_10 0x57
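A back-of-the-envelope check of the "fit inside a single page" comment above: with a typical 32-byte struct scatterlist on a 64-bit build and 4 KiB pages, 128 entries exactly fill one page (128 * 32 = 4096). A compile-time assertion along these lines, illustrative and not part of the header, would catch a sizing regression:

#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>

static inline void example_sg_table_fits_one_page(void)
{
	/* 128 entries * 32 bytes = 4096 bytes on a typical 64-bit build */
	BUILD_BUG_ON(SCSI_MAX_SG_SEGMENTS * sizeof(struct scatterlist)
		     > PAGE_SIZE);
}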
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a457fca66f61..de28aab820b0 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -2,15 +2,20 @@
#define _SCSI_SCSI_CMND_H
#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
-struct request;
struct Scsi_Host;
struct scsi_device;
+struct scsi_data_buffer {
+ struct sg_table table;
+ unsigned length;
+ int resid;
+};
/* embedded in scsi_cmnd */
struct scsi_pointer {
@@ -61,15 +66,11 @@ struct scsi_cmnd {
/* These elements define the operation we are about to perform */
#define MAX_COMMAND_SIZE 16
unsigned char cmnd[MAX_COMMAND_SIZE];
- unsigned request_bufflen; /* Actual request size */
struct timer_list eh_timeout; /* Used to time out the command. */
- void *request_buffer; /* Actual requested buffer */
/* These elements define the operation we ultimately want to perform */
- struct sg_table sg_table;
- unsigned short use_sg; /* Number of pieces of scatter-gather */
-
+ struct scsi_data_buffer sdb;
unsigned underflow; /* Return error if less than
this amount is transferred */
@@ -79,10 +80,6 @@ struct scsi_cmnd {
reconnects. Probably == sector
size */
- int resid; /* Number of bytes requested to be
- transferred less actual number
- transferred (0 if not supported) */
-
struct request *request; /* The command we are
working on */
@@ -127,27 +124,55 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern int scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scsi_cmnd *);
+extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
+extern void scsi_release_buffers(struct scsi_cmnd *cmd);
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
-#define scsi_sg_count(cmd) ((cmd)->use_sg)
-#define scsi_sglist(cmd) ((cmd)->sg_table.sgl)
-#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
+{
+ return cmd->sdb.table.nents;
+}
+
+static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
+{
+ return cmd->sdb.table.sgl;
+}
+
+static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
+{
+ return cmd->sdb.length;
+}
static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
{
- cmd->resid = resid;
+ cmd->sdb.resid = resid;
}
static inline int scsi_get_resid(struct scsi_cmnd *cmd)
{
- return cmd->resid;
+ return cmd->sdb.resid;
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
+static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
+{
+ return blk_bidi_rq(cmd->request) &&
+ (cmd->request->next_rq->special != NULL);
+}
+
+static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
+{
+ return scsi_bidi_cmnd(cmd) ?
+ cmd->request->next_rq->special : &cmd->sdb;
+}
+
+static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
+{
+ return &cmd->sdb;
+}
+
#endif /* _SCSI_SCSI_CMND_H */
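A sketch of how a low-level driver would use the new accessors in its queuecommand path; the hardware-programming step is a placeholder and the function name is illustrative:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int example_lld_queue_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* DMA-maps scsi_sglist(cmd) */
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the HBA;
		 * placeholder for real hardware programming */
	}

	scsi_set_resid(cmd, 0);		/* full transfer assumed, for illustration */
	return 0;
}

For bidirectional commands, scsi_in(cmd) and scsi_out(cmd) select the per-direction scsi_data_buffer in the same way.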
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d21b8913ceb3..25071d5d9bf8 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -68,16 +68,15 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
extern int scsi_reset_provider(struct scsi_device *, int);
struct scsi_eh_save {
+ /* saved state */
int result;
enum dma_data_direction data_direction;
unsigned char cmd_len;
unsigned char cmnd[MAX_COMMAND_SIZE];
+ struct scsi_data_buffer sdb;
+ struct request *next_rq;
- void *buffer;
- unsigned bufflen;
- unsigned short use_sg;
- int resid;
-
+ /* new command support */
struct scatterlist sense_sgl;
};
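A sketch of the save/restore pattern the reworked scsi_eh_save supports, assuming the scsi_eh_prep_cmnd()/scsi_eh_restore_cmnd() pair declared elsewhere in this header; the TEST UNIT READY CDB is illustrative:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static void example_probe_device(struct scsi_cmnd *scmd)
{
	struct scsi_eh_save ses;
	unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

	/* stash the failed command's state (including sdb and next_rq) */
	scsi_eh_prep_cmnd(scmd, &ses, tur, sizeof(tur), 0);
	/* ... issue scmd synchronously and evaluate the result ... */
	scsi_eh_restore_cmnd(scmd, &ses);	/* put the original state back */
}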
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 0fd4746ee39d..5c58d594126a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -39,9 +39,6 @@ struct blk_queue_tags;
#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1
-#define DISABLE_SG_CHAINING 0
-#define ENABLE_SG_CHAINING 1
-
enum scsi_eh_timer_return {
EH_NOT_HANDLED,
EH_HANDLED,
@@ -136,9 +133,9 @@ struct scsi_host_template {
* the done callback is invoked.
*
* This is called to inform the LLD to transfer
- * cmd->request_bufflen bytes. The cmd->use_sg speciefies the
+ * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
* number of scatterlist entries in the command and
- * cmd->request_buffer contains the scatterlist.
+ * scsi_sglist(cmd) returns the scatterlist.
*
* return values: see queuecommand
*
@@ -446,15 +443,6 @@ struct scsi_host_template {
unsigned ordered_tag:1;
/*
- * true if the low-level driver can support sg chaining. this
- * will be removed eventually when all the drivers are
- * converted to support sg chaining.
- *
- * Status: OBSOLETE
- */
- unsigned use_sg_chaining:1;
-
- /*
* Countdown for host blocking with no commands outstanding
*/
unsigned int max_host_blocked;
@@ -598,7 +586,6 @@ struct Scsi_Host {
unsigned unchecked_isa_dma:1;
unsigned use_clustering:1;
unsigned use_blk_tcq:1;
- unsigned use_sg_chaining:1;
/*
* Host has requested that no further requests come through for the