author		Simon Guo <wei.guo.simon@gmail.com>	2018-05-23 15:01:58 +0800
committer	Paul Mackerras <paulus@ozlabs.org>	2018-06-01 10:29:55 +1000
commit		8d2e2fc5e082a7b3f858cefb6e65700f28d2955e (patch)
tree		a19a6efb386cf16990f4d4ff6486c532bcc67d9f
parent		66c33e796cf9d5f7150bf4c701786d0527b594b6 (diff)
KVM: PPC: Book3S PR: Add transaction memory save/restore skeleton
The transactional memory checkpoint area is saved/restored when the
vCPU's QEMU process is switched out of or onto a CPU, i.e. in
kvmppc_core_vcpu_put_pr() and kvmppc_core_vcpu_load_pr().
The MSR TM active state is determined by the TS bits (decoded in the
sketch below):
    active:   10 (transactional) or 01 (suspended)
    inactive: 00 (non-transactional)
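For reference, a minimal sketch of the TS-bit decode, modeled on the
MSR_TS_*/MSR_TM_ACTIVE definitions in arch/powerpc/include/asm/reg.h
(bit positions assume the 64-bit MSR layout; this is illustrative, not
a quote of the header):

/* TS field of the MSR: two bits selecting the transaction state. */
#define MSR_TS_S	(1ULL << 33)		/* TS = 01: suspended     */
#define MSR_TS_T	(1ULL << 34)		/* TS = 10: transactional */
#define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)

/* TM is active iff TS != 00, i.e. transactional or suspended. */
#define MSR_TM_ACTIVE(msr)	(((msr) & MSR_TS_MASK) != 0)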
We don't "fake" TM functionality for the guest; we "sync" the guest's
virtual MSR TM active state (10 or 01) into the shadow MSR. That is to
say, we do not emulate a transactional guest with a TM-inactive shadow
MSR.
TM SPR support (TFIAR/TFAR/TEXASR) was already added by
commit 9916d57e64a4 ("KVM: PPC: Book3S PR: Expose TM registers").
Math register support (FPR/VMX/VSX) will be added in a subsequent
patch.
Whether the TM context needs to be saved/restored is determined by the
TM active state from kvmppc_get_msr() (see the condensed sketch after
this list):
* TM active   - save/restore the full TM context
* TM inactive - no need to do so; only save/restore the TM SPRs
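The kvmppc_save_tm_pr() hunk in book3s_pr.c below implements exactly
this gate; here is a condensed, commented view of that hunk (the
restore path is symmetric):

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_save_tm_sprs(vcpu);	/* TM inactive: SPRs only */
		return;
	}

	preempt_disable();			/* no migration between mfmsr() and the save */
	_kvmppc_save_tm_pr(vcpu, mfmsr());	/* full checkpoint save */
	preempt_enable();
}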
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Suggested-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	9
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	1
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c		27
3 files changed, 36 insertions, 1 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 20d3d5a87296..fc15ad9dfc3b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -257,6 +257,15 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+#endif
+
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8dc5e439b387..fa4efa7e88f7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -627,7 +627,6 @@ struct kvm_vcpu_arch {
 
 	struct thread_vr_state vr_tm;
 	u32 vrsave_tm; /* also USPRG0 */
-
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 92e467ebadf0..a14721f034fb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -43,6 +43,7 @@
 #include <linux/module.h>
 #include <linux/miscdevice.h>
 #include <asm/asm-prototypes.h>
+#include <asm/tm.h>
 
 #include "book3s.h"
 
@@ -115,6 +116,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 
 	if (kvmppc_is_split_real(vcpu))
 		kvmppc_fixup_split_real(vcpu);
+
+	kvmppc_restore_tm_pr(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
@@ -134,6 +137,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 
 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+	kvmppc_save_tm_pr(vcpu);
 
 	/* Enable AIL if supported */
 	if (cpu_has_feature(CPU_FTR_HVMODE) &&
@@ -304,6 +308,29 @@ static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
 	tm_disable();
 }
 
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
+{
+	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
+		kvmppc_save_tm_sprs(vcpu);
+		return;
+	}
+
+	preempt_disable();
+	_kvmppc_save_tm_pr(vcpu, mfmsr());
+	preempt_enable();
+}
+
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
+{
+	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
+		kvmppc_restore_tm_sprs(vcpu);
+		return;
+	}
+
+	preempt_disable();
+	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
+	preempt_enable();
+}
 #endif
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)