author    | Xiaoyao Li <xiaoyao.li@intel.com>    | 2020-03-25 11:09:23 +0800
committer | Thomas Gleixner <tglx@linutronix.de> | 2020-03-27 11:43:29 +0100
commit    | dbaba47085b0c2aa793ce849750164bd3765e163 (patch)
tree      | a398007467f441d2f871fba4c150e8a04fa016ca /arch/x86
parent    | 6650cdd9a8ccf00555dbbe743d58541ad8feb6a7 (diff)
x86/split_lock: Rework the initialization flow of split lock detection
The current initialization flow of split lock detection has the following issues:
1. It assumes the initial value of MSR_TEST_CTRL.SPLIT_LOCK_DETECT is
   zero. However, it's possible that BIOS/firmware has already set it.
2. The X86_FEATURE_SPLIT_LOCK_DETECT flag is set unconditionally, even
   when a virtualization flaw causes FMS to indicate that the feature
   exists while it is actually not supported.
Rework the initialization flow to solve the above issues. In detail, explicitly
clear and set the SPLIT_LOCK_DETECT bit to verify that MSR_TEST_CTRL can be
accessed, and read the MSR back after writing it (rdmsr after wrmsr) to ensure
the bit was actually cleared/set.
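
This probe-and-verify step corresponds to the new split_lock_verify_msr()
helper in the diff below; it is restated here with explanatory comments added
(the code itself is unchanged from the patch):

	static bool split_lock_verify_msr(bool on)
	{
		u64 ctrl, tmp;

		/* If the MSR cannot even be read, the feature is unusable. */
		if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
			return false;

		/* Request the bit to be set or cleared... */
		if (on)
			ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
		else
			ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

		if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
			return false;

		/* ...then read back and compare to confirm the write stuck. */
		rdmsrl(MSR_TEST_CTRL, tmp);
		return ctrl == tmp;
	}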
The X86_FEATURE_SPLIT_LOCK_DETECT flag is set only when the feature does exist
and has not been disabled with the kernel parameter "split_lock_detect=off".
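
For reference, the parameter is given on the kernel command line. Only "off"
appears in this message; "warn" and "fatal" are inferred from the state table
and pr_info strings in the diff below, so treat this as an illustrative sketch
rather than a quote from the patch:

	split_lock_detect=off    # do not enable split lock detection
	split_lock_detect=warn   # warn about user-space split locks (default when supported)
	split_lock_detect=fatal  # send SIGBUS on user-space split locks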
On each processor, explicitly update the SPLIT_LOCK_DETECT bit based on
sld_state in split_lock_init(), since BIOS/firmware may touch it.
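
With the helper above in place, the per-CPU path reduces to a single call, as
in the diff below: every CPU forces the MSR bit to match sld_state and
re-verifies it, which also undoes whatever value BIOS/firmware left behind:

	static void split_lock_init(void)
	{
		/*
		 * Force the MSR bit to the state implied by sld_state on
		 * this CPU and verify it took effect; this corrects any
		 * value the BIOS/firmware may have left in MSR_TEST_CTRL.
		 */
		split_lock_verify_msr(sld_state != sld_off);
	}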
Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200325030924.132881-2-xiaoyao.li@intel.com
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/cpu/intel.c | 75
1 file changed, 42 insertions, 33 deletions
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index db3e745e5d47..0c859c91d008 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -44,7 +44,7 @@ enum split_lock_detect_state {
  * split_lock_setup() will switch this to sld_warn on systems that support
  * split lock detect, unless there is a command line override.
  */
-static enum split_lock_detect_state sld_state = sld_off;
+static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
 
 /*
  * Processors which have self-snooping capability can handle conflicting
@@ -984,78 +984,87 @@ static inline bool match_option(const char *arg, int arglen, const char *opt)
 	return len == arglen && !strncmp(arg, opt, len);
 }
 
+static bool split_lock_verify_msr(bool on)
+{
+	u64 ctrl, tmp;
+
+	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+		return false;
+	if (on)
+		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	else
+		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+		return false;
+	rdmsrl(MSR_TEST_CTRL, tmp);
+	return ctrl == tmp;
+}
+
 static void __init split_lock_setup(void)
 {
+	enum split_lock_detect_state state = sld_warn;
 	char arg[20];
 	int i, ret;
 
-	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
-	sld_state = sld_warn;
+	if (!split_lock_verify_msr(false)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
 
 	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
 				  arg, sizeof(arg));
 	if (ret >= 0) {
 		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
 			if (match_option(arg, ret, sld_options[i].option)) {
-				sld_state = sld_options[i].state;
+				state = sld_options[i].state;
 				break;
 			}
 		}
 	}
 
-	switch (sld_state) {
+	switch (state) {
 	case sld_off:
 		pr_info("disabled\n");
-		break;
-
+		return;
 	case sld_warn:
 		pr_info("warning about user-space split_locks\n");
 		break;
-
 	case sld_fatal:
 		pr_info("sending SIGBUS on user-space split_locks\n");
 		break;
 	}
+
+	if (!split_lock_verify_msr(true)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
+
+	sld_state = state;
+	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
 }
 
 /*
- * Locking is not required at the moment because only bit 29 of this
- * MSR is implemented and locking would not prevent that the operation
- * of one thread is immediately undone by the sibling thread.
- * Use the "safe" versions of rdmsr/wrmsr here because although code
- * checks CPUID and MSR bits to make sure the TEST_CTRL MSR should
- * exist, there may be glitches in virtualization that leave a guest
- * with an incorrect view of real h/w capabilities.
+ * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
+ * is not implemented as one thread could undo the setting of the other
+ * thread immediately after dropping the lock anyway.
  */
-static bool __sld_msr_set(bool on)
+static void sld_update_msr(bool on)
 {
 	u64 test_ctrl_val;
 
-	if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val))
-		return false;
+	rdmsrl(MSR_TEST_CTRL, test_ctrl_val);
 
 	if (on)
 		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
 	else
 		test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
 
-	return !wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val);
+	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
 }
 
 static void split_lock_init(void)
 {
-	if (sld_state == sld_off)
-		return;
-
-	if (__sld_msr_set(true))
-		return;
-
-	/*
-	 * If this is anything other than the boot-cpu, you've done
-	 * funny things and you get to keep whatever pieces.
-	 */
-	pr_warn("MSR fail -- disabled\n");
-	sld_state = sld_off;
+	split_lock_verify_msr(sld_state != sld_off);
 }
 
 bool handle_user_split_lock(struct pt_regs *regs, long error_code)
@@ -1071,7 +1080,7 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 	 * progress and set TIF_SLD so the detection is re-enabled via
 	 * switch_to_sld() when the task is scheduled out.
 	 */
-	__sld_msr_set(false);
+	sld_update_msr(false);
 	set_tsk_thread_flag(current, TIF_SLD);
 	return true;
 }
@@ -1085,7 +1094,7 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
  */
 void switch_to_sld(unsigned long tifn)
 {
-	__sld_msr_set(!(tifn & _TIF_SLD));
+	sld_update_msr(!(tifn & _TIF_SLD));
 }
 
 #define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}