Commit 13fb5927 authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

kvm: x86: disable shattered huge page recovery for PREEMPT_RT.



If a huge page is recovered (and becomes non-executable) while another
thread is executing it, the resulting contention on mmu_lock can cause
latency spikes.  Disabling recovery for PREEMPT_RT kernels fixes this
issue.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8c5bd25b
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -51,7 +51,12 @@
/* Defined elsewhere; reports whether the iTLB multihit mitigation is active. */
extern bool itlb_multihit_kvm_mitigation;

/* NX huge pages module parameter; -1 presumably selects an auto/default
 * policy resolved elsewhere (see set_nx_huge_pages below) — TODO confirm. */
static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
/* Non-RT default: recover shattered NX huge pages at a 1/60 ratio per
 * period — NOTE(review): exact interpretation of the ratio is defined by
 * the recovery worker elsewhere in this file; confirm there. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

/* Param-set callbacks wired to the module parameters above (definitions
 * follow later in the file). */
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);