Commit b407fc57 authored by Jeremy Fitzhardinge
Browse files

x86/paravirt: flush pending mmu updates on context switch



Impact: allow preemption during lazy mmu updates

If we're in lazy mmu mode when context switching, leave
lazy mmu mode, but remember the task's state in
TIF_LAZY_MMU_UPDATES.  When we resume the task, check this
flag and re-enter lazy mmu mode if it's set.

This sets things up for allowing lazy mmu mode while preemptible,
though that won't actually be active until the next change.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
parent 7fd7d83d
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -1418,7 +1418,6 @@ void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(void)
+2 −0
Original line number Diff line number Diff line
@@ -94,6 +94,7 @@ struct thread_info {
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -115,6 +116,7 @@ struct thread_info {
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)

/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY	\
+1 −1
Original line number Diff line number Diff line
@@ -201,7 +201,7 @@ static void kvm_leave_lazy_mmu(void)
	struct kvm_para_state *state = kvm_para_state();

	mmu_queue_flush(state);
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	paravirt_leave_lazy_mmu();
	state->mode = paravirt_get_lazy_mode();
}

+10 −3
Original line number Diff line number Diff line
@@ -252,7 +252,7 @@ static inline void enter_lazy(enum paravirt_lazy_mode mode)
	__get_cpu_var(paravirt_lazy_mode) = mode;
}

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
	BUG_ON(preemptible());
@@ -267,17 +267,24 @@ void paravirt_enter_lazy_mmu(void)

void paravirt_leave_lazy_mmu(void)
{
	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
	leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_enter_lazy_cpu(void)
{
	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_thread_flag(TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_leave_lazy_cpu(void)
{
	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_thread_flag(TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+10 −4
Original line number Diff line number Diff line
@@ -473,16 +473,22 @@ static void vmi_enter_lazy_cpu(void)
	vmi_ops.set_lazy_mode(2);
}

static void vmi_leave_lazy_cpu(void)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_leave_lazy_cpu();
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy(void)
static void vmi_leave_lazy_mmu(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	vmi_ops.set_lazy_mode(0);
	paravirt_leave_lazy_mmu();
}

static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -718,12 +724,12 @@ static inline int __init activate_vmi(void)

	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy_cpu,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
Loading