Commit 7fd7d83d authored by Jeremy Fitzhardinge
Browse files

x86/pvops: replace arch_enter_lazy_cpu_mode with arch_start_context_switch



Impact: simplification, prepare for later changes

Make lazy cpu mode more specific to context switching, so that
it makes sense to do more context-switch specific things in
the callbacks.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
parent b8bcfe99
Loading
Loading
Loading
Loading
+3 −5
Original line number Diff line number Diff line
@@ -1420,19 +1420,17 @@ void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
static inline void arch_end_context_switch(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

void arch_flush_lazy_cpu_mode(void);

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
+0 −13
Original line number Diff line number Diff line
@@ -301,19 +301,6 @@ void arch_flush_lazy_mmu_mode(void)
	preempt_enable();
}

void arch_flush_lazy_cpu_mode(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
		WARN_ON(preempt_count() == 1);
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}

	preempt_enable();
}

struct pv_info pv_info = {
	.name = "bare hardware",
	.paravirt_enabled = 0,
+1 −1
Original line number Diff line number Diff line
@@ -407,7 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();
	arch_end_context_switch();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
+1 −1
Original line number Diff line number Diff line
@@ -428,7 +428,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();
	arch_end_context_switch();

	/*
	 * Switch FS and GS.
+1 −4
Original line number Diff line number Diff line
@@ -1119,10 +1119,8 @@ static void drop_other_mm_ref(void *info)

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void xen_drop_mm_ref(struct mm_struct *mm)
@@ -1135,7 +1133,6 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
Loading