Commit 4ee91519 authored by Rik van Riel, committed by Borislav Petkov
Browse files

x86/fpu: Add an __fpregs_load_activate() internal helper



Add a helper function that ensures the floating point registers for the
current task are active. Use with preemption disabled.

While at it, add fpregs_lock/unlock() helpers too, to be used in later
patches.

 [ bp: Add a comment about its intended usage. ]

Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aubrey Li <aubrey.li@intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-10-bigeasy@linutronix.de
parent 0169f53e
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/preempt.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
@@ -22,6 +23,16 @@ extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);

/*
 * Guard a region that manipulates the current task's FPU register
 * state. Implemented as preempt_disable(), so the task cannot be
 * scheduled out (and its FPU state switched) inside the critical
 * section. Pair every call with fpregs_unlock().
 */
static inline void fpregs_lock(void)
{
	preempt_disable();
}

/*
 * End a region started by fpregs_lock(): re-enable preemption,
 * allowing the scheduler to switch FPU register state again.
 */
static inline void fpregs_unlock(void)
{
	preempt_enable();
}

/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
 *
+14 −8
Original line number Diff line number Diff line
@@ -484,6 +484,18 @@ static inline void fpregs_activate(struct fpu *fpu)
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 *
 * Make sure @fpu's register state is loaded and active on @cpu; a
 * no-op when the in-register state is already valid for this task.
 * Caller must have preemption disabled.
 */
static inline void __fpregs_load_activate(struct fpu *fpu, int cpu)
{
	if (fpregs_state_valid(fpu, cpu))
		return;

	/* Kernel threads (no mm) keep whatever state is in the registers. */
	if (current->mm)
		copy_kernel_to_fpregs(&fpu->state);
	fpregs_activate(fpu);
}

/*
 * FPU state switching for scheduling.
 *
@@ -522,14 +534,8 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 */
/*
 * Load the incoming task's FPU register state on this CPU.
 *
 * Fix: the scraped diff left both the removed pre-patch body and the
 * added post-patch call in place, so the state was restored/activated
 * twice per switch. This is the post-patch form: delegate to
 * __fpregs_load_activate(), which restores and activates only when
 * the in-register state is not already valid for @new_fpu on @cpu.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU))
		__fpregs_load_activate(new_fpu, cpu);
}

/*