Commit 0a319ef7 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'x86-fpu-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 FPU updates from Ingo Molnar:
 "Most of the changes here related to 'XSAVES supervisor state' support,
  which is a feature that allows kernel-only data to be automatically
  saved/restored by the FPU context switching code.

  CPU features that can be supported this way are Intel PT, 'PASID' and
  CET features"

* tag 'x86-fpu-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu/xstate: Restore supervisor states for signal return
  x86/fpu/xstate: Preserve supervisor states for the slow path in __fpu__restore_sig()
  x86/fpu: Introduce copy_supervisor_to_kernel()
  x86/fpu/xstate: Update copy_kernel_to_xregs_err() for supervisor states
  x86/fpu/xstate: Update sanitize_restored_xstate() for supervisor xstates
  x86/fpu/xstate: Define new functions for clearing fpregs and xstates
  x86/fpu/xstate: Introduce XSAVES supervisor states
  x86/fpu/xstate: Separate user and supervisor xfeatures mask
  x86/fpu/xstate: Define new macros for supervisor and user xstates
  x86/fpu/xstate: Rename validate_xstate_header() to validate_user_xstate_header()
parents eff5ddad 55e00fb6
Loading
Loading
Loading
Loading
+7 −3
Original line number Diff line number Diff line
@@ -31,7 +31,8 @@ extern void fpu__save(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear(struct fpu *fpu);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

@@ -92,7 +93,7 @@ static inline void fpstate_init_xstate(struct xregs_state *xsave)
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
@@ -399,6 +400,9 @@ static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
	u32 hmask = mask >> 32;
	int err;

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
+37 −15
Original line number Diff line number Diff line
@@ -21,11 +21,8 @@
#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supervisor features */
#define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT)

/* All currently supported features */
#define XCNTXT_MASK		(XFEATURE_MASK_FP | \
/* All currently supported user features */
#define XFEATURE_MASK_USER_SUPPORTED (XFEATURE_MASK_FP | \
				      XFEATURE_MASK_SSE | \
				      XFEATURE_MASK_YMM | \
				      XFEATURE_MASK_OPMASK | \
@@ -35,13 +32,37 @@
				      XFEATURE_MASK_BNDREGS | \
				      XFEATURE_MASK_BNDCSR)

/* All currently supported supervisor features */
#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (0)

/*
 * Unsupported supervisor features. When a supervisor feature in this mask is
 * supported in the future, move it to the supported supervisor feature mask.
 */
#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT)

/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
				      XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern u64 xfeatures_mask;
extern u64 xfeatures_mask_all;

/*
 * Return the subset of all enabled xfeatures (xfeatures_mask_all) that
 * are kernel-supported supervisor states.
 */
static inline u64 xfeatures_mask_supervisor(void)
{
	return xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

/*
 * Return the subset of all enabled xfeatures (xfeatures_mask_all) that
 * are user (non-supervisor) states.
 */
static inline u64 xfeatures_mask_user(void)
{
	return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
}

extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void __init update_regset_xstate_info(unsigned int size,
@@ -54,8 +75,9 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
void copy_supervisor_to_kernel(struct xregs_state *xsave);

/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
extern int validate_xstate_header(const struct xstate_header *hdr);
int validate_user_xstate_header(const struct xstate_header *hdr);

#endif
+35 −18
Original line number Diff line number Diff line
@@ -291,15 +291,13 @@ void fpu__drop(struct fpu *fpu)
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void copy_init_fpstate_to_fpregs(void)
static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
{
	fpregs_lock();

	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
		copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
@@ -307,9 +305,6 @@ static inline void copy_init_fpstate_to_fpregs(void)

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();

	fpregs_mark_activate();
	fpregs_unlock();
}

/*
@@ -318,18 +313,40 @@ static inline void copy_init_fpstate_to_fpregs(void)
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
static void fpu__clear(struct fpu *fpu, bool user_only)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpu__drop(fpu);

	/*
	 * Make sure fpstate is cleared and initialized.
	 */
		fpu__initialize(fpu);
	if (static_cpu_has(X86_FEATURE_FPU))
		copy_init_fpstate_to_fpregs();
		return;
	}

	fpregs_lock();

	if (user_only) {
		if (!fpregs_state_valid(fpu, smp_processor_id()) &&
		    xfeatures_mask_supervisor())
			copy_kernel_to_xregs(&fpu->state.xsave,
					     xfeatures_mask_supervisor());
		copy_init_fpstate_to_fpregs(xfeatures_mask_user());
	} else {
		copy_init_fpstate_to_fpregs(xfeatures_mask_all);
	}

	fpregs_mark_activate();
	fpregs_unlock();
}

/*
 * Reset only the user xstates of @fpu to their init values; the
 * user_only == true path in fpu__clear() restores supervisor states
 * from the task's xsave buffer rather than wiping them.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	fpu__clear(fpu, true);
}

/*
 * Reset every FPU state of @fpu — user and supervisor alike — to the
 * init fpstate (fpu__clear() with user_only == false uses
 * xfeatures_mask_all).
 */
void fpu__clear_all(struct fpu *fpu)
{
	fpu__clear(fpu, false);
}

/*
+2 −1
Original line number Diff line number Diff line
@@ -224,7 +224,8 @@ static void __init fpu__init_system_xstate_size_legacy(void)
 */
u64 __init fpu__get_supported_xfeatures_mask(void)
{
	return XCNTXT_MASK;
	return XFEATURE_MASK_USER_SUPPORTED |
	       XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

/* Legacy code to initialize eager fpu mode. */
+1 −1
Original line number Diff line number Diff line
@@ -139,7 +139,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
	} else {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
		if (!ret)
			ret = validate_xstate_header(&xsave->header);
			ret = validate_user_xstate_header(&xsave->header);
	}

	/*
Loading