Commit e4a81bfc authored by Ingo Molnar
Browse files

x86/fpu: Rename fpu::fpstate_active to fpu::initialized

The x86 FPU code used to have a complex state machine where both the FPU
registers and the FPU state context could be 'active' (or inactive)
independently of each other - which enabled features like lazy FPU restore.

Much of this complexity is gone in the current code: now we basically can
have FPU-less tasks (kernel threads) that don't use (and save/restore) FPU
state at all, plus full FPU users that save/restore directly with no laziness
whatsoever.

But the fpu::fpstate_active flag still carries bits of the old complexity - meanwhile
this flag has become a simple flag that shows whether the FPU context saving
area in the thread struct is initialized and used, or not.

Rename it to fpu::initialized to express this simplicity in the name as well.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-30-mingo@kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 685c930d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
		 ksig->ka.sa.sa_restorer)
		sp = (unsigned long) ksig->ka.sa.sa_restorer;

	if (fpu->fpstate_active) {
	if (fpu->initialized) {
		unsigned long fx_aligned, math_size;

		sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+2 −2
Original line number Diff line number Diff line
@@ -527,7 +527,7 @@ static inline void fpregs_activate(struct fpu *fpu)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (old_fpu->fpstate_active) {
	if (old_fpu->initialized) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
@@ -550,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
		       new_fpu->fpstate_active;
		       new_fpu->initialized;

	if (preload) {
		if (!fpregs_state_valid(new_fpu, cpu))
+3 −3
Original line number Diff line number Diff line
@@ -293,13 +293,13 @@ struct fpu {
	unsigned int			last_cpu;

	/*
	 * @fpstate_active:
	 * @initialized:
	 *
	 * This flag indicates whether this context is active: if the task
	 * This flag indicates whether this context is initialized: if the task
	 * is not running then we can restore from this context, if the task
	 * is running then we should save into this context.
	 */
	unsigned char			fpstate_active;
	unsigned char			initialized;

	/*
	 * @state:
+4 −4
Original line number Diff line number Diff line
@@ -12,22 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,

	TP_STRUCT__entry(
		__field(struct fpu *, fpu)
		__field(bool, fpstate_active)
		__field(bool, initialized)
		__field(u64, xfeatures)
		__field(u64, xcomp_bv)
		),

	TP_fast_assign(
		__entry->fpu		= fpu;
		__entry->fpstate_active	= fpu->fpstate_active;
		__entry->initialized	= fpu->initialized;
		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
		}
	),
	TP_printk("x86/fpu: %p fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
	TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
			__entry->fpu,
			__entry->fpstate_active,
			__entry->initialized,
			__entry->xfeatures,
			__entry->xcomp_bv
	)
+12 −12
Original line number Diff line number Diff line
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)

	kernel_fpu_disable();

	if (fpu->fpstate_active) {
	if (fpu->initialized) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpstate_active)
	if (fpu->initialized)
		copy_kernel_to_fpregs(&fpu->state);

	kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)

	preempt_disable();
	trace_x86_fpu_before_save(fpu);
	if (fpu->fpstate_active) {
	if (fpu->initialized) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
@@ -191,7 +191,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
	if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -240,13 +240,13 @@ void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
	if (!fpu->initialized) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
		fpu->initialized = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
@@ -271,13 +271,13 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
	if (fpu == &current->thread.fpu) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
		if (!fpu->initialized) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
			fpu->initialized = 1;
		}
	}
}
@@ -303,7 +303,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
	if (fpu->initialized) {
		/* Invalidate any lazy state: */
		__fpu_invalidate_fpregs_state(fpu);
	} else {
@@ -312,7 +312,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
		fpu->initialized = 1;
	}
}

@@ -354,7 +354,7 @@ void fpu__drop(struct fpu *fpu)
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		if (fpu->fpstate_active) {
		if (fpu->initialized) {
			/* Ignore delayed exceptions from user space */
			asm volatile("1: fwait\n"
				     "2:\n"
@@ -363,7 +363,7 @@ void fpu__drop(struct fpu *fpu)
		}
	}

	fpu->fpstate_active = 0;
	fpu->initialized = 0;

	trace_x86_fpu_dropped(fpu);

Loading