Commit 2823e83a authored by Peter Zijlstra, committed by Thomas Gleixner

x86/entry: __always_inline CR2 for noinstr

vmlinux.o: warning: objtool: exc_page_fault()+0x9: call to read_cr2() leaves .noinstr.text section
vmlinux.o: warning: objtool: exc_page_fault()+0x24: call to prefetchw() leaves .noinstr.text section
vmlinux.o: warning: objtool: exc_page_fault()+0x21: call to kvm_handle_async_pf.isra.0() leaves .noinstr.text section
vmlinux.o: warning: objtool: exc_nmi()+0x1cc: call to write_cr2() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200603114052.243227806@infradead.org

parent 6eebad1a
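
The warnings above come from objtool, which verifies that functions placed in the .noinstr.text section never emit calls into ordinary, instrumentable text. A plain "static inline" is only a hint, so the compiler may still generate these small helpers out of line and produce exactly such a call; marking them __always_inline removes that freedom. Below is a minimal sketch of the interaction, using the hypothetical names my_cr2_read() and my_handler(), which are not part of this commit:

#include <linux/compiler_types.h>	/* noinstr, __always_inline */
#include <asm/ptrace.h>			/* struct pt_regs */

/*
 * Sketch only: my_cr2_read() and my_handler() are hypothetical names
 * used to illustrate the noinstr/__always_inline interaction.
 */
static __always_inline unsigned long my_cr2_read(void)
{
	unsigned long val;

	/* Folded into the caller; no call instruction is emitted. */
	asm volatile("mov %%cr2,%0" : "=r" (val));
	return val;
}

noinstr void my_handler(struct pt_regs *regs)
{
	/*
	 * noinstr places my_handler() in .noinstr.text. Because
	 * my_cr2_read() is __always_inline, the CR2 read is expanded
	 * inside the section and objtool sees no outgoing call; with a
	 * plain "static inline" the compiler could emit my_cr2_read()
	 * out of line and trigger "leaves .noinstr.text section".
	 */
	unsigned long address = my_cr2_read();

	(void)address;
	(void)regs;
}

Forcing the inline also keeps these helpers usable from ordinary, instrumentable code, which moving them wholesale into .noinstr.text would not.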
arch/x86/include/asm/kvm_para.h  +1 −1
@@ -141,7 +141,7 @@ static inline void kvm_disable_steal_time(void)
 	return;
 }
 
-static inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
 	return false;
 }
arch/x86/include/asm/processor.h  +1 −1
@@ -823,7 +823,7 @@ static inline void prefetch(const void *x)
  * Useful for spinlocks to avoid one state transition in the
  * cache coherency protocol:
  */
-static inline void prefetchw(const void *x)
+static __always_inline void prefetchw(const void *x)
 {
 	alternative_input(BASE_PREFETCH, "prefetchw %P1",
 			  X86_FEATURE_3DNOWPREFETCH,
arch/x86/include/asm/special_insns.h  +4 −4
@@ -28,14 +28,14 @@ static inline unsigned long native_read_cr0(void)
 	return val;
 }
 
-static inline unsigned long native_read_cr2(void)
+static __always_inline unsigned long native_read_cr2(void)
 {
 	unsigned long val;
 	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
-static inline void native_write_cr2(unsigned long val)
+static __always_inline void native_write_cr2(unsigned long val)
 {
 	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
@@ -160,12 +160,12 @@ static inline void write_cr0(unsigned long x)
 	native_write_cr0(x);
 }
 
-static inline unsigned long read_cr2(void)
+static __always_inline unsigned long read_cr2(void)
 {
 	return native_read_cr2();
 }
 
-static inline void write_cr2(unsigned long x)
+static __always_inline void write_cr2(unsigned long x)
 {
 	native_write_cr2(x);
 }