Commit 33cb5243 authored by Harvey Harrison, committed by Ingo Molnar

x86: cosmetic fixes fault_{32|64}.c



First step towards unifying these files.
- Checkpatch trailing whitespace fixes
- Checkpatch indentation of switch statement fixes
- Checkpatch single statement ifs need no braces fixes
- Checkpatch consistent spacing after comma fixes
- Introduce defines for page fault error bits from X86_64 and add useful
  comment from X86_32.  Use these defines in X86_32 where obvious (a short
  illustrative sketch follows below).
- Unify comments between 32|64 bit
- Small ifdef movement for CONFIG_KPROBES in notify_page_fault()
- Introduce X86_64 only case statement

No Functional Changes.
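
As a quick illustration of the new names (a standalone sketch, not part of
the patch; describe_fault() is a hypothetical helper), the PF_* defines turn
the raw error-code masks into readable tests:

#include <stdio.h>

/* Page fault error code bits, as defined by this patch */
#define PF_PROT		(1<<0)	/* 0: no page found, 1: protection fault */
#define PF_WRITE	(1<<1)	/* 0: read, 1: write */
#define PF_USER		(1<<2)	/* 0: kernel, 1: user-mode */
#define PF_RSVD		(1<<3)	/* 1: use of reserved bit detected */
#define PF_INSTR	(1<<4)	/* 1: fault was an instruction fetch */

/* Hypothetical helper: decode an error code the way the defines read. */
static void describe_fault(unsigned long error_code)
{
	printf("%s %s, %s%s%s\n",
	       (error_code & PF_USER)  ? "user-mode" : "kernel",
	       (error_code & PF_WRITE) ? "write"     : "read",
	       (error_code & PF_PROT)  ? "protection fault" : "no page found",
	       (error_code & PF_RSVD)  ? ", reserved bit set" : "",
	       (error_code & PF_INSTR) ? ", instruction fetch" : "");
}

int main(void)
{
	describe_fault(PF_USER | PF_WRITE);	/* replaces: error_code & 4, & 2 */
	return 0;
}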

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 1d16b53e
arch/x86/mm/fault_32.c  +83 −65
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/i386/mm/fault.c
- *
  *  Copyright (C) 1995  Linus Torvalds
  */
 
@@ -30,11 +28,25 @@
 #include <asm/desc.h>
 #include <asm/segment.h>
 
+/*
+ * Page fault error code bits
+ *	bit 0 == 0 means no page found, 1 means protection fault
+ *	bit 1 == 0 means read, 1 means write
+ *	bit 2 == 0 means kernel, 1 means user-mode
+ *	bit 3 == 1 means use of reserved bit detected
+ *	bit 4 == 1 means fault was an instruction fetch
+ */
+#define PF_PROT	(1<<0)
+#define PF_WRITE	(1<<1)
+#define PF_USER	(1<<2)
+#define PF_RSVD	(1<<3)
+#define PF_INSTR	(1<<4)
+
 extern void die(const char *, struct pt_regs *, long);
 
-#ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs)
 {
+#ifdef CONFIG_KPROBES
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
@@ -46,13 +58,10 @@ static inline int notify_page_fault(struct pt_regs *regs)
 	}
 
 	return ret;
-}
 #else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
 	return 0;
-}
 #endif
+}
 
 /*
  * Return EIP plus the CS segment base.  The segment limit is also
@@ -116,9 +125,9 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 	/* Decode the code segment base from the descriptor */
 	base = get_desc_base((struct desc_struct *)desc);
 
-	if (seg & (1<<2)) {
+	if (seg & (1<<2))
 		mutex_unlock(&current->mm->context.lock);
-	} else
+	else
 		put_cpu();
 
 	/* Adjust EIP and segment limit, and clamp at the kernel limit.
@@ -158,16 +167,32 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
 		switch (instr_hi) {
 		case 0x20:
 		case 0x30:
-			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+			/*
+			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
+			 * In X86_64 long mode, the CPU will signal invalid
+			 * opcode if some of these prefixes are present so
+			 * X86_64 will never get here anyway
+			 */
 			scan_more = ((instr_lo & 7) == 0x6);
 			break;
-			
+#ifdef CONFIG_X86_64
+		case 0x40:
+			/*
+			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+			 * Need to figure out under what instruction mode the
+			 * instruction was issued. Could check the LDT for lm,
+			 * but for now it's good enough to assume that long
+			 * mode only uses well known segments or kernel.
+			 */
+			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
+			break;
+#endif
 		case 0x60:
 			/* 0x64 thru 0x67 are valid prefixes in all modes. */
 			scan_more = (instr_lo & 0xC) == 0x4;
 			break;
 		case 0xF0:
-			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
 			scan_more = !instr_lo || (instr_lo>>1) == 1;
 			break;
 		case 0x00:
@@ -284,13 +309,6 @@ int show_unhandled_signals = 1;
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
- *
- * error_code:
- *	bit 0 == 0 means no page found, 1 means protection fault
- *	bit 1 == 0 means read, 1 means write
- *	bit 2 == 0 means kernel, 1 means user-mode
- *	bit 3 == 1 means use of reserved bit detected
- *	bit 4 == 1 means fault was an instruction fetch
  */
 void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
@@ -350,7 +368,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
-	 * atomic region then we must not take the fault..
+	 * atomic region then we must not take the fault.
 	 */
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
@@ -371,7 +389,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * thus avoiding the deadlock.
 	 */
 	if (!down_read_trylock(&mm->mmap_sem)) {
-		if ((error_code & 4) == 0 &&
+		if ((error_code & PF_USER) == 0 &&
 		    !search_exception_tables(regs->ip))
 			goto bad_area_nosemaphore;
 		down_read(&mm->mmap_sem);
@@ -384,7 +402,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if (error_code & 4) {
+	if (error_code & PF_USER) {
 		/*
 		 * Accessing the stack below %sp is always a bug.
 		 * The large cushion allows instructions like enter
@@ -403,15 +421,15 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 good_area:
 	si_code = SEGV_ACCERR;
 	write = 0;
-	switch (error_code & 3) {
+	switch (error_code & (PF_PROT|PF_WRITE)) {
 	default:	/* 3: write, present */
 		/* fall through */
-		case 2:		/* write, not present */
+	case PF_WRITE:		/* write, not present */
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 		write++;
 		break;
-		case 1:		/* read, present */
+	case PF_PROT:		/* read, present */
 		goto bad_area;
 	case 0:			/* read, not present */
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
@@ -457,7 +475,7 @@ bad_area:
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & 4) {
+	if (error_code & PF_USER) {
 		/*
 		 * It's possible to have interrupts off here.
 		 */
@@ -605,7 +623,7 @@ do_sigbus:
 	up_read(&mm->mmap_sem);
 
 	/* Kernel mode? Handle exceptions or die */
-	if (!(error_code & 4))
+	if (!(error_code & PF_USER))
 		goto no_context;
 
 	/* User space => ok to do another page fault */
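
The subtlest rename above is the good_area switch in fault_32.c. As a
standalone sketch (access_ok_for_fault() is a hypothetical helper; the VM_*
values are copied from include/linux/mm.h), the four cases of
error_code & (PF_PROT|PF_WRITE) decode as:

/* VM_* flag values as in include/linux/mm.h */
#define VM_READ		0x0001
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004

#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)

/* Hypothetical standalone rewrite of the good_area access check:
 * returns nonzero if the VMA permits the faulting access. */
static int access_ok_for_fault(unsigned long error_code, unsigned long vm_flags)
{
	switch (error_code & (PF_PROT | PF_WRITE)) {
	default:	/* PF_PROT|PF_WRITE: write, present */
			/* fall through */
	case PF_WRITE:	/* write, not present */
		return !!(vm_flags & VM_WRITE);
	case PF_PROT:	/* read, present: a protection violation */
		return 0;
	case 0:		/* read, not present */
		return !!(vm_flags & (VM_READ | VM_EXEC | VM_WRITE));
	}
}
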
arch/x86/mm/fault_64.c  +77 −74
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/x86-64/mm/fault.c
- *
  *  Copyright (C) 1995  Linus Torvalds
  *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
  */
@@ -33,16 +31,23 @@
 #include <asm/proto.h>
 #include <asm-generic/sections.h>
 
-/* Page fault error code bits */
-#define PF_PROT	(1<<0)		/* or no page found */
+/*
+ * Page fault error code bits
+ *	bit 0 == 0 means no page found, 1 means protection fault
+ *	bit 1 == 0 means read, 1 means write
+ *	bit 2 == 0 means kernel, 1 means user-mode
+ *	bit 3 == 1 means use of reserved bit detected
+ *	bit 4 == 1 means fault was an instruction fetch
+ */
+#define PF_PROT	(1<<0)
 #define PF_WRITE	(1<<1)
 #define PF_USER	(1<<2)
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
-#ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs)
 {
+#ifdef CONFIG_KPROBES
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
@@ -54,13 +59,10 @@ static inline int notify_page_fault(struct pt_regs *regs)
 	}
 
 	return ret;
-}
 #else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
 	return 0;
-}
 #endif
+}
 
 /* Sometimes the CPU reports invalid exceptions on prefetch.
    Check that here and ignore.
@@ -98,23 +100,26 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 		switch (instr_hi) {
 		case 0x20:
 		case 0x30:
-			/* Values 0x26,0x2E,0x36,0x3E are valid x86
-			   prefixes.  In long mode, the CPU will signal
-			   invalid opcode if some of these prefixes are
-			   present so we will never get here anyway */
+			/*
+			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
+			 * In X86_64 long mode, the CPU will signal invalid
+			 * opcode if some of these prefixes are present so
+			 * X86_64 will never get here anyway
+			 */
 			scan_more = ((instr_lo & 7) == 0x6);
 			break;
-			
+#ifdef CONFIG_X86_64
 		case 0x40:
-			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
-			   Need to figure out under what instruction mode the
-			   instruction was issued ... */
-			/* Could check the LDT for lm, but for now it's good
-			   enough to assume that long mode only uses well known
-			   segments or kernel. */
+			/*
+			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+			 * Need to figure out under what instruction mode the
+			 * instruction was issued. Could check the LDT for lm,
+			 * but for now it's good enough to assume that long
+			 * mode only uses well known segments or kernel.
+			 */
 			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
 			break;
-			
+#endif
 		case 0x60:
 			/* 0x64 thru 0x67 are valid prefixes in all modes. */
 			scan_more = (instr_lo & 0xC) == 0x4;
@@ -360,8 +365,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		pgtable_bad(address, regs, error_code);
 
 	/*
-	 * If we're in an interrupt or have no user
-	 * context, we must not take the fault..
+	 * If we're in an interrupt, have no user context or are running in an
+	 * atomic region then we must not take the fault.
 	 */
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
@@ -403,7 +408,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if (error_code & 4) {
+	if (error_code & PF_USER) {
 		/* Allow userspace just enough access below the stack pointer
 		 * to let the 'enter' instruction work.
 		 */
@@ -505,11 +510,9 @@ bad_area_nosemaphore:
 	}
 
 no_context:
-	
 	/* Are we prepared to handle this kernel fault?  */
-	if (fixup_exception(regs)) {
+	if (fixup_exception(regs))
 		return;
-	}
 
 	/*
 	 * Hall of shame of CPU/BIOS bugs.
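
The CONFIG_KPROBES ifdef movement, identical in both files, is easiest to
see condensed (a sketch of the two shapes, with the kprobe handling elided
to a comment):

/* Before: two complete definitions selected by the ifdef */
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;
	/* ... kprobe_running()/kprobe_fault_handler() logic ... */
	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

/* After: a single definition, with only the body under the ifdef */
static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;
	/* ... kprobe_running()/kprobe_fault_handler() logic ... */
	return ret;
#else
	return 0;
#endif
}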