Commit 9a7513cf authored by Al Viro
Browse files

frv: switch to use of fixup_exception()



Massage frv search_exception_table() to
	a) take a pt_regs pointer as an explicit argument
	b) update ->pc on success
Simplifies callers a bit and allows us to convert to the generic extable.h,
while we are at it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent d597580d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line

generic-y += clkdev.h
generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+1 −22
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <asm/segment.h>
#include <asm/sections.h>
#include <asm/extable.h>

#define __ptr(x) ((unsigned long __force *)(x))

@@ -59,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);


/*
 * These are the main single-value transfer routines.  They automatically
@@ -314,6 +295,4 @@ extern long strnlen_user(const char __user *src, long count);

#define strlen_user(str) strnlen_user(str, 32767)

extern unsigned long search_exception_table(unsigned long addr);

#endif /* _ASM_UACCESS_H */
+1 −6
Original line number Diff line number Diff line
@@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
	siginfo_t info;

#ifdef CONFIG_MMU
	unsigned long fixup;

	fixup = search_exception_table(__frame->pc);
	if (fixup) {
		__frame->pc = fixup;
	if (fixup_exception(__frame))
		return;
	}
#endif

	die_if_kernel("-- Memory Access Exception --\n"
+13 −14
Original line number Diff line number Diff line
@@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand
extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
extern spinlock_t modlist_lock;


/*****************************************************************************/
/*
 * see if there's a fixup handler available to deal with a kernel fault
 */
unsigned long search_exception_table(unsigned long pc)
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *extab;
	unsigned long pc = regs->pc;

	/* determine if the fault lay during a memcpy_user or a memset_user */
	if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
	if (regs->lr == (unsigned long) &__memset_user_error_lr &&
	    (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
	    ) {
		/* the fault occurred in a protected memset
		 * - we search for the return address (in LR) instead of the program counter
		 * - it was probably during a clear_user()
		 */
		return (unsigned long) &__memset_user_error_handler;
		regs->pc = (unsigned long) &__memset_user_error_handler;
		return 1;
	}

	if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
	if (regs->lr == (unsigned long) &__memcpy_user_error_lr &&
	    (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
	    ) {
		/* the fault occurred in a protected memset
		 * - we search for the return address (in LR) instead of the program counter
		 * - it was probably during a copy_to/from_user()
		 */
		return (unsigned long) &__memcpy_user_error_handler;
		regs->pc = (unsigned long) &__memcpy_user_error_handler;
		return 1;
	}

	extab = search_exception_tables(pc);
	if (extab)
		return extab->fixup;
	if (extab) {
		regs->pc = extab->fixup;
		return 1;
	}

	return 0;

} /* end search_exception_table() */
}
+2 −4
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long _pme, lrai, lrad, fixup;
	unsigned long _pme, lrai, lrad;
	unsigned long flags = 0;
	siginfo_t info;
	pgd_t *pge;
@@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear

 no_context:
	/* are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(__frame->pc)) != 0) {
		__frame->pc = fixup;
	if (fixup_exception(__frame))
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to