Commit e1d74fbe authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-linus' of git://github.com/openrisc/linux

Pull OpenRISC updates from Stafford Horne:
 "A few patches all over the place during this cycle, mostly bug and
  sparse warning fixes for OpenRISC, but a few enhancements too. Note,
  there are 2 non OpenRISC specific fixups.

  Non OpenRISC fixes:

   - In init we need to align the init_task correctly to fix an issue
     with MUTEX_FLAGS, reviewed by Peter Z. No one picked this up so I
     kept it on my tree.

   - In asm-generic/io.h I fixed up some sparse warnings, OK'd by Arnd.
     Arnd asked to merge it via my tree.

  OpenRISC fixes:

   - Many fixes for OpenRISC sparse warnings.

   - Add support for OpenRISC SMP TLB flushing rather than always flushing
     the entire TLB on every CPU.

   - Fix bug when dumping stack via /proc/xxx/stack of user threads"

* tag 'for-linus' of git://github.com/openrisc/linux:
  openrisc: uaccess: Add user address space check to access_ok
  openrisc: signal: Fix sparse address space warnings
  openrisc: uaccess: Remove unused macro __addr_ok
  openrisc: uaccess: Use static inline function in access_ok
  openrisc: uaccess: Fix sparse address space warnings
  openrisc: io: Fixup defines and move include to the end
  asm-generic/io.h: Fix sparse warnings on big-endian architectures
  openrisc: Implement proper SMP tlb flushing
  openrisc: Fix oops caused when dumping stack
  openrisc: Add support for external initrd images
  init: Align init_task to avoid conflict with MUTEX_FLAGS
  openrisc: fix __user in raw_copy_to_user()'s prototype
parents 7fca4dee 55b2662e
Loading
Loading
Loading
Loading
+7 −2
Original line number Diff line number Diff line
@@ -14,6 +14,8 @@
#ifndef __ASM_OPENRISC_IO_H
#define __ASM_OPENRISC_IO_H

#include <linux/types.h>

/*
 * PCI: can we really do 0 here if we have no port IO?
 */
@@ -25,9 +27,12 @@
#define PIO_OFFSET		0
#define PIO_MASK		0

#include <asm-generic/io.h>

#define ioremap ioremap
void __iomem *ioremap(phys_addr_t offset, unsigned long size);

#define iounmap iounmap
extern void iounmap(void *addr);

#include <asm-generic/io.h>

#endif
+12 −11
Original line number Diff line number Diff line
@@ -48,16 +48,17 @@
/* Ensure that the range from addr to addr+size is all within the process'
 * address space
 */
#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	const mm_segment_t fs = get_fs();

/* Ensure that addr is below task's addr_limit */
#define __addr_ok(addr) ((unsigned long) addr < get_fs())
	return size <= fs && addr <= (fs - size);
}

#define access_ok(addr, size)						\
({ 									\
	unsigned long __ao_addr = (unsigned long)(addr);		\
	unsigned long __ao_size = (unsigned long)(size);		\
	__range_ok(__ao_addr, __ao_size);				\
	__chk_user_ptr(addr);						\
	__range_ok((unsigned long)(addr), (size));			\
})

/*
@@ -100,7 +101,7 @@ extern long __put_user_bad(void);
#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (access_ok(__pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
@@ -173,7 +174,7 @@ struct __large_struct {
#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) * __gu_addr = (ptr);			\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(__gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
@@ -241,17 +242,17 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long size)
	return __copy_tofrom_user(to, (__force const void *)from, size);
}
static inline unsigned long
raw_copy_to_user(void *to, const void __user *from, unsigned long size)
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
	return __copy_tofrom_user((__force void *)to, from, size);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

extern unsigned long __clear_user(void *addr, unsigned long size);
extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline __must_check unsigned long
clear_user(void *addr, unsigned long size)
clear_user(void __user *addr, unsigned long size)
{
	if (likely(access_ok(addr, size)))
		size = __clear_user(addr, size);
+5 −3
Original line number Diff line number Diff line
@@ -292,13 +292,15 @@ void __init setup_arch(char **cmdline_p)
	init_mm.brk = (unsigned long)_end;

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = (unsigned long)&__initrd_start;
	initrd_end = (unsigned long)&__initrd_end;
	if (initrd_start == initrd_end) {
		printk(KERN_INFO "Initial ramdisk not found\n");
		initrd_start = 0;
		initrd_end = 0;
	}
	} else {
		printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *)(initrd_start), initrd_end - initrd_start);
		initrd_below_start_ok = 1;
	}
#endif

	/* setup memblock allocator */
+7 −7
Original line number Diff line number Diff line
@@ -68,7 +68,7 @@ static int restore_sigcontext(struct pt_regs *regs,

asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->sp;
	sigset_t set;

	/*
@@ -76,7 +76,7 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
	 * then frame should be dword aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (((long)frame) & 3)
	if (((unsigned long)frame) & 3)
		goto badframe;

	if (!access_ok(frame, sizeof(*frame)))
@@ -151,7 +151,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe *frame;
	struct rt_sigframe __user *frame;
	unsigned long return_ip;
	int err = 0;

@@ -181,10 +181,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
		l.ori r11,r0,__NR_sigreturn
		l.sys 1
	 */
	err |= __put_user(0xa960,             (short *)(frame->retcode + 0));
	err |= __put_user(__NR_rt_sigreturn,  (short *)(frame->retcode + 2));
	err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
	err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
	err |= __put_user(0xa960,             (short __user *)(frame->retcode + 0));
	err |= __put_user(__NR_rt_sigreturn,  (short __user *)(frame->retcode + 2));
	err |= __put_user(0x20000001, (unsigned long __user *)(frame->retcode + 4));
	err |= __put_user(0x15000000, (unsigned long __user *)(frame->retcode + 8));

	if (err)
		return -EFAULT;
+77 −8
Original line number Diff line number Diff line
@@ -219,30 +219,99 @@ static inline void ipi_flush_tlb_all(void *ignored)
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/*
 * FIXME: implement proper functionality instead of flush_tlb_all.
 * *But*, as things currently stands, the local_tlb_flush_* functions will
 * all boil down to local_tlb_flush_all anyway.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
}

/* Instruction cache invalidate - performed on each cpu */
Loading