Commit dd1c3ed7 authored by Linus Torvalds

Merge tag 'xtensa-20190307' of git://github.com/jcmvbkbc/linux-xtensa

Pull xtensa updates from Max Filippov:

 - use generic spinlock/rwlock implementations

 - clean up IPI processing

 - document boot parameter passing to the kernel

 - fix get_wchan

 - various cleanups in time.c, process.c, traps.c and thread_info.h

* tag 'xtensa-20190307' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: simplify trap_init
  xtensa: drop unused definitions
  xtensa: fix get_wchan
  xtensa: use generic spinlock/rwlock implementation
  xtensa: provide xchg for sizes 1 and 2
  xtensa: clean up arch/xtensa/kernel/time.c
  xtensa: SMP: rework IPI processing
  xtensa: document boot parameter passing
parents 6c3ac113 60deebe6
Documentation/xtensa/booting.txt  +19 −0
Passing boot parameters to the kernel.

Boot parameters are represented as a TLV (tag-length-value) list in memory. See
arch/xtensa/include/asm/bootparam.h for the definition of the bp_tag structure
and the tag value constants. The first entry in the list must have type
BP_TAG_FIRST and the last entry must have type BP_TAG_LAST. The address of the
first list entry is passed to the kernel in register a2. The address type
depends on the MMU type (a construction sketch follows the list below):
- For configurations without MMU, with region protection, or with MPU, the
  address must be the physical address.
- For configurations with region translation MMU or with MMUv3 and CONFIG_MMU=n,
  the address must be a valid address in the current mapping. The kernel will
  not change the mapping on its own.
- For configurations with MMUv2 the address must be a virtual address in the
  default virtual mapping (0xd0000000..0xffffffff).
- For configurations with MMUv3 and CONFIG_MMU=y the address may be either a
  virtual or physical address. In either case it must be within the default
  virtual mapping. It is considered physical if it is within the range of
  physical addresses covered by the default KSEG mapping (XCHAL_KSEG_PADDR..
  XCHAL_KSEG_PADDR + XCHAL_KSEG_SIZE), otherwise it is considered virtual.
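
A minimal bootloader-side sketch of building such a list. The bp_tag layout
(id, size, flexible data) and BP_TAG_FIRST/BP_TAG_LAST follow the description
above; the tag values, BP_TAG_COMMAND_LINE, the 4-byte record alignment, and
the helper names are assumptions for illustration only -- consult
arch/xtensa/include/asm/bootparam.h for the authoritative definitions.

	#include <stdint.h>
	#include <string.h>

	/* Tag values as recalled from bootparam.h; verify against the header. */
	#define BP_TAG_COMMAND_LINE	0x1001	/* assumed for illustration */
	#define BP_TAG_FIRST		0x7B0B
	#define BP_TAG_LAST		0x7E0B

	struct bp_tag {
		uint16_t id;	/* tag type, e.g. BP_TAG_FIRST */
		uint16_t size;	/* payload size in bytes, excluding this header */
		uint8_t data[];	/* payload */
	};

	/* Append one record; records are assumed packed back to back,
	 * each rounded up to 4-byte alignment. */
	static uint8_t *put_tag(uint8_t *p, uint16_t id,
				const void *data, uint16_t size)
	{
		struct bp_tag *tag = (struct bp_tag *)p;

		tag->id = id;
		tag->size = size;
		if (size)
			memcpy(tag->data, data, size);
		return p + sizeof(*tag) + ((size + 3u) & ~3u);
	}

	/* Build BP_TAG_FIRST .. BP_TAG_LAST in buf; pass buf's address
	 * (physical or virtual, per the rules above) to the kernel in a2.
	 * Note: the real BP_TAG_FIRST record may carry a version word --
	 * see bootparam.h. */
	void build_bootparams(uint8_t *buf, const char *cmdline)
	{
		uint8_t *p = buf;

		p = put_tag(p, BP_TAG_FIRST, NULL, 0);
		p = put_tag(p, BP_TAG_COMMAND_LINE, cmdline,
			    (uint16_t)(strlen(cmdline) + 1));
		put_tag(p, BP_TAG_LAST, NULL, 0);
	}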
arch/xtensa/Kconfig  +2 −0
@@ -5,6 +5,8 @@ config XTENSA
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
	select ARCH_USE_QUEUED_RWLOCKS
	select ARCH_USE_QUEUED_SPINLOCKS
	select ARCH_WANT_FRAME_POINTERS
	select ARCH_WANT_IPC_PARSE_VERSION
	select BUILDTIME_EXTABLE_SORT
arch/xtensa/include/asm/Kbuild  +2 −0
@@ -23,6 +23,8 @@ generic-y += mm-arch-hooks.h
generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += sections.h
generic-y += socket.h
arch/xtensa/include/asm/cmpxchg.h  +32 −4
@@ -13,6 +13,7 @@

#ifndef __ASSEMBLY__

#include <linux/bits.h>
#include <linux/stringify.h>

/*
@@ -138,6 +139,28 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

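/*
 * Emulate 1- and 2-byte exchange with a 32-bit compare-and-swap loop on
 * the naturally aligned word containing the target: compute the target's
 * bit offset within that word (endian-dependent), splice the new value
 * in, and retry until __cmpxchg_u32() succeeds without interference.
 */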
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 oldv, newv;
	u32 ret;

	do {
		oldv = READ_ONCE(*p);
		ret = (oldv & bitmask) >> bitoff;
		newv = (oldv & ~bitmask) | (x << bitoff);
	} while (__cmpxchg_u32(p, oldv, newv) != oldv);

	return ret;
}

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
@@ -150,12 +173,17 @@ static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		return xchg_small(ptr, x, 1);
	case 2:
		return xchg_small(ptr, x, 2);
	case 4:
		return xchg_u32(ptr, x);
	default:
		__xchg_called_with_bad_pointer();
		return x;
	}
}

#endif /* __ASSEMBLY__ */
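
With the 1- and 2-byte cases wired into __xchg(), xchg() can now be applied
to byte and halfword objects, which the generic queued locks below rely on.
A hypothetical usage sketch (the flag and helper are illustrative, not part
of this patch):

	static u8 pending_flag;

	static inline bool claim_pending(void)
	{
		/* sizeof(*ptr) == 1, so this dispatches to xchg_small(ptr, x, 1) */
		return xchg(&pending_flag, 1) != 0;
	}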

arch/xtensa/include/asm/spinlock.h  +3 −182
@@ -12,188 +12,9 @@
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>

/*
 * spinlock
 *
 * There is at most one owner of a spinlock.  There are not different
 * types of spinlock owners like there are for rwlocks (see below).
 *
 * When trying to obtain a spinlock, the function "spins" forever, or busy-
 * waits, until the lock is obtained.  When spinning, presumably some other
 * owner will soon give up the spinlock making it available to others.  Use
 * the trylock functions to avoid spinning forever.
 *
 * possible values:
 *
 *    0         nobody owns the spinlock
 *    1         somebody owns the spinlock
 */

#define arch_spin_is_locked(x) ((x)->slock != 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/*
 * rwlock
 *
 * Read-write locks are really a more flexible spinlock.  They allow
 * multiple readers but only one writer.  Write ownership is exclusive
 * (i.e., all other readers and writers are blocked from ownership while
 * there is a write owner).  These rwlocks are unfair to writers.  Writers
 * can be starved for an indefinite time by readers.
 *
 * possible values:
 *
 *   0          nobody owns the rwlock
 *  >0          one or more readers own the rwlock
 *                (the positive value is the actual number of readers)
 *  0x80000000  one writer owns the rwlock, no other writers, no readers
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       bltz    %1, 1b\n"
			"       wsr     %1, scompare1\n"
			"       addi    %0, %1, 1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	__asm__ __volatile__(
			"       l32i    %1, %2, 0\n"
			"       addi    %0, %1, 1\n"
			"       bltz    %0, 1f\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       addi    %0, %1, -1\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}
#define smp_mb__after_spinlock()	smp_mb()

#endif	/* _XTENSA_SPINLOCK_H */
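
Dropping the hand-written s32c1i lock loops is possible because the Kconfig
now selects ARCH_USE_QUEUED_SPINLOCKS/ARCH_USE_QUEUED_RWLOCKS and the Kbuild
additions pull in the generic qspinlock.h/qrwlock.h, which supply the arch_*
entry points. Roughly, paraphrased from memory of
include/asm-generic/qspinlock.h (not verbatim from this commit):

	static __always_inline void queued_spin_lock(struct qspinlock *lock)
	{
		u32 val;

		/* uncontended fast path: a single acquire cmpxchg */
		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;
		queued_spin_lock_slowpath(lock, val);
	}

	#define arch_spin_lock(l)	queued_spin_lock(l)

The queued-lock code operates on sub-word fields of the lock word, which is
why the 1- and 2-byte xchg support added above belongs to the same series.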