Commit 6d04182d authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull powerpc fixes from Michael Ellerman:
 "Two weeks worth of accumulated fixes:

   - A fix for a performance regression seen on PowerVM LPARs using
     dedicated CPUs, caused by our vcpu_is_preempted() returning true
     even for idle CPUs.

   - One of the ultravisor support patches broke KVM on big endian hosts
     in v5.4.

   - Our KUAP (Kernel User Access Prevention) code missed allowing
     access in __clear_user(), which could lead to an oops or erroneous
     SEGV when triggered via PTRACE_GETREGSET.

   - Two fixes for the ocxl driver, an open/remove race, and a memory
     leak in an error path.

   - A handful of other small fixes.

  Thanks to: Andrew Donnellan, Christian Zigotzky, Christophe Leroy,
  Christoph Hellwig, Daniel Axtens, David Hildenbrand, Frederic Barrat,
  Gautham R. Shenoy, Greg Kurz, Ihor Pasichnyk, Juri Lelli, Marcus
  Comstedt, Mike Rapoport, Parth Shah, Srikar Dronamraju, Vaidyanathan
  Srinivasan"

* tag 'powerpc-5.5-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  KVM: PPC: Book3S HV: Fix regression on big endian hosts
  powerpc: Fix __clear_user() with KUAP enabled
  powerpc/pseries/cmm: fix managed page counts when migrating pages between zones
  powerpc/8xx: fix bogus __init on mmu_mapin_ram_chunk()
  ocxl: Fix potential memory leak on context creation
  powerpc/irq: fix stack overflow verification
  powerpc: Ensure that swiotlb buffer is allocated from low memory
  powerpc/shared: Use static key to detect shared processor
  powerpc/vcpu: Assume dedicated processors as non-preempt
  ocxl: Fix concurrent AFU open and device removal
parents 5c741e25 228b607d
Loading
Loading
Loading
Loading
+5 −8
Original line number Diff line number Diff line
@@ -36,10 +36,12 @@
#endif

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
/*
 * Return true if the vcpu backing @cpu may currently be preempted by the
 * hypervisor.
 *
 * The shared_processor static key is enabled only on shared-processor
 * LPARs, so dedicated-processor LPARs short-circuit to false here.  The
 * previous firmware_has_feature(FW_FEATURE_SPLPAR) test returned true
 * even for idle dedicated CPUs, causing the scheduler performance
 * regression this merge fixes.
 */
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	/* An odd yield count means the vcpu is currently preempted/ceded. */
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
@@ -110,13 +112,8 @@ static inline void splpar_rw_yield(arch_rwlock_t *lock) {};

static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries so guard anything LPPACA related to
 * allow other platforms (which include this common header) to compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#ifdef CONFIG_PPC_SPLPAR
	return static_branch_unlikely(&shared_processor);
#else
	return false;
#endif
+7 −2
Original line number Diff line number Diff line
@@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
	return n;
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);
unsigned long __arch_clear_user(void __user *addr, unsigned long size);

/*
 * Zero @size bytes of user memory at @addr.
 *
 * Validates the range with access_ok() and wraps the actual store in an
 * allow_write_to_user()/prevent_write_to_user() pair so the clear works
 * with KUAP (Kernel User Access Prevention) enabled.
 *
 * Returns the number of bytes that could NOT be cleared (0 on success);
 * if access_ok() fails, nothing is cleared and @size is returned.
 */
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;

	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		/*
		 * Must call the raw asm implementation here, not
		 * __clear_user(): __clear_user() now wraps clear_user(),
		 * so calling it back would recurse forever.
		 */
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

/*
 * Compatibility entry point kept for existing callers of __clear_user().
 * Delegates to clear_user() so the access_ok() check and the KUAP
 * allow/prevent window are always applied around the underlying
 * __arch_clear_user() copy — previously __clear_user() went straight to
 * the asm routine and oopsed/SEGVed under KUAP (e.g. via
 * PTRACE_GETREGSET, per the commit message above).
 */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

+2 −2
Original line number Diff line number Diff line
@@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs)

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
@@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs)
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	check_stack_overflow();

	/* Already there ? */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
+2 −2
Original line number Diff line number Diff line
@@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r7, VCPU_GPR(R7)(r4)
	bne	ret_to_ultra

	lwz	r0, VCPU_CR(r4)
	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R0)(r4)
@@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 *   R3 = UV_RETURN
 */
ret_to_ultra:
	lwz	r0, VCPU_CR(r4)
	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R3)(r4)
+2 −2
Original line number Diff line number Diff line
@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)

_GLOBAL(__clear_user)
_GLOBAL(__arch_clear_user)
/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
	EX_TABLE(8b, 91b)
	EX_TABLE(9b, 91b)

EXPORT_SYMBOL(__clear_user)
EXPORT_SYMBOL(__arch_clear_user)
Loading