Commit 3d491679 authored by Linus Torvalds

Merge tag 'locking_urgent_for_v5.9_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:
 "Two fixes from the locking/urgent pile:

   - Fix lockdep's detection of "USED" <- "IN-NMI" inversions (Peter
     Zijlstra)

   - Make percpu-rwsem operations on the semaphore's ->read_count
     IRQ-safe because it can be used in an IRQ context (Hou Tao)"

* tag 'locking_urgent_for_v5.9_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/percpu-rwsem: Use this_cpu_{inc,dec}() for read_count
  locking/lockdep: Fix "USED" <- "IN-NMI" inversions
parents 5674d81c e6b1a44e
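
A note on the percpu-rwsem fix: __this_cpu_{inc,dec}() are preempt-safe
but not IRQ-safe, and, as the message above says, ->read_count can be
updated from IRQ context. On architectures where the per-cpu increment
compiles to a plain load/store pair, an interrupt landing between the
two loses the interrupt's update. A rough userspace sketch of that
failure mode (SIGALRM stands in for the IRQ; illustrative code, not the
kernel's):

  /* Plain two-step read-modify-write vs. an asynchronous updater. */
  #include <signal.h>
  #include <stdio.h>
  #include <sys/time.h>

  static volatile unsigned long count;	/* stands in for *sem->read_count */
  static volatile unsigned long irqs;	/* "IRQs" that actually ran */

  static void irq_handler(int sig)
  {
  	(void)sig;
  	count = count + 1;		/* the IRQ-context update */
  	irqs = irqs + 1;
  }

  int main(void)
  {
  	const unsigned long iters = 50UL * 1000 * 1000;
  	struct itimerval it = { { 0, 100 }, { 0, 100 } };

  	signal(SIGALRM, irq_handler);
  	setitimer(ITIMER_REAL, &it, NULL);	/* periodic "interrupts" */

  	for (unsigned long i = 0; i < iters; i++) {
  		unsigned long tmp = count;	/* load */
  		count = tmp + 1;		/* store: overwrites any
  						 * update made in between */
  	}

  	it.it_interval.tv_usec = it.it_value.tv_usec = 0;
  	setitimer(ITIMER_REAL, &it, NULL);	/* stop the timer */

  	/* A single-instruction or IRQ-disabled increment would print
  	 * equal numbers; here some handler updates are lost. */
  	printf("expected %lu, got %lu\n", iters + irqs, count);
  	return 0;
  }
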
include/linux/percpu-rwsem.h +4 −4
@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
	 * anything we did within this RCU-sched read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
@@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
		 * aggregate zero, as that is the only time it matters) they
		 * will also see our critical section.
		 */
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
kernel/locking/lockdep.c +29 −6
@@ -3969,13 +3969,18 @@ static int separate_irq_context(struct task_struct *curr,
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
-	unsigned int new_mask = 1 << new_bit, ret = 1;
+	unsigned int old_mask, new_mask, ret = 1;

	if (new_bit >= LOCK_USAGE_STATES) {
		DEBUG_LOCKS_WARN_ON(1);
		return 0;
	}

+	if (new_bit == LOCK_USED && this->read)
+		new_bit = LOCK_USED_READ;
+
+	new_mask = 1 << new_bit;
+
	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
@@ -3988,13 +3993,22 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
	/*
	 * Make sure we didn't race:
	 */
-	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
-		graph_unlock();
-		return 1;
-	}
+	if (unlikely(hlock_class(this)->usage_mask & new_mask))
+		goto unlock;

+	old_mask = hlock_class(this)->usage_mask;
	hlock_class(this)->usage_mask |= new_mask;

+	/*
+	 * Save one usage_traces[] entry and map both LOCK_USED and
+	 * LOCK_USED_READ onto the same entry.
+	 */
+	if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
+		if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
+			goto unlock;
+		new_bit = LOCK_USED;
+	}
+
	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
		return 0;

@@ -4008,6 +4022,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
			return 0;
	}

+unlock:
	graph_unlock();

	/*
@@ -4942,12 +4957,20 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
{
#ifdef CONFIG_PROVE_LOCKING
	struct lock_class *class = look_up_lock_class(lock, subclass);
+	unsigned long mask = LOCKF_USED;

	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
	if (!class)
		return;

-	if (!(class->usage_mask & LOCK_USED))
+	/*
+	 * READ locks only conflict with USED, such that if we only ever use
+	 * READ locks, there is no deadlock possible -- RCU.
+	 */
+	if (!hlock->read)
+		mask |= LOCKF_USED_READ;
+
+	if (!(class->usage_mask & mask))
		return;

	hlock->class_idx = class - lock_classes;
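
The effect of the verify_lock_unused() change can be modeled in
isolation. A stand-alone sketch (the enum values and mask logic mirror
the patch; everything else is illustrative scaffolding):

  #include <stdbool.h>
  #include <stdio.h>

  enum { LOCK_USED, LOCK_USED_READ };
  #define LOCKF_USED		(1UL << LOCK_USED)
  #define LOCKF_USED_READ	(1UL << LOCK_USED_READ)

  /* Would lockdep report a "USED" <- "IN-NMI" inversion for this
   * acquisition in NMI context, given the class's recorded usage? */
  static bool nmi_acquire_conflicts(unsigned long usage_mask, bool read)
  {
  	unsigned long mask = LOCKF_USED;

  	/* Read acquisitions only conflict with write-held usage; a
  	 * class that is only ever read-held (RCU-style) cannot
  	 * deadlock against NMI readers. */
  	if (!read)
  		mask |= LOCKF_USED_READ;

  	return usage_mask & mask;
  }

  int main(void)
  {
  	/* read-only class, NMI read acquire: no longer a false positive */
  	printf("%d\n", nmi_acquire_conflicts(LOCKF_USED_READ, true));	/* 0 */
  	/* read-only class, NMI write acquire: still flagged */
  	printf("%d\n", nmi_acquire_conflicts(LOCKF_USED_READ, false));	/* 1 */
  	/* write-held class, NMI read acquire: flagged */
  	printf("%d\n", nmi_acquire_conflicts(LOCKF_USED, true));	/* 1 */
  	return 0;
  }
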
kernel/locking/lockdep_internals.h +2 −0
@@ -19,6 +19,7 @@ enum lock_usage_bit {
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
+	LOCK_USED_READ,
	LOCK_USAGE_STATES
};

@@ -40,6 +41,7 @@ enum {
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
+	__LOCKF(USED_READ)
};

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
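
The two one-line additions stay in lockstep because both enums are
generated from the same x-macro list in lockdep_states.h. A condensed
stand-alone sketch of the pattern (the state list and the single bit
per state are stand-ins; the real header generates several bits per
state):

  #include <stdio.h>

  #define LOCKDEP_STATES(X)	X(HARDIRQ) X(SOFTIRQ)

  /* Pass 1: usage bit numbers. */
  enum lock_usage_bit {
  #define LOCKDEP_STATE(s)	LOCK_USED_IN_##s,
  	LOCKDEP_STATES(LOCKDEP_STATE)
  #undef LOCKDEP_STATE
  	LOCK_USED,
  	LOCK_USED_READ,
  	LOCK_USAGE_STATES,
  };

  /* Pass 2: matching masks, derived from the same list so the two
   * enumerations cannot drift apart. */
  #define __LOCKF(bit)	LOCKF_##bit = (1U << LOCK_##bit),
  enum {
  #define LOCKDEP_STATE(s)	__LOCKF(USED_IN_##s)
  	LOCKDEP_STATES(LOCKDEP_STATE)
  #undef LOCKDEP_STATE
  	__LOCKF(USED)
  	__LOCKF(USED_READ)
  };

  int main(void)
  {
  	printf("LOCKF_USED      = 0x%x\n", LOCKF_USED);		/* 0x4 */
  	printf("LOCKF_USED_READ = 0x%x\n", LOCKF_USED_READ);	/* 0x8 */
  	return 0;
  }
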
kernel/locking/percpu-rwsem.c +2 −2
@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(percpu_free_rwsem);

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
-	__this_cpu_inc(*sem->read_count);
+	this_cpu_inc(*sem->read_count);

	/*
	 * Due to having preemption disabled the decrement happens on
@@ -71,7 +71,7 @@ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

-	__this_cpu_dec(*sem->read_count);
+	this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);
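
Why this_cpu_{inc,dec}() suffices here: the non-underscore this_cpu_*()
ops are defined to be IRQ-safe. On x86 they compile to a single
%gs-relative instruction, so the fast path is unchanged there; other
architectures fall back to a generic version that disables interrupts
around the plain read-modify-write, whose shape is roughly the
following (paraphrased from include/asm-generic/percpu.h, not
verbatim):

  #define this_cpu_generic_to_op(pcp, val, op)				\
  do {									\
  	unsigned long __flags;						\
  	raw_local_irq_save(__flags);	/* close the IRQ window */	\
  	raw_cpu_generic_to_op(pcp, val, op);	/* plain RMW */		\
  	raw_local_irq_restore(__flags);					\
  } while (0)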