Commit 42e1b14b authored by Linus Torvalds
Browse files

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Implement wraparound-safe refcount_t and kref_t types based on
     generic atomic primitives (Peter Zijlstra)

   - Improve and fix the ww_mutex code (Nicolai Hähnle)

   - Add self-tests to the ww_mutex code (Chris Wilson)

   - Optimize percpu-rwsems with the 'rcuwait' mechanism (Davidlohr
     Bueso)

   - Micro-optimize the current-task logic all around the core kernel
     (Davidlohr Bueso)

   - Tidy up after recent optimizations: remove stale code and APIs,
     clean up the code (Waiman Long)

   - ... plus misc fixes, updates and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits)
  fork: Fix task_struct alignment
  locking/spinlock/debug: Remove spinlock lockup detection code
  lockdep: Fix incorrect condition to print bug msgs for MAX_LOCKDEP_CHAIN_HLOCKS
  lkdtm: Convert to refcount_t testing
  kref: Implement 'struct kref' using refcount_t
  refcount_t: Introduce a special purpose refcount type
  sched/wake_q: Clarify queue reinit comment
  sched/wait, rcuwait: Fix typo in comment
  locking/mutex: Fix lockdep_assert_held() fail
  locking/rtmutex: Flip unlikely() branch to likely() in __rt_mutex_slowlock()
  locking/rwsem: Reinit wake_q after use
  locking/rwsem: Remove unnecessary atomic_long_t casts
  jump_labels: Move header guard #endif down where it belongs
  locking/atomic, kref: Implement kref_put_lock()
  locking/ww_mutex: Turn off __must_check for now
  locking/atomic, kref: Avoid more abuse
  locking/atomic, kref: Use kref_get_unless_zero() more
  locking/atomic, kref: Kill kref_sub()
  locking/atomic, kref: Add kref_read()
  locking/atomic, kref: Add KREF_INIT()
  ...
parents 828cad8e 95cb64c1
Loading
Loading
Loading
Loading
+8 −4
Original line number Diff line number Diff line
@@ -309,11 +309,15 @@ Design:
  normal mutex locks, which are far more common. As such there is only a small
  increase in code size if wait/wound mutexes are not used.

  We maintain the following invariants for the wait list:
  (1) Waiters with an acquire context are sorted by stamp order; waiters
      without an acquire context are interspersed in FIFO order.
  (2) Among waiters with contexts, only the first one can have other locks
      acquired already (ctx->acquired > 0). Note that this waiter may come
      after other waiters without contexts in the list.

  In general, not much contention is expected. The locks are typically used to
  serialize access to resources for devices. The only way to make wakeups
  smarter would be at the cost of adding a field to struct mutex_waiter. This
  would add overhead to all cases where normal mutexes are used, and
  ww_mutexes are generally less performance sensitive.
  serialize access to resources for devices.

Lockdep:
  Special care has been taken to warn for as many cases of api abuse
+1 −1
Original line number Diff line number Diff line
@@ -76,7 +76,7 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
			add_sigio_fd(random_fd);

			add_wait_queue(&host_read_wait, &wait);
			set_task_state(current, TASK_INTERRUPTIBLE);
			set_current_state(TASK_INTERRUPTIBLE);

			schedule();
			remove_wait_queue(&host_read_wait, &wait);
+0 −3
Original line number Diff line number Diff line
@@ -23,9 +23,6 @@
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#include <asm/qspinlock.h>

/*
+1 −2
Original line number Diff line number Diff line
@@ -32,8 +32,7 @@ static void bug_at(unsigned char *ip, int line)
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
	       ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
	BUG();
}

+0 −14
Original line number Diff line number Diff line
@@ -620,18 +620,4 @@ void __init kvm_spinlock_init(void)
	}
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
Loading