Commit c7eba51c authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - improve rwsem scalability

 - add uninitialized rwsem debugging check

 - reduce lockdep's stacktrace memory usage and add diagnostics

 - misc cleanups, code consolidation and constification

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mutex: Fix up mutex_waiter usage
  locking/mutex: Use mutex flags macro instead of hard code
  locking/mutex: Make __mutex_owner static to mutex.c
  locking/qspinlock,x86: Clarify virt_spin_lock_key
  locking/rwsem: Check for operations on an uninitialized rwsem
  locking/rwsem: Make handoff writer optimistically spin on owner
  locking/lockdep: Report more stack trace statistics
  locking/lockdep: Reduce space occupied by stack traces
  stacktrace: Constify 'entries' arguments
  locking/lockdep: Make it clear that what lock_class::key points at is not modified
parents cc9b499a e57d1430
arch/x86/include/asm/qspinlock.h  +15 −0
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_spin_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
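
For context: the hijack point this comment documents sits at the top of the generic slowpath in kernel/locking/qspinlock.c (not part of this diff); roughly:

	void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
	{
		if (virt_spin_lock(lock))
			return;	/* the virt layer negotiated the lock */

		/* ... normal pending-bit/MCS queueing otherwise ... */
	}
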
include/linux/lockdep.h  +4 −7
@@ -66,10 +66,7 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
-struct lock_trace {
-	unsigned int		nr_entries;
-	unsigned int		offset;
-};
+struct lock_trace;
 
 #define LOCKSTAT_POINTS		4
 
@@ -97,7 +94,7 @@ struct lock_class {
 	 */
 	struct list_head		locks_after, locks_before;
 
-	struct lockdep_subclass_key	*key;
+	const struct lockdep_subclass_key *key;
 	unsigned int			subclass;
 	unsigned int			dep_gen_id;
 
@@ -105,7 +102,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];
+	const struct lock_trace		*usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
@@ -193,7 +190,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct lock_class		*links_to;
-	struct lock_trace		trace;
+	const struct lock_trace		*trace;
 	int				distance;
 
 	/*
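
With only a forward declaration left in the header, the definition moves into kernel/locking/lockdep.c, where (per the "Reduce space occupied by stack traces" patch) each unique trace is hashed and stored once, and all users share const pointers to it; a rough sketch of the now-private struct:

	/* kernel/locking/lockdep.c -- one copy per unique trace, hash-deduplicated */
	struct lock_trace {
		struct hlist_node	hash_entry;	/* bucket linkage */
		u32			hash;		/* hash over entries[] */
		u32			nr_entries;
		unsigned long		entries[] __aligned(sizeof(unsigned long));
	};
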
include/linux/mutex.h  +3 −22
@@ -65,16 +65,6 @@ struct mutex {
 #endif
 };
 
-/*
- * Internal helper function; C doesn't allow us to hide it :/
- *
- * DO NOT USE (outside of mutex code).
- */
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
-{
-	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
-}
-
 /*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:
@@ -144,10 +134,7 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  *
  * Returns true if the mutex is locked, false if unlocked.
  */
-static inline bool mutex_is_locked(struct mutex *lock)
-{
-	return __mutex_owner(lock) != NULL;
-}
+extern bool mutex_is_locked(struct mutex *lock);
 
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
@@ -220,13 +207,7 @@ enum mutex_trylock_recursive_enum {
  *  - MUTEX_TRYLOCK_SUCCESS   - lock acquired,
  *  - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
  */
-static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
-	if (unlikely(__mutex_owner(lock) == current))
-		return MUTEX_TRYLOCK_RECURSIVE;
-
-	return mutex_trylock(lock);
-}
+extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock);
 
 #endif /* __LINUX_MUTEX_H */
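
With __mutex_owner() no longer visible outside mutex code, the former inline helpers become out-of-line definitions in kernel/locking/mutex.c; a sketch (MUTEX_FLAGS per the "mutex flags macro" patch in this series):

	/* kernel/locking/mutex.c -- owner word: task pointer | low flag bits */
	static inline struct task_struct *__mutex_owner(struct mutex *lock)
	{
		return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
	}

	bool mutex_is_locked(struct mutex *lock)
	{
		return __mutex_owner(lock) != NULL;
	}
	EXPORT_SYMBOL(mutex_is_locked);
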
include/linux/rwsem.h  +10 −0
@@ -45,6 +45,9 @@ struct rw_semaphore {
 #endif
 	raw_spinlock_t wait_lock;
 	struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+	void *magic;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
@@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
+#ifdef CONFIG_DEBUG_RWSEMS
+# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+#else
+# define __DEBUG_RWSEM_INITIALIZER(lockname)
+#endif
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
 #else
@@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
 	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
 	  __RWSEM_OPT_INIT(name)				\
+	  __DEBUG_RWSEM_INITIALIZER(name)			\
 	  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
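
The initializer (and __init_rwsem()) point magic back at the rwsem itself, so with CONFIG_DEBUG_RWSEMS every lock/unlock path can cheaply assert that the semaphore was initialized and is not corrupted; a simplified sketch of the check (the in-tree WARN prints more state):

	#ifdef CONFIG_DEBUG_RWSEMS
	# define DEBUG_RWSEMS_WARN_ON(c, sem)					\
		WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): sem %px\n", #c, sem)
	#else
	# define DEBUG_RWSEMS_WARN_ON(c, sem)
	#endif

	/* e.g. at the top of down_read(), up_write(), ...: */
	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
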
include/linux/stacktrace.h  +2 −2
@@ -9,9 +9,9 @@ struct task_struct;
 struct pt_regs;
 
 #ifdef CONFIG_STACKTRACE
-void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
 		       int spaces);
-int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
 			unsigned int nr_entries, int spaces);
 unsigned int stack_trace_save(unsigned long *store, unsigned int size,
 			      unsigned int skipnr);
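
The constified prototypes let callers that hold traces behind const pointers (such as lockdep's shared lock_trace objects above) print them without a cast; an illustrative caller:

	static void print_lock_trace(const struct lock_trace *trace, unsigned int spaces)
	{
		stack_trace_print(trace->entries, trace->nr_entries, spaces);
	}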