Commit 1823172a authored by Paul E. McKenney's avatar Paul E. McKenney
Browse files

Merge branches 'doc.2014.07.08a', 'fixes.2014.07.09a',...

Merge branches 'doc.2014.07.08a', 'fixes.2014.07.09a', 'maintainers.2014.07.08b', 'nocbs.2014.07.07a' and 'torture.2014.07.07a' into HEAD

doc.2014.07.08a: Documentation updates.
fixes.2014.07.09a: Miscellaneous fixes.
maintainers.2014.07.08b: Maintainership updates.
nocbs.2014.07.07a: Callback-offloading fixes.
torture.2014.07.07a: Torture-test updates.
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -2802,6 +2802,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			quiescent states.  Units are jiffies, minimum
			value is one, and maximum value is HZ.

	rcutree.rcu_nocb_leader_stride= [KNL]
			Set the number of NOCB kthread groups, which
			defaults to the square root of the number of
			CPUs.  Larger numbers reduce the wakeup overhead
			on the per-CPU grace-period kthreads, but increase
			that same overhead on each group's leader.

	rcutree.qhimark= [KNL]
			Set threshold of queued RCU callbacks beyond which
			batch limiting is disabled.
+15 −3
Original line number Diff line number Diff line
@@ -70,6 +70,8 @@ Descriptions of section entries:

	P: Person (obsolete)
	M: Mail patches to: FullName <address@domain>
	R: Designated reviewer: FullName <address@domain>
	   These reviewers should be CCed on patches.
	L: Mailing list that is relevant to this area
	W: Web-page with status/info
	Q: Patchwork web based patch tracking system site
@@ -7399,10 +7401,14 @@ L: linux-kernel@vger.kernel.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F:	Documentation/RCU/torture.txt
F:	kernel/rcu/torture.c
F:	kernel/rcu/rcutorture.c

RCUTORTURE TEST FRAMEWORK
M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M:	Josh Triplett <josh@joshtriplett.org>
R:	Steven Rostedt <rostedt@goodmis.org>
R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R:	Lai Jiangshan <laijs@cn.fujitsu.com>
L:	linux-kernel@vger.kernel.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -7425,8 +7431,11 @@ S: Supported
F:	net/rds/

READ-COPY UPDATE (RCU)
M:	Dipankar Sarma <dipankar@in.ibm.com>
M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M:	Josh Triplett <josh@joshtriplett.org>
R:	Steven Rostedt <rostedt@goodmis.org>
R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R:	Lai Jiangshan <laijs@cn.fujitsu.com>
L:	linux-kernel@vger.kernel.org
W:	http://www.rdrop.com/users/paulmck/RCU/
S:	Supported
@@ -7436,7 +7445,7 @@ X: Documentation/RCU/torture.txt
F:	include/linux/rcu*
X:	include/linux/srcu.h
F:	kernel/rcu/
X:	kernel/rcu/torture.c
X:	kernel/torture.c

REAL TIME CLOCK (RTC) SUBSYSTEM
M:	Alessandro Zummo <a.zummo@towertech.it>
@@ -8206,6 +8215,9 @@ F: mm/sl?b.c
SLEEPABLE READ-COPY UPDATE (SRCU)
M:	Lai Jiangshan <laijs@cn.fujitsu.com>
M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M:	Josh Triplett <josh@joshtriplett.org>
R:	Steven Rostedt <rostedt@goodmis.org>
R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
L:	linux-kernel@vger.kernel.org
W:	http://www.rdrop.com/users/paulmck/RCU/
S:	Supported
+1 −8
Original line number Diff line number Diff line
@@ -102,12 +102,6 @@ extern struct group_info init_groups;
#define INIT_IDS
#endif

#ifdef CONFIG_RCU_BOOST
#define INIT_TASK_RCU_BOOST()						\
	.rcu_boost_mutex = NULL,
#else
#define INIT_TASK_RCU_BOOST()
#endif
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_TREE_PREEMPT()					\
	.rcu_blocked_node = NULL,
@@ -119,8 +113,7 @@ extern struct group_info init_groups;
	.rcu_read_lock_nesting = 0,					\
	.rcu_read_unlock_special = 0,					\
	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
	INIT_TASK_RCU_TREE_PREEMPT()					\
	INIT_TASK_RCU_BOOST()
	INIT_TASK_RCU_TREE_PREEMPT()
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
+36 −9
Original line number Diff line number Diff line
@@ -826,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void)
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
 * is illegal to block while in an RCU read-side critical section.  In
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (with -rt patchset) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static inline void rcu_read_lock(void)
{
@@ -858,6 +857,34 @@ static inline void rcu_read_lock(void)
/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks.  This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted.  Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations,
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
+0 −6
Original line number Diff line number Diff line
@@ -1270,9 +1270,6 @@ struct task_struct {
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
@@ -2009,9 +2006,6 @@ static inline void rcu_copy_process(struct task_struct *p)
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

Loading