Commit 9228b5f2 authored by Ingo Molnar
Browse files

Merge branch 'rcu/next' of...

Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull RCU updates from Paul E. McKenney:

1.	Changes to rcutorture and to RCU documentation. Posted to LKML at
        https://lkml.org/lkml/2013/1/26/188.

2.	Enhancements to uniprocessor handling in tiny RCU. Posted to LKML
        at https://lkml.org/lkml/2013/1/27/2.

3.	Tag RCU callbacks with grace-period number to simplify callback
        advancement. Posted to LKML at https://lkml.org/lkml/2013/1/26/203.

4.	Miscellaneous fixes. Posted to LKML at
        https://lkml.org/lkml/2013/1/26/204.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 88b62b91 40393f52
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -253,6 +253,8 @@ This performs an atomic exchange operation on the atomic variable v, setting
the given new value.  It returns the old value that the atomic variable v had
just before the operation.

atomic_xchg requires explicit memory barriers around the operation.

	int atomic_cmpxchg(atomic_t *v, int old, int new);

This performs an atomic compare exchange operation on the atomic value v,
+1 −1
Original line number Diff line number Diff line
@@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			real-time workloads.  It can also improve energy
			efficiency for asymmetric multiprocessors.

	rcu_nocbs_poll	[KNL,BOOT]
	rcu_nocb_poll	[KNL,BOOT]
			Rather than requiring that offloaded CPUs
			(specified by rcu_nocbs= above) explicitly
			awaken the corresponding "rcuoN" kthreads,
+1 −0
Original line number Diff line number Diff line
@@ -1685,6 +1685,7 @@ explicit lock operations, described later). These include:

	xchg();
	cmpxchg();
	atomic_xchg();
	atomic_cmpxchg();
	atomic_inc_return();
	atomic_dec_return();
+11 −4
Original line number Diff line number Diff line
@@ -53,7 +53,10 @@ extern int rcutorture_runnable; /* for sysctl */
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
				      struct rcu_head *rhp,
				      unsigned long secs,
				      unsigned long c_old,
				      unsigned long c);
#else
static inline void rcutorture_record_test_transition(void)
{
@@ -63,9 +66,13 @@ static inline void rcutorture_record_progress(unsigned long vernum)
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
				      struct rcu_head *rhp,
				      unsigned long secs,
				      unsigned long c_old,
				      unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

@@ -749,7 +756,7 @@ static inline void rcu_preempt_sleep_check(void)
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
 * RCU implementations in real-time (with -rt patchset) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
+21 −10
Original line number Diff line number Diff line
@@ -44,8 +44,10 @@ TRACE_EVENT(rcu_utilization,
 * of a new grace period or the end of an old grace period ("cpustart"
 * and "cpuend", respectively), a CPU passing through a quiescent
 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
 * and "cpuofl", respectively), and a CPU being kicked for being too
 * long in dyntick-idle mode ("kick").
 * and "cpuofl", respectively), a CPU being kicked for being too
 * long in dyntick-idle mode ("kick"), a CPU accelerating its new
 * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
 * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
 */
TRACE_EVENT(rcu_grace_period,

@@ -393,7 +395,7 @@ TRACE_EVENT(rcu_kfree_callback,
 */
TRACE_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

@@ -401,7 +403,7 @@ TRACE_EVENT(rcu_batch_start,
		__field(char *, rcuname)
		__field(long, qlen_lazy)
		__field(long, qlen)
		__field(int, blimit)
		__field(long, blimit)
	),

	TP_fast_assign(
@@ -411,7 +413,7 @@ TRACE_EVENT(rcu_batch_start,
		__entry->blimit = blimit;
	),

	TP_printk("%s CBs=%ld/%ld bl=%d",
	TP_printk("%s CBs=%ld/%ld bl=%ld",
		  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
		  __entry->blimit)
);
@@ -523,22 +525,30 @@ TRACE_EVENT(rcu_batch_end,
 */
TRACE_EVENT(rcu_torture_read,

	TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp),
	TP_ARGS(rcutorturename, rhp, secs, c_old, c),

	TP_STRUCT__entry(
		__field(char *, rcutorturename)
		__field(struct rcu_head *, rhp)
		__field(unsigned long, secs)
		__field(unsigned long, c_old)
		__field(unsigned long, c)
	),

	TP_fast_assign(
		__entry->rcutorturename = rcutorturename;
		__entry->rhp = rhp;
		__entry->secs = secs;
		__entry->c_old = c_old;
		__entry->c = c;
	),

	TP_printk("%s torture read %p",
		  __entry->rcutorturename, __entry->rhp)
	TP_printk("%s torture read %p %luus c: %lu %lu",
		  __entry->rcutorturename, __entry->rhp,
		  __entry->secs, __entry->c_old, __entry->c)
);

/*
@@ -608,7 +618,8 @@ TRACE_EVENT(rcu_barrier,
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
	do { } while (0)
#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)

#endif /* #else #ifdef CONFIG_RCU_TRACE */
Loading