Commit 13625c0a authored by Paul E. McKenney's avatar Paul E. McKenney
Browse files

Merge branches 'doc.2020.06.29a', 'fixes.2020.06.29a',...

Merge branches 'doc.2020.06.29a', 'fixes.2020.06.29a', 'kfree_rcu.2020.06.29a', 'rcu-tasks.2020.06.29a', 'scale.2020.06.29a', 'srcu.2020.06.29a' and 'torture.2020.06.29a' into HEAD

doc.2020.06.29a:  Documentation updates.
fixes.2020.06.29a:  Miscellaneous fixes.
kfree_rcu.2020.06.29a:  kfree_rcu() updates.
rcu-tasks.2020.06.29a:  RCU Tasks updates.
scale.2020.06.29a:  Read-side scalability tests.
srcu.2020.06.29a:  SRCU updates.
torture.2020.06.29a:  Torture-test updates.
Loading
Loading
Loading
Loading
+68 −0
Original line number Diff line number Diff line
@@ -4038,6 +4038,14 @@
			latencies, which will choose a value aligned
			with the appropriate hardware boundaries.

	rcutree.rcu_min_cached_objs= [KNL]
			Minimum number of objects which are cached and
			maintained per CPU. Object size is equal
			to PAGE_SIZE. The cache allows reducing the
			pressure on the page allocator, and it also
			makes the whole algorithm behave better under
			low-memory conditions.

	rcutree.jiffies_till_first_fqs= [KNL]
			Set delay from grace-period initialization to
			first attempt to force quiescent states.
@@ -4258,6 +4266,20 @@
			Set time (jiffies) between CPU-hotplug operations,
			or zero to disable CPU-hotplug testing.

	rcutorture.read_exit= [KNL]
			Set the number of read-then-exit kthreads used
			to test the interaction of RCU updaters and
			task-exit processing.

	rcutorture.read_exit_burst= [KNL]
			The number of times in a given read-then-exit
			episode that a set of read-then-exit kthreads
			is spawned.

	rcutorture.read_exit_delay= [KNL]
			The delay, in seconds, between successive
			read-then-exit testing episodes.

	rcutorture.shuffle_interval= [KNL]
			Set task-shuffle interval (s).  Shuffling tasks
			allows some CPUs to go into dyntick-idle mode
@@ -4407,6 +4429,45 @@
			      reboot_cpu is s[mp]#### with #### being the processor
					to be used for rebooting.

	refscale.holdoff= [KNL]
			Set test-start holdoff period.  The purpose of
			this parameter is to delay the start of the
			test until boot completes in order to avoid
			interference.

	refscale.loops= [KNL]
			Set the number of loops over the synchronization
			primitive under test.  Increasing this number
			reduces noise due to loop start/end overhead,
			but the default has already reduced the per-pass
			noise to a handful of picoseconds on ca. 2020
			x86 laptops.

	refscale.nreaders= [KNL]
			Set number of readers.  The default value of -1
			selects N, where N is roughly 75% of the number
			of CPUs.  A value of zero is an interesting choice.

	refscale.nruns= [KNL]
			Set number of runs, each of which is dumped onto
			the console log.

	refscale.readdelay= [KNL]
			Set the read-side critical-section duration,
			measured in microseconds.

	refscale.scale_type= [KNL]
			Specify the read-protection implementation to test.

	refscale.shutdown= [KNL]
			Shut down the system at the end of the performance
			test.  This defaults to 1 (shut it down) when
			refscale is built into the kernel and to 0 (leave
			it running) when refscale is built as a module.

	refscale.verbose= [KNL]
			Enable additional printk() statements.

	relax_domain_level=
			[KNL, SMP] Set scheduler's default relax_domain_level.
			See Documentation/admin-guide/cgroup-v1/cpusets.rst.
@@ -5082,6 +5143,13 @@
			Prevent the CPU-hotplug component of torturing
			until after init has spawned.

	torture.ftrace_dump_at_shutdown= [KNL]
			Dump the ftrace buffer at torture-test shutdown,
			even if there were no errors.  This can be a
			very costly operation when many torture tests
			are running concurrently, especially on systems
			with rotating-rust storage.

	tp720=		[HW,PS2]

	tpm_suspend_pcr=[HW,TPM]
+2 −0
Original line number Diff line number Diff line
@@ -4515,6 +4515,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)

			/* once for us */
			free_extent_map(em);

			cond_resched(); /* Allow large-extent preemption. */
		}
	}
	return try_release_extent_state(tree, page, mask);
+1 −1
Original line number Diff line number Diff line
@@ -512,7 +512,7 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
                          [@right ][node2 ... ]
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 */
+46 −7
Original line number Diff line number Diff line
@@ -828,17 +828,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 * structure can be handled by kvfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
#define __kvfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
		kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)

/**
@@ -857,7 +857,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * be generated in __kvfree_rcu(). If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
@@ -872,7 +872,46 @@ do { \
	typeof (ptr) ___p = (ptr);					\
									\
	if (___p)							\
		__kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
		__kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
} while (0)

/**
 * kvfree_rcu() - kvfree an object after a grace period.
 *
 * This macro consists of one or two arguments and it is
 * based on whether an object is head-less or not. If it
 * has a head then the semantics stay the same as they
 * used to be before:
 *
 *     kvfree_rcu(ptr, rhf);
 *
 * where @ptr is the pointer to be freed by kvfree() and
 * @rhf is the name of the rcu_head structure within the
 * type of @ptr.
 *
 * When it comes to head-less variant, only one argument
 * is passed and that is just a pointer which has to be
 * freed after a grace period. Therefore the semantic is
 *
 *     kvfree_rcu(ptr);
 *
 * where @ptr is the pointer to be freed by kvfree().
 *
 * Please note, the head-less way of freeing is permitted
 * only from a context that is allowed to sleep (see the
 * might_sleep() annotation). Otherwise, please switch to
 * embedding an rcu_head structure within the type of @ptr.
 */
#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__,		\
	kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)

#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
#define kvfree_rcu_arg_1(ptr)					\
do {								\
	typeof(ptr) ___p = (ptr);				\
								\
	if (___p)						\
		kvfree_call_rcu(NULL, (rcu_callback_t) (___p));	\
} while (0)

/*
+2 −2
Original line number Diff line number Diff line
@@ -36,8 +36,8 @@ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_trace() is invoked by one task, then that task
 * is guaranteed to block until all other tasks exit their read-side
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
Loading