Commit 74401729 authored by Paul E. McKenney

tracing: Replace synchronize_sched() and call_rcu_sched()



Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can
be replaced by synchronize_rcu().  Similarly, call_rcu_sched() can be
replaced by call_rcu().  This commit therefore makes these changes.
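
For background, a minimal illustrative sketch of why the conversion is
safe (the names my_entry, cur_entry, read_entry, and replace_entry are
hypothetical and do not appear in this patch): a reader protected only
by preempt_disable() is now a full RCU read-side critical section, so
the updater may wait for it with synchronize_rcu() or call_rcu()
instead of the _sched variants.

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	int val;
	struct rcu_head rcu;
};

static struct my_entry __rcu *cur_entry;

/* Reader: a preempt-disable region now counts as an RCU read-side section. */
static int read_entry(void)
{
	struct my_entry *e;
	int val = -1;

	preempt_disable();
	e = rcu_dereference_sched(cur_entry);
	if (e)
		val = e->val;
	preempt_enable();
	return val;
}

static void free_entry_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_entry, rcu));
}

/* Updater: before this commit, these were synchronize_sched()/call_rcu_sched(). */
static void replace_entry(struct my_entry *newe)
{
	struct my_entry *old;

	old = rcu_dereference_protected(cur_entry, 1);
	rcu_assign_pointer(cur_entry, newe);

	/* Wait synchronously for all pre-existing readers, then free ... */
	synchronize_rcu();
	kfree(old);
	/* ... or free asynchronously instead: call_rcu(&old->rcu, free_entry_rcu); */
}

The single consolidated grace period above is what lets each
synchronize_sched() and call_rcu_sched() call in the hunks below become
synchronize_rcu() and call_rcu() with no other change.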

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: <linux-kernel@vger.kernel.org>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent c93ffc15
include/linux/tracepoint.h  +1 −1
@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
static inline void tracepoint_synchronize_unregister(void)
{
	synchronize_srcu(&tracepoint_srcu);
-	synchronize_sched();
+	synchronize_rcu();
}
#else
static inline void tracepoint_synchronize_unregister(void)
kernel/trace/ftrace.c  +12 −12
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
+	 * of synchronize_rcu(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
-			 * so this acts like an synchronize_sched.
+			 * so this acts like an synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)

	/*
	 * Some of the ops may be dynamically allocated,
-	 * they are freed after a synchronize_sched().
+	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
-	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
	if (ftrace_enabled && !ftrace_hash_empty(hash))
		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
				       &old_hash_ops);
-	synchronize_sched();
+	synchronize_rcu();

	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
		hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
		mutex_unlock(&graph_lock);

		/* Wait till all users are no longer using the old hash */
-		synchronize_sched();
+		synchronize_rcu();

		free_ftrace_hash(old_hash);
	}
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod)
	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
		if (mod_map->mod == mod) {
			list_del_rcu(&mod_map->list);
-			call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
			break;
		}
	}
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
	struct ftrace_mod_map *mod_map;
	const char *ret = NULL;

-	/* mod_map is freed via call_rcu_sched() */
+	/* mod_map is freed via call_rcu() */
	preempt_disable();
	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,

	/*
	 * Some of the ops may be dynamically allocated,
-	 * they must be freed after a synchronize_sched().
+	 * they must be freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
	rcu_assign_pointer(tr->function_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
-	synchronize_sched();
+	synchronize_rcu();

	trace_free_pid_list(pid_list);
}
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
	rcu_assign_pointer(tr->function_pids, pid_list);

	if (filtered_pids) {
-		synchronize_sched();
+		synchronize_rcu();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
kernel/trace/ring_buffer.c  +6 −6
@@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
-		synchronize_sched();
+		synchronize_rcu();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
@@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
@@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
@@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
void
ring_buffer_read_prepare_sync(void)
{
-	synchronize_sched();
+	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

@@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
	atomic_inc(&cpu_buffer->record_disabled);

	/* Make sure all commits have finished */
-	synchronize_sched();
+	synchronize_rcu();

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

@@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
		goto out;

	/*
-	 * We can't do a synchronize_sched here because this
+	 * We can't do a synchronize_rcu here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
kernel/trace/trace.c  +5 −5
@@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu)
	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
-	synchronize_sched();
+	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
@@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
-	synchronize_sched();
+	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

@@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void)
	preempt_enable();

	/* Wait for all current users to finish */
-	synchronize_sched();
+	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
@@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

-	/* Current trace needs to be nop_trace before synchronize_sched */
+	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
@@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
-		synchronize_sched();
+		synchronize_rcu();
		free_snapshot(tr);
	}
#endif
kernel/trace/trace_events_filter.c  +2 −2
@@ -1614,7 +1614,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,

	/*
	 * The calls can still be using the old filters.
-	 * Do a synchronize_sched() and to ensure all calls are
+	 * Do a synchronize_rcu() and to ensure all calls are
	 * done with them before we free them.
	 */
	tracepoint_synchronize_unregister();
@@ -1845,7 +1845,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
	if (filter) {
		/*
		 * No event actually uses the system filter
-		 * we can free it without synchronize_sched().
+		 * we can free it without synchronize_rcu().
		 */
		__free_filter(system->filter);
		system->filter = filter;