Commit 1c5eb448 authored by Steven Rostedt (VMware)
Browse files

tracing: Rename trace_buffer to array_buffer

As we are working to remove the generic "ring_buffer" name that is used by
both tracing and perf, the ring_buffer name for tracing will be renamed to
trace_buffer, and perf's ring buffer will be renamed to perf_buffer.

As there already exists a trace_buffer that is used by the trace_arrays, it
needs to be first renamed to array_buffer.

Link: https://lore.kernel.org/r/20191213153553.GE20583@krava



Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 56de4e8f
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -11,7 +11,7 @@
#include <linux/tracepoint.h>

struct trace_array;
-struct trace_buffer;
+struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
@@ -79,7 +79,7 @@ struct trace_entry {
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
-	struct trace_buffer	*trace_buffer;
+	struct array_buffer	*array_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
+2 −2
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
-		buffer = blk_tr->trace_buffer.buffer;
+		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
@@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
	if (blk_tracer) {
		tracing_record_cmdline(current);

-		buffer = blk_tr->trace_buffer.buffer;
+		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
+4 −4
Original line number Diff line number Diff line
@@ -146,7 +146,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
{
	struct trace_array *tr = op->private;

-	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
+	if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
@@ -6922,7 +6922,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,

	pid_list = rcu_dereference_sched(tr->function_pids);

-	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

@@ -6976,7 +6976,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);

	for_each_possible_cpu(cpu)
-		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
+		per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;

	rcu_assign_pointer(tr->function_pids, NULL);

@@ -7100,7 +7100,7 @@ static void ignore_task_cpu(void *data)
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

-	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

+115 −115

File changed.

Preview size limit exceeded, changes collapsed.

+8 −8
Original line number Diff line number Diff line
@@ -176,7 +176,7 @@ struct trace_array_cpu {
struct tracer;
struct trace_option_dentry;

-struct trace_buffer {
+struct array_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
@@ -249,7 +249,7 @@ struct cond_snapshot {
struct trace_array {
	struct list_head	list;
	char			*name;
-	struct trace_buffer	trace_buffer;
+	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
@@ -257,12 +257,12 @@ struct trace_array {
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
-	 * The buffers for the max_buffer are set up the same as the trace_buffer
+	 * The buffers for the max_buffer are set up the same as the array_buffer
 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
-	 * with the buffer of the trace_buffer and the buffers are reset for
-	 * the trace_buffer so the tracing can continue.
+	 * with the buffer of the array_buffer and the buffers are reset for
+	 * the array_buffer so the tracing can continue.
	 */
-	struct trace_buffer	max_buffer;
+	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -685,7 +685,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -1057,7 +1057,7 @@ struct ftrace_func_command {
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
-	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+	return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
Loading