Commit 1744a21d authored by Vegard Nossum's avatar Vegard Nossum
Browse files

trace: annotate bitfields in struct ring_buffer_event



This gets rid of a heap of false-positive warnings from the tracer
code due to the use of bitfields.

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
parent a98b65a3
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

@@ -11,7 +12,10 @@ struct ring_buffer_iter;
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	/*
	 * type_len and time_delta share one 32-bit word.  The
	 * kmemcheck begin/end markers annotate this bitfield region so
	 * kmemcheck does not report false-positive uninitialized-read
	 * warnings when only part of the word has been written (the
	 * stated purpose of this commit).
	 */
	kmemcheck_bitfield_begin(bitfield);
	u32		type_len:5, time_delta:27;
	kmemcheck_bitfield_end(bitfield);

	/* Flexible array: event payload follows the header word. */
	u32		array[];
};

+3 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
@@ -1270,6 +1271,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	if (tail < BUF_PAGE_SIZE) {
		/* Mark the rest of the page with padding */
		event = __rb_page_index(tail_page, tail);
		kmemcheck_annotate_bitfield(event, bitfield);
		rb_event_set_padding(event);
	}

@@ -1327,6 +1329,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		return NULL;

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(event, type, length);

	/* The passed in type is zero for DATA */