Commit 79f14641 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: counter overflow limit

Provide a means to auto-disable the counter after 'n' overflow events.

Create the counter with hw_event.disabled = 1, and then issue an
ioctl(fd, PERF_COUNTER_IOC_REFRESH, n); to set the limit and enable
the counter.
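
For illustration (not part of this patch), a minimal userspace sketch of
that usage. The syscall number __NR_perf_counter_open and the
<linux/perf_counter.h> include path are assumptions about this tree, not
something this patch adds; event selection is elided.

/*
 * Illustrative sketch: open a counter with hw_event.disabled = 1,
 * then arm it for 4 overflow events via PERF_COUNTER_IOC_REFRESH.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
	struct perf_counter_hw_event hw_event;
	int fd;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.disabled = 1;	/* start disabled; REFRESH enables it */
	/* ... select an event and a sampling period as usual ... */

	/* (hw_event, pid, cpu, group_fd, flags); pid 0 = current task */
	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}

	/*
	 * Enable the counter and let it auto-disable after 4 overflows:
	 * the kernel does atomic_add(4, &counter->event_limit) followed
	 * by perf_counter_enable(counter).
	 */
	if (ioctl(fd, PERF_COUNTER_IOC_REFRESH, 4) < 0)
		perror("PERF_COUNTER_IOC_REFRESH");

	/* ... consume overflow records, e.g. via poll() or SIGIO ... */

	close(fd);
	return 0;
}

Each overflow then decrements event_limit; when it reaches zero,
perf_counter_overflow() returns 1 and disables the counter (deferring the
disable through perf_pending_queue() when running in NMI context), after
which another REFRESH re-arms it.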

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.083139737@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 339f7c90
include/linux/perf_counter.h: +9 −3
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -157,6 +157,7 @@ struct perf_counter_hw_event {
  */
 #define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
 #define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
+#define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)
 
 /*
  * Structure of the page that can be mapped via mmap
@@ -403,9 +404,14 @@ struct perf_counter {
 	/* poll related */
 	wait_queue_head_t		waitq;
 	struct fasync_struct		*fasync;
-	/* optional: for NMIs */
+
+	/* delayed work for NMIs and such */
+	int				pending_wakeup;
+	int				pending_disable;
 	struct perf_pending_entry	pending;
 
+	atomic_t			event_limit;
+
 	void (*destroy)(struct perf_counter *);
 	struct rcu_head			rcu_head;
 #endif
kernel/perf_counter.c: +41 −10
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -744,6 +744,12 @@ static void perf_counter_enable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
+static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+{
+	atomic_add(refresh, &counter->event_limit);
+	perf_counter_enable(counter);
+}
+
 /*
  * Enable a counter and all its children.
  */
@@ -1311,6 +1317,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case PERF_COUNTER_IOC_DISABLE:
 		perf_counter_disable_family(counter);
 		break;
+	case PERF_COUNTER_IOC_REFRESH:
+		perf_counter_refresh(counter, arg);
+		break;
 	default:
 		err = -ENOTTY;
 	}
@@ -1590,14 +1599,6 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
 }
 
-static void perf_pending_wakeup(struct perf_pending_entry *entry)
-{
-	struct perf_counter *counter = container_of(entry,
-			struct perf_counter, pending);
-
-	perf_counter_wakeup(counter);
-}
-
 /*
  * Pending wakeups
  *
@@ -1607,6 +1608,22 @@ static void perf_pending_wakeup(struct perf_pending_entry *entry)
  * single linked list and use cmpxchg() to add entries lockless.
  */
 
+static void perf_pending_counter(struct perf_pending_entry *entry)
+{
+	struct perf_counter *counter = container_of(entry,
+			struct perf_counter, pending);
+
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		perf_counter_disable(counter);
+	}
+
+	if (counter->pending_wakeup) {
+		counter->pending_wakeup = 0;
+		perf_counter_wakeup(counter);
+	}
+}
+
 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
 
 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
@@ -1715,8 +1732,9 @@ struct perf_output_handle {
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 {
 	if (handle->nmi) {
+		handle->counter->pending_wakeup = 1;
 		perf_pending_queue(&handle->counter->pending,
-				   perf_pending_wakeup);
+				   perf_pending_counter);
 	} else
 		perf_counter_wakeup(handle->counter);
 }
@@ -2063,8 +2081,21 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 int perf_counter_overflow(struct perf_counter *counter,
 			  int nmi, struct pt_regs *regs)
 {
+	int events = atomic_read(&counter->event_limit);
+	int ret = 0;
+
+	if (events && atomic_dec_and_test(&counter->event_limit)) {
+		ret = 1;
+		if (nmi) {
+			counter->pending_disable = 1;
+			perf_pending_queue(&counter->pending,
+					   perf_pending_counter);
+		} else
+			perf_counter_disable(counter);
+	}
+
 	perf_counter_output(counter, nmi, regs);
-	return 0;
+	return ret;
 }
 
 /*