Commit ab6f824c authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Unify {pinned,flexible}_sched_in()



Less is more; unify the two very nearly identical functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1941011a
kernel/events/core.c: +21 −37
@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
 	return 1;
 }
 
+static inline struct list_head *get_event_list(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
+}
+
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *sibling, *tmp;
@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
 		if (!RB_EMPTY_NODE(&event->group_node)) {
 			add_event_to_groups(sibling, event->ctx);
 
-			if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
-				struct list_head *list = sibling->attr.pinned ?
-					&ctx->pinned_active : &ctx->flexible_active;
-
-				list_add_tail(&sibling->active_list, list);
-			}
+			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+				list_add_tail(&sibling->active_list, get_event_list(sibling));
 		}
 
 		WARN_ON_ONCE(sibling->ctx != event->ctx);
@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
 {
 	int ret = 0;
 
+	WARN_ON_ONCE(event->ctx != ctx);
+
 	lockdep_assert_held(&ctx->lock);
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
@@ -3425,10 +3429,12 @@ struct sched_in_data {
 	int can_add_hw;
 };
 
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int merge_sched_in(struct perf_event *event, void *data)
 {
 	struct sched_in_data *sid = data;
 
+	WARN_ON_ONCE(event->ctx != sid->ctx);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
 		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+			list_add_tail(&event->active_list, get_event_list(event));
 	}
 
-	/*
-	 * If this pinned group hasn't been scheduled,
-	 * put it in error state.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-
-	return 0;
-}
-
-static int flexible_sched_in(struct perf_event *event, void *data)
-{
-	struct sched_in_data *sid = data;
-
-	if (event->state <= PERF_EVENT_STATE_OFF)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
-		if (ret) {
-			sid->can_add_hw = 0;
-			sid->ctx->rotate_necessary = 1;
-			return 0;
-		}
-		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		if (event->attr.pinned)
+			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+		sid->can_add_hw = 0;
+		sid->ctx->rotate_necessary = 1;
 	}
 
 	return 0;
@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 
 	visit_groups_merge(&ctx->pinned_groups,
 			   smp_processor_id(),
-			   pinned_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void
@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 
 	visit_groups_merge(&ctx->flexible_groups,
 			   smp_processor_id(),
-			   flexible_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void
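
Note: the pattern this commit applies, collapsing two near-identical visitors into one that derives its target list from a per-item flag, can be shown with a minimal, self-contained C sketch. All identifiers below (struct item, get_item_list(), merge_visit()) are hypothetical, invented for this example; they are not kernel APIs.

/*
 * Minimal sketch (not kernel code): two visitors that differed only in
 * their target list become one visitor plus a flag-keyed list lookup,
 * mirroring get_event_list()/merge_sched_in() above.
 */
#include <stdio.h>

struct item {
	int pinned;		/* analogous to event->attr.pinned */
	const char *name;
};

static const char *pinned_active   = "pinned_active";
static const char *flexible_active = "flexible_active";

/* Analogous to get_event_list(): derive the list from the item's flag. */
static const char *get_item_list(const struct item *it)
{
	return it->pinned ? pinned_active : flexible_active;
}

/* One visitor replaces the former pinned/flexible pair. */
static int merge_visit(const struct item *it)
{
	printf("%s -> %s\n", it->name, get_item_list(it));

	if (it->pinned) {
		/* the pinned-only ERROR transition would live here */
	}
	return 0;
}

int main(void)
{
	const struct item a = { 1, "cycles" };
	const struct item b = { 0, "cache-misses" };

	merge_visit(&a);
	merge_visit(&b);
	return 0;
}

The same structure is visible in the diff: get_event_list() encodes the one real difference between the old callbacks (pinned vs. flexible active list), merge_sched_in() keeps the pinned-only transition to PERF_EVENT_STATE_ERROR behind an event->attr.pinned check, and the can_add_hw/rotate_necessary bookkeeping on a failed schedule now runs for both event classes.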