Commit 15c7ad51 authored by Robert Richter, committed by Ingo Molnar

perf/x86: Rename Intel specific macros



There are macros that are Intel-specific rather than x86-generic. Rename
them to INTEL_*.

This patch removes X86_PMC_IDX_GENERIC and applies the following renames:

 $ sed -i -e 's/X86_PMC_MAX_/INTEL_PMC_MAX_/g'           \
         arch/x86/include/asm/kvm_host.h                 \
         arch/x86/include/asm/perf_event.h               \
         arch/x86/kernel/cpu/perf_event.c                \
         arch/x86/kernel/cpu/perf_event_p4.c             \
         arch/x86/kvm/pmu.c
 $ sed -i -e 's/X86_PMC_IDX_FIXED/INTEL_PMC_IDX_FIXED/g' \
         arch/x86/include/asm/perf_event.h               \
         arch/x86/kernel/cpu/perf_event.c                \
         arch/x86/kernel/cpu/perf_event_intel.c          \
         arch/x86/kernel/cpu/perf_event_intel_ds.c       \
         arch/x86/kvm/pmu.c
 $ sed -i -e 's/X86_PMC_MSK_/INTEL_PMC_MSK_/g'           \
         arch/x86/include/asm/perf_event.h               \
         arch/x86/kernel/cpu/perf_event.c
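
For reference, the renamed constants describe Intel's global counter index
space: fixed-function counters start at bit 32 of the 64-bit index/enable
masks. A minimal illustrative sketch, with values copied from the
perf_event.h hunk below (not part of this patch):

	/* Fixed counter n lives at global index INTEL_PMC_IDX_FIXED + n,
	 * so the enable mask for the first n fixed counters is built the
	 * same way init_hw_perf_events() builds intel_ctrl below. */
	#define INTEL_PMC_IDX_FIXED	32
	static inline unsigned long long fixed_ctr_enable_mask(int n)
	{
		return ((1ULL << n) - 1) << INTEL_PMC_IDX_FIXED;
	}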

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-2-git-send-email-robert.richter@amd.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1070505d
arch/x86/include/asm/kvm_host.h +2 −2
@@ -313,8 +313,8 @@ struct kvm_pmu {
 	u64 counter_bitmask[2];
 	u64 global_ctrl_mask;
 	u8 version;
-	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
-	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
 };
arch/x86/include/asm/perf_event.h +8 −9
@@ -5,11 +5,10 @@
  * Performance event hw details:
  */
 
-#define X86_PMC_MAX_GENERIC				       32
-#define X86_PMC_MAX_FIXED					3
+#define INTEL_PMC_MAX_GENERIC				       32
+#define INTEL_PMC_MAX_FIXED					3
+#define INTEL_PMC_IDX_FIXED				       32
 
-#define X86_PMC_IDX_GENERIC				        0
-#define X86_PMC_IDX_FIXED				       32
 #define X86_PMC_IDX_MAX					       64
 
 #define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
@@ -121,16 +120,16 @@ struct x86_pmu_capability {

 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)
 
 /* CPU_CLK_Unhalted.Core: */
 #define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)
 
 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
 
 /*
  * We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +138,7 @@ struct x86_pmu_capability {
  * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
-#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)
 
 /*
  * IBS cpuid feature detection
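
Since BTS is modeled as another fixed-mode PMC at offset 16, its global
index works out to 32 + 16 = 48; a small illustrative check, with assumed
values taken from the hunks above:

	#define INTEL_PMC_IDX_FIXED	32
	#define INTEL_PMC_IDX_FIXED_BTS	(INTEL_PMC_IDX_FIXED + 16)
	/* bts_constraint in perf_event_intel_ds.c below reserves exactly
	 * this bit: 1ULL << 48. */
	_Static_assert(INTEL_PMC_IDX_FIXED_BTS == 48, "BTS bit is 48");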
arch/x86/kernel/cpu/perf_event.c +19 −19
@@ -63,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	int idx = hwc->idx;
 	s64 delta;
 
-	if (idx == X86_PMC_IDX_FIXED_BTS)
+	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
 
 	/*
@@ -626,8 +626,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	c = sched->constraints[sched->state.event];
 
 	/* Prefer fixed purpose counters */
-	if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
-		idx = X86_PMC_IDX_FIXED;
+	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+		idx = INTEL_PMC_IDX_FIXED;
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
 				goto done;
@@ -635,7 +635,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	}
 	/* Grab the first unused counter starting with idx */
 	idx = sched->state.counter;
-	for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
 		if (!__test_and_set_bit(idx, sched->state.used))
 			goto done;
 	}
@@ -813,13 +813,13 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	hwc->last_cpu = smp_processor_id();
 	hwc->last_tag = ++cpuc->tags[i];
 
-	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
 		hwc->config_base = 0;
 		hwc->event_base	= 0;
-	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
-		hwc->event_base_rdpmc = (hwc->idx - X86_PMC_IDX_FIXED) | 1<<30;
+		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
 	} else {
 		hwc->config_base = x86_pmu_config_addr(hwc->idx);
 		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
@@ -921,7 +921,7 @@ int x86_perf_event_set_period(struct perf_event *event)
 	s64 period = hwc->sample_period;
 	int ret = 0, idx = hwc->idx;
 
-	if (idx == X86_PMC_IDX_FIXED_BTS)
+	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
 
 	/*
@@ -1338,21 +1338,21 @@ static int __init init_hw_perf_events(void)
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
-	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
 	}
 	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
-	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
 	}
 
 	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1368,7 +1368,7 @@ static int __init init_hw_perf_events(void)
 		 */
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
 			if (c->cmask != X86_RAW_EVENT_MASK
-			    || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
+			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
 				continue;
 			}

@@ -1611,8 +1611,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
 	if (!x86_pmu.attr_rdpmc)
 		return 0;
 
-	if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
-		idx -= X86_PMC_IDX_FIXED;
+	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+		idx -= INTEL_PMC_IDX_FIXED;
 		idx |= 1 << 30;
 	}
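
The x86_pmu_event_idx() hunk above hands user space a fixed-counter index
with bit 30 set, matching the hardware's RDPMC encoding, where ECX bit 30
selects the fixed-function counter bank. A hedged user-space sketch of
what such an index means (illustrative, not part of this patch):

	#include <stdint.h>

	/* Read a counter with RDPMC; 'counter' uses the same encoding as
	 * hwc->event_base_rdpmc above: bit 30 set selects fixed counters,
	 * the low bits pick which one. */
	static inline uint64_t rdpmc(uint32_t counter)
	{
		uint32_t lo, hi;
		__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
		return ((uint64_t)hi << 32) | lo;
	}

	/* e.g. fixed counter 0 (Instr_Retired.Any): rdpmc((1u << 30) | 0) */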

arch/x86/kernel/cpu/perf_event_intel.c +7 −7
@@ -747,7 +747,7 @@ static void intel_pmu_disable_all(void)

 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
-	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();
 
 	intel_pmu_pebs_disable_all();
@@ -763,9 +763,9 @@ static void intel_pmu_enable_all(int added)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
 			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
-	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
 		struct perf_event *event =
-			cpuc->events[X86_PMC_IDX_FIXED_BTS];
+			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
 
 		if (WARN_ON_ONCE(!event))
 			return;
@@ -871,7 +871,7 @@ static inline void intel_pmu_ack_status(u64 ack)

 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = hwc->idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
 
 	mask = 0xfULL << (idx * 4);
@@ -886,7 +886,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
 		return;
@@ -915,7 +915,7 @@ static void intel_pmu_disable_event(struct perf_event *event)

 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = hwc->idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
 
 	/*
@@ -949,7 +949,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;
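
intel_pmu_disable_fixed() above computes 'mask = 0xfULL << (idx * 4)'
because each fixed counter owns a 4-bit control field in
MSR_ARCH_PERFMON_FIXED_CTR_CTRL. A minimal sketch of that per-counter
field manipulation (illustrative helper, not from this patch):

	/* Clear fixed counter idx's 4-bit field in a FIXED_CTR_CTRL
	 * value, disabling it; intel_pmu_disable_fixed() performs the
	 * equivalent read-modify-write on the MSR itself. */
	static inline unsigned long long fixed_ctrl_disable(unsigned long long ctrl_val, int idx)
	{
		return ctrl_val & ~(0xfULL << (idx * 4));
	}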

arch/x86/kernel/cpu/perf_event_intel_ds.c +2 −2
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
  */
 
 struct event_constraint bts_constraint =
-	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
 
 void intel_pmu_enable_bts(u64 config)
 {
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
 		u64	to;
 		u64	flags;
 	};
-	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
 	struct bts_record *at, *top;
 	struct perf_output_handle handle;
 	struct perf_event_header header;