Commit 5ee25c87 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

perf, x86: Extract PEBS/BTS allocation functions



Mostly a cleanup: it reduces code indentation and makes the code flow
of reserve_ds_buffers() clearer.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <20101019134808.253453452@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b39f88ac
Loading
Loading
Loading
Loading
+56 −34
Original line number Diff line number Diff line
@@ -74,6 +74,32 @@ static void fini_debug_store_on_cpu(int cpu)
	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -85,6 +111,32 @@ static void release_pebs_buffer(int cpu)
	ds->pebs_buffer_base = 0;
}

static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -133,8 +185,6 @@ static int reserve_ds_buffers(void)

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;
		int max, thresh;

		err = -ENOMEM;
		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
@@ -142,40 +192,12 @@ static int reserve_ds_buffers(void)
			break;
		per_cpu(cpu_hw_events, cpu).ds = ds;

		if (x86_pmu.bts) {
			buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
			if (unlikely(!buffer))
		if (alloc_bts_buffer(cpu))
			break;

			max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
			thresh = max / 16;

			ds->bts_buffer_base = (u64)(unsigned long)buffer;
			ds->bts_index = ds->bts_buffer_base;
			ds->bts_absolute_maximum = ds->bts_buffer_base +
				max * BTS_RECORD_SIZE;
			ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
				thresh * BTS_RECORD_SIZE;
		}

		if (x86_pmu.pebs) {
			buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
			if (unlikely(!buffer))
		if (alloc_pebs_buffer(cpu))
			break;

			max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

			ds->pebs_buffer_base = (u64)(unsigned long)buffer;
			ds->pebs_index = ds->pebs_buffer_base;
			ds->pebs_absolute_maximum = ds->pebs_buffer_base +
				max * x86_pmu.pebs_record_size;
			/*
			 * Always use single record PEBS
			 */
			ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
				x86_pmu.pebs_record_size;
		}

		err = 0;
	}