Commit 5a09928d authored by Kan Liang, committed by Peter Zijlstra
Browse files

perf/x86: Remove task_ctx_size



A new kmem_cache method has replaced the kzalloc() call used to allocate the
PMU-specific data, so the task_ctx_size field is not required anymore.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-19-git-send-email-kan.liang@linux.intel.com
parent 33cad284
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -2371,7 +2371,6 @@ static struct pmu pmu = {

	.event_idx		= x86_pmu_event_idx,
	.sched_task		= x86_pmu_sched_task,
	.task_ctx_size          = sizeof(struct x86_perf_task_context),
	.swap_task_ctx		= x86_pmu_swap_task_ctx,
	.check_period		= x86_pmu_check_period,

+0 −1
Original line number Diff line number Diff line
@@ -1672,7 +1672,6 @@ void __init intel_pmu_arch_lbr_init(void)

	size = sizeof(struct x86_perf_task_context_arch_lbr) +
	       lbr_nr * sizeof(struct lbr_entry);
	x86_get_pmu()->task_ctx_size = size;
	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);

	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
+0 −4
Original line number Diff line number Diff line
@@ -419,10 +419,6 @@ struct pmu {
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Kmem cache of PMU specific data
+1 −3
Original line number Diff line number Diff line
@@ -1243,15 +1243,13 @@ static void *alloc_task_ctx_data(struct pmu *pmu)
	if (pmu->task_ctx_cache)
		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);

	return kzalloc(pmu->task_ctx_size, GFP_KERNEL);
	return NULL;
}

/*
 * Free PMU-specific task context data previously obtained from
 * alloc_task_ctx_data().
 *
 * NOTE(review): this is the pre-commit form shown in the diff; the commit
 * removes the else/kfree() fallback along with the task_ctx_size field.
 */
static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
{
	/* Data allocated from the PMU's kmem_cache goes back to that cache. */
	if (pmu->task_ctx_cache && task_ctx_data)
		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
	else
		/* Legacy path: data was kzalloc()'d (see task_ctx_size). */
		kfree(task_ctx_data);
}

static void free_ctx(struct rcu_head *head)