Commit 4a32fea9 authored by Christoph Lameter, committed by Tejun Heo
Browse files

scheduler: Replace __get_cpu_var with this_cpu_ptr



Convert all uses of __get_cpu_var for address calculation to use
this_cpu_ptr instead.

[Uses of __get_cpu_var with cpumask_var_t are no longer
handled by this patch]

Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent dc5df73b
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

+2 −2
Original line number Diff line number Diff line
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

struct perf_callchain_entry *
+12 −12
Original line number Diff line number Diff line
@@ -239,7 +239,7 @@ static void perf_duration_warn(struct irq_work *w)
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
@@ -261,10 +261,10 @@ void perf_sample_event_took(u64 sample_len_ns)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artifically low until we have
@@ -877,7 +877,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct list_head *head = this_cpu_ptr(&rotation_list);

	WARN_ON(!irqs_disabled());

@@ -2389,7 +2389,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
	 * to check if we have to switch out PMU state.
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
		perf_cgroup_sched_out(task, next);
}

@@ -2632,11 +2632,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
	 * to check if we have to switch in PMU state.
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
		perf_cgroup_sched_in(prev, task);

	/* check for system-wide branch_stack events */
	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
	if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
		perf_branch_stack_sched_in(prev, task);
}

@@ -2891,7 +2891,7 @@ bool perf_event_can_stop_tick(void)

void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct list_head *head = this_cpu_ptr(&rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;
	struct perf_event_context *ctx;
	int throttled;
@@ -5671,7 +5671,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
	struct perf_event *event;
	struct hlist_head *head;

@@ -5690,7 +5690,7 @@ end:

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

	return get_recursion_context(swhash->recursion);
}
@@ -5698,7 +5698,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}
@@ -5727,7 +5727,7 @@ static void perf_swevent_read(struct perf_event *event)

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

+2 −2
Original line number Diff line number Diff line
@@ -650,10 +650,10 @@ static inline int cpu_of(struct rq *rq)
DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 rq_clock(struct rq *rq)
{
+1 −1
Original line number Diff line number Diff line
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

Loading