Commit 6eac7795 authored by David Miller, committed by Alexei Starovoitov
Browse files

bpf/tests: Use migrate disable instead of preempt disable



Replace the preemption disable/enable with migrate_disable/enable() to
reflect the actual requirement and to allow PREEMPT_RT to substitute it
with an actual migration disable mechanism which does not disable
preemption.

[ tglx: Switched it over to migrate disable ]

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.785306549@linutronix.de
parent 3d9f773c
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -6660,14 +6660,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
	u64 start, finish;
	int ret = 0, i;

	preempt_disable();
	migrate_disable();
	start = ktime_get_ns();

	for (i = 0; i < runs; i++)
		ret = BPF_PROG_RUN(fp, data);

	finish = ktime_get_ns();
	preempt_enable();
	migrate_enable();

	*duration = finish - start;
	do_div(*duration, runs);
+4 −4
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
@@ -54,18 +54,18 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);