Commit b6e5dae1 authored by Thomas Gleixner, committed by Alexei Starovoitov

bpf: Replace open coded recursion prevention in sys_bpf()



The required protection is that the caller cannot be migrated to a
different CPU, as these functions end up in places which either take a
hash bucket lock or might trigger a kprobe inside the memory allocator.
Both scenarios can lead to deadlocks. The deadlock prevention is per CPU:
incrementing the per-CPU variable bpf_prog_active temporarily blocks the
invocation of BPF programs from perf and kprobes.
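
For context, this is roughly how the per-CPU counter gates BPF program
invocation on the tracing side; a sketch paraphrased from
trace_call_bpf() in kernel/trace/bpf_trace.c, not part of this patch:

unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	preempt_disable();
	/* If bpf_prog_active is already non-zero on this CPU, a BPF
	 * program is already running here or instrumentation has been
	 * disabled; skip the program instead of recursing into it.
	 */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		ret = 0;
		goto out;
	}
	...
out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
	return ret;
}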

Replace the open-coded preempt_[dis|en]able() and __this_cpu_[inc|dec]()
pairs with the new helpers bpf_disable_instrumentation() and
bpf_enable_instrumentation(). These helpers are already prepared to make
BPF work on PREEMPT_RT enabled kernels. No functional change for !RT
kernels.
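
The helpers themselves come from a companion patch in this series; a
sketch of their include/linux/bpf.h definitions, assuming the shape of
that patch: they pin the task to the current CPU with migrate_disable()
and bump the counter, using the preemption-safe this_cpu_inc() on
PREEMPT_RT where the cheaper __this_cpu_inc() would be unsafe:

static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}

On !RT kernels migrate_disable() maps to preempt_disable() at this point
in time, which is why the conversion below is not a functional change
there.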

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.317843926@linutronix.de
parent 085fee1a
kernel/bpf/syscall.c: +8 −19
@@ -171,11 +171,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
 						    flags);
 	}
 
-	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
-	 * inside bpf map update or delete otherwise deadlocks are possible
-	 */
-	preempt_disable();
-	__this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_update(map, key, value, flags);
@@ -206,8 +202,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
 		err = map->ops->map_update_elem(map, key, value, flags);
 		rcu_read_unlock();
 	}
-	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	maybe_wait_bpf_programs(map);
 
 	return err;
@@ -222,8 +217,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 	if (bpf_map_is_dev_bound(map))
 		return bpf_map_offload_lookup_elem(map, key, value);
 
-	preempt_disable();
-	this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
@@ -268,8 +262,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 		rcu_read_unlock();
 	}
 
-	this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	maybe_wait_bpf_programs(map);
 
 	return err;
@@ -1136,13 +1129,11 @@ static int map_delete_elem(union bpf_attr *attr)
 		goto out;
 	}
 
-	preempt_disable();
-	__this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	rcu_read_lock();
 	err = map->ops->map_delete_elem(map, key);
 	rcu_read_unlock();
-	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	maybe_wait_bpf_programs(map);
 out:
 	kfree(key);
@@ -1254,13 +1245,11 @@ int generic_map_delete_batch(struct bpf_map *map,
 			break;
 		}
 
-		preempt_disable();
-		__this_cpu_inc(bpf_prog_active);
+		bpf_disable_instrumentation();
 		rcu_read_lock();
 		err = map->ops->map_delete_elem(map, key);
 		rcu_read_unlock();
-		__this_cpu_dec(bpf_prog_active);
-		preempt_enable();
+		bpf_enable_instrumentation();
 		maybe_wait_bpf_programs(map);
 		if (err)
 			break;