Commit c85d6913 authored by Roman Gushchin, committed by Alexei Starovoitov
Browse files

bpf: move memory size checks to bpf_map_charge_init()



Most bpf map types perform similar checks and bytes-to-pages
conversion during memory allocation and charging.

Let's unify these checks by moving them into bpf_map_charge_init().

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b936ca64
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -652,7 +652,7 @@ void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
+1 −7
Original line number Diff line number Diff line
@@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
+1 −4
Original line number Diff line number Diff line
@@ -106,12 +106,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
	/* make sure page count doesn't overflow */
	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_cmap;

	/* Notice returns -EPERM on if map size is larger than memlock limit */
	ret = bpf_map_charge_init(&cmap->map.memory,
				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
	ret = bpf_map_charge_init(&cmap->map.memory, cost);
	if (ret) {
		err = ret;
		goto free_cmap;
+1 −4
Original line number Diff line number Diff line
@@ -108,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory,
				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		goto free_dtab;

+1 −6
Original line number Diff line number Diff line
@@ -360,13 +360,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
	else
	       cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&htab->map.memory,
				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
	err = bpf_map_charge_init(&htab->map.memory, cost);
	if (err)
		goto free_htab;

Loading