Commit 3539b96e authored by Roman Gushchin, committed by Alexei Starovoitov
Browse files

bpf: group memory related fields in struct bpf_map_memory



Group "user" and "pages" fields of bpf_map into the bpf_map_memory
structure. Later it can be extended with "memcg" and other related
information.

The main reason for such a change (besides cosmetics) is to pass the
bpf_map_memory structure to charging functions before the actual
allocation of bpf_map.

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d50836cd
Loading
Loading
Loading
Loading
+7 −3
Original line number Diff line number Diff line
@@ -66,6 +66,11 @@ struct bpf_map_ops {
				     u64 imm, u32 *off);
};

/* Memory-accounting state of a BPF map: the page count charged for the
 * map and the user it is charged against. Grouped into one struct so it
 * can later be passed to charging helpers (and extended, e.g. with a
 * memcg pointer) independently of struct bpf_map itself.
 */
struct bpf_map_memory {
	u32 pages;			/* pages charged; checked against the memlock limit
					 * via bpf_map_precharge_memlock() */
	struct user_struct *user;	/* user the pages are charged to */
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
@@ -86,7 +91,7 @@ struct bpf_map {
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	u32 pages;
	struct bpf_map_memory memory;
	bool unpriv_array;
	bool frozen; /* write-once */
	/* 48 bytes hole */
@@ -94,8 +99,7 @@ struct bpf_map {
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	struct user_struct *user ____cacheline_aligned;
	atomic_t refcnt;
	atomic_t refcnt ____cacheline_aligned;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
+1 −1
Original line number Diff line number Diff line
@@ -138,7 +138,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->map.memory.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
+2 −2
Original line number Diff line number Diff line
@@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_cmap;
	cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
	cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* Notice returns -EPERM on if map size is larger than memlock limit */
	ret = bpf_map_precharge_memlock(cmap->map.pages);
	ret = bpf_map_precharge_memlock(cmap->map.memory.pages);
	if (ret) {
		err = ret;
		goto free_cmap;
+2 −2
Original line number Diff line number Diff line
@@ -111,10 +111,10 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
	dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	err = bpf_map_precharge_memlock(dtab->map.memory.pages);
	if (err)
		goto free_dtab;

+2 −2
Original line number Diff line number Diff line
@@ -364,10 +364,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
	htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	err = bpf_map_precharge_memlock(htab->map.memory.pages);
	if (err)
		goto free_htab;

Loading