Commit c92bbaa0 authored by Alexei Starovoitov

Merge branch 'simplify-do_redirect'

Björn Töpel says:

====================
This series aims to simplify the XDP maps and
xdp_do_redirect_map()/xdp_do_flush_map(), and to crank out some more
performance from XDP_REDIRECT scenarios.

The first part of the series simplifies all XDP_REDIRECT-capable maps:
by moving the flush list from the map to global scope, the
__XXX_flush_map() functions no longer require a map parameter.
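As a sketch of the pattern (using devmap as the example; the actual
changes are in the diffs below), the per-map percpu flush list:

  /* Before: each map instance owns a percpu flush list, so
   * flushing needs the map pointer.
   */
  struct bpf_dtab {
          struct bpf_map map;
          struct list_head __percpu *flush_list;
          /* ... */
  };
  void __dev_map_flush(struct bpf_map *map);

becomes one global percpu list per map type, initialized at boot:

  /* After: a single file-scope percpu flush list, initialized
   * from an initcall; flushing takes no map argument.
   */
  static DEFINE_PER_CPU(struct list_head, dev_map_flush_list);
  void __dev_map_flush(void);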

As a result, the map_to_flush member, and the logic around it, can be
removed from struct bpf_redirect_info.

The result is simpler code and better performance, since per-packet
checks and bookkeeping are moved to the flush path.
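Roughly, the flush path then collapses to walking the three global
lists unconditionally (a sketch of the end result in
net/core/filter.c, not an exact quote; traversing an empty percpu
list is cheap, so nothing in the per-packet path has to remember
which map needs flushing):

  void xdp_do_flush_map(void)
  {
          __dev_map_flush();
          __cpu_map_flush();
          __xsk_map_flush();
  }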

Pre-series performance:
  $ sudo taskset -c 22 ./xdpsock -i enp134s0f0 -q 20 -n 1 -r -z

   sock0@enp134s0f0:20 rxdrop xdp-drv
                  pps         pkts        1.00
  rx              20,797,350  230,942,399
  tx              0           0

  $ sudo ./xdp_redirect_cpu --dev enp134s0f0 --cpu 22 xdp_cpu_map0

  Running XDP/eBPF prog_name:xdp_cpu_map5_lb_hash_ip_pairs
  XDP-cpumap      CPU:to  pps            drop-pps    extra-info
  XDP-RX          20      7723038        0           0
  XDP-RX          total   7723038        0
  cpumap_kthread  total   0              0           0
  redirect_err    total   0              0
  xdp_exception   total   0              0

Post-series performance:
  $ sudo taskset -c 22 ./xdpsock -i enp134s0f0 -q 20 -n 1 -r -z

   sock0@enp134s0f0:20 rxdrop xdp-drv
                  pps         pkts        1.00
  rx              21,524,979  86,835,327
  tx              0           0

  $ sudo ./xdp_redirect_cpu --dev enp134s0f0 --cpu 22 xdp_cpu_map0

  Running XDP/eBPF prog_name:xdp_cpu_map5_lb_hash_ip_pairs
  XDP-cpumap      CPU:to  pps            drop-pps    extra-info
  XDP-RX          20      7840124        0           0
  XDP-RX          total   7840124        0
  cpumap_kthread  total   0              0           0
  redirect_err    total   0              0
  xdp_exception   total   0              0

Results: +3.5% (xdpsock rxdrop) and +1.5% (xdp_redirect_cpu) for the
two microbenchmarks.

v1->v2 [1]:
  * Removed 'unused-variable' compiler warning (Jakub)

[1] https://lore.kernel.org/bpf/20191218105400.2895-1-bjorn.topel@gmail.com/


====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 5bf2fc1f 1170beaa
include/linux/bpf.h  +4 −4
@@ -959,14 +959,14 @@ struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(struct bpf_map *map);
+void __dev_map_flush(void);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

@@ -1068,7 +1068,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
	return NULL;
}

-static inline void __dev_map_flush(struct bpf_map *map)
+static inline void __dev_map_flush(void)
{
}

@@ -1097,7 +1097,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
	return NULL;
}

-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
{
}

include/linux/filter.h  +0 −1
@@ -592,7 +592,6 @@ struct bpf_redirect_info {
	u32 tgt_index;
	void *tgt_value;
	struct bpf_map *map;
-	struct bpf_map *map_to_flush;
	u32 kern_flags;
};

include/net/xdp_sock.h  +4 −7
@@ -72,7 +72,6 @@ struct xdp_umem {

struct xsk_map {
	struct bpf_map map;
-	struct list_head __percpu *flush_list;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};
@@ -139,9 +138,8 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
@@ -369,13 +367,12 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
	return 0;
}

-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-				     struct xdp_sock *xs)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

-static inline void __xsk_map_flush(struct bpf_map *map)
+static inline void __xsk_map_flush(void)
{
}

kernel/bpf/cpumap.c  +26 −50
@@ -72,17 +72,18 @@ struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
-	struct list_head __percpu *flush_list;
};

-static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx);
+static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
+
+static int bq_flush_to_queue(struct xdp_bulk_queue *bq);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;
-	int ret, cpu;
	u64 cost;
+	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
@@ -106,7 +107,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)

	/* make sure page count doesn't overflow */
	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
-	cost += sizeof(struct list_head) * num_possible_cpus();

	/* Notice returns -EPERM on if map size is larger than memlock limit */
	ret = bpf_map_charge_init(&cmap->map.memory, cost);
@@ -115,23 +115,14 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
		goto free_cmap;
	}

-	cmap->flush_list = alloc_percpu(struct list_head);
-	if (!cmap->flush_list)
-		goto free_charge;
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(per_cpu_ptr(cmap->flush_list, cpu));
-
	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
-		goto free_percpu;
+		goto free_charge;

	return &cmap->map;
-free_percpu:
-	free_percpu(cmap->flush_list);
free_charge:
	bpf_map_charge_finish(&cmap->map.memory);
free_cmap:
@@ -399,22 +390,14 @@ free_rcu:
static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;
-	int cpu;

	/* This cpu_map_entry have been disconnected from map and one
-	 * RCU graze-period have elapsed.  Thus, XDP cannot queue any
+	 * RCU grace-period have elapsed.  Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

-	/* Flush remaining packets in percpu bulkq */
-	for_each_online_cpu(cpu) {
-		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
-
-		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(bq, false);
-	}
	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here, last put free rcpu resources */
	put_cpu_map_entry(rcpu);
@@ -436,7 +419,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 * percpu bulkq to queue.  Due to caller map_delete_elem() disable
 * preemption, cannot call kthread_stop() to make sure queue is empty.
 * Instead a work_queue is started for stopping kthread,
- * cpu_map_kthread_stop, which waits for an RCU graze period before
+ * cpu_map_kthread_stop, which waits for an RCU grace period before
 * stopping kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
@@ -507,7 +490,6 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
-	int cpu;
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
@@ -522,18 +504,6 @@ static void cpu_map_free(struct bpf_map *map)
	bpf_clear_redirect_map(map);
	synchronize_rcu();

-	/* To ensure all pending flush operations have completed wait for flush
-	 * list be empty on _all_ cpus. Because the above synchronize_rcu()
-	 * ensures the map is disconnected from the program we can assume no new
-	 * items will be added to the list.
-	 */
-	for_each_online_cpu(cpu) {
-		struct list_head *flush_list = per_cpu_ptr(cmap->flush_list, cpu);
-
-		while (!list_empty(flush_list))
-			cond_resched();
-	}
-
	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
@@ -544,10 +514,9 @@ static void cpu_map_free(struct bpf_map *map)
		if (!rcpu)
			continue;

-		/* bq flush and cleanup happens after RCU graze-period */
+		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
-	free_percpu(cmap->flush_list);
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}
@@ -599,7 +568,7 @@ const struct bpf_map_ops cpu_map_ops = {
	.map_check_btf		= map_check_no_btf,
};

-static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
+static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
@@ -620,10 +589,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
-			if (likely(in_napi_ctx))
-				xdp_return_frame_rx_napi(xdpf);
-			else
-				xdp_return_frame(xdpf);
+			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
@@ -642,11 +608,11 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
 */
static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
-	struct list_head *flush_list = this_cpu_ptr(rcpu->cmap->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(bq, true);
+		bq_flush_to_queue(bq);

	/* Notice, xdp_buff/page MUST be queued here, long enough for
	 * driver to code invoking us to finished, due to driver
@@ -681,16 +647,26 @@ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
	return 0;
}

-void __cpu_map_flush(struct bpf_map *map)
+void __cpu_map_flush(void)
{
-	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
-	struct list_head *flush_list = this_cpu_ptr(cmap->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
-		bq_flush_to_queue(bq, true);
+		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smb_mb */
		wake_up_process(bq->obj->kthread);
	}
}

+static int __init cpu_map_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
+	return 0;
+}
+
+subsys_initcall(cpu_map_init);
kernel/bpf/devmap.c  +18 −60
@@ -75,7 +75,6 @@ struct bpf_dtab_netdev {
struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
-	struct list_head __percpu *flush_list;
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
@@ -85,6 +84,7 @@ struct bpf_dtab {
	u32 n_buckets;
};

+static DEFINE_PER_CPU(struct list_head, dev_map_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

@@ -109,8 +109,8 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
-	int err, cpu;
-	u64 cost;
+	u64 cost = 0;
+	int err;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -125,9 +125,6 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)

	bpf_map_init_from_attr(&dtab->map, attr);

-	/* make sure page count doesn't overflow */
-	cost = (u64) sizeof(struct list_head) * num_possible_cpus();
-
	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

@@ -143,17 +140,10 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
	if (err)
		return -EINVAL;

-	dtab->flush_list = alloc_percpu(struct list_head);
-	if (!dtab->flush_list)
-		goto free_charge;
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
-
	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
-			goto free_percpu;
+			goto free_charge;

		spin_lock_init(&dtab->index_lock);
	} else {
@@ -161,13 +151,11 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
-			goto free_percpu;
+			goto free_charge;
	}

	return 0;

-free_percpu:
-	free_percpu(dtab->flush_list);
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
@@ -201,7 +189,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	int i, cpu;
+	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
@@ -221,18 +209,6 @@ static void dev_map_free(struct bpf_map *map)
	/* Make sure prior __dev_map_entry_free() have completed. */
	rcu_barrier();

-	/* To ensure all pending flush operations have completed wait for flush
-	 * list to empty on _all_ cpus.
-	 * Because the above synchronize_rcu() ensures the map is disconnected
-	 * from the program we can assume no new items will be added.
-	 */
-	for_each_online_cpu(cpu) {
-		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
-
-		while (!list_empty(flush_list))
-			cond_resched();
-	}
-
	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
@@ -266,7 +242,6 @@ static void dev_map_free(struct bpf_map *map)
		bpf_map_area_free(dtab->netdev_map);
	}

-	free_percpu(dtab->flush_list);
	kfree(dtab);
}

@@ -345,8 +320,7 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
	return -ENOENT;
}

-static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
-		       bool in_napi_ctx)
+static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags)
{
	struct bpf_dtab_netdev *obj = bq->obj;
	struct net_device *dev = obj->dev;
@@ -384,11 +358,7 @@ error:
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

-		/* RX path under NAPI protection, can return frames faster */
-		if (likely(in_napi_ctx))
-			xdp_return_frame_rx_napi(xdpf);
-		else
-			xdp_return_frame(xdpf);
+		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
@@ -401,15 +371,14 @@ error:
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 */
-void __dev_map_flush(struct bpf_map *map)
+void __dev_map_flush(void)
{
-	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	rcu_read_lock();
	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
-		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
+		bq_xmit_all(bq, XDP_XMIT_FLUSH);
	rcu_read_unlock();
}

@@ -436,11 +405,11 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)

{
-	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(bq, 0, true);
+		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in
	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -509,27 +478,11 @@ static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
	return dev ? &dev->ifindex : NULL;
}

-static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
-{
-	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
-		struct xdp_bulk_queue *bq;
-		int cpu;
-
-		rcu_read_lock();
-		for_each_online_cpu(cpu) {
-			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
-		}
-		rcu_read_unlock();
-	}
-}
-
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
-	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
@@ -810,10 +763,15 @@ static struct notifier_block dev_map_notifier = {

static int __init dev_map_init(void)
{
+	int cpu;
+
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
+
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(dev_map_flush_list, cpu));
	return 0;
}
