Commit 7da182a9 authored by Paul Blakey, committed by Pablo Neira Ayuso

netfilter: flowtable: Use work entry per offload command



To allow offload commands to execute in parallel, create a workqueue
for flow table offload and use a work entry per offload command.

Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 422c032a
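The change applies the standard kernel pattern for per-item deferred work: embed a struct work_struct in each command, INIT_WORK() it with a shared handler, and recover the enclosing command inside the handler via container_of(). A minimal, self-contained module sketch of the same pattern follows; all names in it (demo_wq, demo_cmd and friends) are illustrative stand-ins, not code from this commit:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

struct demo_cmd {
	int			id;
	struct work_struct	work;	/* one work entry per command */
};

static void demo_work_handler(struct work_struct *work)
{
	/* Recover the enclosing command from its embedded work entry. */
	struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

	pr_info("demo: handling command %d\n", cmd->id);
	kfree(cmd);			/* each command is freed by its own work item */
}

static int demo_queue_cmd(int id)
{
	struct demo_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;

	cmd->id = id;
	INIT_WORK(&cmd->work, demo_work_handler);
	queue_work(demo_wq, &cmd->work);	/* commands may now run in parallel */
	return 0;
}

static int __init demo_init(void)
{
	/* WQ_UNBOUND lets queued items execute concurrently on any CPU. */
	demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;

	demo_queue_cmd(1);
	demo_queue_cmd(2);
	return 0;
}

static void __exit demo_exit(void)
{
	/* destroy_workqueue() drains all pending work before freeing. */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With a dedicated workqueue, waiting for outstanding commands becomes a flush_workqueue() call (as nf_flow_table_offload_flush() does below), and teardown no longer has to drain a pending list by hand, since destroy_workqueue() drains the queue before freeing it.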
net/netfilter/nf_flow_table_offload.c +15 −31
@@ -12,9 +12,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-static struct work_struct nf_flow_offload_work;
-static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
-static LIST_HEAD(flow_offload_pending_list);
+static struct workqueue_struct *nf_flow_offload_wq;
 
 struct flow_offload_work {
 	struct list_head	list;
@@ -22,6 +20,7 @@ struct flow_offload_work {
 	int			priority;
 	struct nf_flowtable	*flowtable;
 	struct flow_offload	*flow;
+	struct work_struct	work;
 };
 
 #define NF_FLOW_DISSECTOR(__match, __type, __field)	\
@@ -788,14 +787,9 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
 
 static void flow_offload_work_handler(struct work_struct *work)
 {
-	struct flow_offload_work *offload, *next;
-	LIST_HEAD(offload_pending_list);
-
-	spin_lock_bh(&flow_offload_pending_list_lock);
-	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
-	spin_unlock_bh(&flow_offload_pending_list_lock);
+	struct flow_offload_work *offload;
 
-	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-		switch (offload->cmd) {
+	offload = container_of(work, struct flow_offload_work, work);
+	switch (offload->cmd) {
 		case FLOW_CLS_REPLACE:
 			flow_offload_work_add(offload);
@@ -809,18 +803,13 @@ static void flow_offload_work_handler(struct work_struct *work)
 		default:
 			WARN_ON_ONCE(1);
-		}
-		list_del(&offload->list);
-		kfree(offload);
 	}
+
+	kfree(offload);
 }
 
 static void flow_offload_queue_work(struct flow_offload_work *offload)
 {
-	spin_lock_bh(&flow_offload_pending_list_lock);
-	list_add_tail(&offload->list, &flow_offload_pending_list);
-	spin_unlock_bh(&flow_offload_pending_list_lock);
-
-	schedule_work(&nf_flow_offload_work);
+	queue_work(nf_flow_offload_wq, &offload->work);
 }
 
 static struct flow_offload_work *
@@ -837,6 +826,7 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
 	offload->flow = flow;
 	offload->priority = flowtable->priority;
 	offload->flowtable = flowtable;
+	INIT_WORK(&offload->work, flow_offload_work_handler);
 
 	return offload;
 }
@@ -887,7 +877,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
 	if (nf_flowtable_hw_offload(flowtable))
-		flush_work(&nf_flow_offload_work);
+		flush_workqueue(nf_flow_offload_wq);
 }
 
 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
@@ -1052,7 +1042,10 @@ static struct flow_indr_block_entry block_ing_entry = {
 
 int nf_flow_table_offload_init(void)
 {
-	INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
+	nf_flow_offload_wq  = alloc_workqueue("nf_flow_table_offload",
+					      WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+	if (!nf_flow_offload_wq)
+		return -ENOMEM;
 
 	flow_indr_add_block_cb(&block_ing_entry);
 
@@ -1061,15 +1054,6 @@ int nf_flow_table_offload_init(void)
 
 void nf_flow_table_offload_exit(void)
 {
-	struct flow_offload_work *offload, *next;
-	LIST_HEAD(offload_pending_list);
-
 	flow_indr_del_block_cb(&block_ing_entry);
-
-	cancel_work_sync(&nf_flow_offload_work);
-
-	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-		list_del(&offload->list);
-		kfree(offload);
-	}
+	destroy_workqueue(nf_flow_offload_wq);
 }