Commit 505ee3a1 authored by Alaa Hleihel, committed by David S. Miller
Browse files

netfilter: flowtable: Make nf_flow_table_offload_add/del_cb inline



Currently, nf_flow_table_offload_add/del_cb are exported by nf_flow_table
module, therefore modules using them will have hard-dependency
on nf_flow_table and will require loading it all the time.

This can lead to an unnecessary overhead on systems that do not
use this API.

To relax the hard-dependency between the modules, we unexport these
functions and make them static inline.

Fixes: 978703f4 ("netfilter: flowtable: Add API for registering to flow table events")
Signed-off-by: Alaa Hleihel <alaa@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 762f926d
Loading
Loading
Loading
Loading
+45 −4
Original line number Diff line number Diff line
@@ -161,10 +161,51 @@ struct nf_flow_route {
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);

int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
				 flow_setup_cb_t *cb, void *cb_priv);
void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
				  flow_setup_cb_t *cb, void *cb_priv);
/*
 * Register a flow-setup callback (@cb, @cb_priv) on @flow_table's flow
 * block.  Inline so callers do not gain a module dependency on
 * nf_flow_table just for registration.
 *
 * Returns 0 on success, -EEXIST if the same (cb, cb_priv) pair is
 * already registered, or the PTR_ERR() from a failed allocation.
 * The whole operation runs under flow_block_lock held for write.
 */
static inline int
nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *blk = &flow_table->flow_block;
	struct flow_block_cb *bcb;
	int ret = 0;

	down_write(&flow_table->flow_block_lock);
	if (flow_block_cb_lookup(blk, cb, cb_priv)) {
		/* Duplicate registration of the same callback/priv pair. */
		ret = -EEXIST;
	} else {
		bcb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
		if (IS_ERR(bcb))
			ret = PTR_ERR(bcb);
		else
			list_add_tail(&bcb->list, &blk->cb_list);
	}
	up_write(&flow_table->flow_block_lock);

	return ret;
}

/*
 * Unregister a flow-setup callback previously added with
 * nf_flow_table_offload_add_cb().  Inline for the same reason as the
 * add path: avoid a hard module dependency on nf_flow_table.
 *
 * Warns if the (cb, cb_priv) pair is not found; in that case nothing
 * is removed.  Runs under flow_block_lock held for write.
 */
static inline void
nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block_cb *bcb;

	down_write(&flow_table->flow_block_lock);
	bcb = flow_block_cb_lookup(&flow_table->flow_block, cb, cb_priv);
	/* Deleting a callback that was never registered is a caller bug. */
	WARN_ON(!bcb);
	if (bcb) {
		list_del(&bcb->list);
		flow_block_cb_free(bcb);
	}
	up_write(&flow_table->flow_block_lock);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route);
+0 −45
Original line number Diff line number Diff line
@@ -387,51 +387,6 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

/*
 * Pre-patch .c implementation (removed by this commit; the logic moved
 * to a static inline in the header so callers need not load this
 * module).  Registers (@cb, @cb_priv) on the flow table's flow block.
 * Returns 0, -EEXIST if already registered, or PTR_ERR() from a failed
 * flow_block_cb_alloc().
 */
int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
				 flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;
	int err = 0;

	/* Serialize against concurrent add/del on the callback list. */
	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		/* Same (cb, cb_priv) pair already registered. */
		err = -EEXIST;
		goto unlock;
	}

	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
	if (IS_ERR(block_cb)) {
		err = PTR_ERR(block_cb);
		goto unlock;
	}

	list_add_tail(&block_cb->list, &block->cb_list);

unlock:
	up_write(&flow_table->flow_block_lock);
	return err;
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);

/*
 * Pre-patch .c implementation (removed by this commit in favor of a
 * static inline in the header).  Removes and frees the callback entry
 * matching (@cb, @cb_priv); warns if no such entry exists.
 */
void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
				  flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	} else {
		/* Unbalanced del: callback was never registered. */
		WARN_ON(true);
	}
	up_write(&flow_table->flow_block_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);

static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)