Commit a3ac249a authored by Rohit Maheshwari's avatar Rohit Maheshwari Committed by David S. Miller
Browse files

cxgb4/chcr: Enable ktls settings at run time



The current design enables the ktls setting from the start, which is
not efficient. With this change, the feature is enabled only when the
user requests TLS offload on an interface.

v1->v2:
- taking the ULD module refcount for as long as any single connection exists.
- taking rtnl_lock() before clearing tls_devops.

v2->v3:
- cxgb4 is now registering to tlsdev_ops.
- module refcount inc/dec in chcr.
- refcount is only for connections.
- removed new code from cxgb_set_feature().

v3->v4:
- fixed warning message.

Signed-off-by: default avatarRohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 79a1f0cc
Loading
Loading
Loading
Loading
+10 −13
Original line number Diff line number Diff line
@@ -33,6 +33,13 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

#if defined(CONFIG_CHELSIO_TLS_DEVICE)
/*
 * kTLS offload callbacks, registered with the stack through
 * chcr_uld_info.tlsdev_ops so offload setup/teardown requests are
 * routed to this driver.
 */
static const struct tlsdev_ops chcr_ktls_ops = {
	.tls_dev_add = chcr_ktls_dev_add,	/* begin offloading a TLS connection */
	.tls_dev_del = chcr_ktls_dev_del,	/* tear down offload state for a connection */
};
#endif

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
@@ -56,6 +63,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tlsdev_ops = &chcr_ktls_ops,
#endif
};

static void detach_work_fn(struct work_struct *work)
@@ -207,11 +217,6 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
		chcr_enable_ktls(padap(&u_ctx->dev));
#endif
out:
	return u_ctx;
}
@@ -348,20 +353,12 @@ static void __exit chcr_crypto_exit(void)
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
+8 −2
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@
#define __CHCR_CORE_H__

#include <crypto/algapi.h>
#include <net/tls.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "t4_msg.h"
@@ -223,10 +224,15 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
void chcr_enable_ktls(struct adapter *adap);
void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
			     enum tls_offload_ctx_dir direction,
			     struct tls_crypto_info *crypto_info,
			     u32 start_offload_tcp_sn);
extern void chcr_ktls_dev_del(struct net_device *netdev,
			      struct tls_context *tls_ctx,
			      enum tls_offload_ctx_dir direction);
#endif
#endif /* __CHCR_CORE_H__ */
+15 −44
Original line number Diff line number Diff line
@@ -373,7 +373,7 @@ static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
 * @tls_ctx - tls context.
 * @direction - TX/RX crypto direction
 */
static void chcr_ktls_dev_del(struct net_device *netdev,
void chcr_ktls_dev_del(struct net_device *netdev,
		       struct tls_context *tls_ctx,
		       enum tls_offload_ctx_dir direction)
{
@@ -411,6 +411,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
	atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
	kvfree(tx_info);
	tx_ctx->chcr_info = NULL;
	/* release module refcount */
	module_put(THIS_MODULE);
}

/*
@@ -422,7 +424,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 * @direction - TX/RX crypto direction
 * return: SUCCESS/FAILURE.
 */
static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
		      enum tls_offload_ctx_dir direction,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
@@ -528,6 +530,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
	if (ret)
		goto out2;

	/* Pin the module so the driver can't be unloaded while this connection exists */
	if (!try_module_get(THIS_MODULE)) {
		ret = -EINVAL;
		goto out2;
	}

	atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
	return 0;
out2:
@@ -537,43 +545,6 @@ out:
	return ret;
}

static const struct tlsdev_ops chcr_ktls_ops = {
	.tls_dev_add = chcr_ktls_dev_add,
	.tls_dev_del = chcr_ktls_dev_del,
};

/*
 * chcr_enable_ktls: advertise NETIF_F_HW_TLS_TX and install the kTLS
 *		     tlsdev_ops on every port of the adapter.
 */
void chcr_enable_ktls(struct adapter *adap)
{
	struct net_device *dev;
	int port;

	for_each_port(adap, port) {
		dev = adap->port[port];
		/* Expose TX TLS offload to the stack and hook up our ops. */
		dev->features |= NETIF_F_HW_TLS_TX;
		dev->hw_features |= NETIF_F_HW_TLS_TX;
		dev->tlsdev_ops = &chcr_ktls_ops;
	}
}

/*
 * chcr_disable_ktls: withdraw NETIF_F_HW_TLS_TX and detach the kTLS
 *		      tlsdev_ops from every port of the adapter.
 */
void chcr_disable_ktls(struct adapter *adap)
{
	struct net_device *dev;
	int port;

	for_each_port(adap, port) {
		dev = adap->port[port];
		dev->features &= ~NETIF_F_HW_TLS_TX;
		dev->hw_features &= ~NETIF_F_HW_TLS_TX;
		/* NOTE(review): tlsdev_ops is cleared with no visible locking
		 * here; the commit notes mention taking rtnl_lock() before
		 * clearing tls_devops — confirm the caller holds it.
		 */
		dev->tlsdev_ops = NULL;
	}
}

/*
 * chcr_init_tcb_fields:  Initialize tcb fields to handle TCP seq number
 *			  handling.
+7 −2
Original line number Diff line number Diff line
@@ -89,10 +89,15 @@ static inline int chcr_get_first_rx_qid(struct adapter *adap)
	return u_ctx->lldi.rxq_ids[0];
}

void chcr_enable_ktls(struct adapter *adap);
void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
		      enum tls_offload_ctx_dir direction,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn);
void chcr_ktls_dev_del(struct net_device *netdev,
		       struct tls_context *tls_ctx,
		       enum tls_offload_ctx_dir direction);
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
#endif /* __CHCR_KTLS_H__ */
+4 −0
Original line number Diff line number Diff line
@@ -1099,6 +1099,7 @@ struct adapter {

	/* TC u32 offload */
	struct cxgb4_tc_u32_table *tc_u32;
	struct chcr_ktls chcr_ktls;
	struct chcr_stats_debug chcr_stats;

	/* TC flower offload */
@@ -2060,4 +2061,7 @@ int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
#endif
#endif /* __CXGB4_H__ */
Loading