Commit 902c2a31 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'cxgb4-ch_ktls-updates-in-net-next'



Rohit Maheshwari says:

====================
cxgb4/ch_ktls: updates in net-next

This series of patches improves connection setup and statistics.

This series is broken down as follows:

Patch 1 fixes the handling of connection setup failure in HW. The driver
shouldn't return success to tls_dev_add until HW returns success.

Patch 2 avoids the log flood.

Patch 3 adds ktls statistics at port level.

v1->v2:
- removed conn_up from all places.

v2->v3:
- Corrected timeout handling.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 2c956a5a 3427e13e
Loading
Loading
Loading
Loading
+15 −20
Original line number Diff line number Diff line
@@ -3527,6 +3527,10 @@ DEFINE_SHOW_ATTRIBUTE(meminfo);

static int chcr_stats_show(struct seq_file *seq, void *v)
{
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	struct ch_ktls_port_stats_debug *ktls_port;
	int i = 0;
#endif
	struct adapter *adap = seq->private;

	seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
@@ -3557,18 +3561,6 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
	seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
	seq_printf(seq, "Tx TLS offload refcount:          %20u\n",
		   refcount_read(&adap->chcr_ktls.ktls_refcount));
	seq_printf(seq, "Tx HW offload contexts added:     %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_ctx));
	seq_printf(seq, "Tx connection created:            %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_open));
	seq_printf(seq, "Tx connection failed:             %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_fail));
	seq_printf(seq, "Tx connection closed:             %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_close));
	seq_printf(seq, "Packets passed for encryption :   %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_packets));
	seq_printf(seq, "Bytes passed for encryption :     %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_bytes));
	seq_printf(seq, "Tx records send:                  %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
	seq_printf(seq, "Tx partial start of records:      %20llu\n",
@@ -3581,14 +3573,17 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
	seq_printf(seq, "TX trim pkts :                    %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
	seq_printf(seq, "Tx out of order packets:          %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_ooo));
	seq_printf(seq, "Tx drop pkts before HW offload:   %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_skip_no_sync_data));
	seq_printf(seq, "Tx drop not synced packets:       %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_no_sync_data));
	seq_printf(seq, "Tx drop bypass req:               %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_bypass_req));
	while (i < MAX_NPORTS) {
		ktls_port = &adap->ch_ktls_stats.ktls_port[i];
		seq_printf(seq, "Port %d\n", i);
		seq_printf(seq, "Tx connection created:            %20llu\n",
			   atomic64_read(&ktls_port->ktls_tx_connection_open));
		seq_printf(seq, "Tx connection failed:             %20llu\n",
			   atomic64_read(&ktls_port->ktls_tx_connection_fail));
		seq_printf(seq, "Tx connection closed:             %20llu\n",
			   atomic64_read(&ktls_port->ktls_tx_connection_close));
		i++;
	}
#endif
	return 0;
}
+34 −16
Original line number Diff line number Diff line
@@ -117,14 +117,6 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
	"vlan_insertions        ",
	"gro_packets            ",
	"gro_merged             ",
};

static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
	"db_drop                ",
	"db_full                ",
	"db_empty               ",
	"write_coal_success     ",
	"write_coal_fail        ",
#if  IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	"tx_tls_encrypted_packets",
	"tx_tls_encrypted_bytes  ",
@@ -136,6 +128,14 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
#endif
};

static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
	"db_drop                ",
	"db_full                ",
	"db_empty               ",
	"write_coal_success     ",
	"write_coal_fail        ",
};

static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
	"-------Loopback----------- ",
	"octets_ok              ",
@@ -257,14 +257,6 @@ struct queue_port_stats {
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

struct adapter_stats {
	u64 db_drop;
	u64 db_full;
	u64 db_empty;
	u64 wc_success;
	u64 wc_fail;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
@@ -276,12 +268,23 @@ struct adapter_stats {
#endif
};

struct adapter_stats {
	u64 db_drop;
	u64 db_full;
	u64 db_empty;
	u64 wc_success;
	u64 wc_fail;
};

static void collect_sge_port_stats(const struct adapter *adap,
				   const struct port_info *p,
				   struct queue_port_stats *s)
{
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	const struct ch_ktls_port_stats_debug *ktls_stats;
#endif
	struct sge_eohw_txq *eohw_tx;
	unsigned int i;

@@ -306,6 +309,21 @@ static void collect_sge_port_stats(const struct adapter *adap,
			s->vlan_ins += eohw_tx->vlan_ins;
		}
	}
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
	s->tx_tls_encrypted_packets =
		atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
	s->tx_tls_encrypted_bytes =
		atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
	s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
	s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
	s->tx_tls_skip_no_sync_data =
		atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
	s->tx_tls_drop_no_sync_data =
		atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
	s->tx_tls_drop_bypass_req =
		atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
#endif
}

static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+4 −4
Original line number Diff line number Diff line
@@ -690,7 +690,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
			 * ULD is/are already active, return failure.
			 */
			if (cxgb4_uld_in_use(adap)) {
				dev_warn(adap->pdev_dev,
				dev_dbg(adap->pdev_dev,
					"ULD connections (tid/stid) active. Can't enable kTLS\n");
				return -EINVAL;
			}
@@ -699,7 +699,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
			if (ret)
				return ret;
			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
			pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
		} else {
			/* ktls settings already up, just increment refcount. */
			refcount_inc(&adap->chcr_ktls.ktls_refcount);
@@ -716,7 +716,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
					    0, 1, &params, &params);
			if (ret)
				return ret;
			pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
		}
	}

+13 −8
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@
#include "cxgb4.h"

#define MAX_ULD_QSETS 16
#define MAX_ULD_NPORTS 4

/* CPL message priority levels */
enum {
@@ -365,17 +366,10 @@ struct cxgb4_virt_res { /* virtualized HW resources */
};

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
struct ch_ktls_stats_debug {
struct ch_ktls_port_stats_debug {
	atomic64_t ktls_tx_connection_open;
	atomic64_t ktls_tx_connection_fail;
	atomic64_t ktls_tx_connection_close;
	atomic64_t ktls_tx_send_records;
	atomic64_t ktls_tx_end_pkts;
	atomic64_t ktls_tx_start_pkts;
	atomic64_t ktls_tx_middle_pkts;
	atomic64_t ktls_tx_retransmit_pkts;
	atomic64_t ktls_tx_complete_pkts;
	atomic64_t ktls_tx_trimmed_pkts;
	atomic64_t ktls_tx_encrypted_packets;
	atomic64_t ktls_tx_encrypted_bytes;
	atomic64_t ktls_tx_ctx;
@@ -384,6 +378,17 @@ struct ch_ktls_stats_debug {
	atomic64_t ktls_tx_drop_no_sync_data;
	atomic64_t ktls_tx_drop_bypass_req;
};

struct ch_ktls_stats_debug {
	struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
	atomic64_t ktls_tx_send_records;
	atomic64_t ktls_tx_end_pkts;
	atomic64_t ktls_tx_start_pkts;
	atomic64_t ktls_tx_middle_pkts;
	atomic64_t ktls_tx_retransmit_pkts;
	atomic64_t ktls_tx_complete_pkts;
	atomic64_t ktls_tx_trimmed_pkts;
};
#endif

struct chcr_stats_debug {
+161 −146
Original line number Diff line number Diff line
@@ -125,60 +125,6 @@ out:
	return ret;
}

static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
					     int new_state)
{
	/* This function can be called from both rx (interrupt context) and tx
	 * queue contexts.
	 */
	spin_lock_bh(&tx_info->lock);
	switch (tx_info->connection_state) {
	case KTLS_CONN_CLOSED:
		tx_info->connection_state = new_state;
		break;

	case KTLS_CONN_ACT_OPEN_REQ:
		/* only go forward if state is greater than current state. */
		if (new_state <= tx_info->connection_state)
			break;
		/* update to the next state and also initialize TCB */
		tx_info->connection_state = new_state;
		fallthrough;
	case KTLS_CONN_ACT_OPEN_RPL:
		/* if we are stuck in this state, means tcb init might not
		 * received by HW, try sending it again.
		 */
		if (!chcr_init_tcb_fields(tx_info))
			tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
		break;

	case KTLS_CONN_SET_TCB_REQ:
		/* only go forward if state is greater than current state. */
		if (new_state <= tx_info->connection_state)
			break;
		/* update to the next state and check if l2t_state is valid  */
		tx_info->connection_state = new_state;
		fallthrough;
	case KTLS_CONN_SET_TCB_RPL:
		/* Check if l2t state is valid, then move to ready state. */
		if (cxgb4_check_l2t_valid(tx_info->l2te)) {
			tx_info->connection_state = KTLS_CONN_TX_READY;
			atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ctx);
		}
		break;

	case KTLS_CONN_TX_READY:
		/* nothing to be done here */
		break;

	default:
		pr_err("unknown KTLS connection state\n");
		break;
	}
	spin_unlock_bh(&tx_info->lock);

	return tx_info->connection_state;
}
/*
 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
 * @sk - tcp socket.
@@ -298,27 +244,17 @@ static int chcr_setup_connection(struct sock *sk,
		return -EINVAL;

	tx_info->atid = atid;
	tx_info->ip_family = sk->sk_family;

	if (sk->sk_family == AF_INET) {
		tx_info->ip_family = AF_INET;
	if (tx_info->ip_family == AF_INET) {
		ret = chcr_ktls_act_open_req(sk, tx_info, atid);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (!sk->sk_ipv6only &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			tx_info->ip_family = AF_INET;
			ret = chcr_ktls_act_open_req(sk, tx_info, atid);
		} else {
			tx_info->ip_family = AF_INET6;
			ret = cxgb4_clip_get(tx_info->netdev,
					     (const u32 *)
					     &sk->sk_v6_rcv_saddr.s6_addr,
		ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
				     &sk->sk_v6_rcv_saddr,
				     1);
		if (ret)
				goto out;
			return ret;
		ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
		}
#endif
	}

@@ -326,16 +262,21 @@ static int chcr_setup_connection(struct sock *sk,
	 * success, if any other return type clear atid and return that failure.
	 */
	if (ret) {
		if (ret == NET_XMIT_CN)
		if (ret == NET_XMIT_CN) {
			ret = 0;
		else
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			/* clear clip entry */
			if (tx_info->ip_family == AF_INET6)
				cxgb4_clip_release(tx_info->netdev,
						   (const u32 *)
						   &sk->sk_v6_rcv_saddr,
						   1);
#endif
			cxgb4_free_atid(t, atid);
		goto out;
		}
	}

	/* update the connection state */
	chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
out:
	return ret;
}

@@ -396,15 +337,10 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
	struct chcr_ktls_ofld_ctx_tx *tx_ctx =
				chcr_get_ktls_tx_context(tls_ctx);
	struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
	struct sock *sk;
	struct ch_ktls_port_stats_debug *port_stats;

	if (!tx_info)
		return;
	sk = tx_info->sk;

	spin_lock(&tx_info->lock);
	tx_info->connection_state = KTLS_CONN_CLOSED;
	spin_unlock(&tx_info->lock);

	/* clear l2t entry */
	if (tx_info->l2te)
@@ -413,8 +349,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
#if IS_ENABLED(CONFIG_IPV6)
	/* clear clip entry */
	if (tx_info->ip_family == AF_INET6)
		cxgb4_clip_release(netdev,
				   (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
		cxgb4_clip_release(netdev, (const u32 *)
				   &tx_info->sk->sk_v6_rcv_saddr,
				   1);
#endif

@@ -426,7 +362,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
				 tx_info->tid, tx_info->ip_family);
	}

	atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_connection_close);
	port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
	atomic64_inc(&port_stats->ktls_tx_connection_close);
	kvfree(tx_info);
	tx_ctx->chcr_info = NULL;
	/* release module refcount */
@@ -448,6 +385,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
			     u32 start_offload_tcp_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct ch_ktls_port_stats_debug *port_stats;
	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
	struct chcr_ktls_info *tx_info;
	struct dst_entry *dst;
@@ -461,30 +399,23 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,

	pi = netdev_priv(netdev);
	adap = pi->adapter;
	port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
	atomic64_inc(&port_stats->ktls_tx_connection_open);

	if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
		pr_err("not expecting for RX direction\n");
		ret = -EINVAL;
		goto out;
	}
	if (tx_ctx->chcr_info) {
		ret = -EINVAL;

	if (tx_ctx->chcr_info)
		goto out;
	}

	tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
	if (!tx_info) {
		ret = -ENOMEM;
	if (!tx_info)
		goto out;
	}

	spin_lock_init(&tx_info->lock);

	/* clear connection state */
	spin_lock(&tx_info->lock);
	tx_info->connection_state = KTLS_CONN_CLOSED;
	spin_unlock(&tx_info->lock);

	tx_info->sk = sk;
	spin_lock_init(&tx_info->lock);
	/* initialize tid and atid to -1, 0 is a also a valid id. */
	tx_info->tid = -1;
	tx_info->atid = -1;
@@ -495,10 +426,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
	tx_info->tx_chan = pi->tx_chan;
	tx_info->smt_idx = pi->smt_idx;
	tx_info->port_id = pi->port_id;
	tx_info->prev_ack = 0;
	tx_info->prev_win = 0;

	tx_info->rx_qid = chcr_get_first_rx_qid(adap);
	if (unlikely(tx_info->rx_qid < 0))
		goto out2;
		goto free_tx_info;

	tx_info->prev_seq = start_offload_tcp_sn;
	tx_info->tcp_start_seq_number = start_offload_tcp_sn;
@@ -506,18 +439,22 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
	/* save crypto keys */
	ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
	if (ret < 0)
		goto out2;
		goto free_tx_info;

	/* get peer ip */
	if (sk->sk_family == AF_INET) {
		memcpy(daaddr, &sk->sk_daddr, 4);
		tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (!sk->sk_ipv6only &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			memcpy(daaddr, &sk->sk_daddr, 4);
		else
			tx_info->ip_family = AF_INET;
		} else {
			memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
			tx_info->ip_family = AF_INET6;
		}
#endif
	}

@@ -525,13 +462,13 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
	dst = sk_dst_get(sk);
	if (!dst) {
		pr_err("DST entry not found\n");
		goto out2;
		goto free_tx_info;
	}
	n = dst_neigh_lookup(dst, daaddr);
	if (!n || !n->dev) {
		pr_err("neighbour not found\n");
		dst_release(dst);
		goto out2;
		goto free_tx_info;
	}
	tx_info->l2te  = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);

@@ -540,31 +477,86 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,

	if (!tx_info->l2te) {
		pr_err("l2t entry not found\n");
		goto out2;
		goto free_tx_info;
	}

	tx_ctx->chcr_info = tx_info;
	/* Driver shouldn't be removed until any single connection exists */
	if (!try_module_get(THIS_MODULE))
		goto free_l2t;

	init_completion(&tx_info->completion);
	/* create a filter and call cxgb4_l2t_send to send the packet out, which
	 * will take care of updating l2t entry in hw if not already done.
	 */
	ret = chcr_setup_connection(sk, tx_info);
	if (ret)
		goto out2;
	tx_info->open_state = CH_KTLS_OPEN_PENDING;

	/* Driver shouldn't be removed until any single connection exists */
	if (!try_module_get(THIS_MODULE)) {
		ret = -EINVAL;
		goto out2;
	if (chcr_setup_connection(sk, tx_info))
		goto put_module;

	/* Wait for reply */
	wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
	spin_lock_bh(&tx_info->lock);
	if (tx_info->open_state) {
		/* need to wait for hw response, can't free tx_info yet. */
		if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
			tx_info->pending_close = true;
		/* free the lock after the cleanup */
		goto put_module;
	}
	spin_unlock_bh(&tx_info->lock);

	/* initialize tcb */
	reinit_completion(&tx_info->completion);
	/* mark it pending for hw response */
	tx_info->open_state = CH_KTLS_OPEN_PENDING;

	if (chcr_init_tcb_fields(tx_info))
		goto free_tid;

	/* Wait for reply */
	wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
	spin_lock_bh(&tx_info->lock);
	if (tx_info->open_state) {
		/* need to wait for hw response, can't free tx_info yet. */
		tx_info->pending_close = true;
		/* free the lock after cleanup */
		goto free_tid;
	}
	spin_unlock_bh(&tx_info->lock);

	if (!cxgb4_check_l2t_valid(tx_info->l2te))
		goto free_tid;

	atomic64_inc(&port_stats->ktls_tx_ctx);
	tx_ctx->chcr_info = tx_info;

	atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open);
	return 0;
out2:

free_tid:
	chcr_ktls_mark_tcb_close(tx_info);
#if IS_ENABLED(CONFIG_IPV6)
	/* clear clip entry */
	if (tx_info->ip_family == AF_INET6)
		cxgb4_clip_release(netdev, (const u32 *)
				   &sk->sk_v6_rcv_saddr,
				   1);
#endif
	cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
			 tx_info->tid, tx_info->ip_family);

put_module:
	/* release module refcount */
	module_put(THIS_MODULE);
free_l2t:
	cxgb4_l2t_release(tx_info->l2te);
free_tx_info:
	if (tx_info->pending_close)
		spin_unlock_bh(&tx_info->lock);
	else
		kvfree(tx_info);
out:
	atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_fail);
	return ret;
	atomic64_inc(&port_stats->ktls_tx_connection_fail);
	return -1;
}

/*
@@ -627,20 +619,39 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
	tx_info = lookup_atid(t, atid);

	if (!tx_info || tx_info->atid != atid) {
		pr_err("tx_info or atid is not correct\n");
		pr_err("%s: incorrect tx_info or atid\n", __func__);
		return -1;
	}

	cxgb4_free_atid(t, atid);
	tx_info->atid = -1;

	spin_lock(&tx_info->lock);
	/* HW response is very close, finish pending cleanup */
	if (tx_info->pending_close) {
		spin_unlock(&tx_info->lock);
		if (!status) {
			/* it's a late success, tcb status is establised,
			 * mark it close.
			 */
			chcr_ktls_mark_tcb_close(tx_info);
			cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
					 tid, tx_info->ip_family);
		}
		kvfree(tx_info);
		return 0;
	}

	if (!status) {
		tx_info->tid = tid;
		cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);

		cxgb4_free_atid(t, atid);
		tx_info->atid = -1;
		/* update the connection state */
		chcr_ktls_update_connection_state(tx_info,
						  KTLS_CONN_ACT_OPEN_RPL);
		tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
	} else {
		tx_info->open_state = CH_KTLS_OPEN_FAILURE;
	}
	spin_unlock(&tx_info->lock);

	complete(&tx_info->completion);
	return 0;
}

@@ -658,12 +669,22 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)

	t = &adap->tids;
	tx_info = lookup_tid(t, tid);

	if (!tx_info || tx_info->tid != tid) {
		pr_err("tx_info or atid is not correct\n");
		pr_err("%s: incorrect tx_info or tid\n", __func__);
		return -1;
	}
	/* update the connection state */
	chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);

	spin_lock(&tx_info->lock);
	if (tx_info->pending_close) {
		spin_unlock(&tx_info->lock);
		kvfree(tx_info);
		return 0;
	}
	tx_info->open_state = false;
	spin_unlock(&tx_info->lock);

	complete(&tx_info->completion);
	return 0;
}

@@ -765,6 +786,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
				   u64 tcp_ack, u64 tcp_win)
{
	bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
	struct ch_ktls_port_stats_debug *port_stats;
	u32 len, cpl = 0, ndesc, wr_len;
	struct fw_ulptx_wr *wr;
	int credits;
@@ -798,12 +820,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
	/* reset snd una if it's a re-transmit pkt */
	if (tcp_seq != tx_info->prev_seq) {
		/* reset snd_una */
		port_stats =
			&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
		pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
						 TCB_SND_UNA_RAW_W,
						 TCB_SND_UNA_RAW_V
						 (TCB_SND_UNA_RAW_M),
						 TCB_SND_UNA_RAW_V(0), 0);
		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ooo);
		atomic64_inc(&port_stats->ktls_tx_ooo);
		cpl++;
	}
	/* update ack */
@@ -1836,6 +1860,7 @@ out:
/* nic tls TX handler */
static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ch_ktls_port_stats_debug *port_stats;
	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
	struct ch_ktls_stats_debug *stats;
	struct tcphdr *th = tcp_hdr(skb);
@@ -1845,7 +1870,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
	u32 tls_end_offset, tcp_seq;
	struct tls_context *tls_ctx;
	struct sk_buff *local_skb;
	int new_connection_state;
	struct sge_eth_txq *q;
	struct adapter *adap;
	unsigned long flags;
@@ -1868,15 +1892,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
	if (unlikely(!tx_info))
		goto out;

	/* check the connection state, we don't need to pass new connection
	 * state, state machine will check and update the new state if it is
	 * stuck due to responses not received from HW.
	 * Start the tx handling only if state is KTLS_CONN_TX_READY.
	 */
	new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
	if (new_connection_state != KTLS_CONN_TX_READY)
		goto out;

	/* don't touch the original skb, make a new skb to extract each records
	 * and send them separately.
	 */
@@ -1887,6 +1902,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)

	adap = tx_info->adap;
	stats = &adap->ch_ktls_stats;
	port_stats = &stats->ktls_port[tx_info->port_id];

	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
@@ -1932,13 +1948,13 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
		 */
		if (unlikely(!record)) {
			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
			atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
			atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
			goto out;
		}

		if (unlikely(tls_record_is_start_marker(record))) {
			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
			atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
			atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
			goto out;
		}

@@ -2009,9 +2025,8 @@ clear_ref:
	} while (data_len > 0);

	tx_info->prev_seq = ntohl(th->seq) + skb->data_len;

	atomic64_inc(&stats->ktls_tx_encrypted_packets);
	atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
	atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);

	/* tcp finish is set, send a separate tcp msg including all the options
	 * as well.
Loading