Commit 395048eb authored by David S. Miller
Browse files

Merge branch 'smc-fixes'



Ursula Braun says:

====================
net/smc: fixes 2018-11-12

here is V4 of some net/smc fixes in different areas for the net tree.

v1->v2:
   do not define 8-byte alignment for union smcd_cdc_cursor in
   patch 4/5 "net/smc: atomic SMCD cursor handling"
v2->v3:
   stay with 8-byte alignment for union smcd_cdc_cursor in
   patch 4/5 "net/smc: atomic SMCD cursor handling", but get rid of
   __packed for struct smcd_cdc_msg
v3->v4:
   get rid of another __packed for struct smc_cdc_msg in
   patch 4/5 "net/smc: atomic SMCD cursor handling"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1e2b1046 e438bae4
Loading
Loading
Loading
Loading
+7 −4
Original line number Diff line number Diff line
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_info && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	flush_work(&smc->connect_work);
	kfree(smc->connect_info);
	smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,

	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
					ibport, &aclc->lcl, NULL, 0);
					ibport, ntoh24(aclc->qpn), &aclc->lcl,
					NULL, 0);
	if (local_contact < 0) {
		if (local_contact == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
	int rc = 0;

	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
	local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
					NULL, ismdev, aclc->gid);
	if (local_contact < 0)
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
				int *local_contact)
{
	/* allocate connection / link group */
	*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
	*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
					 &pclc->lcl, NULL, 0);
	if (*local_contact < 0) {
		if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
	struct smc_clc_msg_smcd *pclc_smcd;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
	*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
					 ismdev, pclc_smcd->gid);
	if (*local_contact < 0) {
		if (*local_contact == -ENOMEM)
+15 −11
Original line number Diff line number Diff line
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	union smc_host_cursor curs;
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
	cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
	cdc.prod_count = conn->local_tx_ctrl.prod.count;

	cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
	cdc.cons_count = conn->local_tx_ctrl.cons.count;
	cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
	cdc.prod.wrap = curs.wrap;
	cdc.prod.count = curs.count;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
	cdc.cons.wrap = curs.wrap;
	cdc.cons.count = curs.count;
	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
		      conn);
	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
static void smcd_cdc_rx_tsklet(unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smcd_cdc_msg *data_cdc;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn)
		return;

	memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}
+45 −15
Original line number Diff line number Diff line
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
	struct smc_cdc_producer_flags	prod_flags;
	struct smc_cdc_conn_state_flags	conn_state_flags;
	u8				reserved[18];
} __packed;					/* format defined in RFC7609 */
};

/* SMC-D cursor format */
union smcd_cdc_cursor {
	struct {
		u16	wrap;
		u32	count;
		struct smc_cdc_producer_flags	prod_flags;
		struct smc_cdc_conn_state_flags	conn_state_flags;
	} __packed;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		acurs;		/* for atomic processing */
#else
	u64			acurs;		/* for atomic processing */
#endif
} __aligned(8);

/* CDC message for SMC-D */
struct smcd_cdc_msg {
	struct smc_wr_rx_hdr common;	/* Type = 0xFE */
	u8 res1[7];
	u16 prod_wrap;
	u32 prod_count;
	u8 res2[2];
	u16 cons_wrap;
	u32 cons_count;
	struct smc_cdc_producer_flags	prod_flags;
	struct smc_cdc_conn_state_flags conn_state_flags;
	union smcd_cdc_cursor	prod;
	union smcd_cdc_cursor	cons;
	u8 res3[8];
} __packed;
} __aligned(8);

static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
#endif
}

static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
				  union smcd_cdc_cursor *src,
				  struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

/* calculate cursor difference between old and new, where old <= new */
static inline int smc_curs_diff(unsigned int size,
				union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
					struct smcd_cdc_msg *peer)
{
	local->prod.wrap = peer->prod_wrap;
	local->prod.count = peer->prod_count;
	local->cons.wrap = peer->cons_wrap;
	local->cons.count = peer->cons_count;
	local->prod_flags = peer->prod_flags;
	local->conn_state_flags = peer->conn_state_flags;
	union smc_host_cursor temp;

	temp.wrap = peer->prod.wrap;
	temp.count = peer->prod.count;
	atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));

	temp.wrap = peer->cons.wrap;
	temp.count = peer->cons.count;
	atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
	local->prod_flags = peer->cons.prod_flags;
	local->conn_state_flags = peer->cons.conn_state_flags;
}

static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+14 −6
Original line number Diff line number Diff line
@@ -184,6 +184,8 @@ free:

		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
			smc_llc_link_inactive(lnk);
		if (lgr->is_smcd)
			smc_ism_signal_shutdown(lgr);
		smc_lgr_free(lgr);
	}
}
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
}

/* Called when SMC-D device is terminated or peer is lost */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (lgr->is_smcd && lgr->smcd == dev &&
		    (!peer_gid || lgr->peer_gid == peer_gid) &&
		    !list_empty(&lgr->list)) {
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			__smc_lgr_terminate(lgr);
			list_move(&lgr->list, &lgr_free_list);
		}
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		cancel_delayed_work_sync(&lgr->free_work);
		if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
			smc_ism_signal_shutdown(lgr);
		smc_lgr_free(lgr);
	}
}
@@ -559,7 +563,7 @@ out:

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role)
			   enum smc_lgr_role role, u32 clcqpn)
{
	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
		       SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
			SMC_GID_SIZE) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			sizeof(lcl->mac)) &&
		lgr->role == role;
		lgr->role == role &&
		(lgr->role == SMC_SERV ||
		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
		    struct smc_ib_device *smcibdev, u8 ibport,
		    struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
		    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
		    u64 peer_gid)
{
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
		     smcr_lgr_match(lgr, lcl, role)) &&
		     smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == vlan_id &&
		    (role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
			smc_llc_link_inactive(lnk);
		}
		cancel_delayed_work_sync(&lgr->free_work);
		if (lgr->is_smcd)
			smc_ism_signal_shutdown(lgr);
		smc_lgr_free(lgr); /* free link group */
	}
}
+3 −2
Original line number Diff line number Diff line
@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
			unsigned short vlan);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);

void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
		    struct smc_ib_device *smcibdev, u8 ibport,
		    struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
		    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
		    u64 peer_gid);
void smcd_conn_free(struct smc_connection *conn);
Loading