Commit 4c082221 authored by David S. Miller
Browse files

Merge branch 'net-smc-next'



Ursula Braun says:

====================
net/smc: patches 2020-02-17

Here are patches for SMC that make the link-group termination handling more robust.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 790a9a7c 5613f20c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -349,7 +349,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
		if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
			smc->conn.lgr->sync_err = 1;
			smc_lgr_terminate(smc->conn.lgr, true);
			smc_lgr_terminate_sched(smc->conn.lgr);
		}
	}

+11 −15
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
@@ -229,7 +230,7 @@ static void smc_lgr_terminate_work(struct work_struct *work)
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	smc_lgr_terminate(lgr, true);
	__smc_lgr_terminate(lgr, true);
}

/* create a new SMC link group */
@@ -576,15 +577,15 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
	} else {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		wake_up(&lnk->wr_reg_wait);
		if (lnk->state != SMC_LNK_INACTIVE) {
			smc_link_send_delete(lnk, false);
		if (lnk->state != SMC_LNK_INACTIVE)
			smc_llc_link_inactive(lnk);
	}
}
}

/* terminate link group */
/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
@@ -622,25 +623,20 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
		smc_lgr_free(lgr);
}

/* unlink and terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->terminating) {
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	if (!soft)
		lgr->freeing = 1;
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, soft);
	schedule_work(&lgr->terminate_work);
}

/* Called when IB port is terminated */
+1 −7
Original line number Diff line number Diff line
@@ -285,18 +285,12 @@ static inline struct smc_connection *smc_lgr_find_conn(
	return res;
}

static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	if (!lgr->terminating && !lgr->freeing)
		schedule_work(&lgr->terminate_work);
}

struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;

void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
			unsigned short vlan);
+29 −15
Original line number Diff line number Diff line
@@ -257,6 +257,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);
@@ -266,22 +267,35 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			set_bit(port_idx, smcibdev->ports_going_away);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx < SMC_MAX_PORTS) {
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
			if (ibevent->event == IB_EVENT_PORT_ERR)
				set_bit(port_idx, smcibdev->ports_going_away);
			else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
				clear_bit(port_idx, smcibdev->ports_going_away);
		schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
@@ -316,11 +330,11 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx < SMC_MAX_PORTS) {
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
			set_bit(port_idx, smcibdev->ports_going_away);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
+1 −1
Original line number Diff line number Diff line
@@ -614,7 +614,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (rc <= 0) {
		smc_lgr_terminate(smc_get_lgr(link), true);
		smc_lgr_terminate_sched(smc_get_lgr(link));
		return;
	}
	next_interval = link->llc_testlink_time;
Loading