Commit c3441618 authored by Chuck Lever's avatar Chuck Lever Committed by Anna Schumaker
Browse files

xprtrdma: Per-mode handling for Remote Invalidation



Refactoring change: Remote Invalidation is particular to the memory
registration mode that is in use. Use a callout instead of a generic
function to handle Remote Invalidation.

This gets rid of the 8-byte flags field in struct rpcrdma_mw, of
which only a single bit flag has been allocated.

Signed-off-by: default avatarChuck Lever <chuck.lever@oracle.com>
Signed-off-by: default avatarAnna Schumaker <Anna.Schumaker@Netapp.com>
parent 42b9f5c5
Loading
Loading
Loading
Loading
+21 −3
Original line number Diff line number Diff line
@@ -450,6 +450,26 @@ out_senderr:
	return ERR_PTR(-ENOTCONN);
}

/* Handle a remotely invalidated mw on the @mws list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mws)
{
	struct rpcrdma_mw *mw;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			struct rpcrdma_xprt *r_xprt = mw->mw_xprt;

			list_del(&mw->mw_list);
			mw->frmr.fr_state = FRMR_IS_INVALID;
			ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
					mw->mw_sg, mw->mw_nents, mw->mw_dir);
			rpcrdma_put_mw(r_xprt, mw);
			break;	/* only one invalidated MR per RPC */
		}
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
@@ -478,9 +498,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
	list_for_each_entry(mw, mws, mw_list) {
		mw->frmr.fr_state = FRMR_IS_INVALID;

		if (mw->mw_flags & RPCRDMA_MW_F_RI)
			continue;

		f = &mw->frmr;
		dprintk("RPC:       %s: invalidating frmr %p\n",
			__func__, f);
@@ -553,6 +570,7 @@ reset_mrs:

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_reminv			= frwr_op_reminv,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
+4 −20
Original line number Diff line number Diff line
@@ -984,24 +984,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
	return fixup_copy_count;
}

/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
				 struct rpcrdma_rep *rep)
{
	struct rpcrdma_mw *mw;

	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
		return;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			mw->mw_flags = RPCRDMA_MW_F_RI;
			break; /* only one invalidated MR per RPC */
		}
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
@@ -1339,9 +1321,11 @@ void rpcrdma_deferred_completion(struct work_struct *work)
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
	rpcrdma_release_rqst(rep->rr_rxprt, req);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

+0 −1
Original line number Diff line number Diff line
@@ -1307,7 +1307,6 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)

	if (!mw)
		goto out_nomws;
	mw->mw_flags = 0;
	return mw;

out_nomws:
+2 −6
Original line number Diff line number Diff line
@@ -272,7 +272,6 @@ struct rpcrdma_mw {
	struct scatterlist	*mw_sg;
	int			mw_nents;
	enum dma_data_direction	mw_dir;
	unsigned long		mw_flags;
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
@@ -284,11 +283,6 @@ struct rpcrdma_mw {
	struct list_head	mw_all;
};

/* mw_flags bit values */
enum {
	RPCRDMA_MW_F_RI		= 1,	/* MR was remotely invalidated by the
					 * server (rkey matched rr_inv_rkey);
					 * frwr_op_unmap_sync skips issuing a
					 * local invalidation for such an MR.
					 */
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
@@ -485,6 +479,8 @@ struct rpcrdma_memreg_ops {
			(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool,
				  struct rpcrdma_mw **);
	void		(*ro_reminv)(struct rpcrdma_rep *rep,
				     struct list_head *mws);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct list_head *);
	void		(*ro_recover_mr)(struct rpcrdma_mw *);