Commit 75c84151 authored by Trond Myklebust
Browse files

SUNRPC: Rename xprt->recv_lock to xprt->queue_lock



We will use the same lock to protect both the transmit and receive queues.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent ec37a58f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -235,7 +235,7 @@ struct rpc_xprt {
	 */
	spinlock_t		transport_lock;	/* lock transport info */
	spinlock_t		reserve_lock;	/* lock slot table */
	spinlock_t		recv_lock;	/* lock receive list */
	spinlock_t		queue_lock;	/* send/receive queue lock */
	u32			xid;		/* Next XID value to use */
	struct rpc_task *	snd_task;	/* Task blocked in send */
	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
+3 −3
Original line number Diff line number Diff line
@@ -1004,7 +1004,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)

	if (!bc_xprt)
		return -EAGAIN;
	spin_lock(&bc_xprt->recv_lock);
	spin_lock(&bc_xprt->queue_lock);
	req = xprt_lookup_rqst(bc_xprt, xid);
	if (!req)
		goto unlock_notfound;
@@ -1022,7 +1022,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
	memcpy(dst->iov_base, src->iov_base, src->iov_len);
	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
	rqstp->rq_arg.len = 0;
	spin_unlock(&bc_xprt->recv_lock);
	spin_unlock(&bc_xprt->queue_lock);
	return 0;
unlock_notfound:
	printk(KERN_NOTICE
@@ -1031,7 +1031,7 @@ unlock_notfound:
		__func__, ntohl(calldir),
		bc_xprt, ntohl(xid));
unlock_eagain:
	spin_unlock(&bc_xprt->recv_lock);
	spin_unlock(&bc_xprt->queue_lock);
	return -EAGAIN;
}

+12 −12
Original line number Diff line number Diff line
@@ -826,7 +826,7 @@ static void xprt_connect_status(struct rpc_task *task)
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->recv_lock.
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
@@ -892,7 +892,7 @@ static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->recv_lock.
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
@@ -914,7 +914,7 @@ EXPORT_SYMBOL_GPL(xprt_update_rtt);
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->recv_lock.
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
@@ -1034,10 +1034,10 @@ void xprt_transmit(struct rpc_task *task)
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			spin_lock(&xprt->recv_lock);
			spin_lock(&xprt->queue_lock);
			list_add_tail(&req->rq_list, &xprt->recv);
			set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
			spin_unlock(&xprt->recv_lock);
			spin_unlock(&xprt->queue_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
@@ -1076,7 +1076,7 @@ void xprt_transmit(struct rpc_task *task)
		 * The spinlock ensures atomicity between the test of
		 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
		 */
		spin_lock(&xprt->recv_lock);
		spin_lock(&xprt->queue_lock);
		if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			/* Wake up immediately if the connection was dropped */
@@ -1084,7 +1084,7 @@ void xprt_transmit(struct rpc_task *task)
				rpc_wake_up_queued_task_set_status(&xprt->pending,
						task, -ENOTCONN);
		}
		spin_unlock(&xprt->recv_lock);
		spin_unlock(&xprt->queue_lock);
	}
}

@@ -1379,18 +1379,18 @@ void xprt_release(struct rpc_task *task)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock(&xprt->recv_lock);
	spin_lock(&xprt->queue_lock);
	if (!list_empty(&req->rq_list)) {
		list_del_init(&req->rq_list);
		if (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate);
			spin_unlock(&xprt->recv_lock);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->recv_lock);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate);
		}
	}
	spin_unlock(&xprt->recv_lock);
	spin_unlock(&xprt->queue_lock);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
@@ -1420,7 +1420,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->recv_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
+5 −5
Original line number Diff line number Diff line
@@ -1238,7 +1238,7 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
		goto out_badheader;

out:
	spin_lock(&xprt->recv_lock);
	spin_lock(&xprt->queue_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
@@ -1246,7 +1246,7 @@ out:

	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->recv_lock);
	spin_unlock(&xprt->queue_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
@@ -1345,7 +1345,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->recv_lock);
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
@@ -1357,7 +1357,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
		credits = buf->rb_max_requests;
	buf->rb_credits = credits;

	spin_unlock(&xprt->recv_lock);
	spin_unlock(&xprt->queue_lock);

	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
@@ -1378,7 +1378,7 @@ out_badversion:
 * is corrupt.
 */
out_norqst:
	spin_unlock(&xprt->recv_lock);
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto repost;

+2 −2
Original line number Diff line number Diff line
@@ -56,7 +56,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
	if (src->iov_len < 24)
		goto out_shortreply;

	spin_lock(&xprt->recv_lock);
	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_notfound;
@@ -86,7 +86,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
	rcvbuf->len = 0;

out_unlock:
	spin_unlock(&xprt->recv_lock);
	spin_unlock(&xprt->queue_lock);
out:
	return ret;

Loading