Commit ef3f5434 authored by Trond Myklebust
Browse files

SUNRPC: Distinguish between the slot allocation list and receive queue



When storing a struct rpc_rqst on the slot allocation list, we currently
use the same field 'rq_list' as we use to store the request on the
receive queue. Since the structure is never on both lists at the same
time, this is OK.
However, for clarity, let's make that a union with different names for
the different lists so that we can more easily distinguish between
the two states.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 78b576ce
Loading
Loading
Loading
Loading
+7 −2
Original line number Diff line number Diff line
@@ -82,7 +82,11 @@ struct rpc_rqst {
	struct page		**rq_enc_pages;	/* scratch pages for use by
						   gss privacy code */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
	struct list_head	rq_list;

	union {
		struct list_head	rq_list;	/* Slot allocation list */
		struct list_head	rq_recv;	/* Receive queue */
	};

	void			*rq_buffer;	/* Call XDR encode buffer */
	size_t			rq_callsize;
@@ -249,7 +253,8 @@ struct rpc_xprt {
	struct list_head	bc_pa_list;	/* List of preallocated
						 * backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	struct list_head	recv;

	struct list_head	recv_queue;	/* Receive queue */

	struct {
		unsigned long		bind_count,	/* total number of binds */
+6 −6
Original line number Diff line number Diff line
@@ -708,7 +708,7 @@ static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
	if (list_empty(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

@@ -718,7 +718,7 @@ xprt_init_autodisconnect(struct timer_list *t)
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
	if (!list_empty(&xprt->recv_queue))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
@@ -848,7 +848,7 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
	list_for_each_entry(entry, &xprt->recv_queue, rq_recv)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
@@ -938,7 +938,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	list_add_tail(&req->rq_list, &xprt->recv);
	list_add_tail(&req->rq_recv, &xprt->recv_queue);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

@@ -957,7 +957,7 @@ static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		list_del(&task->tk_rqstp->rq_list);
		list_del(&task->tk_rqstp->rq_recv);
}

/**
@@ -1492,7 +1492,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_LIST_HEAD(&xprt->recv_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);