Commit 7402a4fe authored by Trond Myklebust's avatar Trond Myklebust
Browse files

SUNRPC: Fix up backchannel slot table accounting



Add a per-transport maximum limit in the socket case, and add
helpers to allow the NFSv4 code to discover that limit.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 9f98effc
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -8380,6 +8380,7 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
{
	unsigned int max_rqst_sz, max_resp_sz;
	unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
	unsigned int max_bc_slots = rpc_num_bc_slots(clnt);

	max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
	max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
@@ -8402,6 +8403,8 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
	if (args->bc_attrs.max_reqs > max_bc_slots)
		args->bc_attrs.max_reqs = max_bc_slots;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+1 −0
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs);
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs);
void xprt_free_bc_rqst(struct rpc_rqst *req);
unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt);

/*
 * Determine if a shared backchannel is in use
+1 −0
Original line number Diff line number Diff line
@@ -194,6 +194,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
struct net *	rpc_net_ns(struct rpc_clnt *);
size_t		rpc_max_payload(struct rpc_clnt *);
size_t		rpc_max_bc_payload(struct rpc_clnt *);
unsigned int	rpc_num_bc_slots(struct rpc_clnt *);
void		rpc_force_rebind(struct rpc_clnt *);
size_t		rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char	*rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
+4 −2
Original line number Diff line number Diff line
@@ -158,6 +158,7 @@ struct rpc_xprt_ops {
	int		(*bc_setup)(struct rpc_xprt *xprt,
				    unsigned int min_reqs);
	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
	unsigned int	(*bc_num_slots)(struct rpc_xprt *xprt);
	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
	void		(*bc_destroy)(struct rpc_xprt *xprt,
				      unsigned int max_reqs);
@@ -251,8 +252,9 @@ struct rpc_xprt {
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct svc_serv		*bc_serv;       /* The RPC service which will */
						/* process the callback */
	int			bc_alloc_count;	/* Total number of preallocs */
	atomic_t		bc_free_slots;
	unsigned int		bc_alloc_max;
	unsigned int		bc_alloc_count;	/* Total number of preallocs */
	atomic_t		bc_slot_count;	/* Number of allocated slots */
	spinlock_t		bc_pa_lock;	/* Protects the preallocated
						 * items */
	struct list_head	bc_pa_list;	/* List of preallocated
+22 −18
Original line number Diff line number Diff line
@@ -31,25 +31,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Hard upper bound on the number of backchannel slots per transport. */
#define BC_MAX_SLOTS	64U

/*
 * Report the maximum number of backchannel slots this transport can
 * ever offer.  The limit is a compile-time constant, so the transport
 * argument is not examined.
 */
unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
{
	const unsigned int limit = BC_MAX_SLOTS;

	return limit;
}

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
	return xprt->bc_alloc_count < xprt->bc_alloc_max;
}

/*
@@ -145,6 +140,9 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)

	dprintk("RPC:       setup backchannel transport\n");

	if (min_reqs > BC_MAX_SLOTS)
		min_reqs = BC_MAX_SLOTS;

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
@@ -172,7 +170,9 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	xprt->bc_alloc_count += min_reqs;
	xprt->bc_alloc_max += min_reqs;
	atomic_add(min_reqs, &xprt->bc_slot_count);
	spin_unlock(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
@@ -220,11 +220,13 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	xprt->bc_alloc_max -= max_reqs;
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:        req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		xprt->bc_alloc_count--;
		atomic_dec(&xprt->bc_slot_count);
		if (--max_reqs == 0)
			break;
	}
@@ -241,13 +243,14 @@ static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
	struct rpc_rqst *req = NULL;

	dprintk("RPC:       allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		if (!new)
			goto not_found;
		if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
			goto not_found;
		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
@@ -291,6 +294,7 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
@@ -357,7 +361,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	xprt->bc_alloc_count--;
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
Loading