Commit 5aef7cf2 authored by Bryan Tan, committed by Jason Gunthorpe
Browse files

RDMA/vmw_pvrdma: Clarify QP and CQ is_kernel logic



Be more consistent in setting and checking is_kernel
flag for QPs and CQs.

Reviewed-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 926aae27
Loading
Loading
Loading
Loading
+4 −5
Original line number Diff line number Diff line
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
	}

	cq->ibcq.cqe = entries;
	cq->is_kernel = !context;

	if (context) {
	if (!cq->is_kernel) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			ret = -EFAULT;
			goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,

		npages = ib_umem_page_count(cq->umem);
	} else {
		cq->is_kernel = true;

		/* One extra page for shared ring state */
		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
			      PAGE_SIZE - 1) / PAGE_SIZE;
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (context) {
	if (!cq->is_kernel) {
		cq->uar = &(to_vucontext(context)->uar);

		/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
err_page_dir:
	pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
	if (context)
	if (!cq->is_kernel)
		ib_umem_release(cq->umem);
err_cq:
	atomic_dec(&dev->num_cqs);
+3 −4
Original line number Diff line number Diff line
@@ -249,8 +249,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
		init_completion(&qp->free);

		qp->state = IB_QPS_RESET;
		qp->is_kernel = !(pd->uobject && udata);

		if (pd->uobject && udata) {
		if (!qp->is_kernel) {
			dev_dbg(&dev->pdev->dev,
				"create queuepair from user space\n");

@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
				qp->npages_recv = 0;
			qp->npages = qp->npages_send + qp->npages_recv;
		} else {
			qp->is_kernel = true;

			ret = pvrdma_set_sq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
err_pdir:
	pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
	if (pd->uobject && udata) {
	if (!qp->is_kernel) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)