Commit 6e5559b2 authored by Andrew Boyer, committed by Jason Gunthorpe
Browse files

RDMA/rxe: Add link_downed, rdma_sends, rdma_recvs stats counters



link_downed is self-explanatory.

rdma_sends and rdma_recvs count the number of RDMA Send and RDMA Receive
operations completed successfully. This is different from the existing
sent_pkts and rcvd_pkts counters because the existing counters measure
packets, not RDMA operations.

ack_deffered is renamed to ack_deferred to fix the spelling.

out_of_sequence is renamed to out_of_seq_request to make clear that it is
counting only requests and not other packets which can be out of sequence.

Signed-off-by: Andrew Boyer <andrew.boyer@dell.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 5736c7c4
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -439,6 +439,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
@@ -451,6 +452,11 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
		advance_consumer(qp->sq.queue);
	}

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
+5 −2
Original line number Diff line number Diff line
@@ -37,15 +37,18 @@ static const char * const rxe_counter_name[] = {
	[RXE_CNT_SENT_PKTS]           =  "sent_pkts",
	[RXE_CNT_RCVD_PKTS]           =  "rcvd_pkts",
	[RXE_CNT_DUP_REQ]             =  "duplicate_request",
	[RXE_CNT_OUT_OF_SEQ_REQ]      =  "out_of_sequence",
	[RXE_CNT_OUT_OF_SEQ_REQ]      =  "out_of_seq_request",
	[RXE_CNT_RCV_RNR]             =  "rcvd_rnr_err",
	[RXE_CNT_SND_RNR]             =  "send_rnr_err",
	[RXE_CNT_RCV_SEQ_ERR]         =  "rcvd_seq_err",
	[RXE_CNT_COMPLETER_SCHED]     =  "ack_deffered",
	[RXE_CNT_COMPLETER_SCHED]     =  "ack_deferred",
	[RXE_CNT_RETRY_EXCEEDED]      =  "retry_exceeded_err",
	[RXE_CNT_RNR_RETRY_EXCEEDED]  =  "retry_rnr_exceeded_err",
	[RXE_CNT_COMP_RETRY]          =  "completer_retry_err",
	[RXE_CNT_SEND_ERR]            =  "send_err",
	[RXE_CNT_LINK_DOWNED]         =  "link_downed",
	[RXE_CNT_RDMA_SEND]           =  "rdma_sends",
	[RXE_CNT_RDMA_RECV]           =  "rdma_recvs",
};

int rxe_ib_get_hw_stats(struct ib_device *ibdev,
+3 −0
Original line number Diff line number Diff line
@@ -50,6 +50,9 @@ enum rxe_counters {
	RXE_CNT_RNR_RETRY_EXCEEDED,
	RXE_CNT_COMP_RETRY,
	RXE_CNT_SEND_ERR,
	RXE_CNT_LINK_DOWNED,
	RXE_CNT_RDMA_SEND,
	RXE_CNT_RDMA_RECV,
	RXE_NUM_OF_COUNTERS
};

+1 −0
Original line number Diff line number Diff line
@@ -621,6 +621,7 @@ void rxe_port_down(struct rxe_dev *rxe)
	port->attr.state = IB_PORT_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
	dev_info(&rxe->ib_dev.dev, "set down\n");
}

+2 −1
Original line number Diff line number Diff line
@@ -835,6 +835,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (unlikely(!wqe))
		return RESPST_CLEANUP;
@@ -852,6 +853,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
@@ -900,7 +902,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;