Commit caa2da34 authored by Sunil Goutham, committed by David S. Miller

octeontx2-pf: Initialize and config queues



This patch initializes all queues, i.e. the receive buffer
pools, receive and transmit queues, completion or notification
queues etc. It allocates all required resources (e.g. transmit
schedulers, receive buffers) and configures them for proper
queue operation. It also sets up the receive queues' RED
dropping levels.

Co-developed-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 05fcc9e0
+7 −2
@@ -143,8 +143,13 @@ enum nix_scheduler {
	NIX_TXSCH_LVL_CNT = 0x5,
};

#define TXSCH_RR_QTM_MAX		((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM		TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO		(0x1ull)
#define MAX_SCHED_WEIGHT		0xFF
#define DFLT_RR_WEIGHT			71
#define DFLT_RR_QTM	((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
			 / MAX_SCHED_WEIGHT)
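
/* Worked example (illustrative, not part of the patch): with
 * MAX_SCHED_WEIGHT = 255 and TXSCH_RR_QTM_MAX = (1 << 24) - 1 = 16777215,
 * DFLT_RR_QTM = (71 * 16777215) / 255 = 4671303, i.e. the default DWRR
 * quantum is 71/255 (~28%) of the hardware maximum.
 */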

/* Min/Max packet sizes, excluding FCS */
#define	NIC_HW_MIN_FRS			40
+723 −0
@@ -15,6 +15,388 @@
#include "otx2_common.h"
#include "otx2_struct.h"

dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   gfp_t gfp)
{
	dma_addr_t iova;

	/* Check if request can be accommodated in previously allocated page */
	if (pool->page && ((pool->page_offset + pool->rbsize) <=
	    (PAGE_SIZE << pool->rbpage_order))) {
		pool->pageref++;
		goto ret;
	}

	otx2_get_page(pool);

	/* Allocate a new page */
	pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
				 pool->rbpage_order);
	if (unlikely(!pool->page))
		return -ENOMEM;

	pool->page_offset = 0;
ret:
	iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
				      pool->rbsize, DMA_FROM_DEVICE);
	if (!iova) {
		if (!pool->page_offset)
			__free_pages(pool->page, pool->rbpage_order);
		pool->page = NULL;
		return -ENOMEM;
	}
	pool->page_offset += pool->rbsize;
	return iova;
}
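
/* Reading of otx2_alloc_rbuf() (an annotation, not part of the patch): a
 * compound page of order rbpage_order is carved into rbsize-sized receive
 * buffers. While the current page has room, only pool->pageref is bumped
 * and the next offset is DMA-mapped; once the page is exhausted,
 * otx2_get_page() presumably commits the accumulated references before a
 * fresh page is allocated. On a mapping failure the page is freed only if
 * nothing has been carved out of it yet.
 */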

static int otx2_get_link(struct otx2_nic *pfvf)
{
	int link = 0;
	u16 map;

	/* cgx lmac link */
	if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
		map = pfvf->hw.tx_chan_base & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	}
	/* LBK channel */
	if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
		link = 12;

	return link;
}
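
/* Illustrative reading of otx2_get_link(): for CGX channels, bits [11:8]
 * of the channel number select the CGX block and bits [7:4] the LMAC, so
 * link = 4 * cgx_id + lmac_id. That layout (an inference from the math
 * above, not spelled out in this patch) gives links 0-11 to three CGX
 * blocks of four LMACs each, with 12 hardcoded as the LBK link.
 */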

int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq = hw->txschq_list[lvl][0];
	/* Set topology etc. configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		req->num_regs++;
		/* MDQ config */
		parent =  hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] =  DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent =  hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent =  hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;

		req->num_regs++;
		req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							otx2_get_link(pfvf));
		/* Enable this queue and backpressure */
		req->regval[2] = BIT_ULL(13) | BIT_ULL(12);

	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For VF this is always ignored.
		 */

		/* Set DWRR quantum */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	int lvl;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
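
/* Shape of the default scheduling tree, as inferred from the PARENT
 * register writes in otx2_txschq_config() above (one schq per level,
 * requested in otx2_txsch_alloc()):
 *
 *   TL1 -- TL2 -- TL3 -- TL4 -- SMQ/MDQ -- SQ0..SQn
 *
 * The TL2 node is tied to the NIX link from otx2_get_link() via
 * NIX_AF_TL3_TL2X_LINKX_CFG, and every SQ is later mapped to the single
 * SMQ in otx2_sq_init().
 */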

int otx2_txschq_stop(struct otx2_nic *pfvf)
{
	struct nix_txsch_free_req *free_req;
	int lvl, schq, err;

	otx2_mbox_lock(&pfvf->mbox);
	/* Free the transmit schedulers */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
	return err;
}

void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	u64 incr, *ptr, val;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		incr = (u64)qidx << 32;
		while (1) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
		}
	}
}

/* RED and drop levels of CQ on packet reception.
 * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * Eg: For an RQ of length 1K and pass/drop levels of 204/230,
 * RED accepts pkts if free pointers > 102 & <= 205, and pkts are
 * dropped if free pointers < 102.
 */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000
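
/* Worked numbers for the thresholds above (illustrative; the RQ skid of
 * 600 and the queue sizes are hypothetical values, not defaults from this
 * patch):
 *   RQ_PASS_LVL_CQ(600, 1024) = ((600 + 16) * 256) / 1024 = 154
 *   RQ_DROP_LVL_CQ(600, 1024) = (600 * 256) / 1024 = 150
 *   RQ_PASS_LVL_AURA = 255 - (95 * 256) / 100 = 255 - 243 = 12
 *   RQ_DROP_LVL_AURA = 255 - (99 * 256) / 100 = 255 - 253 = 2
 *   cq_limit in otx2_sq_init() for a 4K-CQE CQ = (2000 * 256) / 4096 = 125
 */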

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	sq->sqe_base = sq->sqe->base;

	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQs to that SMQ */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_quantum = DFLT_RR_QTM;
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	/* Due to the pipelining impact, a minimum of 2000 unused CQEs
	 * must be maintained in the send CQ to avoid CQ overflow.
	 */
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
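
/* Note on CQ indexing (inferred): aq->sq.cq above is set to
 * rx_queues + qidx, i.e. transmit CQs occupy the indices after all
 * receive CQs, which is why otx2_cq_init() below can classify a CQ with
 * a simple qidx < pfvf->hw.rx_queues check.
 */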

static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;
	int err, pool_id;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cqe_cnt = qset->rqe_cnt;
	} else {
		cq->cq_type = CQ_TX;
		cq->cqe_cnt = qset->sqe_cnt;
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* If all RQ auras point to a single pool, then all RX CQs'
	 * receive buffer pools also point to that same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	return 0;
}

int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req  *nixlf;
@@ -58,6 +440,302 @@ int otx2_config_nix(struct otx2_nic *pfvf)
	return rsp->hdr.rc;
}

void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	int sqb, qidx;
	u64 iova, pa;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}

void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	u64 iova, pa;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end =  pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		while (iova) {
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}
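
/* The OTX2_HEAD_ROOM subtraction above undoes the offset added when RQ
 * buffers are pushed to the aura in otx2_rq_aura_pool_init() below
 * (otx2_aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM)), so the
 * DMA unmap and put_page() operate on the original buffer start.
 */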

void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
}

static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
			  int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}
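
/* otx2_aura_init() and otx2_pool_init() below only queue an NPA AQ
 * message; nothing is sent from here. Callers batch one message per
 * aura/pool and flush the whole set with a single otx2_sync_mbox_msg(),
 * as otx2_sq_aura_pool_init() and otx2_rq_aura_pool_init() do; the
 * flush-and-retry above handles the shared mailbox filling up mid-batch.
 */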

static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
			  int stack_pages, int numptrs, int buf_size)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;
	pool->rbpage_order = get_order(buf_size);

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err, ptr;
	s64 bufptr;

	/* Calculate the number of SQBs needed.
	 *
	 * For a 128-byte SQE, a 4KB SQB holds 32 SQEs, of which the
	 * last slot points to the next SQB, leaving 31 usable.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get the number of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
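
	/* Worked example (illustrative, assuming a 4KB SQB, 128-byte SQEs
	 * and a hypothetical qset->sqe_cnt of 4096): num_sqbs is first
	 * 4096 / 128 - 1 = 31 usable SQEs per SQB, then becomes
	 * (4096 + 31) / 31 = 133 SQBs, i.e. rounded up so every SQE has
	 * a slot.
	 */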

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
		if (!sq->sqb_ptrs)
			return -ENOMEM;

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
			if (bufptr <= 0)
				return bufptr;
			otx2_aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
		otx2_get_page(pool);
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int stack_pages, pool_id, rq;
	struct otx2_pool *pool;
	int err, ptr, num_ptrs;
	s64 bufptr;

	num_ptrs = pfvf->qset.rqe_cnt;

	stack_pages =
		(num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (rq = 0; rq < hw->rx_queues; rq++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
		if (err)
			goto fail;
	}
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_ptrs, pfvf->rbsize);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		for (ptr = 0; ptr < num_ptrs; ptr++) {
			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
			if (bufptr <= 0)
				return bufptr;
			otx2_aura_freeptr(pfvf, pool_id,
					  bufptr + OTX2_HEAD_ROOM);
		}
		otx2_get_page(pool);
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_config_npa(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
@@ -134,6 +812,14 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
		return err;
	}

	pfvf->nix_blkaddr = BLKADDR_NIX0;

	/* If the platform has two NIX blocks then LF may be
	 * allocated from NIX1.
	 */
	if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
		pfvf->nix_blkaddr = BLKADDR_NIX1;

	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
@@ -158,6 +844,43 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
	return 0;
}

void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	otx2_mbox_lock(mbox);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
	else
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		otx2_mbox_unlock(mbox);
		return;
	}

	req->ctype = type;

	if (otx2_sync_mbox_msg(mbox))
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	otx2_mbox_unlock(mbox);
}

void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp)
{
	int lvl, schq;

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
}

/* Mbox message handlers */
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
+121 −10 (file changed; preview size limit exceeded, changes collapsed)

+230 −5 (file changed; preview size limit exceeded, changes collapsed)

+96 −0
@@ -48,4 +48,100 @@
#define RVU_FUNC_BLKADDR_SHIFT		20
#define RVU_FUNC_BLKADDR_MASK		0x1FULL

/* NPA LF registers */
#define NPA_LFBASE			(BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
#define NPA_LF_AURA_OP_ALLOCX(a)	(NPA_LFBASE | 0x10 | (a) << 3)
#define NPA_LF_AURA_OP_FREE0            (NPA_LFBASE | 0x20)
#define NPA_LF_AURA_OP_FREE1            (NPA_LFBASE | 0x28)
#define NPA_LF_AURA_OP_CNT              (NPA_LFBASE | 0x30)
#define NPA_LF_AURA_OP_LIMIT            (NPA_LFBASE | 0x50)
#define NPA_LF_AURA_OP_INT              (NPA_LFBASE | 0x60)
#define NPA_LF_AURA_OP_THRESH           (NPA_LFBASE | 0x70)
#define NPA_LF_POOL_OP_PC               (NPA_LFBASE | 0x100)
#define NPA_LF_POOL_OP_AVAILABLE        (NPA_LFBASE | 0x110)
#define NPA_LF_POOL_OP_PTR_START0       (NPA_LFBASE | 0x120)
#define NPA_LF_POOL_OP_PTR_START1       (NPA_LFBASE | 0x128)
#define NPA_LF_POOL_OP_PTR_END0         (NPA_LFBASE | 0x130)
#define NPA_LF_POOL_OP_PTR_END1         (NPA_LFBASE | 0x138)
#define NPA_LF_POOL_OP_INT              (NPA_LFBASE | 0x160)
#define NPA_LF_POOL_OP_THRESH           (NPA_LFBASE | 0x170)
#define NPA_LF_ERR_INT                  (NPA_LFBASE | 0x200)
#define NPA_LF_ERR_INT_W1S              (NPA_LFBASE | 0x208)
#define NPA_LF_ERR_INT_ENA_W1C          (NPA_LFBASE | 0x210)
#define NPA_LF_ERR_INT_ENA_W1S          (NPA_LFBASE | 0x218)
#define NPA_LF_RAS                      (NPA_LFBASE | 0x220)
#define NPA_LF_RAS_W1S                  (NPA_LFBASE | 0x228)
#define NPA_LF_RAS_ENA_W1C              (NPA_LFBASE | 0x230)
#define NPA_LF_RAS_ENA_W1S              (NPA_LFBASE | 0x238)
#define NPA_LF_QINTX_CNT(a)             (NPA_LFBASE | 0x300 | (a) << 12)
#define NPA_LF_QINTX_INT(a)             (NPA_LFBASE | 0x310 | (a) << 12)
#define NPA_LF_QINTX_INT_W1S(a)         (NPA_LFBASE | 0x318 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1S(a)         (NPA_LFBASE | 0x320 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1C(a)         (NPA_LFBASE | 0x330 | (a) << 12)

/* NIX LF registers */
#define	NIX_LFBASE			(BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
#define	NIX_LF_RX_SECRETX(a)		(NIX_LFBASE | 0x0 | (a) << 3)
#define	NIX_LF_CFG			(NIX_LFBASE | 0x100)
#define	NIX_LF_GINT			(NIX_LFBASE | 0x200)
#define	NIX_LF_GINT_W1S			(NIX_LFBASE | 0x208)
#define	NIX_LF_GINT_ENA_W1C		(NIX_LFBASE | 0x210)
#define	NIX_LF_GINT_ENA_W1S		(NIX_LFBASE | 0x218)
#define	NIX_LF_ERR_INT			(NIX_LFBASE | 0x220)
#define	NIX_LF_ERR_INT_W1S		(NIX_LFBASE | 0x228)
#define	NIX_LF_ERR_INT_ENA_W1C		(NIX_LFBASE | 0x230)
#define	NIX_LF_ERR_INT_ENA_W1S		(NIX_LFBASE | 0x238)
#define	NIX_LF_RAS			(NIX_LFBASE | 0x240)
#define	NIX_LF_RAS_W1S			(NIX_LFBASE | 0x248)
#define	NIX_LF_RAS_ENA_W1C		(NIX_LFBASE | 0x250)
#define	NIX_LF_RAS_ENA_W1S		(NIX_LFBASE | 0x258)
#define	NIX_LF_SQ_OP_ERR_DBG		(NIX_LFBASE | 0x260)
#define	NIX_LF_MNQ_ERR_DBG		(NIX_LFBASE | 0x270)
#define	NIX_LF_SEND_ERR_DBG		(NIX_LFBASE | 0x280)
#define	NIX_LF_TX_STATX(a)		(NIX_LFBASE | 0x300 | (a) << 3)
#define	NIX_LF_RX_STATX(a)		(NIX_LFBASE | 0x400 | (a) << 3)
#define	NIX_LF_OP_SENDX(a)		(NIX_LFBASE | 0x800 | (a) << 3)
#define	NIX_LF_RQ_OP_INT		(NIX_LFBASE | 0x900)
#define	NIX_LF_RQ_OP_OCTS		(NIX_LFBASE | 0x910)
#define	NIX_LF_RQ_OP_PKTS		(NIX_LFBASE | 0x920)
#define	NIX_LF_OP_IPSEC_DYNO_CN		(NIX_LFBASE | 0x980)
#define	NIX_LF_SQ_OP_INT		(NIX_LFBASE | 0xa00)
#define	NIX_LF_SQ_OP_OCTS		(NIX_LFBASE | 0xa10)
#define	NIX_LF_SQ_OP_PKTS		(NIX_LFBASE | 0xa20)
#define	NIX_LF_SQ_OP_STATUS		(NIX_LFBASE | 0xa30)
#define	NIX_LF_CQ_OP_INT		(NIX_LFBASE | 0xb00)
#define	NIX_LF_CQ_OP_DOOR		(NIX_LFBASE | 0xb30)
#define	NIX_LF_CQ_OP_STATUS		(NIX_LFBASE | 0xb40)
#define	NIX_LF_QINTX_CNT(a)		(NIX_LFBASE | 0xC00 | (a) << 12)
#define	NIX_LF_QINTX_INT(a)		(NIX_LFBASE | 0xC10 | (a) << 12)
#define	NIX_LF_QINTX_INT_W1S(a)		(NIX_LFBASE | 0xC18 | (a) << 12)
#define	NIX_LF_QINTX_ENA_W1S(a)		(NIX_LFBASE | 0xC20 | (a) << 12)
#define	NIX_LF_QINTX_ENA_W1C(a)		(NIX_LFBASE | 0xC30 | (a) << 12)
#define	NIX_LF_CINTX_CNT(a)		(NIX_LFBASE | 0xD00 | (a) << 12)
#define	NIX_LF_CINTX_WAIT(a)		(NIX_LFBASE | 0xD10 | (a) << 12)
#define	NIX_LF_CINTX_INT(a)		(NIX_LFBASE | 0xD20 | (a) << 12)
#define	NIX_LF_CINTX_INT_W1S(a)		(NIX_LFBASE | 0xD30 | (a) << 12)
#define	NIX_LF_CINTX_ENA_W1S(a)		(NIX_LFBASE | 0xD40 | (a) << 12)
#define	NIX_LF_CINTX_ENA_W1C(a)		(NIX_LFBASE | 0xD50 | (a) << 12)

/* NIX AF transmit scheduler registers */
#define NIX_AF_SMQX_CFG(a)		(0x700 | (a) << 16)
#define NIX_AF_TL1X_SCHEDULE(a)		(0xC00 | (a) << 16)
#define NIX_AF_TL1X_CIR(a)		(0xC20 | (a) << 16)
#define NIX_AF_TL1X_TOPOLOGY(a)		(0xC80 | (a) << 16)
#define NIX_AF_TL2X_PARENT(a)		(0xE88 | (a) << 16)
#define NIX_AF_TL2X_SCHEDULE(a)		(0xE00 | (a) << 16)
#define NIX_AF_TL3X_PARENT(a)		(0x1088 | (a) << 16)
#define NIX_AF_TL3X_SCHEDULE(a)		(0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a)		(0x1288 | (a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a)		(0x1200 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a)		(0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a)		(0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b)	(0x1700 | (a) << 16 | (b) << 3)

/* LMT LF registers */
#define LMT_LFBASE			BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
#define LMT_LF_LMTLINEX(a)		(LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL		(LMT_LFBASE | 0x400)

#endif /* OTX2_REG_H */