Commit 08e8b91c authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'octeontx2-vf-Add-network-driver-for-virtual-function'



Sunil Goutham says:

====================
octeontx2-vf: Add network driver for virtual function

This patch series adds a network driver for the virtual functions of
OcteonTX2 SOC's resource virtualization unit (RVU).

Changes from v3:
   * Removed missed out EXPORT symbols in VF driver.

Changes from v2:
   * Removed Copyright license text.
   * Removed wrapper fn()s around mutex_lock and unlock.
   * Got rid of using macro with 'return'.
   * Removed __weak fn()s.
        - Suggested by Leon Romanovsky and Andrew Lunn

Changes from v1:
   * Removed driver version and fixed authorship
   * Removed driver version and fixed authorship in the already
     upstreamed AF, PF drivers.
   * Removed unnecessary checks in sriov_enable and xmit fn()s.
   * Removed WQ_MEM_RECLAIM flag while creating workqueue.
   * Added lock in tx_timeout task.
   * Added 'supported_coalesce_params' in ethtool ops.
   * Minor other cleanups.
        - Suggested by Jakub Kicinski
====================

Acked-by: default avatarJakub Kicinski <kuba@kernel.org>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 4e8386fc 4c3212f5
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -33,3 +33,9 @@ config OCTEONTX2_PF
	depends on PCI
	help
	  This driver supports Marvell's OcteonTX2 NIC physical function.

config OCTEONTX2_VF
	tristate "Marvell OcteonTX2 NIC Virtual Function driver"
	depends on OCTEONTX2_PF
	help
	  This driver supports Marvell's OcteonTX2 NIC virtual function.
+1 −3
Original line number Diff line number Diff line
@@ -21,7 +21,6 @@

#define DRV_NAME	"octeontx2-af"
#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
#define DRV_VERSION	"1.0"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

@@ -46,10 +45,9 @@ static const struct pci_device_id rvu_id_table[] = {
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
+2 −0
Original line number Diff line number Diff line
@@ -4,7 +4,9 @@
#

obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o

octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
octeontx2_nicvf-y := otx2_vf.o

ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+57 −42
Original line number Diff line number Diff line
@@ -49,15 +49,15 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
	if (!netif_running(pfvf->netdev))
		return;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
@@ -128,6 +128,7 @@ void otx2_get_stats64(struct net_device *netdev,
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}
EXPORT_SYMBOL(otx2_get_stats64);

/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
@@ -135,17 +136,17 @@ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
	struct nix_set_mac_addr *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

@@ -157,27 +158,27 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
	struct msg_req *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}
@@ -197,26 +198,25 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)

	return 0;
}
EXPORT_SYMBOL(otx2_set_mac_address);

int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	/* SMQ config limits maximum pkt size that can be transmitted */
	req->update_smq = true;
	pfvf->max_frs = mtu +  OTX2_ETH_HLEN;
	req->maxlen = pfvf->max_frs;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

@@ -225,7 +225,10 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf)
	struct cgx_pause_frm_cfg *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
@@ -238,7 +241,7 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf)

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

@@ -248,10 +251,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
	struct nix_rss_flowkey_cfg *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
@@ -259,7 +262,7 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

@@ -270,7 +273,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
	struct nix_aq_enq_req *aq;
	int idx, err;

	otx2_mbox_lock(mbox);
	mutex_lock(&mbox->lock);
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -280,12 +283,12 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				otx2_mbox_unlock(mbox);
				mutex_unlock(&mbox->lock);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				otx2_mbox_unlock(mbox);
				mutex_unlock(&mbox->lock);
				return -ENOMEM;
			}
		}
@@ -298,7 +301,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	otx2_mbox_unlock(mbox);
	mutex_unlock(&mbox->lock);
	return err;
}

@@ -416,6 +419,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)

	schedule_work(&pfvf->reset_task);
}
EXPORT_SYMBOL(otx2_tx_timeout);

void otx2_get_mac_from_af(struct net_device *netdev)
{
@@ -430,6 +434,7 @@ void otx2_get_mac_from_af(struct net_device *netdev)
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}
EXPORT_SYMBOL(otx2_get_mac_from_af);

static int otx2_get_link(struct otx2_nic *pfvf)
{
@@ -465,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
	/* Set topology e.t.c configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((pfvf->netdev->mtu  + OTX2_ETH_HLEN) << 8) |
		req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
				   OTX2_MIN_MTU;

		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
@@ -551,17 +556,17 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
	struct nix_txsch_free_req *free_req;
	int lvl, schq, err;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	/* Free the transmit schedulers */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -575,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	u64 incr, *ptr, val;
	int timeout = 1000;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		incr = (u64)qidx << 32;
		while (1) {
		while (timeout) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
			timeout--;
		}
	}
}
@@ -981,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
		qmem_free(pfvf->dev, pool->fc_addr);
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
	pfvf->qset.pool = NULL;
}

static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
@@ -1248,10 +1256,10 @@ int otx2_detach_resources(struct mbox *mbox)
{
	struct rsrc_detach *detach;

	otx2_mbox_lock(mbox);
	mutex_lock(&mbox->lock);
	detach = otx2_mbox_alloc_msg_detach_resources(mbox);
	if (!detach) {
		otx2_mbox_unlock(mbox);
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

@@ -1260,9 +1268,10 @@ int otx2_detach_resources(struct mbox *mbox)

	/* Send detach request to AF */
	otx2_mbox_msg_send(&mbox->mbox, 0);
	otx2_mbox_unlock(mbox);
	mutex_unlock(&mbox->lock);
	return 0;
}
EXPORT_SYMBOL(otx2_detach_resources);

int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
@@ -1270,11 +1279,11 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
	struct msg_req *msix;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	mutex_lock(&pfvf->mbox.lock);
	/* Get memory to put this msg */
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
	if (!attach) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

@@ -1284,7 +1293,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
	/* Send attach request to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

@@ -1299,16 +1308,16 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		otx2_mbox_unlock(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}
	otx2_mbox_unlock(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
	    pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
@@ -1319,12 +1328,13 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)

	return 0;
}
EXPORT_SYMBOL(otx2_attach_npa_nix);

void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	otx2_mbox_lock(mbox);
	mutex_lock(&mbox->lock);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
@@ -1332,7 +1342,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		otx2_mbox_unlock(mbox);
		mutex_unlock(&mbox->lock);
		return;
	}

@@ -1342,7 +1352,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	otx2_mbox_unlock(mbox);
	mutex_unlock(&mbox->lock);
}

int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
@@ -1387,6 +1397,7 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
			pf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);

void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
@@ -1394,6 +1405,7 @@ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
	pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
	pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);

void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp)
@@ -1404,6 +1416,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
	pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
	pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp)
@@ -1411,6 +1424,7 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
	pfvf->hw.npa_msixoff = rsp->npa_msixoff;
	pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
EXPORT_SYMBOL(mbox_handler_msix_offset);

void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp)
@@ -1422,6 +1436,7 @@ void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
		pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
	}
}
EXPORT_SYMBOL(mbox_handler_nix_bp_enable);

void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
+25 −15
Original line number Diff line number Diff line
@@ -20,6 +20,8 @@

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF              0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200

@@ -191,6 +193,17 @@ struct otx2_hw {
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface was either configured or not */
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
@@ -215,14 +228,20 @@ struct otx2_nic {

	/* Mbox */
	struct mbox		mbox;
	struct mbox		*mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8			total_vfs;
	u16			pcifunc; /* RVU PF_FUNC */
	u16			bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config	*vf_configs;
	struct cgx_link_user_info linfo;

	u64			reset_count;
	struct work_struct	reset_task;
	struct workqueue_struct	*flr_wq;
	struct flr_work		*flr_wrk;
	struct refill_work	*refill_wrk;

	/* Ethtool stuff */
@@ -232,6 +251,11 @@ struct otx2_nic {
	int			nix_blkaddr;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
@@ -351,21 +375,6 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

static inline void otx2_mbox_lock_init(struct mbox *mbox)
{
	mutex_init(&mbox->lock);
}

static inline void otx2_mbox_lock(struct mbox *mbox)
{
	mutex_lock(&mbox->lock);
}

static inline void otx2_mbox_unlock(struct mbox *mbox)
{
	mutex_unlock(&mbox->lock);
}

/* With the absence of API for 128-bit IO memory access for arm64,
 * implement required operations at place.
 */
@@ -614,6 +623,7 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
Loading