Commit aeaf0cc5 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'octeontx2-Flow-control-support-and-other-misc-changes'



Sunil Goutham says:

====================
octeontx2: Flow control support and other misc changes

This patch series adds flow control support (802.3 pause frames) and
includes other miscellaneous changes to the generic admin function (AF)
driver functionality.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 48938b1e dc819c1b
Loading
Loading
Loading
Loading
+121 −53
Original line number Original line Diff line number Diff line
@@ -367,6 +367,107 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
	return !!(last & DATA_PKT_TX_EN);
	return !!(last & DATA_PKT_TX_EN);
}
}


/* Fetch the current 802.3x pause frame configuration of an LMAC.
 *
 * @cgxd:     opaque CGX device handle (struct cgx *)
 * @lmac_id:  LMAC index within this CGX block
 * @tx_pause: set to 1 if transmission of pause frames is enabled, else 0
 * @rx_pause: set to 1 if received pause frames are acted upon, else 0
 *
 * Return: 0 on success, -ENODEV if the handle or LMAC index is invalid.
 */
int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
			   u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	/* RX side: CTL_BCK set means received pause frames backpressure TX */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	/* TX side: L2P_BP_CONV set means internal backpressure is converted
	 * into pause frames sent to the link partner.
	 */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

/* Configure 802.3x pause frame handling for an LMAC.
 *
 * @cgxd:     opaque CGX device handle (struct cgx *)
 * @lmac_id:  LMAC index within this CGX block
 * @tx_pause: nonzero to enable generation of pause frames
 * @rx_pause: nonzero to honour received pause frames
 *
 * Return: 0 on success, -ENODEV if the handle or LMAC index is invalid.
 */
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
			   u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	/* Enable/disable backpressure on reception of pause frames */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	/* Enable/disable conversion of backpressure into TX pause frames */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	/* CGXX_CMR_RX_OVR_BP is a per-CGX register with per-LMAC bit fields,
	 * hence it is accessed with lmac index 0. When TX pause is disabled,
	 * override backpressure for this LMAC so RX is not throttled.
	 */
	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

/* Initialise or tear down default pause frame configuration for an LMAC.
 *
 * Called with enable=true at LMAC init to program both the SMU (higher speed)
 * and GMP (lower speed) MAC paths for pause frame RX/TX along with default
 * pause timing, and with enable=false at exit to quiesce both paths.
 *
 * @cgx:     CGX device
 * @lmac_id: LMAC index within this CGX block
 * @enable:  true to enable pause frame RX/TX, false to disable
 */
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		/* Same for the GMP (GMI) MAC path */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval; interval occupies the low 16
		 * bits of the register and is programmed to half the pause
		 * time so refresh frames go out before the peer's timer lapses.
		 */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* ALL pause frames received are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}

/* CGX Firmware interface low level support */
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
{
@@ -544,59 +645,6 @@ static inline bool cgx_event_is_linkevent(u64 event)
		return false;
		return false;
}
}


/* Query the CGX firmware for the size of the MKEX profile.
 *
 * Builds a CGX_CMD_GET_MKEX_PRFL_SIZE command, sends it via the generic
 * firmware interface on LMAC 0, and on success extracts the size from the
 * response word.
 *
 * Return: 0 on success with *prfl_sz populated, or the error from
 * cgx_fwi_cmd_generic().
 */
static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
					   struct cgx *cgx)
{
	u64 req = 0;
	u64 resp;
	int err;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
	if (!err)
		*prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);

	return err;
}

/* Query the CGX firmware for the base address of the MKEX profile.
 *
 * Builds a CGX_CMD_GET_MKEX_PRFL_ADDR command, sends it via the generic
 * firmware interface on LMAC 0, and on success extracts the address from
 * the response word.
 *
 * Return: 0 on success with *prfl_addr populated, or the error from
 * cgx_fwi_cmd_generic().
 */
static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
					     struct cgx *cgx)
{
	u64 req = 0;
	u64 resp;
	int err;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
	if (!err)
		*prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);

	return err;
}

/* Retrieve the MKEX profile address and size from firmware.
 *
 * Uses the first CGX device on the global cgx_list; the profile is shared,
 * so any CGX instance can answer the query.
 *
 * @addr: out, profile base address
 * @size: out, profile size
 *
 * Return: 0 on success, -EINVAL for NULL output pointers, -ENXIO when no
 * CGX device is registered, -EIO if either firmware query fails.
 */
int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
{
	struct cgx *cgx_dev;
	int err;

	if (!addr || !size)
		return -EINVAL;

	/* NOTE(review): list_first_entry() never returns NULL even on an
	 * empty list, so this check presumably relies on cgx_list being
	 * non-empty or is ineffective — confirm against list handling.
	 */
	cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
	if (!cgx_dev)
		return -ENXIO;

	err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
	if (err)
		return -EIO;

	err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
	if (err)
		return -EIO;

	return 0;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
{
	struct lmac *lmac = data;
	struct lmac *lmac = data;
@@ -680,6 +728,24 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
	return 0;
	return 0;
}
}


/* Get the base address of the shared firmware data region.
 *
 * Sends CGX_CMD_GET_FWD_BASE to the firmware via the first registered CGX
 * device (the region is global, any instance can report it) and extracts
 * the base address from the response.
 *
 * @base: out, physical base address of the shared firmware data
 *
 * Return: 0 on success, -ENXIO when no CGX device is registered, or the
 * error from cgx_fwi_cmd_generic().
 */
int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
{
	u64 req = 0;
	u64 req = 0;
@@ -787,6 +853,7 @@ static int cgx_lmac_init(struct cgx *cgx)


		/* Add reference */
		/* Add reference */
		cgx->lmac_idmap[i] = lmac;
		cgx->lmac_idmap[i] = lmac;
		cgx_lmac_pause_frm_config(cgx, i, true);
	}
	}


	return cgx_lmac_verify_fwi_version(cgx);
	return cgx_lmac_verify_fwi_version(cgx);
@@ -805,6 +872,7 @@ static int cgx_lmac_exit(struct cgx *cgx)


	/* Free all lmac related resources */
	/* Free all lmac related resources */
	for (i = 0; i < cgx->lmac_count; i++) {
	for (i = 0; i < cgx->lmac_count; i++) {
		cgx_lmac_pause_frm_config(cgx, i, false);
		lmac = cgx->lmac_idmap[i];
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
		if (!lmac)
			continue;
			continue;
+15 −1
Original line number Original line Diff line number Diff line
@@ -60,10 +60,20 @@
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK	BIT_ULL(3)
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK	BIT_ULL(3)
#define CGXX_GMP_GMI_RXX_FRM_CTL	0x38028
#define CGXX_GMP_GMI_RXX_FRM_CTL	0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK	BIT_ULL(3)
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK	BIT_ULL(3)
#define CGXX_SMUX_TX_CTL		0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME	0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL	0x20120
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME	0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL	0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV	BIT_ULL(7)
#define CGXX_CMR_RX_OVR_BP		0x130
#define CGX_CMR_RX_OVR_BP_EN(X)		BIT_ULL(((X) + 8))
#define CGX_CMR_RX_OVR_BP_BP(X)		BIT_ULL(((X) + 4))


#define CGX_COMMAND_REG			CGXX_SCRATCH1_REG
#define CGX_COMMAND_REG			CGXX_SCRATCH1_REG
#define CGX_EVENT_REG			CGXX_SCRATCH0_REG
#define CGX_EVENT_REG			CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT			2200 /* msecs */
#define CGX_CMD_TIMEOUT			2200 /* msecs */
#define DEFAULT_PAUSE_TIME		0x7FF


#define CGX_NVEC			37
#define CGX_NVEC			37
#define CGX_LMAC_FWI			0
#define CGX_LMAC_FWI			0
@@ -124,5 +134,9 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo);
		      struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
int cgx_get_fwdata_base(u64 *base);
int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
			   u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
			   u8 tx_pause, u8 rx_pause);
#endif /* CGX_H */
#endif /* CGX_H */
+7 −1
Original line number Original line Diff line number Diff line
@@ -79,7 +79,8 @@ enum cgx_cmd_id {
	CGX_CMD_MODE_CHANGE,		/* hot plug support */
	CGX_CMD_MODE_CHANGE,		/* hot plug support */
	CGX_CMD_INTF_SHUTDOWN,
	CGX_CMD_INTF_SHUTDOWN,
	CGX_CMD_GET_MKEX_PRFL_SIZE,
	CGX_CMD_GET_MKEX_PRFL_SIZE,
	CGX_CMD_GET_MKEX_PRFL_ADDR
	CGX_CMD_GET_MKEX_PRFL_ADDR,
	CGX_CMD_GET_FWD_BASE,		/* get base address of shared FW data */
};
};


/* async event ids */
/* async event ids */
@@ -149,6 +150,11 @@ enum cgx_cmd_own {
 */
 */
#define RESP_MKEX_PRFL_ADDR		GENMASK_ULL(63, 9)
#define RESP_MKEX_PRFL_ADDR		GENMASK_ULL(63, 9)


/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
 * CGX_STAT_SUCCESS
 */
#define RESP_FWD_BASE		GENMASK_ULL(56, 9)

/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
 * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
 * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
 *
 *
+36 −2
Original line number Original line Diff line number Diff line
@@ -125,7 +125,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY,		0x001, ready, msg_req, ready_msg_rsp)		\
M(READY,		0x001, ready, msg_req, ready_msg_rsp)		\
M(ATTACH_RESOURCES,	0x002, attach_resources, rsrc_attach, msg_rsp)	\
M(ATTACH_RESOURCES,	0x002, attach_resources, rsrc_attach, msg_rsp)	\
M(DETACH_RESOURCES,	0x003, detach_resources, rsrc_detach, msg_rsp)	\
M(DETACH_RESOURCES,	0x003, detach_resources, rsrc_detach, msg_rsp)	\
M(MSIX_OFFSET,		0x004, msix_offset, msg_req, msix_offset_rsp)	\
M(MSIX_OFFSET,		0x005, msix_offset, msg_req, msix_offset_rsp)	\
M(VF_FLR,		0x006, vf_flr, msg_req, msg_rsp)		\
M(VF_FLR,		0x006, vf_flr, msg_req, msg_rsp)		\
M(GET_HW_CAP,		0x008, get_hw_cap, msg_req, get_hw_cap_rsp)	\
M(GET_HW_CAP,		0x008, get_hw_cap, msg_req, get_hw_cap_rsp)	\
/* CGX mbox IDs (range 0x200 - 0x3FF) */				\
/* CGX mbox IDs (range 0x200 - 0x3FF) */				\
@@ -143,6 +143,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO,	0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_GET_LINKINFO,	0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE,	0x20A, cgx_intlbk_enable, msg_req, msg_rsp)	\
M(CGX_INTLBK_ENABLE,	0x20A, cgx_intlbk_enable, msg_req, msg_rsp)	\
M(CGX_INTLBK_DISABLE,	0x20B, cgx_intlbk_disable, msg_req, msg_rsp)	\
M(CGX_INTLBK_DISABLE,	0x20B, cgx_intlbk_disable, msg_req, msg_rsp)	\
M(CGX_CFG_PAUSE_FRM,	0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg,	\
			       cgx_pause_frm_cfg)			\
/* NPA mbox IDs (range 0x400 - 0x5FF) */				\
/* NPA mbox IDs (range 0x400 - 0x5FF) */				\
M(NPA_LF_ALLOC,		0x400, npa_lf_alloc,				\
M(NPA_LF_ALLOC,		0x400, npa_lf_alloc,				\
				npa_lf_alloc_req, npa_lf_alloc_rsp)	\
				npa_lf_alloc_req, npa_lf_alloc_rsp)	\
@@ -211,6 +213,9 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
				 nix_lso_format_cfg,			\
				 nix_lso_format_cfg,			\
				 nix_lso_format_cfg_rsp)		\
				 nix_lso_format_cfg_rsp)		\
M(NIX_RXVLAN_ALLOC,	0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)	\
M(NIX_RXVLAN_ALLOC,	0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)	\
M(NIX_BP_ENABLE,	0x8016, nix_bp_enable, nix_bp_cfg_req,	\
				nix_bp_cfg_rsp)	\
M(NIX_BP_DISABLE,	0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \


/* Messages initiated by AF (range 0xC00 - 0xDFF) */
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -251,7 +256,8 @@ enum rvu_af_status {


struct ready_msg_rsp {
struct ready_msg_rsp {
	struct mbox_msghdr hdr;
	struct mbox_msghdr hdr;
	u16    sclk_feq;	/* SCLK frequency */
	u16    sclk_freq;	/* SCLK frequency (in MHz) */
	u16    rclk_freq;	/* RCLK frequency (in MHz) */
};
};


/* Structure for requesting resource provisioning.
/* Structure for requesting resource provisioning.
@@ -342,6 +348,15 @@ struct cgx_link_info_msg {
	struct cgx_link_user_info link_info;
	struct cgx_link_user_info link_info;
};
};


struct cgx_pause_frm_cfg {
	struct mbox_msghdr hdr;
	u8 set;
	/* set = 1 if the request is to config pause frames */
	/* set = 0 if the request is to fetch pause frames config */
	u8 rx_pause;
	u8 tx_pause;
};

/* NPA mbox message formats */
/* NPA mbox message formats */


/* NPA mailbox error codes
/* NPA mailbox error codes
@@ -676,6 +691,25 @@ struct nix_lso_format_cfg_rsp {
	u8 lso_format_idx;
	u8 lso_format_idx;
};
};


struct nix_bp_cfg_req {
	struct mbox_msghdr hdr;
	u16	chan_base; /* Starting channel number */
	u8	chan_cnt; /* Number of channels */
	u8	bpid_per_chan;
	/* bpid_per_chan = 0 assigns single bp id for range of channels */
	/* bpid_per_chan = 1 assigns separate bp id for each channel */
};

/* PF can be mapped to either CGX or LBK interface,
 * so maximum 64 channels are possible.
 */
#define NIX_MAX_BPID_CHAN	64
struct nix_bp_cfg_rsp {
	struct mbox_msghdr hdr;
	u16	chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
	u8	chan_cnt; /* Number of channel for which bpids are assigned */
};

/* NPC mbox message structs */
/* NPC mbox message structs */


#define NPC_MCAM_ENTRY_INVALID	0xFFFF
#define NPC_MCAM_ENTRY_INVALID	0xFFFF
+143 −22
Original line number Original line Diff line number Diff line
@@ -88,13 +88,15 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
	u64 reg_val;
	u64 reg_val;


	reg = rvu->afreg_base + ((block << 28) | offset);
	reg = rvu->afreg_base + ((block << 28) | offset);
	while (time_before(jiffies, timeout)) {
again:
	reg_val = readq(reg);
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
	if (zero && !(reg_val & mask))
		return 0;
		return 0;
	if (!zero && (reg_val & mask))
	if (!zero && (reg_val & mask))
		return 0;
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		usleep_range(1, 5);
		goto again;
	}
	}
	return -EBUSY;
	return -EBUSY;
}
}
@@ -421,6 +423,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
	}
	}
}
}


/* Program the RVUM block revision ID into its privileged TYPEX_REV
 * register, marking the AF driver as active.
 */
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

/* Clear the RVUM block revision ID on teardown/error, undoing
 * rvu_setup_rvum_blk_revid().
 */
static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
{
	int err;
	int err;
@@ -603,7 +618,11 @@ setup_vfmsix:
	 */
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);
				DMA_BIDIRECTIONAL, 0);
@@ -613,10 +632,18 @@ setup_vfmsix:


	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;


	return 0;
	return 0;
}
}


/* Restore the MSI-X table base register to the original physical address
 * saved in rvu->msixtr_base_phy, undoing the IOVA remap done during
 * MSI-X resource setup.
 */
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
static void rvu_free_hw_resources(struct rvu *rvu)
{
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_hwinfo *hw = rvu->hw;
@@ -655,9 +682,80 @@ static void rvu_free_hw_resources(struct rvu *rvu)
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);
			   DMA_BIDIRECTIONAL, 0);


	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
	mutex_destroy(&rvu->rsrc_lock);
}
}


/* Assign MAC addresses to all CGX-mapped PFs and their VFs.
 *
 * Prefers MAC addresses supplied by firmware through rvu->fwdata
 * (pf_macs[]/vf_macs[], stored as u64); a zero entry, an out-of-range
 * index, or missing fwdata falls back to a random MAC. PFs not mapped
 * to a CGX interface are skipped.
 */
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}

		/* Assign MAC address to VFs, indexed by hardware VF number */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
		}
	}
}

/* Map the shared firmware data region into the kernel.
 *
 * Asks the CGX firmware for the region's physical base, maps it with
 * ioremap_wc(), and validates its layout against the kernel's
 * struct rvu_fwdata. On validation failure the mapping is released and
 * rvu->fwdata is reset to NULL so callers can treat fwdata as optional.
 *
 * Return: 0 on success, -EIO if the base cannot be fetched or mapped,
 * -EINVAL on a struct layout mismatch.
 */
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

/* Unmap the shared firmware data region if it was mapped by
 * rvu_fwdata_init(); safe to call when fwdata is NULL.
 */
static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
static int rvu_setup_hw_resources(struct rvu *rvu)
{
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_hwinfo *hw = rvu->hw;
@@ -813,6 +911,8 @@ init:


	mutex_init(&rvu->rsrc_lock);
	mutex_init(&rvu->rsrc_lock);


	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	err = rvu_setup_msix_resources(rvu);
	if (err)
	if (err)
		return err;
		return err;
@@ -825,8 +925,10 @@ init:
		/* Allocate memory for block LF/slot to pcifunc mapping info */
		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map)
		if (!block->fn_map) {
			return -ENOMEM;
			err = -ENOMEM;
			goto msix_err;
		}


		/* Scan all blocks to check if low level firmware has
		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 * already provisioned any of the resources to a PF/VF.
@@ -836,25 +938,36 @@ init:


	err = rvu_npc_init(rvu);
	err = rvu_npc_init(rvu);
	if (err)
	if (err)
		goto exit;
		goto npc_err;


	err = rvu_cgx_init(rvu);
	err = rvu_cgx_init(rvu);
	if (err)
	if (err)
		goto exit;
		goto cgx_err;

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);


	err = rvu_npa_init(rvu);
	err = rvu_npa_init(rvu);
	if (err)
	if (err)
		goto cgx_err;
		goto npa_err;


	err = rvu_nix_init(rvu);
	err = rvu_nix_init(rvu);
	if (err)
	if (err)
		goto cgx_err;
		goto nix_err;


	return 0;
	return 0;


nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
cgx_err:
	rvu_cgx_exit(rvu);
	rvu_cgx_exit(rvu);
exit:
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
	return err;
}
}


@@ -901,6 +1014,10 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
			   struct ready_msg_rsp *rsp)
{
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
	return 0;
}
}


@@ -2128,6 +2245,9 @@ static int rvu_register_interrupts(struct rvu *rvu)
	}
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;


	/* Clear TRPEND bit for all PF */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs*/
	/* Enable ME interrupt for all PFs*/
	rvu_write64(rvu, BLKADDR_RVUM,
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
@@ -2439,17 +2559,13 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		goto err_disable_device;
		goto err_disable_device;
	}
	}


	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
	if (err) {
		dev_err(dev, "Unable to set DMA mask\n");
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
		goto err_release_regions;
	}
	}


	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	pci_set_master(pdev);
	if (err) {
		dev_err(dev, "Unable to set consistent DMA mask\n");
		goto err_release_regions;
	}


	/* Map Admin function CSRs */
	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
@@ -2489,6 +2605,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (err)
	if (err)
		goto err_flr;
		goto err_flr;


	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	err = rvu_enable_sriov(rvu);
	if (err)
	if (err)
@@ -2506,8 +2624,10 @@ err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
err_release_regions:
	pci_release_regions(pdev);
	pci_release_regions(pdev);
err_disable_device:
err_disable_device:
@@ -2527,11 +2647,12 @@ static void rvu_remove(struct pci_dev *pdev)
	rvu_unregister_interrupts(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_free_hw_resources(rvu);

	rvu_clear_rvum_blk_revid(rvu);
	pci_release_regions(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	pci_set_drvdata(pdev, NULL);
Loading