Commit 7327699f authored by Mark Starovoytov, committed by David S. Miller
Browse files

net: atlantic: QoS implementation: max_rate



This patch adds initial support for mqprio rate limiters (max_rate only).

Atlantic HW supports Rate-Shaping for time-sensitive traffic at per
Traffic Class (TC) granularity.
Target rate is defined by:
* nominal link rate (always 10G);
* rate factor (ratio between nominal rate and max allowed).

Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b9e98926
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -35,6 +35,9 @@ enum aq_tc_mode {
			(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
			(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED   0xFFU
#define AQ_RX_QUEUE_NOT_ASSIGNED   0xFFU


/* Used for rate to Mbps conversion */
#define AQ_MBPS_DIVISOR         125000 /* 1000000 / 8 */

/* NIC H/W capabilities */
/* NIC H/W capabilities */
struct aq_hw_caps_s {
struct aq_hw_caps_s {
	u64 hw_features;
	u64 hw_features;
+26 −4
Original line number Original line Diff line number Diff line
@@ -333,8 +333,12 @@ static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
}
}


static int aq_validate_mqprio_opt(struct aq_nic_s *self,
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
				  struct tc_mqprio_qopt_offload *mqprio,
				  const unsigned int num_tc)
				  const unsigned int num_tc)
{
{
	const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	int i;

	if (num_tc > aq_hw_num_tcs(self->aq_hw)) {
	if (num_tc > aq_hw_num_tcs(self->aq_hw)) {
		netdev_err(self->ndev, "Too many TCs requested\n");
		netdev_err(self->ndev, "Too many TCs requested\n");
		return -EOPNOTSUPP;
		return -EOPNOTSUPP;
@@ -345,25 +349,43 @@ static int aq_validate_mqprio_opt(struct aq_nic_s *self,
		return -EOPNOTSUPP;
		return -EOPNOTSUPP;
	}
	}


	for (i = 0; i < num_tc; i++) {
		if (has_min_rate && mqprio->min_rate[i]) {
			netdev_err(self->ndev,
				   "Min tx rate is not supported\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
	return 0;
}
}


static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
			   void *type_data)
{
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	int err;
	int err;
	int i;


	if (type != TC_SETUP_QDISC_MQPRIO)
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;
		return -EOPNOTSUPP;


	err = aq_validate_mqprio_opt(aq_nic, mqprio->num_tc);
	err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
	if (err)
	if (err)
		return err;
		return err;


	return aq_nic_setup_tc_mqprio(aq_nic, mqprio->num_tc,
	if (mqprio->flags & TC_MQPRIO_F_MAX_RATE) {
				      mqprio->prio_tc_map);
		for (i = 0; i < mqprio->qopt.num_tc; i++) {
			u64 max_rate = mqprio->max_rate[i];

			do_div(max_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
		}
	}

	return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
				      mqprio->qopt.prio_tc_map);
}
}


static const struct net_device_ops aq_ndev_ops = {
static const struct net_device_ops aq_ndev_ops = {
+20 −0
Original line number Original line Diff line number Diff line
@@ -1324,3 +1324,23 @@ int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)


	return err;
	return err;
}
}

/* Store the requested max TX rate (in Mbps) for a traffic class in the
 * NIC configuration.  Rates below 10 Mbps (but non-zero) are clamped up
 * to 10 Mbps, the minimum the shaper can usefully express; zero means
 * "no rate limit".  The value is applied to HW on the next qos setup.
 *
 * Returns 0 on success, -EINVAL for an out-of-range TC index.
 */
int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate)
{
	struct aq_nic_cfg_s *nic_cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (max_rate && max_rate < 10) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"max rate", 10);
		nic_cfg->tc_max_rate[tc] = 10;
		return 0;
	}

	nic_cfg->tc_max_rate[tc] = max_rate;

	return 0;
}
+3 −0
Original line number Original line Diff line number Diff line
@@ -65,6 +65,7 @@ struct aq_nic_cfg_s {
	u32 priv_flags;
	u32 priv_flags;
	u8  tcs;
	u8  tcs;
	u8 prio_tc_map[8];
	u8 prio_tc_map[8];
	u32 tc_max_rate[AQ_CFG_TCS_MAX];
	struct aq_rss_parameters aq_rss;
	struct aq_rss_parameters aq_rss;
	u32 eee_speeds;
	u32 eee_speeds;
};
};
@@ -194,4 +195,6 @@ u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location);
			   u32 location);
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate);
#endif /* AQ_NIC_H */
#endif /* AQ_NIC_H */
+54 −4
Original line number Original line Diff line number Diff line
@@ -138,6 +138,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
	unsigned int prio = 0U;
	unsigned int prio = 0U;
	u32 tc = 0U;
	u32 tc = 0U;


	hw_atl_b0_hw_init_tx_tc_rate_limit(self);

	if (cfg->is_ptp) {
	if (cfg->is_ptp) {
		tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
		tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
		rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
		rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
@@ -151,7 +153,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);


	/* TPS TC credits init */
	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);


	tx_buff_size /= cfg->tcs;
	tx_buff_size /= cfg->tcs;
@@ -162,8 +163,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
		/* TX Packet Scheduler Data TC0 */
		/* TX Packet Scheduler Data TC0 */
		hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
		hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
		hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
		hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
		hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
		hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);


		/* Tx buf size TC0 */
		/* Tx buf size TC0 */
		hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
		hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
@@ -320,10 +319,61 @@ int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
	return aq_hw_err_from_flags(self);
	return aq_hw_err_from_flags(self);
}
}


int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
{
	/* Scale factor is based on the number of bits in fractional portion */
	static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
	static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
				    HW_ATL_TPS_DESC_RATE_Y_SHIFT;
	struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
	int tc;

	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
	for (tc = 0; tc != nic_cfg->tcs; tc++) {
		const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
		const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);

		hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
		hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);

		hw_atl_tps_tx_desc_rate_en_set(self, desc, en);

		if (en) {
			/* Nominal rate is always 10G */
			const u32 rate = 10000U * scale /
					 nic_cfg->tc_max_rate[tc];
			const u32 rate_int = rate >>
					     HW_ATL_TPS_DESC_RATE_Y_WIDTH;
			const u32 rate_frac = rate & frac_msk;

			hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
			hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
		} else {
			/* A value of 1 indicates the queue is not
			 * rate controlled.
			 */
			hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
			hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
		}
	}
	for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
		const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);

		hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
		hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
		hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
{
	struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;

	/* Tx TC/Queue number config */
	/* Tx TC/Queue number config */
	hw_atl_tpb_tps_tx_tc_mode_set(self, self->aq_nic_cfg->tc_mode);
	hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);


	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
Loading