Commit 89e89f7a authored by Tariq Toukan, committed by Saeed Mahameed
Browse files

net/mlx5e: Replace multiplication by stride size with a shift



In RX data-path, use shift operations instead of a regular multiplication
by stride size, as it is a power of two.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent b45d8b50
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -535,8 +535,8 @@ struct mlx5e_rq {
		struct {
		struct {
			struct mlx5e_mpw_info *info;
			struct mlx5e_mpw_info *info;
			void                  *mtt_no_align;
			void                  *mtt_no_align;
			u16                    stride_sz;
			u16                    num_strides;
			u16                    num_strides;
			u8                     log_stride_sz;
		} mpwqe;
		} mpwqe;
	};
	};
	struct {
	struct {
+2 −2
Original line number Original line Diff line number Diff line
@@ -615,10 +615,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			goto err_rq_wq_destroy;
			goto err_rq_wq_destroy;
		}
		}


		rq->mpwqe.stride_sz = BIT(params->mpwqe_log_stride_sz);
		rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
		rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
		rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);


		rq->buff.wqe_sz = rq->mpwqe.stride_sz * rq->mpwqe.num_strides;
		rq->buff.wqe_sz = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
		byte_count = rq->buff.wqe_sz;
		byte_count = rq->buff.wqe_sz;


		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		err = mlx5e_create_rq_umr_mkey(mdev, rq);
+2 −2
Original line number Original line Diff line number Diff line
@@ -304,7 +304,7 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
					    u32 page_idx, u32 frag_offset,
					    u32 page_idx, u32 frag_offset,
					    u32 len)
					    u32 len)
{
{
	unsigned int truesize = ALIGN(len, rq->mpwqe.stride_sz);
	unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz));


	dma_sync_single_for_cpu(rq->pdev,
	dma_sync_single_for_cpu(rq->pdev,
				wi->umr.dma_info[page_idx].addr + frag_offset,
				wi->umr.dma_info[page_idx].addr + frag_offset,
@@ -910,7 +910,7 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
					   struct sk_buff *skb)
					   struct sk_buff *skb)
{
{
	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset     = stride_ix * rq->mpwqe.stride_sz;
	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
	u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx       = wqe_offset >> PAGE_SHIFT;
	u32 page_idx       = wqe_offset >> PAGE_SHIFT;
	u32 head_page_idx  = page_idx;
	u32 head_page_idx  = page_idx;