Commit 5fc145f1 authored by Jakub Kicinski

Merge branch 'fixes-for-ena-driver'

Shay Agroskin says:

====================
Fixes for ENA driver

- fix wrong data offset on machines that support rx offset
- work around an Intel IOMMU issue
- fix an out-of-bounds access when the request id is wrong
====================

Link: https://lore.kernel.org/r/20201123190859.21298-1-shayagr@amazon.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents d8f0a867 1396d314
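
Of the three fixes, the out-of-bounds request id is the core correctness change: the device echoes a request id back in each Rx completion descriptor, and the driver uses that id to index its per-queue buffer-info array, so a corrupted id from the hardware could become an out-of-bounds index. The patch therefore validates every req_id against the queue depth inside ena_com_rx_pkt() before it is used. A minimal, self-contained C sketch of that pattern (fake_desc, fake_buf_info, and handle_rx_desc are illustrative stand-ins, not the driver's actual structures):

#include <stdio.h>
#include <stdint.h>

#define Q_DEPTH 256u                    /* stand-in for io_cq->q_depth */

struct fake_desc { uint16_t req_id; };  /* device-written completion */
struct fake_buf_info { void *page; };   /* driver-owned per-id state */

static struct fake_buf_info buf_info[Q_DEPTH];

/* Mirror the added check: reject a descriptor whose req_id is out of
 * range instead of using it to index buf_info[] out of bounds. */
static int handle_rx_desc(const struct fake_desc *desc)
{
	if (desc->req_id >= Q_DEPTH)
		return -1;              /* corrupt id: drop the packet */
	return buf_info[desc->req_id].page ? 0 : -1;
}

int main(void)
{
	struct fake_desc good = { .req_id = 10 };
	struct fake_desc bad = { .req_id = 0xffff };

	buf_info[10].page = buf_info;   /* pretend a buffer is posted */
	printf("good desc: %d\n", handle_rx_desc(&good)); /* prints 0 */
	printf("bad desc:  %d\n", handle_rx_desc(&bad));  /* prints -1 */
	return 0;
}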
drivers/net/ethernet/amazon/ena/ena_eth_com.c: +3 −0
@@ -516,6 +516,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
 {
 	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
 	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+	u16 q_depth = io_cq->q_depth;
 	u16 cdesc_idx = 0;
 	u16 nb_hw_desc;
 	u16 i = 0;
@@ -543,6 +544,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
 	do {
 		ena_buf[i].len = cdesc->length;
 		ena_buf[i].req_id = cdesc->req_id;
+		if (unlikely(ena_buf[i].req_id >= q_depth))
+			return -EIO;
 
 		if (++i >= nb_hw_desc)
 			break;
drivers/net/ethernet/amazon/ena/ena_netdev.c: +30 −50
@@ -789,24 +789,6 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
 					      adapter->num_io_queues);
 }
 
-static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
-{
-	if (likely(req_id < rx_ring->ring_size))
-		return 0;
-
-	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
-		  "Invalid rx req_id: %hu\n", req_id);
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->rx_stats.bad_req_id++;
-	u64_stats_update_end(&rx_ring->syncp);
-
-	/* Trigger device reset */
-	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
-	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
-	return -EFAULT;
-}
-
 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
  * @adapter: network interface device structure
  * @qid: queue index
@@ -926,10 +908,14 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
 static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 				    struct ena_rx_buffer *rx_info, gfp_t gfp)
 {
+	int headroom = rx_ring->rx_headroom;
 	struct ena_com_buf *ena_buf;
 	struct page *page;
 	dma_addr_t dma;
 
+	/* restore page offset value in case it has been changed by device */
+	rx_info->page_offset = headroom;
+
 	/* if previous allocated page is not used */
 	if (unlikely(rx_info->page))
 		return 0;
@@ -959,10 +945,9 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 		  "Allocate page %p, rx_info %p\n", page, rx_info);
 
 	rx_info->page = page;
-	rx_info->page_offset = 0;
 	ena_buf = &rx_info->ena_buf;
-	ena_buf->paddr = dma + rx_ring->rx_headroom;
-	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
+	ena_buf->paddr = dma + headroom;
+	ena_buf->len = ENA_PAGE_SIZE - headroom;
 
 	return 0;
 }
@@ -1356,15 +1341,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	struct ena_rx_buffer *rx_info;
 	u16 len, req_id, buf = 0;
 	void *va;
-	int rc;
 
 	len = ena_bufs[buf].len;
 	req_id = ena_bufs[buf].req_id;
 
-	rc = validate_rx_req_id(rx_ring, req_id);
-	if (unlikely(rc < 0))
-		return NULL;
-
 	rx_info = &rx_ring->rx_buffer_info[req_id];
 
 	if (unlikely(!rx_info->page)) {
@@ -1379,7 +1359,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 	/* save virt address of first buffer */
 	va = page_address(rx_info->page) + rx_info->page_offset;
-	prefetch(va + NET_IP_ALIGN);
+
+	prefetch(va);
 
 	if (len <= rx_ring->rx_copybreak) {
 		skb = ena_alloc_skb(rx_ring, false);
@@ -1420,8 +1401,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
 				rx_info->page_offset, len, ENA_PAGE_SIZE);
-		/* The offset is non zero only for the first buffer */
-		rx_info->page_offset = 0;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "RX skb updated. len %d. data_len %d\n",
@@ -1440,10 +1419,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		len = ena_bufs[buf].len;
 		req_id = ena_bufs[buf].req_id;
 
-		rc = validate_rx_req_id(rx_ring, req_id);
-		if (unlikely(rc < 0))
-			return NULL;
-
 		rx_info = &rx_ring->rx_buffer_info[req_id];
 	} while (1);
 
@@ -1544,8 +1519,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	int ret;
 
 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-	xdp->data = page_address(rx_info->page) +
-		rx_info->page_offset + rx_ring->rx_headroom;
+	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
 	xdp_set_data_meta_invalid(xdp);
 	xdp->data_hard_start = page_address(rx_info->page);
 	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
@@ -1612,8 +1586,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		if (unlikely(ena_rx_ctx.descs == 0))
 			break;
 
+		/* First descriptor might have an offset set by the device */
 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-		rx_info->page_offset = ena_rx_ctx.pkt_offset;
+		rx_info->page_offset += ena_rx_ctx.pkt_offset;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
@@ -1697,12 +1672,18 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 error:
 	adapter = netdev_priv(rx_ring->netdev);
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->rx_stats.bad_desc_num++;
-	u64_stats_update_end(&rx_ring->syncp);
+	if (rc == -ENOSPC) {
+		u64_stats_update_begin(&rx_ring->syncp);
+		rx_ring->rx_stats.bad_desc_num++;
+		u64_stats_update_end(&rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+	} else {
+		u64_stats_update_begin(&rx_ring->syncp);
+		rx_ring->rx_stats.bad_req_id++;
+		u64_stats_update_end(&rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+	}
 
-	/* Too many desc from the device. Trigger reset */
-	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
 	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
 	return 0;
@@ -3388,16 +3369,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 		goto err_mmio_read_less;
 	}
 
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
 	if (rc) {
-		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
-		goto err_mmio_read_less;
-	}
-
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
-	if (rc) {
-		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
-			rc);
+		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
 		goto err_mmio_read_less;
 	}
 
@@ -4167,6 +4141,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
+	if (rc) {
+		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
+		goto err_disable_device;
+	}
+
 	pci_set_master(pdev);
 
 	ena_dev = vzalloc(sizeof(*ena_dev));