Commit 272baeeb authored by Ben Hutchings
Browse files

sfc: Properly distinguish RX buffer and DMA lengths



Replace efx_nic::rx_buffer_len with efx_nic::rx_dma_len, the maximum
RX DMA length.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent 80c2e716
Loading
Loading
Loading
Loading
+5 −6
Original line number Diff line number Diff line
@@ -639,12 +639,11 @@ static void efx_start_datapath(struct efx_nic *efx)
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
	efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			   efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));
	efx->rx_buffer_order = get_order(sizeof(struct efx_rx_page_state) +
					 EFX_PAGE_IP_ALIGN + efx->rx_dma_len);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
+2 −3
Original line number Diff line number Diff line
@@ -669,8 +669,7 @@ struct vfdi_status;
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @rx_buffer_len: RX buffer length, including start alignment but excluding
 *	any metadata
 * @rx_dma_len: Current maximum RX DMA length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
@@ -786,7 +785,7 @@ struct efx_nic {
	unsigned rss_spread;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_buffer_len;
	unsigned int rx_dma_len;
	unsigned int rx_buffer_order;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
+8 −11
Original line number Diff line number Diff line
@@ -27,8 +27,9 @@
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
/* Maximum length for an RX descriptor sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
			  - EFX_PAGE_IP_ALIGN)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u
@@ -52,10 +53,6 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
{
	return buf->page_offset + efx->type->rx_buffer_hash_size;
}
/* Total size in bytes of one RX buffer allocation: PAGE_SIZE scaled by
 * the driver's page order (rx_buffer_order covers buffers larger than a
 * single page).
 */
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
@@ -105,7 +102,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
					efx_rx_buf_size(efx),
					PAGE_SIZE << efx->rx_buffer_order,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
@@ -124,12 +121,12 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_dma_len;
		rx_buf->flags = 0;
		++rx_queue->added_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
		if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
@@ -153,7 +150,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
		if (--state->refcnt == 0) {
			dma_unmap_page(&efx->pci_dev->dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PAGE_SIZE << efx->rx_buffer_order,
				       DMA_FROM_DEVICE);
		} else if (used_len) {
			dma_sync_single_for_cpu(&efx->pci_dev->dev,
@@ -221,7 +218,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,

	rx_buf->flags = 0;

	if (efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	if (efx->rx_dma_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);