Commit 3fc1ca00 authored by Lu Baolu, committed by Joerg Roedel

swiotlb: Split size parameter to map/unmap APIs



This splits the size parameter to swiotlb_tbl_map_single() and
swiotlb_tbl_unmap_single() into a mapping_size and an alloc_size
parameter: mapping_size is the number of bytes actually bounced,
while alloc_size is the bounce-buffer size reserved for the mapping,
which callers may round up to the IOMMU page size.
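
As a sketch only (not part of this commit), a caller that wants its
bounce buffer padded to an IOMMU page boundary would pass the real
transfer length as mapping_size and the rounded-up length as
alloc_size; iommu_page_size below is an assumed variable:

	size_t mapping_size = size;
	size_t alloc_size = ALIGN(size, iommu_page_size);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
				     mapping_size, alloc_size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;
	/* ... perform DMA ... */
	swiotlb_tbl_unmap_single(dev, map, mapping_size, alloc_size,
				 dir, attrs);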

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2c700108
drivers/xen/swiotlb-xen.c  +4 −4
@@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
-				     attrs);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+				     size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir,
+		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}
@@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
include/linux/swiotlb.h  +6 −2
@@ -46,13 +46,17 @@ enum dma_sync_target {

extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys, size_t size,
+					  phys_addr_t phys,
+					  size_t mapping_size,
+					  size_t alloc_size,
					  enum dma_data_direction dir,
					  unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
-				     size_t size, enum dma_data_direction dir,
+				     size_t mapping_size,
+				     size_t alloc_size,
+				     enum dma_data_direction dir,
				     unsigned long attrs);

extern void swiotlb_tbl_sync_single(struct device *hwdev,
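
Callers that need no padding simply pass the same value for both sizes,
as the updated call sites below do; a minimal paired call, sketched
under that assumption:

	addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr, phys,
				      size, size, dir, attrs);
	if (addr != (phys_addr_t)DMA_MAPPING_ERROR)
		swiotlb_tbl_unmap_single(hwdev, addr, size, size,
					 dir, attrs);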
kernel/dma/direct.c  +1 −1
@@ -297,7 +297,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

kernel/dma/swiotlb.c  +21 −13
@@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr, size_t size,
+				   phys_addr_t orig_addr,
+				   size_t mapping_size,
+				   size_t alloc_size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
@@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
			     sme_active() ? "SME" : "SEV");

+	if (mapping_size > alloc_size) {
+		dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+			      mapping_size, alloc_size);
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
+
	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;
@@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
	 * For mappings greater than or equal to a page, we limit the stride
	 * (and hence alignment) to a page size.
	 */
-	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size >= PAGE_SIZE)
+	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	if (alloc_size >= PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
@@ -547,7 +555,7 @@ not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
-			 size, io_tlb_nslabs, tmp_io_tlb_used);
+			 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
	return (phys_addr_t)DMA_MAPPING_ERROR;
found:
	io_tlb_used += nslots;
@@ -562,7 +570,7 @@ found:
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);

	return tlb_addr;
}
@@ -571,11 +579,11 @@ found:
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir,
-			      unsigned long attrs)
+			      size_t mapping_size, size_t alloc_size,
+			      enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long flags;
-	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

@@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
	if (orig_addr != INVALID_PHYS_ADDR &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
@@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,

	/* Oh well, have to allocate and map a bounce buffer. */
	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-			*phys, size, dir, attrs);
+			*phys, size, size, dir, attrs);
	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
		return false;

	/* Ensure that the address returned is DMA'ble */
	*dma_addr = __phys_to_dma(dev, *phys);
	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return false;
	}
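
One consequence of the new sanity check, shown with assumed sizes
(SZ_8K and SZ_4K from <linux/sizes.h>, not values used by this
commit): a request whose mapping_size exceeds alloc_size now fails up
front with a one-time warning.

	map = swiotlb_tbl_map_single(dev, tbl_dma_addr, phys,
				     SZ_8K /* mapping_size */,
				     SZ_4K /* alloc_size */,
				     DMA_TO_DEVICE, 0);
	/* map == (phys_addr_t)DMA_MAPPING_ERROR */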