Commit abfd6fe0 authored by Will Deacon
Browse files

iommu/io-pgtable: Replace ->tlb_add_flush() with ->tlb_add_page()



The ->tlb_add_flush() callback in the io-pgtable API now looks a bit
silly:

  - It takes a size and a granule, which are always the same
  - It takes a 'bool leaf', which is always true
  - It only ever flushes a single page

With that in mind, replace it with an optional ->tlb_add_page() callback
that drops the useless parameters.

Signed-off-by: Will Deacon <will@kernel.org>
parent 10b7a7d9
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -247,10 +247,6 @@ static void mmu_tlb_inv_context_s1(void *cookie)
	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}

/*
 * Deliberate no-op: this driver has nothing to do per-range here (it is
 * wired up as the ->tlb_add_flush callback in mmu_tlb_ops below, which
 * this commit removes).
 */
static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
				     size_t granule, bool leaf, void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
@@ -273,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
	.tlb_flush_leaf = mmu_tlb_flush_leaf,
	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
	.tlb_sync	= mmu_tlb_sync_context,
};

+7 −1
Original line number Diff line number Diff line
@@ -1603,6 +1603,12 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
	} while (size -= granule);
}

/*
 * ->tlb_add_page() hook: queue invalidation of a single leaf entry by
 * delegating to the range helper with size == granule and leaf == true.
 * No sync is issued here; that happens later via ->tlb_sync().
 */
static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule,
					 void *cookie)
{
	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
@@ -1627,7 +1633,7 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

+57 −31
Original line number Diff line number Diff line
@@ -248,10 +248,16 @@ enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_BYPASS,
};

/*
 * Driver-private wrapper around the generic io-pgtable flush ops: keeps
 * the shared range-invalidation helper alongside the iommu_flush_ops
 * table so the walk/leaf/page callbacks can reach it via the cookie.
 *
 * Fix: the tlb_inv_range member declaration was missing its terminating
 * semicolon, which would not compile.
 */
struct arm_smmu_flush_ops {
	struct iommu_flush_ops		tlb;
	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_flush_ops	*tlb_ops;
	const struct arm_smmu_flush_ops	*flush_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
@@ -551,42 +557,62 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie);
	smmu_domain->tlb_ops->tlb_sync(cookie);
	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb.tlb_sync(cookie);
}

/*
 * ->tlb_flush_leaf() hook: invalidate leaf entries covering
 * [iova, iova + size) at the given granule, then synchronously wait for
 * the invalidation to complete.
 */
static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *flush = smmu_domain->flush_ops;

	flush->tlb_inv_range(iova, size, granule, true, cookie);
	flush->tlb.tlb_sync(cookie);
}

/*
 * ->tlb_add_page() hook: queue invalidation of a single granule-sized
 * leaf entry. No sync here — the core issues ->tlb_sync() later.
 *
 * Fix: the scraped diff had interleaved two stale pre-change lines that
 * referenced an undeclared 'size' and the removed 'tlb_ops' member;
 * they are dropped to restore the compilable post-change body.
 */
static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
				  void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
		.tlb_add_page	= arm_smmu_tlb_add_page,
		.tlb_sync	= arm_smmu_tlb_sync_context,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_nosync,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
		.tlb_add_page	= arm_smmu_tlb_add_page,
		.tlb_sync	= arm_smmu_tlb_sync_context,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_nosync,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
		.tlb_add_page	= arm_smmu_tlb_add_page,
		.tlb_sync	= arm_smmu_tlb_sync_vmid,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -866,7 +892,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
@@ -886,9 +912,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
@@ -917,7 +943,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->tlb_ops,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

@@ -1346,9 +1372,9 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->tlb_ops) {
	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}
@@ -1359,9 +1385,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->tlb_ops) {
	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
		smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}
+5 −7
Original line number Diff line number Diff line
@@ -584,7 +584,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
		return __arm_v7s_unmap(data, iova, size, 2, tablep);
	}

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	io_pgtable_tlb_add_page(&data->iop, iova, size);
	return size;
}

@@ -647,8 +647,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
				 */
				smp_wmb();
			} else {
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
							 blk_size, true);
				io_pgtable_tlb_add_page(iop, iova, blk_size);
			}
			iova += blk_size;
		}
@@ -809,10 +808,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

/*
 * Self-test stub for ->tlb_add_page(): forwards to dummy_tlb_flush with
 * size == granule so the WARN_ON sanity checks there still fire.
 *
 * Fix: the scraped diff fused the removed dummy_tlb_add_flush signature
 * and body with the new function; only the post-change definition is
 * kept.
 */
static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static void dummy_tlb_sync(void *cookie)
@@ -824,7 +822,7 @@ static const struct iommu_flush_ops dummy_tlb_ops = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_add_page	= dummy_tlb_add_page,
	.tlb_sync	= dummy_tlb_sync,
};

+5 −6
Original line number Diff line number Diff line
@@ -582,7 +582,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_add_page(&data->iop, iova, size);
		return size;
	}

@@ -623,7 +623,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
			io_pgtable_tlb_add_page(iop, iova, size);
		}

		return size;
@@ -1075,10 +1075,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

/*
 * Self-test stub for ->tlb_add_page(): forwards to dummy_tlb_flush with
 * size == granule so the WARN_ON sanity checks there still fire.
 *
 * Fix: the scraped diff fused the removed dummy_tlb_add_flush signature
 * and body with the new function; only the post-change definition is
 * kept.
 */
static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static void dummy_tlb_sync(void *cookie)
@@ -1090,7 +1089,7 @@ static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_add_page	= dummy_tlb_add_page,
	.tlb_sync	= dummy_tlb_sync,
};

Loading