Commit 9662b99a authored by Zhen Lei, committed by Will Deacon
Browse files

iommu/arm-smmu-v3: Add support for non-strict mode



Now that io-pgtable knows how to dodge strict TLB maintenance, all
that's left to do is bridge the gap between the IOMMU core requesting
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE for default domains, and showing the
appropriate IO_PGTABLE_QUIRK_NON_STRICT flag to alloc_io_pgtable_ops().

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
[rm: convert to domain attribute, tweak commit message]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent b6b65ca2
Loading
Loading
Loading
Loading
+56 −23
Original line number Original line Diff line number Diff line
@@ -612,6 +612,7 @@ struct arm_smmu_domain {
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct mutex			init_mutex; /* Protects smmu pointer */


	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_ops		*pgtbl_ops;
	bool				non_strict;


	enum arm_smmu_domain_stage	stage;
	enum arm_smmu_domain_stage	stage;
	union {
	union {
@@ -1407,6 +1408,12 @@ static void arm_smmu_tlb_inv_context(void *cookie)
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}
	}


	/*
	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
	 * PTEs previously cleared by unmaps on the current CPU not yet visible
	 * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
	 * to guarantee those are observed before the TLBI. Do be careful, 007.
	 */
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
	__arm_smmu_tlb_sync(smmu);
}
}
@@ -1633,6 +1640,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;


	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
	if (!pgtbl_ops)
		return -ENOMEM;
		return -ENOMEM;
@@ -1934,9 +1944,8 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);


	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
	switch (domain->type) {
		return -EINVAL;
	case IOMMU_DOMAIN_UNMANAGED:

		switch (attr) {
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
		case DOMAIN_ATTR_NESTING:
			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1944,6 +1953,19 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
		default:
		default:
			return -ENODEV;
			return -ENODEV;
		}
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			*(int *)data = smmu_domain->non_strict;
			return 0;
		default:
			return -ENODEV;
		}
		break;
	default:
		return -EINVAL;
	}
}
}


static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
@@ -1952,11 +1974,10 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
	int ret = 0;
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);


	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);
	mutex_lock(&smmu_domain->init_mutex);


	switch (domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		switch (attr) {
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
		case DOMAIN_ATTR_NESTING:
			if (smmu_domain->smmu) {
			if (smmu_domain->smmu) {
@@ -1968,11 +1989,23 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
			else
			else
				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

			break;
			break;
		default:
		default:
			ret = -ENODEV;
			ret = -ENODEV;
		}
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch(attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			smmu_domain->non_strict = *(int *)data;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	default:
		ret = -EINVAL;
	}


out_unlock:
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	mutex_unlock(&smmu_domain->init_mutex);