Commit 09b42804 authored by Joerg Roedel

x86/amd-iommu: Reimplement flush_all_domains_on_iommu()



This patch reimplements the function
flush_all_domains_on_iommu to use the global protection
domain list.
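
For readability, this is the reimplemented function as it reads after the
patch, reassembled from the added lines of the hunk below (the helpers,
amd_iommu_pd_lock, and amd_iommu_pd_list are as they appear in the diff;
the explanatory comments are editorial):

	/*
	 * This function flushes all domains that have devices on the given IOMMU
	 */
	static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
	{
		u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		struct protection_domain *domain;
		unsigned long flags;

		/* Hold the global lock so the domain list cannot change under us */
		spin_lock_irqsave(&amd_iommu_pd_lock, flags);

		list_for_each_entry(domain, &amd_iommu_pd_list, list) {
			/* Skip domains that have no devices behind this IOMMU */
			if (domain->dev_iommu[iommu->index] == 0)
				continue;

			/* Invalidate all pages of this domain and wait for completion */
			spin_lock(&domain->lock);
			iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
			iommu_flush_complete(domain);
			spin_unlock(&domain->lock);
		}

		spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
	}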

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent e3306664
Showing 1 changed file with 24 additions and 19 deletions
@@ -499,43 +499,48 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
 }
 
 /*
- * This function flushes one domain on one IOMMU
+ * This function flushes all domains that have devices on the given IOMMU
  */
-static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
 {
-	struct iommu_cmd cmd;
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	struct protection_domain *domain;
 	unsigned long flags;
 
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	__iommu_queue_command(iommu, &cmd);
-	__iommu_completion_wait(iommu);
-	__iommu_wait_for_completion(iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
-{
-	int i;
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		if (domain->dev_iommu[iommu->index] == 0)
 			continue;
-		flush_domain_on_iommu(iommu, i);
+
+		spin_lock(&domain->lock);
+		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
+
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
 /*
  * This function uses heavy locking and may disable irqs for some time. But
  * this is no issue because it is only called during resume.
  */
 void amd_iommu_flush_all_domains(void)
 {
 	struct protection_domain *domain;
 	unsigned long flags;
 
 	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
 		spin_lock(&domain->lock);
 		iommu_flush_tlb_pde(domain);
 		iommu_flush_complete(domain);
 		spin_unlock(&domain->lock);
 	}
 
 	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
 static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
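
Worth noting is the lock nesting that both functions above now share:
amd_iommu_pd_lock is taken irqsave as the outer lock, keeping the global
protection domain list stable, and each domain->lock nests inside it while
that domain's invalidation commands are issued. A minimal sketch of the
pattern, with the actual flush step elided:

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);	/* outer: guards the domain list */
	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		spin_lock(&domain->lock);		/* inner: guards one domain */
		/* ... queue invalidation commands, wait for completion ... */
		spin_unlock(&domain->lock);
	}
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);

As the comment above amd_iommu_flush_all_domains notes, this heavy,
irq-disabling locking is acceptable because these paths only run during
resume.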