Commit 76f5e9f8 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.3-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:

 - Revert an Intel VT-d patch that caused problems for some users.

 - Removal of a feature in the Intel VT-d driver that was never
   supported in hardware. This qualifies as a fix because the code for
   this feature sets reserved bits in the invalidation queue descriptor,
   causing failed invalidations on real hardware (see the descriptor-bit
   sketch below).

 - Two fixes for the AMD IOMMU driver: one fixes a race condition in
   increase_address_space() and the other adds a missing IOTLB flush
   when the kernel is booted in kdump mode (a sketch of the locking
   pattern behind the race fix follows this list).
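
The race fixed here is a classic check-then-act bug: increase_address_space()
tested domain->mode without holding domain->lock, so two concurrent callers
could both pass the PAGE_MODE_6_LEVEL check and grow the page table twice.
Below is a minimal user-space sketch of the corrected pattern; struct domain,
MODE_MAX, and the pthread mutex are hypothetical stand-ins for the driver's
protection_domain, PAGE_MODE_6_LEVEL, and spin_lock_irqsave()/WARN_ON_ONCE(),
not the driver's actual code (see the real hunk further down):

/* race_sketch.c - hypothetical stand-ins; compile with: cc -c -pthread */
#include <pthread.h>
#include <stdlib.h>

#define MODE_MAX 6			/* stand-in for PAGE_MODE_6_LEVEL */

struct domain {				/* stand-in for protection_domain */
	pthread_mutex_t	lock;
	int		mode;		/* current page-table depth */
	void		*pt_root;	/* top-level page table */
};

static void increase_address_space(struct domain *d)
{
	void *pte = calloc(1, 4096);	/* candidate new top level */

	if (!pte)
		return;

	pthread_mutex_lock(&d->lock);
	if (d->mode == MODE_MAX) {	/* re-check under the lock */
		pthread_mutex_unlock(&d->lock);
		free(pte);		/* a racing caller grew it first */
		return;
	}
	d->pt_root = pte;		/* publish the new root and depth */
	d->mode += 1;			/* atomically w.r.t. other growers */
	pthread_mutex_unlock(&d->lock);
}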

* tag 'iommu-fixes-v5.3-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix race in increase_address_space()
  iommu/amd: Flush old domains in kdump kernel
  iommu/vt-d: Remove global page flush support
  Revert "iommu/vt-d: Avoid duplicated pci dma alias consideration"
parents 04459710 754265bc
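
A note on the VT-d feature removal: the deleted QI_EIOTLB_GL() macro (see the
include/linux/intel-iommu.h hunk at the bottom) sets bit 7 of the second
quadword of an extended-IOTLB invalidation descriptor. Since hardware never
implemented global granularity, that bit is reserved, and a descriptor with it
set fails to invalidate. A stand-alone illustration, using adapted copies of
the macros that appear in this diff (not kernel code):

/* gl_bit_sketch.c - macro copies from the diff below, u64 -> uint64_t */
#include <stdint.h>
#include <stdio.h>

#define QI_EIOTLB_GL(gl)	(((uint64_t)(gl)) << 7)	/* removed below */
#define QI_EIOTLB_IH(ih)	(((uint64_t)(ih)) << 6)
#define QI_EIOTLB_AM(am)	((uint64_t)(am))

int main(void)
{
	/* gl = 1 sets bit 7 of qw1; hardware that never implemented
	 * global flushes treats that bit as reserved and rejects the
	 * queued invalidation */
	uint64_t qw1 = QI_EIOTLB_GL(1) | QI_EIOTLB_IH(0) | QI_EIOTLB_AM(0);

	printf("qw1 = 0x%llx, bit 7 %s\n", (unsigned long long)qw1,
	       qw1 & (1ULL << 7) ? "set (reserved)" : "clear");
	return 0;
}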
drivers/iommu/amd_iommu.c +35 −5
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
 	iommu_completion_wait(iommu);
 }
 
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+	struct iommu_cmd cmd;
+
+	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+			      dom_id, 1);
+	iommu_queue_command(iommu, &cmd);
+
+	iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
 	struct iommu_cmd cmd;
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
 				   gfp_t gfp)
 {
+	unsigned long flags;
 	u64 *pte;
 
-	if (domain->mode == PAGE_MODE_6_LEVEL)
+	spin_lock_irqsave(&domain->lock, flags);
+
+	if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
 		/* address space already 64 bit large */
-		return false;
+		goto out;
 
 	pte = (void *)get_zeroed_page(gfp);
 	if (!pte)
-		return false;
+		goto out;
 
 	*pte             = PM_LEVEL_PDE(domain->mode,
 					iommu_virt_to_phys(domain->pt_root));
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
 	domain->mode    += 1;
 	domain->updated  = true;
 
-	return true;
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
 	u64 pte_root = 0;
 	u64 flags = 0;
+	u32 old_domid;
 
 	if (domain->mode != PAGE_MODE_NONE)
 		pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 	flags &= ~DEV_DOMID_MASK;
 	flags |= domain->id;
 
+	old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
 	amd_iommu_dev_table[devid].data[1]  = flags;
 	amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+	/*
+	 * A kdump kernel might be replacing a domain ID that was copied from
+	 * the previous kernel--if so, it needs to flush the translation cache
+	 * entries for the old domain ID that is being overwritten
+	 */
+	if (old_domid) {
+		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+		amd_iommu_flush_tlb_domid(iommu, old_domid);
+	}
 }
 
 static void clear_dte_entry(u16 devid)
drivers/iommu/intel-iommu.c +53 −2
@@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
@@ -2105,9 +2107,26 @@ out_unlock:
 	return ret;
 }
 
+struct domain_context_mapping_data {
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	struct pasid_table *table;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+				     u16 alias, void *opaque)
+{
+	struct domain_context_mapping_data *data = opaque;
+
+	return domain_context_mapping_one(data->domain, data->iommu,
+					  data->table, PCI_BUS_NUM(alias),
+					  alias & 0xff);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
+	struct domain_context_mapping_data data;
 	struct pasid_table *table;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 		return -ENODEV;
 
 	table = intel_pasid_get_table(dev);
-	return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+
+	if (!dev_is_pci(dev))
+		return domain_context_mapping_one(domain, iommu, table,
+						  bus, devfn);
+
+	data.domain = domain;
+	data.iommu = iommu;
+	data.table = table;
+
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      &domain_context_mapping_cb, &data);
 }
 
 static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -4759,6 +4788,28 @@ out_free_dmar:
 	return ret;
 }
 
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+	return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+{
+	if (!iommu || !dev || !dev_is_pci(dev))
+		return;
+
+	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+}
+
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
 	struct dmar_domain *domain;
@@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 					PASID_RID2PASID);
 
 		iommu_disable_dev_iotlb(info);
-		domain_context_clear_one(iommu, info->bus, info->devfn);
+		domain_context_clear(iommu, info->dev);
 		intel_pasid_free_table(info->dev);
 	}
 
drivers/iommu/intel-svm.c +15 −21
@@ -100,20 +100,15 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 }
 
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-				       unsigned long address, unsigned long pages, int ih, int gl)
+				unsigned long address, unsigned long pages, int ih)
 {
 	struct qi_desc desc;
 
-	if (pages == -1) {
-		/* For global kernel pages we have to flush them in *all* PASIDs
-		 * because that's the only option the hardware gives us. Despite
-		 * the fact that they are actually only accessible through one. */
-		if (gl)
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
-					QI_EIOTLB_TYPE;
-		else
+	/*
+	 * Do PASID granu IOTLB invalidation if page selective capability is
+	 * not available.
+	 */
+	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
 		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
 			QI_EIOTLB_DID(sdev->did) |
 			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
 				QI_EIOTLB_TYPE;
 		desc.qw1 = QI_EIOTLB_ADDR(address) |
-				QI_EIOTLB_GL(gl) |
 				QI_EIOTLB_IH(ih) |
 				QI_EIOTLB_AM(mask);
 	}
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 }
 
 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-				  unsigned long pages, int ih, int gl)
+				unsigned long pages, int ih)
 {
 	struct intel_svm_dev *sdev;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
 	rcu_read_unlock();
 }
 
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
 
 	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list) {
 		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 	}
 	rcu_read_unlock();
 
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 				 * large and has to be physically contiguous. So it's
 				 * hard to be as defensive as we might like. */
 				intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 				kfree_rcu(sdev, rcu);
 
 				if (list_empty(&svm->devs)) {
include/linux/intel-iommu.h +0 −3
@@ -346,7 +346,6 @@ enum {
 #define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)	(((u64)am))
 #define QI_EIOTLB_PASID(pasid) 	(((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf
 
-#define QI_GRAN_ALL_ALL			0
-#define QI_GRAN_NONG_ALL		1
 #define QI_GRAN_NONG_PASID		2
 #define QI_GRAN_PSI_PASID		3