Commit 800bb1c8 authored by Christoph Hellwig, committed by Jason Gunthorpe
Browse files

mm: handle multiple owners of device private pages in migrate_vma

Add a new src_owner field to struct migrate_vma.  If the field is set,
only device private pages with page->pgmap->owner equal to that field are
migrated.  If the field is not set, only "normal" pages are migrated.

Fixes: df6ad698 ("mm/device-public-memory: device memory cache coherent with CPU")
Link: https://lore.kernel.org/r/20200316193216.920734-3-hch@lst.de


Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f894ddd5
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -563,6 +563,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
	mig.end = end;
	mig.end = end;
	mig.src = &src_pfn;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.dst = &dst_pfn;
	mig.src_owner = &kvmppc_uvmem_pgmap;


	mutex_lock(&kvm->arch.uvmem_lock);
	mutex_lock(&kvm->arch.uvmem_lock);
	/* The requested page is already paged-out, nothing to do */
	/* The requested page is already paged-out, nothing to do */
+1 −0
Original line number Original line Diff line number Diff line
@@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
		.end		= vmf->address + PAGE_SIZE,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.src		= &src,
		.dst		= &dst,
		.dst		= &dst,
		.src_owner	= drm->dev,
	};
	};


	/*
	/*
+8 −0
Original line number Original line Diff line number Diff line
@@ -196,6 +196,14 @@ struct migrate_vma {
	unsigned long		npages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		start;
	unsigned long		end;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory.  If set only device
	 * private pages with this owner are migrated.  If not set
	 * device private pages are not migrated at all.
	 */
	void			*src_owner;
};
};


int migrate_vma_setup(struct migrate_vma *args);
int migrate_vma_setup(struct migrate_vma *args);
+6 −3
Original line number Original line Diff line number Diff line
@@ -2241,7 +2241,7 @@ again:
	arch_enter_lazy_mmu_mode();
	arch_enter_lazy_mmu_mode();


	for (; addr < end; addr += PAGE_SIZE, ptep++) {
	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn, pfn;
		unsigned long mpfn = 0, pfn;
		struct page *page;
		struct page *page;
		swp_entry_t entry;
		swp_entry_t entry;
		pte_t pte;
		pte_t pte;
@@ -2255,8 +2255,6 @@ again:
		}
		}


		if (!pte_present(pte)) {
		if (!pte_present(pte)) {
			mpfn = 0;

			/*
			/*
			 * Only care about unaddressable device page special
			 * Only care about unaddressable device page special
			 * page table entry. Other special swap entries are not
			 * page table entry. Other special swap entries are not
@@ -2267,11 +2265,16 @@ again:
				goto next;
				goto next;


			page = device_private_entry_to_page(entry);
			page = device_private_entry_to_page(entry);
			if (page->pgmap->owner != migrate->src_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
					MIGRATE_PFN_MIGRATE;
			if (is_write_device_private_entry(entry))
			if (is_write_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
		} else {
			if (migrate->src_owner)
				goto next;
			pfn = pte_pfn(pte);
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn)) {
			if (is_zero_pfn(pfn)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				mpfn = MIGRATE_PFN_MIGRATE;