Commit 897e6365 authored by Christoph Hellwig, committed by Jason Gunthorpe

memremap: add a migrate_to_ram method to struct dev_pagemap_ops



This replaces the hacky ->fault callback, which is currently directly
called from common code through an HMM-specific data structure, as an
exercise in layering violations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
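
In concrete terms, the change trades a five-argument HMM-private hook for a method on the generic pagemap ops that takes the common fault descriptor. A condensed before/after view of the two signatures, taken from the hunks below:

	/* Before: HMM-private function pointer, stored in struct hmm_devmem
	 * and reached from common code via the hmm layering detour. */
	typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
				unsigned long addr,
				const struct page *page,
				unsigned int flags,
				pmd_t *pmdp);

	/* After: generic method on struct dev_pagemap_ops; struct vm_fault
	 * already carries the vma, address, page, flags, and pmd. */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);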
parent f6a55e1a
include/linux/hmm.h  +0 −6
@@ -692,11 +692,6 @@ struct hmm_devmem_ops {
  * chunk, as an optimization. It must, however, prioritize the faulting address
  * over all the others.
  */
-typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
-				unsigned long addr,
-				const struct page *page,
-				unsigned int flags,
-				pmd_t *pmdp);
 
 struct hmm_devmem {
 	struct completion		completion;
@@ -707,7 +702,6 @@ struct hmm_devmem {
 	struct dev_pagemap		pagemap;
 	const struct hmm_devmem_ops	*ops;
 	struct percpu_ref		ref;
-	dev_page_fault_t		page_fault;
 };
 
 /*
include/linux/memremap.h  +6 −0
@@ -80,6 +80,12 @@ struct dev_pagemap_ops {
 	 * Wait for refcount in struct dev_pagemap to be idle and reap it.
 	 */
 	void (*cleanup)(struct dev_pagemap *pgmap);
+
+	/*
+	 * Used for private (un-addressable) device memory only.  Must migrate
+	 * the page back to a CPU accessible page.
+	 */
+	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
 };
 
 /**
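
For illustration, a driver exposing MEMORY_DEVICE_PRIVATE memory would now supply the method roughly as sketched here; the my_* names are hypothetical and not part of this commit:

	static vm_fault_t my_devmem_migrate_to_ram(struct vm_fault *vmf)
	{
		/*
		 * Hypothetical helper: migrate the faulting device-private
		 * page back to a CPU-accessible page.  Per the comment added
		 * above, failure must be reported as VM_FAULT_SIGBUS.
		 */
		return my_migrate_one_page(vmf->page, vmf->vma, vmf->address);
	}

	static const struct dev_pagemap_ops my_pagemap_ops = {
		.page_free	= my_devmem_page_free,	/* hypothetical */
		.migrate_to_ram	= my_devmem_migrate_to_ram,
	};

As the kernel/memremap.c hunk further down enforces, a MEMORY_DEVICE_PRIVATE pagemap without ops->migrate_to_ram is now rejected by devm_memremap_pages().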
include/linux/swapops.h  +0 −15
@@ -129,12 +129,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
 	return pfn_to_page(swp_offset(entry));
 }
-
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
-		       unsigned long addr,
-		       swp_entry_t entry,
-		       unsigned int flags,
-		       pmd_t *pmdp);
 #else /* CONFIG_DEVICE_PRIVATE */
 static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
 {
@@ -164,15 +158,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
 	return NULL;
 }
-
-static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
-				     unsigned long addr,
-				     swp_entry_t entry,
-				     unsigned int flags,
-				     pmd_t *pmdp)
-{
-	return VM_FAULT_SIGBUS;
-}
 #endif /* CONFIG_DEVICE_PRIVATE */
 
 #ifdef CONFIG_MIGRATION
kernel/memremap.c  +4 −31
@@ -11,7 +11,6 @@
 #include <linux/types.h>
 #include <linux/wait_bit.h>
 #include <linux/xarray.h>
-#include <linux/hmm.h>
 
 static DEFINE_XARRAY(pgmap_array);
 #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
@@ -46,36 +45,6 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgm
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
-		       unsigned long addr,
-		       swp_entry_t entry,
-		       unsigned int flags,
-		       pmd_t *pmdp)
-{
-	struct page *page = device_private_entry_to_page(entry);
-	struct hmm_devmem *devmem;
-
-	devmem = container_of(page->pgmap, typeof(*devmem), pagemap);
-
-	/*
-	 * The page_fault() callback must migrate page back to system memory
-	 * so that CPU can access it. This might fail for various reasons
-	 * (device issue, device was unsafely unplugged, ...). When such
-	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
-	 *
-	 * Note that because memory cgroup charges are accounted to the device
-	 * memory, this should never fail because of memory restrictions (but
-	 * allocation of regular system page might still fail because we are
-	 * out of memory).
-	 *
-	 * There is a more in-depth description of what that callback can and
-	 * cannot do, in include/linux/memremap.h
-	 */
-	return devmem->page_fault(vma, addr, page, flags, pmdp);
-}
-#endif /* CONFIG_DEVICE_PRIVATE */
-
 static void pgmap_array_delete(struct resource *res)
 {
 	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
@@ -193,6 +162,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 			WARN(1, "Device private memory not supported\n");
 			return ERR_PTR(-EINVAL);
 		}
+		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
+			WARN(1, "Missing migrate_to_ram method\n");
+			return ERR_PTR(-EINVAL);
+		}
 		break;
 	case MEMORY_DEVICE_FS_DAX:
 		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
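
With device_private_entry_fault() and its hmm_devmem detour gone, the CPU fault path reaches the driver method directly through the faulting page's pagemap. Schematically (the matching do_swap_page() update in mm/memory.c belongs to this change but is not among the hunks shown):

	/* On faulting a device-private swap entry: */
	vmf->page = device_private_entry_to_page(entry);
	ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);

The contract spelled out in the removed comment still applies: the method must migrate the page back to system memory so the CPU can access it, returning VM_FAULT_SIGBUS only on hard failure.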
mm/hmm.c  +5 −8
@@ -1366,15 +1366,12 @@ static void hmm_devmem_ref_kill(struct dev_pagemap *pgmap)
 	percpu_ref_kill(pgmap->ref);
 }
 
-static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
-			    unsigned long addr,
-			    const struct page *page,
-			    unsigned int flags,
-			    pmd_t *pmdp)
+static vm_fault_t hmm_devmem_migrate_to_ram(struct vm_fault *vmf)
 {
-	struct hmm_devmem *devmem = page->pgmap->data;
+	struct hmm_devmem *devmem = vmf->page->pgmap->data;
 
-	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
+	return devmem->ops->fault(devmem, vmf->vma, vmf->address, vmf->page,
+			vmf->flags, vmf->pmd);
 }
 
 static void hmm_devmem_free(struct page *page, void *data)
@@ -1388,6 +1385,7 @@ static const struct dev_pagemap_ops hmm_pagemap_ops = {
 	.page_free		= hmm_devmem_free,
 	.kill			= hmm_devmem_ref_kill,
 	.cleanup		= hmm_devmem_ref_exit,
+	.migrate_to_ram		= hmm_devmem_migrate_to_ram,
 };
 
 /*
@@ -1438,7 +1436,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
-	devmem->page_fault = hmm_devmem_fault;
 
 	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
 	devmem->pagemap.res = *devmem->resource;
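
After this last hunk, hmm_devmem_add() installs no per-devmem function pointer at all; fault handling comes entirely from hmm_pagemap_ops, whose .migrate_to_ram entry was added above. A condensed view of the resulting pagemap setup, assuming the surrounding code of this kernel version:

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.ops = &hmm_pagemap_ops;
	devmem->pagemap.data = devmem;	/* recovered via vmf->page->pgmap->data */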