Commit f6a55e1a authored by Christoph Hellwig, committed by Jason Gunthorpe

memremap: lift the devmap_enable manipulation into devm_memremap_pages



Just check whether a ->page_free operation is set and take care of
enabling the static key, as well as dropping it again using device
managed resources.  Also check that a ->page_free method is provided
for the pgmap types that require it, and validate the pgmap type while
we are at it.

Note that this also fixes a leak: hmm never called dev_pagemap_put_ops
and thus would leave the slow path enabled forever, even after the
device driver was unloaded or disabled.
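
For illustration, a minimal sketch of the driver-side pattern after
this change, modeled on the pmem conversion below.  example_attach() is
a hypothetical helper used only for this sketch; fsdax_pagemap_ops is
the driver's existing ops table and must provide ->page_free:

	/*
	 * devm_memremap_pages() now verifies that ->page_free is set
	 * for pgmap types that need it, enables the devmap_managed_key
	 * static key, and registers a device-managed action that
	 * disables the key again when the device is released.
	 */
	static int example_attach(struct device *dev, struct dev_pagemap *pgmap)
	{
		void *addr;

		pgmap->type = MEMORY_DEVICE_FS_DAX;
		pgmap->ops = &fsdax_pagemap_ops;

		addr = devm_memremap_pages(dev, pgmap);
		return IS_ERR(addr) ? PTR_ERR(addr) : 0;
	}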

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d8668bb0
drivers/nvdimm/pmem.c +4 −19
@@ -334,11 +334,6 @@ static void pmem_release_disk(void *__pmem)
 	put_disk(pmem->disk);
 }
 
-static void pmem_release_pgmap_ops(void *__pgmap)
-{
-	dev_pagemap_put_ops();
-}
-
 static void pmem_pagemap_page_free(struct page *page, void *data)
 {
 	wake_up_var(&page->_refcount);
@@ -350,16 +345,6 @@ static const struct dev_pagemap_ops fsdax_pagemap_ops = {
 	.cleanup		= pmem_pagemap_cleanup,
 };
 
-static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
-{
-	dev_pagemap_get_ops();
-	if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
-		return -ENOMEM;
-	pgmap->type = MEMORY_DEVICE_FS_DAX;
-	pgmap->ops = &fsdax_pagemap_ops;
-	return 0;
-}
-
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns)
 {
@@ -415,8 +400,8 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->pfn_flags = PFN_DEV;
 	pmem->pgmap.ref = &q->q_usage_counter;
 	if (is_nd_pfn(dev)) {
-		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
-			return -ENOMEM;
+		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pfn_sb = nd_pfn->pfn_sb;
 		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -428,8 +413,8 @@ static int pmem_attach_disk(struct device *dev,
 	} else if (pmem_should_map_pages(dev)) {
 		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
 		pmem->pgmap.altmap_valid = false;
-		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
-			return -ENOMEM;
+		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pmem->pfn_flags |= PFN_MAP;
 		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
include/linux/mm.h +0 −10
@@ -932,8 +932,6 @@ static inline bool is_zone_device_page(const struct page *page)
 #endif
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
-void dev_pagemap_get_ops(void);
-void dev_pagemap_put_ops(void);
 void __put_devmap_managed_page(struct page *page);
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 static inline bool put_devmap_managed_page(struct page *page)
@@ -973,14 +971,6 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
 #endif /* CONFIG_PCI_P2PDMA */
 
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline void dev_pagemap_get_ops(void)
-{
-}
-
-static inline void dev_pagemap_put_ops(void)
-{
-}
-
 static inline bool put_devmap_managed_page(struct page *page)
 {
 	return false;
kernel/memremap.c +37 −22
@@ -17,6 +17,35 @@ static DEFINE_XARRAY(pgmap_array);
 #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
 #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_managed_enable;
+
+static void devmap_managed_enable_put(void *data)
+{
+	if (atomic_dec_and_test(&devmap_managed_enable))
+		static_branch_disable(&devmap_managed_key);
+}
+
+static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+{
+	if (!pgmap->ops->page_free) {
+		WARN(1, "Missing page_free method\n");
+		return -EINVAL;
+	}
+
+	if (atomic_inc_return(&devmap_managed_enable) == 1)
+		static_branch_enable(&devmap_managed_key);
+	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+}
+#else
+static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
+
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
 		       unsigned long addr,
@@ -156,6 +185,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	};
 	pgprot_t pgprot = PAGE_KERNEL;
 	int error, nid, is_ram;
+	bool need_devmap_managed = true;
 
 	switch (pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
@@ -173,6 +203,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		break;
 	case MEMORY_DEVICE_DEVDAX:
 	case MEMORY_DEVICE_PCI_P2PDMA:
+		need_devmap_managed = false;
 		break;
 	default:
 		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
@@ -185,6 +216,12 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (need_devmap_managed) {
+		error = devmap_managed_enable_get(dev, pgmap);
+		if (error)
+			return ERR_PTR(error);
+	}
+
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
@@ -351,28 +388,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
-DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
-EXPORT_SYMBOL(devmap_managed_key);
-static atomic_t devmap_enable;
-
-/*
- * Toggle the static key for ->page_free() callbacks when dev_pagemap
- * pages go idle.
- */
-void dev_pagemap_get_ops(void)
-{
-	if (atomic_inc_return(&devmap_enable) == 1)
-		static_branch_enable(&devmap_managed_key);
-}
-EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);
-
-void dev_pagemap_put_ops(void)
-{
-	if (atomic_dec_and_test(&devmap_enable))
-		static_branch_disable(&devmap_managed_key);
-}
-EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
-
 void __put_devmap_managed_page(struct page *page)
 {
 	int count = page_ref_dec_return(page);
mm/hmm.c +0 −2
@@ -1415,8 +1415,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	void *result;
 	int ret;
 
-	dev_pagemap_get_ops();
-
 	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);