Commit f6d2b802 authored by Dan Williams's avatar Dan Williams
Browse files

Merge branch 'for-5.7/libnvdimm' into libnvdimm-for-next

- Introduce 'zero_page_range' as a dax operation. This facilitates
  filesystem-dax operation without a block-device.

- Advertise a persistence domain for of_pmem and papr_scm. The
  persistence domain indicates where cpu-store cycles need to reach in
  the platform-memory subsystem before the platform will consider them
  power-fail protected.

- Fixup some flexible-array declarations.
parents d3b88655 4e4ced93
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -342,8 +342,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)

	if (p->is_volatile)
		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
	else
	else {
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
		p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	}
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
				ndr_desc.res, p->dn);
+6 −6
Original line number Diff line number Diff line
@@ -145,32 +145,32 @@ struct nfit_spa {
	unsigned long ars_state;
	u32 clear_err_unit;
	u32 max_ars;
	struct acpi_nfit_system_address spa[0];
	struct acpi_nfit_system_address spa[];
};

/*
 * List entry carrying one ACPI NFIT control region (DCR) table.
 * The table is stored inline after the list node via a C99 flexible
 * array member (the diff artifact duplicating the member with the
 * deprecated zero-length `dcr[0]` form is removed).
 */
struct nfit_dcr {
	struct list_head list;
	struct acpi_nfit_control_region dcr[];
};

/*
 * List entry carrying one ACPI NFIT block data window (BDW) region
 * table, stored inline via a C99 flexible array member (duplicate
 * zero-length `bdw[0]` diff artifact removed).
 */
struct nfit_bdw {
	struct list_head list;
	struct acpi_nfit_data_region bdw[];
};

/*
 * List entry carrying one ACPI NFIT interleave description table,
 * stored inline via a C99 flexible array member (duplicate
 * zero-length `idt[0]` diff artifact removed).
 */
struct nfit_idt {
	struct list_head list;
	struct acpi_nfit_interleave idt[];
};

/*
 * List entry carrying one ACPI NFIT flush hint address table,
 * stored inline via a C99 flexible array member (duplicate
 * zero-length `flush[0]` diff artifact removed).
 */
struct nfit_flush {
	struct list_head list;
	struct acpi_nfit_flush_address flush[];
};

/*
 * List entry carrying one ACPI NFIT memory device mapping table,
 * stored inline via a C99 flexible array member (duplicate
 * zero-length `memdev[0]` diff artifact removed).
 */
struct nfit_memdev {
	struct list_head list;
	struct acpi_nfit_memory_map memdev[];
};

enum nfit_mem_flags {
+3 −1
Original line number Diff line number Diff line
@@ -421,8 +421,10 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
	if (!dax_dev)
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err;
	}

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);
+26 −2
Original line number Diff line number Diff line
@@ -344,6 +344,23 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

/*
 * dax_zero_page_range() - zero a page-aligned range on a dax device
 * @dax_dev: dax device whose ->zero_page_range() operation is invoked
 * @pgoff: page offset into the dax device at which zeroing starts
 * @nr_pages: number of pages to zero (must currently be exactly 1)
 *
 * Return: 0 on success; -ENXIO if the device is no longer alive;
 * -EIO for multi-page requests; otherwise whatever the driver's
 * ->zero_page_range() operation returns.
 *
 * Note the guard order: liveness is checked first, so a dead device
 * reports -ENXIO even for an unsupported multi-page request.
 */
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	/*
	 * NOTE(review): ->zero_page_range is dereferenced unconditionally;
	 * alloc_dax() appears to reject an ops table lacking it — confirm
	 * no dax_device can reach here with a NULL op.
	 */
	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -551,9 +568,16 @@ struct dax_device *alloc_dax(void *private, const char *__host,
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax"
			 " operation zero_page_range()\n",
			 __host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
@@ -576,7 +600,7 @@ struct dax_device *alloc_dax(void *private, const char *__host,
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);

+18 −0
Original line number Diff line number Diff line
@@ -201,10 +201,27 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

/*
 * Zero @nr_pages pages of a linear target: translate the target-relative
 * page offset to a sector on the underlying device, convert that back to
 * a device-relative page offset, and forward to the dax core.
 */
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	struct linear_c *lc = ti->private;
	sector_t mapped;
	int rc;

	/* Target-relative sector, remapped onto the backing device. */
	mapped = linear_map_sector(ti, pgoff * PAGE_SECTORS);

	/* Rewrites pgoff as the backing device's page offset. */
	rc = bdev_dax_pgoff(lc->dev->bdev, mapped, nr_pages << PAGE_SHIFT,
			    &pgoff);
	if (rc)
		return rc;

	return dax_zero_page_range(lc->dev->dax_dev, pgoff, nr_pages);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif

static struct target_type linear_target = {
@@ -226,6 +243,7 @@ static struct target_type linear_target = {
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
	.dax_zero_page_range = linear_dax_zero_page_range,
};

int __init dm_linear_init(void)
Loading