Commit f4013ca6 authored by Vishal Verma's avatar Vishal Verma
Browse files

Merge branch 'for-5.9/dax' into libnvdimm-for-next

This contains a handful of dax changes for v5.9. Of the three commits,
one is a print verbosity change, and two are independent fixes that fell
out of the PKS work [1].

[1]: https://lore.kernel.org/linux-nvdimm/20200717072056.73134-1-ira.weiny@intel.com
parents 48778464 eedfd73d
Loading
Loading
Loading
Loading
+7 −6
Original line number Diff line number Diff line
@@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
	int err, id;

	if (blocksize != PAGE_SIZE) {
-		pr_debug("%s: error: unsupported blocksize for dax\n",
+		pr_info("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
-		pr_debug("%s: error: unaligned partition for dax\n",
+		pr_info("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}
@@ -95,7 +95,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
-		pr_debug("%s: error: unaligned partition for dax\n",
+		pr_info("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}
@@ -103,11 +103,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
-	dax_read_unlock(id);

	if (len < 1 || len2 < 1) {
-		pr_debug("%s: error: dax access failed (%ld)\n",
+		pr_info("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len < 1 ? len : len2);
+		dax_read_unlock(id);
		return false;
	}

@@ -137,9 +137,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
		put_dev_pagemap(end_pgmap);

	}
+	dax_read_unlock(id);

	if (!dax_enabled) {
-		pr_debug("%s: error: dax support not enabled\n",
+		pr_info("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
+6 −7
Original line number Diff line number Diff line
@@ -680,21 +680,20 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
	return __dax_invalidate_entry(mapping, index, false);
}

-static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
-		sector_t sector, size_t size, struct page *to,
-		unsigned long vaddr)
+static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
+			     sector_t sector, struct page *to, unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

-	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
-	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
+	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
@@ -1305,8 +1304,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
-			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
-					sector, PAGE_SIZE, vmf->cow_page, vaddr);
+			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
+						  sector, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);