Commit 5e161e40 authored by Jan Kara, committed by Dan Williams
Browse files

dax: Factor out getting of pfn out of iomap



Factor out the code that obtains a pfn from an iomap, which is shared
between the PTE and PMD fault paths.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 31a6f1a6
Loading
Loading
Loading
Loading
+43 −40
Original line number Diff line number Diff line
@@ -825,30 +825,53 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
			      loff_t pos, void *entry)
static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long vaddr = vmf->address;
	void *ret, *kaddr;
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	pfn_t pfn;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(PAGE_SIZE),
			       &kaddr, &pfn);
	if (rc < 0) {
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}
	dax_read_unlock(id);

static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
			      loff_t pos, void *entry)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long vaddr = vmf->address;
	void *ret;
	int rc;
	pfn_t pfn;

	rc = dax_iomap_pfn(iomap, pos, PAGE_SIZE, &pfn);
	if (rc < 0)
		return rc;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
@@ -1223,46 +1246,26 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	void *ret = NULL;
	pfn_t pfn = {};
	int id;
	int rc;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
	rc = dax_iomap_pfn(iomap, pos, PMD_SIZE, &pfn);
	if (rc < 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, PMD_SIZE, pfn, ret);
	return VM_FAULT_FALLBACK;
}