Commit 75390281 authored by Thomas Hellstrom (VMware)

drm/vmwgfx: Support huge page faults

With vmwgfx dirty-tracking we need a specialized huge_fault
callback. Implement and hook it up.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Acked-by: Christian König <christian.koenig@amd.com>
parent 314b6580
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  +4 −0
@@ -1402,6 +1402,10 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size);
#endif

/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
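
For reference, the enum page_entry_size used in the new prototype comes from include/linux/mm.h; a sketch of its definition at the time of this patch (quoted for context, not part of the diff):

	enum page_entry_size {
		PE_SIZE_PTE = 0,
		PE_SIZE_PMD,
		PE_SIZE_PUD,
	};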
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c  +73 −1
@@ -473,7 +473,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
	 * a lot of unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
-		prot = vma->vm_page_prot;
+		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

@@ -486,3 +486,75 @@ out_unlock:

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	pgprot_t prot;
	vm_fault_t ret;
	pgoff_t fault_page_size;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool is_cow_mapping =
		(vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	switch (pe_size) {
	case PE_SIZE_PMD:
		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
		break;
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		return VM_FAULT_FALLBACK;
	}

	/* Always do write dirty-tracking and COW on PTE level. */
	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
		return VM_FAULT_FALLBACK;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= bo->num_pages ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		/*
		 * Write protect, so we get a new fault on write, and can
		 * split.
		 */
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	} else {
		prot = vm_get_page_prot(vma->vm_flags);
	}

	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}
#endif
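
The vm_get_page_prot(vma->vm_flags & ~VM_SHARED) idiom used in both fault handlers leans on the core mm protection table: with VM_SHARED cleared, a writable mapping resolves to the private (COW-style) protection, which is write-protected, so the first store re-faults at PTE level where the mkwrite dirty-tracking path can observe it. A simplified sketch of vm_get_page_prot() from mm/mmap.c, with the arch-specific hooks omitted (for illustration only):

	/* Simplified; the real function also applies arch hooks. */
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]));
	}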
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c  +4 −1
@@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
		.page_mkwrite = vmw_bo_vm_mkwrite,
		.fault = vmw_bo_vm_fault,
		.open = ttm_bo_vm_open,
-		.close = ttm_bo_vm_close
+		.close = ttm_bo_vm_close,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		.huge_fault = vmw_bo_vm_huge_fault,
#endif
	};
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
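
On the caller side, the core mm only consults ->huge_fault when a huge mapping is possible at the faulting address, and it falls back to PTE-granularity faulting whenever the handler returns VM_FAULT_FALLBACK, which is exactly what the dirty-tracking and COW early-outs above rely on. A simplified sketch of the PMD path in mm/memory.c (for illustration, not part of this patch):

	static vm_fault_t create_huge_pmd(struct vm_fault *vmf)
	{
		if (vma_is_anonymous(vmf->vma))
			return do_huge_pmd_anonymous_page(vmf);
		/* Device mappings without a handler, or handlers returning
		 * VM_FAULT_FALLBACK, are faulted in with PTEs instead.
		 */
		if (vmf->vma->vm_ops->huge_fault)
			return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
		return VM_FAULT_FALLBACK;
	}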