Commit 93392217 authored by Thomas Hellstrom
Browse files

drm/vmwgfx: Implement an infrastructure for write-coherent resources



This infrastructure will, for coherent resources, make sure that
from the user-space point of view, data written by the CPU is immediately
automatically available to the GPU at resource validation time.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent 7a39f35c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@ config DRM_VMWGFX
	select FB_CFB_IMAGEBLIT
	select DRM_TTM
	select FB
	select AS_DIRTY_HELPERS
	# Only needed for the transitional use of drm_crtc_init - can be removed
	# again once vmwgfx sets up the primary plane itself.
	select DRM_KMS_HELPER
+1 −1
Original line number Diff line number Diff line
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
	    vmwgfx_validation.o \
	    vmwgfx_validation.o vmwgfx_page_dirty.o \
	    ttm_object.o ttm_lock.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+4 −1
Original line number Diff line number Diff line
@@ -463,6 +463,7 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}
@@ -476,8 +477,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	vmw_bo_unmap(&vmw_user_bo->vbo);
	WARN_ON(vbo->dirty);
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

+5 −0
Original line number Diff line number Diff line
@@ -833,6 +833,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}
	dev_priv->vm_ops = *dev_priv->bdev.vm_ops;
	dev_priv->vm_ops.fault = vmw_bo_vm_fault;
	dev_priv->vm_ops.pfn_mkwrite = vmw_bo_vm_mkwrite;
	dev_priv->vm_ops.page_mkwrite = vmw_bo_vm_mkwrite;
	dev_priv->bdev.vm_ops = &dev_priv->vm_ops;

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
+23 −3
Original line number Diff line number Diff line
@@ -94,6 +94,7 @@ struct vmw_fpriv {
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
@@ -104,6 +105,7 @@ struct vmw_buffer_object {
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct vmw_bo_dirty *dirty;
};

/**
@@ -134,7 +136,8 @@ struct vmw_res_func;
 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
 * by resource reserved.
 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 * Protecte by resource reserved.
 * Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @backup: The backup buffer if any. Protected by resource reserved.
 * @backup_offset: Offset into the backup buffer if any. Protected by resource
 * reserved. Note that only a few resource types can have a @backup_offset
@@ -151,14 +154,16 @@ struct vmw_res_func;
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long backup_size;
	bool res_dirty;
	bool backup_dirty;
	u32 res_dirty : 1;
	u32 backup_dirty : 1;
	u32 coherent : 1;
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count;
@@ -166,6 +171,7 @@ struct vmw_resource {
	struct list_head lru_head;
	struct list_head mob_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};
@@ -606,6 +612,9 @@ struct vmw_private {

	/* Validation memory reservation */
	struct vmw_validation_mem vvm;

	/* VM operations */
	struct vm_operations_struct vm_ops;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -722,6 +731,8 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
@@ -1410,6 +1421,15 @@ int vmw_host_log(const char *log);
#define VMW_DEBUG_USER(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);

/**
 * Inline helper functions
 */
Loading