Commit 088880dd authored by Lucas Stach's avatar Lucas Stach
Browse files

drm/etnaviv: implement softpin



With softpin we allow the userspace to take control over the GPU virtual
address space. The new capability is reflected by a bump of the minor DRM
version. There are a few restrictions for userspace to take into
account:

1. The kernel reserves a bit of the address space to implement zero page
faulting and mapping of the kernel internal ring buffer. Userspace can
query the kernel for the first usable GPU VM address via
ETNAVIV_PARAM_SOFTPIN_START_ADDR.

2. We only allow softpin on GPUs, which implement proper process
separation via PPAS. If softpin is not available the softpin start
address will be set to ~0.

3. Softpin is all or nothing. A submit using softpin must not use any
address fixups via relocs.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
parent 17eae23b
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -44,6 +44,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev)
	mutex_init(&suballoc->lock);
	mutex_init(&suballoc->lock);
	init_waitqueue_head(&suballoc->free_event);
	init_waitqueue_head(&suballoc->free_event);


	BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE);
	suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
	suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
				       &suballoc->paddr, GFP_KERNEL);
				       &suballoc->paddr, GFP_KERNEL);
	if (!suballoc->vaddr) {
	if (!suballoc->vaddr) {
+1 −1
Original line number Original line Diff line number Diff line
@@ -529,7 +529,7 @@ static struct drm_driver etnaviv_drm_driver = {
	.desc               = "etnaviv DRM",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.date               = "20151214",
	.major              = 1,
	.major              = 1,
	.minor              = 2,
	.minor              = 3,
};
};


/*
/*
+2 −0
Original line number Original line Diff line number Diff line
@@ -24,6 +24,8 @@ struct etnaviv_gem_object;
struct etnaviv_gem_submit;
struct etnaviv_gem_submit;
struct etnaviv_iommu_global;
struct etnaviv_iommu_global;


#define ETNAVIV_SOFTPIN_START_ADDRESS	SZ_4M /* must be >= SUBALLOC_SIZE */

struct etnaviv_file_private {
struct etnaviv_file_private {
	struct etnaviv_iommu_context	*mmu;
	struct etnaviv_iommu_context	*mmu;
	struct drm_sched_entity		sched_entity[ETNA_MAX_PIPES];
	struct drm_sched_entity		sched_entity[ETNA_MAX_PIPES];
+3 −2
Original line number Original line Diff line number Diff line
@@ -248,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
}
}


struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context)
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct etnaviv_vram_mapping *mapping;
@@ -309,7 +310,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(


	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mmu_context->global->memory_base,
				    mapping, 0);
				    mapping, va);
	if (ret < 0) {
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
		kfree(mapping);
+1 −0
Original line number Original line Diff line number Diff line
@@ -77,6 +77,7 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)


struct etnaviv_gem_submit_bo {
struct etnaviv_gem_submit_bo {
	u32 flags;
	u32 flags;
	u64 va;
	struct etnaviv_gem_object *obj;
	struct etnaviv_gem_object *obj;
	struct etnaviv_vram_mapping *mapping;
	struct etnaviv_vram_mapping *mapping;
	struct dma_fence *excl;
	struct dma_fence *excl;
Loading