Commit 17eae23b authored by Lucas Stach's avatar Lucas Stach
Browse files

drm/etnaviv: allow to request specific virtual address for gem mapping



Allow the mapping code to request a specific virtual address for the gem
mapping. If the virtual address is zero we fall back to the old mode of
allocating a virtual address for the mapping.

Signed-off-by: default avatarLucas Stach <l.stach@pengutronix.de>
Reviewed-by: default avatarPhilipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: default avatarGuido Günther <agx@sigxcpu.org>
parent edb5ff07
Loading
Loading
Loading
Loading
+2 −1
Original line number Original line Diff line number Diff line
@@ -308,7 +308,8 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	mapping->use = 1;
	mapping->use = 1;


	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base, mapping);
				    mmu_context->global->memory_base,
				    mapping, 0);
	if (ret < 0) {
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
		kfree(mapping);
+2 −1
Original line number Original line Diff line number Diff line
@@ -120,7 +120,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);


struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context);
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va);
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);


#endif /* __ETNAVIV_GEM_H__ */
#endif /* __ETNAVIV_GEM_H__ */
+14 −2
Original line number Original line Diff line number Diff line
@@ -220,9 +220,16 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
	return ret;
	return ret;
}
}


/*
 * Reserve the exact [va, va + size) range in the context's address space
 * for @node. Used when userspace requests a fixed softpin address instead
 * of letting the kernel pick one (see etnaviv_iommu_find_iova for the
 * search-based fallback).
 *
 * Returns 0 on success or a negative errno if the range cannot be
 * reserved (e.g. it is already occupied). NOTE(review): with start == va
 * and end == va + size the insert can only land at @va itself, so
 * DRM_MM_INSERT_LOWEST here is presumably just a deterministic mode
 * choice — confirm against drm_mm semantics.
 */
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	/* alignment 0, color 0: no extra placement constraints */
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
	struct etnaviv_vram_mapping *mapping, u64 va)
{
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	struct drm_mm_node *node;
@@ -248,7 +255,12 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,


	node = &mapping->vram_node;
	node = &mapping->vram_node;


	ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->base.size);
	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
	if (ret < 0)
		goto unlock;
		goto unlock;


+1 −1
Original line number Original line Diff line number Diff line
@@ -88,7 +88,7 @@ struct etnaviv_gem_object;


int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping);
	struct etnaviv_vram_mapping *mapping, u64 va);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping);
	struct etnaviv_vram_mapping *mapping);