Commit fe662d84 authored by Christian König, committed by Christian König

drm/ttm: remove io_reserve_lru handling v3

That is not used any more.

v2: keep the NULL checks in TTM.
v3: remove unused variable

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/388646/
parent 141b15e5
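
At its core the patch replaces explicit reservation bookkeeping (a per-resource io_reserved_count, a per-manager io_reserve_mutex and an eviction LRU) with an implicit convention: an io reservation exists exactly when one of mem->bus.base, mem->bus.offset or mem->bus.addr is non-zero, which makes ttm_mem_io_reserve()/ttm_mem_io_free() idempotent. A self-contained userspace sketch of the new semantics follows; the trimmed types and the stand-in driver hook are illustrative assumptions, not the kernel code:

/* Hypothetical, minimal model of the new io reservation semantics. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bus_placement {
	void *addr;            /* kernel virtual address, if ioremapped */
	unsigned long base;    /* bus base address */
	unsigned long offset;  /* offset within the aperture */
	bool is_iomem;
};

/* Stand-in for bdev->driver->io_mem_reserve(). */
static int driver_io_mem_reserve(struct bus_placement *bus)
{
	bus->base = 0xd0000000;	/* pretend PCI aperture */
	bus->offset = 0x1000;
	bus->is_iomem = true;
	return 0;
}

/* New scheme: reservation state is encoded in the bus fields themselves,
 * so the call is idempotent -- no io_reserved_count, no LRU, no mutex. */
static int mem_io_reserve(struct bus_placement *bus)
{
	if (bus->base || bus->offset || bus->addr)
		return 0;	/* already reserved */

	bus->is_iomem = false;
	return driver_io_mem_reserve(bus);
}

static void mem_io_free(struct bus_placement *bus)
{
	if (!bus->base && !bus->offset && !bus->addr)
		return;		/* nothing reserved: free is a no-op */

	/* the driver's io_mem_free() hook would run here */
	bus->base = 0;
	bus->offset = 0;
	bus->addr = NULL;
}

int main(void)
{
	struct bus_placement bus = {0};

	mem_io_reserve(&bus);
	mem_io_reserve(&bus);	/* second call is a no-op */
	printf("base=%#lx offset=%#lx\n", bus.base, bus.offset);
	mem_io_free(&bus);
	return 0;
}

Because reserve and free can now be called unconditionally, the lock/unlock pairs and the *_vm wrappers at every call site in the hunks below simply collapse.
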
+4 −30
@@ -263,11 +263,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
-	ret = ttm_mem_io_lock(old_man, true);
-	if (unlikely(ret != 0))
-		goto out_err;
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(old_man);
+	ttm_bo_unmap_virtual(bo);
 
 	/*
 	 * Create and bind a ttm if required.
@@ -538,7 +534,6 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 	size_t acc_size = bo->acc_size;
 	int ret;
 
@@ -556,9 +551,7 @@ static void ttm_bo_release(struct kref *kref)
 			bo->bdev->driver->release_notify(bo);
 
 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-		ttm_mem_io_lock(man, false);
-		ttm_mem_io_free_vm(bo);
-		ttm_mem_io_unlock(man);
+		ttm_mem_io_free(bdev, &bo->mem);
 	}
 
 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -648,8 +641,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
 	evict_mem.bus.base = 0;
 	evict_mem.bus.offset = 0;
 	evict_mem.bus.addr = NULL;
@@ -1085,8 +1076,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved_vm = false;
-	mem.bus.io_reserved_count = 0;
 	mem.bus.base = 0;
 	mem.bus.offset = 0;
 	mem.bus.addr = NULL;
@@ -1238,7 +1227,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
-	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -1247,8 +1235,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved_vm = false;
-	bo->mem.bus.io_reserved_count = 0;
 	bo->mem.bus.base = 0;
 	bo->mem.bus.offset = 0;
 	bo->mem.bus.addr = NULL;
@@ -1554,25 +1540,13 @@ EXPORT_SYMBOL(ttm_bo_device_init);
  * buffer object vm functions.
  */
 
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-
-	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free_vm(bo);
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 
-	ttm_mem_io_lock(man, false);
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(man);
+	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
-
-
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
+9 −108
@@ -91,122 +91,42 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return 0;
-
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_resource_manager *man)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_resource_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	bo = list_first_entry_or_null(&man->io_reserve_lru,
-				      struct ttm_buffer_object,
-				      io_reserve_lru);
-	if (!bo)
-		return -ENOSPC;
-
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-	return 0;
-}
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 		       struct ttm_resource *mem)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
-	int ret;
-
-	if (mem->bus.io_reserved_count++)
+	if (mem->bus.base || mem->bus.offset || mem->bus.addr)
 		return 0;
 
+	mem->bus.is_iomem = false;
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
 
-	mem->bus.addr = NULL;
-	mem->bus.offset = 0;
-	mem->bus.base = 0;
-	mem->bus.is_iomem = false;
-retry:
-	ret = bdev->driver->io_mem_reserve(bdev, mem);
-	if (ret == -ENOSPC) {
-		ret = ttm_mem_io_evict(man);
-		if (ret == 0)
-			goto retry;
-	}
-	return ret;
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		     struct ttm_resource *mem)
 {
-	if (--mem->bus.io_reserved_count)
-		return;
-
-	if (!bdev->driver->io_mem_free)
+	if (!mem->bus.base && !mem->bus.offset && !mem->bus.addr)
 		return;
 
-	bdev->driver->io_mem_free(bdev, mem);
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
-	struct ttm_resource *mem = &bo->mem;
-	int ret;
-
-	if (mem->bus.io_reserved_vm)
-		return 0;
-
-	ret = ttm_mem_io_reserve(bo->bdev, mem);
-	if (unlikely(ret != 0))
-		return ret;
-	mem->bus.io_reserved_vm = true;
-	if (man->use_io_reserve_lru)
-		list_add_tail(&bo->io_reserve_lru,
-			      &man->io_reserve_lru);
-	return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource *mem = &bo->mem;
-
-	if (!mem->bus.io_reserved_vm)
-		return;
+	if (bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
 
-	mem->bus.io_reserved_vm = false;
-	list_del_init(&bo->io_reserve_lru);
-	ttm_mem_io_free(bo->bdev, mem);
+	mem->bus.base = 0;
+	mem->bus.offset = 0;
+	mem->bus.addr = NULL;
 }
 
 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 			       struct ttm_resource *mem,
 			       void **virtual)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -222,9 +142,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 			addr = ioremap(mem->bus.base + mem->bus.offset,
 				       bus_size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -236,15 +154,9 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
 				struct ttm_resource *mem,
 				void *virtual)
 {
-	struct ttm_resource_manager *man;
-
-	man = ttm_manager_type(bdev, mem->mem_type);
-
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -458,7 +370,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
-	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
@@ -573,8 +484,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
 	unsigned long offset, size;
 	int ret;
 
@@ -585,9 +494,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -602,10 +509,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -623,9 +526,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
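
The "keep the NULL checks in TTM" note from v2 refers to drivers that implement io_mem_reserve without a matching io_mem_free: the new ttm_mem_io_free() guards the callback and then clears the bus fields itself. A hypothetical, self-contained illustration with trimmed types and a dummy driver (not the kernel structures):

#include <stddef.h>
#include <stdio.h>

struct bus_placement { unsigned long base, offset; void *addr; };

struct driver_funcs {
	int  (*io_mem_reserve)(struct bus_placement *bus);
	void (*io_mem_free)(struct bus_placement *bus);	/* may be NULL */
};

static int dummy_reserve(struct bus_placement *bus)
{
	bus->base = 0xd0000000;	/* pretend aperture base */
	return 0;
}

/* Mirrors the new ttm_mem_io_free(): bail out if nothing is reserved,
 * call the hook only if the driver provides one, then clear the state
 * so a later reserve starts fresh. */
static void mem_io_free(const struct driver_funcs *drv,
			struct bus_placement *bus)
{
	if (!bus->base && !bus->offset && !bus->addr)
		return;

	if (drv->io_mem_free)
		drv->io_mem_free(bus);

	bus->base = 0;
	bus->offset = 0;
	bus->addr = NULL;
}

int main(void)
{
	struct driver_funcs drv = { .io_mem_reserve = dummy_reserve };
	struct bus_placement bus = {0};

	drv.io_mem_reserve(&bus);
	mem_io_free(&drv, &bus);	/* no io_mem_free hook: still safe */
	printf("base=%#lx\n", bus.base);
	return 0;
}

Clearing the fields in free is what keeps the "is a reservation held?" test in ttm_mem_io_reserve() honest without a separate counter.
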
+11 −28
@@ -281,8 +281,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bdev, bo->mem.mem_type);
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
@@ -321,24 +319,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	if (unlikely(ret != 0))
 		return ret;
 
-	err = ttm_mem_io_lock(man, true);
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
 	if (unlikely(err != 0))
-		return VM_FAULT_NOPAGE;
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
@@ -350,21 +341,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		};
 
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bo->ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bo->ttm, &ctx))
+			return VM_FAULT_OOM;
 	} else {
 		/* Iomem should not be marked encrypted */
 		prot = pgprot_decrypted(prot);
 	}
 
 	/* We don't prefault on huge faults. Yet. */
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
-		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
-					    fault_page_size, prot);
-		goto out_io_unlock;
-	}
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					     fault_page_size, prot);
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
@@ -376,8 +363,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -404,7 +390,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -413,9 +399,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
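
With the manager mutex gone, the fault path loses its out_io_unlock label: every failure returns its vm_fault_t code directly, and an insertion error on a prefaulted PTE after the first page still completes with VM_FAULT_NOPAGE. A compact sketch of the resulting control flow; the fault codes and predicates are stand-ins for illustration, not the kernel's types:

#include <stdio.h>

enum fault_code { FAULT_NOPAGE, FAULT_SIGBUS, FAULT_OOM };

static enum fault_code fault_reserved(int reserve_ok, long page_offset,
				      long num_pages, int populate_ok)
{
	/* Direct returns replace the old "goto out_io_unlock": there is
	 * no io_reserve lock left to drop on the way out. */
	if (!reserve_ok)
		return FAULT_SIGBUS;	/* ttm_mem_io_reserve() failed */
	if (page_offset >= num_pages)
		return FAULT_SIGBUS;	/* fault outside the object */
	if (!populate_ok)
		return FAULT_OOM;	/* ttm_tt_populate() failed */

	for (long i = 0; i < num_pages - page_offset; ++i) {
		int insert_failed = 0;	/* pretend PTE insertion succeeds */

		/* Never error on prefaulted PTEs (mirrors the patch). */
		if (insert_failed) {
			if (i == 0)
				return FAULT_NOPAGE;
			break;
		}
	}
	return FAULT_NOPAGE;		/* PTEs are in place */
}

int main(void)
{
	printf("fault -> %d\n", fault_reserved(1, 0, 8, 1));
	return 0;
}
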
+0 −3
@@ -65,10 +65,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 {
 	unsigned i;
 
-	man->use_io_reserve_lru = false;
-	mutex_init(&man->io_reserve_mutex);
 	spin_lock_init(&man->move_lock);
-	INIT_LIST_HEAD(&man->io_reserve_lru);
 	man->size = p_size;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+0 −1
@@ -151,7 +151,6 @@ struct ttm_buffer_object {
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
-	struct list_head io_reserve_lru;
 
 	/**
 	 * Members protected by a bo reservation.