Commit b15934b6, authored by Rebecca Schultz Zavin and committed by Greg Kroah-Hartman
Browse files

gpu: ion: Use alloc_pages instead of vmalloc from the system heap



With this change the ion_system_heap will only use kernel address
space when the memory is mapped into the kernel (rare case).

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b892bf75
Loading
Loading
Loading
Loading
+61 −33
Original line number Diff line number Diff line
@@ -26,75 +26,103 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	buffer->priv_virt = vmalloc_user(size);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	vfree(buffer->priv_virt);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	void *vaddr = buffer->priv_virt;
	int ret;
	int i, j;
	int npages = PAGE_ALIGN(size) / PAGE_SIZE;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, npages, GFP_KERNEL);
	if (ret)
		return -ENOMEM;
	i = sg_alloc_table(table, npages, GFP_KERNEL);
	if (i)
		goto err0;
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page;
		page = vmalloc_to_page(vaddr);
		if (!page) {
			ret = -ENOMEM;
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err1;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}
	return table;
	buffer->priv_virt = table;
	return 0;
err1:
	for_each_sg(table->sgl, sg, i, j)
		__free_page(sg_page(sg));
	sg_free_table(table);
err0:
	kfree(table);
	return ERR_PTR(ret);
	return -ENOMEM;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_page(sg_page(sg));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

void *ion_system_heap_map_kernel(struct ion_heap *heap,
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

/*
 * ion_system_heap_unmap_dma - no-op.
 *
 * map_dma only returned the table that allocate() built, so there is
 * nothing to tear down; the table lives until ion_system_heap_free().
 */
void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i;
	void *vaddr;
	struct sg_table *table = buffer->priv_virt;
	struct page **pages = kmalloc(sizeof(struct page *) * table->nents,
				      GFP_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i)
		pages[i] = sg_page(sg);
	vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);

	return vaddr;
}

/*
 * ion_system_heap_unmap_kernel - undo ion_system_heap_map_kernel().
 *
 * NOTE(review): buffer->vaddr presumably holds the vmap() result stored
 * by ion core after map_kernel returned — confirm against the caller.
 */
void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

/*
 * ion_system_heap_map_user - mmap the buffer's pages into userspace.
 *
 * Walks the sg_table, skipping vm_pgoff leading pages, and inserts each
 * remaining page into the vma one PAGE_SIZE slot at a time.
 *
 * Returns 0 on success or the error from vm_insert_page().  (Fix: the
 * original ignored vm_insert_page() failures and reported success even
 * when the mapping was incomplete.)
 */
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		/* consume pages before the requested mmap offset */
		if (offset) {
			offset--;
			continue;
		}
		ret = vm_insert_page(vma, addr, sg_page(sg));
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}
	return 0;
}

static struct ion_heap_ops vmalloc_ops = {