Commit 6f510923 authored by Linus Torvalds
Pull char/misc driver fixes from Greg KH:
 "Here are some binder, habanalabs, and vboxguest driver fixes for
  5.1-rc3.

  The Binder fixes resolve some reported issues found by testing, first
  by the selinux developers, and then earlier today by syzbot.

  The habanalabs fixes are all minor, resolving a number of tiny things.

  The vboxguest patches are a bit larger. They resolve the fact that
  virtual box decided to change their api in their latest release in a
  way that broke the existing kernel code, despite saying that they were
  never going to do that. So this is a bit of a "new feature", but is
  good to get merged so that 5.1 will work with the latest release. The
  changes are not large and of course virtual box "swears" they will not
  break this again, but no one is holding their breath here.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  virt: vbox: Implement passing requestor info to the host for VirtualBox 6.0.x
  binder: fix race between munmap() and direct reclaim
  binder: fix BUG_ON found by selinux-testsuite
  habanalabs: cast to expected type
  habanalabs: prevent host crash during suspend/resume
  habanalabs: perform accounting for active CS
  habanalabs: fix mapping with page size bigger than 4KB
  habanalabs: complete user context cleanup before hard reset
  habanalabs: fix bug when mapping very large memory area
  habanalabs: fix MMU number of pages calculation
parents 3467b907 0532a1b0
drivers/android/binder.c: +2 −1
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
 	size_t object_size = 0;
 
 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
 				      offset, read_size);
drivers/android/binder_alloc.c: +8 −10
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_read_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
+	vma = binder_alloc_get_vma(alloc);
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
drivers/misc/habanalabs/command_submission.c: +6 −0
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
 	/* We also need to update CI for internal queues */
 	if (cs->submitted) {
+		int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+		WARN_ONCE((cs_cnt < 0),
+			"hl%d: error in CS active cnt %d\n",
+			hdev->id, cs_cnt);
+
 		hl_int_hw_queue_update_ci(cs);
 
 		spin_lock(&hdev->hw_queues_mirror_lock);
drivers/misc/habanalabs/debugfs.c: +4 −3
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 	enum vm_type_t *vm_type;
 	bool once = true;
+	u64 j;
 	int i;
 
 	if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
 			} else {
 				phys_pg_pack = hnode->ptr;
 				seq_printf(s,
-					"    0x%-14llx      %-10u       %-4u\n",
+					"    0x%-14llx      %-10llu       %-4u\n",
 					hnode->vaddr, phys_pg_pack->total_size,
 					phys_pg_pack->handle);
 			}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
 						phys_pg_pack->page_size);
 			seq_puts(s, "   physical address\n");
 			seq_puts(s, "---------------------\n");
-			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
 				seq_printf(s, "    0x%-14llx\n",
-						phys_pg_pack->pages[i]);
+						phys_pg_pack->pages[j]);
 			}
 		}
 		spin_unlock(&vm->idr_lock);
drivers/misc/habanalabs/device.c: +66 −5
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
 	if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
 	spin_lock_init(&hdev->hw_queues_mirror_lock);
 	atomic_set(&hdev->in_reset, 0);
 	atomic_set(&hdev->fd_open_cnt, 0);
+	atomic_set(&hdev->cs_active_cnt, 0);
 
 	return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
 	pci_save_state(hdev->pdev);
 
+	/* Block future CS/VM/JOB completion operations */
+	rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+	if (rc) {
+		dev_err(hdev->dev, "Can't suspend while in reset\n");
+		return -EIO;
+	}
+
+	/* This blocks all other stuff that is not blocked by in_reset */
+	hdev->disabled = true;
+
+	/*
+	 * Flush anyone that is inside the critical section of enqueue
+	 * jobs to the H/W
+	 */
+	hdev->asic_funcs->hw_queues_lock(hdev);
+	hdev->asic_funcs->hw_queues_unlock(hdev);
+
+	/* Flush processes that are sending message to CPU */
+	mutex_lock(&hdev->send_cpu_message_lock);
+	mutex_unlock(&hdev->send_cpu_message_lock);
+
 	rc = hdev->asic_funcs->suspend(hdev);
 	if (rc)
 		dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
 	pci_set_power_state(hdev->pdev, PCI_D0);
 	pci_restore_state(hdev->pdev);
-	rc = pci_enable_device(hdev->pdev);
+	rc = pci_enable_device_mem(hdev->pdev);
 	if (rc) {
 		dev_err(hdev->dev,
 			"Failed to enable PCI device in resume\n");
 		return rc;
 	}
 
+	pci_set_master(hdev->pdev);
+
 	rc = hdev->asic_funcs->resume(hdev);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to enable PCI access from device CPU\n");
-		return rc;
+		dev_err(hdev->dev, "Failed to resume device after suspend\n");
+		goto disable_device;
 	}
 
+
+	hdev->disabled = false;
+	atomic_set(&hdev->in_reset, 0);
+
+	rc = hl_device_reset(hdev, true, false);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to reset device during resume\n");
+		goto disable_device;
+	}
+
 	return 0;
+
+disable_device:
+	pci_clear_master(hdev->pdev);
+	pci_disable_device(hdev->pdev);
+
+	return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 	struct hl_device_reset_work *device_reset_work =
 		container_of(work, struct hl_device_reset_work, reset_work);
 	struct hl_device *hdev = device_reset_work->hdev;
-	u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+	u16 pending_total, pending_cnt;
 	struct task_struct *task = NULL;
 
+	if (hdev->pldm)
+		pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+	else
+		pending_total = HL_PENDING_RESET_PER_SEC;
+
+	pending_cnt = pending_total;
+
 	/* Flush all processes that are inside hl_open */
 	mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 		}
 	}
 
+	pending_cnt = pending_total;
+
+	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+		pending_cnt--;
+
+		ssleep(1);
+	}
+
+	if (atomic_read(&hdev->fd_open_cnt))
+		dev_crit(hdev->dev,
+			"Going to hard reset with open user contexts\n");
+
 	mutex_unlock(&hdev->fd_open_cnt_lock);
 
 	hl_device_reset(hdev, true, true);