Commit 7d17e83a authored by Ralph Campbell, committed by Jason Gunthorpe

mm/hmm/test: use the new migration invalidation

Use the new MMU_NOTIFY_MIGRATE event to skip MMU invalidations of device
private memory and handle the invalidation in the driver as part of
migrating device private memory.

Link: https://lore.kernel.org/r/20200723223004.9586-6-rcampbell@nvidia.com


Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f8477ce6
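
For context, the pattern described above can be sketched as a driver-side mmu_interval_notifier callback that recognizes invalidations triggered by its own device private migrations and skips them, since the driver's migration path already tears down the device page table entries. The sketch below is editorial and not part of this commit: "struct mydev" and mydev_update_device_ptes() are hypothetical names, while the callback signature, MMU_NOTIFY_MIGRATE, and migrate_pgmap_owner are the in-tree API that the diff below exercises.

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct mydev {
	struct mmu_interval_notifier notifier;
	struct mutex mutex;
	/* This pointer is also passed as args.pgmap_owner when migrating. */
};

/* Hypothetical helper: unmap/invalidate the device's copies of [start, end). */
static void mydev_update_device_ptes(struct mydev *mydev,
				     unsigned long start, unsigned long end)
{
	/* Device-specific page table update goes here. */
}

static bool mydev_interval_invalidate(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
{
	struct mydev *mydev = container_of(mni, struct mydev, notifier);

	/*
	 * The driver's own migrate_vma_setup() calls pass mydev as
	 * pgmap_owner, so invalidation for those migrations is handled in
	 * the migration path itself and can be skipped here.
	 */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == mydev)
		return true;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&mydev->mutex);
	else if (!mutex_trylock(&mydev->mutex))
		return false;

	mmu_interval_set_seq(mni, cur_seq);
	mydev_update_device_ptes(mydev, range->start, range->end);
	mutex_unlock(&mydev->mutex);
	return true;
}

The test driver change below is exactly this shape, and because dmirror_devmem_fault_alloc_and_copy() now removes the mirrored entries itself (the added xa_erase() call), the separate dmirror_devmem_fault_finalize_and_map() step can be dropped.
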
+17 −13
@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
 {
 	struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
 
+	/*
+	 * Ignore invalidation callbacks for device private pages since
+	 * the invalidation is handled as part of the migration process.
+	 */
+	if (range->event == MMU_NOTIFY_MIGRATE &&
+	    range->migrate_pgmap_owner == dmirror->mdevice)
+		return true;
+
 	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&dmirror->mutex);
 	else if (!mutex_trylock(&dmirror->mutex))
@@ -693,7 +701,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
 		args.dst = dst_pfns;
 		args.start = addr;
 		args.end = next;
-		args.pgmap_owner = NULL;
+		args.pgmap_owner = dmirror->mdevice;
 		args.flags = MIGRATE_VMA_SELECT_SYSTEM;
 		ret = migrate_vma_setup(&args);
 		if (ret)
@@ -983,7 +991,7 @@ static void dmirror_devmem_free(struct page *page)
 }
 
 static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
-						struct dmirror_device *mdevice)
+						      struct dmirror *dmirror)
 {
 	const unsigned long *src = args->src;
 	unsigned long *dst = args->dst;
@@ -1005,6 +1013,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 			continue;
 
 		lock_page(dpage);
+		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 		copy_highpage(dpage, spage);
 		*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 		if (*src & MIGRATE_PFN_WRITE)
@@ -1013,15 +1022,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 	return 0;
 }
 
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
-						  struct dmirror *dmirror)
-{
-	/* Invalidate the device's page table mapping. */
-	mutex_lock(&dmirror->mutex);
-	dmirror_do_update(dmirror, args->start, args->end);
-	mutex_unlock(&dmirror->mutex);
-}
-
 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 {
 	struct migrate_vma args;
@@ -1051,11 +1051,15 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 	if (migrate_vma_setup(&args))
 		return VM_FAULT_SIGBUS;
 
-	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
 	if (ret)
 		return ret;
 	migrate_vma_pages(&args);
-	dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+	/*
+	 * No device finalize step is needed since
+	 * dmirror_devmem_fault_alloc_and_copy() will have already
+	 * invalidated the device page table.
+	 */
 	migrate_vma_finalize(&args);
 	return 0;
 }
+14 −4
@@ -881,8 +881,9 @@ TEST_F(hmm, migrate)
 }
 
 /*
- * Migrate anonymous memory to device private memory and fault it back to system
- * memory.
+ * Migrate anonymous memory to device private memory and fault some of it back
+ * to system memory, then try migrating the resulting mix of system and device
+ * private memory to the device.
  */
 TEST_F(hmm, migrate_fault)
 {
@@ -924,8 +925,17 @@ TEST_F(hmm, migrate_fault)
 	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
 		ASSERT_EQ(ptr[i], i);
 
-	/* Fault pages back to system memory and check them. */
-	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+	/* Fault half the pages back to system memory and check them. */
+	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
 		ASSERT_EQ(ptr[i], i);
 
+	/* Migrate memory to the device again. */
+	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+	ASSERT_EQ(ret, 0);
+	ASSERT_EQ(buffer->cpages, npages);
+
+	/* Check what the device read. */
+	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+		ASSERT_EQ(ptr[i], i);
+
 	hmm_buffer_free(buffer);