Commit 1a77decd authored by Ralph Campbell, committed by Jason Gunthorpe
Browse files

nouveau: fix storing invalid ptes

When migrating a range of system memory to device private memory, some of
the pages in the address range may not be migrating. In this case, the
non-migrating pages won't have a new GPU MMU entry to store, but the
nvif_object_ioctl() NVIF_VMM_V0_PFNMAP method doesn't check its input and
stores a bad GPU page table entry that is nonetheless marked valid.

Fix this by skipping the invalid input PTEs when updating the GPU page
tables.

Link: https://lore.kernel.org/r/20200723223004.9586-2-rcampbell@nvidia.com


Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b223555d
Loading
Loading
Loading
Loading
+9 −4
Original line number Diff line number Diff line
@@ -79,8 +79,12 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
	for (; ptes; ptes--, map->pfn++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

@@ -100,7 +104,6 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		}

		VMM_WO064(pt, vmm, ptei++ * 8, data);
		map->pfn++;
	}
	nvkm_done(pt->memory);
}
@@ -310,9 +313,12 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
	for (; ptes; ptes--, map->pfn++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

@@ -332,7 +338,6 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		}

		VMM_WO064(pt, vmm, ptei++ * 16, data);
		map->pfn++;
	}
	nvkm_done(pt->memory);
}