Commit 11d723ce authored by Matthew Auld, committed by Chris Wilson

drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests

Ditch the dubious static list of sizes to enumerate, in favour of
choosing a random size within the limits of each backing store. With
repeated CI runs this should give us a wider range of object sizes, and
in turn more page-size combinations, while using less machine time.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025153728.23689-6-chris@chris-wilson.co.uk
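
The random-size selection described above boils down to the igt_random_size() helper added by this patch. An annotated restatement is shown here for reference (a sketch only; kernel helpers such as prandom_u32_state(), is_power_of_2() and PAGE_MASK are assumed, and the authoritative version is the one in the diff below):

static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	/* Both bounds must be power-of-two sizes no smaller than a page. */
	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	/* Keep only page-aligned bits, giving a size below 2 * max_page_size. */
	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;

	/* OR-ing in the minimum guarantees the result never drops below it. */
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

Each backend entry in igt_ppgtt_smoke_huge() starts from its maximum size and, when object creation or page pinning fails with -E2BIG or -ENXIO, halves the upper bound and retries, so the test still exercises regions smaller than the nominal maximum.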
parent 23741bc8
@@ -1317,205 +1317,138 @@ out_device:
	return err;
}

static int igt_ppgtt_internal_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_64K,
		SZ_128K,
		SZ_256K,
		SZ_512K,
		SZ_1M,
		SZ_2M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through internal
	 * -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_internal(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("internal write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static int igt_ppgtt_gemfs_huge(void *arg)
static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_2M,
		SZ_4M,
		SZ_8M,
		SZ_16M,
		SZ_32M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through gemfs --
	 * ensure that our writes land in the right place.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_shmem(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
	return i915_gem_object_create_shmem(i915, size);
}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("gemfs write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

	return 0;
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	return err;
	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_lmem_huge(void *arg)
static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_64K,
		SZ_512K,
		SZ_1M,
		SZ_2M,
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int i;
	int err;

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through LMEM
	 * -- ensure that our writes land in the right place.
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = i915_gem_object_create_lmem(i915, size,
						  I915_BO_ALLOC_CONTIGUOUS);
		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				pr_info("object too big for region!\n");
				return 0;
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
		if (err) {
			if (err == -ENXIO) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
			pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
				size);
		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("LMEM write-huge failed with size=%u\n", size);
			goto out_unpin;
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
out_put:
		i915_gem_object_put(obj);

	if (err == -ENOMEM)
		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

@@ -1878,9 +1811,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_exhaust_huge),
		SUBTEST(igt_ppgtt_gemfs_huge),
		SUBTEST(igt_ppgtt_internal_huge),
		SUBTEST(igt_ppgtt_lmem_huge),
		SUBTEST(igt_ppgtt_smoke_huge),
	};
	struct drm_file *file;
	struct i915_gem_context *ctx;