Commit 82ac4043 authored by Linus Torvalds

Merge branch 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache QoS updates from Ingo Molnar:
 "An RDT cleanup and a fix for RDT initialization of new resource
  groups"

* 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Initialize a new resource group with default MBA values
  x86/resctrl: Move per RDT domain initialization to a separate function
parents 75571d82 47820e73
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
	if (cpumask_empty(cpu_mask) || mba_sc)
		goto done;
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	/* Update resource control msr on this CPU if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on other cpus. */
	/* Update resource control msr on other CPUs. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

+100 −73
Original line number Diff line number Diff line
@@ -2516,37 +2516,23 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
	bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}

/**
 * rdtgroup_init_alloc - Initialize the new RDT group's allocations
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations. That is, all shareable and unused bits.
/*
 * Initialize cache resources per RDT domain
 *
 * All-zero CBM is invalid. If there are no more shareable bits available
 * on any domain then the entire allocation will fail.
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
				 u32 closid)
{
	struct rdt_resource *r_cdp = NULL;
	struct rdt_domain *d_cdp = NULL;
	u32 used_b = 0, unused_b = 0;
	u32 closid = rdtgrp->closid;
	struct rdt_resource *r;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	struct rdt_domain *d;
	u32 peer_ctl, *ctrl;
	int i, ret;
	int i;

	for_each_alloc_enabled_rdt_resource(r) {
		/*
		 * Only initialize default allocations for CBM cache
		 * resources
		 */
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		list_for_each_entry(d, &r->domains, list) {
	rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
	d->have_new_ctrl = false;
	d->new_ctrl = r->cache.shareable_bits;
@@ -2558,10 +2544,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				break;
			/*
					 * If CDP is active include peer
					 * domain's usage to ensure there
					 * is no overlap with an exclusive
					 * group.
			 * If CDP is active include peer domain's
			 * usage to ensure there is no overlap
			 * with an exclusive group.
			 */
			if (d_cdp)
				peer_ctl = d_cdp->ctrl_val[i];
@@ -2583,33 +2568,75 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
	 */
	cbm_ensure_valid(&d->new_ctrl, r);
	/*
			 * Assign the u32 CBM to an unsigned long to ensure
			 * that bitmap_weight() does not access out-of-bound
			 * memory.
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = d->new_ctrl;
			if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
			    r->cache.min_cbm_bits) {
				rdt_last_cmd_printf("No space on %s:%d\n",
						    r->name, d->id);
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
		return -ENOSPC;
	}
	d->have_new_ctrl = true;
		}

	return 0;
}

	for_each_alloc_enabled_rdt_resource(r) {
/*
		 * Only initialize default allocations for CBM cache
		 * resources
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * Returns 0 on success, or the first negative error code reported by
 * __init_one_rdt_domain(). If there are no more shareable bits
 * available on any domain then the entire allocation fails.
 */
static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
{
	struct rdt_domain *dom;
	int err;

	/* Stop at the first domain that cannot be initialized. */
	list_for_each_entry(dom, &r->domains, list) {
		err = __init_one_rdt_domain(dom, r, closid);
		if (err < 0)
			return err;
	}

	return 0;
}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r)
{
	struct rdt_domain *dom;

	/*
	 * Every domain gets the default bandwidth allocation; with the
	 * software controller active the value is in MBps, otherwise the
	 * resource's native default control value is used.
	 */
	list_for_each_entry(dom, &r->domains, list) {
		if (is_mba_sc(r))
			dom->new_ctrl = MBA_MAX_MBPS;
		else
			dom->new_ctrl = r->default_ctrl;
		dom->have_new_ctrl = true;
	}
}

/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r;
	int ret;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA) {
			rdtgroup_init_mba(r);
		} else {
			ret = rdtgroup_init_cat(r, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
		}

	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;