Commit 8ef4c4ab authored by Andrea Parri (Microsoft), committed by Wei Liu

Drivers: hv: vmbus: Remove the unused HV_LOCALIZED channel affinity logic



The logic is unused since commit 509879bd ("Drivers: hv: Introduce
a policy for controlling channel affinity").

This logic assumes that a channel's target_cpu doesn't change during the
lifetime of the channel, but this assumption is incompatible with the new
functionality that allows changing the vCPU a channel will interrupt.

Signed-off-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
Link: https://lore.kernel.org/r/20200406001514.19876-9-parri.andrea@gmail.com
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>
parent 240ad77c
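
The incompatibility called out in the commit message is easy to see in miniature: the hv_process_channel_removal() hunk removed below clears the alloced_cpus_in_node bit for the channel's current target_cpu, which undoes the bit set in init_vp_index() only if target_cpu never changed in between. A toy user-space illustration of the stale-bit leak (plain C; all names are hypothetical stand-ins, not kernel code):

#include <assert.h>

static unsigned int alloced_cpus_in_node;

/* Models init_vp_index(): pick a CPU and record it in the node mask. */
static void channel_init(unsigned int *target_cpu)
{
	*target_cpu = 2;
	alloced_cpus_in_node |= 1u << *target_cpu;
}

/* Models the removed cpumask_clear_cpu() call in hv_process_channel_removal(). */
static void channel_remove(unsigned int target_cpu)
{
	alloced_cpus_in_node &= ~(1u << target_cpu);
}

int main(void)
{
	unsigned int target_cpu;

	channel_init(&target_cpu);     /* bit 2 set */
	target_cpu = 5;                /* channel re-targeted at runtime */
	channel_remove(target_cpu);    /* clears bit 5, not bit 2 */
	assert(alloced_cpus_in_node == 1u << 2); /* stale bit leaked */
	return 0;
}

Once interrupts can be re-targeted, this bookkeeping cannot stay consistent, so the patch drops it together with the HV_LOCALIZED policy that needed it.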
drivers/hv/channel_mgmt.c +25 −80
@@ -433,14 +433,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
 		spin_unlock_irqrestore(&primary_channel->lock, flags);
 	}
 
-	/*
-	 * We need to free the bit for init_vp_index() to work in the case
-	 * of sub-channel, when we reload drivers like hv_netvsc.
-	 */
-	if (channel->affinity_policy == HV_LOCALIZED)
-		cpumask_clear_cpu(channel->target_cpu,
-				  &primary_channel->alloced_cpus_in_node);
-
 	/*
 	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
 	 * the relid is invalidated; after hibernation, when the user-space app
@@ -662,20 +654,21 @@ static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
 /*
  * Starting with Win8, we can statically distribute the incoming
  * channel interrupt load by binding a channel to VCPU.
- * We distribute the interrupt loads to one or more NUMA nodes based on
- * the channel's affinity_policy.
  *
  * For pre-win8 hosts or non-performance critical channels we assign the
  * first CPU in the first NUMA node.
+ *
+ * Starting with win8, performance critical channels will be distributed
+ * evenly among all the available NUMA nodes.  Once the node is assigned,
+ * we will assign the CPU based on a simple round robin scheme.
  */
 static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 {
-	u32 cur_cpu;
 	bool perf_chn = vmbus_devs[dev_type].perf_device;
-	struct vmbus_channel *primary = channel->primary_channel;
-	int next_node;
 	cpumask_var_t available_mask;
 	struct cpumask *alloced_mask;
+	u32 target_cpu;
+	int numa_node;
 
 	if ((vmbus_proto_version == VERSION_WS2008) ||
 	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
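
The rewritten comment and the new locals already describe the whole scheme that survives this patch: assign NUMA nodes round-robin, then CPUs within the node round-robin. A rough user-space model of that behavior (plain C with per-node bitmasks; all names hypothetical, and the kernel's empty-node skip is shown in the next hunk instead):

#include <stdio.h>

#define NR_NODES 2
#define CPUS_PER_NODE 4

static unsigned int alloced[NR_NODES]; /* models hv_context.hv_numa_map[] */
static int next_node;                  /* models next_numa_node_id */

static int assign_cpu(void)
{
	int node = next_node;

	next_node = (next_node + 1) % NR_NODES;

	/* All CPUs in the node used: reset and cycle again. */
	if (alloced[node] == (1u << CPUS_PER_NODE) - 1)
		alloced[node] = 0;

	for (int cpu = 0; cpu < CPUS_PER_NODE; cpu++) {
		if (!(alloced[node] & (1u << cpu))) {
			alloced[node] |= 1u << cpu;
			return node * CPUS_PER_NODE + cpu;
		}
	}
	return -1; /* unreachable: the reset above frees a bit */
}

int main(void)
{
	/* Channels alternate nodes and cycle CPUs: 0, 4, 1, 5, 2, 6, ... */
	for (int i = 0; i < 10; i++)
		printf("channel %d -> cpu %d\n", i, assign_cpu());
	return 0;
}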
@@ -693,31 +686,27 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 		return;
 	}
 
-	spin_lock(&bind_channel_to_cpu_lock);
-
 	/*
-	 * Based on the channel affinity policy, we will assign the NUMA
-	 * nodes.
+	 * Serializes the accesses to the global variable next_numa_node_id.
+	 * See also the header comment of the spin lock declaration.
 	 */
+	spin_lock(&bind_channel_to_cpu_lock);
 
-	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
-		while (true) {
-			next_node = next_numa_node_id++;
-			if (next_node == nr_node_ids) {
-				next_node = next_numa_node_id = 0;
-				continue;
-			}
-			if (cpumask_empty(cpumask_of_node(next_node)))
-				continue;
-			break;
-		}
-		channel->numa_node = next_node;
-		primary = channel;
+	while (true) {
+		numa_node = next_numa_node_id++;
+		if (numa_node == nr_node_ids) {
+			next_numa_node_id = 0;
+			continue;
+		}
+		if (cpumask_empty(cpumask_of_node(numa_node)))
+			continue;
+		break;
 	}
-	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
+	channel->numa_node = numa_node;
+	alloced_mask = &hv_context.hv_numa_map[numa_node];
 
 	if (cpumask_weight(alloced_mask) ==
-	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
+	    cpumask_weight(cpumask_of_node(numa_node))) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the alloced map.
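
The new node-selection loop reads well in isolation: post-increment the shared counter, wrap at nr_node_ids, and skip nodes that have no CPUs, all under bind_channel_to_cpu_lock. A minimal standalone extraction (user-space C; node_has_no_cpus() stubs cpumask_empty(cpumask_of_node()), and the lock is elided):

#include <stdbool.h>
#include <stdio.h>

#define NR_NODE_IDS 4

static int next_numa_node_id; /* serialized by bind_channel_to_cpu_lock in the kernel */

/* Stub for cpumask_empty(cpumask_of_node(node)): pretend node 2 is CPU-less. */
static bool node_has_no_cpus(int node)
{
	return node == 2;
}

static int pick_numa_node(void)
{
	int numa_node;

	while (true) {
		numa_node = next_numa_node_id++;
		if (numa_node == NR_NODE_IDS) {
			next_numa_node_id = 0;
			continue;
		}
		if (node_has_no_cpus(numa_node))
			continue;
		break;
	}
	return numa_node;
}

int main(void)
{
	/* Prints nodes 0, 1, 3, 0, 1, 3: node 2 is skipped, the counter wraps. */
	for (int i = 0; i < 6; i++)
		printf("channel %d -> node %d\n", i, pick_numa_node());
	return 0;
}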
@@ -725,57 +714,13 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 		cpumask_clear(alloced_mask);
 	}
 
-	cpumask_xor(available_mask, alloced_mask,
-		    cpumask_of_node(primary->numa_node));
-
-	cur_cpu = -1;
-
-	if (primary->affinity_policy == HV_LOCALIZED) {
-		/*
-		 * Normally Hyper-V host doesn't create more subchannels
-		 * than there are VCPUs on the node but it is possible when not
-		 * all present VCPUs on the node are initialized by guest.
-		 * Clear the alloced_cpus_in_node to start over.
-		 */
-		if (cpumask_equal(&primary->alloced_cpus_in_node,
-				  cpumask_of_node(primary->numa_node)))
-			cpumask_clear(&primary->alloced_cpus_in_node);
-	}
+	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
 
-	while (true) {
-		cur_cpu = cpumask_next(cur_cpu, available_mask);
-		if (cur_cpu >= nr_cpu_ids) {
-			cur_cpu = -1;
-			cpumask_copy(available_mask,
-				     cpumask_of_node(primary->numa_node));
-			continue;
-		}
-
-		if (primary->affinity_policy == HV_LOCALIZED) {
-			/*
-			 * NOTE: in the case of sub-channel, we clear the
-			 * sub-channel related bit(s) in
-			 * primary->alloced_cpus_in_node in
-			 * hv_process_channel_removal(), so when we
-			 * reload drivers like hv_netvsc in SMP guest, here
-			 * we're able to re-allocate
-			 * bit from primary->alloced_cpus_in_node.
-			 */
-			if (!cpumask_test_cpu(cur_cpu,
-					      &primary->alloced_cpus_in_node)) {
-				cpumask_set_cpu(cur_cpu,
-						&primary->alloced_cpus_in_node);
-				cpumask_set_cpu(cur_cpu, alloced_mask);
-				break;
-			}
-		} else {
-			cpumask_set_cpu(cur_cpu, alloced_mask);
-			break;
-		}
-	}
+	target_cpu = cpumask_first(available_mask);
+	cpumask_set_cpu(target_cpu, alloced_mask);
 
-	channel->target_cpu = cur_cpu;
-	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+	channel->target_cpu = target_cpu;
+	channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
 
 	spin_unlock(&bind_channel_to_cpu_lock);
 
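The CPU pick that replaces the old HV_LOCALIZED/HV_BALANCED branching is now two mask operations: XOR the node's CPU mask with the per-node alloced mask to get the still-available CPUs, then take the lowest set bit. Modeled with plain unsigned masks instead of struct cpumask (a sketch; __builtin_ctz is the GCC/Clang builtin):

#include <stdio.h>

int main(void)
{
	unsigned int node_mask = 0x0f; /* CPUs 0-3 in this node */
	unsigned int alloced   = 0x03; /* CPUs 0 and 1 already taken */

	/*
	 * Models cpumask_xor(available_mask, alloced_mask,
	 * cpumask_of_node(numa_node)): alloced is always a subset of
	 * node_mask, so XOR acts as set difference.
	 */
	unsigned int available = node_mask ^ alloced;

	int target_cpu = __builtin_ctz(available); /* models cpumask_first(): 2 */

	alloced |= 1u << target_cpu;               /* models cpumask_set_cpu() */

	printf("target_cpu = %d\n", target_cpu);
	return 0;
}

The earlier reset of alloced_mask, when cpumask_weight() shows the node fully used, guarantees available_mask is never empty here, so taking the first set bit is always well defined.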
include/linux/hyperv.h +0 −27
@@ -689,11 +689,6 @@ union hv_connection_id {
 	} u;
 };
 
-enum hv_numa_policy {
-	HV_BALANCED = 0,
-	HV_LOCALIZED,
-};
-
 enum vmbus_device_type {
 	HV_IDE = 0,
 	HV_SCSI,
@@ -808,10 +803,6 @@ struct vmbus_channel {
 	u32 target_vp;
 	/* The corresponding CPUID in the guest */
 	u32 target_cpu;
-	/*
-	 * State to manage the CPU affiliation of channels.
-	 */
-	struct cpumask alloced_cpus_in_node;
 	int numa_node;
 	/*
 	 * Support for sub-channels. For high performance devices,
@@ -898,18 +889,6 @@ struct vmbus_channel {
 	 */
 	bool low_latency;
 
-	/*
-	 * NUMA distribution policy:
-	 * We support two policies:
-	 * 1) Balanced: Here all performance critical channels are
-	 *    distributed evenly amongst all the NUMA nodes.
-	 *    This policy will be the default policy.
-	 * 2) Localized: All channels of a given instance of a
-	 *    performance critical service will be assigned CPUs
-	 *    within a selected NUMA node.
-	 */
-	enum hv_numa_policy affinity_policy;
-
 	bool probe_done;
 
 	/*
@@ -965,12 +944,6 @@ static inline bool is_sub_channel(const struct vmbus_channel *c)
 	return c->offermsg.offer.sub_channel_index != 0;
 }
 
-static inline void set_channel_affinity_state(struct vmbus_channel *c,
-					      enum hv_numa_policy policy)
-{
-	c->affinity_policy = policy;
-}
-
 static inline void set_channel_read_mode(struct vmbus_channel *c,
 					enum hv_callback_mode mode)
 {