Commit e935b837 authored by Jan Kiszka, committed by Marcelo Tosatti
Browse files

KVM: Convert kvm_lock to raw_spinlock



Code under this lock requires non-preemptibility. Ensure this also holds
on PREEMPT-RT (-rt) kernels by converting it to a raw spinlock.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent bd3d1ec3
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -85,7 +85,7 @@


#define ASYNC_PF_PER_VCPU 64
#define ASYNC_PF_PER_VCPU 64


extern spinlock_t kvm_lock;
extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;
extern struct list_head vm_list;


struct kvm_vcpu;
struct kvm_vcpu;
+2 −2
Original line number Original line Diff line number Diff line
@@ -3587,7 +3587,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
	if (nr_to_scan == 0)
	if (nr_to_scan == 0)
		goto out;
		goto out;


	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);


	list_for_each_entry(kvm, &vm_list, vm_list) {
	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx, freed_pages;
		int idx, freed_pages;
@@ -3610,7 +3610,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
	if (kvm_freed)
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);
		list_move_tail(&kvm_freed->vm_list, &vm_list);


	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);


out:
out:
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
+2 −2
Original line number Original line Diff line number Diff line
@@ -4557,7 +4557,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va


	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);


	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
			if (vcpu->cpu != freq->cpu)
@@ -4567,7 +4567,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
				send_ipi = 1;
				send_ipi = 1;
		}
		}
	}
	}
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);


	if (freq->old < freq->new && send_ipi) {
	if (freq->old < freq->new && send_ipi) {
		/*
		/*
+18 −18
Original line number Original line Diff line number Diff line
@@ -69,7 +69,7 @@ MODULE_LICENSE("GPL");
 * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
 */


DEFINE_SPINLOCK(kvm_lock);
DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
LIST_HEAD(vm_list);


static cpumask_var_t cpus_hardware_enabled;
static cpumask_var_t cpus_hardware_enabled;
@@ -481,9 +481,9 @@ static struct kvm *kvm_create_vm(void)
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);


	return kvm;
	return kvm;


@@ -556,9 +556,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
	struct mm_struct *mm = kvm->mm;
	struct mm_struct *mm = kvm->mm;


	kvm_arch_sync_events(kvm);
	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
		kvm_io_bus_destroy(kvm->buses[i]);
@@ -2177,9 +2177,9 @@ static void hardware_enable_nolock(void *junk)


static void hardware_enable(void *junk)
static void hardware_enable(void *junk)
{
{
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	hardware_enable_nolock(junk);
	hardware_enable_nolock(junk);
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);
}
}


static void hardware_disable_nolock(void *junk)
static void hardware_disable_nolock(void *junk)
@@ -2194,9 +2194,9 @@ static void hardware_disable_nolock(void *junk)


static void hardware_disable(void *junk)
static void hardware_disable(void *junk)
{
{
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	hardware_disable_nolock(junk);
	hardware_disable_nolock(junk);
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);
}
}


static void hardware_disable_all_nolock(void)
static void hardware_disable_all_nolock(void)
@@ -2210,16 +2210,16 @@ static void hardware_disable_all_nolock(void)


static void hardware_disable_all(void)
static void hardware_disable_all(void)
{
{
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);
}
}


static int hardware_enable_all(void)
static int hardware_enable_all(void)
{
{
	int r = 0;
	int r = 0;


	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);


	kvm_usage_count++;
	kvm_usage_count++;
	if (kvm_usage_count == 1) {
	if (kvm_usage_count == 1) {
@@ -2232,7 +2232,7 @@ static int hardware_enable_all(void)
		}
		}
	}
	}


	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);


	return r;
	return r;
}
}
@@ -2394,10 +2394,10 @@ static int vm_stat_get(void *_offset, u64 *val)
	struct kvm *kvm;
	struct kvm *kvm;


	*val = 0;
	*val = 0;
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);
	return 0;
	return 0;
}
}


@@ -2411,12 +2411,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
	int i;
	int i;


	*val = 0;
	*val = 0;
	spin_lock(&kvm_lock);
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);
			*val += *(u32 *)((void *)vcpu + offset);


	spin_unlock(&kvm_lock);
	raw_spin_unlock(&kvm_lock);
	return 0;
	return 0;
}
}


@@ -2457,7 +2457,7 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
static int kvm_resume(struct sys_device *dev)
static int kvm_resume(struct sys_device *dev)
{
{
	if (kvm_usage_count) {
	if (kvm_usage_count) {
		WARN_ON(spin_is_locked(&kvm_lock));
		WARN_ON(raw_spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
		hardware_enable_nolock(NULL);
	}
	}
	return 0;
	return 0;