Commit 414d3b07 authored by Martin Schwidefsky, committed by Christian Borntraeger
Browse files

s390/kvm: page table invalidation notifier



Pass an address range to the page table invalidation notifier
for KVM. This allows notifying about changes that affect a larger
virtual memory area, e.g. for 1MB pages.

Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 64672c95
Loading
Loading
Loading
Loading
+2 −1
Original line number Original line Diff line number Diff line
@@ -39,7 +39,8 @@ struct gmap {
 */
 */
struct gmap_notifier {
struct gmap_notifier {
	struct list_head list;
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
	void (*notifier_call)(struct gmap *gmap, unsigned long start,
			      unsigned long end);
};
};


struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
+13 −5
Original line number Original line Diff line number Diff line
@@ -150,7 +150,8 @@ int kvm_arch_hardware_enable(void)
	return 0;
	return 0;
}
}


static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);


/*
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * This callback is executed during stop_machine(). All CPUs are therefore
@@ -1976,16 +1977,23 @@ void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
	kvm_s390_vcpu_request(vcpu);
	kvm_s390_vcpu_request(vcpu);
}
}


static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;


	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
		prefix = kvm_s390_get_prefix(vcpu);
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
		}
	}
	}
+16 −3
Original line number Original line Diff line number Diff line
@@ -572,6 +572,21 @@ void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
}
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);


/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gmap: pointer to guest mapping meta data structure
@@ -643,7 +658,6 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
{
	unsigned long offset, gaddr;
	unsigned long offset, gaddr;
	unsigned long *table;
	unsigned long *table;
	struct gmap_notifier *nb;
	struct gmap *gmap;
	struct gmap *gmap;


	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
@@ -655,8 +669,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
		if (!table)
		if (!table)
			continue;
			continue;
		gaddr = __gmap_segment_gaddr(table) + offset;
		gaddr = __gmap_segment_gaddr(table) + offset;
		list_for_each_entry(nb, &gmap_notifier_list, list)
		gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
			nb->notifier_call(gmap, gaddr);
	}
	}
	spin_unlock(&gmap_notifier_lock);
	spin_unlock(&gmap_notifier_lock);
}
}