Commit 8fe65a82 authored by Paolo Bonzini
Browse files

kvm: rename last argument to kvm_get_dirty_log_protect



When manual dirty log reprotect will be enabled, kvm_get_dirty_log_protect's
pointer argument will always be false on exit, because no TLB flush is needed
until the manual re-protection operation.  Rename it from "is_dirty" to "flush",
which more accurately tells the caller what they have to do with it.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e5d83c74
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -1004,14 +1004,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	bool is_dirty = false;
	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
	r = kvm_get_dirty_log_protect(kvm, log, &flush);

	if (is_dirty) {
	if (flush) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

+3 −3
Original line number Diff line number Diff line
@@ -4393,7 +4393,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);
@@ -4404,14 +4404,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	if (kvm_x86_ops->flush_log_dirty)
		kvm_x86_ops->flush_log_dirty(kvm);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
	r = kvm_get_dirty_log_protect(kvm, log, &flush);

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
	lockdep_assert_held(&kvm->slots_lock);
	if (is_dirty)
	if (flush)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
+1 −1
Original line number Diff line number Diff line
@@ -753,7 +753,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty);
			      struct kvm_dirty_log *log, bool *flush);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
+3 −3
Original line number Diff line number Diff line
@@ -1205,14 +1205,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
	r = kvm_get_dirty_log_protect(kvm, log, &flush);

	if (is_dirty)
	if (flush)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
+3 −3
Original line number Diff line number Diff line
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
			struct kvm_dirty_log *log, bool *flush)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
@@ -1181,7 +1181,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	*flush = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;
@@ -1189,7 +1189,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;
		*flush = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;