Commit f13577e8 authored by Paolo Bonzini
Browse files

KVM: MMU: return page fault error code from permission_fault



This will help in the implementation of PKRU, where the PK bit of the page
fault error code cannot be computed in advance (unlike I/D, R/W and U/S).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e9ad4ec8
Loading
Loading
Loading
Loading
+10 −5
Original line number Diff line number Diff line
@@ -141,10 +141,14 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
}

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
@@ -169,7 +173,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,

	WARN_ON(pfec & PFERR_RSVD_MASK);

	return (mmu->permissions[index] >> pte_access) & 1;
	pfec |= PFERR_PRESENT_MASK;
	return -((mmu->permissions[index] >> pte_access) & 1) & pfec;
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
+2 −3
Original line number Diff line number Diff line
@@ -359,10 +359,9 @@ retry_walk:
		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
		errcode |= PFERR_PRESENT_MASK;
	errcode = permission_fault(vcpu, mmu, pte_access, access);
	if (unlikely(errcode))
		goto error;
	}

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;