Commit 656012c7 authored by Fuad Tabba, committed by Marc Zyngier
Browse files

KVM: Fix spelling in code comments



Fix spelling and typos (e.g., repeated words) in comments.

Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200401140310.29701-1-tabba@google.com
parent ce6f8f02
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -455,9 +455,9 @@ void force_vm_exit(const cpumask_t *mask)
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which which requires us to assign a new value. If we're the
 * first to use a VMID for the new generation, we must flush necessary caches
 * and TLBs on all CPUs.
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
+2 −2
Original line number Diff line number Diff line
@@ -267,7 +267,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maxmium via ZCR_EL2.LEN.  So, make sure the available vector
	 * maximum via ZCR_EL2.LEN.  So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
@@ -337,7 +337,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maxmimum permitted length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

+1 −1
Original line number Diff line number Diff line
@@ -577,7 +577,7 @@ static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minumal BPR value. This guarantees that no
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
+1 −1
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,

	/*
	 * No valid syndrome? Ask userspace for help if it has
	 * voluntered to do so, and bail out otherwise.
	 * volunteered to do so, and bail out otherwise.
	 */
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
+3 −3
Original line number Diff line number Diff line
@@ -784,7 +784,7 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we we have enough space below the idmap
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
@@ -964,7 +964,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any reguler RAM
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
@@ -2262,7 +2262,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be be tracked while the
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
Loading