Commit 05b3e0c2 authored by Avi Kivity's avatar Avi Kivity Committed by Linus Torvalds
Browse files

[PATCH] KVM: Replace __x86_64__ with CONFIG_X86_64



As per akpm's request.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5aff458e
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -140,7 +140,7 @@ enum {
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
@@ -375,7 +375,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

#ifdef __x86_64__
#ifdef CONFIG_X86_64
void set_efer(struct kvm_vcpu *vcpu, u64 efer);
#endif

@@ -485,7 +485,7 @@ static inline unsigned long read_tr_base(void)
	return segment_base(tr);
}

#ifdef __x86_64__
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;
@@ -533,7 +533,7 @@ static inline u32 get_rdx_init_val(void)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#ifdef __x86_64__
#ifdef CONFIG_X86_64

/*
 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.  Therefore
+8 −8
Original line number Diff line number Diff line
@@ -83,7 +83,7 @@ struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
}
EXPORT_SYMBOL_GPL(find_msr_entry);

#ifdef __x86_64__
#ifdef CONFIG_X86_64
// LDT or TSS descriptor in the GDT. 16 bytes.
struct segment_descriptor_64 {
	struct segment_descriptor s;
@@ -115,7 +115,7 @@ unsigned long segment_base(u16 selector)
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
@@ -351,7 +351,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
	}

	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef __x86_64__
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

@@ -1120,7 +1120,7 @@ static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef __x86_64__
#ifdef CONFIG_X86_64

void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
@@ -1243,7 +1243,7 @@ static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
@@ -1287,7 +1287,7 @@ static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
@@ -1401,7 +1401,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
	vcpu->cr8 = sregs->cr8;

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
	vcpu->apic_base = sregs->apic_base;
@@ -1434,7 +1434,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
+1 −1
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@
#include "kvm.h"

static const u32 host_save_msrs[] = {
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE, MSR_GS_BASE,
#endif
+1 −1
Original line number Diff line number Diff line
#ifndef __KVM_VMX_H
#define __KVM_VMX_H

#ifdef __x86_64__
#ifdef CONFIG_X86_64
/*
 * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
 * mechanism (cpu bug AA24)
+11 −11
Original line number Diff line number Diff line
@@ -287,7 +287,7 @@ static void svm_hardware_enable(void *garbage)

	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
@@ -397,7 +397,7 @@ static __init int svm_hardware_setup(void)
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef __x86_64__
#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
@@ -704,7 +704,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
@@ -1097,7 +1097,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
	case MSR_IA32_APICBASE:
		*data = vcpu->apic_base;
		break;
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	case MSR_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
@@ -1149,7 +1149,7 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	switch (ecx) {
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
@@ -1172,7 +1172,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
#ifdef __x86_64__
#ifdef CONFIG_X86_64
	case MSR_STAR:
		vcpu->svm->vmcb->save.star = data;
		break;
@@ -1387,7 +1387,7 @@ again:
		load_db_regs(vcpu->svm->db_regs);
	}
	asm volatile (
#ifdef __x86_64__
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
@@ -1397,7 +1397,7 @@ again:
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef __x86_64__
#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
@@ -1421,7 +1421,7 @@ again:
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef __x86_64__
#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
@@ -1442,7 +1442,7 @@ again:
#endif

		/* Save guest registers, load host registers */
#ifdef __x86_64__
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
@@ -1483,7 +1483,7 @@ again:
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef __x86_64__
#ifdef CONFIG_X86_64
		  ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
		  [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
Loading