Commit 22447828 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "ARM fixes:

   - Another fix for state updates on exit to userspace

   - Prevent the creation of mixed 32/64 VMs

   - Fix regression with irqbypass not restarting the guest on failed
     connect

   - Fix regression with debug register decoding resulting in
     overlapping access

   - Commit exception state on exit to userspace

   - Fix the MMU notifier return values

   - Add missing 'static' qualifiers in the new host stage-2 code

  x86 fixes:

   - fix guest missed wakeup with assigned devices

   - fix WARN reported by syzkaller

   - do not use BIT() in UAPI headers

   - make the kvm_amd.avic parameter bool

  PPC fixes:

   - make halt polling heuristics consistent with other architectures

  selftests:

   - various fixes

   - new performance selftest memslot_perf_test

   - test UFFD minor faults in demand_paging_test"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (44 commits)
  selftests: kvm: fix overlapping addresses in memslot_perf_test
  KVM: X86: Kill off ctxt->ud
  KVM: X86: Fix warning caused by stale emulation context
  KVM: X86: Use kvm_get_linear_rip() in single-step and #DB/#BP interception
  KVM: x86/mmu: Fix comment mentioning skip_4k
  KVM: VMX: update vcpu posted-interrupt descriptor when assigning device
  KVM: rename KVM_REQ_PENDING_TIMER to KVM_REQ_UNBLOCK
  KVM: x86: add start_assignment hook to kvm_x86_ops
  KVM: LAPIC: Narrow the timer latency between wait_lapic_expire and world switch
  selftests: kvm: do only 1 memslot_perf_test run by default
  KVM: X86: Use _BITUL() macro in UAPI headers
  KVM: selftests: add shared hugetlbfs backing source type
  KVM: selftests: allow using UFFD minor faults for demand paging
  KVM: selftests: create alias mappings when using shared memory
  KVM: selftests: add shmem backing source type
  KVM: selftests: refactor vm_mem_backing_src_type flags
  KVM: selftests: allow different backing source types
  KVM: selftests: compute correct demand paging size
  KVM: selftests: simplify setup_demand_paging error handling
  KVM: selftests: Print a message if /dev/kvm is missing
  ...
parents 866c4b8a 000ac429
@@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
   necessary to inform each VCPU to completely refresh the tables. This
   request is used for that.

-KVM_REQ_PENDING_TIMER
+KVM_REQ_UNBLOCK

-  This request may be made from a timer handler run on the host on behalf
-  of a VCPU. It informs the VCPU thread to inject a timer interrupt.
+  This request informs the vCPU to exit kvm_vcpu_block. It is used for
+  example from timer handlers that run on the host on behalf of a vCPU,
+  or in order to update the interrupt routing and ensure that assigned
+  devices will wake up the vCPU.

 KVM_REQ_UNHALT
...
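For context on the mechanism this hunk documents: requests such as the renamed KVM_REQ_UNBLOCK are posted with the generic vCPU-request helpers from include/linux/kvm_host.h. A minimal sketch of the producer side; kvm_make_request() and kvm_vcpu_kick() are the existing helpers, while the handler name is invented for this example:

#include <linux/kvm_host.h>

/* Illustrative only: a host-side handler asking a vCPU to leave
 * kvm_vcpu_block(). example_timer_expired() is not a real kernel
 * function, just a stand-in for e.g. a timer callback.
 */
static void example_timer_expired(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);	/* wake the vCPU if it is currently sleeping */
}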
@@ -63,6 +63,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
 #define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21

 #ifndef __ASSEMBLY__
@@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
 extern u64 __vgic_v3_get_gic_config(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
...
@@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
 }

+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+	return test_bit(feature, vcpu->arch.features);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
@@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		return ret;
 	}

-	if (run->immediate_exit)
-		return -EINTR;
-
 	vcpu_load(vcpu);

+	if (run->immediate_exit) {
+		ret = -EINTR;
+		goto out;
+	}
+
 	kvm_sigset_activate(vcpu);

 	ret = 1;
@@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)

 	kvm_sigset_deactivate(vcpu);

+out:
+	/*
+	 * In the unlikely event that we are returning to userspace
+	 * with pending exceptions or PC adjustment, commit these
+	 * adjustments in order to give userspace a consistent view of
+	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
+	 * being preempt-safe on VHE.
+	 */
+	if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+					 KVM_ARM64_INCREMENT_PC)))
+		kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
 	vcpu_put(vcpu);
 	return ret;
 }
...
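The effect of the new out: label is visible from userspace: even when KVM_RUN returns early with -EINTR on the immediate_exit path, any pending PC increment or exception is committed first, so subsequent register reads see consistent state. A hedged userspace sketch (arm64-only, error handling elided; read_guest_pc() is an invented helper, the register-id macros follow the standard uapi/selftests pattern):

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Standard arm64 core-register id construction, as used by kvm selftests. */
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

static uint64_t read_guest_pc(int vcpu_fd)
{
	uint64_t pc;
	struct kvm_one_reg reg = {
		.id   = ARM64_CORE_REG(regs.pc),
		.addr = (uint64_t)&pc,
	};

	/* After KVM_RUN returns, this now reflects any committed adjustment. */
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* error handling elided */
	return pc;
}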
@@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 	*vcpu_pc(vcpu) = vect_offset;
 }

-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_el1_is_32bit(vcpu)) {
 		switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
 		}
 	}
 }
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+		kvm_inject_exception(vcpu);
+		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+				      KVM_ARM64_EXCEPT_MASK);
+	} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+		kvm_skip_instr(vcpu);
+		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+	}
+}
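The producer side of these flags lives in kvm_emulate.h (see the kvm_incr_pc() context earlier in this diff): emulation never moves the PC directly, it latches a flag that __kvm_adjust_pc() commits on the next guest entry, or, with this series, on return to userspace. A minimal sketch with an invented function name:

#include <asm/kvm_emulate.h>

/* Illustrative only: after emulating a trapped access, request the PC
 * bump rather than writing the PC; __kvm_adjust_pc() commits it later.
 */
static void example_mmio_emulation_done(struct kvm_vcpu *vcpu)
{
	kvm_incr_pc(vcpu);	/* sets KVM_ARM64_INCREMENT_PC */
}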
@@ -13,8 +13,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>

-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
 static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
@@ -43,22 +41,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }

-/*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
-		kvm_inject_exception(vcpu);
-		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
-				      KVM_ARM64_EXCEPT_MASK);
-	} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
-		kvm_skip_instr(vcpu);
-		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
-	}
-}
-
 /*
  * Skip an instruction while host sysregs are live.
  * Assumes host is always 64-bit.
...
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
 }

+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+	__kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
 static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
 {
 	__kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);

 static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_vcpu_run),
+	HANDLE_FUNC(__kvm_adjust_pc),
 	HANDLE_FUNC(__kvm_flush_vm_context),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
...
@@ -23,8 +23,8 @@
 extern unsigned long hyp_nr_cpus;

 struct host_kvm host_kvm;
-struct hyp_pool host_s2_mem;
-struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_mem;
+static struct hyp_pool host_s2_dev;

 /*
  * Copies of the host's CPU features registers holding sanitized values.
...
@@ -17,7 +17,6 @@
 #include <nvhe/trap_handler.h>

 struct hyp_pool hpool;
-struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 unsigned long hyp_nr_cpus;

 #define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -27,6 +26,7 @@ static void *vmemmap_base;
 static void *hyp_pgt_base;
 static void *host_s2_mem_pgt_base;
 static void *host_s2_dev_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;

 static int divide_memory_pool(void *virt, unsigned long size)
 {
...
@@ -4,7 +4,6 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

-#include <hyp/adjust_pc.h>
 #include <hyp/switch.h>
 #include <hyp/sysreg-sr.h>
@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	__debug_save_host_buffers_nvhe(vcpu);

-	__adjust_pc(vcpu);
+	__kvm_adjust_pc(vcpu);

 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
...
@@ -4,7 +4,6 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

-#include <hyp/adjust_pc.h>
 #include <hyp/switch.h>

 #include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	__load_guest_stage2(vcpu->arch.hw_mmu);
 	__activate_traps(vcpu);

-	__adjust_pc(vcpu);
+	__kvm_adjust_pc(vcpu);

 	sysreg_restore_guest_state_vhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);
...
@@ -1156,13 +1156,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	if (!kvm->arch.mmu.pgt)
-		return 0;
+		return false;

 	__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
			     (range->end - range->start) << PAGE_SHIFT,
			     range->may_block);

-	return 0;
+	return false;
 }

 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1170,7 +1170,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	kvm_pfn_t pfn = pte_pfn(range->pte);

 	if (!kvm->arch.mmu.pgt)
-		return 0;
+		return false;

 	WARN_ON(range->end - range->start != 1);
@@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
			       PAGE_SIZE, __pfn_to_phys(pfn),
			       KVM_PGTABLE_PROT_R, NULL);

-	return 0;
+	return false;
 }

 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	pte_t pte;

 	if (!kvm->arch.mmu.pgt)
-		return 0;
+		return false;

 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@@ -1213,7 +1213,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	if (!kvm->arch.mmu.pgt)
-		return 0;
+		return false;

 	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
					   range->start << PAGE_SHIFT);
...
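These bool conversions line up with what the generic MMU notifier path expects: the return value is a "caller should flush" hint, and the arm64 handlers invalidate as part of unmapping, hence false. A simplified sketch of the caller's contract (illustrative, not verbatim from virt/kvm/kvm_main.c; the wrapper name is invented):

#include <linux/kvm_host.h>

/* Sketch of how common code consumes the return value: a true return
 * asks it to flush remote TLBs on the caller's behalf.
 */
static void example_handle_unmap(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (kvm_unmap_gfn_range(kvm, range))
		kvm_flush_remote_tlbs(kvm);
}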
@@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 	return 0;
 }

+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu *tmp;
+	bool is32bit;
+	int i;
+
+	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+		return false;
+
+	/* Check that the vcpus are either all 32bit or all 64bit */
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+			return false;
+	}
+
+	return true;
+}
+
 /**
  * kvm_reset_vcpu - sets core registers and sys_regs to reset value
  * @vcpu: The VCPU pointer
@@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}
 	}

+	if (!vcpu_allowed_register_width(vcpu)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
-				ret = -EINVAL;
-				goto out;
-			}
 			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;
...
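Since vcpu_allowed_register_width() runs at vCPU reset, mixing register widths now fails at KVM_ARM_VCPU_INIT time. A hedged userspace sketch (assumes a host with ARM64_HAS_32BIT_EL1; fd setup and the base init from KVM_ARM_PREFERRED_TARGET are elided, and the function name is invented):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_mixed_vcpus(int vcpu0_fd, int vcpu1_fd, struct kvm_vcpu_init init)
{
	init.features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
	if (ioctl(vcpu0_fd, KVM_ARM_VCPU_INIT, &init))	/* 32-bit vCPU: ok */
		return -1;

	init.features[0] &= ~(1 << KVM_ARM_VCPU_EL1_32BIT);
	/* 64-bit vCPU in the same VM: now rejected with -EINVAL. */
	return ioctl(vcpu1_fd, KVM_ARM_VCPU_INIT, &init);
}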
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

 	if (p->is_write)
 		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, rd, dbg_reg);

-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

 	return true;
 }
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
 {
-	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
 }

 static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

 	if (p->is_write)
 		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, rd, dbg_reg);

-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

 	return true;
 }
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
 {
-	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
 }

 static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

 	if (p->is_write)
 		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, rd, dbg_reg);

-	trace_trap_reg(__func__, rd->reg, p->is_write,
-		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+	trace_trap_reg(__func__, rd->CRm, p->is_write,
+		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

 	return true;
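The rd->reg to rd->CRm switch works because, for the debug registers, the breakpoint/watchpoint number n lives in the CRm field of the system-register encoding (DBGBVR<n>_EL1 decodes as op0=2, op1=0, CRn=0, CRm=n, op2=4, with CRm=n likewise for DBGBCR/DBGWVR/DBGWCR), whereas rd->reg does not carry that index for these descriptors, so using it made distinct breakpoints alias onto overlapping array slots. Restated as a sketch (the helper is invented, not part of the patch):

/* Sketch: recover the register number n from the encoding's CRm field. */
static inline unsigned int dbg_reg_index(const struct sys_reg_desc *rd)
{
	return rd->CRm;	/* n in DBGBVR<n>/DBGBCR<n>/DBGWVR<n>/DBGWCR<n> */
}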