@@ -846,20 +846,28 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
#define AA64DFR0_BRPS(v) \
((u8)cpuid_feature_extract_unsigned_field(v, ID_AA64DFR0_BRPS_SHIFT))
+#define AA64DFR0_WRPS(v) \
+ ((u8)cpuid_feature_extract_unsigned_field(v, ID_AA64DFR0_WRPS_SHIFT))
#define AA64DFR0_CTX_CMPS(v) \
((u8)cpuid_feature_extract_unsigned_field(v, ID_AA64DFR0_CTX_CMPS_SHIFT))
#define INVALID_BRPN ((u8)-1)
-static u8 get_bcr_lbn(u64 val)
+static u8 get_bwcr_lbn(u64 val)
{
+ WARN_ON_ONCE(SYS_DBGBCR_EL1_LBN_SHIFT != SYS_DBGWCR_EL1_LBN_SHIFT);
+ WARN_ON_ONCE(SYS_DBGBCR_EL1_LBN_MASK != SYS_DBGWCR_EL1_LBN_MASK);
+
return ((val >> SYS_DBGBCR_EL1_LBN_SHIFT) & SYS_DBGBCR_EL1_LBN_MASK);
}
-static u64 update_bcr_lbn(u64 val, u8 lbn)
+static u64 update_bwcr_lbn(u64 val, u8 lbn)
{
u64 new;
+ WARN_ON_ONCE(SYS_DBGBCR_EL1_LBN_SHIFT != SYS_DBGWCR_EL1_LBN_SHIFT);
+ WARN_ON_ONCE(SYS_DBGBCR_EL1_LBN_MASK != SYS_DBGWCR_EL1_LBN_MASK);
+
new = val & ~(SYS_DBGBCR_EL1_LBN_MASK << SYS_DBGBCR_EL1_LBN_SHIFT);
new |= ((u64)lbn & SYS_DBGBCR_EL1_LBN_MASK) << SYS_DBGBCR_EL1_LBN_SHIFT;
return new;
@@ -1029,29 +1037,51 @@ static u8 get_unused_p_bpn(struct kvm_vcpu *vcpu)
* context aware breakpoint. In such cases, KVM will return 0 to reads of
 * bcr.lbn, and have the breakpoint behave as if it is disabled by
 * setting the lbn to an unused (disabled) breakpoint.
+ *
+ * virt_to_phys_wcr()/phys_to_virt_wcr() do the same thing for wcr.
*/
-static u64 virt_to_phys_bcr(struct kvm_vcpu *vcpu, u64 v_bcr)
+static u64 virt_to_phys_bwcr(struct kvm_vcpu *vcpu, u64 v_bwcr)
{
u8 v_lbn, p_lbn;
- v_lbn = get_bcr_lbn(v_bcr);
+ v_lbn = get_bwcr_lbn(v_bwcr);
p_lbn = virt_to_phys_bpn(vcpu, v_lbn);
if (p_lbn == INVALID_BRPN)
p_lbn = get_unused_p_bpn(vcpu);
- return update_bcr_lbn(v_bcr, p_lbn);
+ return update_bwcr_lbn(v_bwcr, p_lbn);
}
-static u64 phys_to_virt_bcr(struct kvm_vcpu *vcpu, u64 p_bcr)
+static u64 phys_to_virt_bwcr(struct kvm_vcpu *vcpu, u64 p_bwcr)
{
u8 v_lbn, p_lbn;
- p_lbn = get_bcr_lbn(p_bcr);
+ p_lbn = get_bwcr_lbn(p_bwcr);
v_lbn = phys_to_virt_bpn(vcpu, p_lbn);
if (v_lbn == INVALID_BRPN)
v_lbn = 0;
- return update_bcr_lbn(p_bcr, v_lbn);
+ return update_bwcr_lbn(p_bwcr, v_lbn);
+}
+
+static u64 virt_to_phys_bcr(struct kvm_vcpu *vcpu, u64 v_bcr)
+{
+ return virt_to_phys_bwcr(vcpu, v_bcr);
+}
+
+static u64 virt_to_phys_wcr(struct kvm_vcpu *vcpu, u64 v_wcr)
+{
+ return virt_to_phys_bwcr(vcpu, v_wcr);
+}
+
+static u64 phys_to_virt_bcr(struct kvm_vcpu *vcpu, u64 p_bcr)
+{
+ return phys_to_virt_bwcr(vcpu, p_bcr);
+}
+
+static u64 phys_to_virt_wcr(struct kvm_vcpu *vcpu, u64 p_wcr)
+{
+ return phys_to_virt_bwcr(vcpu, p_wcr);
}
/*
@@ -1116,6 +1146,12 @@ void kvm_vcpu_breakpoint_config(struct kvm_vcpu *vcpu)
dbg->dbg_bcr[v] = 0;
dbg->dbg_bvr[v] = 0;
}
+
+ /*
+ * There is no distinction between physical and virtual
+ * watchpoint numbers. So, the index stays the same.
+ */
+ dbg->dbg_wcr[v] = virt_to_phys_wcr(vcpu, dbg->dbg_wcr[v]);
}
}
@@ -1461,12 +1497,26 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ u8 wpn = rd->CRm;
+ u64 *dbg_reg;
+ u64 v_dfr0 = read_id_reg_with_encoding(vcpu, SYS_ID_AA64DFR0_EL1);
- if (p->is_write)
+ if (wpn > AA64DFR0_WRPS(v_dfr0)) {
+ /* Invalid watchpoint number for the guest */
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+
+ dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[wpn];
+ if (p->is_write) {
+ /* Convert virtual wcr to physical wcr and update debug_reg */
+ p->regval = virt_to_phys_wcr(vcpu, p->regval);
reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
+ } else {
dbg_to_reg(vcpu, p, rd, dbg_reg);
+ /* Convert physical wcr to virtual wcr */
+ p->regval = phys_to_virt_wcr(vcpu, p->regval);
+ }
trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
@@ -1476,19 +1526,49 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ u8 wpn = rd->CRm;
+ u64 v_wcr, p_wcr;
- if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ if (copy_from_user(&v_wcr, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
+
+ /*
+ * Until the first KVM_RUN, vcpu_debug_state holds the virtual wcr.
+ * After that, vcpu_debug_state holds the physical wcr.
+ */
+ if (vcpu_has_run_once(vcpu)) {
+ /* Convert virtual wcr to physical wcr, and save it */
+ p_wcr = virt_to_phys_wcr(vcpu, v_wcr);
+ vcpu->arch.vcpu_debug_state.dbg_wcr[wpn] = p_wcr;
+ } else {
+ vcpu->arch.vcpu_debug_state.dbg_wcr[wpn] = v_wcr;
+ return 0;
+ }
+
return 0;
}
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ u8 wpn = rd->CRm;
+ u64 p_wcr, v_wcr;
- if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ /*
+ * Until the first KVM_RUN, vcpu_debug_state holds the virtual wcr.
+ * After that, vcpu_debug_state holds the physical wcr.
+ */
+ if (vcpu_has_run_once(vcpu)) {
+ /* Get the physical wcr value */
+ p_wcr = vcpu->arch.vcpu_debug_state.dbg_wcr[wpn];
+
+ /* Convert physical wcr to virtual wcr */
+ v_wcr = phys_to_virt_wcr(vcpu, p_wcr);
+ } else {
+ v_wcr = vcpu->arch.vcpu_debug_state.dbg_wcr[wpn];
+ }
+
+ if (copy_to_user(uaddr, &v_wcr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
return 0;
}
When the number of non-context-aware breakpoints for the guest is
decreased by userspace, KVM will map the vCPU's context-aware
breakpoints (from the guest's point of view) to the pCPU's
context-aware breakpoints. Since dbgwcr.lbn holds a linked breakpoint
number, emulate dbgwcr accesses to convert between the virtual and
physical dbgwcr.lbn as needed.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/kvm/sys_regs.c | 110 ++++++++++++++++++++++++++++++++------
 1 file changed, 95 insertions(+), 15 deletions(-)
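
The following is a minimal, self-contained sketch (userspace C, not
kernel code) of the lbn rewriting that virt_to_phys_bwcr()/
phys_to_virt_bwcr() perform on DBGWCR_EL1 values. It assumes the LBN
field sits in bits [19:16] (i.e. the values behind
SYS_DBGWCR_EL1_LBN_SHIFT/MASK) and uses a made-up virtual-to-physical
breakpoint number map purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed DBGWCR_EL1.LBN field position: bits [19:16]. */
#define LBN_SHIFT	16
#define LBN_MASK	0xfULL

/* Extract the linked breakpoint number from a wcr value. */
static uint8_t get_lbn(uint64_t wcr)
{
	return (wcr >> LBN_SHIFT) & LBN_MASK;
}

/* Return wcr with its lbn field replaced by lbn. */
static uint64_t update_lbn(uint64_t wcr, uint8_t lbn)
{
	wcr &= ~(LBN_MASK << LBN_SHIFT);
	wcr |= ((uint64_t)lbn & LBN_MASK) << LBN_SHIFT;
	return wcr;
}

int main(void)
{
	/* Hypothetical virtual-to-physical breakpoint number mapping. */
	const uint8_t v_to_p[] = { 0, 1, 4, 5 };
	/* Virtual wcr: enabled (E=1), linked to virtual breakpoint #2. */
	uint64_t v_wcr = (2ULL << LBN_SHIFT) | 0x1;
	uint64_t p_wcr = update_lbn(v_wcr, v_to_p[get_lbn(v_wcr)]);

	printf("virtual wcr 0x%llx -> physical wcr 0x%llx\n",
	       (unsigned long long)v_wcr, (unsigned long long)p_wcr);
	return 0;
}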