@@ -80,6 +80,8 @@ struct arm64_ftr_reg {
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
+struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
+
extern unsigned int __read_mostly sysctl_enable_asym_32bit;
/*
@@ -579,6 +581,7 @@ void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
u64 read_sanitised_ftr_reg(u32 id);
+int emulate_sys_reg(u32 id, u64 *valp);
static inline bool cpu_supports_mixed_endian_el0(void)
{
@@ -648,7 +648,7 @@ static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
* returns - Upon success, matching ftr_reg entry for id.
* - NULL on failure but with an WARN_ON().
*/
-static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
struct arm64_ftr_reg *reg;
@@ -2774,7 +2774,7 @@ static inline int emulate_id_reg(u32 id, u64 *valp)
return 0;
}
-static int emulate_sys_reg(u32 id, u64 *valp)
+int emulate_sys_reg(u32 id, u64 *valp)
{
struct arm64_ftr_reg *regp;
@@ -2353,7 +2353,7 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
-static int emulate_sys_reg(struct kvm_vcpu *vcpu,
+static int kvm_emulate_sys_reg(struct kvm_vcpu *vcpu,
struct sys_reg_params *params)
{
const struct sys_reg_desc *r;
@@ -2412,7 +2412,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = !(esr & 1);
- ret = emulate_sys_reg(vcpu, &params);
+ ret = kvm_emulate_sys_reg(vcpu, &params);
if (!params.is_write)
vcpu_set_reg(vcpu, Rt, params.regval);
Export emulate_sys_reg() and get_arm64_ftr_reg() to allow exposing a sanitized version of the id_aa64pfr0 register to user space. To avoid a clash, rename emulate_sys_reg() in KVM code to be prefixed with 'kvm_'. Signed-off-by: Qais Yousef <qais.yousef@arm.com> --- arch/arm64/include/asm/cpufeature.h | 3 +++ arch/arm64/kernel/cpufeature.c | 4 ++-- arch/arm64/kvm/sys_regs.c | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-)