@@ -18,6 +18,7 @@
*
* Copyright (c) 2019 Microsoft.
*/
+#include <xen/cpumask.h>
#include <xen/init.h>
#include <xen/types.h>
@@ -51,6 +52,10 @@ void __init hypervisor_setup(void)
{
if ( ops.setup )
ops.setup();
+
+ /* Check if assisted flush is available and disable the TLB clock if so. */
+ if ( !hypervisor_flush_tlb(cpumask_of(smp_processor_id()), NULL, 0) )
+ tlb_clk_enabled = false;
}
int hypervisor_ap_setup(void)
@@ -73,6 +78,15 @@ void __init hypervisor_e820_fixup(struct e820map *e820)
ops.e820_fixup(e820);
}
+int hypervisor_flush_tlb(const cpumask_t *mask, const void *va,
+ unsigned int order)
+{
+ if ( ops.flush_tlb )
+ return alternative_call(ops.flush_tlb, mask, va, order);
+
+ return -EOPNOTSUPP;
+}
+
/*
* Local variables:
* mode: C
@@ -324,12 +324,18 @@ static void __init e820_fixup(struct e820map *e820)
pv_shim_fixup_e820(e820);
}
+static int flush_tlb(const cpumask_t *mask, const void *va, unsigned int order)
+{
+ return xen_hypercall_hvm_op(HVMOP_flush_tlbs, NULL);
+}
+
static const struct hypervisor_ops __initconstrel ops = {
.name = "Xen",
.setup = setup,
.ap_setup = ap_setup,
.resume = resume,
.e820_fixup = e820_fixup,
+ .flush_tlb = flush_tlb,
};
const struct hypervisor_ops *__init xg_probe(void)
@@ -15,6 +15,7 @@
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
+#include <asm/guest.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
@@ -268,6 +269,12 @@ void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
if ( (flags & ~FLUSH_ORDER_MASK) &&
!cpumask_subset(mask, cpumask_of(cpu)) )
{
+ if ( cpu_has_hypervisor &&
+ !(flags & ~(FLUSH_TLB | FLUSH_TLB_GLOBAL | FLUSH_VA_VALID |
+ FLUSH_ORDER_MASK)) &&
+ !hypervisor_flush_tlb(mask, va, (flags - 1) & FLUSH_ORDER_MASK) )
+ return;
+
spin_lock(&flush_lock);
cpumask_and(&flush_cpumask, mask, &cpu_online_map);
cpumask_clear_cpu(cpu, &flush_cpumask);
@@ -19,6 +19,8 @@
#ifndef __X86_HYPERVISOR_H__
#define __X86_HYPERVISOR_H__
+#include <xen/cpumask.h>
+
#include <asm/e820.h>
struct hypervisor_ops {
@@ -32,6 +34,8 @@ struct hypervisor_ops {
void (*resume)(void);
/* Fix up e820 map */
void (*e820_fixup)(struct e820map *e820);
+ /* L0 assisted TLB flush */
+ int (*flush_tlb)(const cpumask_t *mask, const void *va, unsigned int order);
};
#ifdef CONFIG_GUEST
@@ -41,6 +45,14 @@ void hypervisor_setup(void);
int hypervisor_ap_setup(void);
void hypervisor_resume(void);
void hypervisor_e820_fixup(struct e820map *e820);
+/*
+ * L0 assisted TLB flush.
+ * mask: cpumask of the dirty vCPUs that should be flushed.
+ * va: linear address to flush, or NULL for global flushes.
+ * order: order of the linear address pointed to by va.
+ */
+int hypervisor_flush_tlb(const cpumask_t *mask, const void *va,
+ unsigned int order);
#else
@@ -52,6 +64,11 @@ static inline void hypervisor_setup(void) { ASSERT_UNREACHABLE(); }
static inline int hypervisor_ap_setup(void) { return 0; }
static inline void hypervisor_resume(void) { ASSERT_UNREACHABLE(); }
static inline void hypervisor_e820_fixup(struct e820map *e820) {}
+static inline int hypervisor_flush_tlb(const cpumask_t *mask, const void *va,
+ unsigned int order)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_GUEST */