@@ -3990,55 +3990,10 @@ static void hvm_s3_resume(struct domain *d)
bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
void *ctxt)
{
- static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
- cpumask_t *mask = &this_cpu(flush_cpumask);
- struct domain *d = current->domain;
- struct vcpu *v;
-
- /* Avoid deadlock if more than one vcpu tries this at the same time. */
- if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
- return false;
-
- /* Pause all other vcpus. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_pause_nosync(v);
-
- /* Now that all VCPUs are signalled to deschedule, we wait... */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- while ( !vcpu_runnable(v) && v->is_running )
- cpu_relax();
-
- /* All other vcpus are paused, safe to unlock now. */
- spin_unlock(&d->hypercall_deadlock_mutex);
-
- cpumask_clear(mask);
-
- /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
- for_each_vcpu ( d, v )
- {
- unsigned int cpu;
-
- if ( !flush_vcpu(ctxt, v) )
- continue;
-
- paging_update_cr3(v, false);
+ struct domain *currd = current->domain;
- cpu = read_atomic(&v->dirty_cpu);
- if ( is_vcpu_dirty_cpu(cpu) )
- __cpumask_set_cpu(cpu, mask);
- }
-
- /* Flush TLBs on all CPUs with dirty vcpu state. */
- flush_tlb_mask(mask);
-
- /* Done. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_unpause(v);
-
- return true;
+ return shadow_mode_enabled(currd) ? shadow_flush_tlb(flush_vcpu, ctxt)
+ : hap_flush_tlb(flush_vcpu, ctxt);
}
static bool always_flush(void *ctxt, struct vcpu *v)
@@ -669,6 +669,61 @@ static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
hvm_update_guest_cr3(v, noflush);
}
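+/* Flush TLB of selected vCPUs. */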
+bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
const struct paging_mode *
hap_paging_get_mode(struct vcpu *v)
{
@@ -3357,6 +3357,61 @@ out:
return rc;
}
+/* Flush TLB of selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
/**************************************************************************/
/* Shadow-control XEN_DOMCTL dispatcher */
@@ -46,6 +46,10 @@ int hap_track_dirty_vram(struct domain *d,
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
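+/* Flush the TLB of the selected vCPUs. */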
+bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#endif /* XEN_HAP_H */
/*
@@ -95,6 +95,10 @@ void shadow_blow_tables_per_domain(struct domain *d);
int shadow_set_allocation(struct domain *d, unsigned int pages,
bool *preempted);
+/* Flush the TLB of the selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#else /* !CONFIG_SHADOW_PAGING */
#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
@@ -106,6 +110,14 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
#define shadow_set_allocation(d, pages, preempted) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
+static inline bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt,
+ struct vcpu *v),
+ void *ctxt)
+{
+ ASSERT_UNREACHABLE();
+ return false;
+}
+
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
int fast, int all) {}