@@ -82,7 +82,7 @@ void hvm_asid_init(int nasids)
void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
{
- asid->generation = 0;
+ write_atomic(&asid->generation, 0);
}
void hvm_asid_flush_vcpu(struct vcpu *v)
@@ -120,7 +120,7 @@ bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
goto disabled;
/* Test if VCPU has valid ASID. */
- if ( asid->generation == data->core_asid_generation )
+ if ( read_atomic(&asid->generation) == data->core_asid_generation )
return 0;
/* If there are no free ASIDs, need to go to a new generation */
@@ -134,7 +134,7 @@ bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
/* Now guaranteed to be a free ASID. */
asid->asid = data->next_asid++;
- asid->generation = data->core_asid_generation;
+ write_atomic(&asid->generation, data->core_asid_generation);
/*
* When we assign ASID 1, flush all TLB entries as we are starting a new
The current implementation of hvm_asid_flush_vcpu is not safe to use unless the target vCPU is either paused or the currently running one, as it modifies the generation without any locking. Fix this by using atomic operations when accessing the generation field, both in hvm_asid_flush_vcpu_asid and the other ASID functions. This allows the current ASID generation to be flushed safely. Note that for the flush to take effect while the vCPU is currently running, a vmexit is required. Note that the same could be achieved by introducing an extra field to hvm_vcpu_asid that signals to hvm_asid_handle_vmenter the need to call hvm_asid_flush_vcpu on the given vCPU before vmentry; this however seems unnecessary, as hvm_asid_flush_vcpu itself only sets two vCPU fields to 0, so there's no need to delay this to the vmentry ASID helper. This is not a bugfix, as no callers that would violate the assumptions listed in the first paragraph have been found, but a preparatory change in order to allow remote flushing of HVM vCPUs. Signed-off-by: Roger Pau Monné <roger.pau@citrix.com> --- xen/arch/x86/hvm/asid.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)