@@ -205,6 +205,95 @@ static struct vcpu *vector_hashing_dest(const struct domain *d,
return dest;
 }
 
+static void hvm_pirq_eoi(struct pirq *pirq)
+{
+ struct hvm_pirq_dpci *pirq_dpci;
+
+ if ( !pirq )
+ {
+ ASSERT_UNREACHABLE();
+ return;
+ }
+
+ pirq_dpci = pirq_dpci(pirq);
+
+    /*
+     * No need to take the vector lock for the timer, since the
+     * interrupt has not been EOIed yet.
+     */
+ if ( --pirq_dpci->pending ||
+         /* When the interrupt source is MSI, no Ack should be performed. */
+ (pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE) )
+ return;
+
+ pirq_guest_eoi(pirq);
+}
+
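+/*
+ * Deassert the guest INTx line for this binding (unless the domain is
+ * handling the pirq directly) and EOI the underlying machine pirq.
+ */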
+static void _hvm_dpci_eoi(struct domain *d,
+ const struct hvm_girq_dpci_mapping *girq)
+{
+ struct pirq *pirq = pirq_info(d, girq->machine_gsi);
+
+ if ( !hvm_domain_use_pirq(d, pirq) )
+ hvm_pci_intx_deassert(d, girq->device, girq->intx);
+
+ hvm_pirq_eoi(pirq);
+}
+
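+/* EOI a GSI that is mapped 1:1 to a machine pirq (hardware domain only). */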
+static void hvm_gsi_eoi(struct domain *d, unsigned int gsi)
+{
+ struct pirq *pirq = pirq_info(d, gsi);
+
+ /* Check if GSI is actually mapped. */
+ if ( !pirq_dpci(pirq) )
+ return;
+
+ hvm_gsi_deassert(d, gsi);
+ hvm_pirq_eoi(pirq);
+}
+
+void hvm_dpci_eoi(unsigned int guest_gsi)
+{
+ struct domain *d = current->domain;
+ const struct hvm_irq_dpci *hvm_irq_dpci;
+ const struct hvm_girq_dpci_mapping *girq;
+
+ if ( !is_iommu_enabled(d) )
+ return;
+
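+    /* Hardware domain GSIs are identity mapped: no girq lookup is needed. */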
+ if ( is_hardware_domain(d) )
+ {
+ spin_lock(&d->event_lock);
+ hvm_gsi_eoi(d, guest_gsi);
+ goto unlock;
+ }
+
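+    /* ISA IRQs are translated and EOIed by a dedicated helper. */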
+ if ( guest_gsi < NR_ISAIRQS )
+ {
+ hvm_dpci_isairq_eoi(d, guest_gsi);
+ return;
+ }
+
+ spin_lock(&d->event_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if ( !hvm_irq_dpci )
+ goto unlock;
+
+ list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
+ _hvm_dpci_eoi(d, girq);
+
+unlock:
+ spin_unlock(&d->event_lock);
+}
+
int pt_irq_create_bind(
struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
{
@@ -860,88 +949,6 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
spin_unlock(&d->event_lock);
 }
 
-static void hvm_pirq_eoi(struct pirq *pirq)
-{
- struct hvm_pirq_dpci *pirq_dpci;
-
- if ( !pirq )
- {
- ASSERT_UNREACHABLE();
- return;
- }
-
- pirq_dpci = pirq_dpci(pirq);
-
- /*
- * No need to get vector lock for timer
- * since interrupt is still not EOIed
- */
- if ( --pirq_dpci->pending ||
- /* When the interrupt source is MSI no Ack should be performed. */
- (pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE) )
- return;
-
- pirq_guest_eoi(pirq);
-}
-
-static void __hvm_dpci_eoi(struct domain *d,
- const struct hvm_girq_dpci_mapping *girq)
-{
- struct pirq *pirq = pirq_info(d, girq->machine_gsi);
-
- if ( !hvm_domain_use_pirq(d, pirq) )
- hvm_pci_intx_deassert(d, girq->device, girq->intx);
-
- hvm_pirq_eoi(pirq);
-}
-
-static void hvm_gsi_eoi(struct domain *d, unsigned int gsi)
-{
- struct pirq *pirq = pirq_info(d, gsi);
-
- /* Check if GSI is actually mapped. */
- if ( !pirq_dpci(pirq) )
- return;
-
- hvm_gsi_deassert(d, gsi);
- hvm_pirq_eoi(pirq);
-}
-
-void hvm_dpci_eoi(unsigned int guest_gsi)
-{
- struct domain *d = current->domain;
- const struct hvm_irq_dpci *hvm_irq_dpci;
- const struct hvm_girq_dpci_mapping *girq;
-
- if ( !is_iommu_enabled(d) )
- return;
-
- if ( is_hardware_domain(d) )
- {
- spin_lock(&d->event_lock);
- hvm_gsi_eoi(d, guest_gsi);
- goto unlock;
- }
-
- if ( guest_gsi < NR_ISAIRQS )
- {
- hvm_dpci_isairq_eoi(d, guest_gsi);
- return;
- }
-
- spin_lock(&d->event_lock);
- hvm_irq_dpci = domain_get_irq_dpci(d);
-
- if ( !hvm_irq_dpci )
- goto unlock;
-
- list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
- __hvm_dpci_eoi(d, girq);
-
-unlock:
- spin_unlock(&d->event_lock);
-}
-
static int pci_clean_dpci_irq(struct domain *d,
struct hvm_pirq_dpci *pirq_dpci, void *arg)
{