@@ -453,8 +453,11 @@ struct xsc_core_device {
struct pci_dev *pdev;
struct device *device;
struct xsc_priv priv;
+ void *eth_priv;
struct xsc_dev_resource *dev_res;
+ void (*event_handler)(void *adapter);
+
void __iomem *bar;
int bar_num;
@@ -486,6 +489,7 @@ struct xsc_core_device {
u16 fw_version_patch;
u32 fw_version_tweak;
u8 fw_version_extra_flag;
+ cpumask_var_t xps_cpumask;
};
int xsc_core_create_resource_common(struct xsc_core_device *xdev,
@@ -493,6 +497,8 @@ int xsc_core_create_resource_common(struct xsc_core_device *xdev,
void xsc_core_destroy_resource_common(struct xsc_core_device *xdev,
struct xsc_core_qp *qp);
struct xsc_eq *xsc_core_eq_get(struct xsc_core_device *xdev, int i);
+int xsc_core_vector2eqn(struct xsc_core_device *xdev, int vector, int *eqn,
+ unsigned int *irqn);
static inline void *xsc_buf_offset(struct xsc_buf *buf, int offset)
{
@@ -6,4 +6,4 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc
obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o
-xsc_pci-y := main.o cmdq.o hw.o qp.o cq.o alloc.o eq.o
+xsc_pci-y := main.o cmdq.o hw.o qp.o cq.o alloc.o eq.o pci_irq.o
@@ -9,6 +9,7 @@
#include "qp.h"
#include "cq.h"
#include "eq.h"
+#include "pci_irq.h"
unsigned int xsc_debug_mask;
module_param_named(debug_mask, xsc_debug_mask, uint, 0644);
@@ -282,10 +283,18 @@ static int xsc_load(struct xsc_core_device *xdev)
goto out;
}
+ err = xsc_irq_eq_create(xdev);
+ if (err) {
+ xsc_core_err(xdev, "xsc_irq_eq_create failed %d\n", err);
+ goto err_irq_eq_create;
+ }
+
set_bit(XSC_INTERFACE_STATE_UP, &xdev->intf_state);
mutex_unlock(&xdev->intf_state_mutex);
return 0;
+err_irq_eq_create:
+ xsc_hw_cleanup(xdev);
out:
mutex_unlock(&xdev->intf_state_mutex);
return err;
@@ -302,7 +311,7 @@ static int xsc_unload(struct xsc_core_device *xdev)
}
clear_bit(XSC_INTERFACE_STATE_UP, &xdev->intf_state);
-
+ xsc_irq_eq_destroy(xdev);
xsc_hw_cleanup(xdev);
out:
new file mode 100644
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#include "common/xsc_driver.h"
+#include "common/xsc_core.h"
+#include "eq.h"
+#include "pci_irq.h"
+
+/* Number of entries in each completion EQ */
+enum {
+	XSC_COMP_EQ_SIZE = 1024,
+};
+
+/* EQ usage classes.
+ * NOTE(review): not referenced anywhere in this file — confirm it is
+ * needed here or move it next to its eventual user.
+ */
+enum xsc_eq_type {
+	XSC_EQ_TYPE_COMP,
+	XSC_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	XSC_EQ_TYPE_PF,
+#endif
+};
+
+/* Per-vector IRQ bookkeeping: notifier chain, affinity mask, name */
+struct xsc_irq {
+	struct atomic_notifier_head nh;
+	cpumask_var_t mask;
+	char name[XSC_MAX_IRQ_NAME];
+};
+
+/* All IRQs owned by one device */
+struct xsc_irq_table {
+	struct xsc_irq *irq;
+	int nvec;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap *rmap;
+#endif
+};
+
+/* NOTE(review): file-scope global with external linkage and no user in
+ * this file — consider making it static or dropping it if unused.
+ */
+struct xsc_msix_resource *g_msix_xres;
+
+/* Quiesce one MSI-X vector and, while the firmware is still reachable,
+ * release the handler registered on it.
+ */
+static void xsc_free_irq(struct xsc_core_device *xdev, unsigned int vector)
+{
+	unsigned int irqn = pci_irq_vector(xdev->pdev, vector);
+
+	disable_irq(irqn);
+	if (xsc_fw_is_available(xdev))
+		free_irq(irqn, xdev);
+}
+
+/* Bind completion vector @i: allocate its rx affinity mask, record the
+ * spread tx (XPS) CPU in the shared xps_cpumask, and hand the rx mask
+ * to the IRQ core as an affinity hint.
+ *
+ * Returns 0 on success or a negative errno; on failure nothing
+ * allocated by this call is left behind.
+ */
+static int set_comp_irq_affinity_hint(struct xsc_core_device *xdev, int i)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	int vecidx = table->eq_vec_comp_base + i;
+	struct xsc_eq *eq = xsc_core_eq_get(xdev, i);
+	unsigned int irqn;
+	int ret;
+
+	irqn = pci_irq_vector(xdev->pdev, vecidx);
+	if (!zalloc_cpumask_var(&eq->mask, GFP_KERNEL)) {
+		xsc_core_err(xdev, "zalloc_cpumask_var rx cpumask failed");
+		return -ENOMEM;
+	}
+
+	/* xps_cpumask is shared by all vectors: allocate it only on the
+	 * first call, otherwise every later call would leak the mask
+	 * allocated by the previous one.
+	 */
+	if (i == 0 && !zalloc_cpumask_var(&xdev->xps_cpumask, GFP_KERNEL)) {
+		xsc_core_err(xdev, "zalloc_cpumask_var tx cpumask failed");
+		ret = -ENOMEM;
+		goto err_free_rx_mask;
+	}
+
+	cpumask_set_cpu(cpumask_local_spread(i, xdev->priv.numa_node),
+			xdev->xps_cpumask);
+	/* NOTE(review): eq->mask is never populated before being used as
+	 * the hint — confirm whether a CPU should be set in it here too.
+	 */
+	ret = irq_set_affinity_hint(irqn, eq->mask);
+	if (ret) {
+		/* Drop the (possibly stored) hint before freeing the mask
+		 * it points at, then release the mask so it cannot leak.
+		 */
+		irq_set_affinity_hint(irqn, NULL);
+		goto err_free_rx_mask;
+	}
+
+	return 0;
+
+err_free_rx_mask:
+	free_cpumask_var(eq->mask);
+	return ret;
+}
+
+/* Undo set_comp_irq_affinity_hint() for completion vector @i. */
+static void clear_comp_irq_affinity_hint(struct xsc_core_device *xdev, int i)
+{
+	struct xsc_eq *eq = xsc_core_eq_get(xdev, i);
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	int irqn = pci_irq_vector(xdev->pdev, table->eq_vec_comp_base + i);
+
+	irq_set_affinity_hint(irqn, NULL);
+	free_cpumask_var(eq->mask);
+}
+
+/* Apply affinity hints to every completion vector; on failure roll
+ * back the hints already applied and release the shared tx cpumask.
+ */
+static int set_comp_irq_affinity_hints(struct xsc_core_device *xdev)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	int err = 0;
+	int vec;
+
+	for (vec = 0; vec < table->num_comp_vectors; vec++) {
+		err = set_comp_irq_affinity_hint(xdev, vec);
+		if (!err)
+			continue;
+		/* Unwind the vectors that were already configured */
+		while (--vec >= 0)
+			clear_comp_irq_affinity_hint(xdev, vec);
+		free_cpumask_var(xdev->xps_cpumask);
+		break;
+	}
+
+	return err;
+}
+
+/* Drop every completion-vector hint plus the shared tx cpumask. */
+static void clear_comp_irq_affinity_hints(struct xsc_core_device *xdev)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	int vec;
+
+	for (vec = 0; vec < table->num_comp_vectors; vec++)
+		clear_comp_irq_affinity_hint(xdev, vec);
+	free_cpumask_var(xdev->xps_cpumask);
+}
+
+/* Allocate MSI-X vectors and the per-vector bookkeeping array.
+ * The first XSC_EQ_VEC_COMP_BASE vectors are reserved for control
+ * traffic; everything above that is reported as completion vectors.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int xsc_alloc_irq_vectors(struct xsc_core_device *xdev)
+{
+	struct xsc_dev_resource *dev_res = xdev->dev_res;
+	struct xsc_eq_table *table = &dev_res->eq_table;
+	int nvec = xdev->caps.msix_num;
+	int nvec_base;
+	int err;
+
+	nvec_base = XSC_EQ_VEC_COMP_BASE;
+	if (nvec <= nvec_base) {
+		xsc_core_warn(xdev, "failed to alloc irq vector(%d)\n", nvec);
+		return -ENOMEM;
+	}
+
+	dev_res->irq_info = kcalloc(nvec, sizeof(*dev_res->irq_info), GFP_KERNEL);
+	if (!dev_res->irq_info)
+		return -ENOMEM;
+
+	nvec = pci_alloc_irq_vectors(xdev->pdev, nvec_base + 1, nvec, PCI_IRQ_MSIX);
+	if (nvec < 0) {
+		err = nvec;
+		goto err_free_irq_info;
+	}
+
+	table->eq_vec_comp_base = nvec_base;
+	table->num_comp_vectors = nvec - nvec_base;
+	xdev->msix_vec_base = xdev->caps.msix_base;
+	xsc_core_info(xdev,
+		      "alloc msix_vec_num=%d, comp_num=%d, max_msix_num=%d, msix_vec_base=%d\n",
+		      nvec, table->num_comp_vectors, xdev->caps.msix_num, xdev->msix_vec_base);
+
+	return 0;
+
+err_free_irq_info:
+	/* The vector allocation failed, so there are no vectors to free;
+	 * calling pci_free_irq_vectors() here would be spurious.  Clear
+	 * the pointer so teardown cannot double-free the info array.
+	 */
+	kfree(dev_res->irq_info);
+	dev_res->irq_info = NULL;
+	return err;
+}
+
+/* Release the MSI-X vectors and the per-vector bookkeeping array.
+ * The PCI call is skipped when the firmware is unreachable, but the
+ * host memory for irq_info must still be freed to avoid a leak.
+ */
+static void xsc_free_irq_vectors(struct xsc_core_device *xdev)
+{
+	struct xsc_dev_resource *dev_res = xdev->dev_res;
+
+	if (xsc_fw_is_available(xdev))
+		pci_free_irq_vectors(xdev->pdev);
+
+	kfree(dev_res->irq_info);
+	dev_res->irq_info = NULL;
+}
+
+/* Translate completion-vector index @vector into its EQ number and
+ * Linux IRQ number by scanning the completion EQ list under the table
+ * lock.
+ *
+ * Returns 0 on success, -ENOENT when no completion EQ matches.
+ * NOTE(review): when MSI-X is disabled this returns 0 without writing
+ * *eqn or *irqn, so a caller would read uninitialized values — confirm
+ * all callers check msix_enable first, or consider an error return.
+ */
+int xsc_core_vector2eqn(struct xsc_core_device *xdev, int vector, int *eqn,
+			unsigned int *irqn)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	struct xsc_eq *eq, *n;
+	int err = -ENOENT;
+
+	if (!xdev->caps.msix_enable)
+		return 0;
+
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		if (eq->index == vector) {
+			*eqn = eq->eqn;
+			*irqn = eq->irqn;
+			err = 0;
+			break;
+		}
+	}
+	spin_unlock(&table->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(xsc_core_vector2eqn);
+
+/* Destroy every completion EQ on the table's list.
+ *
+ * The entry is unlinked under the lock, then the lock is dropped
+ * around xsc_destroy_unmap_eq() — presumably because that call can
+ * block; confirm.  Unlinking first keeps other lock holders from
+ * reaching the EQ being destroyed.
+ * NOTE(review): the next-pointer cached by list_for_each_entry_safe
+ * may go stale while the lock is dropped if the list can change
+ * concurrently — confirm this teardown is single-threaded.
+ */
+static void free_comp_eqs(struct xsc_core_device *xdev)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	struct xsc_eq *eq, *n;
+
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		list_del(&eq->list);
+		spin_unlock(&table->lock);
+		if (xsc_destroy_unmap_eq(xdev, eq))
+			xsc_core_warn(xdev, "failed to destroy EQ 0x%x\n", eq->eqn);
+		kfree(eq);
+		spin_lock(&table->lock);
+	}
+	spin_unlock(&table->lock);
+}
+
+/* Create one completion EQ per completion vector and queue each on the
+ * table's comp_eqs_list.  On any failure, everything created so far is
+ * torn down again via free_comp_eqs().
+ */
+static int alloc_comp_eqs(struct xsc_core_device *xdev)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	char name[XSC_MAX_IRQ_NAME];
+	int vec;
+	int err;
+
+	INIT_LIST_HEAD(&table->comp_eqs_list);
+
+	for (vec = 0; vec < table->num_comp_vectors; vec++) {
+		struct xsc_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+
+		if (!eq) {
+			err = -ENOMEM;
+			goto err_destroy;
+		}
+
+		snprintf(name, XSC_MAX_IRQ_NAME, "xsc_comp%d", vec);
+		err = xsc_create_map_eq(xdev, eq,
+					vec + table->eq_vec_comp_base,
+					XSC_COMP_EQ_SIZE, name);
+		if (err) {
+			kfree(eq);
+			goto err_destroy;
+		}
+
+		eq->index = vec;
+		spin_lock(&table->lock);
+		list_add_tail(&eq->list, &table->comp_eqs_list);
+		spin_unlock(&table->lock);
+	}
+
+	return 0;
+
+err_destroy:
+	free_comp_eqs(xdev);
+	return err;
+}
+
+static irqreturn_t xsc_cmd_handler(int irq, void *arg)
+{
+ struct xsc_core_device *xdev = (struct xsc_core_device *)arg;
+ int err;
+
+ disable_irq_nosync(xdev->cmd.irqn);
+ err = xsc_cmd_err_handler(xdev);
+ if (!err)
+ xsc_cmd_resp_handler(xdev);
+ enable_irq(xdev->cmd.irqn);
+
+ return IRQ_HANDLED;
+}
+
+/* Point the device's cmdq MSI-X register at @vecidx and register the
+ * command-queue handler on the matching Linux IRQ.
+ */
+static int xsc_request_irq_for_cmdq(struct xsc_core_device *xdev, u8 vecidx)
+{
+	char *name = xdev->dev_res->irq_info[vecidx].name;
+
+	/* Tell the hardware which vector carries cmdq interrupts */
+	writel(xdev->msix_vec_base + vecidx,
+	       REG_ADDR(xdev, xdev->cmd.reg.msix_vec_addr));
+
+	snprintf(name, XSC_MAX_IRQ_NAME, "%s@pci:%s",
+		 "xsc_cmd", pci_name(xdev->pdev));
+	xdev->cmd.irqn = pci_irq_vector(xdev->pdev, vecidx);
+	return request_irq(xdev->cmd.irqn, xsc_cmd_handler, 0, name, xdev);
+}
+
+/* Release the cmdq interrupt requested by xsc_request_irq_for_cmdq(). */
+static void xsc_free_irq_for_cmdq(struct xsc_core_device *xdev)
+{
+	xsc_free_irq(xdev, XSC_VEC_CMD);
+}
+
+/* Firmware event interrupt: forward to the ethernet driver's callback
+ * when both the private context and the handler have been registered.
+ */
+static irqreturn_t xsc_event_handler(int irq, void *arg)
+{
+	struct xsc_core_device *xdev = arg;
+
+	xsc_core_dbg(xdev, "cmd event hint irq: %d\n", irq);
+
+	if (!xdev->eth_priv || !xdev->event_handler)
+		return IRQ_NONE;
+
+	xdev->event_handler(xdev->eth_priv);
+
+	return IRQ_HANDLED;
+}
+
+/* Register the handler for firmware event notifications. */
+static int xsc_request_irq_for_event(struct xsc_core_device *xdev)
+{
+	char *name = xdev->dev_res->irq_info[XSC_VEC_CMD_EVENT].name;
+
+	snprintf(name, XSC_MAX_IRQ_NAME, "%s@pci:%s",
+		 "xsc_eth_event", pci_name(xdev->pdev));
+	return request_irq(pci_irq_vector(xdev->pdev, XSC_VEC_CMD_EVENT),
+			   xsc_event_handler, 0, name, xdev);
+}
+
+/* Release the event interrupt requested by xsc_request_irq_for_event(). */
+static void xsc_free_irq_for_event(struct xsc_core_device *xdev)
+{
+	xsc_free_irq(xdev, XSC_VEC_CMD_EVENT);
+}
+
+/* Ask the firmware to enable MSI-X interrupt delivery.
+ * Returns 0 on success or the error from xsc_cmd_exec().
+ */
+static int xsc_cmd_enable_msix(struct xsc_core_device *xdev)
+{
+	struct xsc_msix_table_info_mbox_in in = {};
+	struct xsc_msix_table_info_mbox_out out = {};
+	int err;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_MSIX);
+
+	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		xsc_core_err(xdev, "xsc_cmd_exec enable msix failed %d\n", err);
+
+	return err;
+}
+
+/* Bring up the interrupt/EQ infrastructure: MSI-X vectors, EQs,
+ * completion EQs, the cmdq and event interrupt handlers, affinity
+ * hints, and finally event-driven command mode with MSI-X enabled in
+ * firmware.
+ *
+ * Returns 0 on success (including when MSI-X is unsupported) or a
+ * negative errno, with everything torn down again on failure.
+ */
+int xsc_irq_eq_create(struct xsc_core_device *xdev)
+{
+	int err;
+
+	if (xdev->caps.msix_enable == 0)
+		return 0;
+
+	err = xsc_alloc_irq_vectors(xdev);
+	if (err) {
+		xsc_core_err(xdev, "enable msix failed, err=%d\n", err);
+		goto out;
+	}
+
+	err = xsc_start_eqs(xdev);
+	if (err) {
+		xsc_core_err(xdev, "failed to start EQs, err=%d\n", err);
+		goto err_free_irq_vectors;
+	}
+
+	err = alloc_comp_eqs(xdev);
+	if (err) {
+		xsc_core_err(xdev, "failed to alloc comp EQs, err=%d\n", err);
+		goto err_stop_eqs;
+	}
+
+	err = xsc_request_irq_for_cmdq(xdev, XSC_VEC_CMD);
+	if (err) {
+		xsc_core_err(xdev, "failed to request irq for cmdq, err=%d\n", err);
+		goto err_free_comp_eqs;
+	}
+
+	err = xsc_request_irq_for_event(xdev);
+	if (err) {
+		xsc_core_err(xdev, "failed to request irq for event, err=%d\n", err);
+		goto err_free_irq_cmdq;
+	}
+
+	err = set_comp_irq_affinity_hints(xdev);
+	if (err) {
+		xsc_core_err(xdev, "failed to alloc affinity hint cpumask, err=%d\n", err);
+		goto err_free_irq_evnt;
+	}
+
+	xsc_cmd_use_events(xdev);
+	err = xsc_cmd_enable_msix(xdev);
+	if (err) {
+		xsc_core_err(xdev, "xsc_cmd_enable_msix failed %d.\n", err);
+		xsc_cmd_use_polling(xdev);
+		/* The affinity hints applied above must be undone on this
+		 * path as well, otherwise the cpumasks leak.
+		 */
+		goto err_clear_affinity_hints;
+	}
+	return 0;
+
+err_clear_affinity_hints:
+	clear_comp_irq_affinity_hints(xdev);
+err_free_irq_evnt:
+	xsc_free_irq_for_event(xdev);
+err_free_irq_cmdq:
+	xsc_free_irq_for_cmdq(xdev);
+err_free_comp_eqs:
+	free_comp_eqs(xdev);
+err_stop_eqs:
+	xsc_stop_eqs(xdev);
+err_free_irq_vectors:
+	xsc_free_irq_vectors(xdev);
+out:
+	return err;
+}
+
+/* Tear down everything set up by xsc_irq_eq_create(): EQs are stopped
+ * first so no new completions arrive, then the affinity hints,
+ * completion EQs, interrupt handlers and MSI-X vectors are released.
+ */
+int xsc_irq_eq_destroy(struct xsc_core_device *xdev)
+{
+	if (!xdev->caps.msix_enable)
+		return 0;
+
+	xsc_stop_eqs(xdev);
+	clear_comp_irq_affinity_hints(xdev);
+	free_comp_eqs(xdev);
+
+	xsc_free_irq_for_event(xdev);
+	xsc_free_irq_for_cmdq(xdev);
+	xsc_free_irq_vectors(xdev);
+
+	return 0;
+}
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_PCI_IRQ_H
+#define XSC_PCI_IRQ_H
+
+#include "common/xsc_core.h"
+
+/* Set up / tear down MSI-X vectors, event queues and IRQ handlers */
+int xsc_irq_eq_create(struct xsc_core_device *xdev);
+int xsc_irq_eq_destroy(struct xsc_core_device *xdev);
+
+#endif
Implement interrupt management and event handling Co-developed-by: Honggang Wei <weihg@yunsilicon.com> Co-developed-by: Lei Yan <Jacky@yunsilicon.com> Signed-off-by: Xin Tian <tianx@yunsilicon.com> --- .../ethernet/yunsilicon/xsc/common/xsc_core.h | 6 + .../net/ethernet/yunsilicon/xsc/pci/Makefile | 2 +- .../net/ethernet/yunsilicon/xsc/pci/main.c | 11 +- .../net/ethernet/yunsilicon/xsc/pci/pci_irq.c | 427 ++++++++++++++++++ .../net/ethernet/yunsilicon/xsc/pci/pci_irq.h | 14 + 5 files changed, 458 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.h