@@ -52,8 +52,7 @@ struct qtnf_bus_ops {
struct qtnf_bus {
struct device *dev;
enum qtnf_fw_state fw_state;
- u32 chip;
- u32 chiprev;
+ u32 chipid;
struct qtnf_bus_ops *bus_ops;
struct qtnf_wmac *mac[QTNF_MAX_MAC];
struct qtnf_qlink_transport trans;
@@ -335,10 +335,11 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!bus)
return -ENOMEM;
+ bus->fw_state = QTNF_FW_STATE_DETACHED;
+ bus->chipid = chipid;
pcie_priv = get_bus_priv(bus);
pci_set_drvdata(pdev, bus);
bus->dev = &pdev->dev;
- bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
pcie_priv->flashboot = flashboot;
@@ -23,9 +23,6 @@
#include "shm_ipc.h"
#include "debug.h"
-#define PEARL_TX_BD_SIZE_DEFAULT 32
-#define PEARL_RX_BD_SIZE_DEFAULT 256
-
struct qtnf_pearl_bda {
__le16 bda_len;
__le16 bda_version;
@@ -73,8 +70,28 @@ struct qtnf_pearl_fw_hdr {
__le32 crc;
} __packed;
+struct qtnf_pcie_pearl_state;
+
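+/* per-chip-revision PEARL HDP (host data path) ops and BD ring defaults */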
+struct qtnf_pcie_pearl_hdp_ops {
+ u16 hdp_rx_bd_size_default;
+ u16 hdp_tx_bd_size_default;
+ int (*hdp_alloc_bd_table)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_init)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_hhbm_init)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_set_queues)(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size);
+ void (*hdp_rbd_attach)(struct qtnf_pcie_pearl_state *ps, u16 index,
+ dma_addr_t paddr);
+ u32 (*hdp_get_tx_done_index)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_tx_hw_push)(struct qtnf_pcie_pearl_state *ps, int index,
+ dma_addr_t paddr);
+};
+
struct qtnf_pcie_pearl_state {
struct qtnf_pcie_bus_priv base;
+ const struct qtnf_pcie_pearl_hdp_ops *hdp_ops;
/* lock for irq configuration changes */
spinlock_t irq_lock;
@@ -97,6 +114,180 @@ struct qtnf_pcie_pearl_state {
u32 pcie_irq_uf_count;
};
+/* HDP common ops */
+
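+/* validate requested BD ring sizes, falling back to per-revision defaults */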
+static void hdp_set_queues_common(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+ if (tx_bd_size == 0) {
+ tx_bd_size = ps->hdp_ops->hdp_tx_bd_size_default;
+ } else if (!is_power_of_2(tx_bd_size)) {
+ pr_warn("invalid tx_bd_size value %u, use default %u\n",
+ tx_bd_size, ps->hdp_ops->hdp_tx_bd_size_default);
+ tx_bd_size = ps->hdp_ops->hdp_tx_bd_size_default;
+ }
+
+ if (rx_bd_size == 0) {
+ rx_bd_size = ps->hdp_ops->hdp_rx_bd_size_default;
+ } else if (!is_power_of_2(rx_bd_size)) {
+ pr_warn("invalid rx_bd_size value %u, use default %u\n",
+ rx_bd_size, ps->hdp_ops->hdp_rx_bd_size_default);
+ rx_bd_size = ps->hdp_ops->hdp_rx_bd_size_default;
+ }
+
+ priv->tx_bd_num = tx_bd_size;
+ priv->rx_bd_num = rx_bd_size;
+}
+
+/* HDP ops: rev B */
+
+static int hdp_alloc_bd_table_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+
+ len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* tx bd */
+
+ ps->bd_table_vaddr = vaddr;
+ ps->bd_table_paddr = paddr;
+ ps->bd_table_len = len;
+
+ ps->tx_bd_vbase = vaddr;
+ ps->tx_bd_pbase = paddr;
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+
+ ps->rx_bd_vbase = vaddr;
+ ps->rx_bd_pbase = paddr;
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ return 0;
+}
+
+static void hdp_rbd_attach_rev_b(struct qtnf_pcie_pearl_state *ps, u16 index,
+ dma_addr_t paddr)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
+
+ writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
+}
+
+static void hdp_hhbm_init_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ val |= HHBM_CONFIG_SOFT_RESET;
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ usleep_range(50, 100);
+ val &= ~HHBM_CONFIG_SOFT_RESET;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ val |= HHBM_64BIT;
+#endif
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
+}
+
+static void hdp_init_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->rx_bd_pbase),
+ PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(ps->rx_bd_pbase),
+ PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
+ PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
+}
+
+static void hdp_set_queues_rev_b(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 val;
+
+ hdp_set_queues_common(ps, tx_bd_size, rx_bd_size);
+
+ val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+ if (val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("invalid tx_bd_size value %u, use default %u\n",
+ priv->tx_bd_num, ps->hdp_ops->hdp_tx_bd_size_default);
+ priv->tx_bd_num = ps->hdp_ops->hdp_tx_bd_size_default;
+ }
+
+ val = priv->rx_bd_num * sizeof(dma_addr_t);
+ if (val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("invalid rx_bd_size value %u, use default %u\n",
+ priv->rx_bd_num, ps->hdp_ops->hdp_rx_bd_size_default);
+ priv->rx_bd_num = ps->hdp_ops->hdp_rx_bd_size_default;
+ }
+}
+
+static u32 hdp_get_tx_done_index_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 v;
+
+ v = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
+ & (priv->tx_bd_num - 1);
+
+ return v;
+}
+
+static void hdp_tx_hw_push_rev_b(struct qtnf_pcie_pearl_state *ps, int index,
+ dma_addr_t paddr)
+{
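+ /* index is unused on rev B: hardware consumes the pushed descriptor address */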
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
+}
+
+static const struct qtnf_pcie_pearl_hdp_ops hdp_ops_rev_b = {
+ .hdp_tx_bd_size_default = 32,
+ .hdp_rx_bd_size_default = 256,
+ .hdp_alloc_bd_table = hdp_alloc_bd_table_rev_b,
+ .hdp_init = hdp_init_rev_b,
+ .hdp_hhbm_init = hdp_hhbm_init_rev_b,
+ .hdp_set_queues = hdp_set_queues_rev_b,
+ .hdp_rbd_attach = hdp_rbd_attach_rev_b,
+ .hdp_get_tx_done_index = hdp_get_tx_done_index_rev_b,
+ .hdp_tx_hw_push = hdp_tx_hw_push_rev_b,
+};
+
+/* common */
+
static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
unsigned long flags;
@@ -229,56 +420,6 @@ static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
return 0;
}
-static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
-{
- struct qtnf_pcie_bus_priv *priv = &ps->base;
- dma_addr_t paddr;
- void *vaddr;
- int len;
-
- len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
- priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
-
- vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
-
- /* tx bd */
-
- ps->bd_table_vaddr = vaddr;
- ps->bd_table_paddr = paddr;
- ps->bd_table_len = len;
-
- ps->tx_bd_vbase = vaddr;
- ps->tx_bd_pbase = paddr;
-
- pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
- priv->tx_bd_r_index = 0;
- priv->tx_bd_w_index = 0;
-
- /* rx bd */
-
- vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
- paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
-
- ps->rx_bd_vbase = vaddr;
- ps->rx_bd_pbase = paddr;
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
- writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
- PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
-
- pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
- return 0;
-}
-
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
@@ -312,14 +453,8 @@ static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
/* sync up all descriptor updates */
wmb();
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
+ ps->hdp_ops->hdp_rbd_attach(ps, index, paddr);
- writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
return 0;
}
@@ -379,66 +514,15 @@ static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
}
}
-static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
-{
- u32 val;
-
- val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
- val |= HHBM_CONFIG_SOFT_RESET;
- writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
- usleep_range(50, 100);
- val &= ~HHBM_CONFIG_SOFT_RESET;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- val |= HHBM_64BIT;
-#endif
- writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
- writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
-
- return 0;
-}
-
static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
unsigned int tx_bd_size,
unsigned int rx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
- u32 val;
- if (tx_bd_size == 0)
- tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;
-
- val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
-
- if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("invalid tx_bd_size value %u, use default %u\n",
- tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
- priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
- } else {
- priv->tx_bd_num = tx_bd_size;
- }
-
- if (rx_bd_size == 0)
- rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;
-
- val = rx_bd_size * sizeof(dma_addr_t);
-
- if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("invalid rx_bd_size value %u, use default %u\n",
- rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
- priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
- } else {
- priv->rx_bd_num = rx_bd_size;
- }
-
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
-
- ret = pearl_hhbm_init(ps);
- if (ret) {
- pr_err("failed to init h/w queues\n");
- return ret;
- }
+ ps->hdp_ops->hdp_set_queues(ps, tx_bd_size, rx_bd_size);
+ ps->hdp_ops->hdp_hhbm_init(ps);
ret = qtnf_pcie_alloc_skb_array(priv);
if (ret) {
@@ -446,7 +530,7 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
return ret;
}
- ret = pearl_alloc_bd_table(ps);
+ ret = ps->hdp_ops->hdp_alloc_bd_table(ps);
if (ret) {
pr_err("failed to allocate bd table\n");
return ret;
@@ -458,6 +542,8 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
return ret;
}
+ ps->hdp_ops->hdp_init(ps);
+
return ret;
}
@@ -474,9 +560,7 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
- tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
- & (priv->tx_bd_num - 1);
-
+ tx_done_index = ps->hdp_ops->hdp_get_tx_done_index(ps);
i = priv->tx_bd_r_index;
while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
@@ -580,18 +664,13 @@ static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
/* write new TX descriptor to PCIE_RX_FIFO on EP */
txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
-
if (++i >= priv->tx_bd_num)
i = 0;
priv->tx_bd_w_index = i;
+ ps->hdp_ops->hdp_tx_hw_push(ps, i, txbd_paddr);
+
tx_done:
if (ret && skb) {
pr_err_ratelimited("drop skb\n");
@@ -739,7 +818,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
consume = 0;
}
- if (skb && (skb_tailroom(skb) <  psize)) {
+ if (skb && (skb_tailroom(skb) < psize)) {
pr_err("skip packet with invalid length: %u > %u\n",
psize, skb_tailroom(skb));
consume = 0;
@@ -777,7 +856,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
priv->rx_bd_r_index = r_idx;
- /* repalce processed buffer by a new one */
+ /* replace processed buffer by a new one */
w_idx = priv->rx_bd_w_index;
while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
priv->rx_bd_num) > 0) {
@@ -884,22 +963,10 @@ static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
- seq_printf(s, "tx_bd_p_index(%u)\n",
- readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
- & (priv->tx_bd_num - 1));
seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
- seq_printf(s, "tx queue len(%u)\n",
- CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
- priv->tx_bd_num));
seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
- seq_printf(s, "rx_bd_p_index(%u)\n",
- readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
- & (priv->rx_bd_num - 1));
seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
- seq_printf(s, "rx alloc queue len(%u)\n",
- CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
- priv->rx_bd_num));
return 0;
}
@@ -1108,7 +1175,8 @@ static u64 qtnf_pearl_dma_mask_get(void)
#endif
}
-static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus,
+ unsigned int tx_bd_size,
unsigned int rx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
@@ -1120,6 +1188,16 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
spin_lock_init(&ps->irq_lock);
INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
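+ /* bind the HDP ops matching the detected chip revision */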
+ switch (bus->chipid) {
+ case QTN_CHIP_ID_PEARL:
+ case QTN_CHIP_ID_PEARL_B:
+ ps->hdp_ops = &hdp_ops_rev_b;
+ break;
+ default:
+ pr_err("unsupported PEARL chip ID 0x%x\n", bus->chipid);
+ return -ENOTSUPP;
+ }
+
ps->pcie_reg_base = ps->base.dmareg_bar;
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
Data path operations may differ between chip revisions. Extract such
operations and settings into a separate structure in order to support
multiple QSR10G chip revisions with a single module. Remove data path
counters specific to a single chip revision.

Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
---
 drivers/net/wireless/quantenna/qtnfmac/bus.h       |   3 +-
 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c |   3 +-
 .../wireless/quantenna/qtnfmac/pcie/pearl_pcie.c   | 356 +++++++++++++--------
 3 files changed, 220 insertions(+), 142 deletions(-)
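
For illustration only, a minimal stand-alone sketch of the ops-table dispatch
this patch introduces. All names, the two-member ops structure and the
0x50/0x51 chip IDs below are simplified stand-ins, not the driver's actual
definitions (the real qtnf_pcie_pearl_hdp_ops carries seven function pointers
plus two ring-size defaults); the sketch compiles as plain user-space C:

#include <stdio.h>

struct hdp_state;

/* trimmed-down analogue of struct qtnf_pcie_pearl_hdp_ops */
struct hdp_ops {
	unsigned short tx_bd_size_default;
	unsigned short rx_bd_size_default;
	void (*hdp_init)(struct hdp_state *s);
};

/* analogue of qtnf_pcie_pearl_state: ops are bound once at probe time */
struct hdp_state {
	const struct hdp_ops *ops;
};

static void hdp_init_rev_b(struct hdp_state *s)
{
	printf("rev B init: tx ring %u, rx ring %u\n",
	       s->ops->tx_bd_size_default, s->ops->rx_bd_size_default);
}

static const struct hdp_ops hdp_ops_rev_b = {
	.tx_bd_size_default = 32,
	.rx_bd_size_default = 256,
	.hdp_init = hdp_init_rev_b,
};

/* mirrors the switch on bus->chipid in qtnf_pcie_pearl_probe */
static int hdp_bind_ops(struct hdp_state *s, unsigned int chipid)
{
	switch (chipid) {
	case 0x50: /* assumed value of QTN_CHIP_ID_PEARL */
	case 0x51: /* assumed value of QTN_CHIP_ID_PEARL_B */
		s->ops = &hdp_ops_rev_b;
		return 0;
	default:
		return -1; /* unsupported revision */
	}
}

int main(void)
{
	struct hdp_state s;

	if (hdp_bind_ops(&s, 0x51))
		return 1;
	s.ops->hdp_init(&s); /* all data path calls go through the ops table */
	return 0;
}

Supporting a further chip revision then amounts to adding one more const ops
table and one more switch case, leaving the common transfer paths untouched.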