@@ -322,6 +322,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QTN_CHIP_ID_PEARL:
case QTN_CHIP_ID_PEARL_B:
case QTN_CHIP_ID_PEARL_C:
+ case QTN_CHIP_ID_PEARL_C1:
bus = qtnf_pcie_pearl_alloc(pdev);
break;
case QTN_CHIP_ID_TOPAZ:
@@ -57,8 +57,6 @@ struct qtnf_pearl_rx_bd {
__le32 addr_h;
__le32 info;
__le32 info_h;
- __le32 next_ptr;
- __le32 next_ptr_h;
} __packed;

struct qtnf_pearl_fw_hdr {
@@ -78,12 +76,15 @@ struct qtnf_pcie_pearl_hdp_ops {
int (*hdp_alloc_bd_table)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_init)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_hhbm_init)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_enable)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_disable)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_set_queues)(struct qtnf_pcie_pearl_state *ps,
unsigned int tx_bd_size,
unsigned int rx_bd_size);
void (*hdp_rbd_attach)(struct qtnf_pcie_pearl_state *ps, u16 index,
dma_addr_t paddr);
u32 (*hdp_get_tx_done_index)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_tx_done_wrap)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_tx_hw_push)(struct qtnf_pcie_pearl_state *ps, int index,
dma_addr_t paddr);
@@ -105,8 +106,19 @@ struct qtnf_pcie_pearl_state {
struct qtnf_pearl_rx_bd *rx_bd_vbase;
dma_addr_t rx_bd_pbase;
+ dma_addr_t rx_dma_cnt_paddr;
+ void *rx_dma_cnt_vaddr;
+
+ dma_addr_t tx_dma_cnt_paddr;
+ void *tx_dma_cnt_vaddr;
+
dma_addr_t bd_table_paddr;
void *bd_table_vaddr;
+
+ u32 tx_bd_ack_wrap;
+ u16 rx_bd_h_index;
+ u16 tx_bd_h_index;
+
u32 bd_table_len;
u32 pcie_irq_mask;
u32 pcie_irq_rx_count;
@@ -280,12 +292,234 @@ static const struct qtnf_pcie_pearl_hdp_ops hdp_ops_rev_b = {
.hdp_alloc_bd_table = hdp_alloc_bd_table_rev_b,
.hdp_init = hdp_init_rev_b,
.hdp_hhbm_init = hdp_hhbm_init_rev_b,
+ .hdp_enable = NULL,
+ .hdp_disable = NULL,
.hdp_set_queues = hdp_set_queues_rev_b,
.hdp_rbd_attach = hdp_rbd_attach_rev_b,
.hdp_get_tx_done_index = hdp_get_tx_done_index_rev_b,
+ .hdp_tx_done_wrap = NULL,
.hdp_tx_hw_push = hdp_tx_hw_push_rev_b,
};

+/* HDP ops: rev C */
+
+static int hdp_alloc_bd_table_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+
+ len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd) +
+ 2 * QTN_HDP_DMA_PTR_SIZE;
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* tx bd */
+
+ ps->bd_table_vaddr = vaddr;
+ ps->bd_table_paddr = paddr;
+ ps->bd_table_len = len;
+
+ ps->tx_bd_vbase = vaddr;
+ ps->tx_bd_pbase = paddr;
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+
+ ps->rx_bd_vbase = vaddr;
+ ps->rx_bd_pbase = paddr;
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* dma completion counters */
+
+ vaddr = ((struct qtnf_pearl_rx_bd *)vaddr) + priv->rx_bd_num;
+ paddr += priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
+
+ ps->rx_dma_cnt_vaddr = vaddr;
+ ps->rx_dma_cnt_paddr = paddr;
+
+ vaddr += QTN_HDP_DMA_PTR_SIZE;
+ paddr += QTN_HDP_DMA_PTR_SIZE;
+
+ ps->tx_dma_cnt_vaddr = vaddr;
+ ps->tx_dma_cnt_paddr = paddr;
+
+ return 0;
+}
+
+static void hdp_rbd_attach_rev_c(struct qtnf_pcie_pearl_state *ps, u16 index,
+ dma_addr_t paddr)
+{
+ u16 ihw;
+
+ ihw = index | (ps->rx_bd_h_index & QTN_HDP_BD_WRAP);
+ if (ihw < ps->rx_bd_h_index)
+ ihw ^= QTN_HDP_BD_WRAP;
+
+ writel(ihw | ((ihw ^ QTN_HDP_BD_WRAP) << 16),
+ PCIE_HDP_TX0_DESC_Q_WR_PTR(ps->pcie_reg_base));
+
+ ps->rx_bd_h_index = ihw;
+}
+
+static void hdp_hhbm_init_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ val |= HHBM_CONFIG_SOFT_RESET;
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ usleep_range(50, 100);
+}
+
+static void hdp_init_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ int mrrs = pcie_get_readrq(priv->pdev);
+ int mps = pcie_get_mps(priv->pdev);
+ u32 val;
+
+ val = readl(PCIE_HDP_AXI_MASTER_CTRL(ps->pcie_reg_base));
+
+ if (mrrs > PCIE_HDP_AXI_BURST32_SIZE)
+ val |= PCIE_HDP_AXI_EN_BURST32_READ;
+ else
+ val &= ~PCIE_HDP_AXI_EN_BURST32_READ;
+
+ if (mps > PCIE_HDP_AXI_BURST32_SIZE)
+ val |= PCIE_HDP_AXI_EN_BURST32_WRITE;
+ else
+ val &= ~PCIE_HDP_AXI_EN_BURST32_WRITE;
+
+ writel(val, PCIE_HDP_AXI_MASTER_CTRL(ps->pcie_reg_base));
+
+ /* HDP Tx init */
+
+ writel(PCIE_HDP_RXDMA_INTERLEAVE | PCIE_HDP_RXDMA_NEW |
+ PCIE_HDP_RXDMA_WPTR, PCIE_HDP_RXDMA_CTRL(ps->pcie_reg_base));
+ writel(PCIE_HDP_TXDMA_NEW, PCIE_HDP_TX_DMA_CTRL(ps->pcie_reg_base));
+
+ writel(QTN_HOST_LO32(ps->tx_bd_pbase),
+ PCIE_HDP_RX2_DESC_BASE_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->tx_bd_pbase),
+ PCIE_HDP_RX2_DESC_BASE_ADDR_H(ps->pcie_reg_base));
+#endif
+
+ writel(priv->tx_bd_num | (sizeof(struct qtnf_pearl_tx_bd) << 16),
+ PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ writel(QTN_HOST_LO32(ps->tx_dma_cnt_paddr),
+ PCIE_HDP_RX2_DEV_PTR_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->tx_dma_cnt_paddr),
+ PCIE_HDP_RX2_DEV_PTR_ADDR_H(ps->pcie_reg_base));
+#endif
+ writel(ps->tx_bd_h_index,
+ PCIE_HDP_RX2_DESC_Q_WR_PTR(ps->pcie_reg_base));
+
+ /* HDP Rx init */
+
+ writel(QTN_HOST_LO32(ps->rx_bd_pbase),
+ PCIE_HDP_TX0_DESC_BASE_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->rx_bd_pbase),
+ PCIE_HDP_TX0_DESC_BASE_ADDR_H(ps->pcie_reg_base));
+#endif
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd) << 16),
+ PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ writel(QTN_HOST_LO32(ps->rx_dma_cnt_paddr),
+ PCIE_HDP_TX0_DEV_PTR_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->rx_dma_cnt_paddr),
+ PCIE_HDP_TX0_DEV_PTR_ADDR_H(ps->pcie_reg_base));
+#endif
+}
+
+static u32 hdp_get_tx_done_index_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 v;
+
+ v = le32_to_cpu(*((__le32 *)ps->tx_dma_cnt_vaddr)) &
+ (priv->tx_bd_num - 1);
+
+ return v;
+}
+
+static void hdp_tx_done_wrap_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ ps->tx_bd_ack_wrap ^= (QTN_HDP_BD_WRAP << 16);
+}
+
+static void hdp_tx_hw_push_rev_c(struct qtnf_pcie_pearl_state *ps, int index,
+ dma_addr_t paddr)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 ihw;
+
+ ihw = index | (ps->tx_bd_h_index & QTN_HDP_BD_WRAP);
+
+ if (ihw < ps->tx_bd_h_index)
+ ihw ^= QTN_HDP_BD_WRAP;
+
+ writel(ihw | (priv->tx_bd_r_index << 16) | ps->tx_bd_ack_wrap,
+ PCIE_HDP_RX2_DESC_Q_WR_PTR(ps->pcie_reg_base));
+
+ ps->tx_bd_h_index = ihw;
+}
+
+static void hdp_enable_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+ val |= PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ val = readl(PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+ val |= PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+}
+
+static void hdp_disable_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+ val &= ~PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ val = readl(PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+ val &= ~PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+}
+
+static const struct qtnf_pcie_pearl_hdp_ops hdp_ops_rev_c = {
+ .hdp_rx_bd_size_default = 512,
+ .hdp_tx_bd_size_default = 512,
+ .hdp_alloc_bd_table = hdp_alloc_bd_table_rev_c,
+ .hdp_init = hdp_init_rev_c,
+ .hdp_hhbm_init = hdp_hhbm_init_rev_c,
+ .hdp_enable = hdp_enable_rev_c,
+ .hdp_disable = hdp_disable_rev_c,
+ .hdp_set_queues = hdp_set_queues_common,
+ .hdp_rbd_attach = hdp_rbd_attach_rev_c,
+ .hdp_get_tx_done_index = hdp_get_tx_done_index_rev_c,
+ .hdp_tx_done_wrap = hdp_tx_done_wrap_rev_c,
+ .hdp_tx_hw_push = hdp_tx_hw_push_rev_c,
+};
+
/* common */

static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
@@ -586,8 +820,11 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
priv->tx_skb[i] = NULL;
count++;
- if (++i >= priv->tx_bd_num)
+ if (++i >= priv->tx_bd_num) {
+ if (ps->hdp_ops->hdp_tx_done_wrap)
+ ps->hdp_ops->hdp_tx_done_wrap(ps);
i = 0;
+ }
}
priv->tx_reclaim_done += count;
@@ -727,11 +964,17 @@ static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
u32 status;
priv->pcie_irq_count++;
- status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
+ writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+ writel(status & ps->pcie_irq_mask,
+ PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+ writel(ps->pcie_irq_mask & (~status),
+ PCIE_HDP_INT_EN(ps->pcie_reg_base));
+
if (!(status & ps->pcie_irq_mask))
goto irq_done;
@@ -744,20 +987,13 @@ static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
if (status & PCIE_HDP_INT_HHBM_UF)
ps->pcie_irq_uf_count++;
- if (status & PCIE_HDP_INT_RX_BITS) {
- qtnf_dis_rxdone_irq(ps);
+ if (status & PCIE_HDP_INT_RX_BITS)
napi_schedule(&bus->mux_napi);
- }
- if (status & PCIE_HDP_INT_TX_BITS) {
- qtnf_dis_txdone_irq(ps);
+ if (status & PCIE_HDP_INT_TX_BITS)
tasklet_hi_schedule(&priv->reclaim_tq);
- }
irq_done:
- /* H/W workaround: clean all bits, not only enabled */
- qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
-
if (!priv->msi_enabled)
qtnf_deassert_intx(ps);
@@ -896,6 +1132,8 @@ static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
qtnf_enable_hdp_irqs(ps);
+ if (ps->hdp_ops->hdp_enable)
+ ps->hdp_ops->hdp_enable(ps);
napi_enable(&bus->mux_napi);
}
@@ -904,6 +1142,8 @@ static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
napi_disable(&bus->mux_napi);
+ if (ps->hdp_ops->hdp_disable)
+ ps->hdp_ops->hdp_disable(ps);
qtnf_disable_hdp_irqs(ps);
}
@@ -1124,7 +1364,8 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
} else {
pr_info("starting firmware upload: %s\n", fwname);
-
+ if (ps->hdp_ops->hdp_enable)
+ ps->hdp_ops->hdp_enable(ps);
ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
release_firmware(fw);
if (ret) {
@@ -1193,6 +1434,10 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus,
case QTN_CHIP_ID_PEARL_B:
ps->hdp_ops = &hdp_ops_rev_b;
break;
+ case QTN_CHIP_ID_PEARL_C:
+ case QTN_CHIP_ID_PEARL_C1:
+ ps->hdp_ops = &hdp_ops_rev_c;
+ break;
default:
pr_err("unsupported PEARL chip ID 0x%x\n", bus->chipid);
return -ENOTSUPP;
@@ -61,6 +61,9 @@
#define QTN_ENET_ADDR_LENGTH 6
+#define QTN_HDP_BD_WRAP 0x8000
+#define QTN_HDP_DMA_PTR_SIZE (4 * sizeof(u64))
+
#define QTN_TXDONE_MASK ((u32)0x80000000)
#define QTN_GET_LEN(x) ((x) & 0xFFFF)
@@ -4,7 +4,7 @@
#ifndef __PEARL_PCIE_H
#define __PEARL_PCIE_H
-/* Pearl PCIe HDP registers */
+/* Pearl rev B PCIe HDP registers */
#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
@@ -60,7 +60,6 @@
#define PCIE_HDP_RX3DMA_CNT(base) ((base) + 0x2d1c)
#define PCIE_HDP_TX0DMA_CNT(base) ((base) + 0x2d20)
#define PCIE_HDP_TX1DMA_CNT(base) ((base) + 0x2d24)
-#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base) ((base) + 0x2d2c)
#define PCIE_HDP_TX_HOST_Q_BASE_L(base) ((base) + 0x2d30)
#define PCIE_HDP_TX_HOST_Q_BASE_H(base) ((base) + 0x2d34)
@@ -68,6 +67,36 @@
#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
+#define PCIE_HDP_TX_DMA_CTRL(base) ((base) + 0x2dcc)
+#define PCIE_HDP_TXDMA_NEW (BIT(8))
+
+#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
+#define PCIE_HDP_RXDMA_WPTR (BIT(27))
+#define PCIE_HDP_RXDMA_NEW (BIT(29))
+#define PCIE_HDP_RXDMA_INTERLEAVE (BIT(30))
+
+/* Pearl rev C PCIe HDP registers */
+#define PCIE_HDP_TX0_DEV_PTR_ADDR(base) ((base) + 0x2db0)
+#define PCIE_HDP_TX0_DEV_PTR_ADDR_H(base) ((base) + 0x2db4)
+#define PCIE_HDP_TX0_DESC_Q_WR_PTR(base) ((base) + 0x2da4)
+#define PCIE_HDP_TX0_DESC_BASE_ADDR(base) ((base) + 0x2dac)
+#define PCIE_HDP_TX0_DESC_BASE_ADDR_H(base) ((base) + 0x2da8)
+
+#define PCIE_HDP_RX2_DESC_BASE_ADDR(base) ((base) + 0x2c20)
+#define PCIE_HDP_RX2_DESC_BASE_ADDR_H(base) ((base) + 0x2c24)
+#define PCIE_HDP_RX2_DESC_Q_WR_PTR(base) ((base) + 0x2d84)
+#define PCIE_HDP_RX2_DEV_PTR_ADDR(base) ((base) + 0x2dd8)
+#define PCIE_HDP_RX2_DEV_PTR_ADDR_H(base) ((base) + 0x2ddc)
+
+#define PCIE_HDP_TX0_DESC_Q_CTRL(base) ((base) + 0x2da0)
+#define PCIE_HDP_RX2_DESC_Q_CTRL(base) ((base) + 0x2d80)
+#define PCIE_HDP_DESC_FETCH_EN (BIT(31))
+
+#define PCIE_HDP_AXI_MASTER_CTRL(base) ((base) + 0x2de0)
+#define PCIE_HDP_AXI_EN_BURST32_READ (BIT(3) | BIT(7))
+#define PCIE_HDP_AXI_EN_BURST32_WRITE BIT(11)
+#define PCIE_HDP_AXI_BURST32_SIZE (32 * 8)
+
/* Pearl PCIe HBM pool registers */
#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
@@ -18,6 +18,7 @@
#define QTN_CHIP_ID_PEARL 0x50
#define QTN_CHIP_ID_PEARL_B 0x60
#define QTN_CHIP_ID_PEARL_C 0x70
+#define QTN_CHIP_ID_PEARL_C1 0x80
/* FW names */
@@ -116,6 +116,8 @@ const char *qtnf_chipid_to_string(unsigned long chip_id)
return "Pearl revB";
case QTN_CHIP_ID_PEARL_C:
return "Pearl revC";
+ case QTN_CHIP_ID_PEARL_C1:
+ return "Pearl revC1";
default:
return "unknown";
}
Add support for the new minor revision of the QSR10g chip. The major changes
from the driver perspective are in the PCIe data path. Setup is now more
complicated, but more of the data path has been offloaded to hardware: fewer
driver boilerplate operations are needed once the Tx/Rx descriptor queues
have been configured. In addition, restrictions on descriptor queue lengths
have been relaxed.

Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
---
 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c |   1 +
 .../wireless/quantenna/qtnfmac/pcie/pearl_pcie.c   | 273 +++++++++++++++++++--
 .../quantenna/qtnfmac/pcie/pearl_pcie_ipc.h        |   3 +
 .../quantenna/qtnfmac/pcie/pearl_pcie_regs.h       |  33 ++-
 .../net/wireless/quantenna/qtnfmac/qtn_hw_ids.h    |   1 +
 drivers/net/wireless/quantenna/qtnfmac/util.c      |   2 +
 6 files changed, 297 insertions(+), 16 deletions(-)
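Note (illustration only, not part of the patch): the rev B / rev C split is
handled through the per-revision qtnf_pcie_pearl_hdp_ops table, selected once
at probe time by chip ID; the callbacks that exist only for rev C (hdp_enable,
hdp_disable, hdp_tx_done_wrap) are left NULL for rev B and NULL-checked at the
call sites. The self-contained sketch below mirrors only that dispatch pattern;
the pearl_state/pearl_hdp_ops types and the printf bodies are simplified
stand-ins, not the driver's real definitions.

/* Minimal sketch of the per-revision HDP ops dispatch used by this patch. */
#include <stdio.h>

#define QTN_CHIP_ID_PEARL_B	0x60
#define QTN_CHIP_ID_PEARL_C	0x70
#define QTN_CHIP_ID_PEARL_C1	0x80

struct pearl_state;

struct pearl_hdp_ops {
	void (*hdp_init)(struct pearl_state *ps);
	void (*hdp_enable)(struct pearl_state *ps);	/* optional, rev C only */
	void (*hdp_disable)(struct pearl_state *ps);	/* optional, rev C only */
};

struct pearl_state {
	const struct pearl_hdp_ops *hdp_ops;
};

static void hdp_init_rev_b(struct pearl_state *ps) { (void)ps; printf("rev B HDP init\n"); }
static void hdp_init_rev_c(struct pearl_state *ps) { (void)ps; printf("rev C HDP init\n"); }
static void hdp_enable_rev_c(struct pearl_state *ps) { (void)ps; printf("rev C HDP enable\n"); }
static void hdp_disable_rev_c(struct pearl_state *ps) { (void)ps; printf("rev C HDP disable\n"); }

static const struct pearl_hdp_ops ops_rev_b = {
	.hdp_init = hdp_init_rev_b,
	/* .hdp_enable / .hdp_disable stay NULL: rev B needs no explicit toggle */
};

static const struct pearl_hdp_ops ops_rev_c = {
	.hdp_init = hdp_init_rev_c,
	.hdp_enable = hdp_enable_rev_c,
	.hdp_disable = hdp_disable_rev_c,
};

/* Table selection by chip ID, mirroring qtnf_pcie_pearl_probe() */
static int select_hdp_ops(struct pearl_state *ps, unsigned int chipid)
{
	switch (chipid) {
	case QTN_CHIP_ID_PEARL_B:
		ps->hdp_ops = &ops_rev_b;
		return 0;
	case QTN_CHIP_ID_PEARL_C:
	case QTN_CHIP_ID_PEARL_C1:
		ps->hdp_ops = &ops_rev_c;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct pearl_state ps = { 0 };

	if (select_hdp_ops(&ps, QTN_CHIP_ID_PEARL_C1))
		return 1;

	ps.hdp_ops->hdp_init(&ps);

	/* Optional callbacks are NULL-checked at the call site, as in
	 * qtnf_pcie_data_rx_start()/_stop() in the patch.
	 */
	if (ps.hdp_ops->hdp_enable)
		ps.hdp_ops->hdp_enable(&ps);
	if (ps.hdp_ops->hdp_disable)
		ps.hdp_ops->hdp_disable(&ps);

	return 0;
}

The same NULL-check pattern covers hdp_tx_done_wrap in
qtnf_pearl_data_tx_reclaim().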