@@ -3,7 +3,8 @@ obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
mt76-y := \
- mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
+ tx.o agg-rx.o usb.o
CFLAGS_trace.o := -I$(src)
@@ -25,6 +25,11 @@
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_RX_RXWI_LEN 32
+
struct mt76_desc {
__le32 buf0;
__le32 ctrl;
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"
@@ -63,12 +64,24 @@ struct mt76_queue_buf {
int len;
};
+struct mt76_usb_buf {
+ struct mt76_dev *dev;
+ struct urb *urb;
+ dma_addr_t dma;
+ void *buf;
+ size_t len;
+ bool done;
+};
+
struct mt76_queue_entry {
union {
void *buf;
struct sk_buff *skb;
};
- struct mt76_txwi_cache *txwi;
+ union {
+ struct mt76_txwi_cache *txwi;
+ struct mt76_usb_buf ubuf;
+ };
bool schedule;
};
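
Note: a given device only ever uses one bus backend, so overlaying txwi
and ubuf in a union keeps struct mt76_queue_entry no larger than its
biggest member. An illustrative (hypothetical, not from this patch)
access pattern:

    struct mt76_queue_entry *e = &q->entry[i];

    if (dev_is_usb)                      /* hypothetical backend flag */
            usb_kill_urb(e->ubuf.urb);   /* USB code owns e->ubuf */
    else
            mt76_put_txwi(dev, e->txwi); /* DMA code owns e->txwi */
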
@@ -89,6 +102,7 @@ struct mt76_queue {
struct list_head swq;
int swq_queued;
+ u16 first;
u16 head;
u16 tail;
int ndesc;
@@ -194,6 +208,9 @@ enum {
MT76_STATE_RUNNING,
MT76_SCANNING,
MT76_RESET,
+ MT76_REMOVED,
+ MT76_READING_STATS,
+ MT76_PENDING_STATS,
};
struct mt76_hw_cap {
@@ -233,6 +250,55 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 0x1,
+ MT_VEND_WRITE = 0x2,
+ MT_VEND_MULTI_WRITE = 0x6,
+ MT_VEND_MULTI_READ = 0x7,
+ MT_VEND_READ_EEPROM = 0x9,
+ MT_VEND_WRITE_FCE = 0x42,
+ MT_VEND_WRITE_CFG = 0x46,
+ MT_VEND_READ_CFG = 0x47,
+};
+
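
How the type bits combine with the register offset (illustration only;
it mirrors the decode in __mt76_usb_rr()/__mt76_usb_wr() further down):

    u32 addr = MT_VEND_ADDR(CFG, 0x14);     /* BIT(30) | 0x14 */
    u16 offset = addr & ~MT_VEND_TYPE_MASK; /* plain offset, 0x14 */
    /* (addr & MT_VEND_TYPE_MASK) == MT_VEND_TYPE_CFG then selects
     * MT_VEND_READ_CFG / MT_VEND_WRITE_CFG as the vendor request. */
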
+enum mt76_usb_in_ep {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt76_usb_out_ep {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+#define MT_URB_SIZE 2048
+#define MT_NUM_TX_ENTRIES 256
+#define MT_NUM_RX_ENTRIES 256
+struct mt76_usb {
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
@@ -273,6 +339,8 @@ struct mt76_dev {
char led_name[32];
bool led_al;
u8 led_pin;
+
+ struct mt76_usb usb;
};
enum mt76_phy_type {
@@ -390,6 +458,14 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+/* Hardware uses mirrored order of queues with Q3
+ * having the highest priority
+ */
+static inline u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
@@ -449,4 +525,41 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+/* usb */
+static inline bool mt76_usb_urb_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN &&
+ urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
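
Combining q2hwq() and q2ep(), the effective mapping is (illustrative
summary derived from the enums above):

    /*
     * mac80211 AC          q2hwq()  hw queue  q2ep()  bulk-out endpoint
     * IEEE80211_AC_VO (0)  0 ^ 3       3       3 + 1  MT_EP_OUT_AC_VO
     * IEEE80211_AC_VI (1)  1 ^ 3       2       2 + 1  MT_EP_OUT_AC_VI
     * IEEE80211_AC_BE (2)  2 ^ 3       1       1 + 1  MT_EP_OUT_AC_BE
     * IEEE80211_AC_BK (3)  3 ^ 3       0       0 + 1  MT_EP_OUT_AC_BK
     */
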
+int mt76_usb_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len);
+void mt76_usb_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+int mt76_usb_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76_usb_deinit(struct mt76_dev *dev);
+int mt76_usb_buf_alloc(struct mt76_dev *dev, struct mt76_usb_buf *buf,
+ size_t len, gfp_t gfp);
+void mt76_usb_buf_free(struct mt76_usb_buf *buf);
+int mt76_usb_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76_usb_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+int mt76_usb_submit_rx_buffers(struct mt76_dev *dev);
+int mt76_usb_alloc_rx(struct mt76_dev *dev);
+int mt76_usb_alloc_tx(struct mt76_dev *dev);
+void mt76_usb_free_rx(struct mt76_dev *dev);
+void mt76_usb_free_tx(struct mt76_dev *dev);
+void mt76_usb_stop_rx(struct mt76_dev *dev);
+void mt76_usb_stop_tx(struct mt76_dev *dev);
+
#endif
new file mode 100644
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76_usb_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+ int i, ret;
+
+ pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+ : usb_sndctrlpipe(udev, 0);
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return -EIO;
+
+ ret = usb_control_msg(udev, pipe, req, req_type, val,
+ offset, buf, len, MT_VEND_REQ_TOUT_MS);
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+ usleep_range(5000, 10000);
+ }
+
+ dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+ return ret;
+}
+
+int mt76_usb_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76_usb_vendor_request(dev, req, req_type,
+ val, offset, buf, len);
+ trace_reg_wr(dev, offset, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_usb_vendor_request);
+
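
A sketch of a typical caller (the request semantics shown here are
hypothetical, not taken from this patch):

    static int example_set_dev_mode(struct mt76_dev *dev)
    {
            /* zero-length OUT transfer; wValue carries the payload */
            return mt76_usb_vendor_request(dev, MT_VEND_DEV_MODE,
                                           USB_DIR_OUT | USB_TYPE_VENDOR,
                                           0x1, 0, NULL, 0);
    }
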
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76_usb_rr(struct mt76_dev *dev, u32 addr)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 data = ~0;
+ u16 offset;
+ int ret;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_EEPROM:
+ req = MT_VEND_READ_EEPROM;
+ break;
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_READ_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_READ;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ ret = __mt76_usb_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, offset, usb->data, sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+static u32 mt76_usb_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76_usb_rr(dev, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76_usb_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u16 offset;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_WRITE_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_WRITE;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76_usb_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+ offset, usb->data, sizeof(__le32));
+ trace_reg_wr(dev, addr, val);
+}
+
+static void mt76_usb_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76_usb_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76_usb_rmw(struct mt76_dev *dev, u32 addr, u32 mask,
+ u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= __mt76_usb_rr(dev, addr) & ~mask;
+ __mt76_usb_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
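
This gives the usual read-modify-write semantics under a single mutex
acquisition; e.g. updating one field while preserving the rest of the
register (MT_EXAMPLE_FIELD is a hypothetical mask):

    /* reg = (reg & ~MT_EXAMPLE_FIELD) | FIELD_PREP(MT_EXAMPLE_FIELD, v) */
    mt76_usb_rmw(dev, addr, MT_EXAMPLE_FIELD,
                 FIELD_PREP(MT_EXAMPLE_FIELD, v));
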
+static void mt76_usb_copy(struct mt76_dev *dev, u32 offset, const void *data,
+ int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ const u32 *val = data;
+ int i, ret;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ for (i = 0; i < (len / 4); i++) {
+ put_unaligned_le32(val[i], usb->data);
+ ret = __mt76_usb_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0, offset + i * 4, usb->data,
+ sizeof(__le32));
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76_usb_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76_usb_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val & 0xffff, offset, NULL, 0);
+ __mt76_usb_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val >> 16, offset + 2, NULL, 0);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_single_wr);
+
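
The 32-bit value is split into two zero-length control transfers, low
half first; for example (the register offset is chosen for illustration):

    /* sends wValue 0xCCDD at offset 0x230, then wValue 0xAABB at 0x232 */
    mt76_usb_single_wr(dev, MT_VEND_WRITE_FCE, 0x230, 0xAABBCCDD);
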
+static int mt76_usb_set_endpoints(struct usb_interface *intf,
+ struct mt76_usb *usb)
+{
+ struct usb_host_interface *intf_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, in_ep = 0, out_ep = 0;
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ in_ep < __MT_EP_IN_MAX) {
+ usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+ usb->in_max_packet = usb_endpoint_maxp(ep_desc);
+ in_ep++;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ out_ep < __MT_EP_OUT_MAX) {
+ usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+ usb->out_max_packet = usb_endpoint_maxp(ep_desc);
+ out_ep++;
+ }
+ }
+
+ if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+ return -EINVAL;
+ return 0;
+}
+
+int mt76_usb_buf_alloc(struct mt76_dev *dev, struct mt76_usb_buf *buf,
+ size_t len, gfp_t gfp)
+{
+ buf->urb = usb_alloc_urb(0, gfp);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->buf = netdev_alloc_frag(len);
+ if (!buf->buf) {
+ usb_free_urb(buf->urb);
+ return -ENOMEM;
+ }
+ buf->len = len;
+ buf->dev = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_usb_buf_alloc);
+
+void mt76_usb_buf_free(struct mt76_usb_buf *buf)
+{
+ skb_free_frag(buf->buf);
+ usb_free_urb(buf->urb);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_buf_free);
+
+int mt76_usb_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76_usb_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+ else
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+ usb_fill_bulk_urb(buf->urb, udev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+
+ return usb_submit_urb(buf->urb, gfp);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_submit_buf);
+
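
The expected alloc/submit pairing, e.g. for a one-off read on the
command-response endpoint (resp_complete is a hypothetical completion
handler):

    struct mt76_usb_buf resp;
    int err;

    err = mt76_usb_buf_alloc(dev, &resp, MT_URB_SIZE, GFP_KERNEL);
    if (err < 0)
            return err;

    err = mt76_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
                              &resp, GFP_KERNEL, resp_complete, dev);
    if (err < 0)
            mt76_usb_buf_free(&resp);
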
+static inline struct mt76_usb_buf
+*mt76_usb_get_next_rx_entry(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_usb_buf *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->head != q->tail) {
+ buf = &q->entry[q->head].ubuf;
+ q->head = (q->head + 1) % q->ndesc;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return buf;
+}
+
+static int mt76_usb_get_rx_entry_len(u8 *data, u32 data_len)
+{
+ u16 dma_len, min_len;
+
+ dma_len = get_unaligned_le16(data);
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+ MT_FCE_INFO_LEN;
+
+ if (data_len < min_len || WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return -EINVAL;
+ return dma_len;
+}
+
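
One plausible reading of the buffer layout these checks imply (a sketch;
the exact placement of the FCE info word is an assumption):

    /*
     * |<- MT_DMA_HDR_LEN ->|<--------------- dma_len --------------->|
     * | le16 len (+ pad)   | RXWI (32B) | 802.11 frame | FCE info    |
     *
     * dma_len comes from the first two bytes and must be a non-zero
     * multiple of four that fits in the URB payload.
     */
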
+static int
+mt76_usb_process_rx_entry(struct mt76_dev *dev,
+ struct mt76_usb_buf *buf)
+{
+ u8 *data = buf->buf;
+ struct sk_buff *skb;
+ int len;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ return 0;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mt76_usb_get_rx_entry_len(data, buf->urb->actual_length);
+ if (len < 0) {
+ skb_free_frag(data);
+ return len;
+ }
+
+ skb = build_skb(data, buf->len);
+ if (!skb) {
+ skb_free_frag(data);
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ if (skb->tail + len > skb->end) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ __skb_put(skb, len);
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+ return 0;
+}
+
+static void mt76_usb_complete_rx(struct urb *urb)
+{
+ struct mt76_dev *dev = urb->context;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ goto out;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static void mt76_usb_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_usb_buf *buf;
+
+ rcu_read_lock();
+
+ while (true) {
+ buf = mt76_usb_get_next_rx_entry(dev);
+ if (!buf)
+ break;
+
+ mt76_usb_process_rx_entry(dev, buf);
+
+ buf->buf = netdev_alloc_frag(buf->len);
+ if (!buf->buf)
+ break;
+
+ mt76_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ buf, GFP_ATOMIC,
+ mt76_usb_complete_rx, dev);
+ }
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+ rcu_read_unlock();
+}
+
+int mt76_usb_submit_rx_buffers(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+ int i, err = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ &q->entry[i].ubuf, GFP_ATOMIC,
+ mt76_usb_complete_rx, dev);
+ if (err < 0)
+ break;
+ }
+ q->head = q->tail = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76_usb_submit_rx_buffers);
+
+int mt76_usb_alloc_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, err;
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
+ err = mt76_usb_buf_alloc(dev, &q->entry[i].ubuf, MT_URB_SIZE,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+ }
+ q->ndesc = MT_NUM_RX_ENTRIES;
+
+ return mt76_usb_submit_rx_buffers(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_alloc_rx);
+
+void mt76_usb_free_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76_usb_buf_free(&q->entry[i].ubuf);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_free_rx);
+
+void mt76_usb_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_kill_urb(q->entry[i].ubuf.urb);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_stop_rx);
+
+static void mt76_usb_tx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+
+ set_bit(MT76_PENDING_STATS, &dev->state);
+ dev->drv->tx_complete_skb(dev, NULL, NULL, false);
+}
+
+static void mt76_usb_complete_tx(struct urb *urb)
+{
+ struct mt76_usb_buf *buf = urb->context;
+ struct mt76_dev *dev = buf->dev;
+
+ if (mt76_usb_urb_error(urb))
+ dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+ buf->done = true;
+
+ tasklet_schedule(&dev->usb.tx_tasklet);
+}
+
+static int mt76_usb_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ u16 idx = q->tail, next_idx = (q->tail + 1) % q->ndesc;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ u8 ep = q2ep(q->hw_idx);
+ struct mt76_usb_buf *buf;
+ unsigned int pipe;
+ int err;
+
+ if (next_idx == q->head)
+ return -ENOSPC;
+
+ err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ if (err < 0)
+ return err;
+
+ buf = &q->entry[idx].ubuf;
+ buf->done = false;
+
+ q->entry[idx].skb = skb;
+ q->tail = next_idx;
+ q->queued++;
+
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
+ usb_fill_bulk_urb(buf->urb, udev, pipe, skb->data, skb->len,
+ mt76_usb_complete_tx, buf);
+ return idx;
+}
+
+static void mt76_usb_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct mt76_usb_buf *buf;
+ int err;
+
+ while (q->first != q->tail) {
+ buf = &q->entry[q->first].ubuf;
+ err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "tx urb submit failed:%d\n",
+ err);
+ break;
+ }
+ q->first = (q->first + 1) % q->ndesc;
+ }
+}
+
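
The producer/consumer split behind the three ring indices (summary of
the code above; advancing head is left to the driver's completion path):

    /*
     * head  -- oldest entry, reclaimed once its URB has completed
     * first -- next URB for mt76_usb_tx_kick() to submit
     * tail  -- where mt76_usb_tx_queue_skb() places the next frame
     *
     * head <= first <= tail (mod ndesc): queue_skb advances tail,
     * kick advances first, completion handling advances head.
     */
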
+int mt76_usb_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76_usb_buf *buf;
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+ q->hw_idx = q2hwq(i);
+
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ for (j = 0; j < q->ndesc; j++) {
+ buf = &q->entry[j].ubuf;
+ buf->dev = dev;
+
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!buf->urb)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_usb_alloc_tx);
+
+void mt76_usb_free_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_free_urb(q->entry[j].ubuf.urb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_usb_free_tx);
+
+void mt76_usb_stop_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].ubuf.urb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_usb_stop_tx);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+ .tx_queue_skb = mt76_usb_tx_queue_skb,
+ .kick = mt76_usb_tx_kick,
+};
+
+int mt76_usb_init(struct mt76_dev *dev,
+ struct usb_interface *intf)
+{
+ static const struct mt76_bus_ops mt76_usb_ops = {
+ .rr = mt76_usb_rr,
+ .wr = mt76_usb_wr,
+ .rmw = mt76_usb_rmw,
+ .copy = mt76_usb_copy,
+ };
+ struct mt76_usb *usb = &dev->usb;
+
+ tasklet_init(&usb->rx_tasklet, mt76_usb_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&usb->tx_tasklet, mt76_usb_tx_tasklet, (unsigned long)dev);
+ skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+ mutex_init(&usb->usb_ctrl_mtx);
+ dev->bus = &mt76_usb_ops;
+ dev->queue_ops = &usb_queue_ops;
+
+ return mt76_usb_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76_usb_init);
+
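
For context, a USB driver built on top of this helper layer would wire
things up from its probe path roughly as follows (a sketch:
mt76x2u_alloc_device() is a hypothetical allocator and error unwinding
is omitted for brevity):

    static int mt76x2u_probe(struct usb_interface *intf,
                             const struct usb_device_id *id)
    {
            struct mt76_dev *dev = mt76x2u_alloc_device(&intf->dev);
            int err;

            if (!dev)
                    return -ENOMEM;
            usb_set_intfdata(intf, dev);

            err = mt76_usb_init(dev, intf);   /* endpoints, bus/queue ops */
            if (err < 0)
                    return err;

            err = mt76_usb_alloc_tx(dev);     /* per-AC TX URBs */
            if (err < 0)
                    return err;

            return mt76_usb_alloc_rx(dev);    /* RX buffers + submit */
    }
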
This will be used by drivers for MT76x2u-based devices

Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
---
 drivers/net/wireless/mediatek/mt76/Makefile |   3 +-
 drivers/net/wireless/mediatek/mt76/dma.h    |   5 +
 drivers/net/wireless/mediatek/mt76/mt76.h   | 115 +++++-
 drivers/net/wireless/mediatek/mt76/usb.c    | 610 ++++++++++++++++++++++++++++
 4 files changed, 731 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/wireless/mediatek/mt76/usb.c