This removes TXQ scheduling from ath10k and changes it to use the mac80211
TXQ scheduling API introduced in the previous patch.

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
---
 drivers/net/wireless/ath/ath10k/core.c |  2 --
 drivers/net/wireless/ath/ath10k/core.h |  3 ---
 drivers/net/wireless/ath/ath10k/mac.c  | 48 ++++++++++------------------------
 3 files changed, 11 insertions(+), 42 deletions(-)

diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2561,9 +2561,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
- spin_lock_init(&ar->txqs_lock);
- INIT_LIST_HEAD(&ar->txqs);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -892,10 +892,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
- /* protects: ar->txqs, artxq->list */
- spinlock_t txqs_lock;
- struct list_head txqs;
struct list_head arvifs;
struct list_head peers;
struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3825,7 +3825,6 @@ static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
{
- struct ath10k_txq *artxq;
struct ath10k_skb_cb *cb;
struct sk_buff *msdu;
int msdu_id;
@@ -3833,12 +3832,6 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
if (!txq)
return;
- artxq = (void *)txq->drv_priv;
- spin_lock_bh(&ar->txqs_lock);
- if (!list_empty(&artxq->list))
- list_del_init(&artxq->list);
- spin_unlock_bh(&ar->txqs_lock);
-
spin_lock_bh(&ar->htt.tx_lock);
idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
cb = ATH10K_SKB_CB(msdu);
@@ -3969,22 +3962,15 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_txq *txq;
- struct ath10k_txq *artxq;
- struct ath10k_txq *last;
int ret;
int max;
if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
return;
- spin_lock_bh(&ar->txqs_lock);
rcu_read_lock();
- last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
- while (!list_empty(&ar->txqs)) {
- artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
- txq = container_of((void *)artxq, struct ieee80211_txq,
- drv_priv);
+ while ((txq = ieee80211_next_txq(hw))) {
/* Prevent aggressive sta/tid taking over tx queue */
max = 16;
@@ -3995,18 +3981,16 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
break;
}
- list_del_init(&artxq->list);
if (ret != -ENOENT)
- list_add_tail(&artxq->list, &ar->txqs);
+ ieee80211_schedule_txq(hw, txq);
ath10k_htt_tx_txq_update(hw, txq);
- if (artxq == last || (ret < 0 && ret != -ENOENT))
+ if (ret < 0 && ret != -ENOENT)
break;
}
rcu_read_unlock();
- spin_unlock_bh(&ar->txqs_lock);
}
/************/
@@ -4240,34 +4224,24 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
}
}
-static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw)
{
- struct ath10k *ar = hw->priv;
- struct ath10k_txq *artxq = (void *)txq->drv_priv;
- struct ieee80211_txq *f_txq;
- struct ath10k_txq *f_artxq;
+ struct ieee80211_txq *txq;
int ret = 0;
int max = 16;
- spin_lock_bh(&ar->txqs_lock);
- if (list_empty(&artxq->list))
- list_add_tail(&artxq->list, &ar->txqs);
-
- f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
- f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
- list_del_init(&f_artxq->list);
+	txq = ieee80211_next_txq(hw);
+	if (!txq)
+		return;
- while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
- ret = ath10k_mac_tx_push_txq(hw, f_txq);
+ while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
if (ret)
break;
}
if (ret != -ENOENT)
- list_add_tail(&f_artxq->list, &ar->txqs);
- spin_unlock_bh(&ar->txqs_lock);
+ ieee80211_schedule_txq(hw, txq);
- ath10k_htt_tx_txq_update(hw, f_txq);
ath10k_htt_tx_txq_update(hw, txq);
}
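For reference, the driver-side pattern this converts to is a plain pull loop
against the new scheduling API. The following is a minimal sketch, not ath10k
code: it assumes the ieee80211_next_txq()/ieee80211_schedule_txq() signatures
introduced in the previous patch, and drv_push_one_txq() is a hypothetical
stand-in for a driver's push routine (ath10k's is ath10k_mac_tx_push_txq()).

#include <net/mac80211.h>

/* Hypothetical driver push routine: returns 0 on success, -ENOENT when
 * the txq is empty, another negative errno when out of tx resources.
 */
static int drv_push_one_txq(struct ieee80211_hw *hw,
			    struct ieee80211_txq *txq);

static void drv_pull_txqs(struct ieee80211_hw *hw)
{
	struct ieee80211_txq *txq;
	int ret;

	rcu_read_lock();

	/* mac80211 hands back queues in its scheduling order */
	while ((txq = ieee80211_next_txq(hw))) {
		ret = drv_push_one_txq(hw, txq);

		/* Unless the queue is known to be empty, hand it back to
		 * the scheduler so it gets another turn in the rotation.
		 */
		if (ret != -ENOENT)
			ieee80211_schedule_txq(hw, txq);

		if (ret < 0 && ret != -ENOENT)
			break;
	}

	rcu_read_unlock();
}

The point of the conversion is visible in the sketch: the per-driver txq list
and the lock protecting it go away entirely, and the order in which queues are
served becomes whatever policy mac80211 implements behind ieee80211_next_txq().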