--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -483,6 +483,7 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
for (int i = 0; i != cdev->tx_fifo_size; ++i) {
if (!cdev->tx_ops[i].skb)
@@ -494,6 +495,11 @@ static void m_can_clean(struct net_device *net)
for (int i = 0; i != cdev->can.echo_skb_max; ++i)
can_free_echo_skb(cdev->net, i, NULL);
+
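+ /* All pending frames were freed above, so none are in flight anymore */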
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1064,6 +1070,26 @@ static void m_can_tx_update_stats(struct m_can_classdev *cdev,
stats->tx_packets++;
}
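+/* Remove completed transmissions from the in-flight count */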
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
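+/* Account for one more frame going into the TX FIFO */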
+static void m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ ++cdev->tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
static int m_can_echo_tx_event(struct net_device *dev)
{
u32 txe_count = 0;
@@ -1073,6 +1099,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
int i = 0;
int err = 0;
unsigned int msg_mark;
+ int processed = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1102,12 +1129,16 @@ static int m_can_echo_tx_event(struct net_device *dev)
/* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ ++processed;
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
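+ /* Each acknowledged event is one finished transmission */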
+ m_can_finish_tx(cdev, processed);
+
return err;
}
@@ -1189,6 +1220,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
timestamp = m_can_get_timestamp(cdev);
m_can_tx_update_stats(cdev, 0, timestamp);
netif_wake_queue(dev);
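+ /* One frame has completed, take it out of the in-flight count */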
+ m_can_finish_tx(cdev, 1);
}
} else {
if (ir & (IR_TEFN | IR_TEFW)) {
@@ -1874,11 +1907,23 @@ static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
}
netif_stop_queue(cdev->net);
+
+ m_can_start_tx(cdev);
+
m_can_tx_queue_skb(cdev, skb);
return NETDEV_TX_OK;
}
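+/* For non-peripheral devices the frame is written to the FIFO immediately */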
+static netdev_tx_t m_can_start_fast_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ m_can_start_tx(cdev);
+
+ return m_can_tx_handler(cdev, skb);
+}
+
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1890,7 +1935,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
if (cdev->is_peripheral)
return m_can_start_peripheral_xmit(cdev, skb);
else
- return m_can_tx_handler(cdev, skb);
+ return m_can_start_fast_xmit(cdev, skb);
}
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -108,6 +108,11 @@ struct m_can_classdev {
// Store this internally to avoid fetch delays on peripheral chips
u32 tx_fifo_putidx;
+ /* Protects shared state between start_xmit and m_can_isr */
+ spinlock_t tx_handling_spinlock;
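+ /* Frames queued for transmission that have not completed yet */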
+ int tx_fifo_in_flight;
+
struct m_can_tx_op *tx_ops;
int tx_fifo_size;
int next_tx_op;