@@ -1160,6 +1160,7 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
  */
 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 {
+        bool flow_sch_en;
         int err, i;

         vport->txq_grps = kcalloc(vport->num_txq_grp,
@@ -1167,6 +1168,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
         if (!vport->txq_grps)
                 return -ENOMEM;

+        flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+                                       VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
         for (i = 0; i < vport->num_txq_grp; i++) {
                 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
                 struct idpf_adapter *adapter = vport->adapter;
@@ -1195,8 +1199,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
                         q->txq_grp = tx_qgrp;
                         hash_init(q->sched_buf_hash);

-                        if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
-                                             VIRTCHNL2_CAP_SPLITQ_QSCHED))
+                        if (flow_sch_en)
                                 set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
                 }
@@ -1215,6 +1218,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
                 tx_qgrp->complq->desc_count = vport->complq_desc_count;
                 tx_qgrp->complq->vport = vport;
                 tx_qgrp->complq->txq_grp = tx_qgrp;
+
+                if (flow_sch_en)
+                        __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
         }

         return 0;
@@ -1473,7 +1473,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
         /* Populate the queue info buffer with all queue context info */
         for (i = 0; i < vport->num_txq_grp; i++) {
                 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-                int j;
+                int j, sched_mode;

                 for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
                         qi[k].queue_id =
@@ -1514,6 +1514,12 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
                 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
                 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);

+                if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+                        sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+                else
+                        sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+                qi[k].sched_mode = cpu_to_le16(sched_mode);
+
                 k++;
         }
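
Taken together, the hunks sample the VIRTCHNL2_CAP_SPLITQ_QSCHED capability once per vport, mirror the resulting flow-scheduling flag onto each TX queue and its completion queue, and later translate that flag into the scheduling mode reported in the TX queue configuration message. The standalone sketch below condenses that flow for illustration only; the struct, helper names, and enum values here are invented stand-ins, not the driver's or virtchnl2's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the queue flag and the scheduling modes
 * (assumed values; see virtchnl2.h for the real definitions). */
enum sketch_sched_mode { SKETCH_SCHED_QUEUE = 0, SKETCH_SCHED_FLOW = 1 };

struct sketch_complq {
        bool flow_sch_en;       /* mirrors __IDPF_Q_FLOW_SCH_EN */
};

/* The capability is consulted once at allocation time; the flag stored on
 * the completion queue then decides the mode reported at config time. */
static uint16_t sketch_report_mode(const struct sketch_complq *cq)
{
        return cq->flow_sch_en ? SKETCH_SCHED_FLOW : SKETCH_SCHED_QUEUE;
}

int main(void)
{
        struct sketch_complq cq = { .flow_sch_en = true };

        printf("sched_mode = %u\n", (unsigned int)sketch_report_mode(&cq));
        return 0;
}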