
[iwl-next,v1,3/3] ice: allow changing SF VSI queues number

Message ID 20241031060009.38979-4-michal.swiatkowski@linux.intel.com (mailing list archive)
State Awaiting Upstream
Delegated to: Netdev Maintainers
Series: ice: multiqueue on subfunction

Checks

Context: netdev/tree_selection
Check: success
Description: Guessing tree name failed - patch did not apply

Commit Message

Michal Swiatkowski Oct. 31, 2024, 6 a.m. UTC
Move setting the number of Rx and Tx queues into separate helper functions and
use them in the SF case.

Adjust how the maximum number of Rx and Tx queues is obtained for the SF use
case.

Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
---
 drivers/net/ethernet/intel/ice/ice_ethtool.c | 37 +++++++-----
 drivers/net/ethernet/intel/ice/ice_lib.c     | 63 ++++++++++++--------
 2 files changed, 60 insertions(+), 40 deletions(-)
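
Note for reviewers: a rough sketch of the intended user-visible flow. It
assumes the earlier patches in this series expose the devlink max_io_eqs
attribute for ice subfunction ports; the PCI address, port index, netdev name
and queue counts below are only illustrative.

  # give the SF a larger event-queue/interrupt budget, typically before
  # the SF is activated (hypothetical devlink port)
  devlink port function set pci/0000:31:00.0/1 max_io_eqs 8

  # once the SF netdev is registered, request more than one channel
  ethtool -L eth0 combined 4

With this patch, ethtool -L on the SF netdev is bounded by max_io_eqs instead
of being fixed at a single queue pair, and is further clamped by the number of
online CPUs and the HW queue caps.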

Patch

diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9e2f20ed55d5..c68f7796b83e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3786,22 +3786,31 @@  ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
 
 /**
  * ice_get_max_txq - return the maximum number of Tx queues for in a PF
- * @pf: PF structure
+ * @vsi: VSI structure
  */
-static int ice_get_max_txq(struct ice_pf *pf)
+static int ice_get_max_txq(struct ice_vsi *vsi)
 {
-	return min3(pf->num_lan_msix, (u16)num_online_cpus(),
-		    (u16)pf->hw.func_caps.common_cap.num_txq);
+	u16 num_queues = vsi->back->num_lan_msix;
+
+	if (vsi->max_io_eqs)
+		num_queues = vsi->max_io_eqs;
+	return min3(num_queues, (u16)num_online_cpus(),
+		    (u16)vsi->back->hw.func_caps.common_cap.num_txq);
 }
 
 /**
  * ice_get_max_rxq - return the maximum number of Rx queues for in a PF
- * @pf: PF structure
+ * @vsi: VSI structure
  */
-static int ice_get_max_rxq(struct ice_pf *pf)
+static int ice_get_max_rxq(struct ice_vsi *vsi)
 {
-	return min3(pf->num_lan_msix, (u16)num_online_cpus(),
-		    (u16)pf->hw.func_caps.common_cap.num_rxq);
+	u16 num_queues = vsi->back->num_lan_msix;
+
+	if (vsi->max_io_eqs)
+		num_queues = vsi->max_io_eqs;
+
+	return min3(num_queues, (u16)num_online_cpus(),
+		    (u16)vsi->back->hw.func_caps.common_cap.num_rxq);
 }
 
 /**
@@ -3839,8 +3848,8 @@  ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
 	struct ice_pf *pf = vsi->back;
 
 	/* report maximum channels */
-	ch->max_rx = ice_get_max_rxq(pf);
-	ch->max_tx = ice_get_max_txq(pf);
+	ch->max_rx = ice_get_max_rxq(vsi);
+	ch->max_tx = ice_get_max_txq(vsi);
 	ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
 
 	/* report current channels */
@@ -3958,14 +3967,14 @@  static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
 			   vsi->tc_cfg.numtc);
 		return -EINVAL;
 	}
-	if (new_rx > ice_get_max_rxq(pf)) {
+	if (new_rx > ice_get_max_rxq(vsi)) {
 		netdev_err(dev, "Maximum allowed Rx channels is %d\n",
-			   ice_get_max_rxq(pf));
+			   ice_get_max_rxq(vsi));
 		return -EINVAL;
 	}
-	if (new_tx > ice_get_max_txq(pf)) {
+	if (new_tx > ice_get_max_txq(vsi)) {
 		netdev_err(dev, "Maximum allowed Tx channels is %d\n",
-			   ice_get_max_txq(pf));
+			   ice_get_max_txq(vsi));
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 01220e21cc81..64a6152eaaef 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,32 @@  static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 	}
 }
 
+static void ice_vsi_set_num_txqs(struct ice_vsi *vsi, u16 def_qs)
+{
+	if (vsi->req_txq) {
+		vsi->alloc_txq = vsi->req_txq;
+		vsi->num_txq = vsi->req_txq;
+	} else {
+		vsi->alloc_txq = min_t(u16, def_qs, (u16)num_online_cpus());
+	}
+}
+
+static void ice_vsi_set_num_rxqs(struct ice_vsi *vsi, bool rss_ena, u16 def_qs)
+{
+	/* only 1 Rx queue unless RSS is enabled */
+	if (!rss_ena) {
+		vsi->alloc_rxq = 1;
+		return;
+	}
+
+	if (vsi->req_rxq) {
+		vsi->alloc_rxq = vsi->req_rxq;
+		vsi->num_rxq = vsi->req_rxq;
+	} else {
+		vsi->alloc_rxq = min_t(u16, def_qs, (u16)num_online_cpus());
+	}
+}
+
 /**
  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
  * @vsi: the VSI being configured
@@ -174,31 +200,13 @@  static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 
 	switch (vsi_type) {
 	case ICE_VSI_PF:
-		if (vsi->req_txq) {
-			vsi->alloc_txq = vsi->req_txq;
-			vsi->num_txq = vsi->req_txq;
-		} else {
-			vsi->alloc_txq = min3(pf->num_lan_msix,
-					      ice_get_avail_txq_count(pf),
-					      (u16)num_online_cpus());
-		}
-
+		ice_vsi_set_num_txqs(vsi, min(pf->num_lan_msix,
+					      ice_get_avail_txq_count(pf)));
 		pf->num_lan_tx = vsi->alloc_txq;
 
-		/* only 1 Rx queue unless RSS is enabled */
-		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
-			vsi->alloc_rxq = 1;
-		} else {
-			if (vsi->req_rxq) {
-				vsi->alloc_rxq = vsi->req_rxq;
-				vsi->num_rxq = vsi->req_rxq;
-			} else {
-				vsi->alloc_rxq = min3(pf->num_lan_msix,
-						      ice_get_avail_rxq_count(pf),
-						      (u16)num_online_cpus());
-			}
-		}
-
+		ice_vsi_set_num_rxqs(vsi, test_bit(ICE_FLAG_RSS_ENA, pf->flags),
+				     min(pf->num_lan_msix,
+					 ice_get_avail_rxq_count(pf)));
 		pf->num_lan_rx = vsi->alloc_rxq;
 
 		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
@@ -206,9 +214,12 @@  static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 						 vsi->alloc_txq));
 		break;
 	case ICE_VSI_SF:
-		vsi->alloc_txq = 1;
-		vsi->alloc_rxq = 1;
-		vsi->num_q_vectors = 1;
+		ice_vsi_set_num_txqs(vsi, min(vsi->max_io_eqs,
+					      ice_get_avail_txq_count(pf)));
+		ice_vsi_set_num_rxqs(vsi, test_bit(ICE_FLAG_RSS_ENA, pf->flags),
+				     min(vsi->max_io_eqs,
+					 ice_get_avail_rxq_count(pf)));
+		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
 		vsi->irq_dyn_alloc = true;
 		break;
 	case ICE_VSI_VF: