@@ -618,7 +618,6 @@ struct ice_pf {
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
struct ice_pf_msix msix;
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -795,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d\n",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
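
The reworked error path above no longer unwinds a partial allocation: the VSI keeps the v_idx q_vectors that did get allocated, and an error is returned only when none could be allocated at all. A minimal userspace sketch of that convention, for illustration only (demo_alloc_vectors() and DEMO_ENOMEM are hypothetical names, not driver code):

#include <stdio.h>

#define DEMO_ENOMEM 12        /* stand-in for the kernel's ENOMEM */

/* Keep whatever was allocated; report failure only when the count is zero. */
static int demo_alloc_vectors(int requested, int available, int *granted)
{
        int v_idx;

        for (v_idx = 0; v_idx < requested; v_idx++)
                if (v_idx >= available)        /* simulated allocation failure */
                        break;

        *granted = v_idx;                      /* partial result is kept */
        return v_idx ? 0 : -DEMO_ENOMEM;       /* error only on total failure */
}

int main(void)
{
        int granted, ret;

        ret = demo_alloc_vectors(8, 5, &granted);
        printf("partial: ret=%d granted=%d\n", ret, granted);  /* ret=0, granted=5 */

        ret = demo_alloc_vectors(8, 0, &granted);
        printf("total:   ret=%d granted=%d\n", ret, granted);  /* ret=-12, granted=0 */
        return 0;
}

Callers can then size rings from the reduced vsi->num_q_vectors instead of treating a shortfall as fatal.
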
@@ -3789,8 +3789,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3799,8 +3798,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -108,7 +108,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
/* load default PF MSI-X range */
if (!pf->msix.min)
@@ -118,20 +118,17 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
pf->msix.max = min(total_vectors,
ice_get_default_msix_amount(pf));
- if (pci_msix_can_alloc_dyn(pf->pdev)) {
+ if (pci_msix_can_alloc_dyn(pf->pdev))
vectors = pf->msix.min;
- max_vectors = total_vectors;
- } else {
+ else
vectors = pf->msix.max;
- max_vectors = vectors;
- }
vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
PCI_IRQ_MSIX);
if (vectors < pf->msix.min)
return -ENOMEM;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
return 0;
}
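
With dynamic MSI-X allocation available, the hunk above reserves only pf->msix.min vectors at probe time while sizing the IRQ tracker for the full pf->msix.max range; additional vectors are expected to be allocated on demand later. A kernel-style sketch of that on-demand step, using the generic PCI/MSI API (pci_msix_can_alloc_dyn(), pci_msix_alloc_irq_at()); demo_request_extra_vector() is a hypothetical helper, not the driver's own code, and the snippet only builds as part of a kernel module:

#include <linux/pci.h>
#include <linux/msi.h>

/* Pull one more MSI-X vector beyond the statically reserved block. */
static int demo_request_extra_vector(struct pci_dev *pdev)
{
        struct msi_map map;

        if (!pci_msix_can_alloc_dyn(pdev))
                return -ENOSPC; /* everything must come from the static block */

        /* MSI_ANY_INDEX lets the MSI core pick a free table entry. */
        map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
        if (map.index < 0)
                return map.index;

        return map.virq;        /* Linux IRQ number, usable with request_irq() */
}

In the non-dynamic case the else branch above allocates the full pf->msix.max block up front, so both paths end with the tracker covering the same range.
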
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
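
Taken together with the ice_get_rxq_count()/ice_get_txq_count() helpers added earlier, the default sizing above reduces to "available queues capped by online CPUs" per direction, with one q_vector for the larger of the two counts. A small userspace illustration of that arithmetic with made-up numbers (the real inputs come from ice_get_avail_rxq_count(), ice_get_avail_txq_count() and num_online_cpus()):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
        /* Hypothetical example: 64 Rx and 96 Tx queues available, 16 online CPUs. */
        unsigned int avail_rxq = 64, avail_txq = 96, cpus = 16;

        unsigned int alloc_rxq = min_u(avail_rxq, cpus);        /* ice_get_rxq_count() */
        unsigned int alloc_txq = min_u(avail_txq, cpus);        /* ice_get_txq_count() */
        unsigned int q_vectors = max_u(alloc_rxq, alloc_txq);   /* vsi->num_q_vectors */

        printf("alloc_rxq=%u alloc_txq=%u num_q_vectors=%u\n",
               alloc_rxq, alloc_txq, q_vectors);
        return 0;
}

The MSI-X vector count no longer appears anywhere in this calculation.
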
@@ -1173,12 +1177,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
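
In the channel VSI queue map above, qcount is now taken directly from vsi->num_rxq and only rounded up to a power of two by order_base_2() before the map is built. A userspace stand-in for that rounding, for illustration only (order_base_2_demo() is a local reimplementation, not the kernel helper):

#include <stdio.h>

/* Smallest order such that (1 << order) >= n, like the kernel's order_base_2(). */
static unsigned int order_base_2_demo(unsigned int n)
{
        unsigned int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned int qcount;

        for (qcount = 1; qcount <= 8; qcount++)
                printf("qcount=%u -> pow=%u -> %u queue-map slots\n",
                       qcount, order_base_2_demo(qcount),
                       1u << order_base_2_demo(qcount));
        return 0;
}
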