@@ -2899,8 +2899,8 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_xdp_xmit = ena_xdp_xmit,
};
 
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -2959,6 +2959,21 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
 
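+ /* A max queue size below the driver's minimum ring size cannot be
+ * used; fail initialization in that case.
+ */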
+ if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
+ max_tx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
+ max_rx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
/* When forcing large headers, we multiply the entry size by 2, and therefore divide
* the queue size by 2, leaving the amount of memory used by the queues unchanged.
*/
@@ -2989,6 +3004,8 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
adapter->max_rx_ring_size = max_rx_queue_size;
adapter->requested_tx_ring_size = tx_queue_size;
adapter->requested_rx_ring_size = rx_queue_size;
+
+ return 0;
}
 
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3190,11 +3207,18 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
goto err_admin_init;
}
 
- ena_calc_io_queue_size(adapter, get_feat_ctx);
+ rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
+ if (unlikely(rc))
+ goto err_admin_init;
 
return 0;
 
err_admin_init:
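+ /* Abort any admin commands still in flight and wait for the aborts
+ * to complete before destroying the admin queue.
+ */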
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less: