@@ -201,6 +201,12 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	mutex_unlock(&lag_mutex);
 
 	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
+		if (mlx5_sriov_is_enabled(dev0) ||
+		    mlx5_sriov_is_enabled(dev1)) {
+			mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
+			return;
+		}
+
 		for (i = 0; i < MLX5_MAX_PORTS; i++)
 			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
 						    MLX5_INTERFACE_PROTOCOL_IB);
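
For context, here is a minimal userspace sketch of the rule this hunk enforces: LAG activation is refused while either PF has VFs enabled. The struct port and try_enable_lag() names are hypothetical stand-ins, not driver API; this models only the guard, nothing else.

/*
 * Minimal sketch of the mutual-exclusion check above. struct port and
 * try_enable_lag() are made-up names standing in for the driver's
 * mlx5_core_dev and mlx5_do_bond() bonding path.
 */
#include <errno.h>
#include <stdio.h>

struct port {
	int num_vfs;	/* stands in for dev->priv.sriov.num_vfs */
};

/* Mirrors the guard: bail out if either port has SRIOV enabled. */
static int try_enable_lag(struct port *p0, struct port *p1)
{
	if (p0->num_vfs || p1->num_vfs) {
		fprintf(stderr, "LAG is not supported with SRIOV\n");
		return -EINVAL;
	}
	/* ...proceed with bonding here... */
	return 0;
}

int main(void)
{
	struct port a = { .num_vfs = 0 }, b = { .num_vfs = 4 };

	return try_enable_lag(&a, &b) ? 1 : 0;	/* rejected: b has VFs */
}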
@@ -90,6 +90,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 void mlx5_enter_error_state(struct mlx5_core_dev *dev);
 void mlx5_disable_device(struct mlx5_core_dev *dev);
 int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
@@ -37,6 +37,13 @@
 #include "eswitch.h"
 #endif
 
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	return !!sriov->num_vfs;
+}
+
 static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
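
The new helper reduces "is SRIOV enabled" to "is num_vfs non-zero", normalized with the double-negation idiom. A standalone sketch of just that idiom, with count_is_enabled() as a made-up name:

/* Illustration of the !!x idiom used by mlx5_sriov_is_enabled():
 * double negation folds any non-zero count into exactly 1 (true). */
#include <stdbool.h>
#include <stdio.h>

static bool count_is_enabled(int num_vfs)
{
	return !!num_vfs;	/* 0 -> false, 1..N -> true */
}

int main(void)
{
	printf("%d %d %d\n", count_is_enabled(0),
	       count_is_enabled(1), count_is_enabled(8));	/* prints: 0 1 1 */
	return 0;
}

Since the helper already returns bool, the implicit conversion alone would normalize the value; the !! mainly documents intent and keeps the expression correct even if the return type were ever widened to int.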
@@ -144,6 +151,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	if (!mlx5_core_is_pf(dev))
 		return -EPERM;
 
+	if (num_vfs && mlx5_lag_is_active(dev)) {
+		mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+		return -EINVAL;
+	}
+
 	mlx5_core_cleanup_vfs(dev);
 
 	if (!num_vfs) {
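
And the same exclusion seen from the other side: a sketch in which try_set_num_vfs() and lag_active are hypothetical stand-ins for mlx5_core_sriov_configure() and mlx5_lag_is_active(). Note that the num_vfs && condition still permits disabling VFs (num_vfs == 0) while LAG is active, matching the hunk above.

/*
 * Companion sketch to the hunk above: the exclusion enforced from the
 * SRIOV side. Names here are made up, not driver API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool lag_active;		/* stands in for mlx5_lag_is_active(dev) */

static int try_set_num_vfs(int num_vfs)
{
	if (num_vfs && lag_active) {
		fprintf(stderr, "can't turn sriov on while LAG is active\n");
		return -EINVAL;
	}
	/* ...enable (num_vfs > 0) or disable (num_vfs == 0) VFs here... */
	return 0;
}

int main(void)
{
	lag_active = true;
	return try_set_num_vfs(2) ? 1 : 0;	/* rejected while LAG is on */
}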