@@ -49,3 +49,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
+ice-$(CONFIG_ICE_VFIO_PCI) += ice_migration.o
@@ -55,6 +55,7 @@
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
+#include <linux/net/intel/ice_migration.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -77,6 +78,7 @@
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
+#include "ice_migration_private.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -963,6 +965,7 @@ void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+struct ice_pf *ice_get_pf_from_vf_pdev(struct pci_dev *pdev);
/**
* ice_set_rdma_cap - enable RDMA support
@@ -9313,3 +9313,18 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
};
+
+/**
+ * ice_get_pf_from_vf_pdev - Get PF structure from PCI device
+ * @pdev: pointer to the VF's PCI device
+ *
+ * Return pointer to ice PF structure, NULL for failure
+ */
+struct ice_pf *ice_get_pf_from_vf_pdev(struct pci_dev *pdev)
+{
+ struct ice_pf *pf;
+
+ pf = pci_iov_get_pf_drvdata(pdev, &ice_driver);
+
+ return !IS_ERR(pf) ? pf : NULL;
+}
new file mode 100644
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2023 Intel Corporation */
+
+#include "ice.h"
+
+/**
+ * ice_migration_get_pf - Get ice PF structure pointer by pdev
+ * @pdev: pointer to the VF's PCI device (as passed in by ice-vfio-pci)
+ *
+ * Return pointer to ice PF structure, NULL for failure
+ */
+struct ice_pf *ice_migration_get_pf(struct pci_dev *pdev)
+{
+ return ice_get_pf_from_vf_pdev(pdev);
+}
+EXPORT_SYMBOL(ice_migration_get_pf);
+
+/**
+ * ice_migration_init_vf - init ice VF device state data
+ * @vf: pointer to VF
+ */
+void ice_migration_init_vf(struct ice_vf *vf)
+{
+ vf->migration_enabled = true;
+}
+
+/**
+ * ice_migration_uninit_vf - uninit VF device state data
+ * @vf: pointer to VF
+ */
+void ice_migration_uninit_vf(struct ice_vf *vf)
+{
+ if (!vf->migration_enabled)
+ return;
+
+ vf->migration_enabled = false;
+}
+
+/**
+ * ice_migration_init_dev - init ice migration device
+ * @pf: pointer to PF of migration device
+ * @vf_id: VF index of migration device
+ *
+ * Return 0 for success, negative for failure
+ */
+int ice_migration_init_dev(struct ice_pf *pf, int vf_id)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vf *vf;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF from VF ID%d\n", vf_id);
+ return -EINVAL;
+ }
+
+ ice_migration_init_vf(vf);
+ ice_put_vf(vf);
+ return 0;
+}
+EXPORT_SYMBOL(ice_migration_init_dev);
+
+/**
+ * ice_migration_uninit_dev - uninit ice migration device
+ * @pf: pointer to PF of migration device
+ * @vf_id: VF index of migration device
+ */
+void ice_migration_uninit_dev(struct ice_pf *pf, int vf_id)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vf *vf;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF from VF ID%d\n", vf_id);
+ return;
+ }
+
+ ice_migration_uninit_vf(vf);
+ ice_put_vf(vf);
+}
+EXPORT_SYMBOL(ice_migration_uninit_dev);
new file mode 100644
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2018-2023 Intel Corporation */
+
+#ifndef _ICE_MIGRATION_PRIVATE_H_
+#define _ICE_MIGRATION_PRIVATE_H_
+
+/* This header exposes functions from ice_migration.c to other files
+ * that are compiled into ice.ko.
+ * Functions that may also be used by files compiled into
+ * ice-vfio-pci.ko must be exposed through ice_migration.h instead.
+ */
+
+#if IS_ENABLED(CONFIG_ICE_VFIO_PCI)
+void ice_migration_init_vf(struct ice_vf *vf);
+void ice_migration_uninit_vf(struct ice_vf *vf);
+#else
+static inline void ice_migration_init_vf(struct ice_vf *vf) { }
+static inline void ice_migration_uninit_vf(struct ice_vf *vf) { }
+#endif /* CONFIG_ICE_VFIO_PCI */
+
+#endif /* _ICE_MIGRATION_PRIVATE_H_ */
@@ -243,6 +243,10 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
if (vf->vf_ops->irq_close)
vf->vf_ops->irq_close(vf);
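+ /* Tear down and re-arm migration state so the rebuilt VF starts from a clean migration context */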
+ if (vf->migration_enabled) {
+ ice_migration_uninit_vf(vf);
+ ice_migration_init_vf(vf);
+ }
ice_vf_clear_counters(vf);
vf->vf_ops->clear_reset_trigger(vf);
}
@@ -139,6 +139,8 @@ struct ice_vf {
struct devlink_port devlink_port;
u16 num_msix; /* num of MSI-X configured on this VF */
+
+ u8 migration_enabled:1;
};
/* Flags for controlling behavior of ice_reset_vf */
new file mode 100644
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2018-2023 Intel Corporation */
+
+#ifndef _ICE_MIGRATION_H_
+#define _ICE_MIGRATION_H_
+
+struct ice_pf;
+
+#if IS_ENABLED(CONFIG_ICE_VFIO_PCI)
+struct ice_pf *ice_migration_get_pf(struct pci_dev *pdev);
+int ice_migration_init_dev(struct ice_pf *pf, int vf_id);
+void ice_migration_uninit_dev(struct ice_pf *pf, int vf_id);
+#else
+static inline struct ice_pf *ice_migration_get_pf(struct pci_dev *pdev)
+{
+ return NULL;
+}
+
+static inline int ice_migration_init_dev(struct ice_pf *pf, int vf_id)
+{
+ return 0;
+}
+
+static inline void ice_migration_uninit_dev(struct ice_pf *pf, int vf_id) { }
+#endif /* CONFIG_ICE_VFIO_PCI */
+
+#endif /* _ICE_MIGRATION_H_ */
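
Usage sketch (illustrative only, not part of the diff above): one way a vfio variant driver such as ice-vfio-pci could consume the new exports when it binds to a VF. The wrapper struct and function names below are hypothetical; only ice_migration_get_pf(), ice_migration_init_dev() and ice_migration_uninit_dev() come from this patch, and pci_iov_vf_id() is the existing PCI core helper for looking up a VF's index.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical consumer sketch, names are illustrative only. */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/net/intel/ice_migration.h>

struct my_ice_vfio_dev {
	struct pci_dev *vf_pdev;	/* VF handed to userspace via vfio */
	struct ice_pf *pf;		/* owning PF, resolved from the VF pdev */
	int vf_id;			/* VF index relative to its PF */
};

static int my_ice_vfio_migration_setup(struct my_ice_vfio_dev *mdev)
{
	/* Resolve the PF driver data through the PF/VF relationship */
	mdev->pf = ice_migration_get_pf(mdev->vf_pdev);
	if (!mdev->pf)
		return -ENODEV;

	/* PCI core helper: VF index of this pdev within its PF */
	mdev->vf_id = pci_iov_vf_id(mdev->vf_pdev);
	if (mdev->vf_id < 0)
		return mdev->vf_id;

	/* Ask the PF driver to enable migration state for this VF */
	return ice_migration_init_dev(mdev->pf, mdev->vf_id);
}

static void my_ice_vfio_migration_teardown(struct my_ice_vfio_dev *mdev)
{
	ice_migration_uninit_dev(mdev->pf, mdev->vf_id);
}

With CONFIG_ICE_VFIO_PCI disabled, the stubs above keep such a caller compiling: ice_migration_get_pf() returns NULL and ice_migration_init_dev() returns 0, while ice_migration_uninit_dev() is a no-op.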