@@ -867,6 +867,25 @@ hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return ERR_PTR(-EINVAL);
}
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset' if it exists.
+ */
+static void hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+again:
+ spin_lock(&hisi_acc_vdev->reset_lock);
+ if (hisi_acc_vdev->deferred_reset) {
+ hisi_acc_vdev->deferred_reset = false;
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ goto again;
+ }
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+}
+
static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
enum vfio_device_mig_state new_state)
@@ -897,7 +916,7 @@ hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
break;
}
}
- mutex_unlock(&hisi_acc_vdev->state_mutex);
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
return res;
}
@@ -910,7 +929,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
mutex_lock(&hisi_acc_vdev->state_mutex);
*curr_state = hisi_acc_vdev->mig_state;
- mutex_unlock(&hisi_acc_vdev->state_mutex);
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
return 0;
}
@@ -1065,6 +1084,30 @@ static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int
return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}
+static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+
+ if (!hisi_acc_vdev->migration_support)
+ return;
+
+ /*
+ * As the higher VFIO layers are holding locks across reset and using
+ * those same locks together with the mm_lock, we need to prevent an
+ * ABBA deadlock between the state_mutex and the mm_lock.
+ * In case the state_mutex was taken already, we defer the cleanup work
+ * to the unlock flow of the other running context.
+ */
+ spin_lock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->deferred_reset = true;
+ if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ return;
+ }
+ spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+}
+
static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
@@ -1168,12 +1211,17 @@ static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
+static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+ .reset_done = hisi_acc_vf_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
static struct pci_driver hisi_acc_vfio_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = hisi_acc_vfio_pci_table,
.probe = hisi_acc_vfio_pci_probe,
.remove = hisi_acc_vfio_pci_remove,
- .err_handler = &vfio_pci_core_err_handlers,
+ .err_handler = &hisi_acc_vf_err_handlers,
};
module_pci_driver(hisi_acc_vfio_pci_driver);
diff --git a/drivers/vfio/pci/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisi_acc_vfio_pci.h
--- a/drivers/vfio/pci/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisi_acc_vfio_pci.h
@@ -104,6 +104,7 @@ struct hisi_acc_vf_migration_file {
struct hisi_acc_vf_core_device {
struct vfio_pci_core_device core_device;
u8 migration_support:1;
+ u8 deferred_reset:1;
/* for migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
@@ -112,7 +113,8 @@ struct hisi_acc_vf_core_device {
struct hisi_qm *pf_qm;
struct hisi_qm vf_qm;
int vf_id;
-
+ /* for reset handler */
+ spinlock_t reset_lock;
struct hisi_acc_vf_migration_file *resuming_migf;
struct hisi_acc_vf_migration_file *saving_migf;
};
Register private handler for pci_error_handlers.reset_done and update
state accordingly.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
 drivers/vfio/pci/hisi_acc_vfio_pci.c | 54 ++++++++++++++++++++++++++--
 drivers/vfio/pci/hisi_acc_vfio_pci.h |  4 ++-
 2 files changed, 54 insertions(+), 4 deletions(-)
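
A note on the locking scheme, for reference: the reset_done handler must not
sleep on state_mutex, because the higher VFIO layers hold locks across reset
that also nest with the mm_lock, which is exactly the ABBA deadlock the code
comment warns about. Instead the handler records the reset under the short
reset_lock and lets whichever context already owns state_mutex finish the
cleanup on its way out (hisi_acc_vf_state_mutex_unlock). The fragment below
is a minimal userspace sketch of that pattern, translated to pthreads purely
for illustration; the names and the pthread translation are assumptions for
the example and are not part of the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t state_mutex;	/* serializes migration state changes */
	pthread_mutex_t reset_lock;	/* short lock protecting deferred_reset */
	bool deferred_reset;
	int mig_state;			/* 0 == "RUNNING" in this sketch */
};

/* Called wherever state_mutex would be unlocked: drain any deferred reset. */
static void state_mutex_unlock(struct dev *d)
{
again:
	pthread_mutex_lock(&d->reset_lock);
	if (d->deferred_reset) {
		d->deferred_reset = false;
		pthread_mutex_unlock(&d->reset_lock);
		d->mig_state = 0;	/* back to RUNNING */
		printf("deferred reset handled\n");
		goto again;
	}
	pthread_mutex_unlock(&d->state_mutex);
	pthread_mutex_unlock(&d->reset_lock);
}

/* Reset-done path: never block on state_mutex; defer if it is contended. */
static void reset_done(struct dev *d)
{
	pthread_mutex_lock(&d->reset_lock);
	d->deferred_reset = true;
	if (pthread_mutex_trylock(&d->state_mutex)) {
		/* Another context owns state_mutex; it will see the flag. */
		pthread_mutex_unlock(&d->reset_lock);
		return;
	}
	pthread_mutex_unlock(&d->reset_lock);
	state_mutex_unlock(d);
}

int main(void)
{
	struct dev d = {
		.state_mutex = PTHREAD_MUTEX_INITIALIZER,
		.reset_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	reset_done(&d);		/* uncontended case: handled immediately */
	return 0;
}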