@@ -869,5 +869,7 @@ int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
void idpf_idc_deinit_core_aux_device(struct idc_rdma_core_dev_info *cdev_info);
void idpf_idc_deinit_vport_aux_device(struct idc_rdma_vport_dev_info *vdev_info);
void idpf_idc_issue_reset_event(struct idc_rdma_core_dev_info *cdev_info);
+void idpf_idc_vdev_mtu_event(struct idc_rdma_vport_dev_info *vdev_info,
+ enum idc_rdma_event_type event_type);
#endif /* !_IDPF_H_ */
@@ -134,6 +134,41 @@ static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport)
}
/**
+ * idpf_idc_vdev_mtu_event - Function to handle IDC vport mtu change events
+ * @vdev_info: idc vport device info pointer
+ * @event_type: type of event to pass to handler
+ *
+ * Notify the bound RDMA auxiliary driver (if any) that the vport MTU is
+ * about to change / has changed, so it can quiesce and resume accordingly.
+ */
+void idpf_idc_vdev_mtu_event(struct idc_rdma_vport_dev_info *vdev_info,
+			     enum idc_rdma_event_type event_type)
+{
+	struct idc_rdma_vport_auxiliary_drv *iadrv;
+	struct idc_rdma_event event = { };
+	struct auxiliary_device *adev;
+
+	if (!vdev_info)
+		/* RDMA is not enabled */
+		return;
+
+	/* Check adev before touching its embedded device; dereferencing it
+	 * inside device_lock() first would crash on a NULL adev.
+	 */
+	adev = vdev_info->adev;
+	if (!adev)
+		return;
+
+	set_bit(event_type, event.type);
+
+	/* Hold the device lock so the aux driver cannot unbind while the
+	 * event handler runs.
+	 */
+	device_lock(&adev->dev);
+	if (!adev->dev.driver)
+		goto unlock;
+	iadrv = container_of(adev->dev.driver,
+			     struct idc_rdma_vport_auxiliary_drv,
+			     adrv.driver);
+	if (iadrv->event_handler)
+		iadrv->event_handler(vdev_info, &event);
+unlock:
+	device_unlock(&adev->dev);
+}
+
+/**
* idpf_core_adev_release - function to be mapped to aux dev's release op
* @dev: pointer to device to free
*/
@@ -1941,6 +1941,8 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_calc_num_q_desc(new_vport);
break;
case IDPF_SR_MTU_CHANGE:
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
case IDPF_SR_RSC_CHANGE:
break;
default:
@@ -1952,6 +1954,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
err = idpf_vport_queues_alloc(new_vport);
if (err)
goto free_vport;
+
if (current_state <= __IDPF_VPORT_DOWN) {
idpf_send_delete_queues_msg(vport);
} else {
@@ -2028,15 +2031,17 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (current_state == __IDPF_VPORT_UP)
err = idpf_vport_open(vport, false);
- kfree(new_vport);
-
- return err;
+ goto free_vport;
err_reset:
idpf_vport_queues_rel(new_vport);
free_vport:
kfree(new_vport);
+ if (reset_cause == IDPF_SR_MTU_CHANGE)
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IDC_RDMA_EVENT_AFTER_MTU_CHANGE);
+
return err;
}