@@ -868,5 +868,6 @@ int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
enum idc_function_type ftype);
void idpf_idc_deinit_core_aux_device(struct idc_rdma_core_dev_info *cdev_info);
void idpf_idc_deinit_vport_aux_device(struct idc_rdma_vport_dev_info *vdev_info);
+void idpf_idc_issue_reset_event(struct idc_rdma_core_dev_info *cdev_info);
#endif /* !_IDPF_H_ */
@@ -209,6 +209,38 @@ static void idpf_unplug_aux_dev(struct auxiliary_device *adev)
}
/**
+ * idpf_idc_issue_reset_event - send a warn-reset IDC event to the aux driver
+ * @cdev_info: idc core device info pointer
+ */
+void idpf_idc_issue_reset_event(struct idc_rdma_core_dev_info *cdev_info)
+{
+ enum idc_rdma_event_type event_type = IDC_RDMA_EVENT_WARN_RESET;
+ struct idc_rdma_core_auxiliary_drv *iadrv;
+ struct idc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!cdev_info)
+ /* RDMA is not enabled */
+ return;
+
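+ /* event.type is a bitmap, hence set_bit() rather than an assignment */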
+ set_bit(event_type, event.type);
+
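+ /* Bail if the aux device was never created; otherwise hold its device
+  * lock so the aux driver cannot unbind while the event is delivered.
+  */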
+ adev = cdev_info->adev;
+ if (!adev)
+ return;
+
+ device_lock(&adev->dev);
+
+ if (!adev->dev.driver)
+ goto unlock;
+
+ iadrv = container_of(adev->dev.driver,
+ struct idc_rdma_core_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(cdev_info, &event);
+unlock:
+ device_unlock(&adev->dev);
+}
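+
+/* A minimal sketch of the consumer side, assuming a core auxiliary driver
+ * that registered an event_handler in its struct idc_rdma_core_auxiliary_drv
+ * (the handler and quiesce helper names below are hypothetical):
+ *
+ * static void example_event_handler(struct idc_rdma_core_dev_info *cdev_info,
+ *                                   struct idc_rdma_event *event)
+ * {
+ *         if (test_bit(IDC_RDMA_EVENT_WARN_RESET, event->type))
+ *                 example_quiesce(cdev_info);
+ * }
+ */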
+
+/**
* idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs
* @adapter: private data struct
*/
@@ -285,7 +317,16 @@ static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
idpf_idc_request_reset(struct idc_rdma_core_dev_info *cdev_info,
enum idc_rdma_reset_type __always_unused reset_type)
{
- return -EOPNOTSUPP;
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
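+ /* Kick off a function-level reset via the vc_event task unless a
+  * reset is already in progress.
+  */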
+ if (!idpf_is_reset_in_prog(adapter)) {
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+
+ return 0;
}
/* Implemented by the Auxiliary Device and called by the Auxiliary Driver */
@@ -1814,6 +1814,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
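
+ /* Warn the RDMA aux driver before virtchnl resources are torn down */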
+ idpf_idc_issue_reset_event(adapter->cdev_info);
+
idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
@@ -3703,5 +3703,26 @@ int idpf_idc_rdma_vc_send_sync(struct idc_rdma_core_dev_info *cdev_info,
u8 *send_msg, u16 msg_size,
u8 *recv_msg, u16 *recv_len)
{
- return -EOPNOTSUPP;
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+ struct idpf_vc_xn_params xn_params = { };
+ ssize_t reply_sz;
+ u16 recv_size;
+
+ if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
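+ /* Clamp the reply buffer to the control queue limit and report zero
+  * bytes received until the reply actually lands.
+  */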
+ recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
+ *recv_len = 0;
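+ /* Relay the raw RDMA payload as a synchronous VIRTCHNL2_OP_RDMA transaction */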
+ xn_params.vc_op = VIRTCHNL2_OP_RDMA;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = send_msg;
+ xn_params.send_buf.iov_len = msg_size;
+ xn_params.recv_buf.iov_base = recv_msg;
+ xn_params.recv_buf.iov_len = recv_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ *recv_len = reply_sz;
+
+ return 0;
}
@@ -62,8 +62,9 @@ enum virtchnl2_op {
VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
* VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
- * Opcodes 529, 530, 531, 532 and 533 are reserved.
*/
+ VIRTCHNL2_OP_RDMA = 529,
+ /* Opcodes 530 through 533 are reserved. */
VIRTCHNL2_OP_LOOPBACK = 534,
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,