@@ -25,6 +25,7 @@ struct ice_migration_dev_state {
u16 num_txq;
u16 num_rxq;
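+ /* VSI id recorded on the migration source, as seen by the VM */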
+ u16 vsi_id;
u8 virtchnl_msgs[];
} __aligned(8);
@@ -50,6 +51,7 @@ void ice_migration_init_vf(struct ice_vf *vf)
INIT_LIST_HEAD(&vf->virtchnl_msg_list);
vf->virtchnl_msg_num = 0;
vf->virtchnl_msg_size = 0;
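+ /* Until a device state is restored, the VM sees the VF's own
+ * VSI number
+ */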
+ vf->vm_vsi_num = vf->lan_vsi_num;
}
/**
@@ -314,6 +316,7 @@ int ice_migration_save_devstate(struct ice_pf *pf, int vf_id, u8 *buf, u64 buf_s
devstate->num_txq = vsi->num_txq;
devstate->num_rxq = vsi->num_rxq;
buf = devstate->virtchnl_msgs;
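+ /* Log the VSI id the VM has been using so the destination can
+ * translate it after migration
+ */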
+ devstate->vsi_id = vf->vm_vsi_num;
list_for_each_entry(msg_listnode, &vf->virtchnl_msg_list, node) {
struct ice_migration_virtchnl_msg_slot *msg_slot;
@@ -439,6 +442,8 @@ int ice_migration_restore_devstate(struct ice_pf *pf, int vf_id, const u8 *buf,
goto out_put_vf;
devstate = (struct ice_migration_dev_state *)buf;
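+ /* Adopt the source VSI id, which the VM keeps using in its messages */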
+ vf->vm_vsi_num = devstate->vsi_id;
+ dev_dbg(dev, "VF %d VM VSI num is %d\n", vf->vf_id, vf->vm_vsi_num);
msg_slot = (struct ice_migration_virtchnl_msg_slot *)devstate->virtchnl_msgs;
set_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states);
@@ -470,3 +475,94 @@ int ice_migration_restore_devstate(struct ice_pf *pf, int vf_id, const u8 *buf,
return ret;
}
EXPORT_SYMBOL(ice_migration_restore_devstate);
+
+/**
+ * ice_migration_fix_msg_vsi - fix up the VSI id of a virtual channel message
+ * @vf: pointer to the VF structure
+ * @v_opcode: virtchnl message operation code
+ * @msg: pointer to the virtual channel message
+ *
+ * After migration, the VSI id carried in a virtual channel message is
+ * still the VSI id of the migration source, so some virtual channel
+ * commands would fail due to the mismatched VSI id.
+ * Replace the VSI id in the message payload with the real VSI id.
+ */
+void ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg)
+{
+ if (!vf->migration_enabled)
+ return;
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ case VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN: {
+ /* The first two bytes of the message payload hold the VSI id */
+ u16 *vsi_id = (u16 *)msg;
+
+ /* During VM runtime, the vsi_id in the virtual channel message
+ * should match the vsi_id logged by the PF; in that case it is
+ * replaced with the VF's VSI id so that the message is processed
+ * successfully. If it does not match the PF logged vsi_id, the
+ * message must have been sent by a malicious VF, so no replacement
+ * is done and the virtual channel handler is left to fail the
+ * message.
+ *
+ * During the virtual channel replay stage, all of the PF logged
+ * virtual channel messages are trusted, so the vsi_id is replaced
+ * unconditionally to guarantee that the messages are processed
+ * successfully.
+ */
+ if (*vsi_id == vf->vm_vsi_num ||
+ test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states))
+ *vsi_id = vf->lan_vsi_num;
+ break;
+ }
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP: {
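+ /* Here the VSI id is carried in each vector mapping rather
+ * than at the start of the payload
+ */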
+ struct virtchnl_irq_map_info *irqmap_info;
+ u16 num_q_vectors_mapped;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct virtchnl_vector_map *map;
+
+ map = &irqmap_info->vecmap[i];
+ if (map->vsi_id == vf->vm_vsi_num ||
+ test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states))
+ map->vsi_id = vf->lan_vsi_num;
+ }
+ break;
+ }
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES: {
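+ /* The VSI id appears both in the header and in every queue
+ * pair of the payload
+ */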
+ struct virtchnl_vsi_queue_config_info *qci;
+
+ qci = (struct virtchnl_vsi_queue_config_info *)msg;
+ if (qci->vsi_id == vf->vm_vsi_num ||
+ test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states)) {
+ int i;
+
+ qci->vsi_id = vf->lan_vsi_num;
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ struct virtchnl_queue_pair_info *qpi;
+
+ qpi = &qci->qpair[i];
+ qpi->txq.vsi_id = vf->lan_vsi_num;
+ qpi->rxq.vsi_id = vf->lan_vsi_num;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
@@ -17,6 +17,7 @@ int ice_migration_log_vf_msg(struct ice_vf *vf,
struct ice_rq_event_info *event);
void ice_migration_unlog_vf_msg(struct ice_vf *vf, u32 v_opcode);
u32 ice_migration_supported_caps(void);
+void ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg);
#else
static inline void ice_migration_init_vf(struct ice_vf *vf) { }
static inline void ice_migration_uninit_vf(struct ice_vf *vf) { }
@@ -28,6 +29,9 @@ ice_migration_supported_caps(void)
{
return 0xFFFFFFFF;
}
+
+static inline void
+ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg) { }
#endif /* CONFIG_ICE_VFIO_PCI */
#endif /* _ICE_MIGRATION_PRIVATE_H_ */
@@ -143,6 +143,7 @@ struct ice_vf {
u64 virtchnl_msg_num;
u64 virtchnl_msg_size;
u32 virtchnl_retval;
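+ /* VSI number used by the VM in virtchnl messages */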
+ u16 vm_vsi_num;
};
/* Flags for controlling behavior of ice_reset_vf */
@@ -4048,6 +4048,7 @@ int ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
}
if (vf->migration_enabled) {
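+ /* Fix up the VSI id before the message is logged and handled */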
+ ice_migration_fix_msg_vsi(vf, v_opcode, msg);
if (ice_migration_log_vf_msg(vf, event)) {
err = ice_vc_respond_to_vf(vf, v_opcode,
VIRTCHNL_STATUS_ERR_NO_MEMORY,