@@ -2,9 +2,11 @@
/* Copyright (C) 2018-2023 Intel Corporation */
#include "ice.h"
+#include "ice_base.h"
#define ICE_MIG_DEVSTAT_MAGIC 0xE8000001
#define ICE_MIG_DEVSTAT_VERSION 0x1
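+/* max number of VF RX queues tracked in the device state buffer */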
+#define ICE_MIG_VF_QRX_TAIL_MAX 256
struct ice_migration_virtchnl_msg_slot {
u32 opcode;
@@ -26,6 +28,8 @@ struct ice_migration_dev_state {
u16 num_rxq;
u16 vsi_id;
+ /* per-queue index of the next RX descriptor to be processed by the device */
+ u16 rx_head[ICE_MIG_VF_QRX_TAIL_MAX];
u8 virtchnl_msgs[];
} __aligned(8);
@@ -265,6 +269,54 @@ u32 ice_migration_supported_caps(void)
return VIRTCHNL_VF_MIGRATION_SUPPORT_FEATURE;
}
+/**
+ * ice_migration_save_rx_head - save RX queue head into device state buffer
+ * @vf: pointer to VF structure
+ * @devstate: pointer to migration buffer
+ *
+ * Return: 0 for success, negative for error
+ */
+static int
+ice_migration_save_rx_head(struct ice_vf *vf,
+ struct ice_migration_dev_state *devstate)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi;
+ int i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ dev_err(dev, "VF %d VSI is NULL\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rlan_ctx rlan_ctx = {};
+ struct ice_hw *hw = &vf->pf->hw;
+ u16 rxq_index;
+ int status;
+
+ if (WARN_ON_ONCE(!rx_ring))
+ return -EINVAL;
+
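+ /* queues that are not enabled record a head of 0 */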
+ devstate->rx_head[i] = 0;
+ if (!test_bit(i, vf->rxq_ena))
+ continue;
+
+ rxq_index = rx_ring->reg_idx;
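+ /* read back the RX queue context to capture the HW head */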
+ status = ice_read_rxq_ctx(hw, &rlan_ctx, rxq_index);
+ if (status) {
+ dev_err(dev, "Failed to read RXQ[%d] context, err=%d\n",
+ rx_ring->q_index, status);
+ return -EIO;
+ }
+ devstate->rx_head[i] = rlan_ctx.head;
+ }
+
+ return 0;
+}
+
/**
* ice_migration_save_devstate - save device state to migration buffer
* @pf: pointer to PF of migration device
@@ -318,6 +370,12 @@ int ice_migration_save_devstate(struct ice_pf *pf, int vf_id, u8 *buf, u64 buf_s
buf = devstate->virtchnl_msgs;
devstate->vsi_id = vf->vm_vsi_num;
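+ /* Save the RX head of each queue so that RX processing can resume
+ * from the same descriptor on the destination
+ */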
+ ret = ice_migration_save_rx_head(vf, devstate);
+ if (ret) {
+ dev_err(dev, "VF %d failed to save rxq head\n", vf->vf_id);
+ goto out_put_vf;
+ }
+
list_for_each_entry(msg_listnode, &vf->virtchnl_msg_list, node) {
struct ice_migration_virtchnl_msg_slot *msg_slot;
u64 slot_size;
@@ -408,6 +466,57 @@ static int ice_migration_check_match(struct ice_vf *vf, const u8 *buf, u64 buf_s
return 0;
}
+/**
+ * ice_migration_restore_rx_head - restore RX queue head from device state buffer
+ * @vf: pointer to VF structure
+ * @devstate: pointer to migration device state
+ *
+ * Return: 0 for success, negative for error
+ */
+static int
+ice_migration_restore_rx_head(struct ice_vf *vf,
+ struct ice_migration_dev_state *devstate)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi;
+ int i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ dev_err(dev, "VF %d VSI is NULL\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rlan_ctx rlan_ctx = {};
+ struct ice_hw *hw = &vf->pf->hw;
+ u16 rxq_index;
+ int status;
+
+ if (WARN_ON_ONCE(!rx_ring))
+ return -EINVAL;
+
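+ /* Read-modify-write: fetch the RX queue context programmed by the
+ * replayed queue configuration, patch only the head field and
+ * write the context back
+ */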
+ rxq_index = rx_ring->reg_idx;
+ status = ice_read_rxq_ctx(hw, &rlan_ctx, rxq_index);
+ if (status) {
+ dev_err(dev, "Failed to read RXQ[%d] context, err=%d\n",
+ rx_ring->q_index, status);
+ return -EIO;
+ }
+
+ rlan_ctx.head = devstate->rx_head[i];
+ status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
+ if (status) {
+ dev_err(dev, "Failed to set LAN RXQ[%d] context, err=%d\n",
+ rx_ring->q_index, status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
/**
* ice_migration_restore_devstate - restore device state at dst
* @pf: pointer to PF of migration device
@@ -464,6 +573,22 @@ int ice_migration_restore_devstate(struct ice_pf *pf, int vf_id, const u8 *buf,
vf->vf_id, msg_slot->opcode);
goto out_clear_replay;
}
+
+ /* Once an RX queue is enabled, network traffic may arrive at any
+ * time. As a result, the RX queue head must be restored before the
+ * queue is enabled.
+ * For simplicity, overwrite the RX head right after the RX ring
+ * context is configured by the replayed
+ * VIRTCHNL_OP_CONFIG_VSI_QUEUES message.
+ */
+ if (msg_slot->opcode == VIRTCHNL_OP_CONFIG_VSI_QUEUES) {
+ ret = ice_migration_restore_rx_head(vf, devstate);
+ if (ret) {
+ dev_err(dev, "VF %d failed to restore rx head\n",
+ vf->vf_id);
+ goto out_clear_replay;
+ }
+ }
+
event.msg_buf = NULL;
msg_slot = (struct ice_migration_virtchnl_msg_slot *)
((char *)msg_slot + slot_sz);