@@ -29,6 +29,7 @@
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
+#include "nfp_net_sriov.h"
static const char nfp_driver_name[] = "nfp";
@@ -252,6 +253,10 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return -EINVAL;
}
+ err = nfp_vf_queues_config(pf, num_vfs);
+ if (err)
+ return err;
+
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
@@ -782,6 +787,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_fw_unload;
+ pf->default_config_vfs_queue = true;
pf->num_vfs = pci_num_vf(pdev);
if (pf->num_vfs > pf->limit_vfs) {
dev_err(&pdev->dev,
@@ -17,6 +17,12 @@
#include <linux/workqueue.h>
#include <net/devlink.h>
+/* Number of supported max-queue-number types that can be configured.
+ * The supported per-VF queue counts are currently 16, 8, 4, 2 and 1.
+ */
+#define NFP_NET_CFG_QUEUE_TYPE 5
+#define NFP_NET_CFG_MAX_Q(type) (1 << (NFP_NET_CFG_QUEUE_TYPE - (type) - 1))
+
struct dentry;
struct device;
struct pci_dev;
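As an aside (illustration only, not part of the patch): NFP_NET_CFG_MAX_Q() maps a type index to its per-VF queue count, so index 0 selects 16 queues and index 4 selects a single queue. The mapping can be checked at compile time, e.g. with the kernel's static_assert() from <linux/build_bug.h>:

	static_assert(NFP_NET_CFG_MAX_Q(0) == 16);	/* type 0 -> 16 queues per VF */
	static_assert(NFP_NET_CFG_MAX_Q(2) == 4);	/* type 2 -> 4 queues per VF */
	static_assert(NFP_NET_CFG_MAX_Q(4) == 1);	/* type 4 -> 1 queue per VF */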
@@ -63,6 +69,10 @@ struct nfp_dumpspec {
* @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
+ * @max_vf_queues: Number of queues that can be allocated to VFs
+ * @config_vfs_queue: Array of VF counts requested for each max-queue-number
+ *			type; the distributable per-VF queue counts are {16, 8, 4, 2, 1}
+ * @default_config_vfs_queue: Whether queues are distributed evenly across all VFs
+ *			(the default mode)
* @fw_loaded: Is the firmware loaded?
* @unload_fw_on_remove:Do we need to unload firmware on driver removal?
* @sp_indiff: Is the firmware indifferent to physical port speed?
@@ -112,6 +122,9 @@ struct nfp_pf {
unsigned int limit_vfs;
unsigned int num_vfs;
+ unsigned int max_vf_queues;
+ u8 config_vfs_queue[NFP_NET_CFG_QUEUE_TYPE];
+ bool default_config_vfs_queue;
bool fw_loaded;
bool unload_fw_on_remove;
@@ -78,6 +78,7 @@
/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS 64 /* Max. # of Rx rings per device */
+#define NFP_NET_CTRL_RINGS 1 /* Max. # of Ctrl rings per device */
#define NFP_NET_MAX_R_VECS (NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
@@ -296,6 +296,7 @@ static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
if (err)
goto err_prev_deinit;
+ pf->max_vf_queues -= nn->max_r_vecs;
id++;
}
@@ -794,6 +795,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
}
}
+ pf->max_vf_queues = NFP_NET_MAX_R_VECS - NFP_NET_CTRL_RINGS;
+
err = nfp_net_pf_app_init(pf, qc_bar, stride);
if (err)
goto err_unmap;
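For reference (illustration only, not part of the patch): with NFP_NET_MAX_TX_RINGS and NFP_NET_MAX_RX_RINGS both 64, NFP_NET_MAX_R_VECS evaluates to 64, so the VF queue budget starts at 64 - NFP_NET_CTRL_RINGS = 63, and each PF vNIC's max_r_vecs is then subtracted in nfp_net_pf_init_vnics(). A hedged sketch of that bookkeeping, where example_vf_queue_budget() and pf_vnic_r_vecs are hypothetical stand-ins:

	static unsigned int example_vf_queue_budget(unsigned int pf_vnic_r_vecs)
	{
		/* 64 - 1 = 63 queues before the PF's own vNICs are accounted for */
		unsigned int budget = NFP_NET_MAX_R_VECS - NFP_NET_CTRL_RINGS;

		return budget - pf_vnic_r_vecs;	/* e.g. an 8-vector PF vNIC leaves 55 for VFs */
	}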
@@ -29,6 +29,9 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool
return -EOPNOTSUPP;
}
+ if (cap == NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG)
+ return 0;
+
if (vf < 0 || vf >= app->pf->num_vfs) {
if (warn)
nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
@@ -309,3 +312,101 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+
+static int nfp_set_vf_queue_config(struct nfp_pf *pf, int num_vfs)
+{
+ unsigned char config_content[sizeof(u32)] = {0};
+ unsigned int i, j, k, cfg_vf_count, offset;
+ struct nfp_net *nn;
+ u32 raw;
+ int err;
+
+ raw = 0;
+ k = 0;
+ cfg_vf_count = 0;
+ offset = NFP_NET_VF_CFG_MB_SZ + pf->limit_vfs * NFP_NET_VF_CFG_SZ;
+
+ for (i = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++) {
+ for (j = 0; j < pf->config_vfs_queue[i]; j++) {
+ config_content[k++] = NFP_NET_CFG_MAX_Q(i);
+ cfg_vf_count++;
+ if (k == sizeof(raw) || cfg_vf_count == num_vfs) {
+ raw = config_content[0] |
+ (config_content[1] << BITS_PER_BYTE) |
+ (config_content[2] << (2 * BITS_PER_BYTE)) |
+ (config_content[3] << (3 * BITS_PER_BYTE));
+ writel(raw, pf->vfcfg_tbl2 + offset);
+ offset += sizeof(raw);
+ memset(config_content, 0, sizeof(config_content));
+ k = 0;
+ }
+ }
+ }
+
+ writew(NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_UPD);
+
+ nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VF);
+ if (err) {
+ nfp_warn(pf->cpp,
+ "FW reconfig VF config queue failed: %d\n", err);
+ return -EINVAL;
+ }
+
+ err = readw(pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_RET);
+ if (err) {
+ nfp_warn(pf->cpp,
+ "FW refused VF config queue update with errno: %d\n", err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int nfp_vf_queues_config(struct nfp_pf *pf, int num_vfs)
+{
+ unsigned int i, j, cfg_num_queues = 0, cfg_num_vfs;
+
+ if (nfp_net_sriov_check(pf->app, 0, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG, "max_queue", true))
+ return 0;
+
+ /* In the default mode, the available VF queues are divided equally
+ * among the created VFs, with the per-VF count rounded down to a
+ * power of 2.
+ */
+ if (pf->default_config_vfs_queue) {
+ memset(pf->config_vfs_queue, 0, sizeof(pf->config_vfs_queue));
+ j = pf->max_vf_queues / num_vfs;
+ for (i = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++) {
+ if (j >= NFP_NET_CFG_MAX_Q(i)) {
+ pf->config_vfs_queue[i] = num_vfs;
+ break;
+ }
+ }
+ return nfp_set_vf_queue_config(pf, num_vfs);
+ }
+
+ for (i = 0, cfg_num_vfs = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++) {
+ cfg_num_queues += NFP_NET_CFG_MAX_Q(i) * pf->config_vfs_queue[i];
+ cfg_num_vfs += pf->config_vfs_queue[i];
+ }
+
+ if (cfg_num_queues > pf->max_vf_queues) {
+ dev_warn(&pf->pdev->dev,
+ "Requested configuration exceeds the total number of queues available for VFs.\n");
+ return -EINVAL;
+ }
+
+ cfg_num_queues = pf->max_vf_queues - cfg_num_queues;
+
+ if (num_vfs > cfg_num_vfs) {
+ cfg_num_vfs = num_vfs - cfg_num_vfs;
+ if (cfg_num_queues < cfg_num_vfs) {
+ dev_warn(&pf->pdev->dev,
+ "Not enough remaining queues to allocate to the extra VFs.\n");
+ return -EINVAL;
+ }
+ dev_info(&pf->pdev->dev,
+ "VFs created beyond the explicit configuration are allocated a single queue each.\n");
+ pf->config_vfs_queue[NFP_NET_CFG_QUEUE_TYPE - 1] += cfg_num_vfs;
+ }
+
+ return nfp_set_vf_queue_config(pf, num_vfs);
+}
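To make the default split concrete (illustration only, not part of the patch): nfp_vf_queues_config() picks the largest supported per-VF count from {16, 8, 4, 2, 1} that does not exceed max_vf_queues / num_vfs, e.g. 40 queues spread across 6 VFs gives 40 / 6 = 6, which rounds down to 4 queues per VF. A standalone sketch of that selection, with example_default_queues_per_vf() as a hypothetical helper:

	static unsigned int example_default_queues_per_vf(unsigned int max_vf_queues,
							  unsigned int num_vfs)
	{
		unsigned int i;

		for (i = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++)
			if (max_vf_queues / num_vfs >= NFP_NET_CFG_MAX_Q(i))
				return NFP_NET_CFG_MAX_Q(i);

		return 0;	/* fewer queues than VFs: no even power-of-2 split exists */
	}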
@@ -21,6 +21,7 @@
#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6)
+#define NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG (0x1 << 7)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
@@ -30,6 +31,7 @@
#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6)
+#define NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG (0x1 << 7)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -67,5 +69,6 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state);
int nfp_app_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
+int nfp_vf_queues_config(struct nfp_pf *pf, int num_vfs);
#endif /* _NFP_NET_SRIOV_H_ */
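As a final illustration (not part of the patch): a caller would test the new capability bit before requesting a queue-config update; nfp_net_sriov_check() performs the equivalent check internally. The NFP_NET_VF_CFG_MB_CAP word offset is assumed from the existing mailbox layout and is not shown in this diff; example_fw_supports_vf_queue_config() is a hypothetical helper.

	static bool example_fw_supports_vf_queue_config(struct nfp_pf *pf)
	{
		/* Read the mailbox capability word and test the queue-config bit */
		u16 cap = readw(pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP);

		return cap & NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG;
	}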