@@ -98,7 +98,7 @@ Then you can add this to your QEMU command line:
.. code-block:: shell
-drive file=blknvme,if=none,id=mynvme,format=raw \
- -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
+ -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323,spdm_trans=doe
At which point QEMU will try to connect to the SPDM server.
@@ -113,7 +113,13 @@ of the default. So the entire QEMU command might look like this
-append "root=/dev/vda console=ttyS0" \
-net none -nographic \
-drive file=blknvme,if=none,id=mynvme,format=raw \
- -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
+ -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323,spdm_trans=doe
+
+The ``spdm_trans`` argument selects the transport that QEMU emulates for SPDM
+messages. For a PCIe NVMe controller, both "doe" and "nvme" are supported:
+"doe" carries SPDM over the PCIe Data Object Exchange (DOE) extended
+capability, while "nvme" uses the NVMe Admin Security Send/Receive commands
+to implement the SPDM transport.
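+
+For example, assuming the same SPDM server setup as above, selecting the NVMe
+Security Send/Receive transport only changes the device argument:
+
+.. code-block:: shell
+
+    -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323,spdm_trans=nvme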
.. _DMTF:
https://www.dmtf.org/standards/SPDM
@@ -8746,6 +8746,23 @@ static DOEProtocol doe_spdm_prot[] = {
{ }
};
+static inline uint32_t nvme_get_spdm_trans_type(PCIDevice *pci_dev)
+{
+    /* Guard against an unset "spdm_trans" property to avoid a NULL strcmp() */
+    if (!pci_dev || !pci_dev->spdm_trans) {
+        return 0;
+    }
+
+ if (!strcmp(pci_dev->spdm_trans, "nvme")) {
+ return SPDM_SOCKET_TRANSPORT_TYPE_NVME;
+ }
+
+ if (!strcmp(pci_dev->spdm_trans, "doe")) {
+ return SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE;
+ }
+
+ return 0;
+}
+
static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
ERRP_GUARD();
@@ -8829,19 +8846,31 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
pcie_cap_deverr_init(pci_dev);
- /* DOE Initialisation */
+ /* SPDM Initialisation */
if (pci_dev->spdm_port) {
- uint16_t doe_offset = n->params.sriov_max_vfs ?
- PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF
- : PCI_CONFIG_SPACE_SIZE;
+ switch (nvme_get_spdm_trans_type(pci_dev)) {
+        case SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE: {
+            uint16_t doe_offset = n->params.sriov_max_vfs ?
+                                  PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF
+                                  : PCI_CONFIG_SPACE_SIZE;
- pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
- doe_spdm_prot, true, 0);
+ pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
+ doe_spdm_prot, true, 0);
- pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port,
- errp);
+ pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(
+ pci_dev->spdm_port, errp);
- if (pci_dev->doe_spdm.spdm_socket < 0) {
+ if (pci_dev->doe_spdm.spdm_socket < 0) {
+ return false;
+ }
+            break;
+        }
+ case SPDM_SOCKET_TRANSPORT_TYPE_NVME:
+ n->spdm_socket = spdm_socket_connect(pci_dev->spdm_port, errp);
+ if (n->spdm_socket < 0) {
+ return false;
+ }
+ break;
+ default:
return false;
}
}
@@ -9110,11 +9139,17 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->cmb.buf);
}
+    /* Only one of the SPDM sockets below will have been set up */
if (pci_dev->doe_spdm.spdm_socket > 0) {
spdm_socket_close(pci_dev->doe_spdm.spdm_socket,
SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
}
+ if (n->spdm_socket > 0) {
+        spdm_socket_close(n->spdm_socket,
+                          SPDM_SOCKET_TRANSPORT_TYPE_NVME);
+ }
+
if (n->pmr.dev) {
host_memory_backend_set_mapped(n->pmr.dev, false);
}
@@ -9166,6 +9201,7 @@ static const Property nvme_props[] = {
false),
DEFINE_PROP_UINT16("mqes", NvmeCtrl, params.mqes, 0x7ff),
DEFINE_PROP_UINT16("spdm_port", PCIDevice, spdm_port, 0),
+ DEFINE_PROP_STRING("spdm_trans", PCIDevice, spdm_trans),
DEFINE_PROP_BOOL("ctratt.mem", NvmeCtrl, params.ctratt.mem, false),
DEFINE_PROP_BOOL("atomic.dn", NvmeCtrl, params.atomic_dn, 0),
DEFINE_PROP_UINT16("atomic.awun", NvmeCtrl, params.atomic_awun, 0),
@@ -9240,7 +9276,9 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
{
uint16_t old_num_vfs = pcie_sriov_num_vfs(dev);
- if (pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
+ /* DOE is only initialised if SPDM over DOE is used */
+ if (pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE) &&
+ nvme_get_spdm_trans_type(dev) == SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE) {
pcie_doe_write_config(&dev->doe_spdm, address, val, len);
}
pci_default_write_config(dev, address, val, len);
@@ -9251,7 +9289,9 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
static uint32_t nvme_pci_read_config(PCIDevice *dev, uint32_t address, int len)
{
uint32_t val;
- if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
+
+ if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE) &&
+ (nvme_get_spdm_trans_type(dev) == SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE)) {
if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) {
return val;
}
@@ -160,6 +160,7 @@ struct PCIDevice {
/* SPDM */
uint16_t spdm_port;
+ char *spdm_trans;
/* DOE */
DOECap doe_spdm;
This patch extends the existing NVMe SPDM support, which previously covered
only the DOE transport, to also support SPDM over the NVMe Admin Security
Send/Receive commands. With the new `spdm_trans` argument, users can select
the transport by specifying `spdm_trans=nvme` or `spdm_trans=doe`. SPDM over
the NVMe Security Send/Receive commands is defined in DMTF DSP0286.

Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
---
 docs/specs/spdm.rst         | 10 ++++--
 hw/nvme/ctrl.c              | 62 ++++++++++++++++++++++++++++++-------
 include/hw/pci/pci_device.h |  1 +
 3 files changed, 60 insertions(+), 13 deletions(-)
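For readers unfamiliar with DSP0286, the NVMe transport tunnels SPDM messages
through the Security Send and Security Receive admin commands, with the
controller relaying each payload to the external SPDM responder over the
socket opened in nvme_init_pci(). The sketch below illustrates only that relay
step and is not taken from this patch: nvme_spdm_forward() and the
spdm_rsp_buf/spdm_rsp_len fields on NvmeCtrl are hypothetical, and the
status-code mapping is an assumption.

/*
 * Illustrative sketch only: forward a Security Send payload (already copied
 * from guest memory into `buf`) to the external SPDM server and keep the
 * response so a later Security Receive can return it to the host.
 * spdm_rsp_buf/spdm_rsp_len are assumed fields, not part of this patch.
 */
static uint16_t nvme_spdm_forward(NvmeCtrl *n, uint8_t *buf, uint32_t len)
{
    uint32_t recvd;

    /* send the request and block until the SPDM server replies */
    recvd = spdm_socket_rsp(n->spdm_socket, SPDM_SOCKET_TRANSPORT_TYPE_NVME,
                            buf, len,
                            n->spdm_rsp_buf, sizeof(n->spdm_rsp_buf));
    if (!recvd) {
        /* assumption: treat an empty response as a transfer error */
        return NVME_DATA_TRAS_ERROR;
    }

    n->spdm_rsp_len = recvd;
    return NVME_SUCCESS;
}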