@@ -1193,7 +1193,8 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
 }
 
-static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req,
+                                 bool only_active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
@@ -1211,11 +1212,16 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
ns = &n->namespaces[nsid - 1];
     assert(nsid == ns->nsid);
 
+    if (only_active && !ns->attached) {
+        return nvme_rpt_empty_id_struct(n, prp1, prp2, req);
+    }
+
     return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
                         prp2, DMA_DIRECTION_FROM_DEVICE, req);
 }
 
-static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
+                                     bool only_active)
{
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
NvmeNamespace *ns;
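Note: nvme_rpt_empty_id_struct() is called above but introduced by an earlier patch in the series, so its definition does not appear in this diff. Judging only from its call sites, a plausible shape for the helper is the sketch below (an assumption, not part of the patch): it DMAs a zero-filled Identify data structure back to the host, which is the response the spec prescribes when Identify names a valid but inactive NSID.

static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, uint64_t prp1,
                                         uint64_t prp2, NvmeRequest *req)
{
    /* Zero-filled 4 KiB payload: "this NSID exists but is not active". */
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    return nvme_dma_prp(n, id, sizeof(id), prp1, prp2,
                        DMA_DIRECTION_FROM_DEVICE, req);
}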
@@ -1233,6 +1239,10 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
ns = &n->namespaces[nsid - 1];
     assert(nsid == ns->nsid);
 
+    if (only_active && !ns->attached) {
+        return nvme_rpt_empty_id_struct(n, prp1, prp2, req);
+    }
+
if (c->csi == NVME_CSI_NVM) {
return nvme_rpt_empty_id_struct(n, prp1, prp2, req);
}
@@ -1240,7 +1250,8 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
 }
 
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
+                                     bool only_active)
{
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
static const int data_len = NVME_IDENTIFY_DATA_SIZE;
@@ -1265,7 +1276,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
 
     list = g_malloc0(data_len);
     for (i = 0; i < n->num_namespaces; i++) {
-        if (i < min_nsid) {
+        if (i < min_nsid || (only_active && !n->namespaces[i].attached)) {
             continue;
         }
         list[j++] = cpu_to_le32(i + 1);
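The only_active flag is what separates the "active" list from the "present" (allocated) list. A worked example under a hypothetical configuration, num_namespaces == 4 with namespace 3 not attached, where CDW1.NSID == 1 requests NSIDs greater than 1:

/*
 * CNS 0x02 (active list,  only_active == true):  list = { 2, 4 }
 * CNS 0x10 (present list, only_active == false): list = { 2, 3, 4 }
 *
 * Unused entries remain zero because the buffer is g_malloc0()-ed,
 * which also terminates the list.
 */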
@@ -1279,7 +1290,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
return ret;
 }
 
-static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
+static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
+                                         bool only_active)
{
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
static const int data_len = NVME_IDENTIFY_DATA_SIZE;
@@ -1298,7 +1310,8 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
 
     list = g_malloc0(data_len);
     for (i = 0; i < n->num_namespaces; i++) {
-        if (i < min_nsid) {
+        if (i < min_nsid || c->csi != n->namespaces[i].csi ||
+            (only_active && !n->namespaces[i].attached)) {
             continue;
         }
         list[j++] = cpu_to_le32(i + 1);
@@ -1390,17 +1403,25 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
 
     switch (le32_to_cpu(c->cns)) {
     case NVME_ID_CNS_NS:
-        return nvme_identify_ns(n, req);
+        return nvme_identify_ns(n, req, true);
     case NVME_ID_CNS_CS_NS:
-        return nvme_identify_ns_csi(n, req);
+        return nvme_identify_ns_csi(n, req, true);
+    case NVME_ID_CNS_NS_PRESENT:
+        return nvme_identify_ns(n, req, false);
+    case NVME_ID_CNS_CS_NS_PRESENT:
+        return nvme_identify_ns_csi(n, req, false);
     case NVME_ID_CNS_CTRL:
         return nvme_identify_ctrl(n, req);
     case NVME_ID_CNS_CS_CTRL:
         return nvme_identify_ctrl_csi(n, req);
     case NVME_ID_CNS_NS_ACTIVE_LIST:
-        return nvme_identify_nslist(n, req);
+        return nvme_identify_nslist(n, req, true);
     case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
-        return nvme_identify_nslist_csi(n, req);
+        return nvme_identify_nslist_csi(n, req, true);
+    case NVME_ID_CNS_NS_PRESENT_LIST:
+        return nvme_identify_nslist(n, req, false);
+    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
+        return nvme_identify_nslist_csi(n, req, false);
     case NVME_ID_CNS_NS_DESCR_LIST:
         return nvme_identify_ns_descr_list(n, req);
     case NVME_ID_CNS_IO_COMMAND_SET:
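On the wire, nothing changes except the CNS code. A minimal host-side sketch of selecting the new behavior (assumed test code, not from the patch; id_buf_addr stands in for the guest-physical address of a 4 KiB result buffer):

NvmeIdentify cmd = {
    .opcode = NVME_ADM_CMD_IDENTIFY,
    .nsid   = cpu_to_le32(1),
    .cns    = cpu_to_le32(NVME_ID_CNS_NS_PRESENT), /* 0x11: allocated NSID */
    .prp1   = cpu_to_le64(id_buf_addr),
};

The cpu_to_le32() on cns mirrors the le32_to_cpu() in the dispatch above.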
@@ -1842,6 +1863,7 @@ static int nvme_start_ctrl(NvmeCtrl *n)
{
uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
uint32_t page_size = 1 << page_bits;
+    int i;
 
     if (unlikely(n->cq[0])) {
trace_pci_nvme_err_startfail_cq();
@@ -1928,6 +1950,18 @@ static int nvme_start_ctrl(NvmeCtrl *n)
     nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                  NVME_AQA_ASQS(n->bar.aqa) + 1);
 
+    for (i = 0; i < n->num_namespaces; i++) {
+        n->namespaces[i].attached = false;
+        switch (n->namespaces[i].csi) {
+        case NVME_CSI_NVM:
+            if (NVME_CC_CSS(n->bar.cc) == CSS_NVM_ONLY ||
+                NVME_CC_CSS(n->bar.cc) == CSS_CSI) {
+                n->namespaces[i].attached = true;
+            }
+            break;
+        }
+    }
+
     nvme_set_timestamp(n, 0ULL);
 
     QTAILQ_INIT(&n->aer_queue);
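CSS_NVM_ONLY and CSS_CSI are also defined elsewhere in the series. The values below are an assumption based on the CC.CSS encoding in the NVMe specification (000b selects the NVM command set only, 110b selects all supported I/O command sets, 111b is admin-only):

enum NvmeCcCss {
    CSS_NVM_ONLY   = 0, /* CC.CSS = 000b */
    CSS_CSI        = 6, /* CC.CSS = 110b */
    CSS_ADMIN_ONLY = 7, /* CC.CSS = 111b */
};

Under either I/O-capable setting, every NVM namespace comes up attached at controller start; the single-case switch leaves room for other command set identifiers (zoned, for instance) to gate attachment differently, presumably in later patches of the series.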
@@ -66,6 +66,7 @@ typedef struct NvmeNamespace {
     NvmeIdNs        id_ns;
     uint32_t        nsid;
     uint8_t         csi;
+    bool            attached;
     QemuUUID        uuid;
 } NvmeNamespace;
 
@@ -806,14 +806,18 @@ typedef struct QEMU_PACKED NvmePSD {
 
 #define NVME_IDENTIFY_DATA_SIZE 4096
 
 enum NvmeIdCns {
-    NVME_ID_CNS_NS                = 0x00,
-    NVME_ID_CNS_CTRL              = 0x01,
-    NVME_ID_CNS_NS_ACTIVE_LIST    = 0x02,
-    NVME_ID_CNS_NS_DESCR_LIST     = 0x03,
-    NVME_ID_CNS_CS_NS             = 0x05,
-    NVME_ID_CNS_CS_CTRL           = 0x06,
-    NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07,
-    NVME_ID_CNS_IO_COMMAND_SET    = 0x1c,
+    NVME_ID_CNS_NS                 = 0x00,
+    NVME_ID_CNS_CTRL               = 0x01,
+    NVME_ID_CNS_NS_ACTIVE_LIST     = 0x02,
+    NVME_ID_CNS_NS_DESCR_LIST      = 0x03,
+    NVME_ID_CNS_CS_NS              = 0x05,
+    NVME_ID_CNS_CS_CTRL            = 0x06,
+    NVME_ID_CNS_CS_NS_ACTIVE_LIST  = 0x07,
+    NVME_ID_CNS_NS_PRESENT_LIST    = 0x10,
+    NVME_ID_CNS_NS_PRESENT         = 0x11,
+    NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
+    NVME_ID_CNS_CS_NS_PRESENT      = 0x1b,
+    NVME_ID_CNS_IO_COMMAND_SET     = 0x1c,
 };
 
 typedef struct QEMU_PACKED NvmeIdCtrl {
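Read together with the dispatch changes above, the CNS codes now pair up as follows (a summary of this patch, not text from it):

/*
 * Active (attached) namespaces        Any allocated ("present") NSID
 * ----------------------------------  -----------------------------------
 * 0x00 NVME_ID_CNS_NS                 0x11 NVME_ID_CNS_NS_PRESENT
 * 0x02 NVME_ID_CNS_NS_ACTIVE_LIST     0x10 NVME_ID_CNS_NS_PRESENT_LIST
 * 0x05 NVME_ID_CNS_CS_NS              0x1b NVME_ID_CNS_CS_NS_PRESENT
 * 0x07 NVME_ID_CNS_CS_NS_ACTIVE_LIST  0x1a NVME_ID_CNS_CS_NS_PRESENT_LIST
 *
 * 0x10 and 0x11 come with Namespace Management in NVMe 1.4; 0x1a and
 * 0x1b are the CSI-specific variants added by TP 4056 (Namespace Types).
 */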