--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1003,9 +1003,18 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
uint32_t dw10 = le32_to_cpu(cmd->cdw10);
+ uint32_t dw11 = le32_to_cpu(cmd->cdw11);
uint32_t result;
+ trace_nvme_dev_getfeat(nvme_cid(req), dw10);
+
switch (dw10) {
+ case NVME_ARBITRATION:
+ result = cpu_to_le32(n->features.arbitration);
+ break;
+ case NVME_POWER_MANAGEMENT:
+ result = cpu_to_le32(n->features.power_mgmt);
+ break;
case NVME_TEMPERATURE_THRESHOLD:
result = 0;
@@ -1026,6 +1035,9 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
break;
}
+ break;
+ case NVME_ERROR_RECOVERY:
+ result = cpu_to_le32(n->features.err_rec);
break;
case NVME_VOLATILE_WRITE_CACHE:
result = blk_enable_write_cache(n->conf.blk);
@@ -1038,6 +1050,19 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
break;
case NVME_TIMESTAMP:
return nvme_get_feature_timestamp(n, cmd);
+ case NVME_INTERRUPT_COALESCING:
+ result = cpu_to_le32(n->features.int_coalescing);
+ break;
+ case NVME_INTERRUPT_VECTOR_CONF:
+ if ((dw11 & 0xffff) >= n->params.num_queues) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ result = cpu_to_le32(n->features.int_vector_config[dw11 & 0xffff]);
+ break;
+ case NVME_WRITE_ATOMICITY:
+ result = cpu_to_le32(n->features.write_atomicity);
+ break;
case NVME_ASYNCHRONOUS_EVENT_CONF:
result = cpu_to_le32(n->features.async_config);
break;
@@ -1073,6 +1098,8 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
uint32_t dw10 = le32_to_cpu(cmd->cdw10);
uint32_t dw11 = le32_to_cpu(cmd->cdw11);
+ trace_nvme_dev_setfeat(nvme_cid(req), dw10, dw11);
+
switch (dw10) {
case NVME_TEMPERATURE_THRESHOLD:
if (NVME_TEMP_TMPSEL(dw11)) {
@@ -1113,6 +1140,13 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_ASYNCHRONOUS_EVENT_CONF:
n->features.async_config = dw11;
break;
+ case NVME_ARBITRATION:
+ case NVME_POWER_MANAGEMENT:
+ case NVME_ERROR_RECOVERY:
+ case NVME_INTERRUPT_COALESCING:
+ case NVME_INTERRUPT_VECTOR_CONF:
+ case NVME_WRITE_ATOMICITY:
+ return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
default:
trace_nvme_dev_err_invalid_setfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
@@ -1686,6 +1720,21 @@ static void nvme_init_state(NvmeCtrl *n)
n->temperature = NVME_TEMPERATURE;
n->features.temp_thresh_hi = le16_to_cpu(n->id_ctrl.wctemp);
n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+
+ /*
+ * There is no limit on the number of commands that the controller may
+ * launch at one time from a particular Submission Queue.
+ */
+ n->features.arbitration = 0x7;
+
+ n->features.int_vector_config = g_malloc0_n(n->params.num_queues,
+ sizeof(*n->features.int_vector_config));
+
+ /* disable coalescing (not supported) */
+ for (int i = 0; i < n->params.num_queues; i++) {
+ n->features.int_vector_config[i] = i | (1 << 16);
+ }
+
n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
@@ -1779,15 +1828,17 @@ static void nvme_init_ctrl(NvmeCtrl *n)
id->nn = cpu_to_le32(n->num_namespaces);
id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
+
+ if (blk_enable_write_cache(n->conf.blk)) {
+ id->vwc = 1;
+ }
+
strcpy((char *) id->subnqn, "nqn.2019-08.org.qemu:");
pstrcat((char *) id->subnqn, sizeof(id->subnqn), n->params.serial);
id->psd[0].mp = cpu_to_le16(0x9c4);
id->psd[0].enlat = cpu_to_le32(0x10);
id->psd[0].exlat = cpu_to_le32(0x4);
- if (blk_enable_write_cache(n->conf.blk)) {
- id->vwc = 1;
- }
n->bar.cap = 0;
NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
@@ -1858,6 +1909,7 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->cq);
g_free(n->sq);
g_free(n->aer_reqs);
+ g_free(n->features.int_vector_config);
if (n->params.cmb_size_mb) {
g_free(n->cmbuf);
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -41,6 +41,8 @@ nvme_dev_del_cq(uint16_t cqid) "deleted completion queue, sqid=%"PRIu16""
nvme_dev_identify_ctrl(void) "identify controller"
nvme_dev_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
nvme_dev_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
+nvme_dev_getfeat(uint16_t cid, uint32_t fid) "cid %"PRIu16" fid 0x%"PRIx32""
+nvme_dev_setfeat(uint16_t cid, uint32_t fid, uint32_t val) "cid %"PRIu16" fid 0x%"PRIx32" val 0x%"PRIx32""
nvme_dev_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
nvme_dev_getfeat_numq(int result) "get feature number of queues, result=%d"
nvme_dev_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -445,7 +445,8 @@ enum NvmeStatusCodes {
NVME_FW_REQ_RESET = 0x010b,
NVME_INVALID_QUEUE_DEL = 0x010c,
NVME_FID_NOT_SAVEABLE = 0x010d,
- NVME_FID_NOT_NSID_SPEC = 0x010f,
+ NVME_FEAT_NOT_CHANGEABLE = 0x010e,
+ NVME_FEAT_NOT_NSID_SPEC = 0x010f,
NVME_FW_REQ_SUSYSTEM_RESET = 0x0110,
NVME_CONFLICTING_ATTRS = 0x0180,
NVME_INVALID_PROT_INFO = 0x0181,
Add support for returning a reasonable response to Get/Set Features of
mandatory features.

Signed-off-by: Klaus Jensen <klaus.jensen@cnexlabs.com>
---
 hw/block/nvme.c       | 58 ++++++++++++++++++++++++++++++++++++++++---
 hw/block/trace-events |  2 ++
 include/block/nvme.h  |  3 ++-
 3 files changed, 59 insertions(+), 4 deletions(-)
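As a side note for reviewers, here is a minimal sketch (not part of the
patch; the helper names are made up for illustration) of the Interrupt
Vector Configuration dword layout that the new NVME_INTERRUPT_VECTOR_CONF
handling and nvme_init_state() assume: the interrupt vector (IV) occupies
bits 15:0 and the coalescing disable (CD) flag is bit 16, which is why the
device initializes each table entry to i | (1 << 16) and indexes the table
with dw11 & 0xffff.

    #include <stdint.h>

    /* Hypothetical helpers, illustration only. */

    /* CDW11 for Get Features (Interrupt Vector Configuration): the host
     * selects the vector to query in bits 15:0. */
    static inline uint32_t ivconf_cdw11(uint16_t iv)
    {
        return (uint32_t)iv;
    }

    /* Decode the feature dword returned in the completion entry. */
    static inline uint16_t ivconf_vector(uint32_t result)
    {
        return result & 0xffff;         /* IV: interrupt vector */
    }

    static inline int ivconf_coalescing_disabled(uint32_t result)
    {
        return (result >> 16) & 0x1;    /* CD: coalescing disabled */
    }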