@@ -1,3 +1,4 @@
obj-$(CONFIG_ACPI_NFIT) := nfit.o
nfit-y := core.o
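+# NVDIMM_FAMILY_INTEL security ops (x86-only for now, see intel.c)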
+nfit-$(CONFIG_X86) += intel.o
nfit-$(CONFIG_X86_MCE) += mce.o
@@ -1904,7 +1904,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
acpi_nfit_dimm_attribute_groups,
flags, cmd_mask, flush ? flush->hint_count : 0,
- nfit_mem->flush_wpq, &nfit_mem->id[0]);
+ nfit_mem->flush_wpq, &nfit_mem->id[0],
+ acpi_nfit_get_security_ops(nfit_mem->family));
if (!nvdimm)
return -ENOMEM;
new file mode 100644
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
+/*
+ * Intel-specific NFIT security ops
+ */
+#include <linux/libnvdimm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ndctl.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/nd.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <acpi/nfit.h>
+#include "intel.h"
+#include "nfit.h"
+
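+/*
+ * Unlock the DIMM by sending the NVDIMM_INTEL_UNLOCK_UNIT DSM (wrapped in
+ * an ND_CMD_CALL package) through the bus ->ndctl() handler, then
+ * invalidate CPU caches before the newly unlocked range is read.
+ */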
+static int intel_dimm_security_unlock(struct nvdimm_bus *nvdimm_bus,
+ struct nvdimm *nvdimm, struct nvdimm_key_data *nkey)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ int cmd_rc, rc = 0;
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_unlock_unit cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
+ .nd_size_out = ND_INTEL_STATUS_SIZE,
+ .nd_fw_size = ND_INTEL_STATUS_SIZE,
+ },
+ .cmd = {
+ .status = 0,
+ },
+ };
+
+ if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
+ return -ENOTTY;
+
+ memcpy(nd_cmd.cmd.passphrase, nkey->data, ND_INTEL_PASSPHRASE_SIZE);
+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_CALL, &nd_cmd,
+ sizeof(nd_cmd), &cmd_rc);
+ if (rc < 0)
+ goto out;
+ if (cmd_rc < 0) {
+ rc = cmd_rc;
+ goto out;
+ }
+
+ switch (nd_cmd.cmd.status) {
+ case 0:
+ break;
+ case ND_INTEL_STATUS_INVALID_PASS:
+ rc = -EINVAL;
+ goto out;
+ case ND_INTEL_STATUS_INVALID_STATE:
+ default:
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * TODO: define a cross arch wbinvd when/if NVDIMM_FAMILY_INTEL
+ * support arrives on another arch.
+ */
+ /* DIMM unlocked, invalidate all CPU caches before we read it */
+ wbinvd_on_all_cpus();
+
+ out:
+ return rc;
+}
+
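+/*
+ * Query the NVDIMM_INTEL_GET_SECURITY_STATE DSM and translate the returned
+ * state bits into the generic enum nvdimm_security_state.
+ */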
+static int intel_dimm_security_state(struct nvdimm_bus *nvdimm_bus,
+ struct nvdimm *nvdimm, enum nvdimm_security_state *state)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ int cmd_rc, rc = 0;
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_get_security_state cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = 0,
+ .nd_size_out =
+ sizeof(struct nd_intel_get_security_state),
+ .nd_fw_size =
+ sizeof(struct nd_intel_get_security_state),
+ },
+ .cmd = {
+ .status = 0,
+ .state = 0,
+ },
+ };
+
+ if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask)) {
+ *state = NVDIMM_SECURITY_UNSUPPORTED;
+ return 0;
+ }
+
+ *state = NVDIMM_SECURITY_DISABLED;
+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_CALL, &nd_cmd,
+ sizeof(nd_cmd), &cmd_rc);
+ if (rc < 0)
+ goto out;
+ if (cmd_rc < 0) {
+ rc = cmd_rc;
+ goto out;
+ }
+
+ switch (nd_cmd.cmd.status) {
+ case 0:
+ break;
+ case ND_INTEL_STATUS_RETRY:
+ rc = -EAGAIN;
+ goto out;
+ case ND_INTEL_STATUS_NOT_READY:
+ default:
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* check whether security is enabled and, if so, whether the DIMM is locked */
+ if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
+ *state = NVDIMM_SECURITY_UNSUPPORTED;
+ else if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
+ if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
+ *state = NVDIMM_SECURITY_LOCKED;
+ else
+ *state = NVDIMM_SECURITY_UNLOCKED;
+ } else
+ *state = NVDIMM_SECURITY_DISABLED;
+
+ out:
+ if (rc < 0)
+ *state = NVDIMM_SECURITY_INVALID;
+ return rc;
+}
+
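+/*
+ * NVDIMM_FAMILY_INTEL security ops, handed out to nvdimm_create() via
+ * acpi_nfit_get_security_ops().
+ */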
+struct nvdimm_security_ops intel_security_ops = {
+ .state = intel_dimm_security_state,
+ .unlock = intel_dimm_security_unlock,
+};
@@ -8,6 +8,8 @@
#ifdef CONFIG_X86
+extern struct nvdimm_security_ops intel_security_ops;
+
#define ND_INTEL_STATUS_SIZE 4
#define ND_INTEL_PASSPHRASE_SIZE 32
@@ -62,6 +64,20 @@ struct nd_intel_overwrite {
struct nd_intel_query_overwrite {
u32 status;
} __packed;
+
#endif /* CONFIG_X86 */
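+/*
+ * Map an NFIT DSM family to its security ops; returns NULL when the family
+ * (or the architecture) has no security support.
+ */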
+static inline struct nvdimm_security_ops *
+acpi_nfit_get_security_ops(int family)
+{
+ switch (family) {
+#ifdef CONFIG_X86
+ case NVDIMM_FAMILY_INTEL:
+ return &intel_security_ops;
+#endif
+ default:
+ return NULL;
+ }
+}
+
#endif
@@ -51,6 +51,13 @@ static int nvdimm_probe(struct device *dev)
get_device(dev);
kref_init(&ndd->kref);
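+ /* query the security state first; the unlock below relies on it */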
+ nvdimm_security_get_state(dev);
+
+ /* unlock the DIMM before the label area is touched */
+ rc = nvdimm_security_unlock_dimm(dev);
+ if (rc < 0)
+ dev_warn(dev, "failed to unlock dimm %s\n", dev_name(dev));
+
/*
* EACCES failures reading the namespace label-area-properties
* are interpreted as the DIMM capacity being locked but the
@@ -67,6 +67,110 @@ static void nvdimm_key_destroy(struct key *key)
kfree(key->payload.data[0]);
}
+/*
+ * Look up this DIMM's passphrase key (keyed by dimm_id) in the kernel
+ * keyring.
+ */
+static struct key *nvdimm_search_key(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ key_ref_t keyref;
+ struct key *key = NULL;
+
+ if (!nvdimm->security_ops)
+ return NULL;
+
+ keyref = keyring_search(make_key_ref(nvdimm_cred->thread_keyring, 1),
+ &nvdimm_key_type, nvdimm->dimm_id);
+ if (IS_ERR(keyref))
+ key = NULL;
+ else
+ key = key_ref_to_ptr(keyref);
+
+ return key;
+}
+
+/*
+ * Retrieve the kernel key for the DIMM, requesting it from user space if
+ * necessary.
+ */
+static struct key *nvdimm_request_key(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct key *key = NULL;
+
+ if (!nvdimm->security_ops)
+ return NULL;
+
+ key = request_key(&nvdimm_key_type, nvdimm->dimm_id, nvdimm->dimm_id);
+ if (IS_ERR(key))
+ key = NULL;
+
+ return key;
+}
+
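+/*
+ * Ask the provider for the DIMM's current security state and cache it in
+ * nvdimm->state; DIMMs without security_ops are skipped.
+ */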
+int nvdimm_security_get_state(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+
+ if (!nvdimm->security_ops)
+ return 0;
+
+ return nvdimm->security_ops->state(nvdimm_bus, nvdimm,
+ &nvdimm->state);
+}
+
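+/*
+ * Unlock a locked DIMM with a passphrase key from the kernel keyring,
+ * requesting it from user space via request_key() when it is not already
+ * cached; successfully used keys are linked back into the keyring.
+ */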
+int nvdimm_security_unlock_dimm(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct key *key;
+ int rc;
+ void *payload;
+ bool cached_key = false;
+
+ if (!nvdimm->security_ops)
+ return 0;
+
+ if (nvdimm->state == NVDIMM_SECURITY_UNLOCKED ||
+ nvdimm->state == NVDIMM_SECURITY_UNSUPPORTED ||
+ nvdimm->state == NVDIMM_SECURITY_DISABLED)
+ return 0;
+
+ key = nvdimm_search_key(dev);
+ if (!key)
+ key = nvdimm_request_key(dev);
+ else
+ cached_key = true;
+ if (!key)
+ return -ENXIO;
+
+ dev_dbg(dev, "%s: key: %#x\n", __func__, key->serial);
+ down_read(&key->sem);
+ payload = key->payload.data[0];
+ rc = nvdimm->security_ops->unlock(nvdimm_bus, nvdimm, payload);
+ up_read(&key->sem);
+
+ if (rc == 0) {
+ if (!cached_key)
+ key_link(nvdimm_cred->thread_keyring, key);
+ nvdimm->state = NVDIMM_SECURITY_UNLOCKED;
+ dev_info(dev, "DIMM %s unlocked\n", dev_name(dev));
+ } else {
+ key_invalidate(key);
+ dev_warn(dev, "Failed to unlock dimm: %s\n", dev_name(dev));
+ }
+
+ key_put(key);
+ nvdimm_security_get_state(dev);
+ return rc;
+}
+
/*
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
@@ -440,7 +544,8 @@ EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
const struct attribute_group **groups, unsigned long flags,
unsigned long cmd_mask, int num_flush,
- struct resource *flush_wpq, const char *dimm_id)
+ struct resource *flush_wpq, const char *dimm_id,
+ struct nvdimm_security_ops *sec_ops)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
@@ -455,6 +560,7 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
}
nvdimm->dimm_id = dimm_id;
+ nvdimm->security_ops = sec_ops;
nvdimm->provider_data = provider_data;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
@@ -43,6 +43,8 @@ struct nvdimm {
int id, num_flush;
struct resource *flush_wpq;
const char *dimm_id;
+ struct nvdimm_security_ops *security_ops;
+ enum nvdimm_security_state state;
};
/**
@@ -424,4 +424,6 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
+int nvdimm_security_unlock_dimm(struct device *dev);
+int nvdimm_security_get_state(struct device *dev);
#endif /* __ND_H__ */
@@ -157,9 +157,29 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
extern struct key_type nvdimm_key_type;
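+/* DIMM security state, as reported by the provider's ->state() op */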
+enum nvdimm_security_state {
+ NVDIMM_SECURITY_INVALID = 0,
+ NVDIMM_SECURITY_DISABLED,
+ NVDIMM_SECURITY_UNLOCKED,
+ NVDIMM_SECURITY_LOCKED,
+ NVDIMM_SECURITY_UNSUPPORTED,
+};
+
#define NVDIMM_PASSPHRASE_LEN 32
#define NVDIMM_KEY_DESC_LEN 25
+struct nvdimm_key_data {
+ u8 data[NVDIMM_PASSPHRASE_LEN];
+};
+
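+/*
+ * Vendor/family specific DIMM security operations, supplied by the bus
+ * provider at nvdimm_create() time.
+ */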
+struct nvdimm_security_ops {
+ int (*state)(struct nvdimm_bus *nvdimm_bus,
+ struct nvdimm *nvdimm,
+ enum nvdimm_security_state *state);
+ int (*unlock)(struct nvdimm_bus *nvdimm_bus,
+ struct nvdimm *nvdimm, struct nvdimm_key_data *nkey);
+};
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
@@ -183,7 +203,8 @@ void *nvdimm_provider_data(struct nvdimm *nvdimm);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
const struct attribute_group **groups, unsigned long flags,
unsigned long cmd_mask, int num_flush,
- struct resource *flush_wpq, const char *dimm_id);
+ struct resource *flush_wpq, const char *dimm_id,
+ struct nvdimm_security_ops *sec_ops);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
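Since nvdimm_create() gains a trailing sec_ops argument, every other caller of nvdimm_create() has to be updated as well. A minimal sketch of such an updated call site follows, assuming a hypothetical provider with no vendor security support; my_register_dimm() and the my_* parameters are placeholders, not identifiers from this patch. Passing NULL for sec_ops preserves the old behaviour, because the security helpers added above return early whenever nvdimm->security_ops is NULL.

/* sketch only: assumes <linux/libnvdimm.h>; all my_* names are hypothetical */
static int my_register_dimm(struct nvdimm_bus *my_bus, void *my_data,
		const struct attribute_group **my_groups,
		unsigned long my_cmd_mask, const char *my_dimm_id)
{
	struct nvdimm *nvdimm;

	/* no vendor security support, so the new sec_ops argument is NULL */
	nvdimm = nvdimm_create(my_bus, my_data, my_groups, 0 /* flags */,
			my_cmd_mask, 0 /* num_flush */, NULL /* flush_wpq */,
			my_dimm_id, NULL /* sec_ops */);
	if (!nvdimm)
		return -ENOMEM;
	return 0;
}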