author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2022-11-05 18:17:21 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2022-11-05 18:17:32 +0000
commit | b0dc2feab3271dbcb42df6e6d8a37138a90c44a1 |
tree | ae02f159c125f183b2adae47fdf0e64357bf76a8 /src |
parent | Releasing debian version 1.1-2. |
download | libnvme-b0dc2feab3271dbcb42df6e6d8a37138a90c44a1.tar.xz libnvme-b0dc2feab3271dbcb42df6e6d8a37138a90c44a1.zip |
Merging upstream version 1.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src')
-rw-r--r-- | src/libnvme-mi.map | 13
-rw-r--r-- | src/libnvme.map | 12
-rw-r--r-- | src/meson.build | 9
-rw-r--r-- | src/nvme/fabrics.c | 146
-rw-r--r-- | src/nvme/fabrics.h | 44
-rw-r--r-- | src/nvme/ioctl.h | 2
-rw-r--r-- | src/nvme/json.c | 61
-rw-r--r-- | src/nvme/mi-mctp.c | 57
-rw-r--r-- | src/nvme/mi.c | 406
-rw-r--r-- | src/nvme/mi.h | 1458
-rw-r--r-- | src/nvme/private.h | 5
-rw-r--r-- | src/nvme/tree.c | 42
-rw-r--r-- | src/nvme/tree.h | 18
-rw-r--r-- | src/nvme/types.h | 111
-rw-r--r-- | src/nvme/util.c | 77
-rw-r--r-- | src/nvme/util.h | 49
16 files changed, 2366 insertions, 144 deletions
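
Headline API additions in this merge, per the symbol map and header changes below: string-valued DH-HMAC-CHAP host-key accessors (nvme_ctrl_get/set_dhchap_host_key), libuuid-free UUID helpers (nvme_uuid_random, nvme_uuid_to_string, nvme_uuid_from_string), the extensible discovery call nvmf_get_discovery_wargs(), and a batch of new NVMe-MI admin commands together with nvme_mi_status_to_string(). The two sketches below are illustrative caller-side code, not part of this commit; they rely only on the declarations visible in this diff plus the assumptions called out in the comments.

First, fetching the discovery log page through struct nvme_get_discovery_args and nvmf_get_discovery_wargs() as documented in the fabrics.h hunk (the retry count and the le64_to_cpu() endian helper are assumptions, not defined by this diff):

```c
/* Hypothetical caller-side sketch (not part of this commit): fetch the
 * discovery log page from an already-connected discovery controller `c`
 * using the nvmf_get_discovery_wargs() interface added in libnvme 1.2.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libnvme.h>

static void show_discovery_log(nvme_ctrl_t c)
{
	struct nvme_get_discovery_args args = {
		.c = c,
		.args_size = sizeof(args),
		.max_retries = 6,			/* arbitrary retry budget */
		.result = NULL,
		.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
		.lsp = NVMF_LOG_DISC_LSP_NONE,		/* see enum nvmf_log_discovery_lsp */
	};
	struct nvmf_discovery_log *log;

	log = nvmf_get_discovery_wargs(&args);
	if (!log) {
		/* NULL return with errno set, per the new documentation */
		perror("nvmf_get_discovery_wargs");
		return;
	}

	/* numrec is little-endian on the wire; an le64_to_cpu() helper, as
	 * used inside fabrics.c, is assumed to be available to the caller. */
	printf("discovery log: %llu record(s)\n",
	       (unsigned long long)le64_to_cpu(log->numrec));

	free(log);	/* returned log page is caller-owned */
}
```

The args_size field is what makes the structure extensible: new members can be appended in later releases without breaking existing callers, which is why the fabrics.h documentation steers new code toward nvmf_get_discovery_wargs() rather than nvmf_get_discovery_log().

Second, decoding an NVMe-MI error from one of the nvme_mi_admin_* helpers with the newly exported nvme_mi_status_to_string(); the nvme_status_get_type()/nvme_status_get_value() accessors are assumed to come from types.h, as referenced by the mi.h documentation in this diff:

```c
/* Hypothetical sketch (not part of this commit): report the result of an
 * MI-based Identify Controller, mapping MI-type errors to strings with the
 * nvme_mi_status_to_string() helper exported by LIBNVME_MI_1_2.
 */
#include <stdio.h>
#include <libnvme-mi.h>

static void report_identify(nvme_mi_ctrl_t ctrl)
{
	struct nvme_id_ctrl id;
	int rc;

	rc = nvme_mi_admin_identify_ctrl(ctrl, &id);
	if (rc < 0) {
		perror("nvme_mi_admin_identify_ctrl");	/* transport failure */
	} else if (rc > 0) {
		/* assumed accessors from types.h split type and value bits */
		if (nvme_status_get_type(rc) == NVME_STATUS_TYPE_MI)
			fprintf(stderr, "MI error: %s\n",
				nvme_mi_status_to_string(nvme_status_get_value(rc)));
		else
			fprintf(stderr, "NVMe status 0x%x\n",
				nvme_status_get_value(rc));
	} else {
		printf("controller model: %.40s\n", id.mn);
	}
}
```
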
diff --git a/src/libnvme-mi.map b/src/libnvme-mi.map index e16b400..53af942 100644 --- a/src/libnvme-mi.map +++ b/src/libnvme-mi.map @@ -1,3 +1,16 @@ +LIBNVME_MI_1_2 { + global: + nvme_mi_admin_get_features; + nvme_mi_admin_set_features; + nvme_mi_admin_ns_mgmt; + nvme_mi_admin_ns_attach; + nvme_mi_admin_format_nvm; + nvme_mi_admin_sanitize_nvm; + nvme_mi_admin_fw_download; + nvme_mi_admin_fw_commit; + nvme_mi_status_to_string; +}; + LIBNVME_MI_1_1 { global: nvme_mi_create_root; diff --git a/src/libnvme.map b/src/libnvme.map index 50a688d..be9bca3 100644 --- a/src/libnvme.map +++ b/src/libnvme.map @@ -1,5 +1,17 @@ # SPDX-License-Identifier: LGPL-2.1-or-later +LIBNVME_1_2 { + global: + nvme_ctrl_get_dhchap_host_key; + nvme_ctrl_set_dhchap_host_key; + nvmf_get_discovery_wargs; + nvme_get_feature_length2; + nvme_ctrl_is_persistent; + nvme_uuid_from_string; + nvme_uuid_to_string; + nvme_uuid_random; +}; + LIBNVME_1_1 { global: nvme_get_version; diff --git a/src/meson.build b/src/meson.build index 3076be6..9e49a07 100644 --- a/src/meson.build +++ b/src/meson.build @@ -28,13 +28,11 @@ if conf.get('CONFIG_JSONC') endif deps = [ - libuuid_dep, json_c_dep, openssl_dep, ] mi_deps = [ - libuuid_dep, libsystemd_dep, ] @@ -68,7 +66,6 @@ pkg.generate(libnvme, libnvme_dep = declare_dependency( include_directories: ['.'], dependencies: [ - libuuid_dep.partial_dependency(compile_args: true, includes: true), json_c_dep.partial_dependency(compile_args: true, includes: true), ], link_with: libnvme, @@ -88,9 +85,6 @@ libnvme_mi = library( libnvme_mi_dep = declare_dependency( include_directories: ['.'], - dependencies: [ - libuuid_dep.partial_dependency(compile_args: true, includes: true), - ], link_with: libnvme_mi, ) @@ -107,9 +101,6 @@ libnvme_mi_test = library( libnvme_mi_test_dep = declare_dependency( include_directories: ['.'], - dependencies: [ - libuuid_dep.partial_dependency(compile_args: true, includes: true), - ], link_with: libnvme_mi_test, ) diff --git a/src/nvme/fabrics.c b/src/nvme/fabrics.c index b68b7b9..a501f79 100644 --- a/src/nvme/fabrics.c +++ b/src/nvme/fabrics.c @@ -39,7 +39,6 @@ #include "private.h" #define NVMF_HOSTID_SIZE 37 -#define UUID_SIZE 37 /* 1b4e28ba-2fa1-11d2-883f-0016d3cca427 + \0 */ #define NVMF_HOSTNQN_FILE SYSCONFDIR "/nvme/hostnqn" #define NVMF_HOSTID_FILE SYSCONFDIR "/nvme/hostid" @@ -192,13 +191,15 @@ void nvmf_default_config(struct nvme_fabrics_config *cfg) #define MERGE_CFG_OPTION(c, n, o, d) \ if ((c)->o == d) (c)->o = (n)->o +#define MERGE_CFG_OPTION_STR(c, n, o, d) \ + if ((c)->o == d && (n)->o) (c)->o = strdup((n)->o) static struct nvme_fabrics_config *merge_config(nvme_ctrl_t c, const struct nvme_fabrics_config *cfg) { struct nvme_fabrics_config *ctrl_cfg = nvme_ctrl_get_config(c); - MERGE_CFG_OPTION(ctrl_cfg, cfg, host_traddr, NULL); - MERGE_CFG_OPTION(ctrl_cfg, cfg, host_iface, NULL); + MERGE_CFG_OPTION_STR(ctrl_cfg, cfg, host_traddr, NULL); + MERGE_CFG_OPTION_STR(ctrl_cfg, cfg, host_iface, NULL); MERGE_CFG_OPTION(ctrl_cfg, cfg, nr_io_queues, 0); MERGE_CFG_OPTION(ctrl_cfg, cfg, nr_write_queues, 0); MERGE_CFG_OPTION(ctrl_cfg, cfg, nr_poll_queues, 0); @@ -295,7 +296,7 @@ static int add_argument(char **argstr, const char *tok, const char *arg) { char *nstr; - if (!(arg && strcmp(arg, "none"))) + if (!arg || arg[0] == '\0' || !strcmp(arg, "none")) return 0; if (asprintf(&nstr, "%s,%s=%s", *argstr, tok, arg) < 0) { errno = ENOMEM; @@ -465,6 +466,8 @@ static int build_options(nvme_host_t h, nvme_ctrl_t c, char **argstr) hostnqn = nvme_host_get_hostnqn(h); hostid = 
nvme_host_get_hostid(h); hostkey = nvme_host_get_dhchap_key(h); + if (!hostkey) + hostkey = nvme_ctrl_get_dhchap_host_key(c); ctrlkey = nvme_ctrl_get_dhchap_key(c); if (add_argument(argstr, "transport", transport) || add_argument(argstr, "traddr", @@ -613,14 +616,20 @@ int nvmf_add_ctrl(nvme_host_t h, nvme_ctrl_t c, nvme_ctrl_get_trsvcid(c), NULL); if (fc) { + const char *key; + cfg = merge_config(c, nvme_ctrl_get_config(fc)); /* * An authentication key might already been set * in @cfg, so ensure to update @c with the correct * controller key. */ - if (fc->dhchap_key) - nvme_ctrl_set_dhchap_key(c, fc->dhchap_key); + key = nvme_ctrl_get_dhchap_host_key(fc); + if (key) + nvme_ctrl_set_dhchap_host_key(c, key); + key = nvme_ctrl_get_dhchap_key(fc); + if (key) + nvme_ctrl_set_dhchap_key(c, key); } } @@ -734,7 +743,7 @@ nvme_ctrl_t nvmf_connect_disc_entry(nvme_host_t h, default: nvme_msg(h->r, LOG_ERR, "unsupported subtype %d\n", e->subtype); - /* fallthrough */ + fallthrough; case NVME_NQN_NVME: nvme_ctrl_set_discovery_ctrl(c, false); break; @@ -772,30 +781,9 @@ nvme_ctrl_t nvmf_connect_disc_entry(nvme_host_t h, return NULL; } -static int nvme_discovery_log(int fd, __u32 len, struct nvmf_discovery_log *log, bool rae) -{ - struct nvme_get_log_args args = { - .args_size = sizeof(args), - .fd = fd, - .nsid = NVME_NSID_NONE, - .lsp = NVME_LOG_LSP_NONE, - .lsi = NVME_LOG_LSI_NONE, - .uuidx = NVME_UUID_NONE, - .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, - .result = NULL, - .lid = NVME_LOG_LID_DISCOVER, - .log = log, - .len = len, - .csi = NVME_CSI_NVM, - .rae = rae, - .ot = false, - }; - - return nvme_get_log_page(fd, 4096, &args); -} - -int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, - int max_retries) +static struct nvmf_discovery_log *nvme_discovery_log(nvme_ctrl_t c, + struct nvme_get_log_args *args, + int max_retries) { nvme_root_t r = c->s && c->s->h ? 
c->s->h->r : NULL; struct nvmf_discovery_log *log = NULL; @@ -803,6 +791,9 @@ int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, const char *name = nvme_ctrl_get_name(c); uint64_t genctr, numrec; unsigned int size; + int fd = nvme_ctrl_get_fd(c); + + args->fd = fd; do { size = sizeof(struct nvmf_discovery_log); @@ -813,12 +804,15 @@ int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, nvme_msg(r, LOG_ERR, "could not allocate memory for discovery log header\n"); errno = ENOMEM; - return -1; + return NULL; } nvme_msg(r, LOG_DEBUG, "%s: get header (try %d/%d)\n", name, retries, max_retries); - ret = nvme_discovery_log(nvme_ctrl_get_fd(c), size, log, true); + args->rae = true; + args->len = size; + args->log = log; + ret = nvme_get_log_page(fd, 4096, args); if (ret) { nvme_msg(r, LOG_INFO, "%s: discover try %d/%d failed, error %d\n", @@ -841,14 +835,19 @@ int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, nvme_msg(r, LOG_ERR, "could not alloc memory for discovery log page\n"); errno = ENOMEM; - return -1; + return NULL; } nvme_msg(r, LOG_DEBUG, "%s: get header and %" PRIu64 " records (length %d genctr %" PRIu64 ")\n", name, numrec, size, genctr); - ret = nvme_discovery_log(nvme_ctrl_get_fd(c), size, log, false); + + args->rae = false; + args->len = size; + args->log = log; + ret = nvme_get_log_page(fd, 4096, args); + if (ret) { nvme_msg(r, LOG_INFO, "%s: discover try %d/%d failed, error %d\n", @@ -861,21 +860,63 @@ int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, if (genctr != le64_to_cpu(log->genctr)) { nvme_msg(r, LOG_INFO, "%s: discover genctr mismatch\n", name); errno = EAGAIN; - ret = -1; } else if (numrec != le64_to_cpu(log->numrec)) { nvme_msg(r, LOG_INFO, "%s: could only fetch %" PRIu64 " of %" PRIu64 " records\n", name, numrec, le64_to_cpu(log->numrec)); errno = EBADSLT; - ret = -1; } else { - *logp = log; - return 0; + return log; } out_free_log: free(log); - return ret; + return NULL; +} + +int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, + int max_retries) +{ + struct nvme_get_log_args args = { + .args_size = sizeof(args), + .fd = nvme_ctrl_get_fd(c), + .nsid = NVME_NSID_NONE, + .lsp = NVMF_LOG_DISC_LSP_NONE, + .lsi = NVME_LOG_LSI_NONE, + .uuidx = NVME_UUID_NONE, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + .lid = NVME_LOG_LID_DISCOVER, + .log = NULL, + .len = 0, + .csi = NVME_CSI_NVM, + .rae = false, + .ot = false, + }; + *logp = nvme_discovery_log(c, &args, max_retries); + return logp ? 
0 : -1; +} + +struct nvmf_discovery_log *nvmf_get_discovery_wargs(struct nvme_get_discovery_args *args) +{ + struct nvme_get_log_args _args = { + .args_size = sizeof(_args), + .fd = nvme_ctrl_get_fd(args->c), + .nsid = NVME_NSID_NONE, + .lsp = args->lsp, + .lsi = NVME_LOG_LSI_NONE, + .uuidx = NVME_UUID_NONE, + .timeout = args->timeout, + .result = args->result, + .lid = NVME_LOG_LID_DISCOVER, + .log = NULL, + .len = 0, + .csi = NVME_CSI_NVM, + .rae = false, + .ot = false, + }; + + return nvme_discovery_log(args->c, &_args, args->max_retries); } #define PATH_UUID_IBM "/proc/device-tree/ibm,partition-uuid" @@ -889,8 +930,8 @@ static int uuid_from_device_tree(char *system_uuid) if (f < 0) return -ENXIO; - memset(system_uuid, 0, UUID_SIZE); - len = read(f, system_uuid, UUID_SIZE - 1); + memset(system_uuid, 0, NVME_UUID_LEN_STRING); + len = read(f, system_uuid, NVME_UUID_LEN_STRING - 1); close(f); if (len < 0) return -ENXIO; @@ -978,7 +1019,7 @@ static int uuid_from_product_uuid(char *system_uuid) system_uuid[0] = '\0'; nread = getline(&line, &len, stream); - if (nread != UUID_SIZE) { + if (nread != NVME_UUID_LEN_STRING) { ret = -ENXIO; goto out; } @@ -986,8 +1027,8 @@ static int uuid_from_product_uuid(char *system_uuid) /* The kernel is handling the byte swapping according DMTF * SMBIOS 3.0 Section 7.2.1 System UUID */ - memcpy(system_uuid, line, UUID_SIZE - 1); - system_uuid[UUID_SIZE - 1] = '\0'; + memcpy(system_uuid, line, NVME_UUID_LEN_STRING - 1); + system_uuid[NVME_UUID_LEN_STRING - 1] = '\0'; ret = 0; @@ -1023,16 +1064,17 @@ char *nvmf_hostnqn_generate() { char *hostnqn; int ret; - char uuid_str[UUID_SIZE]; - uuid_t uuid; + char uuid_str[NVME_UUID_LEN_STRING]; + unsigned char uuid[NVME_UUID_LEN]; ret = uuid_from_dmi(uuid_str); if (ret < 0) { ret = uuid_from_device_tree(uuid_str); } if (ret < 0) { - uuid_generate_random(uuid); - uuid_unparse_lower(uuid, uuid_str); + if (nvme_uuid_random(uuid) < 0) + memset(uuid, 0, NVME_UUID_LEN); + nvme_uuid_to_string(uuid, uuid_str); } if (asprintf(&hostnqn, "nqn.2014-08.org.nvmexpress:uuid:%s", uuid_str) < 0) @@ -1085,7 +1127,7 @@ static __u32 nvmf_get_tel(const char *hostsymname) __u16 len; /* Host ID is mandatory */ - tel += nvmf_exat_size(sizeof(uuid_t)); + tel += nvmf_exat_size(NVME_UUID_LEN_STRING); /* Symbolic name is optional */ len = hostsymname ? strlen(hostsymname) : 0; @@ -1129,8 +1171,8 @@ static void nvmf_fill_die(struct nvmf_ext_die *die, struct nvme_host *h, numexat++; exat = die->exat; exat->exattype = cpu_to_le16(NVMF_EXATTYPE_HOSTID); - exat->exatlen = cpu_to_le16(nvmf_exat_len(sizeof(uuid_t))); - uuid_parse(h->hostid, exat->exatval); + exat->exatlen = cpu_to_le16(nvmf_exat_len(NVME_UUID_LEN)); + nvme_uuid_from_string(h->hostid, exat->exatval); /* Extended Attribute for the Symbolic Name (optional) */ symname_len = h->hostsymname ? strlen(h->hostsymname) : 0; diff --git a/src/nvme/fabrics.h b/src/nvme/fabrics.h index 9e099fe..272bb40 100644 --- a/src/nvme/fabrics.h +++ b/src/nvme/fabrics.h @@ -195,9 +195,14 @@ int nvmf_add_ctrl(nvme_host_t h, nvme_ctrl_t c, /** * nvmf_get_discovery_log() - Return the discovery log page - * @c: Discover controller to use + * @c: Discovery controller to use * @logp: Pointer to the log page to be returned - * @max_retries: maximum number of log page entries to be returned + * @max_retries: Number of retries in case of failure + * + * The memory allocated for the log page and returned in @logp + * must be freed by the caller using free(). + * + * Note: Consider using nvmf_get_discovery_wargs() instead. 
* * Return: 0 on success; on failure -1 is returned and errno is set */ @@ -205,6 +210,41 @@ int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, int max_retries); /** + * struct nvme_get_discovery_args - Arguments for nvmf_get_discovery_wargs() + * @c: Discovery controller + * @args_size: Length of the structure + * @max_retries: Number of retries in case of failure + * @result: The command completion result from CQE dword0 + * @timeout: Timeout in ms (default: NVME_DEFAULT_IOCTL_TIMEOUT) + * @lsp: Log specific field (See enum nvmf_log_discovery_lsp) + */ +struct nvme_get_discovery_args { + nvme_ctrl_t c; + int args_size; + int max_retries; + __u32 *result; + __u32 timeout; + __u8 lsp; +}; + +/** + * nvmf_get_discovery_wargs() - Get the discovery log page with args + * @args: Argument structure + * + * This function is similar to nvmf_get_discovery_log(), but + * takes an extensible @args parameter. @args provides more + * options than nvmf_get_discovery_log(). + * + * This function performs a get discovery log page (DLP) command + * and returns the DLP. The memory allocated for the returned + * DLP must be freed by the caller using free(). + * + * Return: Pointer to the discovery log page (to be freed). NULL + * on failure and errno is set. + */ +struct nvmf_discovery_log *nvmf_get_discovery_wargs(struct nvme_get_discovery_args *args); + +/** * nvmf_hostnqn_generate() - Generate a machine specific host nqn * Returns: An nvm namespace qualified name string based on the machine * identifier, or NULL if not successful. diff --git a/src/nvme/ioctl.h b/src/nvme/ioctl.h index d559b12..af95851 100644 --- a/src/nvme/ioctl.h +++ b/src/nvme/ioctl.h @@ -1461,7 +1461,7 @@ static inline int nvme_get_log_telemetry_host(int fd, __u64 offset, __u32 len, void *log) { struct nvme_get_log_args args = { - .lpo = 0, + .lpo = offset, .result = NULL, .log = log, .args_size = sizeof(args), diff --git a/src/nvme/json.c b/src/nvme/json.c index b42cd51..f0c2ab4 100644 --- a/src/nvme/json.c +++ b/src/nvme/json.c @@ -95,6 +95,9 @@ static void json_parse_port(nvme_subsystem_t s, struct json_object *port_obj) if (!c) return; json_update_attributes(c, port_obj); + attr_obj = json_object_object_get(port_obj, "dhchap_key"); + if (attr_obj) + nvme_ctrl_set_dhchap_host_key(c, json_object_get_string(attr_obj)); attr_obj = json_object_object_get(port_obj, "dhchap_ctrl_key"); if (attr_obj) nvme_ctrl_set_dhchap_key(c, json_object_get_string(attr_obj)); @@ -155,6 +158,43 @@ static void json_parse_host(nvme_root_t r, struct json_object *host_obj) } } +static struct json_object *parse_json(nvme_root_t r, int fd) +{ + char buf[JSON_FILE_BUF_SIZE]; + struct json_object *obj = NULL; + struct printbuf *pb; + json_tokener *tok = NULL; + int ret; + + pb = printbuf_new(); + if (!pb) + return NULL; + + while ((ret = read(fd, buf, JSON_FILE_BUF_SIZE)) > 0) + printbuf_memappend(pb, buf, ret); + + if (ret < 0) + goto out; + + tok = json_tokener_new_ex(JSON_TOKENER_DEFAULT_DEPTH); + if (!tok) + goto out; + + /* Enforce correctly formatted JSON */ + tok->flags = JSON_TOKENER_STRICT; + + obj = json_tokener_parse_ex(tok, pb->buf, printbuf_length(pb)); + if (!obj) + nvme_msg(r, LOG_DEBUG, "JSON parsing failed: %s\n", + json_util_get_last_err()); +out: + if (tok) + json_tokener_free(tok); + printbuf_free(pb); + + return obj; +} + int json_read_config(nvme_root_t r, const char *config_file) { struct json_object *json_root, *host_obj; @@ -166,12 +206,16 @@ int json_read_config(nvme_root_t r, const char *config_file) 
config_file, strerror(errno)); return fd; } - json_root = json_object_from_fd(fd); + json_root = parse_json(r, fd); + close(fd); if (!json_root) { - nvme_msg(r, LOG_DEBUG, "Failed to read %s, %s\n", - config_file, json_util_get_last_err()); errno = EPROTO; - close(fd); + return -1; + } + if (!json_object_is_type(json_root, json_type_array)) { + nvme_msg(r, LOG_DEBUG, "Wrong format, expected array\n"); + json_object_put(json_root); + errno = EPROTO; return -1; } for (h = 0; h < json_object_array_length(json_root); h++) { @@ -180,7 +224,6 @@ int json_read_config(nvme_root_t r, const char *config_file) json_parse_host(r, host_obj); } json_object_put(json_root); - close(fd); return 0; } @@ -222,6 +265,10 @@ static void json_update_port(struct json_object *ctrl_array, nvme_ctrl_t c) if (value) json_object_object_add(port_obj, "trsvcid", json_object_new_string(value)); + value = nvme_ctrl_get_dhchap_host_key(c); + if (value) + json_object_object_add(port_obj, "dhchap_key", + json_object_new_string(value)); value = nvme_ctrl_get_dhchap_key(c); if (value) json_object_object_add(port_obj, "dhchap_ctrl_key", @@ -365,6 +412,10 @@ static void json_dump_ctrl(struct json_object *ctrl_array, nvme_ctrl_t c) if (value) json_object_object_add(ctrl_obj, "trsvcid", json_object_new_string(value)); + value = nvme_ctrl_get_dhchap_host_key(c); + if (value) + json_object_object_add(ctrl_obj, "dhchap_key", + json_object_new_string(value)); value = nvme_ctrl_get_dhchap_key(c); if (value) json_object_object_add(ctrl_obj, "dhchap_ctrl_key", diff --git a/src/nvme/mi-mctp.c b/src/nvme/mi-mctp.c index ae604f2..86f5df6 100644 --- a/src/nvme/mi-mctp.c +++ b/src/nvme/mi-mctp.c @@ -179,13 +179,15 @@ struct nvme_mi_msg_resp_mpr { * populate the worst-case expected processing time, given in milliseconds. */ static bool nvme_mi_mctp_resp_is_mpr(struct nvme_mi_resp *resp, size_t len, - unsigned int *mpr_time) + __le32 mic, unsigned int *mpr_time) { + struct nvme_mi_admin_resp_hdr *admin_msg; struct nvme_mi_msg_resp_mpr *msg; - __le32 mic; + size_t clen; __u32 crc; - if (len != sizeof(*msg) + sizeof(mic)) + /* We need at least the minimal header plus checksum */ + if (len < sizeof(*msg) + sizeof(mic)) return false; msg = (struct nvme_mi_msg_resp_mpr *)resp->hdr; @@ -193,22 +195,42 @@ static bool nvme_mi_mctp_resp_is_mpr(struct nvme_mi_resp *resp, size_t len, if (msg->status != NVME_MI_RESP_MPR) return false; - /* We can't use verify_resp_mic here, as the response structure has - * not been laid-out properly in resp yet (this is deferred until - * we have the actual response). + /* Find and verify the MIC from the response, which may not be laid out + * in resp as we expect. We have to preserve resp->hdr_len and + * resp->data_len, as we will need them for the eventual reply message. + * Because of that, we can't use verify_resp_mic here. * - * We know the data is a fixed size, and linear in the hdr buf, so - * calculation is fairly simple. We do need to find the MIC data - * though, which could either be in the header buf (if the original - * header was larger than the minimal header message), or the start of - * the data buf (otherwise). + * If the packet was at the expected response size, then mic will + * be set already; if not, find it within the header/data buffers. + */ + + /* Devices may send a MPR response as a full-sized Admin response, + * rather than the minimal MI-only header. Allow this, but only if the + * type indicates admin, and the allocated response header is the + * correct size for an Admin response. 
+ */ + if (((msg->hdr.nmp >> 3) & 0xf) == NVME_MI_MT_ADMIN && + len == sizeof(*admin_msg) + sizeof(mic) && + resp->hdr_len == sizeof(*admin_msg)) { + if (resp->data_len) + mic = *(__le32 *)resp->data; + } else if (len == sizeof(*msg) + sizeof(mic)) { + if (resp->hdr_len > sizeof(*msg)) + mic = *(__le32 *)(msg + 1); + else if (resp->data_len) + mic = *(__le32 *)(resp->data); + } else { + return false; + } + + /* Since our response is just a header, we're guaranteed to have + * all data in resp->hdr. The response may be shorter than the expected + * header though, so clamp to len. */ - if (resp->hdr_len > sizeof(*msg)) - mic = *(__le32 *)(msg + 1); - else - mic = *(__le32 *)(resp->data); + len -= sizeof(mic); + clen = len < resp->hdr_len ? len : resp->hdr_len; - crc = ~nvme_mi_crc32_update(0xffffffff, msg, sizeof(*msg)); + crc = ~nvme_mi_crc32_update(0xffffffff, resp->hdr, clen); if (le32_to_cpu(mic) != crc) return false; @@ -369,7 +391,7 @@ retry: * header fields. However, we need to do this in the transport in order * to keep the tag allocated and retry the recvmsg */ - if (nvme_mi_mctp_resp_is_mpr(resp, len, &mpr_time)) { + if (nvme_mi_mctp_resp_is_mpr(resp, len, mic, &mpr_time)) { nvme_msg(ep->root, LOG_DEBUG, "Received More Processing Required, waiting for response\n"); @@ -493,6 +515,7 @@ nvme_mi_ep_t nvme_mi_open_mctp(nvme_root_t root, unsigned int netid, __u8 eid) err_free_ep: errno_save = errno; free(ep); + free(mctp); errno = errno_save; return NULL; } diff --git a/src/nvme/mi.c b/src/nvme/mi.c index 181a16c..6ff0a6f 100644 --- a/src/nvme/mi.c +++ b/src/nvme/mi.c @@ -11,6 +11,7 @@ #include <stdlib.h> #include <stdio.h> +#include <ccan/array_size/array_size.h> #include <ccan/endian/endian.h> #include "log.h" @@ -295,6 +296,52 @@ static void nvme_mi_admin_init_resp(struct nvme_mi_resp *resp, resp->hdr_len = sizeof(*hdr); } +static int nvme_mi_admin_parse_status(struct nvme_mi_resp *resp, __u32 *result) +{ + struct nvme_mi_admin_resp_hdr *admin_hdr; + struct nvme_mi_msg_resp *resp_hdr; + __u32 nvme_status; + __u32 nvme_result; + + /* we have a few different sources of "result" here: the status header + * in the MI response, the cdw3 status field, and (command specific) + * return values in cdw0. The latter is returned in the result pointer, + * the former two generate return values here + */ + + if (resp->hdr_len < sizeof(*resp_hdr)) { + errno = -EPROTO; + return -1; + } + resp_hdr = (struct nvme_mi_msg_resp *)resp->hdr; + + /* If we have a MI error, we can't be sure there's an admin header + * following; return just the MI status, with the status type + * indicator of MI. + */ + if (resp_hdr->status) + return resp_hdr->status | + (NVME_STATUS_TYPE_MI << NVME_STATUS_TYPE_SHIFT); + + /* We shouldn't hit this, as we'd have an error reported earlier. 
+ * However, for pointer safety, ensure we have a full admin header + */ + if (resp->hdr_len < sizeof(*admin_hdr)) { + errno = EPROTO; + return -1; + } + + admin_hdr = (struct nvme_mi_admin_resp_hdr *)resp->hdr; + nvme_result = le32_to_cpu(admin_hdr->cdw0); + nvme_status = le32_to_cpu(admin_hdr->cdw3) >> 16; + + /* the result pointer, optionally stored if the caller needs it */ + if (result) + *result = nvme_result; + + return nvme_status; +} + int nvme_mi_admin_xfer(nvme_mi_ctrl_t ctrl, struct nvme_mi_admin_req_hdr *admin_req, size_t req_data_size, @@ -343,6 +390,7 @@ int nvme_mi_admin_xfer(nvme_mi_ctrl_t ctrl, admin_req->hdr.type = NVME_MI_MSGTYPE_NVME; admin_req->hdr.nmp = (NVME_MI_ROR_REQ << 7) | (NVME_MI_MT_ADMIN << 3); + admin_req->ctrl_id = cpu_to_le16(ctrl->id); memset(&req, 0, sizeof(req)); req.hdr = &admin_req->hdr; req.hdr_len = sizeof(*admin_req); @@ -414,11 +462,9 @@ int nvme_mi_admin_identify_partial(nvme_mi_ctrl_t ctrl, if (rc) return rc; - if (resp_hdr.status) - return resp_hdr.status; - - if (args->result) - *args->result = le32_to_cpu(resp_hdr.cdw0); + rc = nvme_mi_admin_parse_status(&resp, args->result); + if (rc) + return rc; /* callers will expect a full response; if the data buffer isn't * fully valid, return an error */ @@ -452,7 +498,7 @@ static int __nvme_mi_admin_get_log(nvme_mi_ctrl_t ctrl, return -1; } - if (offset < 0 || offset >= len) { + if (offset < 0 || offset >= args->len || offset + len > args->len) { errno = EINVAL; return -1; } @@ -489,12 +535,11 @@ static int __nvme_mi_admin_get_log(nvme_mi_ctrl_t ctrl, if (rc) return rc; - if (resp_hdr.status) - return resp_hdr.status; - - *lenp = resp.data_len; + rc = nvme_mi_admin_parse_status(&resp, args->result); + if (!rc) + *lenp = resp.data_len; - return 0; + return rc; } int nvme_mi_admin_get_log(nvme_mi_ctrl_t ctrl, struct nvme_get_log_args *args) @@ -580,13 +625,7 @@ int nvme_mi_admin_security_send(nvme_mi_ctrl_t ctrl, if (rc) return rc; - if (resp_hdr.status) - return resp_hdr.status; - - if (args->result) - *args->result = le32_to_cpu(resp_hdr.cdw0); - - return 0; + return nvme_mi_admin_parse_status(&resp, args->result); } int nvme_mi_admin_security_recv(nvme_mi_ctrl_t ctrl, @@ -632,17 +671,306 @@ int nvme_mi_admin_security_recv(nvme_mi_ctrl_t ctrl, if (rc) return rc; - if (resp_hdr.status) - return resp_hdr.status; + rc = nvme_mi_admin_parse_status(&resp, args->result); + if (rc) + return rc; + + args->data_len = resp.data_len; + + return 0; +} + +int nvme_mi_admin_get_features(nvme_mi_ctrl_t ctrl, + struct nvme_get_features_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_get_features); + + req_hdr.cdw1 = cpu_to_le32(args->nsid); + req_hdr.cdw10 = cpu_to_le32((args->sel & 0x7) << 8 | args->fid); + req_hdr.cdw14 = cpu_to_le32(args->uuidx & 0x7f); + req_hdr.cdw11 = cpu_to_le32(args->cdw11); + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + resp.data = args->data; + resp.data_len = args->data_len; - if (args->result) - *args->result = resp_hdr.cdw0; + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + rc = nvme_mi_admin_parse_status(&resp, args->result); + if (rc) + return rc; + + args->data_len = resp.data_len; + + return 0; +} + +int nvme_mi_admin_set_features(nvme_mi_ctrl_t ctrl, + struct nvme_set_features_args *args) +{ 
+ struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_set_features); + + req_hdr.cdw1 = cpu_to_le32(args->nsid); + req_hdr.cdw10 = cpu_to_le32((args->save ? 1 : 0) << 31 | + (args->fid & 0xff)); + req_hdr.cdw14 = cpu_to_le32(args->uuidx & 0x7f); + req_hdr.cdw11 = cpu_to_le32(args->cdw11); + req_hdr.cdw12 = cpu_to_le32(args->cdw12); + req_hdr.cdw13 = cpu_to_le32(args->cdw13); + req_hdr.cdw15 = cpu_to_le32(args->cdw15); + + req.data_len = args->data_len; + req.data = args->data; + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + rc = nvme_mi_admin_parse_status(&resp, args->result); + if (rc) + return rc; args->data_len = resp.data_len; return 0; } +int nvme_mi_admin_ns_mgmt(nvme_mi_ctrl_t ctrl, + struct nvme_ns_mgmt_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_ns_mgmt); + + req_hdr.cdw1 = cpu_to_le32(args->nsid); + req_hdr.cdw10 = cpu_to_le32(args->sel & 0xf); + req_hdr.cdw11 = cpu_to_le32(args->csi << 24); + if (args->ns) { + req.data = args->ns; + req.data_len = sizeof(*args->ns); + req_hdr.dlen = cpu_to_le32(sizeof(*args->ns)); + req_hdr.flags = 0x1; + } + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + return nvme_mi_admin_parse_status(&resp, args->result); +} + +int nvme_mi_admin_ns_attach(nvme_mi_ctrl_t ctrl, + struct nvme_ns_attach_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_ns_attach); + + req_hdr.cdw1 = cpu_to_le32(args->nsid); + req_hdr.cdw10 = cpu_to_le32(args->sel & 0xf); + req.data = args->ctrlist; + req.data_len = sizeof(*args->ctrlist); + req_hdr.dlen = cpu_to_le32(sizeof(*args->ctrlist)); + req_hdr.flags = 0x1; + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + return nvme_mi_admin_parse_status(&resp, args->result); +} + +int nvme_mi_admin_fw_download(nvme_mi_ctrl_t ctrl, + struct nvme_fw_download_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + if (args->data_len & 0x3) + return -EINVAL; + + if (args->offset & 0x3) + return -EINVAL; + + if (!args->data_len) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_fw_download); + + req_hdr.cdw10 = cpu_to_le32((args->data_len >> 2) - 1); + req_hdr.cdw11 = cpu_to_le32(args->offset >> 2); + req.data = args->data; + req.data_len = args->data_len; + req_hdr.dlen = cpu_to_le32(args->data_len); + req_hdr.flags = 0x1; + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + 
if (rc) + return rc; + + return nvme_mi_admin_parse_status(&resp, NULL); +} + +int nvme_mi_admin_fw_commit(nvme_mi_ctrl_t ctrl, + struct nvme_fw_commit_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_fw_commit); + + req_hdr.cdw10 = cpu_to_le32(((args->bpid & 0x1) << 31) | + ((args->action & 0x7) << 3) | + ((args->slot & 0x7) << 0)); + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + return nvme_mi_admin_parse_status(&resp, NULL); +} + +int nvme_mi_admin_format_nvm(nvme_mi_ctrl_t ctrl, + struct nvme_format_nvm_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_format_nvm); + + req_hdr.cdw1 = cpu_to_le32(args->nsid); + req_hdr.cdw10 = cpu_to_le32(((args->lbafu & 0x3) << 12) + | ((args->ses & 0x7) << 9) + | ((args->pil & 0x1) << 8) + | ((args->pi & 0x7) << 5) + | ((args->mset & 0x1) << 4) + | ((args->lbaf & 0xf) << 0)); + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + return nvme_mi_admin_parse_status(&resp, args->result); +} + +int nvme_mi_admin_sanitize_nvm(nvme_mi_ctrl_t ctrl, + struct nvme_sanitize_nvm_args *args) +{ + struct nvme_mi_admin_resp_hdr resp_hdr; + struct nvme_mi_admin_req_hdr req_hdr; + struct nvme_mi_resp resp; + struct nvme_mi_req req; + int rc; + + if (args->args_size < sizeof(*args)) + return -EINVAL; + + nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, + nvme_admin_sanitize_nvm); + + req_hdr.cdw10 = cpu_to_le32(((args->nodas ? 1 : 0) << 9) + | ((args->oipbp ? 1 : 0) << 8) + | ((args->owpass & 0xf) << 4) + | ((args->ause ? 1 : 0) << 3) + | ((args->sanact & 0x7) << 0)); + req_hdr.cdw11 = cpu_to_le32(args->ovrpat); + + nvme_mi_calc_req_mic(&req); + + nvme_mi_admin_init_resp(&resp, &resp_hdr); + + rc = nvme_mi_submit(ctrl->ep, &req, &resp); + if (rc) + return rc; + + return nvme_mi_admin_parse_status(&resp, args->result); +} + static int nvme_mi_read_data(nvme_mi_ep_t ep, __u32 cdw0, void *data, size_t *data_len) { @@ -955,3 +1283,37 @@ nvme_mi_ctrl_t nvme_mi_next_ctrl(nvme_mi_ep_t ep, nvme_mi_ctrl_t c) { return c ? list_next(&ep->controllers, c, ep_entry) : NULL; } + + +static const char *const mi_status[] = { + [NVME_MI_RESP_MPR] = "More Processing Required: The command message is in progress and requires more time to complete processing", + [NVME_MI_RESP_INTERNAL_ERR] = "Internal Error: The request message could not be processed due to a vendor-specific error", + [NVME_MI_RESP_INVALID_OPCODE] = "Invalid Command Opcode", + [NVME_MI_RESP_INVALID_PARAM] = "Invalid Parameter", + [NVME_MI_RESP_INVALID_CMD_SIZE] = "Invalid Command Size: The size of the message body of the request was different than expected", + [NVME_MI_RESP_INVALID_INPUT_SIZE] = "Invalid Command Input Data Size: The command requires data and contains too much or too little data", + [NVME_MI_RESP_ACCESS_DENIED] = "Access Denied. 
Processing prohibited due to a vendor-specific mechanism of the Command and Feature lockdown function", + [NVME_MI_RESP_VPD_UPDATES_EXCEEDED] = "VPD Updates Exceeded", + [NVME_MI_RESP_PCIE_INACCESSIBLE] = "PCIe Inaccessible. The PCIe functionality is not available at this time", + [NVME_MI_RESP_MEB_SANITIZED] = "Management Endpoint Buffer Cleared Due to Sanitize", + [NVME_MI_RESP_ENC_SERV_FAILURE] = "Enclosure Services Failure", + [NVME_MI_RESP_ENC_SERV_XFER_FAILURE] = "Enclosure Services Transfer Failure: Communication with the Enclosure Services Process has failed", + [NVME_MI_RESP_ENC_FAILURE] = "An unrecoverable enclosure failure has been detected by the Enclosuer Services Process", + [NVME_MI_RESP_ENC_XFER_REFUSED] = "Enclosure Services Transfer Refused: The NVM Subsystem or Enclosure Services Process indicated an error or an invalid format in communication", + [NVME_MI_RESP_ENC_FUNC_UNSUP] = "Unsupported Enclosure Function: An SES Send command has been attempted to a simple Subenclosure", + [NVME_MI_RESP_ENC_SERV_UNAVAIL] = "Enclosure Services Unavailable: The NVM Subsystem or Enclosure Services Process has encountered an error but may become available again", + [NVME_MI_RESP_ENC_DEGRADED] = "Enclosure Degraded: A noncritical failure has been detected by the Enclosure Services Process", + [NVME_MI_RESP_SANITIZE_IN_PROGRESS] = "Sanitize In Progress: The requested command is prohibited while a sanitize operation is in progress", +}; + +/* kept in mi.c while we have a split libnvme/libnvme-mi; consider moving + * to utils.c (with nvme_status_to_string) if we ever merge. */ +const char *nvme_mi_status_to_string(int status) +{ + const char *s = "Unknown status"; + + if (status < ARRAY_SIZE(mi_status) && mi_status[status]) + s = mi_status[status]; + + return s; +} diff --git a/src/nvme/mi.h b/src/nvme/mi.h index a7ed240..ab4216d 100644 --- a/src/nvme/mi.h +++ b/src/nvme/mi.h @@ -367,6 +367,20 @@ struct nvme_mi_admin_resp_hdr { __le32 cdw0, cdw1, cdw3; } __attribute__((packed)); +/** + * nvme_mi_status_to_string() - return a string representation of the MI + * status. + * @status: MI response status + * + * Gives a string description of @status, as per section 4.1.2 of the NVMe-MI + * spec. The status value should be of type NVME_STATUS_MI, and extracted + * from the return value using nvme_status_get_value(). + * + * Returned string is const, and should not be free()ed. + * + * Returns: A string representing the status value + */ +const char *nvme_mi_status_to_string(int status); /** * nvme_mi_create_root() - Create top-level MI (root) handle. @@ -601,7 +615,8 @@ nvme_root_t nvme_mi_scan_mctp(void); * so, all existing controller objects will be freed - the caller must not * hold a reference to those across this call. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. * * See: &nvme_mi_for_each_ctrl */ @@ -652,7 +667,8 @@ char *nvme_mi_endpoint_desc(nvme_mi_ep_t ep); * Retrieves the Subsystem information - number of external ports and * NVMe version information. See &struct nvme_mi_read_nvm_ss_info. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. 
*/ int nvme_mi_mi_read_mi_data_subsys(nvme_mi_ep_t ep, struct nvme_mi_read_nvm_ss_info *s); @@ -670,7 +686,8 @@ int nvme_mi_mi_read_mi_data_subsys(nvme_mi_ep_t ep, * * See &struct nvme_mi_read_port_info. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_mi_read_mi_data_port(nvme_mi_ep_t ep, __u8 portid, struct nvme_mi_read_port_info *p); @@ -687,7 +704,8 @@ int nvme_mi_mi_read_mi_data_port(nvme_mi_ep_t ep, __u8 portid, * * See &struct nvme_ctrl_list. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_mi_read_mi_data_ctrl_list(nvme_mi_ep_t ep, __u8 start_ctrlid, struct nvme_ctrl_list *list); @@ -704,7 +722,8 @@ int nvme_mi_mi_read_mi_data_ctrl_list(nvme_mi_ep_t ep, __u8 start_ctrlid, * * See &struct nvme_mi_read_ctrl_info. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_mi_read_mi_data_ctrl(nvme_mi_ep_t ep, __u16 ctrl_id, struct nvme_mi_read_ctrl_info *ctrl); @@ -722,7 +741,8 @@ int nvme_mi_mi_read_mi_data_ctrl(nvme_mi_ep_t ep, __u16 ctrl_id, * * See &struct nvme_mi_nvm_ss_health_status. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_mi_subsystem_health_status_poll(nvme_mi_ep_t ep, bool clear, struct nvme_mi_nvm_ss_health_status *nshds); @@ -744,7 +764,8 @@ int nvme_mi_mi_subsystem_health_status_poll(nvme_mi_ep_t ep, bool clear, * * See &enum nvme_mi_config_id for identifiers. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_mi_config_get(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1, __u32 *nmresp); @@ -761,7 +782,8 @@ int nvme_mi_mi_config_get(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1, * * See &enum nvme_mi_config_id for identifiers. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_mi_config_set(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1); @@ -775,7 +797,8 @@ int nvme_mi_mi_config_set(nvme_mi_ep_t ep, __u32 dw0, __u32 dw1); * the port specified in @port. On success, populates @freq with the port * frequency * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ static inline int nvme_mi_mi_config_get_smbus_freq(nvme_mi_ep_t ep, __u8 port, enum nvme_mi_config_smbus_freq *freq) @@ -787,7 +810,7 @@ static inline int nvme_mi_mi_config_get_smbus_freq(nvme_mi_ep_t ep, __u8 port, rc = nvme_mi_mi_config_get(ep, dw0, 0, &tmp); if (!rc) - *freq = tmp & 0x3; + *freq = (enum nvme_mi_config_smbus_freq)(tmp & 0x3); return rc; } @@ -803,7 +826,8 @@ static inline int nvme_mi_mi_config_get_smbus_freq(nvme_mi_ep_t ep, __u8 port, * See &struct nvme_mi_read_port_info for the maximum supported SMBus frequency * for the port. * - * Return: 0 on success, non-zero on failure. 
+ * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ static inline int nvme_mi_mi_config_set_smbus_freq(nvme_mi_ep_t ep, __u8 port, enum nvme_mi_config_smbus_freq freq) @@ -828,7 +852,8 @@ static inline int nvme_mi_mi_config_set_smbus_freq(nvme_mi_ep_t ep, __u8 port, * See &nvme_mi_mi_subsystem_health_status_poll(), &enum nvme_mi_ccs for * values in @mask. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ static inline int nvme_mi_mi_config_set_health_status_change(nvme_mi_ep_t ep, __u32 mask) @@ -852,7 +877,8 @@ static inline int nvme_mi_mi_config_set_health_status_change(nvme_mi_ep_t ep, * Some controllers may also use this as the maximum receive unit size, and * may not accept MCTP messages larger than the configured MTU. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ static inline int nvme_mi_mi_config_get_mctp_mtu(nvme_mi_ep_t ep, __u8 port, __u16 *mtu) @@ -882,7 +908,8 @@ static inline int nvme_mi_mi_config_get_mctp_mtu(nvme_mi_ep_t ep, __u8 port, * this value, you will likely need to change the MTU of the local MCTP * interface(s) to match. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ static inline int nvme_mi_mi_config_set_mctp_mtu(nvme_mi_ep_t ep, __u8 port, __u16 mtu) @@ -918,7 +945,8 @@ static inline int nvme_mi_mi_config_set_mctp_mtu(nvme_mi_ep_t ep, __u8 port, * * See: &struct nvme_mi_admin_req_hdr and &struct nvme_mi_admin_resp_hdr. * - * Return: 0 on success, non-zero on failure. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise.. */ int nvme_mi_admin_xfer(nvme_mi_ctrl_t ctrl, struct nvme_mi_admin_req_hdr *admin_req, @@ -949,7 +977,8 @@ int nvme_mi_admin_xfer(nvme_mi_ctrl_t ctrl, * of this. If the type of your identify command is standardized but not * yet supported by libnvme-mi, please contact the maintainers. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. * * See: &struct nvme_identify_args */ @@ -969,7 +998,8 @@ int nvme_mi_admin_identify_partial(nvme_mi_ctrl_t ctrl, * Will return an error if the length of the response data (from the * controller) is not a full &NVME_IDENTIFY_DATA_SIZE. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. * * See: &struct nvme_identify_args */ @@ -997,7 +1027,8 @@ static inline int nvme_mi_admin_identify(nvme_mi_ctrl_t ctrl, * Will return an error if the length of the response data (from the * controller) is not a full &NVME_IDENTIFY_DATA_SIZE. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
*/ static inline int nvme_mi_admin_identify_cns_nsid(nvme_mi_ctrl_t ctrl, enum nvme_identify_cns cns, @@ -1019,6 +1050,69 @@ static inline int nvme_mi_admin_identify_cns_nsid(nvme_mi_ctrl_t ctrl, } /** + * nvme_mi_admin_identify_ns() - Perform an Admin identify command for a + * namespace + * @ctrl: Controller to process identify command + * @nsid: namespace ID + * @ns: Namespace identification to populate + * + * Perform an Identify (namespace) command, setting the namespace id data + * in @ns. The namespace is expected to active and allocated. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_identify_ns(nvme_mi_ctrl_t ctrl, __u32 nsid, + struct nvme_id_ns *ns) +{ + return nvme_mi_admin_identify_cns_nsid(ctrl, NVME_IDENTIFY_CNS_NS, + nsid, ns); +} + +/** + * nvme_mi_admin_identify_ns_descs() - Perform an Admin identify Namespace + * Identification Descriptor list command for a namespace + * @ctrl: Controller to process identify command + * @nsid: Namespace ID + * @descs: Namespace Identification Descriptor list to populate + * + * Perform an Identify namespace identification description list command, + * setting the namespace identification description list in @descs + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_identify_ns_descs(nvme_mi_ctrl_t ctrl, + __u32 nsid, + struct nvme_ns_id_desc *descs) +{ + return nvme_mi_admin_identify_cns_nsid(ctrl, NVME_IDENTIFY_CNS_NS_DESC_LIST, + nsid, descs); +} + +/** + * nvme_mi_admin_identify_allocated_ns() - Perform an Admin identify command + * for an allocated namespace + * @ctrl: Controller to process identify command + * @nsid: namespace ID + * @ns: Namespace identification to populate + * + * Perform an Identify (namespace) command, setting the namespace id data + * in @ns. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_identify_allocated_ns(nvme_mi_ctrl_t ctrl, + __u32 nsid, + struct nvme_id_ns *ns) +{ + return nvme_mi_admin_identify_cns_nsid(ctrl, + NVME_IDENTIFY_CNS_ALLOCATED_NS, + nsid, ns); +} + +/** * nvme_mi_admin_identify_ctrl() - Perform an Admin identify for a controller * @ctrl: Controller to process identify command * @id: Controller identify data to populate @@ -1030,7 +1124,8 @@ static inline int nvme_mi_admin_identify_cns_nsid(nvme_mi_ctrl_t ctrl, * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @id will be * fully populated on success. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. * * See: &struct nvme_id_ctrl */ @@ -1055,7 +1150,8 @@ static inline int nvme_mi_admin_identify_ctrl(nvme_mi_ctrl_t ctrl, * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @id will be * fully populated on success. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
* * See: &struct nvme_ctrl_list */ @@ -1079,6 +1175,198 @@ static inline int nvme_mi_admin_identify_ctrl_list(nvme_mi_ctrl_t ctrl, } /** + * nvme_mi_admin_identify_nsid_ctrl_list() - Perform an Admin identify for a + * controller list with specific namespace ID + * @ctrl: Controller to process identify command + * @nsid: Namespace identifier + * @cntid: Controller ID to specify list start + * @list: List data to populate + * + * Perform an Identify command, for the controller list for @nsid, starting + * with IDs greater than or equal to @cntid. + * + * Will return an error if the length of the response data (from the + * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @id will be + * fully populated on success. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + * + * See: &struct nvme_ctrl_list + */ +static inline int nvme_mi_admin_identify_nsid_ctrl_list(nvme_mi_ctrl_t ctrl, + __u32 nsid, __u16 cntid, + struct nvme_ctrl_list *list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = list, + .args_size = sizeof(args), + .cns = NVME_IDENTIFY_CNS_CTRL_LIST, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_mi_admin_identify(ctrl, &args); +} + +/** + * nvme_mi_admin_identify_allocated_ns_list() - Perform an Admin identify for + * an allocated namespace list + * @ctrl: Controller to process identify command + * @nsid: Namespace ID to specify list start + * @list: List data to populate + * + * Perform an Identify command, for the allocated namespace list starting with + * IDs greater than or equal to @nsid. Specify &NVME_NSID_NONE for the start + * of the list. + * + * Will return an error if the length of the response data (from the + * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @list will be + * be fully populated on success. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + * + * See: &struct nvme_ns_list + */ +static inline int nvme_mi_admin_identify_allocated_ns_list(nvme_mi_ctrl_t ctrl, + __u32 nsid, + struct nvme_ns_list *list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = list, + .args_size = sizeof(args), + .cns = NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_mi_admin_identify(ctrl, &args); +} + +/** + * nvme_mi_admin_identify_active_ns_list() - Perform an Admin identify for an + * active namespace list + * @ctrl: Controller to process identify command + * @nsid: Namespace ID to specify list start + * @list: List data to populate + * + * Perform an Identify command, for the active namespace list starting with + * IDs greater than or equal to @nsid. Specify &NVME_NSID_NONE for the start + * of the list. + * + * Will return an error if the length of the response data (from the + * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @list will be + * be fully populated on success. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ * + * See: &struct nvme_ns_list + */ +static inline int nvme_mi_admin_identify_active_ns_list(nvme_mi_ctrl_t ctrl, + __u32 nsid, + struct nvme_ns_list *list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = list, + .args_size = sizeof(args), + .cns = NVME_IDENTIFY_CNS_NS_ACTIVE_LIST, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_mi_admin_identify(ctrl, &args); +} + +/** + * nvme_mi_admin_identify_primary_ctrl() - Perform an Admin identify for + * primary controller capabilities data structure. + * @ctrl: Controller to process identify command + * @cntid: Controller ID to specify + * @cap: Primary Controller Capabilities data structure to populate + * + * Perform an Identify command to get the Primary Controller Capabilities data + * for the controller specified by @cntid + * + * Will return an error if the length of the response data (from the + * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @cap will be + * be fully populated on success. + * + * Return: 0 on success, non-zero on failure + * + * See: &struct nvme_primary_ctrl_cap + */ +static inline int nvme_mi_admin_identify_primary_ctrl(nvme_mi_ctrl_t ctrl, + __u16 cntid, + struct nvme_primary_ctrl_cap *cap) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = cap, + .args_size = sizeof(args), + .cns = NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_mi_admin_identify(ctrl, &args); +} + +/** + * nvme_mi_admin_identify_secondary_ctrl_list() - Perform an Admin identify for + * a secondary controller list. + * @ctrl: Controller to process identify command + * @nsid: Namespace ID to specify list start + * @cntid: Controller ID to specify list start + * @list: List data to populate + * + * Perform an Identify command, for the secondary controllers associated with + * the current primary controller. Only entries with IDs greater than or + * equal to @cntid are returned. + * + * Will return an error if the length of the response data (from the + * controller) is not a full &NVME_IDENTIFY_DATA_SIZE, so @list will be + * be fully populated on success. + * + * Return: 0 on success, non-zero on failure + * + * See: &struct nvme_secondary_ctrl_list + */ +static inline int nvme_mi_admin_identify_secondary_ctrl_list(nvme_mi_ctrl_t ctrl, + __u32 nsid, + __u16 cntid, + struct nvme_secondary_ctrl_list *list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = list, + .args_size = sizeof(args), + .cns = NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_mi_admin_identify(ctrl, &args); +} + +/** * nvme_mi_admin_get_log() - Retrieve log page data from controller * @ctrl: Controller to query * @args: Get Log Page command arguments @@ -1091,13 +1379,851 @@ static inline int nvme_mi_admin_identify_ctrl_list(nvme_mi_ctrl_t ctrl, * This request may be implemented as multiple log page commands, in order * to fit within MI message-size limits. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
* * See: &struct nvme_get_log_args */ int nvme_mi_admin_get_log(nvme_mi_ctrl_t ctrl, struct nvme_get_log_args *args); /** + * nvme_mi_admin_get_nsid_log() - Helper for Get Log Page functions + * @ctrl: Controller to query + * @rae: Retain Asynchronous Events + * @lid: Log identifier + * @nsid: Namespace ID + * @len: length of log buffer + * @log: pointer for resulting log data + * + * Performs a Get Log Page Admin command for a specific log ID @lid and + * namespace ID @nsid. Log data is expected to be @len bytes, and is stored + * in @log on success. The @rae flag is passed as-is to the Get Log Page + * command, and is specific to the Log Page requested. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_nsid_log(nvme_mi_ctrl_t ctrl, bool rae, + enum nvme_cmd_get_log_lid lid, + __u32 nsid, __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = lid, + .len = len, + .nsid = nsid, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_simple() - Helper for Get Log Page functions with no + * NSID or RAE requirements + * @ctrl: Controller to query + * @lid: Log identifier + * @len: length of log buffer + * @log: pointer for resulting log data + * + * Performs a Get Log Page Admin command for a specific log ID @lid, using + * NVME_NSID_ALL for the namespace identifier, and rae set to false. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_simple(nvme_mi_ctrl_t ctrl, + enum nvme_cmd_get_log_lid lid, + __u32 len, void *log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, false, lid, NVME_NSID_ALL, + len, log); +} + +/** + * nvme_mi_admin_get_log_supported_log_pages() - Retrieve nmve supported log + * pages + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @log: Array of LID supported and Effects data structures + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_supported_log_pages(nvme_mi_ctrl_t ctrl, + bool rae, + struct nvme_supported_log_pages *log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, + NVME_LOG_LID_SUPPORTED_LOG_PAGES, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_mi_admin_get_log_error() - Retrieve nvme error log + * @ctrl: Controller to query + * @nr_entries: Number of error log entries allocated + * @rae: Retain asynchronous events + * @err_log: Array of error logs of size 'entries' + * + * This log page describes extended error information for a command that + * completed with error, or may report an error that is not specific to a + * particular command. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_mi_admin_get_log_error(nvme_mi_ctrl_t ctrl, + unsigned int nr_entries, bool rae, + struct nvme_error_log_page *err_log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_ERROR, + NVME_NSID_ALL, sizeof(*err_log) * nr_entries, + err_log); +} + +/** + * nvme_mi_admin_get_log_smart() - Retrieve nvme smart log + * @ctrl: Controller to query + * @nsid: Optional namespace identifier + * @rae: Retain asynchronous events + * @smart_log: User address to store the smart log + * + * This log page provides SMART and general health information. The information + * provided is over the life of the controller and is retained across power + * cycles. To request the controller log page, the namespace identifier + * specified is FFFFFFFFh. The controller may also support requesting the log + * page on a per namespace basis, as indicated by bit 0 of the LPA field in the + * Identify Controller data structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_smart(nvme_mi_ctrl_t ctrl, __u32 nsid, + bool rae, + struct nvme_smart_log *smart_log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_SMART, + nsid, sizeof(*smart_log), smart_log); +} + +/** + * nvme_mi_admin_get_log_fw_slot() - Retrieves the controller firmware log + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @fw_log: User address to store the log page + * + * This log page describes the firmware revision stored in each firmware slot + * supported. The firmware revision is indicated as an ASCII string. The log + * page also indicates the active slot number. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_fw_slot(nvme_mi_ctrl_t ctrl, bool rae, + struct nvme_firmware_slot *fw_log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_FW_SLOT, + NVME_NSID_ALL, sizeof(*fw_log), fw_log); +} + +/** + * nvme_mi_admin_get_log_changed_ns_list() - Retrieve namespace changed list + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @ns_log: User address to store the log page + * + * This log page describes namespaces attached to this controller that have + * changed since the last time the namespace was identified, been added, or + * deleted. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_changed_ns_list(nvme_mi_ctrl_t ctrl, + bool rae, + struct nvme_ns_list *ns_log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_CHANGED_NS, + NVME_NSID_ALL, sizeof(*ns_log), ns_log); +} + +/** + * nvme_mi_admin_get_log_cmd_effects() - Retrieve nvme command effects log + * @ctrl: Controller to query + * @csi: Command Set Identifier + * @effects_log: User address to store the effects log + * + * This log page describes the commands that the controller supports and the + * effects of those commands on the state of the NVM subsystem. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_mi_admin_get_log_cmd_effects(nvme_mi_ctrl_t ctrl, + enum nvme_csi csi, + struct nvme_cmd_effects_log *effects_log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = effects_log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_CMD_EFFECTS, + .len = sizeof(*effects_log), + .nsid = NVME_NSID_ALL, + .csi = csi, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_device_self_test() - Retrieve the device self test log + * @ctrl: Controller to query + * @log: Userspace address of the log payload + * + * The log page indicates the status of an in progress self test and the + * percent complete of that operation, and the results of the previous 20 + * self-test operations. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_device_self_test(nvme_mi_ctrl_t ctrl, + struct nvme_self_test_log *log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, false, + NVME_LOG_LID_DEVICE_SELF_TEST, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_mi_admin_get_log_create_telemetry_host() - Create host telemetry log + * @ctrl: Controller to query + * @log: Userspace address of the log payload + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_create_telemetry_host(nvme_mi_ctrl_t ctrl, + struct nvme_telemetry_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_TELEMETRY_HOST, + .len = sizeof(*log), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_TELEM_HOST_LSP_CREATE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_telemetry_host() - Get Telemetry Host-Initiated log + * page + * @ctrl: Controller to query + * @offset: Offset into the telemetry data + * @len: Length of provided user buffer to hold the log data in bytes + * @log: User address for log page data + * + * Retrieves the Telemetry Host-Initiated log page at the requested offset + * using the previously existing capture. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_mi_admin_get_log_telemetry_host(nvme_mi_ctrl_t ctrl, + __u64 offset, __u32 len, + void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_TELEMETRY_HOST, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_TELEM_HOST_LSP_RETAIN, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_telemetry_ctrl() - Get Telemetry Controller-Initiated + * log page + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @offset: Offset into the telemetry data + * @len: Length of provided user buffer to hold the log data in bytes + * @log: User address for log page data + * + * Retrieves the Telemetry Controller-Initiated log page at the requested offset + * using the previously existing capture. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_telemetry_ctrl(nvme_mi_ctrl_t ctrl, + bool rae, + __u64 offset, __u32 len, + void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_TELEMETRY_CTRL, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_endurance_group() - Get Endurance Group log + * @ctrl: Controller to query + * @endgid: Starting group identifier to return in the list + * @log: User address to store the endurance log + * + * This log page indicates if an Endurance Group Event has occurred for a + * particular Endurance Group. If an Endurance Group Event has occurred, the + * details of the particular event are included in the Endurance Group + * Information log page for that Endurance Group. An asynchronous event is + * generated when an entry for an Endurance Group is newly added to this log + * page. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_endurance_group(nvme_mi_ctrl_t ctrl, + __u16 endgid, + struct nvme_endurance_group_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_ENDURANCE_GROUP, + .len = sizeof(*log), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = endgid, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_predictable_lat_nvmset() - Predictable Latency Per NVM + * Set + * @ctrl: Controller to query + * @nvmsetid: NVM set id + * @log: User address to store the predictable latency log + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_mi_admin_get_log_predictable_lat_nvmset(nvme_mi_ctrl_t ctrl, + __u16 nvmsetid, + struct nvme_nvmset_predictable_lat_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_PREDICTABLE_LAT_NVMSET, + .len = sizeof(*log), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = nvmsetid, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_predictable_lat_event() - Retrieve Predictable Latency + * Event Aggregate Log Page + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @offset: Offset into the predictable latency event + * @len: Length of provided user buffer to hold the log data in bytes + * @log: User address for log page data + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_predictable_lat_event(nvme_mi_ctrl_t ctrl, + bool rae, + __u32 offset, + __u32 len, + void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_PREDICTABLE_LAT_AGG, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_ana() - Retrieve Asymmetric Namespace Access log page + * @ctrl: Controller to query + * @lsp: Log specific, see &enum nvme_get_log_ana_lsp + * @rae: Retain asynchronous events + * @offset: Offset to the start of the log page + * @len: The allocated length of the log page + * @log: User address to store the ana log + * + * This log consists of a header describing the log and descriptors containing + * the asymmetric namespace access information for ANA Groups that contain + * namespaces that are attached to the controller processing the command. + * + * See &struct nvme_ana_rsp_hdr for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_ana(nvme_mi_ctrl_t ctrl, + enum nvme_log_ana_lsp lsp, bool rae, + __u64 offset, __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_ANA, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = (__u8)lsp, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_ana_groups() - Retrieve Asymmetric Namespace Access + * groups only log page + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @len: The allocated length of the log page + * @log: User address to store the ana group log + * + * See &struct nvme_ana_group_desc for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */
+static inline int nvme_mi_admin_get_log_ana_groups(nvme_mi_ctrl_t ctrl,
+						    bool rae, __u32 len,
+						    struct nvme_ana_group_desc *log)
+{
+	return nvme_mi_admin_get_log_ana(ctrl, NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY, rae, 0,
+					 len, log);
+}
+
+/**
+ * nvme_mi_admin_get_log_lba_status() - Retrieve LBA Status
+ * @ctrl: Controller to query
+ * @rae: Retain asynchronous events
+ * @offset: Offset to the start of the log page
+ * @len: The allocated length of the log page
+ * @log: User address to store the log page
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_get_log_lba_status(nvme_mi_ctrl_t ctrl, bool rae,
+						    __u64 offset, __u32 len,
+						    void *log)
+{
+	struct nvme_get_log_args args = {
+		.lpo = offset,
+		.result = NULL,
+		.log = log,
+		.args_size = sizeof(args),
+		.lid = NVME_LOG_LID_LBA_STATUS,
+		.len = len,
+		.nsid = NVME_NSID_NONE,
+		.csi = NVME_CSI_NVM,
+		.lsi = NVME_LOG_LSI_NONE,
+		.lsp = NVME_LOG_LSP_NONE,
+		.uuidx = NVME_UUID_NONE,
+		.rae = rae,
+		.ot = false,
+	};
+	return nvme_mi_admin_get_log(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_get_log_endurance_grp_evt() - Retrieve Endurance Group Event
+ * Aggregate log page
+ * @ctrl: Controller to query
+ * @rae: Retain asynchronous events
+ * @offset: Offset to the start of the log page
+ * @len: The allocated length of the log page
+ * @log: User address to store the log page
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_get_log_endurance_grp_evt(nvme_mi_ctrl_t ctrl,
+							   bool rae,
+							   __u32 offset,
+							   __u32 len,
+							   void *log)
+{
+	struct nvme_get_log_args args = {
+		.lpo = offset,
+		.result = NULL,
+		.log = log,
+		.args_size = sizeof(args),
+		.lid = NVME_LOG_LID_ENDURANCE_GRP_EVT,
+		.len = len,
+		.nsid = NVME_NSID_NONE,
+		.csi = NVME_CSI_NVM,
+		.lsi = NVME_LOG_LSI_NONE,
+		.lsp = NVME_LOG_LSP_NONE,
+		.uuidx = NVME_UUID_NONE,
+		.rae = rae,
+		.ot = false,
+	};
+	return nvme_mi_admin_get_log(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_get_log_fid_supported_effects() - Retrieve Feature Identifiers
+ * Supported and Effects
+ * @ctrl: Controller to query
+ * @rae: Retain asynchronous events
+ * @log: FID Supported and Effects data structure
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_get_log_fid_supported_effects(nvme_mi_ctrl_t ctrl,
+							       bool rae,
+							       struct nvme_fid_supported_effects_log *log)
+{
+	return nvme_mi_admin_get_nsid_log(ctrl, rae,
+					  NVME_LOG_LID_FID_SUPPORTED_EFFECTS,
+					  NVME_NSID_NONE, sizeof(*log), log);
+}
+
+/**
+ * nvme_mi_admin_get_log_mi_cmd_supported_effects() - Retrieve MI Commands
+ * Supported and Effects log
+ * @ctrl: Controller to query
+ * @rae: Retain asynchronous events
+ * @log: MI Command Supported and Effects data structure
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */ +static inline int nvme_mi_admin_get_log_mi_cmd_supported_effects(nvme_mi_ctrl_t ctrl, + bool rae, + struct nvme_mi_cmd_supported_effects_log *log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_MI_CMD_SUPPORTED_EFFECTS, + NVME_NSID_NONE, sizeof(*log), log); +} + +/** + * nvme_mi_admin_get_log_boot_partition() - Retrieve Boot Partition + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @lsp: The log specified field of LID + * @len: The allocated size, minimum + * struct nvme_boot_partition + * @part: User address to store the log page + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_boot_partition(nvme_mi_ctrl_t ctrl, + bool rae, __u8 lsp, + __u32 len, + struct nvme_boot_partition *part) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = part, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_BOOT_PARTITION, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_discovery() - Retrieve Discovery log page + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @offset: Offset of this log to retrieve + * @len: The allocated size for this portion of the log + * @log: User address to store the discovery log + * + * Supported only by fabrics discovery controllers, returning discovery + * records. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_discovery(nvme_mi_ctrl_t ctrl, bool rae, + __u32 offset, __u32 len, + void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_DISCOVER, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_media_unit_stat() - Retrieve Media Unit Status + * @ctrl: Controller to query + * @domid: Domain Identifier selection, if supported + * @mus: User address to store the Media Unit statistics log + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_media_unit_stat(nvme_mi_ctrl_t ctrl, + __u16 domid, + struct nvme_media_unit_stat_log *mus) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = mus, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_MEDIA_UNIT_STATUS, + .len = sizeof(*mus), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = domid, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_support_cap_config_list() - Retrieve Supported + * Capacity Configuration List + * @ctrl: Controller to query + * @domid: Domain Identifier selection, if supported + * @cap: User address to store supported capabilities config list + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_mi_admin_get_log_support_cap_config_list(nvme_mi_ctrl_t ctrl, + __u16 domid, + struct nvme_supported_cap_config_list_log *cap) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = cap, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_SUPPORTED_CAP_CONFIG_LIST, + .len = sizeof(*cap), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = domid, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_reservation() - Retrieve Reservation Notification + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @log: User address to store the reservation log + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_reservation(nvme_mi_ctrl_t ctrl, + bool rae, + struct nvme_resv_notification_log *log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_RESERVATION, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_mi_admin_get_log_sanitize() - Retrieve Sanitize Status + * @ctrl: Controller to query + * @rae: Retain asynchronous events + * @log: User address to store the sanitize log + * + * The Sanitize Status log page reports sanitize operation time estimates and + * information about the most recent sanitize operation. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_sanitize(nvme_mi_ctrl_t ctrl, bool rae, + struct nvme_sanitize_log_page *log) +{ + return nvme_mi_admin_get_nsid_log(ctrl, rae, NVME_LOG_LID_SANITIZE, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_mi_admin_get_log_zns_changed_zones() - Retrieve list of zones that have + * changed + * @ctrl: Controller to query + * @nsid: Namespace ID + * @rae: Retain asynchronous events + * @log: User address to store the changed zone log + * + * The list of zones that have changed state due to an exceptional event. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_mi_admin_get_log_zns_changed_zones(nvme_mi_ctrl_t ctrl, + __u32 nsid, bool rae, + struct nvme_zns_changed_zone_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_ZNS_CHANGED_ZONES, + .len = sizeof(*log), + .nsid = nsid, + .csi = NVME_CSI_ZNS, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** + * nvme_mi_admin_get_log_persistent_event() - Retrieve Persistent Event Log + * @ctrl: Controller to query + * @action: Action the controller should take during processing this command + * @size: Size of @pevent_log + * @pevent_log: User address to store the persistent event log + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_mi_admin_get_log_persistent_event(nvme_mi_ctrl_t ctrl, + enum nvme_pevent_log_action action, + __u32 size, void *pevent_log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = pevent_log, + .args_size = sizeof(args), + .lid = NVME_LOG_LID_PERSISTENT_EVENT, + .len = size, + .nsid = NVME_NSID_ALL, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = (__u8)action, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_mi_admin_get_log(ctrl, &args); +} + +/** * nvme_mi_admin_security_send() - Perform a Security Send command on a * controller. * @ctrl: Controller to send command to @@ -1111,7 +2237,8 @@ int nvme_mi_admin_get_log(nvme_mi_ctrl_t ctrl, struct nvme_get_log_args *args); * Security Send data length should not be greater than 4096 bytes to * comply with specification limits. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. * * See: &struct nvme_get_log_args */ @@ -1132,12 +2259,297 @@ int nvme_mi_admin_security_send(nvme_mi_ctrl_t ctrl, * Security Receive data length should not be greater than 4096 bytes to * comply with specification limits. * - * Return: 0 on success, non-zero on failure + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. * * See: &struct nvme_get_log_args */ int nvme_mi_admin_security_recv(nvme_mi_ctrl_t ctrl, struct nvme_security_receive_args *args); +/** + * nvme_mi_admin_get_features - Perform a Get Feature command on a controller + * @ctrl: Controller to send command to + * @args: Get Features command arguments + * + * Performs a Get Features Admin command as specified by @args. Returned + * feature data will be stored in @args->result and @args->data, depending + * on the specification of the feature itself; most features do not return + * additional data. See section 5.27.1 of the NVMe spec (v2.0b) for + * feature-specific information. + * + * On success, @args->data_len will be updated with the actual data length + * received. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_mi_admin_get_features(nvme_mi_ctrl_t ctrl, + struct nvme_get_features_args *args); + +/** + * nvme_mi_admin_get_features_data() - Helper function for &nvme_mi_admin_get_features() + * @ctrl: Controller to send command to + * @fid: Feature identifier + * @nsid: Namespace ID, if applicable for @fid + * @data_len: Length of feature data, if applicable for @fid, in bytes + * @data: User address of feature data, if applicable + * @result: The command completion result from CQE dword0 + * + * Helper for optionally features that optionally return data, using the + * SEL_CURRENT selector value. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */
+static inline int nvme_mi_admin_get_features_data(nvme_mi_ctrl_t ctrl,
+						   enum nvme_features_id fid,
+						   __u32 nsid, __u32 data_len,
+						   void *data, __u32 *result)
+{
+	struct nvme_get_features_args args = {
+		.result = result,
+		.data = data,
+		.args_size = sizeof(args),
+		.nsid = nsid,
+		.sel = NVME_GET_FEATURES_SEL_CURRENT,
+		.cdw11 = 0,
+		.data_len = data_len,
+		.fid = (__u8)fid,
+		.uuidx = NVME_UUID_NONE,
+	};
+
+	return nvme_mi_admin_get_features(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_get_features_simple - Get a simple feature value with no data
+ * @ctrl: Controller to send command to
+ * @fid: Feature identifier
+ * @nsid: Namespace id, if required by @fid
+ * @result: output feature data
+ */
+static inline int nvme_mi_admin_get_features_simple(nvme_mi_ctrl_t ctrl,
+						     enum nvme_features_id fid,
+						     __u32 nsid,
+						     __u32 *result)
+{
+	return nvme_mi_admin_get_features_data(ctrl, fid, nsid,
+					       0, NULL, result);
+}
+
+/**
+ * nvme_mi_admin_set_features - Perform a Set Features command on a controller
+ * @ctrl: Controller to send command to
+ * @args: Set Features command arguments
+ *
+ * Performs a Set Features Admin command as specified by @args. Any feature
+ * data is passed in @args->data, depending on the specification of the
+ * feature itself; most features do not require additional data. See
+ * section 5.27.1 of the NVMe spec (v2.0b) for feature-specific information.
+ * The command completion result is stored in @args->result.
+ *
+ * On success, @args->data_len will be updated with the actual data length
+ * received.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_mi_admin_set_features(nvme_mi_ctrl_t ctrl,
+			       struct nvme_set_features_args *args);
+
+/**
+ * nvme_mi_admin_ns_mgmt - Issue a Namespace Management command
+ * @ctrl: Controller to send command to
+ * @args: Namespace management command arguments
+ *
+ * Issues a Namespace Management command to @ctrl, with arguments specified
+ * from @args.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_mi_admin_ns_mgmt(nvme_mi_ctrl_t ctrl,
+			  struct nvme_ns_mgmt_args *args);
+
+/**
+ * nvme_mi_admin_ns_mgmt_create - Helper for Namespace Management Create command
+ * @ctrl: Controller to send command to
+ * @ns: New namespace parameters
+ * @csi: Command Set Identifier for new NS
+ * @nsid: Set to new namespace ID on create
+ *
+ * Issues a Namespace Management (Create) command to @ctrl, to create a
+ * new namespace specified by @ns, using command set @csi. On success,
+ * the new namespace ID will be written to @nsid.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_ns_mgmt_create(nvme_mi_ctrl_t ctrl,
+					       struct nvme_id_ns *ns,
+					       __u8 csi, __u32 *nsid)
+{
+	struct nvme_ns_mgmt_args args = {
+		.result = nsid,
+		.ns = ns,
+		.args_size = sizeof(args),
+		.nsid = NVME_NSID_NONE,
+		.sel = NVME_NS_MGMT_SEL_CREATE,
+		.csi = csi,
+	};
+
+	return nvme_mi_admin_ns_mgmt(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_ns_mgmt_delete - Helper for Namespace Management Delete command
+ * @ctrl: Controller to send command to
+ * @nsid: Namespace ID to delete
+ *
+ * Issues a Namespace Management (Delete) command to @ctrl, to delete the
+ * namespace with id @nsid.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_ns_mgmt_delete(nvme_mi_ctrl_t ctrl, __u32 nsid)
+{
+	struct nvme_ns_mgmt_args args = {
+		.args_size = sizeof(args),
+		.nsid = nsid,
+		.sel = NVME_NS_MGMT_SEL_DELETE,
+	};
+
+	return nvme_mi_admin_ns_mgmt(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_ns_attach() - Attach or detach namespace to controller(s)
+ * @ctrl: Controller to send command to
+ * @args: Namespace Attach command arguments
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_mi_admin_ns_attach(nvme_mi_ctrl_t ctrl,
+			    struct nvme_ns_attach_args *args);
+
+/**
+ * nvme_mi_admin_ns_attach_ctrls() - Attach namespace to controllers
+ * @ctrl: Controller to send command to
+ * @nsid: Namespace ID to attach
+ * @ctrlist: Controller list to modify attachment state of nsid
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_ns_attach_ctrls(nvme_mi_ctrl_t ctrl, __u32 nsid,
+						struct nvme_ctrl_list *ctrlist)
+{
+	struct nvme_ns_attach_args args = {
+		.result = NULL,
+		.ctrlist = ctrlist,
+		.args_size = sizeof(args),
+		.nsid = nsid,
+		.sel = NVME_NS_ATTACH_SEL_CTRL_ATTACH,
+	};
+
+	return nvme_mi_admin_ns_attach(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_ns_detach_ctrls() - Detach namespace from controllers
+ * @ctrl: Controller to send command to
+ * @nsid: Namespace ID to detach
+ * @ctrlist: Controller list to modify attachment state of nsid
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_mi_admin_ns_detach_ctrls(nvme_mi_ctrl_t ctrl, __u32 nsid,
+						struct nvme_ctrl_list *ctrlist)
+{
+	struct nvme_ns_attach_args args = {
+		.result = NULL,
+		.ctrlist = ctrlist,
+		.args_size = sizeof(args),
+		.nsid = nsid,
+		.sel = NVME_NS_ATTACH_SEL_CTRL_DEATTACH,
+	};
+
+	return nvme_mi_admin_ns_attach(ctrl, &args);
+}
+
+/**
+ * nvme_mi_admin_fw_download() - Download part or all of a firmware image to
+ * the controller
+ * @ctrl: Controller to send firmware data to
+ * @args: &struct nvme_fw_download_args argument structure
+ *
+ * The Firmware Image Download command downloads all or a portion of an image
+ * for a future update to the controller.
+ *
+ * The image may be constructed of multiple pieces that are individually
+ * downloaded with separate Firmware Image Download commands. Each Firmware
+ * Image Download command includes a Dword Offset and Number of Dwords that
+ * specify a dword range.
+ *
+ * The new firmware image is not activated as part of the Firmware Image
+ * Download command. Use nvme_mi_admin_fw_commit() to activate a newly
+ * downloaded image.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int nvme_mi_admin_fw_download(nvme_mi_ctrl_t ctrl,
+			      struct nvme_fw_download_args *args);
+
+/**
+ * nvme_mi_admin_fw_commit() - Commit firmware using the specified action
+ * @ctrl: Controller to send firmware data to
+ * @args: &struct nvme_fw_commit_args argument structure
+ *
+ * The Firmware Commit command modifies the firmware image or Boot Partitions.
+ * + * Return: 0 on success, non-zero on failure + */ +int nvme_mi_admin_fw_commit(nvme_mi_ctrl_t ctrl, + struct nvme_fw_commit_args *args); + +/** + * nvme_mi_admin_format_nvm() - Format NVMe namespace + * @ctrl: Controller to send command to + * @args: Format NVM command arguments + * + * Perform a low-level format to set the LBA data & metadata size. May destroy + * data & metadata on the specified namespaces + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_mi_admin_format_nvm(nvme_mi_ctrl_t ctrl, + struct nvme_format_nvm_args *args); + +/** + * nvme_mi_admin_sanitize_nvm() - Start a subsystem Sanitize operation + * @ctrl: Controller to send command to + * @args: Sanitize command arguments + * + * A sanitize operation alters all user data in the NVM subsystem such that + * recovery of any previous user data from any cache, the non-volatile media, + * or any Controller Memory Buffer is not possible. + * + * The Sanitize command starts a sanitize operation or to recover from a + * previously failed sanitize operation. The sanitize operation types that may + * be supported are Block Erase, Crypto Erase, and Overwrite. All sanitize + * operations are processed in the background, i.e., completion of the sanitize + * command does not indicate completion of the sanitize operation. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_mi_admin_sanitize_nvm(nvme_mi_ctrl_t ctrl, + struct nvme_sanitize_nvm_args *args); #endif /* _LIBNVME_MI_MI_H */ diff --git a/src/nvme/private.h b/src/nvme/private.h index b5610f5..cdd1bbf 100644 --- a/src/nvme/private.h +++ b/src/nvme/private.h @@ -16,8 +16,6 @@ #include "fabrics.h" #include "mi.h" -#include <uuid.h> - extern const char *nvme_ctrl_sysfs_dir; extern const char *nvme_subsys_sysfs_dir; @@ -57,7 +55,7 @@ struct nvme_ns { uint8_t eui64[8]; uint8_t nguid[16]; - uuid_t uuid; + unsigned char uuid[NVME_UUID_LEN]; enum nvme_csi csi; }; @@ -83,6 +81,7 @@ struct nvme_ctrl { char *traddr; char *trsvcid; char *dhchap_key; + char *dhchap_ctrl_key; char *cntrltype; char *dctype; bool discovery_ctrl; diff --git a/src/nvme/tree.c b/src/nvme/tree.c index e3b41e7..b992824 100644 --- a/src/nvme/tree.c +++ b/src/nvme/tree.c @@ -810,12 +810,12 @@ struct nvme_fabrics_config *nvme_ctrl_get_config(nvme_ctrl_t c) return &c->cfg; } -const char *nvme_ctrl_get_dhchap_key(nvme_ctrl_t c) +const char *nvme_ctrl_get_dhchap_host_key(nvme_ctrl_t c) { return c->dhchap_key; } -void nvme_ctrl_set_dhchap_key(nvme_ctrl_t c, const char *key) +void nvme_ctrl_set_dhchap_host_key(nvme_ctrl_t c, const char *key) { if (c->dhchap_key) { free(c->dhchap_key); @@ -825,6 +825,21 @@ void nvme_ctrl_set_dhchap_key(nvme_ctrl_t c, const char *key) c->dhchap_key = strdup(key); } +const char *nvme_ctrl_get_dhchap_key(nvme_ctrl_t c) +{ + return c->dhchap_ctrl_key; +} + +void nvme_ctrl_set_dhchap_key(nvme_ctrl_t c, const char *key) +{ + if (c->dhchap_ctrl_key) { + free(c->dhchap_ctrl_key); + c->dhchap_ctrl_key = NULL; + } + if (key) + c->dhchap_ctrl_key = strdup(key); +} + void nvme_ctrl_set_discovered(nvme_ctrl_t c, bool discovered) { c->discovered = discovered; @@ -898,6 +913,7 @@ void nvme_deconfigure_ctrl(nvme_ctrl_t c) FREE_CTRL_ATTR(c->serial); FREE_CTRL_ATTR(c->sqsize); FREE_CTRL_ATTR(c->dhchap_key); + FREE_CTRL_ATTR(c->dhchap_ctrl_key); FREE_CTRL_ATTR(c->address); FREE_CTRL_ATTR(c->dctype); 
FREE_CTRL_ATTR(c->cntrltype); @@ -1146,6 +1162,7 @@ static int nvme_configure_ctrl(nvme_root_t r, nvme_ctrl_t c, const char *path, const char *name) { DIR *d; + char *host_key; d = opendir(path); if (!d) { @@ -1166,10 +1183,19 @@ static int nvme_configure_ctrl(nvme_root_t r, nvme_ctrl_t c, const char *path, c->queue_count = nvme_get_ctrl_attr(c, "queue_count"); c->serial = nvme_get_ctrl_attr(c, "serial"); c->sqsize = nvme_get_ctrl_attr(c, "sqsize"); - c->dhchap_key = nvme_get_ctrl_attr(c, "dhchap_ctrl_secret"); - if (c->dhchap_key && !strcmp(c->dhchap_key, "none")) { - free(c->dhchap_key); - c->dhchap_key = NULL; + host_key = nvme_get_ctrl_attr(c, "dhchap_secret"); + if (host_key && c->s && c->s->h && c->s->h->dhchap_key && + (!strcmp(c->s->h->dhchap_key, host_key) || + !strcmp("none", host_key))) { + free(host_key); + host_key = NULL; + } + if (host_key) + c->dhchap_key = host_key; + c->dhchap_ctrl_key = nvme_get_ctrl_attr(c, "dhchap_ctrl_secret"); + if (c->dhchap_ctrl_key && !strcmp(c->dhchap_ctrl_key, "none")) { + free(c->dhchap_ctrl_key); + c->dhchap_ctrl_key = NULL; } c->cntrltype = nvme_get_ctrl_attr(c, "cntrltype"); c->dctype = nvme_get_ctrl_attr(c, "dctype"); @@ -1525,9 +1551,9 @@ const uint8_t *nvme_ns_get_nguid(nvme_ns_t n) return n->nguid; } -void nvme_ns_get_uuid(nvme_ns_t n, uuid_t out) +void nvme_ns_get_uuid(nvme_ns_t n, unsigned char out[NVME_UUID_LEN]) { - uuid_copy(out, n->uuid); + memcpy(out, n->uuid, NVME_UUID_LEN); } int nvme_ns_identify(nvme_ns_t n, struct nvme_id_ns *ns) diff --git a/src/nvme/tree.h b/src/nvme/tree.h index 3a103c0..156cb79 100644 --- a/src/nvme/tree.h +++ b/src/nvme/tree.h @@ -15,7 +15,6 @@ #include <stddef.h> #include <sys/types.h> -#include <uuid.h> #include "ioctl.h" #include "util.h" @@ -521,7 +520,7 @@ const uint8_t *nvme_ns_get_nguid(nvme_ns_t n); * * Copies the namespace's uuid into @out */ -void nvme_ns_get_uuid(nvme_ns_t n, uuid_t out); +void nvme_ns_get_uuid(nvme_ns_t n, unsigned char out[NVME_UUID_LEN]); /** * nvme_ns_get_sysfs_dir() - sysfs directory of a namespace @@ -876,6 +875,21 @@ const char *nvme_ctrl_get_host_traddr(nvme_ctrl_t c); const char *nvme_ctrl_get_host_iface(nvme_ctrl_t c); /** + * nvme_ctrl_get_dhchap_host_key() - Return host key + * @c: Controller to be checked + * + * Return: DH-HMAC-CHAP host key or NULL if not set + */ +const char *nvme_ctrl_get_dhchap_host_key(nvme_ctrl_t c); + +/** + * nvme_ctrl_set_dhchap_host_key() - Set host key + * @c: Host for which the key should be set + * @key: DH-HMAC-CHAP Key to set or NULL to clear existing key + */ +void nvme_ctrl_set_dhchap_host_key(nvme_ctrl_t c, const char *key); + +/** * nvme_ctrl_get_dhchap_key() - Return controller key * @c: Controller for which the key should be set * diff --git a/src/nvme/types.h b/src/nvme/types.h index 3d67bc8..94066fc 100644 --- a/src/nvme/types.h +++ b/src/nvme/types.h @@ -4857,6 +4857,34 @@ enum nvmf_tcp_sectype { }; /** + * enum nvmf_log_discovery_lid_support - Discovery log specific support + * @NVMF_LOG_DISC_LID_NONE: None + * @NVMF_LOG_DISC_LID_EXTDLPES: Extended Discovery Log Page Entries Supported + * @NVMF_LOG_DISC_LID_PLEOS: Port Local Entries Only Supported + * @NVMF_LOG_DISC_LID_ALLSUBES: All NVM Subsystem Entries Supported + */ +enum nvmf_log_discovery_lid_support { + NVMF_LOG_DISC_LID_NONE = 0, + NVMF_LOG_DISC_LID_EXTDLPES = (1 << 0), + NVMF_LOG_DISC_LID_PLEOS = (1 << 1), + NVMF_LOG_DISC_LID_ALLSUBES = (1 << 2), +}; + +/** + * enum nvmf_log_discovery_lsp - Discovery log specific field + * @NVMF_LOG_DISC_LSP_NONE: None + * 
@NVMF_LOG_DISC_LSP_EXTDLPE: Extended Discovery Log Page Entries + * @NVMF_LOG_DISC_LSP_PLEO: Port Local Entries Only + * @NVMF_LOG_DISC_LSP_ALLSUBE: All NVM Subsystem Entries + */ +enum nvmf_log_discovery_lsp { + NVMF_LOG_DISC_LSP_NONE = 0, + NVMF_LOG_DISC_LSP_EXTDLPE = (1 << 0), + NVMF_LOG_DISC_LSP_PLEO = (1 << 1), + NVMF_LOG_DISC_LSP_ALLSUBE = (1 << 2), +}; + +/** * struct nvmf_discovery_log - Discovery Log Page (Log Identifier 70h) * @genctr: Generation Counter (GENCTR): Indicates the version of the discovery * information, starting at a value of 0h. For each change in the @@ -6114,6 +6142,75 @@ static inline __u16 nvme_status_code(__u16 status_field) } /** + * enum nvme_status_type - type encoding for NVMe return values, when + * represented as an int. + * + * The nvme_* api returns an int, with negative values indicating an internal + * or syscall error, zero signifying success, positive values representing + * the NVMe status. + * + * That latter case (the NVMe status) may represent status values from + * different parts of the transport/controller/etc, and are at most 16 bits of + * data. So, we use the most-significant 3 bits of the signed int to indicate + * which type of status this is. + * + * @NVME_STATUS_TYPE_SHIFT: shift value for status bits + * @NVME_STATUS_TYPE_MASK: mask value for status bits + * + * @NVME_STATUS_TYPE_NVME: NVMe command status value, typically from CDW3 + * @NVME_STATUS_TYPE_MI: NVMe-MI header status + */ +enum nvme_status_type { + NVME_STATUS_TYPE_SHIFT = 27, + NVME_STATUS_TYPE_MASK = 0x7, + + NVME_STATUS_TYPE_NVME = 0, + NVME_STATUS_TYPE_MI = 1, +}; + +/** + * nvme_status_get_type() - extract the type from a nvme_* return value + * @status: the (non-negative) return value from the NVMe API + * + * Returns: the type component of the status. + */ +static inline __u32 nvme_status_get_type(int status) +{ + return NVME_GET(status, STATUS_TYPE); +} + +/** + * nvme_status_get_value() - extract the status value from a nvme_* return + * value + * @status: the (non-negative) return value from the NVMe API + * + * Returns: the value component of the status; the set of values will depend + * on the status type. 
+ */ +static inline __u32 nvme_status_get_value(int status) +{ + return status & ~(NVME_STATUS_TYPE_MASK << NVME_STATUS_TYPE_SHIFT); +} + +/** + * nvme_status_equals() - helper to check a status against a type and value + * @status: the (non-negative) return value from the NVMe API + * @type: the status type + * @value: the status value + * + * Returns: true if @status is of the specified type and value + */ +static inline __u32 nvme_status_equals(int status, enum nvme_status_type type, + unsigned int value) +{ + if (status < 0) + return false; + + return nvme_status_get_type(status) == type && + nvme_status_get_value(status) == value; +} + +/** * enum nvme_admin_opcode - Known NVMe admin opcodes * @nvme_admin_delete_sq: Delete I/O Submission Queue * @nvme_admin_create_sq: Create I/O Submission Queue @@ -6950,6 +7047,20 @@ enum nvme_fctype { }; /** + * enum nvme_data_tfr - Data transfer direction of the command + * @NVME_DATA_TFR_NO_DATA_TFR: No data transfer + * @NVME_DATA_TFR_HOST_TO_CTRL: Host to controller + * @NVME_DATA_TFR_CTRL_TO_HOST: Controller to host + * @NVME_DATA_TFR_BIDIRECTIONAL: Bidirectional + */ +enum nvme_data_tfr { + NVME_DATA_TFR_NO_DATA_TFR = 0x0, + NVME_DATA_TFR_HOST_TO_CTRL = 0x1, + NVME_DATA_TFR_CTRL_TO_HOST = 0x2, + NVME_DATA_TFR_BIDIRECTIONAL = 0x3, +}; + +/** * enum nvme_io_opcode - Opcodes for I/O Commands * @nvme_cmd_flush: Flush * @nvme_cmd_write: Write diff --git a/src/nvme/util.c b/src/nvme/util.c index ff5e0d8..c61dbe9 100644 --- a/src/nvme/util.c +++ b/src/nvme/util.c @@ -11,6 +11,8 @@ #include <string.h> #include <errno.h> +#include <sys/stat.h> +#include <fcntl.h> #include <sys/param.h> #include <sys/types.h> #include <arpa/inet.h> @@ -23,6 +25,11 @@ #include "util.h" #include "log.h" +/* The bionic libc implementation doesn't define LINE_MAX */ +#ifndef LINE_MAX +#define LINE_MAX 2048 +#endif + /* Source Code Control System, query version of binary with 'what' */ const char sccsid[] = "@(#)libnvme " GIT_VERSION; @@ -489,6 +496,22 @@ int nvme_get_feature_length(int fid, __u32 cdw11, __u32 *len) return 0; } +int nvme_get_feature_length2(int fid, __u32 cdw11, enum nvme_data_tfr dir, + __u32 *len) +{ + switch (fid) { + case NVME_FEAT_FID_HOST_MEM_BUF: + if (dir == NVME_DATA_TFR_HOST_TO_CTRL) { + *len = 0; + break; + } + fallthrough; + default: + return nvme_get_feature_length(fid, cdw11, len); + } + return 0; +} + int nvme_get_directive_receive_length(enum nvme_directive_dtype dtype, enum nvme_directive_receive_doper doper, __u32 *len) { @@ -812,3 +835,57 @@ const char *nvme_get_version(enum nvme_version type) return "n/a"; } } + +int nvme_uuid_to_string(unsigned char uuid[NVME_UUID_LEN], char *str) +{ + int n; + n = snprintf(str, NVME_UUID_LEN_STRING, + "%02x%02x%02x%02x-%02x%02x-%02x%02x-" + "%02x%02x-%02x%02x%02x%02x%02x%02x", + uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], + uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11], + uuid[12], uuid[13], uuid[14], uuid[15]); + return n != NVME_UUID_LEN_STRING - 1 ? -EINVAL : 0; +} + +int nvme_uuid_from_string(const char *str, unsigned char uuid[NVME_UUID_LEN]) +{ + int n; + + n = sscanf(str, + "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-" + "%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx", + &uuid[0], &uuid[1], &uuid[2], &uuid[3], &uuid[4], &uuid[5], + &uuid[6], &uuid[7], &uuid[8], &uuid[9], &uuid[10], &uuid[11], + &uuid[12], &uuid[13], &uuid[14], &uuid[15]); + return n != NVME_UUID_LEN ? 
-EINVAL : 0;
+
+}
+
+int nvme_uuid_random(unsigned char uuid[NVME_UUID_LEN])
+{
+	int f;
+	ssize_t n;
+
+	f = open("/dev/urandom", O_RDONLY);
+	if (f < 0)
+		return -errno;
+	n = read(f, uuid, NVME_UUID_LEN);
+	if (n < 0) {
+		close(f);
+		return -errno;
+	} else if (n != NVME_UUID_LEN) {
+		close(f);
+		return -EIO;
+	}
+	close(f);
+
+	/*
+	 * See https://www.rfc-editor.org/rfc/rfc4122#section-4.4
+	 * Algorithms for Creating a UUID from Truly Random
+	 * or Pseudo-Random Numbers
+	 */
+	uuid[6] = (uuid[6] & 0x0f) | 0x40;
+	uuid[8] = (uuid[8] & 0x3f) | 0x80;
+
+	return 0;
+}
diff --git a/src/nvme/util.h b/src/nvme/util.h
index 6f1d3e9..e72c156 100644
--- a/src/nvme/util.h
+++ b/src/nvme/util.h
@@ -156,6 +156,23 @@ void nvme_init_copy_range_f1(struct nvme_copy_range_f1 *copy, __u16 *nlbs,
 int nvme_get_feature_length(int fid, __u32 cdw11, __u32 *len);
 
 /**
+ * nvme_get_feature_length2() - Retrieve the command payload length for a
+ * specific feature identifier
+ * @fid: Feature identifier, see &enum nvme_features_id.
+ * @cdw11: The cdw11 value may affect the transfer (only known fid is
+ * %NVME_FEAT_FID_HOST_ID)
+ * @dir: Data transfer direction, see &enum nvme_data_tfr; the direction may
+ * affect the transfer length (only known fid is
+ * %NVME_FEAT_FID_HOST_MEM_BUF).
+ * @len: On success, set to this feature's payload length in bytes.
+ *
+ * Return: 0 on success, -1 with errno set to EINVAL if the function did not
+ * recognize &fid.
+ */
+int nvme_get_feature_length2(int fid, __u32 cdw11, enum nvme_data_tfr dir,
+			     __u32 *len);
+
+/**
  * nvme_get_directive_receive_length() - Get directive receive length
  * @dtype: Directive type, see &enum nvme_directive_dtype
  * @doper: Directive receive operation, see &enum nvme_directive_receive_doper
@@ -575,4 +592,36 @@ enum nvme_version {
  */
 const char *nvme_get_version(enum nvme_version type);
 
+#define NVME_UUID_LEN_STRING 37 /* 1b4e28ba-2fa1-11d2-883f-0016d3cca427 + \0 */
+#define NVME_UUID_LEN 16
+
+/**
+ * nvme_uuid_to_string - Return string representation of encoded UUID
+ * @uuid: Binary encoded input UUID
+ * @str: Output string representation of UUID; must have space for at least
+ * %NVME_UUID_LEN_STRING bytes
+ *
+ * Return: 0 on success, or a negative error code if the conversion fails.
+ */
+int nvme_uuid_to_string(unsigned char uuid[NVME_UUID_LEN], char *str);
+
+/**
+ * nvme_uuid_from_string - Return encoded UUID representation of string UUID
+ * @str: Input string representation of UUID
+ * @uuid: Output binary encoded UUID
+ *
+ * Return: 0 on success, or a negative error code if the conversion fails.
+ */
+int nvme_uuid_from_string(const char *str, unsigned char uuid[NVME_UUID_LEN]);
+
+/**
+ * nvme_uuid_random - Generate random UUID
+ * @uuid: Generated random UUID
+ *
+ * Generates a random UUID according to
+ * https://www.rfc-editor.org/rfc/rfc4122#section-4.4
+ *
+ * Return: 0 on success, or a negative error code if random data could not
+ * be obtained.
+ */
+int nvme_uuid_random(unsigned char uuid[NVME_UUID_LEN]);
+
 #endif /* _LIBNVME_UTIL_H */
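The remainder of this page collects a few editorial usage sketches for the interfaces added by this merge; they are not part of the upstream sources. Each sketch assumes an nvme_mi_ctrl_t handle already set up through the existing MI endpoint APIs, and assumes the umbrella <libnvme-mi.h> header (or <libnvme.h> for the non-MI utilities). First, a minimal read of the controller-wide SMART / Health log via the new nvme_mi_admin_get_log_smart() helper:

#include <stdio.h>
#include <libnvme-mi.h>

/* Read the controller-wide SMART / Health Information log (NSID FFFFFFFFh)
 * over the MI transport and print a couple of fields. */
int show_smart(nvme_mi_ctrl_t ctrl)
{
	struct nvme_smart_log smart = { 0 };
	int rc;

	rc = nvme_mi_admin_get_log_smart(ctrl, NVME_NSID_ALL, false, &smart);
	if (rc < 0) {
		perror("nvme_mi_admin_get_log_smart");
		return rc;
	}
	if (rc > 0) {
		/* a positive return carries an NVMe or MI status value */
		fprintf(stderr, "SMART log failed, status 0x%x\n", rc);
		return rc;
	}

	printf("critical warning : 0x%02x\n", smart.critical_warning);
	printf("percentage used  : %u%%\n", smart.percent_used);
	return 0;
}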
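A sketch of the telemetry helpers, nvme_mi_admin_get_log_create_telemetry_host() and nvme_mi_admin_get_log_telemetry_host(): capture a fresh host-initiated log, size the buffer from the data area 3 last block, then re-read the existing capture. The dalb3 field name and the 512-byte telemetry block size are assumptions taken from the nvme_telemetry_log definition and the specification, not from this diff.

#include <stdio.h>
#include <stdlib.h>
#include <libnvme-mi.h>

/* Capture a host-initiated telemetry log and read it back in full. */
int pull_host_telemetry(nvme_mi_ctrl_t ctrl, void **buf, size_t *len)
{
	struct nvme_telemetry_log hdr;
	size_t total;
	int rc;

	/* create a new capture; this also returns the 512-byte header */
	rc = nvme_mi_admin_get_log_create_telemetry_host(ctrl, &hdr);
	if (rc)
		return rc;

	/* dalb3 is the last 512-byte block of data area 3, counting the header */
	total = 512 * ((size_t)le16_to_cpu(hdr.dalb3) + 1);
	*buf = malloc(total);
	if (!*buf)
		return -1;

	/* re-read the existing capture, without creating a new one */
	rc = nvme_mi_admin_get_log_telemetry_host(ctrl, 0, total, *buf);
	if (rc) {
		free(*buf);
		return rc;
	}
	*len = total;
	return 0;
}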
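The ANA helper nvme_mi_admin_get_log_ana() in use. The nvme_ana_log header fields (chgcnt, ngrps) and the NVME_LOG_ANA_LSP_RGO_NAMESPACES selector are assumed from the existing libnvme type definitions, and the fixed 4 KiB buffer is only for illustration; real callers would size it from Identify Controller.

#include <stdio.h>
#include <stdlib.h>
#include <libnvme-mi.h>

/* Fetch the ANA log (header plus group descriptors) and print its summary. */
int show_ana_state(nvme_mi_ctrl_t ctrl)
{
	size_t len = 4096;	/* illustration only */
	struct nvme_ana_log *ana;
	int rc;

	ana = malloc(len);
	if (!ana)
		return -1;

	rc = nvme_mi_admin_get_log_ana(ctrl, NVME_LOG_ANA_LSP_RGO_NAMESPACES,
				       false, 0, len, ana);
	if (rc) {
		free(ana);
		return rc;
	}

	printf("change count: %llu, %u ANA group descriptor(s)\n",
	       (unsigned long long)le64_to_cpu(ana->chgcnt),
	       le16_to_cpu(ana->ngrps));
	free(ana);
	return 0;
}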
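nvme_mi_admin_get_log_discovery() only makes sense against a fabrics discovery controller; a common pattern is the two-step read below, which fetches the fixed nvmf_discovery_log header first and then the full record list. A production caller would also re-check genctr between the two reads.

#include <stdlib.h>
#include <libnvme-mi.h>

/* Returns a heap-allocated discovery log, or NULL on failure. */
struct nvmf_discovery_log *fetch_disc_log(nvme_mi_ctrl_t ctrl)
{
	struct nvmf_discovery_log hdr, *log;
	size_t len;
	int rc;

	/* header only: tells us how many records follow */
	rc = nvme_mi_admin_get_log_discovery(ctrl, false, 0, sizeof(hdr), &hdr);
	if (rc)
		return NULL;

	len = sizeof(hdr) +
	      le64_to_cpu(hdr.numrec) * sizeof(struct nvmf_disc_log_entry);
	log = malloc(len);
	if (!log)
		return NULL;

	/* full log, including the header again */
	rc = nvme_mi_admin_get_log_discovery(ctrl, false, 0, len, log);
	if (rc) {
		free(log);
		return NULL;
	}
	return log;
}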
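nvme_mi_admin_get_features_simple() suits features that return their value purely in CQE dword 0; here, Number of Queues:

#include <stdio.h>
#include <libnvme-mi.h>

/* Query the current Number of Queues feature; no data buffer is needed. */
int show_queue_counts(nvme_mi_ctrl_t ctrl)
{
	__u32 result;
	int rc;

	rc = nvme_mi_admin_get_features_simple(ctrl, NVME_FEAT_FID_NUM_QUEUES,
					       NVME_NSID_NONE, &result);
	if (rc)
		return rc;

	/* NSQA / NCQA are zero-based counts in the lower / upper 16 bits */
	printf("submission queues allocated: %u\n", (result & 0xffff) + 1);
	printf("completion queues allocated: %u\n", (result >> 16) + 1);
	return 0;
}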
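The namespace-management helpers nvme_mi_admin_ns_mgmt_create() and nvme_mi_admin_ns_mgmt_delete() in a create-then-delete round trip; the nvme_id_ns sizing fields used here (nsze, ncap, flbas, nmic) come from the standard Identify Namespace layout, and the sizes are arbitrary.

#include <stdio.h>
#include <libnvme-mi.h>

/* Create a small namespace (sizes are in logical blocks of LBA format 0),
 * then delete it again. Real code would normally attach it to a controller
 * first; see nvme_mi_admin_ns_attach_ctrls(). */
int ns_create_delete_demo(nvme_mi_ctrl_t ctrl)
{
	struct nvme_id_ns ns = { 0 };
	__u32 nsid;
	int rc;

	ns.nsze = cpu_to_le64(1024 * 1024);	/* namespace size */
	ns.ncap = cpu_to_le64(1024 * 1024);	/* namespace capacity */
	ns.flbas = 0;				/* LBA format 0 */
	ns.nmic = 0;				/* not shared */

	rc = nvme_mi_admin_ns_mgmt_create(ctrl, &ns, NVME_CSI_NVM, &nsid);
	if (rc) {
		fprintf(stderr, "create failed: %d\n", rc);
		return rc;
	}
	printf("created namespace %u\n", nsid);

	return nvme_mi_admin_ns_mgmt_delete(ctrl, nsid);
}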
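A firmware update sketch built on nvme_mi_admin_fw_download() and nvme_mi_admin_fw_commit(). The argument-struct field names (offset, data, data_len, slot, action) and the NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE action are assumed from the corresponding libnvme argument structures; the 4096-byte chunking is an arbitrary choice.

#include <libnvme-mi.h>

/* Stage a firmware image in 4096-byte pieces, then mark it for activation
 * in slot 1 on the next reset. */
int update_firmware(nvme_mi_ctrl_t ctrl, void *fw, __u32 fw_len)
{
	__u32 off, xfer;
	int rc;

	for (off = 0; off < fw_len; off += xfer) {
		xfer = fw_len - off > 4096 ? 4096 : fw_len - off;

		struct nvme_fw_download_args dl_args = {
			.args_size = sizeof(dl_args),
			.offset = off,			/* byte offset */
			.data_len = xfer,
			.data = (unsigned char *)fw + off,
		};

		rc = nvme_mi_admin_fw_download(ctrl, &dl_args);
		if (rc)
			return rc;
	}

	struct nvme_fw_commit_args commit_args = {
		.args_size = sizeof(commit_args),
		.slot = 1,
		.action = NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE,
	};

	return nvme_mi_admin_fw_commit(ctrl, &commit_args);
}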
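Starting a block-erase sanitize with nvme_mi_admin_sanitize_nvm() and polling progress with nvme_mi_admin_get_log_sanitize(). The sanact and sstat names are assumed from the libnvme Sanitize definitions, and the raw SSTAT check (bits 2:0 equal to 2h, "in progress") stands in for the corresponding enum values.

#include <unistd.h>
#include <libnvme-mi.h>

/* Kick off a block-erase sanitize and wait for it to leave the
 * in-progress state. */
int sanitize_block_erase(nvme_mi_ctrl_t ctrl)
{
	struct nvme_sanitize_nvm_args args = {
		.args_size = sizeof(args),
		.sanact = NVME_SANITIZE_SANACT_START_BLOCK_ERASE,
	};
	struct nvme_sanitize_log_page log;
	int rc;

	rc = nvme_mi_admin_sanitize_nvm(ctrl, &args);
	if (rc)
		return rc;

	do {
		sleep(1);
		rc = nvme_mi_admin_get_log_sanitize(ctrl, false, &log);
		if (rc)
			return rc;
		/* SSTAT bits 2:0 == 2h means a sanitize is in progress */
	} while ((le16_to_cpu(log.sstat) & 0x7) == 0x2);

	return 0;
}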
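On the tree side, the new nvme_ctrl_get_dhchap_host_key() / nvme_ctrl_get_dhchap_key() accessors can be used while walking a scanned topology, for example to report which controllers have DH-HMAC-CHAP keys configured; the iteration macros and nvme_scan() are the existing libnvme tree APIs.

#include <stdio.h>
#include <libnvme.h>

/* List each controller's configured DH-HMAC-CHAP keys, if any. */
int dump_dhchap_keys(void)
{
	nvme_root_t r;
	nvme_host_t h;
	nvme_subsystem_t s;
	nvme_ctrl_t c;

	r = nvme_scan(NULL);
	if (!r)
		return -1;

	nvme_for_each_host(r, h) {
		nvme_for_each_subsystem(h, s) {
			nvme_subsystem_for_each_ctrl(s, c) {
				const char *hk = nvme_ctrl_get_dhchap_host_key(c);
				const char *ck = nvme_ctrl_get_dhchap_key(c);

				printf("%s: host key %s, ctrl key %s\n",
				       nvme_ctrl_get_name(c),
				       hk ? hk : "-", ck ? ck : "-");
			}
		}
	}

	nvme_free_tree(r);
	return 0;
}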
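The status-type encoding added to types.h gives callers a uniform way to interpret the int returned by the MI wrappers; a small helper such as the following covers all three cases (negative errno-style errors, success, and NVMe versus MI status values):

#include <stdio.h>
#include <libnvme-mi.h>

/* Report the outcome of an MI admin call in a human-readable form. */
void report_status(const char *op, int rc)
{
	if (rc < 0) {
		perror(op);
		return;
	}
	if (rc == 0) {
		printf("%s: ok\n", op);
		return;
	}

	if (nvme_status_get_type(rc) == NVME_STATUS_TYPE_MI)
		printf("%s: MI status 0x%x\n", op, nvme_status_get_value(rc));
	else
		printf("%s: NVMe status 0x%x\n", op, nvme_status_get_value(rc));
}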
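nvme_get_feature_length2() extends the existing length lookup with a transfer direction, which matters for the Host Memory Buffer feature (a data buffer is only carried controller-to-host):

#include <stdio.h>
#include <libnvme.h>

/* Show how the expected HMB buffer length differs by transfer direction. */
void show_hmb_lengths(void)
{
	__u32 get_len = 0, set_len = 0;

	nvme_get_feature_length2(NVME_FEAT_FID_HOST_MEM_BUF, 0,
				 NVME_DATA_TFR_CTRL_TO_HOST, &get_len);
	nvme_get_feature_length2(NVME_FEAT_FID_HOST_MEM_BUF, 0,
				 NVME_DATA_TFR_HOST_TO_CTRL, &set_len);

	printf("HMB descriptor buffer: get=%u bytes, set=%u bytes\n",
	       get_len, set_len);
}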
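Finally, the new UUID utilities replace the libuuid dependency; a round trip through nvme_uuid_random(), nvme_uuid_to_string() and nvme_uuid_from_string() looks like this:

#include <stdio.h>
#include <string.h>
#include <libnvme.h>

/* Generate a random (version 4) UUID, print it, and parse it back. */
int uuid_demo(void)
{
	unsigned char uuid[NVME_UUID_LEN], parsed[NVME_UUID_LEN];
	char str[NVME_UUID_LEN_STRING];
	int rc;

	rc = nvme_uuid_random(uuid);
	if (rc)
		return rc;

	rc = nvme_uuid_to_string(uuid, str);
	if (rc)
		return rc;
	printf("generated hostid candidate: %s\n", str);

	rc = nvme_uuid_from_string(str, parsed);
	if (rc)
		return rc;

	return memcmp(uuid, parsed, NVME_UUID_LEN) ? -1 : 0;
}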