path: root/src/nvme/mi.c
author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-05 08:35:44 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-05 08:35:44 +0000
commit		7818b1a548434d2bf91a0662c547445013ae952d (patch)
tree		347cb0b43ebbe45129227aebc254f3859b4726b4	/src/nvme/mi.c
parent		Adding upstream version 1.9. (diff)
Adding upstream version 1.10. (upstream/1.10, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--	src/nvme/mi.c	244
1 file changed, 212 insertions(+), 32 deletions(-)
diff --git a/src/nvme/mi.c b/src/nvme/mi.c
index 84d51b0..d98c74a 100644
--- a/src/nvme/mi.c
+++ b/src/nvme/mi.c
@@ -11,6 +11,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
+#include <unistd.h>
#include <ccan/array_size/array_size.h>
#include <ccan/endian/endian.h>
@@ -41,18 +42,32 @@ static bool nvme_mi_probe_enabled_default(void)
*/
nvme_root_t nvme_mi_create_root(FILE *fp, int log_level)
{
- struct nvme_root *r = calloc(1, sizeof(*r));
+ struct nvme_root *r;
+ int fd;
+ r = calloc(1, sizeof(*r));
if (!r) {
+ errno = ENOMEM;
return NULL;
}
- r->log_level = log_level;
- r->fp = stderr;
+
+ if (fp) {
+ fd = fileno(fp);
+ if (fd < 0) {
+ free(r);
+ return NULL;
+ }
+ } else
+ fd = STDERR_FILENO;
+
+ r->log.fd = fd;
+ r->log.level = log_level;
+
r->mi_probe_enabled = nvme_mi_probe_enabled_default();
- if (fp)
- r->fp = fp;
+
list_head_init(&r->hosts);
list_head_init(&r->endpoints);
+
return r;
}
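
A minimal caller-side sketch of the new behaviour, assuming the usual <libnvme-mi.h> header and nvme_mi_free_root() teardown: the log level is a syslog-style constant, and a NULL return now leaves errno set (ENOMEM, or whatever fileno() reported for the supplied stream).

	#include <stdio.h>
	#include <syslog.h>
	#include <libnvme-mi.h>

	int main(void)
	{
		/* log to stderr at the syslog LOG_WARNING level */
		nvme_root_t root = nvme_mi_create_root(stderr, LOG_WARNING);

		if (!root) {
			perror("nvme_mi_create_root");	/* errno set by the library */
			return 1;
		}

		/* ... open an endpoint, issue commands ... */

		nvme_mi_free_root(root);
		return 0;
	}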
@@ -128,6 +143,20 @@ void nvme_mi_ep_probe(struct nvme_mi_ep *ep)
struct nvme_mi_ctrl *ctrl;
int rc;
+ /* Ensure the probe occurs at most once. This isn't just to mitigate doubling
+ * a linear stream of commands; it also terminates recursion via the
+ * nvme_mi_submit() call issued by nvme_mi_admin_identify_partial() below.
+ */
+ if (ep->quirks_probed)
+ return;
+
+ /* Mark ep->quirks as valid. Note that for the purpose of quirk probing,
+ * the quirk probe itself cannot rely on quirks, and so the fact that none are
+ * yet set is desirable. The request that triggered nvme_mi_submit() will have
+ * an initialised ep->quirks when we return from the root probe call.
+ */
+ ep->quirks_probed = true;
+
if (!ep->root->mi_probe_enabled)
return;
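
This guard matters because nvme_mi_submit() (see the hunk further down) now calls nvme_mi_ep_probe() on every submission, while the probe itself submits an Identify command through that same path. A hypothetical sketch of the set-the-flag-before-submitting pattern, using an invented example_ep type rather than the real struct nvme_mi_ep:

	#include <stdbool.h>

	struct example_ep {
		bool probed;	/* stands in for ep->quirks_probed */
	};

	static void example_probe(struct example_ep *ep)
	{
		if (ep->probed)
			return;		/* the recursive re-entry stops here */

		ep->probed = true;	/* set before issuing any commands */

		/*
		 * Probe commands go through the normal submit path, which
		 * calls back into example_probe(); the flag above turns that
		 * re-entry into a no-op instead of infinite recursion.
		 */
	}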
@@ -250,6 +279,7 @@ struct nvme_mi_ep *nvme_mi_init_ep(nvme_root_t root)
list_node_init(&ep->root_entry);
ep->root = root;
+ ep->quirks_probed = false;
ep->controllers_scanned = false;
ep->timeout = default_timeout;
ep->mprt_max = 0;
@@ -327,7 +357,7 @@ int nvme_mi_scan_ep(nvme_mi_ep_t ep, bool force_rescan)
rc = nvme_mi_mi_read_mi_data_ctrl_list(ep, 0, &list);
if (rc)
- return -1;
+ return rc;
n_ctrl = le16_to_cpu(list.num);
if (n_ctrl > NVME_ID_CTRL_LIST_MAX) {
@@ -413,6 +443,8 @@ int nvme_mi_submit(nvme_mi_ep_t ep, struct nvme_mi_req *req,
return -1;
}
+ nvme_mi_ep_probe(ep);
+
if (ep->transport->mic_enabled)
nvme_mi_calc_req_mic(req);
@@ -639,6 +671,7 @@ int nvme_mi_admin_admin_passthru(nvme_mi_ctrl_t ctrl, __u8 opcode, __u8 flags,
struct nvme_mi_admin_req_hdr req_hdr;
struct nvme_mi_resp resp;
struct nvme_mi_req req;
+ unsigned int timeout_save;
int rc;
int direction = opcode & 0x3;
bool has_write_data = false;
@@ -665,11 +698,6 @@ int nvme_mi_admin_admin_passthru(nvme_mi_ctrl_t ctrl, __u8 opcode, __u8 flags,
has_read_data = true;
}
- if (timeout_ms > nvme_mi_ep_get_timeout(ctrl->ep)) {
- /* Set timeout if user needs a bigger timeout */
- nvme_mi_ep_set_timeout(ctrl->ep, timeout_ms);
- }
-
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id, opcode);
req_hdr.cdw1 = cpu_to_le32(nsid);
req_hdr.cdw2 = cpu_to_le32(cdw2);
@@ -701,7 +729,17 @@ int nvme_mi_admin_admin_passthru(nvme_mi_ctrl_t ctrl, __u8 opcode, __u8 flags,
resp.data_len = data_len;
}
+ /* if the user has specified a custom timeout, save the current
+ * timeout and override
+ */
+ if (timeout_ms != 0) {
+ timeout_save = nvme_mi_ep_get_timeout(ctrl->ep);
+ nvme_mi_ep_set_timeout(ctrl->ep, timeout_ms);
+ }
rc = nvme_mi_submit(ctrl->ep, &req, &resp);
+ if (timeout_ms != 0)
+ nvme_mi_ep_set_timeout(ctrl->ep, timeout_save);
+
if (rc)
return rc;
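
With this change the caller-supplied timeout applies to this one submission only and the endpoint's previous timeout is restored afterwards, rather than being raised permanently. Callers wanting the same one-off behaviour around other MI calls can use the same getter/setter pair; a fragment, assuming an already-initialised ep, ctrl and args (the 5000 ms value is arbitrary):

	unsigned int saved = nvme_mi_ep_get_timeout(ep);
	int rc;

	nvme_mi_ep_set_timeout(ep, 5000);		/* temporary 5 s timeout */
	rc = nvme_mi_admin_get_features(ctrl, &args);	/* any MI admin call */
	nvme_mi_ep_set_timeout(ep, saved);		/* restore the old value */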
@@ -896,6 +934,133 @@ int nvme_mi_admin_get_log(nvme_mi_ctrl_t ctrl, struct nvme_get_log_args *args)
return nvme_mi_admin_get_log_page(ctrl, 4096, args);
}
+static int read_ana_chunk(nvme_mi_ctrl_t ctrl, enum nvme_log_ana_lsp lsp, bool rae,
+ __u8 *log, __u8 **read, __u8 *to_read, __u8 *log_end)
+{
+ if (to_read > log_end) {
+ errno = ENOSPC;
+ return -1;
+ }
+
+ while (*read < to_read) {
+ __u32 len = min(log_end - *read, NVME_LOG_PAGE_PDU_SIZE);
+ int ret;
+
+ ret = nvme_mi_admin_get_log_ana(ctrl, lsp, rae,
+ *read - log, len, *read);
+ if (ret)
+ return ret;
+
+ *read += len;
+ }
+ return 0;
+}
+
+static int try_read_ana(nvme_mi_ctrl_t ctrl, enum nvme_log_ana_lsp lsp, bool rae,
+ struct nvme_ana_log *log, __u8 *log_end,
+ __u8 *read, __u8 **to_read, bool *may_retry)
+{
+ __u16 ngrps = le16_to_cpu(log->ngrps);
+
+ while (ngrps--) {
+ __u8 *group = *to_read;
+ int ret;
+ __le32 nnsids;
+
+ *to_read += sizeof(*log->descs);
+ ret = read_ana_chunk(ctrl, lsp, rae,
+ (__u8 *)log, &read, *to_read, log_end);
+ if (ret) {
+ /*
+ * If the provided buffer isn't long enough,
+ * the log page may have changed while reading it
+ * and the computed length was inaccurate.
+ * Have the caller check chgcnt and retry.
+ */
+ *may_retry = errno == ENOSPC;
+ return ret;
+ }
+
+ /*
+ * struct nvme_ana_group_desc has 8-byte alignment
+ * but the group pointer is only 4-byte aligned.
+ * Don't dereference the misaligned pointer.
+ */
+ memcpy(&nnsids,
+ group + offsetof(struct nvme_ana_group_desc, nnsids),
+ sizeof(nnsids));
+ *to_read += le32_to_cpu(nnsids) * sizeof(__le32);
+ ret = read_ana_chunk(ctrl, lsp, rae,
+ (__u8 *)log, &read, *to_read, log_end);
+ if (ret) {
+ *may_retry = errno == ENOSPC;
+ return ret;
+ }
+ }
+
+ *may_retry = true;
+ return 0;
+}
+
+int nvme_mi_admin_get_ana_log_atomic(nvme_mi_ctrl_t ctrl, bool rgo, bool rae,
+ unsigned int retries,
+ struct nvme_ana_log *log, __u32 *len)
+{
+ const enum nvme_log_ana_lsp lsp =
+ rgo ? NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY : 0;
+ /* Get Log Page can only fetch multiples of dwords */
+ __u8 * const log_end = (__u8 *)log + (*len & -4);
+ __u8 *read = (__u8 *)log;
+ __u8 *to_read;
+ int ret;
+
+ if (!retries) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ to_read = (__u8 *)log->descs;
+ ret = read_ana_chunk(ctrl, lsp, rae,
+ (__u8 *)log, &read, to_read, log_end);
+ if (ret)
+ return ret;
+
+ do {
+ bool may_retry = false;
+ int saved_ret;
+ int saved_errno;
+ __le64 chgcnt;
+
+ saved_ret = try_read_ana(ctrl, lsp, rae, log, log_end,
+ read, &to_read, &may_retry);
+ /*
+ * If the log page was read with multiple Get Log Page commands,
+ * chgcnt must be checked afterwards to ensure atomicity
+ */
+ *len = to_read - (__u8 *)log;
+ if (*len <= NVME_LOG_PAGE_PDU_SIZE || !may_retry)
+ return saved_ret;
+
+ saved_errno = errno;
+ chgcnt = log->chgcnt;
+ read = (__u8 *)log;
+ to_read = (__u8 *)log->descs;
+ ret = read_ana_chunk(ctrl, lsp, rae,
+ (__u8 *)log, &read, to_read, log_end);
+ if (ret)
+ return ret;
+
+ if (log->chgcnt == chgcnt) {
+ /* Log hasn't changed; return try_read_ana() result */
+ errno = saved_errno;
+ return saved_ret;
+ }
+ } while (--retries);
+
+ errno = EAGAIN;
+ return -1;
+}
+
int nvme_mi_admin_security_send(nvme_mi_ctrl_t ctrl,
struct nvme_security_send_args *args)
{
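
A possible way to call the nvme_mi_admin_get_ana_log_atomic() helper added above; the 4096-byte buffer and three-attempt retry count are arbitrary choices for this sketch, and ctrl is assumed to come from nvme_mi_init_ctrl() or a prior endpoint scan. On success *len is updated to the number of bytes the log actually occupies; EAGAIN means the log kept changing across all retries.

	#include <stdio.h>
	#include <stdlib.h>
	#include <libnvme-mi.h>

	static int fetch_ana_log(nvme_mi_ctrl_t ctrl)
	{
		__u32 len = 4096;	/* in: buffer size, out: bytes of log read */
		struct nvme_ana_log *ana = malloc(len);
		int rc;

		if (!ana)
			return -1;

		/* rgo=false: include namespace lists; rae=false; up to 3 attempts */
		rc = nvme_mi_admin_get_ana_log_atomic(ctrl, false, false, 3, ana, &len);
		if (rc < 0)
			perror("nvme_mi_admin_get_ana_log_atomic");
		else if (rc > 0)
			fprintf(stderr, "NVMe status 0x%x\n", rc);
		else
			printf("read %u bytes of ANA log\n", len);

		free(ana);
		return rc;
	}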
@@ -1003,8 +1168,10 @@ int nvme_mi_admin_get_features(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_get_features);
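
This hunk, and the matching ones below for Set Features, Namespace Attach, Firmware Download/Commit, Format NVM and Sanitize, moves argument validation from returning -EINVAL to the library-wide convention of returning -1 with errno set, leaving positive return values free to carry NVMe status codes. A caller-side fragment; the feature ID and selector are illustrative, and ctrl is assumed to be initialised:

	__u32 result;
	struct nvme_get_features_args args = {
		.args_size = sizeof(args),
		.fid = NVME_FEAT_FID_TEMP_THRESH,
		.sel = NVME_GET_FEATURES_SEL_CURRENT,
		.result = &result,
	};
	int rc = nvme_mi_admin_get_features(ctrl, &args);

	if (rc < 0)
		perror("nvme_mi_admin_get_features");	/* EINVAL, transport errors, ... */
	else if (rc > 0)
		fprintf(stderr, "NVMe status 0x%x\n", rc);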
@@ -1042,8 +1209,10 @@ int nvme_mi_admin_set_features(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_set_features);
@@ -1140,8 +1309,10 @@ int nvme_mi_admin_ns_attach(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_ns_attach);
@@ -1173,17 +1344,20 @@ int nvme_mi_admin_fw_download(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
-
- if (args->data_len & 0x3)
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
- if (args->offset & 0x3)
- return -EINVAL;
+ if ((args->data_len & 0x3) || (!args->data_len)) {
+ errno = EINVAL;
+ return -1;
+ }
- if (!args->data_len)
- return -EINVAL;
+ if (args->offset & 0x3) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_fw_download);
@@ -1215,8 +1389,10 @@ int nvme_mi_admin_fw_commit(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_fw_commit);
@@ -1245,8 +1421,10 @@ int nvme_mi_admin_format_nvm(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_format_nvm);
@@ -1279,8 +1457,10 @@ int nvme_mi_admin_sanitize_nvm(nvme_mi_ctrl_t ctrl,
struct nvme_mi_req req;
int rc;
- if (args->args_size < sizeof(*args))
- return -EINVAL;
+ if (args->args_size < sizeof(*args)) {
+ errno = EINVAL;
+ return -1;
+ }
nvme_mi_admin_init_req(&req, &req_hdr, ctrl->id,
nvme_admin_sanitize_nvm);