author     Daniel Baumann <daniel.baumann@progress-linux.org>   2022-07-14 18:53:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2022-07-14 18:53:09 +0000
commit     3945f3269b3e2763faa1ab22d225ca4dd1856b82 (patch)
tree       7e96ec768baa3807ce3a1076a74037a287f4caa8 /src
parent     Initial commit. (diff)
Adding upstream version 1.0. (upstream/1.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src')
-rw-r--r-- | src/libnvme.h        |   30
-rw-r--r-- | src/libnvme.map      |  341
-rw-r--r-- | src/licenses/BSD-MIT |   17
-rw-r--r-- | src/licenses/CC0     |   28
-rw-r--r-- | src/meson.build      |   74
-rw-r--r-- | src/nvme/cleanup.c   |    4
-rw-r--r-- | src/nvme/cleanup.h   |   18
-rw-r--r-- | src/nvme/fabrics.c   | 1310
-rw-r--r-- | src/nvme/fabrics.h   |  273
-rw-r--r-- | src/nvme/filters.c   |  118
-rw-r--r-- | src/nvme/filters.h   |   96
-rw-r--r-- | src/nvme/ioctl.c     | 1870
-rw-r--r-- | src/nvme/ioctl.h     | 4407
-rw-r--r-- | src/nvme/json.c      |  466
-rw-r--r-- | src/nvme/linux.c     |  679
-rw-r--r-- | src/nvme/linux.h     |  208
-rw-r--r-- | src/nvme/log.c       |   99
-rw-r--r-- | src/nvme/log.h       |   38
-rw-r--r-- | src/nvme/private.h   |  151
-rw-r--r-- | src/nvme/tree.c      | 1909
-rw-r--r-- | src/nvme/tree.h      | 1231
-rw-r--r-- | src/nvme/types.h     | 7069
-rw-r--r-- | src/nvme/util.c      |  777
-rw-r--r-- | src/nvme/util.h      |  535
24 files changed, 21748 insertions, 0 deletions
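
The patch below exports the libnvme 1.0 public API: libnvme.h pulls in the per-topic headers, libnvme.map pins the exported symbols, and src/meson.build builds libnvme.so together with a pkg-config file. For orientation, here is a minimal, hypothetical consumer of the fabrics interface added by this commit; it is not part of the patch. The nvmf_* prototypes match src/nvme/fabrics.h below, while the nvme_create_root()/nvme_default_host()/nvme_create_ctrl()/nvme_disconnect_ctrl() signatures are assumed from src/nvme/tree.h (added by this commit but not shown in full), and the target address and port are placeholders. It would be compiled against the generated pkg-config file, e.g. cc discover.c $(pkg-config --cflags --libs libnvme).

/*
 * Hypothetical example, not part of this patch: connect to the well-known
 * NVMe-oF discovery subsystem over TCP and fetch the discovery log page.
 */
#include <stdio.h>
#include <stdlib.h>
#include <syslog.h>	/* LOG_INFO */

#include <libnvme.h>

int main(void)
{
	struct nvme_fabrics_config cfg;
	struct nvmf_discovery_log *log = NULL;
	nvme_root_t r;
	nvme_host_t h;
	nvme_ctrl_t c;
	int ret = -1;

	/* All-default fabrics options (tos = -1, ctrl_loss_tmo = 600s). */
	nvmf_default_config(&cfg);

	r = nvme_create_root(stderr, LOG_INFO);	/* assumed tree.h signature */
	if (!r)
		return EXIT_FAILURE;
	h = nvme_default_host(r);	/* hostnqn/hostid from SYSCONFDIR/nvme */
	if (!h)
		goto out;

	/* Well-known discovery subsystem on a hypothetical NVMe/TCP target. */
	c = nvme_create_ctrl(r, NVME_DISC_SUBSYS_NAME, "tcp",
			     "192.168.0.10", NULL, NULL, "8009");
	if (!c)
		goto out;

	ret = nvmf_add_ctrl(h, c, &cfg);	/* writes to /dev/nvme-fabrics */
	if (ret) {
		nvme_free_ctrl(c);		/* ctrl was never linked into the tree */
		goto out;
	}

	/* Fetch the discovery log, retrying on generation counter changes. */
	if (!nvmf_get_discovery_log(c, &log, 4))
		printf("%s: discovery log page retrieved\n",
		       nvme_ctrl_get_name(c));

	free(log);
	nvme_disconnect_ctrl(c);
	ret = 0;
out:
	nvme_free_tree(r);	/* also frees hosts and connected ctrls */
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}
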
diff --git a/src/libnvme.h b/src/libnvme.h new file mode 100644 index 0000000..6be9058 --- /dev/null +++ b/src/libnvme.h @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#ifndef _LIBNVME_H +#define _LIBNVME_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvme/types.h" +#include "nvme/linux.h" +#include "nvme/ioctl.h" +#include "nvme/fabrics.h" +#include "nvme/filters.h" +#include "nvme/tree.h" +#include "nvme/util.h" +#include "nvme/log.h" + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBNVME_H */ diff --git a/src/libnvme.map b/src/libnvme.map new file mode 100644 index 0000000..087dc66 --- /dev/null +++ b/src/libnvme.map @@ -0,0 +1,341 @@ +LIBNVME_1_0 { + global: + nvme_admin_passthru64; + nvme_admin_passthru; + nvme_attach_ns; + nvme_capacity_mgmt; + nvme_compare; + nvme_copy; + nvme_create_root; + nvme_create_ctrl; + nvme_ctrl_disconnect; + nvme_ctrl_first_ns; + nvme_ctrl_first_path; + nvme_ctrl_get_address; + nvme_ctrl_get_dhchap_key; + nvme_ctrl_get_discovery_ctrl; + nvme_ctrl_get_fd; + nvme_ctrl_get_firmware; + nvme_ctrl_get_host_iface; + nvme_ctrl_get_host_traddr; + nvme_ctrl_get_model; + nvme_ctrl_get_name; + nvme_ctrl_get_numa_node; + nvme_ctrl_get_queue_count; + nvme_ctrl_get_serial; + nvme_ctrl_get_sqsize; + nvme_ctrl_get_state; + nvme_ctrl_get_subsysnqn; + nvme_ctrl_get_subsystem; + nvme_ctrl_get_sysfs_dir; + nvme_ctrl_get_traddr; + nvme_ctrl_get_transport; + nvme_ctrl_get_trsvcid; + nvme_ctrl_identify; + nvme_ctrl_is_discovery_ctrl; + nvme_ctrl_next_ns; + nvme_ctrl_next_path; + nvme_ctrl_reset; + nvme_ctrl_set_dhchap_key; + nvme_ctrl_set_discovery_ctrl; + nvme_ctrl_set_persistent; + nvme_ctrls_filter; + nvme_default_host; + nvme_dev_self_test; + nvme_dim_send; + nvme_directive_recv; + nvme_directive_recv_identify_parameters; + nvme_directive_recv_stream_allocate; + nvme_directive_recv_stream_parameters; + nvme_directive_recv_stream_status; + nvme_directive_send; + nvme_directive_send_id_endir; + nvme_directive_send_stream_release_identifier; + nvme_directive_send_stream_release_resource; + nvme_disconnect_ctrl; + nvme_dsm; + nvme_dsm_range; + nvme_dump_config; + nvme_errno_to_string; + nvme_first_host; + nvme_first_subsystem; + nvme_flush; + nvme_format_nvm; + nvme_free_ctrl; + nvme_free_host; + nvme_free_ns; + nvme_free_subsystem; + nvme_free_tree; + nvme_fw_commit; + nvme_fw_download; + nvme_fw_download_seq; + nvme_gen_dhchap_key; + nvme_get_ana_log_len; + nvme_get_attr; + nvme_get_ctrl_attr; + nvme_get_ctrl_telemetry; + nvme_get_directive_receive_length; + nvme_get_feature_length; + nvme_get_features; + nvme_get_features_arbitration; + nvme_get_features_async_event; + nvme_get_features_auto_pst; + nvme_get_features_endurance_event_cfg; + nvme_get_features_err_recovery; + nvme_get_features_hctm; + nvme_get_features_host_behavior; + nvme_get_features_host_id; + nvme_get_features_host_mem_buf; + nvme_get_features_irq_coalesce; + nvme_get_features_irq_config; + nvme_get_features_kato; + nvme_get_features_lba_range; + nvme_get_features_lba_sts_interval; + nvme_get_features_nopsc; + nvme_get_features_num_queues; + nvme_get_features_plm_config; + nvme_get_features_plm_window; + nvme_get_features_power_mgmt; + nvme_get_features_resv_mask; + nvme_get_features_resv_persist; + nvme_get_features_rrl; + nvme_get_features_sanitize; + 
nvme_get_features_sw_progress; + nvme_get_features_temp_thresh; + nvme_get_features_timestamp; + nvme_get_features_volatile_wc; + nvme_get_features_write_atomic; + nvme_get_features_write_protect; + nvme_get_host_telemetry; + nvme_get_lba_status; + nvme_get_log; + nvme_get_log_ana; + nvme_get_log_ana_groups; + nvme_get_log_boot_partition; + nvme_get_log_changed_ns_list; + nvme_get_log_cmd_effects; + nvme_get_log_create_telemetry_host; + nvme_get_log_device_self_test; + nvme_get_log_discovery; + nvme_get_log_endurance_group; + nvme_get_log_endurance_grp_evt; + nvme_get_log_error; + nvme_get_log_fid_supported_effects; + nvme_get_log_fw_slot; + nvme_get_log_lba_status; + nvme_get_log_page; + nvme_get_log_persistent_event; + nvme_get_log_predictable_lat_event; + nvme_get_log_predictable_lat_nvmset; + nvme_get_log_reservation; + nvme_get_log_sanitize; + nvme_get_log_smart; + nvme_get_log_supported_log_pages; + nvme_get_log_telemetry_ctrl; + nvme_get_log_telemetry_host; + nvme_get_log_zns_changed_zones; + nvme_get_logical_block_size; + nvme_get_new_host_telemetry; + nvme_get_ns_attr; + nvme_get_nsid; + nvme_get_path_attr; + nvme_get_property; + nvme_get_subsys_attr; + nvme_get_telemetry_log; + nvme_host_get_dhchap_key; + nvme_host_get_hostid; + nvme_host_get_hostnqn; + nvme_host_get_hostsymname; + nvme_host_get_root; + nvme_host_set_dhchap_key; + nvme_host_set_hostsymname; + nvme_identify; + nvme_identify_active_ns_list; + nvme_identify_allocated_ns; + nvme_identify_allocated_ns_list; + nvme_identify_ctrl; + nvme_identify_ctrl_list; + nvme_identify_domain_list; + nvme_identify_endurance_group_list; + nvme_identify_independent_identify_ns; + nvme_identify_ns_csi_user_data_format; + nvme_identify_iocs_ns_csi_user_data_format; + nvme_identify_iocs; + nvme_identify_ns; + nvme_identify_ns_descs; + nvme_identify_ns_granularity; + nvme_identify_nsid_ctrl_list; + nvme_identify_nvmset_list; + nvme_identify_primary_ctrl; + nvme_identify_secondary_ctrl_list; + nvme_identify_uuid; + nvme_init_copy_range; + nvme_init_ctrl; + nvme_init_ctrl_list; + nvme_init_dsm_range; + nvme_init_logging; + nvme_io; + nvme_io_passthru64; + nvme_io_passthru; + nvme_lockdown; + nvme_lookup_ctrl; + nvme_lookup_host; + nvme_lookup_subsystem; + nvme_namespace_attach_ctrls; + nvme_namespace_detach_ctrls; + nvme_namespace_filter; + nvme_namespace_first_path; + nvme_namespace_next_path; + nvme_next_host; + nvme_next_subsystem; + nvme_ns_attach; + nvme_ns_attach_ctrls; + nvme_ns_compare; + nvme_ns_detach_ctrls; + nvme_ns_dettach_ctrls; + nvme_ns_flush; + nvme_ns_get_csi; + nvme_ns_get_ctrl; + nvme_ns_get_eui64; + nvme_ns_get_fd; + nvme_ns_get_firmware; + nvme_ns_get_generic_name; + nvme_ns_get_lba_count; + nvme_ns_get_lba_size; + nvme_ns_get_lba_util; + nvme_ns_get_meta_size; + nvme_ns_get_model; + nvme_ns_get_model; + nvme_ns_get_name; + nvme_ns_get_nguid; + nvme_ns_get_nsid; + nvme_ns_get_serial; + nvme_ns_get_subsystem; + nvme_ns_get_sysfs_dir; + nvme_ns_get_uuid; + nvme_ns_identify; + nvme_ns_mgmt; + nvme_ns_mgmt_create; + nvme_ns_mgmt_delete; + nvme_ns_open; + nvme_ns_read; + nvme_ns_rescan; + nvme_ns_verify; + nvme_ns_write; + nvme_ns_write_uncorrectable; + nvme_ns_write_zeros; + nvme_nvm_identify_ctrl; + nvme_open; + nvme_path_get_ana_state; + nvme_path_get_ctrl; + nvme_path_get_name; + nvme_path_get_ns; + nvme_path_get_sysfs_dir; + nvme_paths_filter; + nvme_read; + nvme_read_config; + nvme_refresh_topology; + nvme_rescan_ctrl; + nvme_resv_acquire; + nvme_resv_register; + nvme_resv_release; + nvme_resv_report; + nvme_sanitize; 
+ nvme_sanitize_nvm; + nvme_scan; + nvme_scan_ctrl; + nvme_scan_ctrl_namespace_paths; + nvme_scan_ctrl_namespaces; + nvme_scan_topology; + nvme_scan_namespace; + nvme_scan_subsystem_namespaces; + nvme_scan_subsystems; + nvme_security_receive; + nvme_security_receive; + nvme_security_send; + nvme_set_feature; + nvme_set_features; + nvme_set_features_arbitration; + nvme_set_features_async_event; + nvme_set_features_auto_pst; + nvme_set_features_endurance_evt_cfg; + nvme_set_features_err_recovery; + nvme_set_features_hctm; + nvme_set_features_host_behavior; + nvme_set_features_host_id; + nvme_set_features_irq_coalesce; + nvme_set_features_irq_config; + nvme_set_features_lba_range; + nvme_set_features_lba_sts_interval; + nvme_set_features_nopsc; + nvme_set_features_plm_config; + nvme_set_features_plm_window; + nvme_set_features_power_mgmt; + nvme_set_features_resv_mask; + nvme_set_features_resv_persist; + nvme_set_features_rrl; + nvme_set_features_sanitize; + nvme_set_features_sw_progress; + nvme_set_features_temp_thresh; + nvme_set_features_timestamp; + nvme_set_features_volatile_wc; + nvme_set_features_write_atomic; + nvme_set_features_write_protect; + nvme_set_property; + nvme_setup_ctrl_list; + nvme_setup_id_ns; + nvme_status_to_errno; + nvme_status_to_string; + nvme_status_type; + nvme_submit_admin_passthru64; + nvme_submit_admin_passthru; + nvme_submit_io_passthru64; + nvme_submit_io_passthru; + nvme_subsys_filter; + nvme_subsystem_first_ctrl; + nvme_subsystem_first_ns; + nvme_subsystem_get_host; + nvme_subsystem_get_name; + nvme_subsystem_get_nqn; + nvme_subsystem_get_sysfs_dir; + nvme_subsystem_get_type; + nvme_subsystem_lookup_namespace; + nvme_subsystem_next_ctrl; + nvme_subsystem_next_ns; + nvme_subsystem_reset; + nvme_unlink_ctrl; + nvme_update_config; + nvme_verify; + nvme_virtual_mgmt; + nvme_write; + nvme_write_uncorrectable; + nvme_write_zeros; + nvme_zns_append; + nvme_zns_identify_ctrl; + nvme_zns_identify_ns; + nvme_zns_identify_ns; + nvme_zns_mgmt_recv; + nvme_zns_mgmt_send; + nvme_zns_report_zones; + nvmf_add_ctrl; + nvmf_adrfam_str; + nvmf_cms_str; + nvmf_connect_disc_entry; + nvmf_default_config; + nvmf_eflags_str; + nvmf_get_discovery_log; + nvmf_hostid_from_file; + nvmf_hostnqn_from_file; + nvmf_hostnqn_generate; + nvmf_is_registration_supported; + nvmf_prtype_str; + nvmf_qptype_str; + nvmf_register_ctrl; + nvmf_sectype_str; + nvmf_subtype_str; + nvmf_treq_str; + nvmf_trtype_str; + nvmf_update_config; + local: + *; +}; diff --git a/src/licenses/BSD-MIT b/src/licenses/BSD-MIT new file mode 100644 index 0000000..89de354 --- /dev/null +++ b/src/licenses/BSD-MIT @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/src/licenses/CC0 b/src/licenses/CC0 new file mode 100644 index 0000000..feb9b11 --- /dev/null +++ b/src/licenses/CC0 @@ -0,0 +1,28 @@ +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: + + the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; + moral rights retained by the original author(s) and/or performer(s); + publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; + rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; + rights protecting the extraction, dissemination, use and reuse of data in a Work; + database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and + other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. + +2. Waiver. 
To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. + +4. Limitations and Disclaimers. + + No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. + Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. + Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. 
Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. + Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. diff --git a/src/meson.build b/src/meson.build new file mode 100644 index 0000000..b573b19 --- /dev/null +++ b/src/meson.build @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# This file is part of libnvme. +# Copyright (c) 2021 Dell Inc. +# +# Authors: Martin Belanger <Martin.Belanger@dell.com> +# +sources = [ + 'nvme/cleanup.c', + 'nvme/fabrics.c', + 'nvme/filters.c', + 'nvme/ioctl.c', + 'nvme/linux.c', + 'nvme/log.c', + 'nvme/tree.c', + 'nvme/util.c', +] + +if conf.get('CONFIG_JSONC') + sources += 'nvme/json.c' +endif + +deps = [ + libuuid_dep, + json_c_dep, + openssl_dep, +] + +source_dir = meson.current_source_dir() +mapfile = 'libnvme.map' +version_script_arg = join_paths(source_dir, mapfile) + +libnvme = library( + 'nvme', # produces libnvme.so + sources, + version: library_version, + link_args: ['-Wl,--version-script=' + version_script_arg], + dependencies: deps, + link_depends: mapfile, + include_directories: [incdir, internal_incdir], + install: true, + link_with: libccan, +) + +pkg = import('pkgconfig') +pkg.generate(libnvme, + filebase: meson.project_name(), + name: meson.project_name(), + version: meson.project_version(), + description: 'Manage "libnvme" subsystem devices (Non-volatile Memory Express)', + url: 'http://github.com/linux-nvme/libnvme/', +) + +libnvme_dep = declare_dependency( + include_directories: ['.'], + dependencies: deps, + link_with: libnvme, +) + +mode = ['rw-r--r--', 0, 0] +install_headers('libnvme.h', install_mode: mode) +install_headers([ + 'nvme/fabrics.h', + 'nvme/filters.h', + 'nvme/ioctl.h', + 'nvme/linux.h', + 'nvme/log.h', + 'nvme/tree.h', + 'nvme/types.h', + 'nvme/util.h', + ], + subdir: 'nvme', + install_mode: mode, +) diff --git a/src/nvme/cleanup.c b/src/nvme/cleanup.c new file mode 100644 index 0000000..0d5d910 --- /dev/null +++ b/src/nvme/cleanup.c @@ -0,0 +1,4 @@ +#include <stdlib.h> +#include "cleanup.h" + +DEFINE_CLEANUP_FUNC(cleanup_charp, char *, free); diff --git a/src/nvme/cleanup.h b/src/nvme/cleanup.h new file mode 100644 index 0000000..89a4984 --- /dev/null +++ b/src/nvme/cleanup.h @@ -0,0 +1,18 @@ +#ifndef __CLEANUP_H +#define __CLEANUP_H + +#define __cleanup__(fn) __attribute__((cleanup(fn))) + +#define DECLARE_CLEANUP_FUNC(name, type) \ + void name(type *__p) + +#define DEFINE_CLEANUP_FUNC(name, type, free_fn)\ +DECLARE_CLEANUP_FUNC(name, type) \ +{ \ + if (*__p) \ + free_fn(*__p); \ +} + +DECLARE_CLEANUP_FUNC(cleanup_charp, char *); + +#endif diff --git a/src/nvme/fabrics.c b/src/nvme/fabrics.c new file mode 100644 index 0000000..2a26843 --- /dev/null +++ b/src/nvme/fabrics.c @@ -0,0 +1,1310 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#include <ctype.h> +#include <errno.h> +#include <fcntl.h> +#include <limits.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <dirent.h> +#include <inttypes.h> + +#include <sys/param.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <arpa/inet.h> +#include <netdb.h> +#include <net/if.h> + +#include <ccan/endian/endian.h> +#include <ccan/list/list.h> +#include <ccan/array_size/array_size.h> + +#include "fabrics.h" +#include "linux.h" +#include "ioctl.h" +#include "util.h" +#include "log.h" +#include "private.h" + +#define NVMF_HOSTID_SIZE 37 +#define UUID_SIZE 37 /* 1b4e28ba-2fa1-11d2-883f-0016d3cca427 + \0 */ + +#define NVMF_HOSTNQN_FILE SYSCONFDIR "/nvme/hostnqn" +#define NVMF_HOSTID_FILE SYSCONFDIR "/nvme/hostid" + +const char *nvmf_dev = "/dev/nvme-fabrics"; + +/** + * strchomp() - Strip trailing white space + * @s: String to strip + * @l: Maximum length of string + */ +static void strchomp(char *s, int l) +{ + while (l && (s[l] == '\0' || s[l] == ' ')) + s[l--] = '\0'; +} + +const char *arg_str(const char * const *strings, + size_t array_size, size_t idx) +{ + if (idx < array_size && strings[idx]) + return strings[idx]; + return "unrecognized"; +} + +const char * const trtypes[] = { + [NVMF_TRTYPE_RDMA] = "rdma", + [NVMF_TRTYPE_FC] = "fc", + [NVMF_TRTYPE_TCP] = "tcp", + [NVMF_TRTYPE_LOOP] = "loop", +}; + +const char *nvmf_trtype_str(__u8 trtype) +{ + return arg_str(trtypes, ARRAY_SIZE(trtypes), trtype); +} + +static const char * const adrfams[] = { + [NVMF_ADDR_FAMILY_PCI] = "pci", + [NVMF_ADDR_FAMILY_IP4] = "ipv4", + [NVMF_ADDR_FAMILY_IP6] = "ipv6", + [NVMF_ADDR_FAMILY_IB] = "infiniband", + [NVMF_ADDR_FAMILY_FC] = "fibre-channel", +}; + +const char *nvmf_adrfam_str(__u8 adrfam) +{ + return arg_str(adrfams, ARRAY_SIZE(adrfams), adrfam); +} + +static const char * const subtypes[] = { + [NVME_NQN_DISC] = "discovery subsystem referral", + [NVME_NQN_NVME] = "nvme subsystem", + [NVME_NQN_CURR] = "current discovery subsystem", +}; + +const char *nvmf_subtype_str(__u8 subtype) +{ + return arg_str(subtypes, ARRAY_SIZE(subtypes), subtype); +} + +static const char * const treqs[] = { + [NVMF_TREQ_NOT_SPECIFIED] = "not specified", + [NVMF_TREQ_REQUIRED] = "required", + [NVMF_TREQ_NOT_REQUIRED] = "not required", + [NVMF_TREQ_DISABLE_SQFLOW] = "not specified, " + "sq flow control disable supported", +}; + +const char *nvmf_treq_str(__u8 treq) +{ + return arg_str(treqs, ARRAY_SIZE(treqs), treq); +} + +static const char * const eflags_strings[] = { + [NVMF_DISC_EFLAGS_NONE] = "not specified", + [NVMF_DISC_EFLAGS_EPCSD] = "explicit discovery connections", + [NVMF_DISC_EFLAGS_DUPRETINFO] = "duplicate discovery information", + [NVMF_DISC_EFLAGS_BOTH] = "explicit discovery connections, " + "duplicate discovery information", +}; + +const char *nvmf_eflags_str(__u16 eflags) +{ + return arg_str(eflags_strings, ARRAY_SIZE(eflags_strings), eflags); +} + +static const char * const sectypes[] = { + [NVMF_TCP_SECTYPE_NONE] = "none", + [NVMF_TCP_SECTYPE_TLS] = "tls", + [NVMF_TCP_SECTYPE_TLS13] = "tls13", +}; + +const char *nvmf_sectype_str(__u8 sectype) +{ + return arg_str(sectypes, ARRAY_SIZE(sectypes), sectype); +} + +static const char * const prtypes[] = { + [NVMF_RDMA_PRTYPE_NOT_SPECIFIED] = "not specified", + [NVMF_RDMA_PRTYPE_IB] = "infiniband", + [NVMF_RDMA_PRTYPE_ROCE] = "roce", + 
[NVMF_RDMA_PRTYPE_ROCEV2] = "roce-v2", + [NVMF_RDMA_PRTYPE_IWARP] = "iwarp", +}; + +const char *nvmf_prtype_str(__u8 prtype) +{ + return arg_str(prtypes, ARRAY_SIZE(prtypes), prtype); +} + +static const char * const qptypes[] = { + [NVMF_RDMA_QPTYPE_CONNECTED] = "connected", + [NVMF_RDMA_QPTYPE_DATAGRAM] = "datagram", +}; + +const char *nvmf_qptype_str(__u8 qptype) +{ + return arg_str(qptypes, ARRAY_SIZE(qptypes), qptype); +} + +static const char * const cms[] = { + [NVMF_RDMA_CMS_RDMA_CM] = "rdma-cm", +}; + +const char *nvmf_cms_str(__u8 cm) +{ + return arg_str(cms, ARRAY_SIZE(cms), cm); +} + +void nvmf_default_config(struct nvme_fabrics_config *cfg) +{ + memset(cfg, 0, sizeof(*cfg)); + cfg->tos = -1; + cfg->ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO; +} + +#define MERGE_CFG_OPTION(c, n, o, d) \ + if ((c)->o == d) (c)->o = (n)->o +static struct nvme_fabrics_config *merge_config(nvme_ctrl_t c, + const struct nvme_fabrics_config *cfg) +{ + struct nvme_fabrics_config *ctrl_cfg = nvme_ctrl_get_config(c); + + MERGE_CFG_OPTION(ctrl_cfg, cfg, host_traddr, NULL); + MERGE_CFG_OPTION(ctrl_cfg, cfg, host_iface, NULL); + MERGE_CFG_OPTION(ctrl_cfg, cfg, nr_io_queues, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, nr_write_queues, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, nr_poll_queues, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, queue_size, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, keep_alive_tmo, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, reconnect_delay, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, ctrl_loss_tmo, + NVMF_DEF_CTRL_LOSS_TMO); + MERGE_CFG_OPTION(ctrl_cfg, cfg, fast_io_fail_tmo, 0); + MERGE_CFG_OPTION(ctrl_cfg, cfg, tos, -1); + MERGE_CFG_OPTION(ctrl_cfg, cfg, duplicate_connect, false); + MERGE_CFG_OPTION(ctrl_cfg, cfg, disable_sqflow, false); + MERGE_CFG_OPTION(ctrl_cfg, cfg, hdr_digest, false); + MERGE_CFG_OPTION(ctrl_cfg, cfg, data_digest, false); + MERGE_CFG_OPTION(ctrl_cfg, cfg, tls, false); + + return ctrl_cfg; +} + +#define UPDATE_CFG_OPTION(c, n, o, d) \ + if ((n)->o != d) (c)->o = (n)->o +void nvmf_update_config(nvme_ctrl_t c, const struct nvme_fabrics_config *cfg) +{ + struct nvme_fabrics_config *ctrl_cfg = nvme_ctrl_get_config(c); + + UPDATE_CFG_OPTION(ctrl_cfg, cfg, host_traddr, NULL); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, host_iface, NULL); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, nr_io_queues, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, nr_write_queues, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, nr_poll_queues, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, queue_size, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, keep_alive_tmo, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, reconnect_delay, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, ctrl_loss_tmo, + NVMF_DEF_CTRL_LOSS_TMO); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, fast_io_fail_tmo, 0); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, tos, -1); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, duplicate_connect, false); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, disable_sqflow, false); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, hdr_digest, false); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, data_digest, false); + UPDATE_CFG_OPTION(ctrl_cfg, cfg, tls, false); +} + +static int add_bool_argument(char **argstr, char *tok, bool arg) +{ + char *nstr; + + if (!arg) + return 0; + if (asprintf(&nstr, "%s,%s", *argstr, tok) < 0) { + errno = ENOMEM; + return -1; + } + free(*argstr); + *argstr = nstr; + + return 0; +} + +static int add_int_argument(char **argstr, char *tok, int arg, bool allow_zero) +{ + char *nstr; + + if (arg < 0 || (!arg && !allow_zero)) + return 0; + if (asprintf(&nstr, "%s,%s=%d", *argstr, tok, arg) < 0) { + errno = ENOMEM; + return -1; + } + 
free(*argstr); + *argstr = nstr; + + return 0; +} + +static int add_int_or_minus_one_argument(char **argstr, char *tok, int arg) +{ + char *nstr; + + if (arg < -1) + return 0; + if (asprintf(&nstr, "%s,%s=%d", *argstr, tok, arg) < 0) { + errno = ENOMEM; + return -1; + } + free(*argstr); + *argstr = nstr; + + return 0; +} + +static int add_argument(char **argstr, const char *tok, const char *arg) +{ + char *nstr; + + if (!(arg && strcmp(arg, "none"))) + return 0; + if (asprintf(&nstr, "%s,%s=%s", *argstr, tok, arg) < 0) { + errno = ENOMEM; + return -1; + } + free(*argstr); + *argstr = nstr; + + return 0; +} + +static int inet4_pton(const char *src, uint16_t port, + struct sockaddr_storage *addr) +{ + struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; + + if (strlen(src) > INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton(AF_INET, src, &addr4->sin_addr.s_addr) <= 0) + return -EINVAL; + + addr4->sin_family = AF_INET; + addr4->sin_port = htons(port); + + return 0; +} + +static int inet6_pton(nvme_root_t r, const char *src, uint16_t port, + struct sockaddr_storage *addr) +{ + int ret = -EINVAL; + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; + + if (strlen(src) > INET6_ADDRSTRLEN) + return -EINVAL; + + char *tmp = strdup(src); + if (!tmp) + nvme_msg(r, LOG_ERR, "cannot copy: %s\n", src); + + const char *scope = NULL; + char *p = strchr(tmp, SCOPE_DELIMITER); + if (p) { + *p = '\0'; + scope = src + (p - tmp) + 1; + } + + if (inet_pton(AF_INET6, tmp, &addr6->sin6_addr) != 1) + goto free_tmp; + + if (IN6_IS_ADDR_LINKLOCAL(&addr6->sin6_addr) && scope) { + addr6->sin6_scope_id = if_nametoindex(scope); + if (addr6->sin6_scope_id == 0) { + nvme_msg(r, LOG_ERR, + "can't find iface index for: %s (%m)\n", scope); + goto free_tmp; + } + } + + addr6->sin6_family = AF_INET6; + addr6->sin6_port = htons(port); + ret = 0; + +free_tmp: + free(tmp); + return ret; +} + +/** + * inet_pton_with_scope - convert an IPv4/IPv6 to socket address + * @r: nvme_root_t object + * @af: address family, AF_INET, AF_INET6 or AF_UNSPEC for either + * @src: the start of the address string + * @trsvcid: transport service identifier + * @addr: output socket address + * + * Return 0 on success, errno otherwise. 
+ */ +static int inet_pton_with_scope(nvme_root_t r, int af, + const char *src, const char * trsvcid, + struct sockaddr_storage *addr) +{ + int ret = -EINVAL; + uint16_t port = 0; + + if (trsvcid) { + unsigned long long tmp = strtoull(trsvcid, NULL, 0); + port = (uint16_t)tmp; + if (tmp != port) { + nvme_msg(r, LOG_ERR, "trsvcid out of range: %s\n", + trsvcid); + return -ERANGE; + } + } else { + port = 0; + } + + switch (af) { + case AF_INET: + ret = inet4_pton(src, port, addr); + break; + case AF_INET6: + ret = inet6_pton(r, src, port, addr); + break; + case AF_UNSPEC: + ret = inet4_pton(src, port, addr); + if (ret) + ret = inet6_pton(r, src, port, addr); + break; + default: + nvme_msg(r, LOG_ERR, "unexpected address family %d\n", af); + } + + return ret; +} + +static bool traddr_is_hostname(nvme_root_t r, nvme_ctrl_t c) +{ + struct sockaddr_storage addr; + + if (!c->traddr) + return false; + if (strcmp(c->transport, "tcp") && strcmp(c->transport, "rdma")) + return false; + if (inet_pton_with_scope(r, AF_UNSPEC, c->traddr, c->trsvcid, &addr) == 0) + return false; + return true; +} + +static int build_options(nvme_host_t h, nvme_ctrl_t c, char **argstr) +{ + struct nvme_fabrics_config *cfg = nvme_ctrl_get_config(c); + const char *transport = nvme_ctrl_get_transport(c); + const char *hostnqn, *hostid, *hostkey, *ctrlkey; + bool discover = false, discovery_nqn = false; + + if (!transport) { + nvme_msg(h->r, LOG_ERR, "need a transport (-t) argument\n"); + return -ENVME_CONNECT_TARG; + } + + if (strncmp(transport, "loop", 4)) { + if (!nvme_ctrl_get_traddr(c)) { + nvme_msg(h->r, LOG_ERR, "need a address (-a) argument\n"); + return -ENVME_CONNECT_AARG; + } + } + + /* always specify nqn as first arg - this will init the string */ + if (asprintf(argstr, "nqn=%s", + nvme_ctrl_get_subsysnqn(c)) < 0) { + return -ENOMEM; + } + if (!strcmp(nvme_ctrl_get_subsysnqn(c), NVME_DISC_SUBSYS_NAME)) { + nvme_ctrl_set_discovery_ctrl(c, true); + discovery_nqn = true; + } + if (nvme_ctrl_is_discovery_ctrl(c)) + discover = true; + hostnqn = nvme_host_get_hostnqn(h); + hostid = nvme_host_get_hostid(h); + hostkey = nvme_host_get_dhchap_key(h); + ctrlkey = nvme_ctrl_get_dhchap_key(c); + if (add_argument(argstr, "transport", transport) || + add_argument(argstr, "traddr", + nvme_ctrl_get_traddr(c)) || + add_argument(argstr, "host_traddr", + cfg->host_traddr) || + add_argument(argstr, "host_iface", + cfg->host_iface) || + add_argument(argstr, "trsvcid", + nvme_ctrl_get_trsvcid(c)) || + (hostnqn && add_argument(argstr, "hostnqn", hostnqn)) || + (hostid && add_argument(argstr, "hostid", hostid)) || + (discover && !discovery_nqn && + add_bool_argument(argstr, "discovery", true)) || + (!discover && hostkey && + add_argument(argstr, "dhchap_secret", hostkey)) || + (!discover && ctrlkey && + add_argument(argstr, "dhchap_ctrl_secret", ctrlkey)) || + (!discover && + add_int_argument(argstr, "nr_io_queues", + cfg->nr_io_queues, false)) || + (!discover && + add_int_argument(argstr, "nr_write_queues", + cfg->nr_write_queues, false)) || + (!discover && + add_int_argument(argstr, "nr_poll_queues", + cfg->nr_poll_queues, false)) || + (!discover && + add_int_argument(argstr, "queue_size", + cfg->queue_size, false)) || + add_int_argument(argstr, "keep_alive_tmo", + cfg->keep_alive_tmo, false) || + add_int_argument(argstr, "reconnect_delay", + cfg->reconnect_delay, false) || + (strcmp(transport, "loop") && + add_int_or_minus_one_argument(argstr, "ctrl_loss_tmo", + cfg->ctrl_loss_tmo)) || + (strcmp(transport, "loop") && + 
add_int_argument(argstr, "fast_io_fail_tmo", + cfg->fast_io_fail_tmo, false)) || + (strcmp(transport, "loop") && + add_int_argument(argstr, "tos", cfg->tos, true)) || + add_bool_argument(argstr, "duplicate_connect", + cfg->duplicate_connect) || + add_bool_argument(argstr, "disable_sqflow", + cfg->disable_sqflow) || + (!strcmp(transport, "tcp") && + add_bool_argument(argstr, "hdr_digest", cfg->hdr_digest)) || + (!strcmp(transport, "tcp") && + add_bool_argument(argstr, "data_digest", cfg->data_digest)) || + (!strcmp(transport, "tcp") && + add_bool_argument(argstr, "tls", cfg->tls))) { + free(*argstr); + return -1; + } + + return 0; +} + +static int __nvmf_add_ctrl(nvme_root_t r, const char *argstr) +{ + int ret, fd, len = strlen(argstr); + char buf[0x1000], *options, *p; + + fd = open(nvmf_dev, O_RDWR); + if (fd < 0) { + nvme_msg(r, LOG_ERR, "Failed to open %s: %s\n", + nvmf_dev, strerror(errno)); + return -ENVME_CONNECT_OPEN; + } + + nvme_msg(r, LOG_DEBUG, "connect ctrl, '%.*s'\n", + (int)strcspn(argstr,"\n"), argstr); + ret = write(fd, argstr, len); + if (ret != len) { + nvme_msg(r, LOG_ERR, "Failed to write to %s: %s\n", + nvmf_dev, strerror(errno)); + ret = -ENVME_CONNECT_WRITE; + goto out_close; + } + + len = read(fd, buf, sizeof(buf)); + if (len < 0) { + nvme_msg(r, LOG_ERR, "Failed to read from %s: %s\n", + nvmf_dev, strerror(errno)); + ret = -ENVME_CONNECT_READ; + goto out_close; + } + nvme_msg(r, LOG_DEBUG, "connect ctrl, response '%.*s'\n", + (int)strcspn(buf, "\n"), buf); + buf[len] = '\0'; + options = buf; + while ((p = strsep(&options, ",\n")) != NULL) { + if (!*p) + continue; + if (sscanf(p, "instance=%d", &ret) == 1) + goto out_close; + } + + nvme_msg(r, LOG_ERR, "Failed to parse ctrl info for \"%s\"\n", argstr); + ret = -ENVME_CONNECT_PARSE; +out_close: + close(fd); + return ret; +} + +int nvmf_add_ctrl(nvme_host_t h, nvme_ctrl_t c, + const struct nvme_fabrics_config *cfg) +{ + char *argstr; + int ret; + + cfg = merge_config(c, cfg); + nvme_ctrl_set_discovered(c, true); + if (traddr_is_hostname(h->r, c)) { + char *traddr = c->traddr; + + c->traddr = hostname2traddr(h->r, traddr); + if (!c->traddr) { + c->traddr = traddr; + errno = ENVME_CONNECT_TRADDR; + return -1; + } + free(traddr); + } + + ret = build_options(h, c, &argstr); + if (ret) + return ret; + + ret = __nvmf_add_ctrl(h->r, argstr); + free(argstr); + if (ret < 0) { + errno = -ret; + return -1; + } + + nvme_msg(h->r, LOG_INFO, "nvme%d: ctrl connected\n", ret); + return nvme_init_ctrl(h, c, ret); +} + +nvme_ctrl_t nvmf_connect_disc_entry(nvme_host_t h, + struct nvmf_disc_log_entry *e, + const struct nvme_fabrics_config *cfg, + bool *discover) +{ + const char *transport; + char *traddr = NULL, *trsvcid = NULL; + nvme_ctrl_t c; + int ret; + + switch (e->trtype) { + case NVMF_TRTYPE_RDMA: + case NVMF_TRTYPE_TCP: + switch (e->adrfam) { + case NVMF_ADDR_FAMILY_IP4: + case NVMF_ADDR_FAMILY_IP6: + strchomp(e->traddr, NVMF_TRADDR_SIZE - 1); + strchomp(e->trsvcid, NVMF_TRSVCID_SIZE - 1); + traddr = e->traddr; + trsvcid = e->trsvcid; + break; + default: + nvme_msg(h->r, LOG_ERR, + "skipping unsupported adrfam %d\n", + e->adrfam); + errno = EINVAL; + return NULL; + } + break; + case NVMF_TRTYPE_FC: + switch (e->adrfam) { + case NVMF_ADDR_FAMILY_FC: + strchomp(e->traddr, NVMF_TRADDR_SIZE - 1); + traddr = e->traddr; + break; + default: + nvme_msg(h->r, LOG_ERR, + "skipping unsupported adrfam %d\n", + e->adrfam); + errno = EINVAL; + return NULL; + } + break; + case NVMF_TRTYPE_LOOP: + strchomp(e->traddr, NVMF_TRADDR_SIZE - 1); + 
traddr = strlen(e->traddr) ? e->traddr : NULL; + break; + default: + nvme_msg(h->r, LOG_ERR, "skipping unsupported transport %d\n", + e->trtype); + errno = EINVAL; + return NULL; + } + + transport = nvmf_trtype_str(e->trtype); + + nvme_msg(h->r, LOG_DEBUG, "lookup ctrl " + "(transport: %s, traddr: %s, trsvcid %s)\n", + transport, traddr, trsvcid); + c = nvme_create_ctrl(h->r, e->subnqn, transport, traddr, + cfg->host_traddr, cfg->host_iface, trsvcid); + if (!c) { + nvme_msg(h->r, LOG_DEBUG, "skipping discovery entry, " + "failed to allocate %s controller with traddr %s\n", + transport, traddr); + errno = ENOMEM; + return NULL; + } + + switch (e->subtype) { + case NVME_NQN_CURR: + nvme_ctrl_set_discovered(c, true); + break; + case NVME_NQN_DISC: + if (discover) + *discover = true; + nvme_ctrl_set_discovery_ctrl(c, true); + break; + default: + nvme_msg(h->r, LOG_ERR, "unsupported subtype %d\n", + e->subtype); + /* fallthrough */ + case NVME_NQN_NVME: + nvme_ctrl_set_discovery_ctrl(c, false); + break; + } + + if (nvme_ctrl_is_discovered(c)) { + nvme_free_ctrl(c); + errno = EAGAIN; + return NULL; + } + + if (e->treq & NVMF_TREQ_DISABLE_SQFLOW) + c->cfg.disable_sqflow = true; + + if (e->trtype == NVMF_TRTYPE_TCP && + (e->treq & NVMF_TREQ_REQUIRED || + e->treq & NVMF_TREQ_NOT_REQUIRED)) + c->cfg.tls = true; + + ret = nvmf_add_ctrl(h, c, cfg); + if (!ret) + return c; + + if (errno == EINVAL && c->cfg.disable_sqflow) { + errno = 0; + /* disable_sqflow is unrecognized option on older kernels */ + nvme_msg(h->r, LOG_INFO, "failed to connect controller, " + "retry with disabling SQ flow control\n"); + c->cfg.disable_sqflow = false; + ret = nvmf_add_ctrl(h, c, cfg); + if (!ret) + return c; + } + nvme_msg(h->r, LOG_ERR, "failed to connect controller, error %d\n", + errno); + nvme_free_ctrl(c); + return NULL; +} + +static int nvme_discovery_log(int fd, __u32 len, struct nvmf_discovery_log *log, bool rae) +{ + struct nvme_get_log_args args = { + .args_size = sizeof(args), + .fd = fd, + .nsid = NVME_NSID_NONE, + .lsp = NVME_LOG_LSP_NONE, + .lsi = NVME_LOG_LSI_NONE, + .uuidx = NVME_UUID_NONE, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + .lid = NVME_LOG_LID_DISCOVER, + .log = log, + .len = len, + .csi = NVME_CSI_NVM, + .rae = rae, + .ot = false, + }; + + return nvme_get_log_page(fd, 4096, &args); +} + +int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, + int max_retries) +{ + nvme_root_t r = c->s && c->s->h ? 
c->s->h->r : NULL; + struct nvmf_discovery_log *log; + int hdr, ret, retries = 0; + const char *name = nvme_ctrl_get_name(c); + uint64_t genctr, numrec; + unsigned int size; + + hdr = sizeof(struct nvmf_discovery_log); + log = malloc(hdr); + if (!log) { + nvme_msg(r, LOG_ERR, + "could not allocate memory for discovery log header\n"); + errno = ENOMEM; + return -1; + } + memset(log, 0, hdr); + + nvme_msg(r, LOG_DEBUG, "%s: discover length %d\n", name, 0x100); + ret = nvme_discovery_log(nvme_ctrl_get_fd(c), 0x100, log, true); + if (ret) { + nvme_msg(r, LOG_INFO, "%s: discover failed, error %d\n", + name, errno); + goto out_free_log; + } + + do { + numrec = le64_to_cpu(log->numrec); + genctr = le64_to_cpu(log->genctr); + + if (numrec == 0) { + *logp = log; + return 0; + } + + size = sizeof(struct nvmf_discovery_log) + + sizeof(struct nvmf_disc_log_entry) * (numrec); + + free(log); + log = malloc(size); + if (!log) { + nvme_msg(r, LOG_ERR, + "could not alloc memory for discovery log page\n"); + errno = ENOMEM; + return -1; + } + memset(log, 0, size); + + nvme_msg(r, LOG_DEBUG, "%s: discover length %d\n", name, size); + ret = nvme_discovery_log(nvme_ctrl_get_fd(c), size, log, false); + if (ret) { + nvme_msg(r, LOG_INFO, + "%s: discover try %d/%d failed, error %d\n", + name, retries, max_retries, errno); + goto out_free_log; + } + + genctr = le64_to_cpu(log->genctr); + nvme_msg(r, LOG_DEBUG, + "%s: discover genctr %" PRIu64 ", retry\n", + name, genctr); + ret = nvme_discovery_log(nvme_ctrl_get_fd(c), hdr, log, true); + if (ret) { + nvme_msg(r, LOG_INFO, + "%s: discover try %d/%d failed, error %d\n", + name, retries, max_retries, errno); + goto out_free_log; + } + } while (genctr != le64_to_cpu(log->genctr) && + ++retries < max_retries); + + if (genctr != le64_to_cpu(log->genctr)) { + nvme_msg(r, LOG_INFO, "%s: discover genctr mismatch\n", name); + errno = EAGAIN; + ret = -1; + } else if (numrec != le64_to_cpu(log->numrec)) { + nvme_msg(r, LOG_INFO, + "%s: could only fetch %" PRIu64 " of %" PRIu64 " records\n", + name, numrec, le64_to_cpu(log->numrec)); + errno = EBADSLT; + ret = -1; + } else { + *logp = log; + return 0; + } + +out_free_log: + free(log); + return ret; +} + +#define PATH_UUID_IBM "/proc/device-tree/ibm,partition-uuid" + +static int uuid_from_device_tree(char *system_uuid) +{ + ssize_t len; + int f; + + f = open(PATH_UUID_IBM, O_RDONLY); + if (f < 0) + return -ENXIO; + + memset(system_uuid, 0, UUID_SIZE); + len = read(f, system_uuid, UUID_SIZE - 1); + close(f); + if (len < 0) + return -ENXIO; + + return strlen(system_uuid) ? 0 : -ENXIO; +} + +#define PATH_DMI_ENTRIES "/sys/firmware/dmi/entries" + +static int uuid_from_dmi_entries(char *system_uuid) +{ + int f; + DIR *d; + struct dirent *de; + char buf[512]; + + system_uuid[0] = '\0'; + d = opendir(PATH_DMI_ENTRIES); + if (!d) + return -ENXIO; + while ((de = readdir(d))) { + char filename[PATH_MAX]; + int len, type; + + if (de->d_name[0] == '.') + continue; + sprintf(filename, "%s/%s/type", PATH_DMI_ENTRIES, de->d_name); + f = open(filename, O_RDONLY); + if (f < 0) + continue; + len = read(f, buf, 512); + close(f); + if (len < 0) + continue; + if (sscanf(buf, "%d", &type) != 1) + continue; + if (type != 1) + continue; + sprintf(filename, "%s/%s/raw", PATH_DMI_ENTRIES, de->d_name); + f = open(filename, O_RDONLY); + if (f < 0) + continue; + len = read(f, buf, 512); + close(f); + if (len < 0) + continue; + /* Sigh. 
https://en.wikipedia.org/wiki/Overengineering */ + /* DMTF SMBIOS 3.0 Section 7.2.1 System UUID */ + sprintf(system_uuid, + "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" + "%02x%02x%02x%02x%02x%02x", + (uint8_t)buf[8 + 3], (uint8_t)buf[8 + 2], + (uint8_t)buf[8 + 1], (uint8_t)buf[8 + 0], + (uint8_t)buf[8 + 5], (uint8_t)buf[8 + 4], + (uint8_t)buf[8 + 7], (uint8_t)buf[8 + 6], + (uint8_t)buf[8 + 8], (uint8_t)buf[8 + 9], + (uint8_t)buf[8 + 10], (uint8_t)buf[8 + 11], + (uint8_t)buf[8 + 12], (uint8_t)buf[8 + 13], + (uint8_t)buf[8 + 14], (uint8_t)buf[8 + 15]); + break; + } + closedir(d); + return strlen(system_uuid) ? 0 : -ENXIO; +} + +#define PATH_DMI_PROD_UUID "/sys/class/dmi/id/product_uuid" + +/** + * uuid_from_product_uuid() - Get system UUID from product_uuid + * @system_uuid: Where to save the system UUID. + * + * Return: 0 on success, -ENXIO otherwise. + */ +static int uuid_from_product_uuid(char *system_uuid) +{ + FILE *stream; + ssize_t nread; + int ret; + char *line = NULL; + size_t len = 0; + + stream = fopen(PATH_DMI_PROD_UUID, "re"); + if (!stream) + return -ENXIO; + system_uuid[0] = '\0'; + + nread = getline(&line, &len, stream); + if (nread != UUID_SIZE) { + ret = -ENXIO; + goto out; + } + + /* The kernel is handling the byte swapping according DMTF + * SMBIOS 3.0 Section 7.2.1 System UUID */ + + memcpy(system_uuid, line, UUID_SIZE - 1); + system_uuid[UUID_SIZE - 1] = '\0'; + + ret = 0; + +out: + free(line); + fclose(stream); + + return ret; +} + +/** + * uuid_from_dmi() - read system UUID + * @system_uuid: buffer for the UUID + * + * The system UUID can be read from two different locations: + * + * 1) /sys/class/dmi/id/product_uuid + * 2) /sys/firmware/dmi/entries + * + * Note that the second location is not present on Debian-based systems. + * + * Return: 0 on success, negative errno otherwise. + */ +static int uuid_from_dmi(char *system_uuid) +{ + int ret = uuid_from_product_uuid(system_uuid); + if (ret != 0) + ret = uuid_from_dmi_entries(system_uuid); + return ret; +} + +char *nvmf_hostnqn_generate() +{ + char *hostnqn; + int ret; + char uuid_str[UUID_SIZE]; + uuid_t uuid; + + ret = uuid_from_dmi(uuid_str); + if (ret < 0) { + ret = uuid_from_device_tree(uuid_str); + } + if (ret < 0) { + uuid_generate_random(uuid); + uuid_unparse_lower(uuid, uuid_str); + } + + if (asprintf(&hostnqn, "nqn.2014-08.org.nvmexpress:uuid:%s", uuid_str) < 0) + return NULL; + + return hostnqn; +} + +static char *nvmf_read_file(const char *f, int len) +{ + char buf[len]; + int ret, fd; + + fd = open(f, O_RDONLY); + if (fd < 0) + return NULL; + + memset(buf, 0, len); + ret = read(fd, buf, len - 1); + close (fd); + + if (ret < 0 || !strlen(buf)) + return NULL; + return strndup(buf, strcspn(buf, "\n")); +} + +char *nvmf_hostnqn_from_file() +{ + return nvmf_read_file(NVMF_HOSTNQN_FILE, NVMF_NQN_SIZE); +} + +char *nvmf_hostid_from_file() +{ + return nvmf_read_file(NVMF_HOSTID_FILE, NVMF_HOSTID_SIZE); +} + +/** + * nvmf_get_tel() - Calculate the amount of memory needed for a DIE. + * @hostsymname: Symbolic name (may be NULL) + * + * Each Discovery Information Entry (DIE) must contain at a minimum an + * Extended Attribute for the HostID. The Entry may optionally contain an + * Extended Attribute for the Symbolic Name. + * + * Return: Total Entry Length + */ +static __u32 nvmf_get_tel(const char *hostsymname) +{ + __u32 tel = sizeof(struct nvmf_ext_die); + __u16 len; + + /* Host ID is mandatory */ + tel += nvmf_exat_size(sizeof(uuid_t)); + + /* Symbolic name is optional */ + len = hostsymname ? 
strlen(hostsymname) : 0; + if (len) + tel += nvmf_exat_size(len); + + return tel; +} + +/** + * nvmf_fill_die() - Fill a Discovery Information Entry. + * @die: Pointer to Discovery Information Entry to be filled + * @h: Pointer to the host data structure + * @tel: Length of the DIE + * @trtype: Transport type + * @adrfam: Address family + * @reg_addr: Address to register. Setting this to an empty string tells + * the DC to infer address from the source address of the socket. + * @tsas: Transport Specific Address Subtype for the address being + * registered. + */ +static void nvmf_fill_die(struct nvmf_ext_die *die, struct nvme_host *h, + __u32 tel, __u8 trtype, __u8 adrfam, + const char *reg_addr, union nvmf_tsas *tsas) +{ + __u16 numexat = 0; + size_t symname_len; + struct nvmf_ext_attr *exat; + + die->tel = cpu_to_le32(tel); + die->trtype = trtype; + die->adrfam = adrfam; + + memcpy(die->nqn, h->hostnqn, MIN(sizeof(die->nqn), strlen(h->hostnqn))); + memcpy(die->traddr, reg_addr, MIN(sizeof(die->traddr), strlen(reg_addr))); + + if (tsas) + memcpy(&die->tsas, tsas, sizeof(die->tsas)); + + /* Extended Attribute for the HostID (mandatory) */ + numexat++; + exat = die->exat; + exat->exattype = cpu_to_le16(NVMF_EXATTYPE_HOSTID); + exat->exatlen = cpu_to_le16(nvmf_exat_len(sizeof(uuid_t))); + uuid_parse(h->hostid, exat->exatval); + + /* Extended Attribute for the Symbolic Name (optional) */ + symname_len = h->hostsymname ? strlen(h->hostsymname) : 0; + if (symname_len) { + __u16 exatlen = nvmf_exat_len(symname_len); + + numexat++; + exat = nvmf_exat_ptr_next(exat); + exat->exattype = cpu_to_le16(NVMF_EXATTYPE_SYMNAME); + exat->exatlen = cpu_to_le16(exatlen); + memcpy(exat->exatval, h->hostsymname, symname_len); + /* Per Base specs, ASCII strings must be padded with spaces */ + memset(&exat->exatval[symname_len], ' ', exatlen - symname_len); + } + + die->numexat = cpu_to_le16(numexat); +} + +/** + * nvmf_dim() - Explicit reg, dereg, reg-update issuing DIM + * @c: Host NVMe controller instance maintaining the admin queue used to + * submit the DIM command to the DC. + * @tas: Task field of the Command Dword 10 (cdw10). Indicates whether to + * perform a Registration, Deregistration, or Registration-update. + * @trtype: Transport type (&enum nvmf_trtype - must be NVMF_TRTYPE_TCP) + * @adrfam: Address family (&enum nvmf_addr_family) + * @reg_addr: Address to register. Setting this to an empty string tells + * the DC to infer address from the source address of the socket. + * @tsas: Transport Specific Address Subtype for the address being + * registered. + * @result: Location where to save the command-specific result returned by + * the discovery controller. + * + * Perform explicit registration, deregistration, or + * registration-update (specified by @tas) by sending a Discovery + * Information Management (DIM) command to the Discovery Controller + * (DC). + * + * Return: 0 on success; on failure -1 is returned and errno is set + */ +static int nvmf_dim(nvme_ctrl_t c, enum nvmf_dim_tas tas, __u8 trtype, + __u8 adrfam, const char *reg_addr, union nvmf_tsas *tsas, + __u32 *result) +{ + nvme_root_t r = c->s && c->s->h ? c->s->h->r : NULL; + struct nvmf_dim_data *dim; + struct nvmf_ext_die *die; + __u32 tdl; + __u32 tel; + int ret; + + struct nvme_dim_args args = { + .args_size = sizeof(args), + .fd = nvme_ctrl_get_fd(c), + .result = result, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .tas = tas + }; + + if (!c->s) { + nvme_msg(r, LOG_ERR, + "%s: failed to perform DIM. 
subsystem undefined.\n", + c->name); + errno = EINVAL; + return -1; + } + + if (!c->s->h) { + nvme_msg(r, LOG_ERR, + "%s: failed to perform DIM. host undefined.\n", + c->name); + errno = EINVAL; + return -1; + } + + if (!c->s->h->hostid) { + nvme_msg(r, LOG_ERR, + "%s: failed to perform DIM. hostid undefined.\n", + c->name); + errno = EINVAL; + return -1; + } + + if (!c->s->h->hostnqn) { + nvme_msg(r, LOG_ERR, + "%s: failed to perform DIM. hostnqn undefined.\n", + c->name); + errno = EINVAL; + return -1; + } + + if (strcmp(c->transport, "tcp")) { + nvme_msg(r, LOG_ERR, + "%s: DIM only supported for TCP connections.\n", + c->name); + errno = EINVAL; + return -1; + } + + /* Register one Discovery Information Entry (DIE) of size TEL */ + tel = nvmf_get_tel(c->s->h->hostsymname); + tdl = sizeof(struct nvmf_dim_data) + tel; + + dim = (struct nvmf_dim_data *)calloc(1, tdl); + if (!dim) { + errno = ENOMEM; + return -1; + } + + dim->tdl = cpu_to_le32(tdl); + dim->nument = cpu_to_le64(1); /* only one DIE to register */ + dim->entfmt = cpu_to_le16(NVMF_DIM_ENTFMT_EXTENDED); + dim->etype = cpu_to_le16(NVMF_DIM_ETYPE_HOST); + dim->ektype = cpu_to_le16(0x5F); /* must be 0x5F per specs */ + + memcpy(dim->eid, c->s->h->hostnqn, + MIN(sizeof(dim->eid), strlen(c->s->h->hostnqn))); + + ret = get_entity_name(dim->ename, sizeof(dim->ename)); + if (ret <= 0) + nvme_msg(r, LOG_INFO, "%s: Failed to retrieve ENAME. %s.\n", + c->name, strerror(errno)); + + ret = get_entity_version(dim->ever, sizeof(dim->ever)); + if (ret <= 0) + nvme_msg(r, LOG_INFO, "%s: Failed to retrieve EVER.\n", c->name); + + die = &dim->die->extended; + nvmf_fill_die(die, c->s->h, tel, trtype, adrfam, reg_addr, tsas); + + args.data_len = tdl; + args.data = dim; + ret = nvme_dim_send(&args); + + free(dim); + + return ret; +} + +/** + * nvme_get_adrfam() - Get address family for the address we're registering + * with the DC. + * + * We retrieve this info from the socket itself. If we can't get the source + * address from the socket, then we'll infer the address family from the + * address of the DC since the DC address has the same address family. + * + * @ctrl: Host NVMe controller instance maintaining the admin queue used to + * submit the DIM command to the DC. + * + * Return: The address family of the source address associated with the + * socket connected to the DC. + */ +static __u8 nvme_get_adrfam(nvme_ctrl_t c) +{ + struct sockaddr_storage addr; + __u8 adrfam = NVMF_ADDR_FAMILY_IP4; + nvme_root_t r = c->s && c->s->h ? c->s->h->r : NULL; + + if (!inet_pton_with_scope(r, AF_UNSPEC, c->traddr, c->trsvcid, &addr)) { + if (addr.ss_family == AF_INET6) + adrfam = NVMF_ADDR_FAMILY_IP6; + } + + return adrfam; +} + +/* These string definitions must match with the kernel */ +static const char *cntrltype_str[] = { + [NVME_CTRL_CNTRLTYPE_IO] = "io", + [NVME_CTRL_CNTRLTYPE_DISCOVERY] = "discovery", + [NVME_CTRL_CNTRLTYPE_ADMIN] = "admin", +}; + +static const char *dctype_str[] = { + [NVME_CTRL_DCTYPE_NOT_REPORTED] = "none", + [NVME_CTRL_DCTYPE_DDC] = "ddc", + [NVME_CTRL_DCTYPE_CDC] = "cdc", +}; + +/** + * nvme_fetch_cntrltype_dctype_from_id - Get cntrltype and dctype from identify command + * @c: Controller instance + * + * On legacy kernels the cntrltype and dctype are not exposed through the + * sysfs. We must get them directly from the controller by performing an + * identify command. 
+ */ +static void nvme_fetch_cntrltype_dctype_from_id(nvme_ctrl_t c) +{ + struct nvme_id_ctrl id = { 0 }; + + if (nvme_ctrl_identify(c, &id)) + return; + + if (!c->cntrltype) { + if (id.cntrltype > NVME_CTRL_CNTRLTYPE_ADMIN || !cntrltype_str[id.cntrltype]) + c->cntrltype = strdup("reserved"); + else + c->cntrltype = strdup(cntrltype_str[id.cntrltype]); + } + + if (!c->dctype) { + if (id.dctype > NVME_CTRL_DCTYPE_CDC || !dctype_str[id.dctype]) + c->dctype = strdup("reserved"); + else + c->dctype = strdup(dctype_str[id.dctype]); + } +} + +bool nvmf_is_registration_supported(nvme_ctrl_t c) +{ + if (!c->cntrltype || !c->dctype) + nvme_fetch_cntrltype_dctype_from_id(c); + + return !strcmp(c->dctype, "ddc") || !strcmp(c->dctype, "cdc"); +} + +int nvmf_register_ctrl(nvme_ctrl_t c, enum nvmf_dim_tas tas, __u32 *result) +{ + if (!nvmf_is_registration_supported(c)) { + errno = ENOTSUP; + return -1; + } + + /* We're registering our source address with the DC. To do + * that, we can simply send an empty string. This tells the DC + * to retrieve the source address from the socket and use that + * as the registration address. + */ + return nvmf_dim(c, tas, NVMF_TRTYPE_TCP, nvme_get_adrfam(c), "", NULL, result); +} diff --git a/src/nvme/fabrics.h b/src/nvme/fabrics.h new file mode 100644 index 0000000..7f8a373 --- /dev/null +++ b/src/nvme/fabrics.h @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ +#ifndef _LIBNVME_FABRICS_H +#define _LIBNVME_FABRICS_H + +#include <stdbool.h> +#include <stdint.h> +#include "tree.h" + +/** + * DOC: fabrics.h + * + * Fabrics-specific definitions. + */ + +/* default to 600 seconds of reconnect attempts before giving up */ +#define NVMF_DEF_CTRL_LOSS_TMO 600 + +/** + * struct nvme_fabrics_config - Defines all linux nvme fabrics initiator options + * @host_traddr: Host transport address + * @host_iface: Host interface name + * @queue_size: Number of IO queue entries + * @nr_io_queues: Number of controller IO queues to establish + * @reconnect_delay: Time between two consecutive reconnect attempts. + * @ctrl_loss_tmo: Override the default controller reconnect attempt timeout in seconds + * @fast_io_fail_tmo: Set the fast I/O fail timeout in seconds. + * @keep_alive_tmo: Override the default keep-alive-timeout to this value in seconds + * @nr_write_queues: Number of queues to use for exclusively for writing + * @nr_poll_queues: Number of queues to reserve for polling completions + * @tos: Type of service + * @duplicate_connect: Allow multiple connections to the same target + * @disable_sqflow: Disable controller sq flow control + * @hdr_digest: Generate/verify header digest (TCP) + * @data_digest: Generate/verify data digest (TCP) + * @tls: Start TLS on the connection (TCP) + */ +struct nvme_fabrics_config { + char *host_traddr; + char *host_iface; + int queue_size; + int nr_io_queues; + int reconnect_delay; + int ctrl_loss_tmo; + int fast_io_fail_tmo; + int keep_alive_tmo; + int nr_write_queues; + int nr_poll_queues; + int tos; + + bool duplicate_connect; + bool disable_sqflow; + bool hdr_digest; + bool data_digest; + bool tls; +}; + +/** + * nvmf_trtype_str() - Decode TRTYPE field + * @trtype: value to be decoded + * + * Decode the transport type field in the discovery + * log page entry. 
+ * + * Return: decoded string + */ +const char *nvmf_trtype_str(__u8 trtype); + +/** + * nvmf_adrfam_str() - Decode ADRFAM field + * @adrfam: value to be decoded + * + * Decode the address family field in the discovery + * log page entry. + * + * Return: decoded string + */ +const char *nvmf_adrfam_str(__u8 adrfam); + +/** + * nvmf_subtype_str() - Decode SUBTYPE field + * @subtype: value to be decoded + * + * Decode the subsystem type field in the discovery + * log page entry. + * + * Return: decoded string + */ +const char *nvmf_subtype_str(__u8 subtype); + +/** + * nvmf_treq_str() - Decode TREQ field + * @treq: value to be decoded + * + * Decode the transport requirements field in the + * discovery log page entry. + * + * Return: decoded string + */ +const char *nvmf_treq_str(__u8 treq); + +/** + * nvmf_eflags_str() - Decode EFLAGS field + * @eflags: value to be decoded + * + * Decode the EFLAGS field in the discovery log page + * entry. + * + * Return: decoded string + */ +const char *nvmf_eflags_str(__u16 eflags); + +/** + * nvmf_sectype_str() - Decode SECTYPE field + * @sectype: value to be decoded + * + * Decode the SECTYPE field in the discovery log page + * entry. + * + * Return: decoded string + */ +const char *nvmf_sectype_str(__u8 sectype); + +/** + * nvmf_prtype_str() - Decode RDMA Provider type field + * @prtype: value to be decoded + * + * Decode the RDMA Provider type field in the discovery + * log page entry. + * + * Return: decoded string + */ +const char *nvmf_prtype_str(__u8 prtype); + +/** + * nvmf_qptype_str() - Decode RDMA QP Service type field + * @qptype: value to be decoded + * + * Decode the RDMA QP Service type field in the discovery log page + * entry. + * + * Return: decoded string + */ +const char *nvmf_qptype_str(__u8 qptype); + +/** + * nvmf_cms_str() - Decode RDMA connection management service field + * @cms: value to be decoded + * + * Decode the RDMA connection management service field in the discovery + * log page entry. + * + * Return: decoded string + */ +const char *nvmf_cms_str(__u8 cms); + +/** + * nvmf_default_config() - Default values for fabrics configuration + * @cfg: config values to set + * + * Initializes @cfg with default values. + */ +void nvmf_default_config(struct nvme_fabrics_config *cfg); + +/** + * nvmf_update_config() - Update fabrics configuration values + * @c: Controller to be modified + * @cfg: Updated configuration values + * + * Updates the values from @c with the configuration values from @cfg; + * all non-default values from @cfg will overwrite the values in @c. + */ +void nvmf_update_config(nvme_ctrl_t c, const struct nvme_fabrics_config *cfg); + +/** + * nvmf_add_ctrl() - Connect a controller and update topology + * @h: Host to which the controller should be attached + * @c: Controller to be connected + * @cfg: Default configuration for the controller + * + * Issues a 'connect' command to the NVMe-oF controller and inserts @c + * into the topology using @h as parent. + * @c must be initialized and not connected to the topology. + * + * Return: 0 on success; on failure errno is set and -1 is returned. 
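+ *
+ * A minimal usage sketch (illustrative only): @h and @c are assumed to have
+ * been obtained through the tree API beforehand (e.g. nvme_default_host()
+ * and nvme_create_ctrl()), and error handling is trimmed:
+ *
+ *	struct nvme_fabrics_config cfg;
+ *
+ *	nvmf_default_config(&cfg);
+ *	cfg.keep_alive_tmo = 30;	// optional override of one default
+ *	if (nvmf_add_ctrl(h, c, &cfg) < 0)
+ *		fprintf(stderr, "failed to connect %s: %s\n",
+ *			nvme_ctrl_get_name(c), strerror(errno));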
+ */
+int nvmf_add_ctrl(nvme_host_t h, nvme_ctrl_t c,
+		  const struct nvme_fabrics_config *cfg);
+
+/**
+ * nvmf_get_discovery_log() - Return the discovery log page
+ * @c: Discovery controller to use
+ * @logp: Pointer to the log page to be returned
+ * @max_retries: Maximum number of attempts to fetch a consistent copy of
+ *		 the log page before giving up
+ *
+ * Return: 0 on success; on failure -1 is returned and errno is set
+ */
+int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp,
+			   int max_retries);
+
+/**
+ * nvmf_hostnqn_generate() - Generate a machine specific host NQN
+ * Returns: An NVMe qualified name (NQN) string based on the machine
+ * identifier, or NULL if not successful.
+ */
+char *nvmf_hostnqn_generate();
+
+/**
+ * nvmf_hostnqn_from_file() - Reads the host NVMe qualified name from the config
+ *			      default location in @SYSCONFDIR@/nvme/
+ * Return: The host NQN, or NULL if unsuccessful. If found, the caller
+ * is responsible for freeing the string.
+ */
+char *nvmf_hostnqn_from_file();
+
+/**
+ * nvmf_hostid_from_file() - Reads the host identifier from the config default
+ *			     location in @SYSCONFDIR@/nvme/.
+ * Return: The host identifier, or NULL if unsuccessful. If found, the caller
+ *	   is responsible for freeing the string.
+ */
+char *nvmf_hostid_from_file();
+
+/**
+ * nvmf_connect_disc_entry() - Connect controller based on the discovery log page entry
+ * @h: Host to which the controller should be connected
+ * @e: Discovery log page entry
+ * @defcfg: Default configuration to be used for the new controller
+ * @discover: Set to 'true' if the new controller is a discovery controller
+ *
+ * Return: Pointer to the new controller
+ */
+nvme_ctrl_t nvmf_connect_disc_entry(nvme_host_t h,
+	struct nvmf_disc_log_entry *e,
+	const struct nvme_fabrics_config *defcfg, bool *discover);
+
+/**
+ * nvmf_is_registration_supported() - Check whether registration can be performed.
+ * @c: Controller instance
+ *
+ * Only discovery controllers (DC) that comply with TP8010 support
+ * explicit registration with the DIM PDU. These can be identified by
+ * looking at the value of the dctype field in the Identify command
+ * response. A value of 1 (DDC) or 2 (CDC) indicates that the DC
+ * supports explicit registration.
+ *
+ * Return: true if the controller supports explicit registration, false
+ * otherwise.
+ */
+bool nvmf_is_registration_supported(nvme_ctrl_t c);
+
+/**
+ * nvmf_register_ctrl() - Perform registration task with a DC
+ * @c: Controller instance
+ * @tas: Task field of the Command Dword 10 (cdw10). Indicates whether to
+ *	 perform a Registration, Deregistration, or Registration-update.
+ * @result: The command-specific result returned by the DC upon command
+ *	    completion.
+ *
+ * Perform registration task with a Discovery Controller (DC). Three
+ * tasks are supported: register, deregister, and registration update.
+ *
+ * Return: 0 on success; on failure -1 is returned and errno is set
+ */
+int nvmf_register_ctrl(nvme_ctrl_t c, enum nvmf_dim_tas tas, __u32 *result);
+
+#endif /* _LIBNVME_FABRICS_H */
diff --git a/src/nvme/filters.c b/src/nvme/filters.c
new file mode 100644
index 0000000..b5959a8
--- /dev/null
+++ b/src/nvme/filters.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * This file is part of libnvme.
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <dirent.h> +#include <libgen.h> + +#include <sys/param.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <fcntl.h> +#include <unistd.h> + +#include "filters.h" +#include "types.h" +#include "util.h" + +const char *nvme_ctrl_sysfs_dir = "/sys/class/nvme"; +const char *nvme_ns_sysfs_dir = "/sys/block"; +const char *nvme_subsys_sysfs_dir = "/sys/class/nvme-subsystem"; + +int nvme_namespace_filter(const struct dirent *d) +{ + int i, n; + + if (d->d_name[0] == '.') + return 0; + + if (strstr(d->d_name, "nvme")) + if (sscanf(d->d_name, "nvme%dn%d", &i, &n) == 2) + return 1; + + return 0; +} + +int nvme_paths_filter(const struct dirent *d) +{ + int i, c, n; + + if (d->d_name[0] == '.') + return 0; + + if (strstr(d->d_name, "nvme")) + if (sscanf(d->d_name, "nvme%dc%dn%d", &i, &c, &n) == 3) + return 1; + + return 0; +} + +int nvme_ctrls_filter(const struct dirent *d) +{ + int i, c, n; + + if (d->d_name[0] == '.') + return 0; + + if (strstr(d->d_name, "nvme")) { + if (sscanf(d->d_name, "nvme%dc%dn%d", &i, &c, &n) == 3) + return 0; + if (sscanf(d->d_name, "nvme%dn%d", &i, &n) == 2) + return 0; + if (sscanf(d->d_name, "nvme%d", &i) == 1) + return 1; + } + + return 0; +} + +int nvme_subsys_filter(const struct dirent *d) +{ + int i; + + if (d->d_name[0] == '.') + return 0; + + if (strstr(d->d_name, "nvme-subsys")) + if (sscanf(d->d_name, "nvme-subsys%d", &i) == 1) + return 1; + + return 0; +} + +int nvme_scan_subsystems(struct dirent ***subsys) +{ + return scandir(nvme_subsys_sysfs_dir, subsys, nvme_subsys_filter, + alphasort); +} + +int nvme_scan_subsystem_namespaces(nvme_subsystem_t s, struct dirent ***ns) +{ + return scandir(nvme_subsystem_get_sysfs_dir(s), ns, + nvme_namespace_filter, alphasort); +} + +int nvme_scan_ctrls(struct dirent ***ctrls) +{ + return scandir(nvme_ctrl_sysfs_dir, ctrls, nvme_ctrls_filter, + alphasort); +} + +int nvme_scan_ctrl_namespace_paths(nvme_ctrl_t c, struct dirent ***paths) +{ + return scandir(nvme_ctrl_get_sysfs_dir(c), paths, + nvme_paths_filter, alphasort); +} + +int nvme_scan_ctrl_namespaces(nvme_ctrl_t c, struct dirent ***ns) +{ + return scandir(nvme_ctrl_get_sysfs_dir(c), ns, + nvme_namespace_filter, alphasort); +} diff --git a/src/nvme/filters.h b/src/nvme/filters.h new file mode 100644 index 0000000..49bbeea --- /dev/null +++ b/src/nvme/filters.h @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + */ + +#ifndef _LIBNVME_FILTERS_H +#define _LIBNVME_FILTERS_H + +#include <dirent.h> +#include "tree.h" + +/** + * DOC: filters.h + * + * libnvme directory filter + */ + +/** + * nvme_namespace_filter() - Filter for namespaces + * @d: dirent to check + * + * Return: 1 if @d matches, 0 otherwise + */ +int nvme_namespace_filter(const struct dirent *d); + +/** + * nvme_paths_filter() - Filter for paths + * @d: dirent to check + * + * Return: 1 if @d matches, 0 otherwise + */ +int nvme_paths_filter(const struct dirent *d); + +/** + * nvme_ctrls_filter() - Filter for controllers + * @d: dirent to check + * + * Return: 1 if @d matches, 0 otherwise + */ +int nvme_ctrls_filter(const struct dirent *d); + +/** + * nvme_subsys_filter() - Filter for subsystems + * @d: dirent to check + * + * Return: 1 if @d matches, 0 otherwise + */ +int nvme_subsys_filter(const struct dirent *d); + +/** + * nvme_scan_subsystems() - Scan for subsystems + * @subsys: Pointer to array of dirents + * + * Return: number of entries in @subsys + */ +int nvme_scan_subsystems(struct dirent ***subsys); + +/** + * nvme_scan_subsystem_namespaces() - Scan for namespaces in a subsystem + * @s: Subsystem to scan + * @ns: Pointer to array of dirents + * + * Return: number of entries in @ns + */ +int nvme_scan_subsystem_namespaces(nvme_subsystem_t s, struct dirent ***ns); + +/** + * nvme_scan_ctrls() - Scan for controllers + * @ctrls: Pointer to array of dirents + * + * Return: number of entries in @ctrls + */ +int nvme_scan_ctrls(struct dirent ***ctrls); + +/** + * nvme_scan_ctrl_namespace_paths() - Scan for namespace paths in a controller + * @c: Controller to scan + * @paths: Pointer to array of dirents + * + * Return: number of entries in @paths + */ +int nvme_scan_ctrl_namespace_paths(nvme_ctrl_t c, struct dirent ***paths); + +/** + * nvme_scan_ctrl_namespaces() - Scan for namespaces in a controller + * @c: Controller to scan + * @ns: Pointer to array of dirents + * + * Return: number of entries in @ns + */ +int nvme_scan_ctrl_namespaces(nvme_ctrl_t c, struct dirent ***ns); + +#endif /* _LIBNVME_FILTERS_H */ diff --git a/src/nvme/ioctl.c b/src/nvme/ioctl.c new file mode 100644 index 0000000..f48b465 --- /dev/null +++ b/src/nvme/ioctl.c @@ -0,0 +1,1870 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ +#include <errno.h> +#include <fcntl.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <sys/ioctl.h> +#include <sys/stat.h> + +#include <ccan/build_assert/build_assert.h> +#include <ccan/endian/endian.h> + +#include "ioctl.h" +#include "util.h" + +static int nvme_verify_chr(int fd) +{ + static struct stat nvme_stat; + int err = fstat(fd, &nvme_stat); + + if (err < 0) + return errno; + + if (!S_ISCHR(nvme_stat.st_mode)) { + errno = ENOTBLK; + return -1; + } + return 0; +} + +int nvme_subsystem_reset(int fd) +{ + int ret; + + ret = nvme_verify_chr(fd); + if (ret) + return ret; + return ioctl(fd, NVME_IOCTL_SUBSYS_RESET); +} + +int nvme_ctrl_reset(int fd) +{ + int ret; + + ret = nvme_verify_chr(fd); + if (ret) + return ret; + return ioctl(fd, NVME_IOCTL_RESET); +} + +int nvme_ns_rescan(int fd) +{ + int ret; + + ret = nvme_verify_chr(fd); + if (ret) + return ret; + return ioctl(fd, NVME_IOCTL_RESCAN); +} + +int nvme_get_nsid(int fd, __u32 *nsid) +{ + errno = 0; + *nsid = ioctl(fd, NVME_IOCTL_ID); + return -1 * (errno != 0); +} + +static int nvme_submit_passthru64(int fd, unsigned long ioctl_cmd, + struct nvme_passthru_cmd64 *cmd, + __u64 *result) +{ + int err = ioctl(fd, ioctl_cmd, cmd); + + if (err >= 0 && result) + *result = cmd->result; + return err; +} + +static int nvme_submit_passthru(int fd, unsigned long ioctl_cmd, + struct nvme_passthru_cmd *cmd, __u32 *result) +{ + int err = ioctl(fd, ioctl_cmd, cmd); + + if (err >= 0 && result) + *result = cmd->result; + return err; +} + +static int nvme_passthru64(int fd, unsigned long ioctl_cmd, __u8 opcode, + __u8 flags, __u16 rsvd, __u32 nsid, __u32 cdw2, + __u32 cdw3, __u32 cdw10, __u32 cdw11, __u32 cdw12, + __u32 cdw13, __u32 cdw14, __u32 cdw15, + __u32 data_len, void *data, __u32 metadata_len, + void *metadata, __u32 timeout_ms, __u64 *result) +{ + struct nvme_passthru_cmd64 cmd = { + .opcode = opcode, + .flags = flags, + .rsvd1 = rsvd, + .nsid = nsid, + .cdw2 = cdw2, + .cdw3 = cdw3, + .metadata = (__u64)(uintptr_t)metadata, + .addr = (__u64)(uintptr_t)data, + .metadata_len = metadata_len, + .data_len = data_len, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw13 = cdw13, + .cdw14 = cdw14, + .cdw15 = cdw15, + .timeout_ms = timeout_ms, + }; + + return nvme_submit_passthru64(fd, ioctl_cmd, &cmd, result); +} + +static int nvme_passthru(int fd, unsigned long ioctl_cmd, __u8 opcode, + __u8 flags, __u16 rsvd, __u32 nsid, __u32 cdw2, + __u32 cdw3, __u32 cdw10, __u32 cdw11, __u32 cdw12, + __u32 cdw13, __u32 cdw14, __u32 cdw15, __u32 data_len, + void *data, __u32 metadata_len, void *metadata, + __u32 timeout_ms, __u32 *result) +{ + struct nvme_passthru_cmd cmd = { + .opcode = opcode, + .flags = flags, + .rsvd1 = rsvd, + .nsid = nsid, + .cdw2 = cdw2, + .cdw3 = cdw3, + .metadata = (__u64)(uintptr_t)metadata, + .addr = (__u64)(uintptr_t)data, + .metadata_len = metadata_len, + .data_len = data_len, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw13 = cdw13, + .cdw14 = cdw14, + .cdw15 = cdw15, + .timeout_ms = timeout_ms, + }; + + return nvme_submit_passthru(fd, ioctl_cmd, &cmd, result); +} + +int nvme_submit_admin_passthru64(int fd, struct nvme_passthru_cmd64 *cmd, + __u64 *result) +{ + return nvme_submit_passthru64(fd, NVME_IOCTL_ADMIN64_CMD, cmd, result); +} + +int nvme_admin_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, 
__u32 cdw3, __u32 cdw10, + __u32 cdw11, __u32 cdw12, __u32 cdw13, __u32 cdw14, + __u32 cdw15, __u32 data_len, void *data, + __u32 metadata_len, void *metadata, __u32 timeout_ms, + __u64 *result) +{ + return nvme_passthru64(fd, NVME_IOCTL_ADMIN64_CMD, opcode, flags, rsvd, + nsid, cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, + cdw14, cdw15, data_len, data, metadata_len, + metadata, timeout_ms, result); +} + +int nvme_submit_admin_passthru(int fd, struct nvme_passthru_cmd *cmd, __u32 *result) +{ + return nvme_submit_passthru(fd, NVME_IOCTL_ADMIN_CMD, cmd, result); +} + +int nvme_admin_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, + __u32 cdw11, __u32 cdw12, __u32 cdw13, __u32 cdw14, + __u32 cdw15, __u32 data_len, void *data, + __u32 metadata_len, void *metadata, __u32 timeout_ms, + __u32 *result) +{ + return nvme_passthru(fd, NVME_IOCTL_ADMIN_CMD, opcode, flags, rsvd, + nsid, cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, + cdw14, cdw15, data_len, data, metadata_len, + metadata, timeout_ms, result); +} + +enum nvme_cmd_dword_fields { + NVME_DEVICE_SELF_TEST_CDW10_STC_SHIFT = 0, + NVME_DEVICE_SELF_TEST_CDW10_STC_MASK = 0x7, + NVME_DIRECTIVE_CDW11_DOPER_SHIFT = 0, + NVME_DIRECTIVE_CDW11_DTYPE_SHIFT = 8, + NVME_DIRECTIVE_CDW11_DPSEC_SHIFT = 16, + NVME_DIRECTIVE_CDW11_DOPER_MASK = 0xff, + NVME_DIRECTIVE_CDW11_DTYPE_MASK = 0xff, + NVME_DIRECTIVE_CDW11_DPSEC_MASK = 0xffff, + NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR_SHIFT = 0, + NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE_SHIFT = 1, + NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR_MASK = 0x1, + NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE_MASK = 0x1, + NVME_FW_COMMIT_CDW10_FS_SHIFT = 0, + NVME_FW_COMMIT_CDW10_CA_SHIFT = 3, + NVME_FW_COMMIT_CDW10_BPID_SHIFT = 31, + NVME_FW_COMMIT_CDW10_FS_MASK = 0x7, + NVME_FW_COMMIT_CDW10_CA_MASK = 0x7, + NVME_FW_COMMIT_CDW10_BPID_MASK = 0x1, + NVME_GET_FEATURES_CDW10_SEL_SHIFT = 8, + NVME_GET_FEATURES_CDW10_SEL_MASK = 0x7, + NVME_SET_FEATURES_CDW10_SAVE_SHIFT = 31, + NVME_SET_FEATURES_CDW10_SAVE_MASK = 0x1, + NVME_FEATURES_CDW10_FID_SHIFT = 0, + NVME_FEATURES_CDW14_UUID_SHIFT = 0, + NVME_FEATURES_CDW10_FID_MASK = 0xff, + NVME_FEATURES_CDW14_UUID_MASK = 0x7f, + NVME_LOG_CDW10_LID_SHIFT = 0, + NVME_LOG_CDW10_LSP_SHIFT = 8, + NVME_LOG_CDW10_RAE_SHIFT = 15, + NVME_LOG_CDW10_NUMDL_SHIFT = 16, + NVME_LOG_CDW11_NUMDU_SHIFT = 0, + NVME_LOG_CDW11_LSI_SHIFT = 16, + NVME_LOG_CDW14_UUID_SHIFT = 0, + NVME_LOG_CDW14_CSI_SHIFT = 24, + NVME_LOG_CDW14_OT_SHIFT = 23, + NVME_LOG_CDW10_LID_MASK = 0xff, + NVME_LOG_CDW10_LSP_MASK = 0xf, + NVME_LOG_CDW10_RAE_MASK = 0x1, + NVME_LOG_CDW10_NUMDL_MASK = 0xffff, + NVME_LOG_CDW11_NUMDU_MASK = 0xffff, + NVME_LOG_CDW11_LSI_MASK = 0xff, + NVME_LOG_CDW14_UUID_MASK = 0x7f, + NVME_LOG_CDW14_CSI_MASK = 0xff, + NVME_LOG_CDW14_OT_MASK = 0x1, + NVME_IDENTIFY_CDW10_CNS_SHIFT = 0, + NVME_IDENTIFY_CDW10_CNTID_SHIFT = 16, + NVME_IDENTIFY_CDW11_CNSSPECID_SHIFT = 0, + NVME_IDENTIFY_CDW14_UUID_SHIFT = 0, + NVME_IDENTIFY_CDW11_CSI_SHIFT = 24, + NVME_IDENTIFY_CDW10_CNS_MASK = 0xff, + NVME_IDENTIFY_CDW10_CNTID_MASK = 0xffff, + NVME_IDENTIFY_CDW11_CNSSPECID_MASK = 0xffff, + NVME_IDENTIFY_CDW14_UUID_MASK = 0x7f, + NVME_IDENTIFY_CDW11_CSI_MASK = 0xff, + NVME_NAMESPACE_ATTACH_CDW10_SEL_SHIFT = 0, + NVME_NAMESPACE_ATTACH_CDW10_SEL_MASK = 0xf, + NVME_NAMESPACE_MGMT_CDW10_SEL_SHIFT = 0, + NVME_NAMESPACE_MGMT_CDW10_SEL_MASK = 0xf, + NVME_NAMESPACE_MGMT_CDW11_CSI_SHIFT = 24, + NVME_NAMESPACE_MGMT_CDW11_CSI_MASK = 0xff, + NVME_VIRT_MGMT_CDW10_ACT_SHIFT = 0, + NVME_VIRT_MGMT_CDW10_RT_SHIFT 
= 8, + NVME_VIRT_MGMT_CDW10_CNTLID_SHIFT = 16, + NVME_VIRT_MGMT_CDW11_NR_SHIFT = 0, + NVME_VIRT_MGMT_CDW10_ACT_MASK = 0xf, + NVME_VIRT_MGMT_CDW10_RT_MASK = 0x7, + NVME_VIRT_MGMT_CDW10_CNTLID_MASK = 0xffff, + NVME_VIRT_MGMT_CDW11_NR_MASK = 0xffff, + NVME_FORMAT_CDW10_LBAF_SHIFT = 0, + NVME_FORMAT_CDW10_MSET_SHIFT = 4, + NVME_FORMAT_CDW10_PI_SHIFT = 5, + NVME_FORMAT_CDW10_PIL_SHIFT = 8, + NVME_FORMAT_CDW10_SES_SHIFT = 9, + NVME_FORMAT_CDW10_LBAF_MASK = 0xf, + NVME_FORMAT_CDW10_MSET_MASK = 0x1, + NVME_FORMAT_CDW10_PI_MASK = 0x7, + NVME_FORMAT_CDW10_PIL_MASK = 0x1, + NVME_FORMAT_CDW10_SES_MASK = 0x7, + NVME_SANITIZE_CDW10_SANACT_SHIFT = 0, + NVME_SANITIZE_CDW10_AUSE_SHIFT = 3, + NVME_SANITIZE_CDW10_OWPASS_SHIFT = 4, + NVME_SANITIZE_CDW10_OIPBP_SHIFT = 8, + NVME_SANITIZE_CDW10_NODAS_SHIFT = 9, + NVME_SANITIZE_CDW10_SANACT_MASK = 0x7, + NVME_SANITIZE_CDW10_AUSE_MASK = 0x1, + NVME_SANITIZE_CDW10_OWPASS_MASK = 0xf, + NVME_SANITIZE_CDW10_OIPBP_MASK = 0x1, + NVME_SANITIZE_CDW10_NODAS_MASK = 0x1, + NVME_SECURITY_NSSF_SHIFT = 0, + NVME_SECURITY_SPSP0_SHIFT = 8, + NVME_SECURITY_SPSP1_SHIFT = 16, + NVME_SECURITY_SECP_SHIFT = 24, + NVME_SECURITY_NSSF_MASK = 0xff, + NVME_SECURITY_SPSP0_MASK = 0xff, + NVME_SECURITY_SPSP1_MASK = 0xff, + NVME_SECURITY_SECP_MASK = 0xffff, + NVME_GET_LBA_STATUS_CDW13_RL_SHIFT = 0, + NVME_GET_LBA_STATUS_CDW13_ATYPE_SHIFT = 24, + NVME_GET_LBA_STATUS_CDW13_RL_MASK = 0xffff, + NVME_GET_LBA_STATUS_CDW13_ATYPE_MASK = 0xff, + NVME_ZNS_MGMT_SEND_ZSASO_SHIFT = 9, + NVME_ZNS_MGMT_SEND_ZSASO_MASK = 0x1, + NVME_ZNS_MGMT_SEND_SEL_SHIFT = 8, + NVME_ZNS_MGMT_SEND_SEL_MASK = 0x1, + NVME_ZNS_MGMT_SEND_ZSA_SHIFT = 0, + NVME_ZNS_MGMT_SEND_ZSA_MASK = 0xff, + NVME_ZNS_MGMT_RECV_ZRA_SHIFT = 0, + NVME_ZNS_MGMT_RECV_ZRA_MASK = 0xff, + NVME_ZNS_MGMT_RECV_ZRASF_SHIFT = 8, + NVME_ZNS_MGMT_RECV_ZRASF_MASK = 0xff, + NVME_ZNS_MGMT_RECV_ZRAS_FEAT_SHIFT = 16, + NVME_ZNS_MGMT_RECV_ZRAS_FEAT_MASK = 0x1, + NVME_DIM_TAS_SHIFT = 0, + NVME_DIM_TAS_MASK = 0xF, +}; + +enum features { + NVME_FEATURES_ARBITRATION_BURST_SHIFT = 0, + NVME_FEATURES_ARBITRATION_LPW_SHIFT = 8, + NVME_FEATURES_ARBITRATION_MPW_SHIFT = 16, + NVME_FEATURES_ARBITRATION_HPW_SHIFT = 24, + NVME_FEATURES_ARBITRATION_BURST_MASK = 0x7, + NVME_FEATURES_ARBITRATION_LPW_MASK = 0xff, + NVME_FEATURES_ARBITRATION_MPW_MASK = 0xff, + NVME_FEATURES_ARBITRATION_HPW_MASK = 0xff, + NVME_FEATURES_PWRMGMT_PS_SHIFT = 0, + NVME_FEATURES_PWRMGMT_WH_SHIFT = 5, + NVME_FEATURES_PWRMGMT_PS_MASK = 0x1f, + NVME_FEATURES_PWRMGMT_WH_MASK = 0x7, + NVME_FEATURES_TMPTH_SHIFT = 0, + NVME_FEATURES_TMPSEL_SHIFT = 16, + NVME_FEATURES_THSEL_SHIFT = 20, + NVME_FEATURES_TMPTH_MASK = 0xff, + NVME_FEATURES_TMPSEL_MASK = 0xf, + NVME_FEATURES_THSEL_MASK = 0x3, + NVME_FEATURES_ERROR_RECOVERY_TLER_SHIFT = 0, + NVME_FEATURES_ERROR_RECOVERY_DULBE_SHIFT = 16, + NVME_FEATURES_ERROR_RECOVERY_TLER_MASK = 0xff, + NVME_FEATURES_ERROR_RECOVERY_DULBE_MASK = 0x1, + NVME_FEATURES_VWC_WCE_SHIFT = 0, + NVME_FEATURES_VWC_WCE_MASK = 0x1, + NVME_FEATURES_IRQC_THR_SHIFT = 0, + NVME_FEATURES_IRQC_TIME_SHIFT = 8, + NVME_FEATURES_IRQC_THR_MASK = 0xff, + NVME_FEATURES_IRQC_TIME_MASK = 0xff, + NVME_FEATURES_IVC_IV_SHIFT = 0, + NVME_FEATURES_IVC_CD_SHIFT = 16, + NVME_FEATURES_IVC_IV_MASK = 0xffff, + NVME_FEATURES_IVC_CD_MASK = 0x1, + NVME_FEATURES_WAN_DN_SHIFT = 0, + NVME_FEATURES_WAN_DN_MASK = 0x1, + NVME_FEATURES_APST_APSTE_SHIFT = 0, + NVME_FEATURES_APST_APSTE_MASK = 0x1, + NVME_FEATURES_HCTM_TMT2_SHIFT = 0, + NVME_FEATURES_HCTM_TMT1_SHIFT = 16, + NVME_FEATURES_HCTM_TMT2_MASK = 0xffff, + 
NVME_FEATURES_HCTM_TMT1_MASK = 0xffff, + NVME_FEATURES_NOPS_NOPPME_SHIFT = 0, + NVME_FEATURES_NOPS_NOPPME_MASK = 0x1, + NVME_FEATURES_PLM_PLE_SHIFT = 0, + NVME_FEATURES_PLM_PLE_MASK = 0x1, + NVME_FEATURES_PLM_WINDOW_SELECT_SHIFT = 0, + NVME_FEATURES_PLM_WINDOW_SELECT_MASK = 0xf, + NVME_FEATURES_LBAS_LSIRI_SHIFT = 0, + NVME_FEATURES_LBAS_LSIPI_SHIFT = 16, + NVME_FEATURES_LBAS_LSIRI_MASK = 0xffff, + NVME_FEATURES_LBAS_LSIPI_MASK = 0xffff, + NVME_FEATURES_IOCSP_IOCSCI_SHIFT = 0, + NVME_FEATURES_IOCSP_IOCSCI_MASK = 0xff, +}; + +int nvme_identify(struct nvme_identify_args *args) +{ + __u32 cdw10 = NVME_SET(args->cntid, IDENTIFY_CDW10_CNTID) | + NVME_SET(args->cns, IDENTIFY_CDW10_CNS); + __u32 cdw11 = NVME_SET(args->cns_specific_id, IDENTIFY_CDW11_CNSSPECID) | + NVME_SET(args->csi, IDENTIFY_CDW11_CSI); + __u32 cdw14 = NVME_SET(args->uuidx, IDENTIFY_CDW14_UUID); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_identify, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->data, + .data_len = NVME_IDENTIFY_DATA_SIZE, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw14 = cdw14, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_get_log(struct nvme_get_log_args *args) +{ + __u32 numd = (args->len >> 2) - 1; + __u16 numdu = numd >> 16, numdl = numd & 0xffff; + + __u32 cdw10 = NVME_SET(args->lid, LOG_CDW10_LID) | + NVME_SET(args->lsp, LOG_CDW10_LSP) | + NVME_SET(!!args->rae, LOG_CDW10_RAE) | + NVME_SET(numdl, LOG_CDW10_NUMDL); + __u32 cdw11 = NVME_SET(numdu, LOG_CDW11_NUMDU) | + NVME_SET(args->lsi, LOG_CDW11_LSI); + __u32 cdw12 = args->lpo & 0xffffffff; + __u32 cdw13 = args->lpo >> 32; + __u32 cdw14 = NVME_SET(args->uuidx, LOG_CDW14_UUID) | + NVME_SET(!!args->ot, LOG_CDW14_OT) | + NVME_SET(args->csi, LOG_CDW14_CSI); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_get_log_page, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->log, + .data_len = args->len, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw13 = cdw13, + .cdw14 = cdw14, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(struct nvme_get_log_args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_set_features(struct nvme_set_features_args *args) +{ + __u32 cdw10 = NVME_SET(args->fid, FEATURES_CDW10_FID) | + NVME_SET(!!args->save, SET_FEATURES_CDW10_SAVE); + __u32 cdw14 = NVME_SET(args->uuidx, FEATURES_CDW14_UUID); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_set_features, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->data, + .data_len = args->data_len, + .cdw10 = cdw10, + .cdw11 = args->cdw11, + .cdw12 = args->cdw12, + .cdw13 = args->cdw13, + .cdw14 = cdw14, + .cdw15 = args->cdw15, + .timeout_ms = args->timeout, + }; + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +static int __nvme_set_features(int fd, __u8 fid, __u32 cdw11, bool save, + __u32 *result) +{ + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = fid, + .nsid = NVME_NSID_NONE, + .cdw11 = cdw11, + .cdw12 = 0, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + return nvme_set_features(&args); +} + +int nvme_set_features_arbitration(int fd, __u8 ab, 
__u8 lpw, __u8 mpw, + __u8 hpw, bool save, __u32 *result) +{ + __u32 value = NVME_SET(ab, FEAT_ARBITRATION_BURST) | + NVME_SET(lpw, FEAT_ARBITRATION_LPW) | + NVME_SET(mpw, FEAT_ARBITRATION_MPW) | + NVME_SET(hpw, FEAT_ARBITRATION_HPW); + + return __nvme_set_features(fd, NVME_FEAT_FID_ARBITRATION, value, save, + result); +} + +int nvme_set_features_power_mgmt(int fd, __u8 ps, __u8 wh, bool save, + __u32 *result) +{ + __u32 value = NVME_SET(ps, FEAT_PWRMGMT_PS) | + NVME_SET(wh, FEAT_PWRMGMT_PS); + + return __nvme_set_features(fd, NVME_FEAT_FID_POWER_MGMT, value, save, + result); +} + +int nvme_set_features_lba_range(int fd, __u32 nsid, __u32 nr_ranges, bool save, + struct nvme_lba_range_type *data, __u32 *result) +{ + return -1; +} + +int nvme_set_features_temp_thresh(int fd, __u16 tmpth, __u8 tmpsel, + enum nvme_feat_tmpthresh_thsel thsel, + bool save, __u32 *result) +{ + __u32 value = NVME_SET(tmpth, FEAT_TT_TMPTH) | + NVME_SET(tmpsel, FEAT_TT_TMPSEL) | + NVME_SET(thsel, FEAT_TT_THSEL); + + return __nvme_set_features(fd, NVME_FEAT_FID_TEMP_THRESH, value, save, + result); +} + +int nvme_set_features_err_recovery(int fd, __u32 nsid, __u16 tler, bool dulbe, + bool save, __u32 *result) +{ + __u32 value = NVME_SET(tler, FEAT_ERROR_RECOVERY_TLER) | + NVME_SET(!!dulbe, FEAT_ERROR_RECOVERY_DULBE); + + return __nvme_set_features(fd, NVME_FEAT_FID_ERR_RECOVERY, value, save, + result); +} + +int nvme_set_features_volatile_wc(int fd, bool wce, bool save, __u32 *result) +{ + __u32 value = NVME_SET(!!wce, FEAT_VWC_WCE); + + return __nvme_set_features(fd, NVME_FEAT_FID_VOLATILE_WC, value, save, + result); +} + +int nvme_set_features_irq_coalesce(int fd, __u8 thr, __u8 time, bool save, + __u32 *result) +{ + __u32 value = NVME_SET(thr, FEAT_IRQC_TIME) | + NVME_SET(time, FEAT_IRQC_THR); + + return __nvme_set_features(fd, NVME_FEAT_FID_IRQ_COALESCE, value, save, + result); +} + +int nvme_set_features_irq_config(int fd, __u16 iv, bool cd, bool save, + __u32 *result) +{ + __u32 value = NVME_SET(iv, FEAT_ICFG_IV) | + NVME_SET(!!cd, FEAT_ICFG_CD); + + return __nvme_set_features(fd, NVME_FEAT_FID_IRQ_CONFIG, value, save, + result); +} + +int nvme_set_features_write_atomic(int fd, bool dn, bool save, __u32 *result) +{ + __u32 value = NVME_SET(!!dn, FEAT_WA_DN); + + return __nvme_set_features(fd, NVME_FEAT_FID_WRITE_ATOMIC, value, save, + result); +} + +int nvme_set_features_async_event(int fd, __u32 events, + bool save, __u32 *result) +{ + return __nvme_set_features(fd, NVME_FEAT_FID_ASYNC_EVENT, events, save, + result); +} + +int nvme_set_features_auto_pst(int fd, bool apste, bool save, + struct nvme_feat_auto_pst *apst, __u32 *result) +{ + __u32 value = NVME_SET(!!apste, FEAT_APST_APSTE); + + return __nvme_set_features(fd, NVME_FEAT_FID_AUTO_PST, value, save, + result); +} + +int nvme_set_features_timestamp(int fd, bool save, __u64 timestamp) +{ + __le64 t = cpu_to_le64(timestamp); + struct nvme_timestamp ts; + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .nsid = NVME_NSID_NONE, + .cdw11 = 0, + .cdw12 = 0, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = sizeof(ts), + .data = &ts, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + memcpy(ts.timestamp, &t, sizeof(ts.timestamp)); + return nvme_set_features(&args); +} + +int nvme_set_features_hctm(int fd, __u16 tmt2, __u16 tmt1, + bool save, __u32 *result) +{ + __u32 value = NVME_SET(tmt2, FEAT_HCTM_TMT2) | + NVME_SET(tmt1, FEAT_HCTM_TMT1); + + return __nvme_set_features(fd, 
NVME_FEAT_FID_HCTM, value, save, + result); +} + +int nvme_set_features_nopsc(int fd, bool noppme, bool save, __u32 *result) +{ + __u32 value = NVME_SET(noppme, FEAT_NOPS_NOPPME); + + return __nvme_set_features(fd, NVME_FEAT_FID_NOPSC, value, save, + result); +} + +int nvme_set_features_rrl(int fd, __u8 rrl, __u16 nvmsetid, + bool save, __u32 *result) +{ + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_RRL, + .nsid = NVME_NSID_NONE, + .cdw11 = nvmsetid, + .cdw12 = rrl, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_set_features(&args); +} + +int nvme_set_features_plm_config(int fd, bool plm, __u16 nvmsetid, bool save, + struct nvme_plm_config *data, __u32 *result) +{ + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_PLM_CONFIG, + .nsid = NVME_NSID_NONE, + .cdw11 = nvmsetid, + .cdw12 = !!plm, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_set_features(&args); +} + +int nvme_set_features_plm_window(int fd, enum nvme_feat_plm_window_select sel, + __u16 nvmsetid, bool save, __u32 *result) +{ + __u32 cdw12 = NVME_SET(sel, FEAT_PLMW_WS); + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_PLM_WINDOW, + .nsid = NVME_NSID_NONE, + .cdw11 = nvmsetid, + .cdw12 = cdw12, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_set_features(&args); +} + +int nvme_set_features_lba_sts_interval(int fd, __u16 lsiri, __u16 lsipi, + bool save, __u32 *result) +{ + __u32 value = NVME_SET(lsiri, FEAT_LBAS_LSIRI) | + NVME_SET(lsipi, FEAT_LBAS_LSIPI); + + return __nvme_set_features(fd, NVME_FEAT_FID_LBA_STS_INTERVAL, value, + save, result); +} + +int nvme_set_features_host_behavior(int fd, bool save, + struct nvme_feat_host_behavior *data) +{ + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_HOST_BEHAVIOR, + .nsid = NVME_NSID_NONE, + .cdw11 = 0, + .cdw12 = 0, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = sizeof(*data), + .data = data, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + return nvme_set_features(&args); +} + +int nvme_set_features_sanitize(int fd, bool nodrm, bool save, __u32 *result) +{ + return __nvme_set_features(fd, NVME_FEAT_FID_SANITIZE, !!nodrm, save, + result); +} + +int nvme_set_features_endurance_evt_cfg(int fd, __u16 endgid, __u8 egwarn, + bool save, __u32 *result) +{ + __u32 value = endgid | egwarn << 16; + + return __nvme_set_features(fd, NVME_FEAT_FID_ENDURANCE_EVT_CFG, value, + save, result); +} + +int nvme_set_features_sw_progress(int fd, __u8 pbslc, bool save, + __u32 *result) +{ + return __nvme_set_features(fd, NVME_FEAT_FID_SW_PROGRESS, pbslc, save, + result); +} + +int nvme_set_features_host_id(int fd, bool save, bool exhid, __u8 *hostid) +{ + __u32 len = exhid ? 
16 : 8; + __u32 value = !!exhid; + struct nvme_set_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_HOST_ID, + .nsid = NVME_NSID_NONE, + .cdw11 = value, + .cdw12 = 0, + .save = save, + .uuidx = NVME_UUID_NONE, + .cdw15 = 0, + .data_len = len, + .data = hostid, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + return nvme_set_features(&args); +} + +int nvme_set_features_resv_mask(int fd, __u32 mask, bool save, __u32 *result) +{ + return __nvme_set_features(fd, NVME_FEAT_FID_RESV_MASK, mask, save, + result); +} + +int nvme_set_features_resv_persist(int fd, bool ptpl, bool save, __u32 *result) +{ + return __nvme_set_features(fd, NVME_FEAT_FID_RESV_PERSIST, !!ptpl, save, + result); +} + +int nvme_set_features_write_protect(int fd, enum nvme_feat_nswpcfg_state state, + bool save, __u32 *result) +{ + return __nvme_set_features(fd, NVME_FEAT_FID_WRITE_PROTECT, state, + save, result); +} + +int nvme_set_features_iocs_profile(int fd, __u8 iocsi, bool save) +{ + __u32 value = NVME_SET(iocsi, FEAT_IOCSP_IOCSCI); + + return __nvme_set_features(fd, NVME_FEAT_FID_IOCS_PROFILE, value, + save, NULL); +} + +int nvme_get_features(struct nvme_get_features_args *args) +{ + __u32 cdw10 = NVME_SET(args->fid, FEATURES_CDW10_FID) | + NVME_SET(args->sel, GET_FEATURES_CDW10_SEL); + __u32 cdw14 = NVME_SET(args->uuidx, FEATURES_CDW14_UUID); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_get_features, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->data, + .data_len = args->data_len, + .cdw10 = cdw10, + .cdw11 = args->cdw11, + .cdw14 = cdw14, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +static int __nvme_get_features(int fd, enum nvme_features_id fid, + enum nvme_get_features_sel sel, __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = fid, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_arbitration(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_ARBITRATION, sel, result); +} + +int nvme_get_features_power_mgmt(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_POWER_MGMT, sel, result); +} + +int nvme_get_features_lba_range(int fd, enum nvme_get_features_sel sel, + struct nvme_lba_range_type *data, + __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_LBA_RANGE, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_temp_thresh(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_TEMP_THRESH, sel, result); +} + +int nvme_get_features_err_recovery(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_ERR_RECOVERY, sel, + result); +} + +int nvme_get_features_volatile_wc(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, 
NVME_FEAT_FID_VOLATILE_WC, sel, result); +} + +int nvme_get_features_num_queues(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_NUM_QUEUES, sel, result); +} + +int nvme_get_features_irq_coalesce(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_IRQ_COALESCE, sel, + result); +} + +int nvme_get_features_irq_config(int fd, enum nvme_get_features_sel sel, + __u16 iv, __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_LBA_RANGE, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = iv, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_write_atomic(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_WRITE_ATOMIC, sel, + result); +} + +int nvme_get_features_async_event(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_ASYNC_EVENT, sel, result); +} + +int nvme_get_features_auto_pst(int fd, enum nvme_get_features_sel sel, + struct nvme_feat_auto_pst *apst, __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_LBA_RANGE, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_host_mem_buf(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_HOST_MEM_BUF, sel, result); +} + +int nvme_get_features_timestamp(int fd, enum nvme_get_features_sel sel, + struct nvme_timestamp *ts) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_TIMESTAMP, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = sizeof(*ts), + .data = ts, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_kato(int fd, enum nvme_get_features_sel sel, __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_KATO, sel, result); +} + +int nvme_get_features_hctm(int fd, enum nvme_get_features_sel sel, __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_HCTM, sel, result); +} + +int nvme_get_features_nopsc(int fd, enum nvme_get_features_sel sel, __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_NOPSC, sel, result); +} + +int nvme_get_features_rrl(int fd, enum nvme_get_features_sel sel, __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_RRL, sel, result); +} + +int nvme_get_features_plm_config(int fd, enum nvme_get_features_sel sel, + __u16 nvmsetid, struct nvme_plm_config *data, + __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_PLM_CONFIG, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = nvmsetid, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_plm_window(int fd, enum nvme_get_features_sel sel, + __u16 nvmsetid, __u32 *result) +{ + struct nvme_get_features_args args = { + 
.args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_PLM_WINDOW, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = nvmsetid, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_lba_sts_interval(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_LBA_STS_INTERVAL, sel, + result); +} + +int nvme_get_features_host_behavior(int fd, enum nvme_get_features_sel sel, + struct nvme_feat_host_behavior *data, + __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_HOST_BEHAVIOR, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_sanitize(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_SANITIZE, sel, result); +} + +int nvme_get_features_endurance_event_cfg(int fd, enum nvme_get_features_sel sel, + __u16 endgid, __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_ENDURANCE_EVT_CFG, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_sw_progress(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_SW_PROGRESS, sel, result); +} + +int nvme_get_features_host_id(int fd, enum nvme_get_features_sel sel, + bool exhid, __u32 len, __u8 *hostid) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_HOST_ID, + .nsid = NVME_NSID_NONE, + .sel = sel, + .cdw11 = !!exhid, + .uuidx = NVME_UUID_NONE, + .data_len = len, + .data = hostid, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_resv_mask(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_RESV_MASK, sel, result); +} + +int nvme_get_features_resv_persist(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_RESV_PERSIST, sel, result); +} + +int nvme_get_features_write_protect(int fd, __u32 nsid, + enum nvme_get_features_sel sel, + __u32 *result) +{ + struct nvme_get_features_args args = { + .args_size = sizeof(args), + .fd = fd, + .fid = NVME_FEAT_FID_WRITE_PROTECT, + .nsid = nsid, + .sel = sel, + .cdw11 = 0, + .uuidx = NVME_UUID_NONE, + .data_len = 0, + .data = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = result, + }; + + return nvme_get_features(&args); +} + +int nvme_get_features_iocs_profile(int fd, enum nvme_get_features_sel sel, + __u32 *result) +{ + return __nvme_get_features(fd, NVME_FEAT_FID_IOCS_PROFILE, sel, result); +} + +int nvme_format_nvm(struct nvme_format_nvm_args *args) +{ + __u32 cdw10 = NVME_SET(args->lbaf, FORMAT_CDW10_LBAF) | + NVME_SET(args->mset, FORMAT_CDW10_MSET) | + NVME_SET(args->pi, FORMAT_CDW10_PI) | + NVME_SET(args->pil, FORMAT_CDW10_PIL) | + NVME_SET(args->ses, FORMAT_CDW10_SES); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_format_nvm, + 
.nsid = args->nsid, + .cdw10 = cdw10, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_ns_mgmt(struct nvme_ns_mgmt_args *args) +{ + __u32 cdw10 = NVME_SET(args->sel, NAMESPACE_MGMT_CDW10_SEL); + __u32 cdw11 = NVME_SET(args->csi, NAMESPACE_MGMT_CDW11_CSI); + __u32 data_len = args->ns ? sizeof(*args->ns) : 0; + + struct nvme_passthru_cmd cmd = { + .nsid = args->nsid, + .opcode = nvme_admin_ns_mgmt, + .cdw10 = cdw10, + .cdw11 = cdw11, + .timeout_ms = args->timeout, + .data_len = data_len, + .addr = (__u64)(uintptr_t)args->ns, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_ns_attach(struct nvme_ns_attach_args *args) +{ + __u32 cdw10 = NVME_SET(args->sel, NAMESPACE_ATTACH_CDW10_SEL); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_ns_attach, + .nsid = args->nsid, + .cdw10 = cdw10, + .data_len = sizeof(*args->ctrlist), + .addr = (__u64)(uintptr_t)args->ctrlist, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_fw_download(struct nvme_fw_download_args *args) +{ + __u32 cdw10 = (args->data_len >> 2) - 1; + __u32 cdw11 = args->offset >> 2; + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_fw_download, + .cdw10 = cdw10, + .cdw11 = cdw11, + .data_len = args->data_len, + .addr = (__u64)(uintptr_t)args->data, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_fw_commit(struct nvme_fw_commit_args *args) +{ + __u32 cdw10 = NVME_SET(args->slot, FW_COMMIT_CDW10_FS) | + NVME_SET(args->action, FW_COMMIT_CDW10_CA) | + NVME_SET(args->bpid, FW_COMMIT_CDW10_BPID); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_fw_commit, + .cdw10 = cdw10, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_security_send(struct nvme_security_send_args *args) +{ + __u32 cdw10 = NVME_SET(args->secp, SECURITY_SECP) | + NVME_SET(args->spsp0, SECURITY_SPSP0) | + NVME_SET(args->spsp1, SECURITY_SPSP1) | + NVME_SET(args->nssf, SECURITY_NSSF); + __u32 cdw11 = args->tl; + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_security_send, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .data_len = args->data_len, + .addr = (__u64)(uintptr_t)args->data, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_security_receive(struct nvme_security_receive_args *args) +{ + __u32 cdw10 = NVME_SET(args->secp, SECURITY_SECP) | + NVME_SET(args->spsp0, SECURITY_SPSP0) | + NVME_SET(args->spsp1, SECURITY_SPSP1) | + NVME_SET(args->nssf, SECURITY_NSSF); + __u32 cdw11 = args->al; + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_security_recv, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .data_len = args->data_len, + .addr = (__u64)(uintptr_t)args->data, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + 
return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_get_lba_status(struct nvme_get_lba_status_args *args) +{ + __u32 cdw10 = args->slba & 0xffffffff; + __u32 cdw11 = args->slba >> 32; + __u32 cdw12 = args->mndw; + __u32 cdw13 = NVME_SET(args->rl, GET_LBA_STATUS_CDW13_RL) | + NVME_SET(args->atype, GET_LBA_STATUS_CDW13_ATYPE); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_get_lba_status, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->lbas, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw13 = cdw13, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_directive_send(struct nvme_directive_send_args *args) +{ + __u32 cdw10 = args->data_len ? (args->data_len >> 2) - 1 : 0; + __u32 cdw11 = NVME_SET(args->doper, DIRECTIVE_CDW11_DOPER) | + NVME_SET(args->dtype, DIRECTIVE_CDW11_DTYPE) | + NVME_SET(args->dspec, DIRECTIVE_CDW11_DPSEC); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_directive_send, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = args->cdw12, + .data_len = args->data_len, + .addr = (__u64)(uintptr_t)args->data, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_directive_send_id_endir(int fd, __u32 nsid, bool endir, + enum nvme_directive_dtype dtype, + struct nvme_id_directives *id) +{ + __u32 cdw12 = NVME_SET(dtype, DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE) | + NVME_SET(endir, DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR); + struct nvme_directive_send_args args = { + .args_size = sizeof(args), + .fd = fd, + .nsid = nsid, + .dspec = 0, + .dtype = NVME_DIRECTIVE_DTYPE_IDENTIFY, + .doper = NVME_DIRECTIVE_SEND_IDENTIFY_DOPER_ENDIR, + .cdw12 = cdw12, + .data_len = sizeof(*id), + .data = id, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + return nvme_directive_send(&args); +} + +int nvme_directive_recv(struct nvme_directive_recv_args *args) +{ + __u32 cdw10 = args->data_len ? 
(args->data_len >> 2) - 1 : 0; + __u32 cdw11 = NVME_SET(args->doper, DIRECTIVE_CDW11_DOPER) | + NVME_SET(args->dtype, DIRECTIVE_CDW11_DTYPE) | + NVME_SET(args->dspec, DIRECTIVE_CDW11_DPSEC); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_directive_recv, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = args->cdw12, + .data_len = args->data_len, + .addr = (__u64)(uintptr_t)args->data, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_capacity_mgmt(struct nvme_capacity_mgmt_args *args) +{ + __u32 cdw10 = args->op | args->element_id << 16; + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_capacity_mgmt, + .cdw10 = cdw10, + .cdw11 = args->cdw11, + .cdw12 = args->cdw12, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_lockdown(struct nvme_lockdown_args *args) +{ + __u32 cdw10 = args->ofi << 8 | + (args->ifc & 0x3) << 5 | + (args->prhbt & 0x1) << 4 | + (args->scp & 0xF); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_lockdown, + .cdw10 = cdw10, + .cdw14 = args->uuidx & 0x3F, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_set_property(struct nvme_set_property_args *args) +{ + __u32 cdw10 = nvme_is_64bit_reg(args->offset); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_fabrics, + .nsid = nvme_fabrics_type_property_set, + .cdw10 = cdw10, + .cdw11 = args->offset, + .cdw12 = args->value & 0xffffffff, + .cdw13 = args->value >> 32, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_get_property(struct nvme_get_property_args *args) +{ + __u32 cdw10 = nvme_is_64bit_reg(args->offset); + + struct nvme_passthru_cmd64 cmd = { + .opcode = nvme_admin_fabrics, + .nsid = nvme_fabrics_type_property_get, + .cdw10 = cdw10, + .cdw11 = args->offset, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru64(args->fd, &cmd, args->value); +} + +int nvme_sanitize_nvm(struct nvme_sanitize_nvm_args *args) +{ + __u32 cdw10 = NVME_SET(args->sanact, SANITIZE_CDW10_SANACT) | + NVME_SET(!!args->ause, SANITIZE_CDW10_AUSE) | + NVME_SET(args->owpass, SANITIZE_CDW10_OWPASS) | + NVME_SET(!!args->oipbp, SANITIZE_CDW10_OIPBP) | + NVME_SET(!!args->nodas, SANITIZE_CDW10_NODAS); + __u32 cdw11 = args->ovrpat; + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_sanitize_nvm, + .cdw10 = cdw10, + .cdw11 = cdw11, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_dev_self_test(struct nvme_dev_self_test_args *args) +{ + __u32 cdw10 = NVME_SET(args->stc, DEVICE_SELF_TEST_CDW10_STC); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_dev_self_test, + .nsid = args->nsid, + .cdw10 = cdw10, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, 
args->result); +} + +int nvme_virtual_mgmt(struct nvme_virtual_mgmt_args *args) +{ + __u32 cdw10 = NVME_SET(args->act, VIRT_MGMT_CDW10_ACT) | + NVME_SET(args->rt, VIRT_MGMT_CDW10_RT) | + NVME_SET(args->cntlid, VIRT_MGMT_CDW10_CNTLID); + __u32 cdw11 = NVME_SET(args->nr, VIRT_MGMT_CDW11_NR); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_virtual_mgmt, + .cdw10 = cdw10, + .cdw11 = cdw11, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} + +int nvme_submit_io_passthru64(int fd, struct nvme_passthru_cmd64 *cmd, + __u64 *result) +{ + return nvme_submit_passthru64(fd, NVME_IOCTL_IO64_CMD, cmd, result); +} + +int nvme_io_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, + __u32 cdw11, __u32 cdw12, __u32 cdw13, __u32 cdw14, + __u32 cdw15, __u32 data_len, void *data, __u32 metadata_len, + void *metadata, __u32 timeout_ms, __u64 *result) +{ + return nvme_passthru64(fd, NVME_IOCTL_IO64_CMD, opcode, flags, rsvd, + nsid, cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, + cdw14, cdw15, data_len, data, metadata_len, metadata, + timeout_ms, result); +} + +int nvme_submit_io_passthru(int fd, struct nvme_passthru_cmd *cmd, __u32 *result) +{ + return nvme_submit_passthru(fd, NVME_IOCTL_IO_CMD, cmd, result); +} + +int nvme_io_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, + __u32 cdw11, __u32 cdw12, __u32 cdw13, __u32 cdw14, + __u32 cdw15, __u32 data_len, void *data, __u32 metadata_len, + void *metadata, __u32 timeout_ms, __u32 *result) +{ + return nvme_passthru(fd, NVME_IOCTL_IO_CMD, opcode, flags, rsvd, nsid, + cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, cdw14, + cdw15, data_len, data, metadata_len, metadata, + timeout_ms, result); +} + +int nvme_io(struct nvme_io_args *args, __u8 opcode) +{ + __u32 cdw2 = args->storage_tag & 0xffffffff; + __u32 cdw3 = (args->storage_tag >> 32) & 0xffff; + __u32 cdw10 = args->slba & 0xffffffff; + __u32 cdw11 = args->slba >> 32; + __u32 cdw12 = args->nlb | (args->control << 16); + __u32 cdw13 = args->dsm | (args->dspec << 16); + __u32 cdw14 = args->reftag; + __u32 cdw15 = args->apptag | (args->appmask << 16); + + struct nvme_passthru_cmd cmd = { + .opcode = opcode, + .nsid = args->nsid, + .cdw2 = cdw2, + .cdw3 = cdw3, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw13 = cdw13, + .cdw14 = cdw14, + .cdw15 = cdw15, + .data_len = args->data_len, + .metadata_len = args->metadata_len, + .addr = (__u64)(uintptr_t)args->data, + .metadata = (__u64)(uintptr_t)args->metadata, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_dsm(struct nvme_dsm_args *args) +{ + struct nvme_passthru_cmd cmd = { + .opcode = nvme_cmd_dsm, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->dsm, + .data_len = args->nr_ranges * sizeof(*args->dsm), + .cdw10 = args->nr_ranges - 1, + .cdw11 = args->attrs, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_copy(struct nvme_copy_args *args) +{ + __u32 cdw12 = ((args->nr - 1) & 0xff) | ((args->format & 0xf) << 8) | + ((args->prinfor & 0xf) << 12) | ((args->dtype & 0xf) << 20) | + ((args->prinfow & 0xf) << 26) | ((args->fua & 
0x1) << 30) | + ((args->lr & 0x1) << 31); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_cmd_copy, + .nsid = args->nsid, + .addr = (__u64)(uintptr_t)args->copy, + .data_len = args->nr * sizeof(*args->copy), + .cdw10 = args->sdlba & 0xffffffff, + .cdw11 = args->sdlba >> 32, + .cdw12 = cdw12, + .cdw13 = (args->dspec & 0xffff) << 16, + .cdw14 = args->ilbrt, + .cdw15 = (args->lbatm << 16) | args->lbat, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_resv_acquire(struct nvme_resv_acquire_args *args) +{ + __le64 payload[2] = { + cpu_to_le64(args->crkey), + cpu_to_le64(args->nrkey) + }; + __u32 cdw10 = (args->racqa & 0x7) | + (args->iekey ? 1 << 3 : 0) | + (args->rtype << 8); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_cmd_resv_acquire, + .nsid = args->nsid, + .cdw10 = cdw10, + .data_len = sizeof(payload), + .addr = (__u64)(uintptr_t)(payload), + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_resv_register(struct nvme_resv_register_args *args) +{ + __le64 payload[2] = { + cpu_to_le64(args->crkey), + cpu_to_le64(args->nrkey) + }; + __u32 cdw10 = (args->rrega & 0x7) | + (args->iekey ? 1 << 3 : 0) | + (args->cptpl << 30); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_cmd_resv_register, + .nsid = args->nsid, + .cdw10 = cdw10, + .data_len = sizeof(payload), + .addr = (__u64)(uintptr_t)(payload), + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_resv_release(struct nvme_resv_release_args *args) +{ + __le64 payload[1] = { cpu_to_le64(args->crkey) }; + __u32 cdw10 = (args->rrela & 0x7) | + (args->iekey ? 1 << 3 : 0) | + (args->rtype << 8); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_cmd_resv_release, + .nsid = args->nsid, + .cdw10 = cdw10, + .addr = (__u64)(uintptr_t)(payload), + .data_len = sizeof(payload), + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_resv_report(struct nvme_resv_report_args *args) +{ + struct nvme_passthru_cmd cmd = { + .opcode = nvme_cmd_resv_report, + .nsid = args->nsid, + .cdw10 = (args->len >> 2) - 1, + .cdw11 = args->eds ? 
1 : 0, + .addr = (__u64)(uintptr_t)args->report, + .data_len = args->len, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_zns_mgmt_send(struct nvme_zns_mgmt_send_args *args) +{ + __u32 cdw10 = args->slba & 0xffffffff; + __u32 cdw11 = args->slba >> 32; + __u32 cdw13 = NVME_SET(args->zsaso, ZNS_MGMT_SEND_ZSASO) | + NVME_SET(!!args->select_all, ZNS_MGMT_SEND_SEL) | + NVME_SET(args->zsa, ZNS_MGMT_SEND_ZSA); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_zns_cmd_mgmt_send, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw13 = cdw13, + .addr = (__u64)(uintptr_t)args->data, + .data_len = args->data_len, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_zns_mgmt_recv(struct nvme_zns_mgmt_recv_args *args) +{ + __u32 cdw10 = args->slba & 0xffffffff; + __u32 cdw11 = args->slba >> 32; + __u32 cdw12 = (args->data_len >> 2) - 1; + __u32 cdw13 = NVME_SET(args->zra, ZNS_MGMT_RECV_ZRA) | + NVME_SET(args->zrasf, ZNS_MGMT_RECV_ZRASF) | + NVME_SET(args->zras_feat, ZNS_MGMT_RECV_ZRAS_FEAT); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_zns_cmd_mgmt_recv, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw13 = cdw13, + .addr = (__u64)(uintptr_t)args->data, + .data_len = args->data_len, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru(args->fd, &cmd, args->result); +} + +int nvme_zns_append(struct nvme_zns_append_args *args) +{ + __u32 cdw10 = args->zslba & 0xffffffff; + __u32 cdw11 = args->zslba >> 32; + __u32 cdw12 = args->nlb | (args->control << 16); + __u32 cdw14 = args->ilbrt; + __u32 cdw15 = args->lbat | (args->lbatm << 16); + + struct nvme_passthru_cmd64 cmd = { + .opcode = nvme_zns_cmd_append, + .nsid = args->nsid, + .cdw10 = cdw10, + .cdw11 = cdw11, + .cdw12 = cdw12, + .cdw14 = cdw14, + .cdw15 = cdw15, + .data_len = args->data_len, + .addr = (__u64)(uintptr_t)args->data, + .metadata_len = args->metadata_len, + .metadata = (__u64)(uintptr_t)args->metadata, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + return nvme_submit_io_passthru64(args->fd, &cmd, args->result); +} + +int nvme_dim_send(struct nvme_dim_args *args) +{ + __u32 cdw10 = NVME_SET(args->tas, DIM_TAS); + + struct nvme_passthru_cmd cmd = { + .opcode = nvme_admin_discovery_info_mgmt, + .cdw10 = cdw10, + .addr = (__u64)(uintptr_t)args->data, + .data_len = args->data_len, + .timeout_ms = args->timeout, + }; + + if (args->args_size < sizeof(*args)) { + errno = EINVAL; + return -1; + } + + return nvme_submit_admin_passthru(args->fd, &cmd, args->result); +} diff --git a/src/nvme/ioctl.h b/src/nvme/ioctl.h new file mode 100644 index 0000000..ab3797d --- /dev/null +++ b/src/nvme/ioctl.h @@ -0,0 +1,4407 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#ifndef _LIBNVME_IOCTL_H +#define _LIBNVME_IOCTL_H + +#include <stddef.h> +#include <sys/ioctl.h> +#include "types.h" + +/* + * We can not always count on the kernel UAPI being installed. Use the same + * 'ifdef' guard to avoid double definitions just in case. + */ +#ifndef _UAPI_LINUX_NVME_IOCTL_H +#define _UAPI_LINUX_NVME_IOCTL_H + +#ifndef _LINUX_NVME_IOCTL_H +#define _LINUX_NVME_IOCTL_H + +/** + * DOC: ioctl.h + * + * Linux NVMe ioctl interface functions + */ + +/* '0' is interpreted by the kernel to mean 'apply the default timeout' */ +#define NVME_DEFAULT_IOCTL_TIMEOUT 0 + +/** + * struct nvme_passthru_cmd - nvme passthrough command structure + * @opcode: Operation code, see &enum nvme_io_opcodes and &enum nvme_admin_opcodes + * @flags: Not supported: intended for command flags (eg: SGL, FUSE) + * @rsvd1: Reserved for future use + * @nsid: Namespace Identifier, or Fabrics type + * @cdw2: Command Dword 2 (no spec defined use) + * @cdw3: Command Dword 3 (no spec defined use) + * @metadata: User space address to metadata buffer (NULL if not used) + * @addr: User space address to data buffer (NULL if not used) + * @metadata_len: Metadata buffer transfer length + * @data_len: Data buffer transfer length + * @cdw10: Command Dword 10 (command specific) + * @cdw11: Command Dword 11 (command specific) + * @cdw12: Command Dword 12 (command specific) + * @cdw13: Command Dword 13 (command specific) + * @cdw14: Command Dword 14 (command specific) + * @cdw15: Command Dword 15 (command specific) + * @timeout_ms: If non-zero, overrides system default timeout in milliseconds + * @result: Set on completion to the command's CQE DWORD 0 controller response + */ +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +/** + * struct nvme_passthru_cmd64 - 64-bit nvme passthrough command structure + * @opcode: Operation code, see &enum nvme_io_opcodes and &enum nvme_admin_opcodes + * @flags: Not supported: intended for command flags (eg: SGL, FUSE) + * @rsvd1: Reserved for future use + * @nsid: Namespace Identifier, or Fabrics type + * @cdw2: Command Dword 2 (no spec defined use) + * @cdw3: Command Dword 3 (no spec defined use) + * @metadata: User space address to metadata buffer (NULL if not used) + * @addr: User space address to data buffer (NULL if not used) + * @metadata_len: Metadata buffer transfer length + * @data_len: Data buffer transfer length + * @cdw10: Command Dword 10 (command specific) + * @cdw11: Command Dword 11 (command specific) + * @cdw12: Command Dword 12 (command specific) + * @cdw13: Command Dword 13 (command specific) + * @cdw14: Command Dword 14 (command specific) + * @cdw15: Command Dword 15 (command specific) + * @timeout_ms: If non-zero, overrides system default timeout in milliseconds + * @rsvd2: Reserved for future use (and fills an impicit struct pad + * @result: Set on completion to the command's CQE DWORD 0-1 controller response + */ +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + 
__u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +#define NVME_IOCTL_ID _IO('N', 0x40) +#define NVME_IOCTL_RESET _IO('N', 0x44) +#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) +#define NVME_IOCTL_RESCAN _IO('N', 0x46) +#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_passthru_cmd) +#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) +#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) +#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) + +#endif /* _UAPI_LINUX_NVME_IOCTL_H */ + +#endif /* _LINUX_NVME_IOCTL_H */ + +/** + * nvme_submit_admin_passthru64() - Submit a 64-bit nvme passthrough admin + * command + * @fd: File descriptor of nvme device + * @cmd: The nvme admin command to send + * @result: Optional field to return the result from the CQE DW0-1 + * + * Uses NVME_IOCTL_ADMIN64_CMD for the ioctl request. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_submit_admin_passthru64(int fd, struct nvme_passthru_cmd64 *cmd, + __u64 *result); + +/** + * nvme_admin_passthru64() - Submit a 64-bit nvme passthrough command + * @fd: File descriptor of nvme device + * @opcode: The nvme io command to send + * @flags: NVMe command flags (not used) + * @rsvd: Reserevd for future use + * @nsid: Namespace identifier + * @cdw2: Command dword 2 + * @cdw3: Command dword 3 + * @cdw10: Command dword 10 + * @cdw11: Command dword 11 + * @cdw12: Command dword 12 + * @cdw13: Command dword 13 + * @cdw14: Command dword 14 + * @cdw15: Command dword 15 + * @data_len: Length of the data transfered in this command in bytes + * @data: Pointer to user address of the data buffer + * @metadata_len:Length of metadata transfered in this command + * @metadata: Pointer to user address of the metadata buffer + * @timeout_ms: How long the kernel waits for the command to complete + * @result: Optional field to return the result from the CQE dword 0 + * + * Parameterized form of nvme_submit_admin_passthru64(). This sets up and + * submits a &struct nvme_passthru_cmd64. + * + * Known values for @opcode are defined in &enum nvme_admin_opcode. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_admin_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11, + __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15, + __u32 data_len, void *data, __u32 metadata_len, void *metadata, + __u32 timeout_ms, __u64 *result); + +/** + * nvme_submit_admin_passthru() - Submit an nvme passthrough admin command + * @fd: File descriptor of nvme device + * @cmd: The nvme admin command to send + * @result: Optional field to return the result from the CQE DW0 + * + * Uses NVME_IOCTL_ADMIN_CMD for the ioctl request. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
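/*
 * A minimal usage sketch for the raw 64-bit admin passthrough path
 * documented above: issue Identify Controller (opcode 06h, CNS 01h) into
 * a caller supplied 4 KiB buffer. The example_* name and the reliance on
 * <stdint.h> for uintptr_t are illustrative assumptions; the
 * nvme_identify_ctrl() wrapper further below reaches the same data
 * through nvme_identify().
 */
static int example_identify_ctrl_raw(int fd, void *buf)
{
	struct nvme_passthru_cmd64 cmd = {
		.opcode = 0x06,			/* Identify admin command */
		.addr = (__u64)(uintptr_t)buf,	/* 4096-byte destination */
		.data_len = 4096,
		.cdw10 = 0x1,			/* CNS 01h: Identify Controller */
		.timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
	};

	/* 0: success, >0: NVMe status field, -1: errno describes the failure */
	return nvme_submit_admin_passthru64(fd, &cmd, NULL);
}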
+ */ +int nvme_submit_admin_passthru(int fd, struct nvme_passthru_cmd *cmd, + __u32 *result); + +/** + * nvme_admin_passthru() - Submit an nvme passthrough command + * @fd: File descriptor of nvme device + * @opcode: The nvme io command to send + * @flags: NVMe command flags (not used) + * @rsvd: Reserevd for future use + * @nsid: Namespace identifier + * @cdw2: Command dword 2 + * @cdw3: Command dword 3 + * @cdw10: Command dword 10 + * @cdw11: Command dword 11 + * @cdw12: Command dword 12 + * @cdw13: Command dword 13 + * @cdw14: Command dword 14 + * @cdw15: Command dword 15 + * @data_len: Length of the data transfered in this command in bytes + * @data: Pointer to user address of the data buffer + * @metadata_len:Length of metadata transfered in this command + * @metadata: Pointer to user address of the metadata buffer + * @timeout_ms: How long the kernel waits for the command to complete + * @result: Optional field to return the result from the CQE dword 0 + * + * Parameterized form of nvme_submit_admin_passthru(). This sets up and + * submits a &struct nvme_passthru_cmd. + * + * Known values for @opcode are defined in &enum nvme_admin_opcode. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_admin_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11, + __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15, + __u32 data_len, void *data, __u32 metadata_len, void *metadata, + __u32 timeout_ms, __u32 *result); + +/** + * nvme_submit_io_passthru64() - Submit a 64-bit nvme passthrough command + * @fd: File descriptor of nvme device + * @cmd: The nvme io command to send + * @result: Optional field to return the result from the CQE DW0-1 + * + * Uses NVME_IOCTL_IO64_CMD for the ioctl request. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_submit_io_passthru64(int fd, struct nvme_passthru_cmd64 *cmd, + __u64 *result); + +/** + * nvme_io_passthru64() - Submit an nvme io passthrough command + * @fd: File descriptor of nvme device + * @opcode: The nvme io command to send + * @flags: NVMe command flags (not used) + * @rsvd: Reserevd for future use + * @nsid: Namespace identifier + * @cdw2: Command dword 2 + * @cdw3: Command dword 3 + * @cdw10: Command dword 10 + * @cdw11: Command dword 11 + * @cdw12: Command dword 12 + * @cdw13: Command dword 13 + * @cdw14: Command dword 14 + * @cdw15: Command dword 15 + * @data_len: Length of the data transfered in this command in bytes + * @data: Pointer to user address of the data buffer + * @metadata_len:Length of metadata transfered in this command + * @metadata: Pointer to user address of the metadata buffer + * @timeout_ms: How long the kernel waits for the command to complete + * @result: Optional field to return the result from the CQE dword 0 + * + * Parameterized form of nvme_submit_io_passthru64(). This sets up and submits + * a &struct nvme_passthru_cmd64. + * + * Known values for @opcode are defined in &enum nvme_io_opcode. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +int nvme_io_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11, + __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15, + __u32 data_len, void *data, __u32 metadata_len, void *metadata, + __u32 timeout_ms, __u64 *result); + +/** + * nvme_submit_io_passthru() - Submit an nvme passthrough command + * @fd: File descriptor of nvme device + * @cmd: The nvme io command to send + * @result: Optional field to return the result from the CQE dword 0 + * @result: Optional field to return the result from the CQE DW0 + * + * Uses NVME_IOCTL_IO_CMD for the ioctl request. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_submit_io_passthru(int fd, struct nvme_passthru_cmd *cmd, + __u32 *result); + +/** + * nvme_io_passthru() - Submit an nvme io passthrough command + * @fd: File descriptor of nvme device + * @opcode: The nvme io command to send + * @flags: NVMe command flags (not used) + * @rsvd: Reserevd for future use + * @nsid: Namespace identifier + * @cdw2: Command dword 2 + * @cdw3: Command dword 3 + * @cdw10: Command dword 10 + * @cdw11: Command dword 11 + * @cdw12: Command dword 12 + * @cdw13: Command dword 13 + * @cdw14: Command dword 14 + * @cdw15: Command dword 15 + * @data_len: Length of the data transfered in this command in bytes + * @data: Pointer to user address of the data buffer + * @metadata_len:Length of metadata transfered in this command + * @metadata: Pointer to user address of the metadata buffer + * @timeout_ms: How long the kernel waits for the command to complete + * @result: Optional field to return the result from the CQE dword 0 + * + * Parameterized form of nvme_submit_io_passthru(). This sets up and submits + * a &struct nvme_passthru_cmd. + * + * Known values for @opcode are defined in &enum nvme_io_opcode. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_io_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd, + __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11, + __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15, + __u32 data_len, void *data, __u32 metadata_len, void *metadata, + __u32 timeout_ms, __u32 *result); + +/** + * nvme_subsystem_reset() - Initiate a subsystem reset + * @fd: File descriptor of nvme device + * + * This should only be sent to controller handles, not to namespaces. + * + * Return: Zero if a subsystem reset was initiated or -1 with errno set + * otherwise. + */ +int nvme_subsystem_reset(int fd); + +/** + * nvme_ctrl_reset() - Initiate a controller reset + * @fd: File descriptor of nvme device + * + * This should only be sent to controller handles, not to namespaces. + * + * Return: 0 if a reset was initiated or -1 with errno set otherwise. + */ +int nvme_ctrl_reset(int fd); + +/** + * nvme_ns_rescan() - Initiate a controller rescan + * @fd: File descriptor of nvme device + * + * This should only be sent to controller handles, not to namespaces. + * + * Return: 0 if a rescan was initiated or -1 with errno set otherwise. + */ +int nvme_ns_rescan(int fd); + +/** + * nvme_get_nsid() - Retrieve the NSID from a namespace file descriptor + * @fd: File descriptor of nvme namespace + * @nsid: User pointer to namespace id + * + * This should only be sent to namespace handles, not to controllers. The + * kernel's interface returns the nsid as the return value. 
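/*
 * Sketch: reset a controller and rescan its namespaces using the helpers
 * declared above. The device path and the O_RDONLY open (requiring
 * <fcntl.h> and <unistd.h>) are assumptions for illustration; both calls
 * must be issued on a controller handle such as /dev/nvme0, never on a
 * namespace block device.
 */
static int example_reset_and_rescan(const char *ctrl_dev)
{
	int fd, ret;

	fd = open(ctrl_dev, O_RDONLY);		/* e.g. "/dev/nvme0" */
	if (fd < 0)
		return -1;

	ret = nvme_ctrl_reset(fd);		/* 0 on success, -1 with errno set */
	if (!ret)
		ret = nvme_ns_rescan(fd);

	close(fd);
	return ret;
}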
This is unfortunate + * for many architectures that are incapable of allowing distinguishing a + * namespace id > 0x80000000 from a negative error number. + * + * Return: 0 if @nsid was set successfully or -1 with errno set otherwise. + */ +int nvme_get_nsid(int fd, __u32 *nsid); + +/** + * struct nvme_identify_args - Arguments for the NVMe Identify command + * @result: The command completion result from CQE dword0 + * @data: User space destination address to transfer the data + * @args_size: Size of &struct nvme_identify_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms (0 for default timeout) + * @cns: The Controller or Namespace structure, see @enum nvme_identify_cns + * @csi: Command Set Identifier + * @nsid: Namespace identifier, if applicable + * @cntid: The Controller Identifier, if applicable + * @cns_specific_id: Identifier that is required for a particular CNS value + * @uuidx: UUID Index if controller supports this id selection method + */ +struct nvme_identify_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + enum nvme_identify_cns cns; + enum nvme_csi csi; + __u32 nsid; + __u16 cntid; + __u16 cns_specific_id; + __u8 uuidx; +}; + +/** + * nvme_identify() - Send the NVMe Identify command + * @args: &struct nvme_identify_args argument structure + * + * The Identify command returns a data buffer that describes information about + * the NVM subsystem, the controller or the namespace(s). + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_identify(struct nvme_identify_args *args); + +static int nvme_identify_cns_nsid(int fd, enum nvme_identify_cns cns, + __u32 nsid, void *data) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = cns, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_ctrl() - Retrieves nvme identify controller + * @fd: File descriptor of nvme device + * @id: User space destination address to transfer the data, + * + * Sends nvme identify with CNS value %NVME_IDENTIFY_CNS_CTRL. + * + * See &struct nvme_id_ctrl for details on the data returned. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_ctrl(int fd, struct nvme_id_ctrl *id) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_CTRL, + NVME_NSID_NONE, id); +} + +/** + * nvme_identify_ns() - Retrieves nvme identify namespace + * @fd: File descriptor of nvme device + * @nsid: Namespace to identify + * @ns: User space destination address to transfer the data + * + * If the Namespace Identifier (NSID) field specifies an active NSID, then the + * Identify Namespace data structure is returned to the host for that specified + * namespace. + * + * If the controller supports the Namespace Management capability and the NSID + * field is set to %NVME_NSID_ALL, then the controller returns an Identify Namespace + * data structure that specifies capabilities that are common across namespaces + * for this controller. + * + * See &struct nvme_id_ns for details on the structure returned. 
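/*
 * Sketch: pull the Identify Controller data with the wrapper declared
 * above and print the model and serial numbers. These fields are space
 * padded ASCII without a terminating NUL, hence the bounded %.*s format;
 * <stdio.h> and the field names from &struct nvme_id_ctrl are assumed.
 */
static int example_print_ctrl_ids(int fd)
{
	struct nvme_id_ctrl id;
	int ret;

	ret = nvme_identify_ctrl(fd, &id);
	if (ret)
		return ret;	/* >0: NVMe status, -1: errno set */

	printf("model:  %.*s\n", (int)sizeof(id.mn), id.mn);
	printf("serial: %.*s\n", (int)sizeof(id.sn), id.sn);
	return 0;
}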
+ * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_ns(int fd, __u32 nsid, struct nvme_id_ns *ns) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_NS, nsid, ns); +} + +/** + * nvme_identify_allocated_ns() - Same as nvme_identify_ns, but only for + * allocated namespaces + * @fd: File descriptor of nvme device + * @nsid: Namespace to identify + * @ns: User space destination address to transfer the data + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_allocated_ns(int fd, __u32 nsid, + struct nvme_id_ns *ns) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_ALLOCATED_NS, + nsid, ns); +} + +/** + * nvme_identify_active_ns_list() - Retrieves active namespaces id list + * @fd: File descriptor of nvme device + * @nsid: Return namespaces greater than this identifer + * @list: User space destination address to transfer the data + * + * A list of 1024 namespace IDs is returned to the host containing NSIDs in + * increasing order that are greater than the value specified in the Namespace + * Identifier (nsid) field of the command. + * + * See &struct nvme_ns_list for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_active_ns_list(int fd, __u32 nsid, + struct nvme_ns_list *list) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_NS_ACTIVE_LIST, + nsid, list); +} + +/** + * nvme_identify_allocated_ns_list() - Retrieves allocated namespace id list + * @fd: File descriptor of nvme device + * @nsid: Return namespaces greater than this identifer + * @list: User space destination address to transfer the data + * + * A list of 1024 namespace IDs is returned to the host containing NSIDs in + * increasing order that are greater than the value specified in the Namespace + * Identifier (nsid) field of the command. + * + * See &struct nvme_ns_list for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_allocated_ns_list(int fd, __u32 nsid, + struct nvme_ns_list *list) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST, + nsid, list); +} + +/** + * nvme_identify_ctrl_list() - Retrieves identify controller list + * @fd: File descriptor of nvme device + * @cntid: Starting CNTLID to return in the list + * @cntlist: User space destination address to transfer the data + * + * Up to 2047 controller identifiers is returned containing a controller + * identifier greater than or equal to the controller identifier specified in + * @cntid. + * + * See &struct nvme_ctrl_list for a definition of the structure returned. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
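/*
 * Sketch: enumerate every active namespace by walking the list returned
 * from nvme_identify_active_ns_list(). The entries are little-endian
 * NSIDs in increasing order and unused slots read as zero; the ns[]
 * member name follows &struct nvme_ns_list, and le32_to_cpu() plus
 * <stdio.h> are assumed to be available to the caller.
 */
static int example_list_active_nsids(int fd)
{
	struct nvme_ns_list list;
	int i, ret;

	ret = nvme_identify_active_ns_list(fd, 0, &list);
	if (ret)
		return ret;

	for (i = 0; i < 1024 && list.ns[i]; i++)
		printf("active nsid: %u\n", le32_to_cpu(list.ns[i]));

	/*
	 * A subsystem with more than 1024 active namespaces needs a
	 * follow-up call passing the last NSID returned here.
	 */
	return 0;
}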
+ */ +static inline int nvme_identify_ctrl_list(int fd, __u16 cntid, + struct nvme_ctrl_list *cntlist) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = cntlist, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CTRL_LIST, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_nsid_ctrl_list() - Retrieves controller list attached to an nsid + * @fd: File descriptor of nvme device + * @nsid: Return controllers that are attached to this nsid + * @cntid: Starting CNTLID to return in the list + * @cntlist: User space destination address to transfer the data + * + * Up to 2047 controller identifiers are returned containing a controller + * identifier greater than or equal to the controller identifier specified in + * @cntid attached to @nsid. + * + * See &struct nvme_ctrl_list for a definition of the structure returned. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 + */ +static inline int nvme_identify_nsid_ctrl_list(int fd, __u32 nsid, __u16 cntid, + struct nvme_ctrl_list *cntlist) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = cntlist, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_NS_CTRL_LIST, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_ns_descs() - Retrieves namespace descriptor list + * @fd: File descriptor of nvme device + * @nsid: The namespace id to retrieve destriptors + * @descs: User space destination address to transfer the data + * + * A list of Namespace Identification Descriptor structures is returned to the + * host for the namespace specified in the Namespace Identifier (NSID) field if + * it is an active NSID. + * + * The data returned is in the form of an arrray of 'struct nvme_ns_id_desc'. + * + * See &struct nvme_ns_id_desc for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_ns_descs(int fd, __u32 nsid, + struct nvme_ns_id_desc *descs) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_NS_DESC_LIST, + nsid, descs); +} + +/** + * nvme_identify_nvmset_list() - Retrieves NVM Set List + * @fd: File descriptor of nvme device + * @nvmsetid: NVM Set Identifier + * @nvmset: User space destination address to transfer the data + * + * Retrieves an NVM Set List, &struct nvme_id_nvmset_list. The data structure + * is an ordered list by NVM Set Identifier, starting with the first NVM Set + * Identifier supported by the NVM subsystem that is equal to or greater than + * the NVM Set Identifier. + * + * See &struct nvme_id_nvmset_list for the defintion of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
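/*
 * Sketch: walk the Namespace Identification Descriptor list retrieved by
 * nvme_identify_ns_descs(). Identify always transfers 4096 bytes, each
 * descriptor is a 4-byte header (type @nidt, length @nidl) followed by
 * @nidl bytes of identifier data, and a zero @nidt ends the list. Only
 * the nidt/nidl members of &struct nvme_ns_id_desc are relied upon, and
 * <stdio.h> is assumed.
 */
static int example_walk_ns_descs(int fd, __u32 nsid)
{
	__u8 buf[4096] = { 0 };
	int pos, ret;

	ret = nvme_identify_ns_descs(fd, nsid, (struct nvme_ns_id_desc *)buf);
	if (ret)
		return ret;

	for (pos = 0; pos + 4 < (int)sizeof(buf);) {
		struct nvme_ns_id_desc *d = (struct nvme_ns_id_desc *)(buf + pos);

		if (!d->nidt)
			break;
		printf("descriptor type %u, length %u\n", d->nidt, d->nidl);
		pos += 4 + d->nidl;	/* 4-byte header plus payload */
	}
	return 0;
}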
+ */ +static inline int nvme_identify_nvmset_list(int fd, __u16 nvmsetid, + struct nvme_id_nvmset_list *nvmset) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = nvmset, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_NVMSET_LIST, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = nvmsetid, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_primary_ctrl() - Retrieve NVMe Primary Controller + * identification + * @fd: File descriptor of nvme device + * @cntid: Return controllers starting at this identifier + * @cap: User space destination buffer address to transfer the data + * + * See &struct nvme_primary_ctrl_cap for the defintion of the returned structure, @cap. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_primary_ctrl(int fd, __u16 cntid, + struct nvme_primary_ctrl_cap *cap) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = cap, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_secondary_ctrl_list() - Retrieves secondary controller list + * @fd: File descriptor of nvme device + * @nsid: Namespace identifier + * @cntid: Return controllers starting at this identifier + * @sc_list: User space destination address to transfer the data + * + * A Secondary Controller List is returned to the host for up to 127 secondary + * controllers associated with the primary controller processing this command. + * The list contains entries for controller identifiers greater than or equal + * to the value specified in the Controller Identifier (cntid). + * + * See &struct nvme_secondary_ctrls_list for a defintion of the returned + * structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_secondary_ctrl_list(int fd, __u32 nsid, + __u16 cntid, struct nvme_secondary_ctrl_list *sc_list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = sc_list, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cntid = cntid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_ns_granularity() - Retrieves namespace granularity + * identification + * @fd: File descriptor of nvme device + * @gr_list: User space destination address to transfer the data + * + * If the controller supports reporting of Namespace Granularity, then a + * Namespace Granularity List is returned to the host for up to sixteen + * namespace granularity descriptors + * + * See &struct nvme_id_ns_granularity_list for the definition of the returned + * structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
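/*
 * Sketch: report how many secondary controllers the primary controller
 * exposes (for example for SR-IOV virtual functions) using the wrapper
 * declared above. Only the num member of &struct nvme_secondary_ctrl_list
 * is touched here, and that member name is an assumption to be checked
 * against types.h.
 */
static int example_count_secondary_ctrls(int fd)
{
	struct nvme_secondary_ctrl_list sc_list;

	if (nvme_identify_secondary_ctrl_list(fd, NVME_NSID_NONE, 0, &sc_list))
		return -1;

	return sc_list.num;	/* entries returned, at most 127 */
}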
+ */ +static inline int nvme_identify_ns_granularity(int fd, + struct nvme_id_ns_granularity_list *gr_list) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_NS_GRANULARITY, + NVME_NSID_NONE, gr_list); +} + +/** + * nvme_identify_uuid() - Retrieves device's UUIDs + * @fd: File descriptor of nvme device + * @uuid_list: User space destination address to transfer the data + * + * Each UUID List entry is either 0h, the NVMe Invalid UUID, or a valid UUID. + * Valid UUIDs are those which are non-zero and are not the NVMe Invalid UUID. + * + * See &struct nvme_id_uuid_list for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_uuid(int fd, struct nvme_id_uuid_list *uuid_list) +{ + return nvme_identify_cns_nsid(fd, NVME_IDENTIFY_CNS_UUID_LIST, + NVME_NSID_NONE, uuid_list); +} + +/** + * nvme_identify_ns_csi() - I/O command set specific identify namespace data + * @fd: File descriptor of nvme device + * @nsid: Namespace to identify + * @uuidx: UUID Index for differentiating vendor specific encoding + * @csi: Command Set Identifier + * @data: User space destination address to transfer the data + * + * An I/O Command Set specific Identify Namespace data structre is returned + * for the namespace specified in @nsid. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_ns_csi(int fd, __u32 nsid, __u8 uuidx, + enum nvme_csi csi, void *data) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_NS, + .csi = csi, + .nsid = nsid, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = uuidx, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_ctrl_csi() - I/O command set specific Identify Controller data + * @fd: File descriptor of nvme device + * @csi: Command Set Identifier + * @data: User space destination address to transfer the data + * + * An I/O Command Set specific Identify Controller data structure is returned + * to the host for the controller processing the command. The specific Identify + * Controller data structure to be returned is specified by @csi. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_identify_ctrl_csi(int fd, enum nvme_csi csi, void *data) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_CTRL, + .csi = csi, + .nsid = NVME_NSID_NONE, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_active_ns_list_csi() - Active namespace ID list associated with a specified I/O command set + * @fd: File descriptor of nvme device + * @nsid: Return namespaces greater than this identifier + * @csi: Command Set Identifier + * @ns_list: User space destination address to transfer the data + * + * A list of 1024 namespace IDs is returned to the host containing active + * NSIDs in increasing order that are greater than the value specified in + * the Namespace Identifier (nsid) field of the command and matching the + * I/O Command Set specified in the @csi argument. + * + * See &struct nvme_ns_list for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_active_ns_list_csi(int fd, __u32 nsid, + enum nvme_csi csi, struct nvme_ns_list *ns_list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = ns_list, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_NS_ACTIVE_LIST, + .csi = csi, + .nsid = nsid, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_allocated_ns_list_csi() - Allocated namespace ID list associated with a specified I/O command set + * @fd: File descriptor of nvme device + * @nsid: Return namespaces greater than this identifier + * @csi: Command Set Identifier + * @ns_list: User space destination address to transfer the data + * + * A list of 1024 namespace IDs is returned to the host containing allocated + * NSIDs in increasing order that are greater than the value specified in + * the @nsid field of the command and matching the I/O Command Set + * specified in the @csi argument. + * + * See &struct nvme_ns_list for the definition of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_allocated_ns_list_csi(int fd, __u32 nsid, + enum nvme_csi csi, struct nvme_ns_list *ns_list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = ns_list, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_ALLOCATED_NS_LIST, + .csi = csi, + .nsid = nsid, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_independent_identify_ns() - I/O command set independent Identify namespace data + * @fd: File descriptor of nvme device + * @nsid: Return namespaces greater than this identifier + * @ns: I/O Command Set Independent Identify Namespace data + * structure + * + * The I/O command set independent Identify namespace data structure for + * the namespace identified with @ns is returned to the host. 
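/*
 * Sketch: the CSI-qualified list above makes it easy to ask whether any
 * active namespace implements a given I/O command set, here the Zoned
 * Namespace command set. A zero first entry means the filtered list is
 * empty; the ns[] member name follows &struct nvme_ns_list.
 */
static int example_has_zoned_ns(int fd)
{
	struct nvme_ns_list list;

	if (nvme_identify_active_ns_list_csi(fd, 0, NVME_CSI_ZNS, &list))
		return 0;

	return list.ns[0] != 0;
}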
+ * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_independent_identify_ns(int fd, __u32 nsid, + struct nvme_id_independent_id_ns *ns) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = ns, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_INDEPENDENT_ID_NS, + .csi = NVME_CSI_NVM, + .nsid = nsid, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_ns_csi_user_data_format() - + * @fd: File descriptor of nvme device + * @user_data_format: Return namespaces capability of identifier + * @uuidx: UUID selection, if supported + * @csi: Command Set Identifier + * + * Identify Namespace data structure for the specified User Data Format + * index containing the namespace capabilities for the NVM Command Set. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_ns_csi_user_data_format(int fd, + __u16 user_data_format, __u8 uuidx, + enum nvme_csi csi, void *data) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_NS_USER_DATA_FORMAT, + .csi = csi, + .nsid = NVME_NSID_NONE, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = user_data_format, + .uuidx = uuidx, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_iocs_ns_csi_user_data_format() - + * @fd: File descriptor of nvme device + * @user_data_format: Return namespaces capability of identifier + * @uuidx: UUID selection, if supported + * @csi: Command Set Identifier + * + * I/O Command Set specific Identify Namespace data structure for + * the specified User Data Format index containing the namespace + * capabilities for the I/O Command Set specified in the CSI field. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_iocs_ns_csi_user_data_format(int fd, + __u16 user_data_format, __u8 uuidx, + enum nvme_csi csi, void *data) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_NS_USER_DATA_FORMAT, + .csi = csi, + .nsid = NVME_NSID_NONE, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = user_data_format, + .uuidx = uuidx, + }; + + return nvme_identify(&args); +} + +/** + * nvme_nvm_identify_ctrl() - Identify controller data + * @fd: File descriptor of nvme device + * @id: User space destination address to transfer the data + * + * Return an identify controller data structure to the host of + * processing controller. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
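/*
 * Sketch of the return convention shared by the wrappers in this header:
 * 0 means the command completed successfully, a positive value carries
 * the NVMe status field from the completion queue entry (see
 * &enum nvme_status_field), and -1 means the submission itself failed
 * with errno set. <stdio.h> and the example_* naming are illustrative.
 */
static void example_report_status(const char *op, int ret)
{
	if (ret < 0)
		perror(op);			/* local or kernel failure */
	else if (ret > 0)
		fprintf(stderr, "%s: NVMe status 0x%x\n", op, ret);
	/* ret == 0: success, nothing to report */
}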
+ */ +static inline int nvme_nvm_identify_ctrl(int fd, struct nvme_id_ctrl_nvm *id) +{ + return nvme_identify_ctrl_csi(fd, NVME_CSI_NVM, id); +} + +/** + * nvme_identify_domain_list() - Domain list data + * @fd: File descriptor of nvme device + * @domid: Domain ID + * @list: User space destiantion address to transfer data + * + * A list of 31 domain IDs is returned to the host containing domain + * attributes in increasing order that are greater than the value + * specified in the @domid field. + * + * See &struct nvme_identify_domain_attr for the definition of the + * returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_domain_list(int fd, __u16 domid, + struct nvme_id_domain_list *list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = list, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_DOMAIN_LIST, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = domid, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_endurance_group_list() - Endurance group list data + * @fd: File descriptor of nvme device + * @endgrp_id: Endurance group identifier + * @list: Array of endurance group identifiers + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_endurance_group_list(int fd, __u16 endgrp_id, + struct nvme_id_endurance_group_list *list) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = list, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_ENDURANCE_GROUP_ID, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = endgrp_id, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_identify_iocs() - I/O command set data structure + * @fd: File descriptor of nvme device + * @cntlid: Controller ID + * @iocs: User space destination address to transfer the data + * + * Retrieves list of the controller's supported io command set vectors. See + * &struct nvme_id_iocs. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_identify_iocs(int fd, __u16 cntlid, + struct nvme_id_iocs *iocs) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = iocs, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_COMMAND_SET_STRUCTURE, + .csi = NVME_CSI_NVM, + .nsid = NVME_NSID_NONE, + .cntid = cntlid, + .cns_specific_id = NVME_CNSSPECID_NONE, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_zns_identify_ns() - ZNS identify namespace data + * @fd: File descriptor of nvme device + * @nsid: Namespace to identify + * @data: User space destination address to transfer the data + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
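/*
 * Sketch: check which I/O command sets the controller advertises via the
 * Identify I/O Command Set data structure fetched above. Per the NVMe
 * base specification, bit 0 of a combination vector selects the NVM
 * command set, bit 1 Key Value and bit 2 Zoned Namespace. The iocsc[]
 * member name follows &struct nvme_id_iocs and le64_to_cpu() is assumed
 * to be available.
 */
static int example_supports_zns_iocs(int fd, __u16 cntlid)
{
	struct nvme_id_iocs iocs;
	int i;

	if (nvme_identify_iocs(fd, cntlid, &iocs))
		return 0;

	for (i = 0; i < 512; i++)
		if (le64_to_cpu(iocs.iocsc[i]) & (1ULL << 2))
			return 1;	/* some combination includes ZNS */
	return 0;
}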
+ */ +static inline int nvme_zns_identify_ns(int fd, __u32 nsid, + struct nvme_zns_id_ns *data) +{ + struct nvme_identify_args args = { + .result = NULL, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .cns = NVME_IDENTIFY_CNS_CSI_NS, + .csi = NVME_CSI_ZNS, + .nsid = nsid, + .cntid = NVME_CNTLID_NONE, + .cns_specific_id = NVME_CNSSPECID_NONE, + }; + + return nvme_identify(&args); +} + +/** + * nvme_zns_identify_ctrl() - ZNS identify controller data + * @fd: File descriptor of nvme device + * @id: User space destination address to transfer the data + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_zns_identify_ctrl(int fd, struct nvme_zns_id_ctrl *id) +{ + return nvme_identify_ctrl_csi(fd, NVME_CSI_ZNS, id); +} + +/** + * struct nvme_get_log_args - Arguments for the NVMe Admin Get Log command + * @lpo: Log page offset for partial log transfers + * @result: The command completion result from CQE dword0 + * @log: User space destination address to transfer the data + * @args_size: Length of the structure + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @lid: Log page identifier, see &enum nvme_cmd_get_log_lid for known + * values + * @len: Length of provided user buffer to hold the log data in bytes + * @nsid: Namespace identifier, if applicable + * @csi: Command set identifier, see &enum nvme_csi for known values + * @lsi: Log Specific Identifier + * @lsp: Log specific field + * @uuidx: UUID selection, if supported + * @rae: Retain asynchronous events + * @ot: Offset Type; if set @lpo specifies the index into the list + * of data structures, otherwise @lpo specifies the byte offset + * into the log page. + */ +struct nvme_get_log_args { + __u64 lpo; + __u32 *result; + void *log; + int args_size; + int fd; + __u32 timeout; + enum nvme_cmd_get_log_lid lid; + __u32 len; + __u32 nsid; + enum nvme_csi csi; + __u16 lsi; + __u8 lsp; + __u8 uuidx; + bool rae; + bool ot; +}; + +/** + * nvme_get_log() - NVMe Admin Get Log command + * @args: &struct nvme_get_log_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_log(struct nvme_get_log_args *args); + +static inline int nvme_get_nsid_log(int fd, bool rae, + enum nvme_cmd_get_log_lid lid, + __u32 nsid, __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = lid, + .len = len, + .nsid = nsid, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + + return nvme_get_log(&args); +} + +static inline int nvme_get_log_simple(int fd, enum nvme_cmd_get_log_lid lid, + __u32 len, void *log) +{ + return nvme_get_nsid_log(fd, false, lid, NVME_NSID_ALL, len, log); +} + +/** + * nvme_get_log_supported_log_pages() - Retrieve nmve supported log pages + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @log: Array of LID supported and Effects data structures + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
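/*
 * Sketch: nvme_get_log() with an explicit &struct nvme_get_log_args
 * allows partial transfers through @lpo, which the fixed-size wrappers
 * below never need. This helper pulls an arbitrary log page in 4 KiB
 * chunks; the chunk size and the byte-offset mode (@ot = false) are
 * illustrative, and a real caller would usually clear @rae only on the
 * final chunk of a log that latches asynchronous events.
 */
static int example_get_log_chunked(int fd, enum nvme_cmd_get_log_lid lid,
				   __u32 nsid, void *buf, __u32 total_len)
{
	__u32 xfer, done = 0;
	int ret;

	while (done < total_len) {
		xfer = total_len - done;
		if (xfer > 4096)
			xfer = 4096;

		struct nvme_get_log_args args = {
			.lpo = done,
			.result = NULL,
			.log = (char *)buf + done,
			.args_size = sizeof(args),
			.fd = fd,
			.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
			.lid = lid,
			.len = xfer,
			.nsid = nsid,
			.csi = NVME_CSI_NVM,
			.lsi = NVME_LOG_LSI_NONE,
			.lsp = NVME_LOG_LSP_NONE,
			.uuidx = NVME_UUID_NONE,
			.rae = true,	/* keep events while reading piecewise */
			.ot = false,	/* @lpo is a byte offset */
		};

		ret = nvme_get_log(&args);
		if (ret)
			return ret;
		done += xfer;
	}
	return 0;
}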
+ */ +static inline int nvme_get_log_supported_log_pages(int fd, bool rae, + struct nvme_supported_log_pages *log) +{ + return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_SUPPORTED_LOG_PAGES, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_get_log_error() - Retrieve nvme error log + * @fd: File descriptor of nvme device + * @nr_entries: Number of error log entries allocated + * @rae: Retain asynchronous events + * @err_log: Array of error logs of size 'entries' + * + * This log page describes extended error information for a command that + * completed with error, or may report an error that is not specific to a + * particular command. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_error(int fd, unsigned nr_entries, bool rae, + struct nvme_error_log_page *err_log) +{ + return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_ERROR, + NVME_NSID_ALL, sizeof(*err_log) * nr_entries, + err_log); +} + +/** + * nvme_get_log_smart() - Retrieve nvme smart log + * @fd: File descriptor of nvme device + * @nsid: Optional namespace identifier + * @rae: Retain asynchronous events + * @smart_log: User address to store the smart log + * + * This log page provides SMART and general health information. The information + * provided is over the life of the controller and is retained across power + * cycles. To request the controller log page, the namespace identifier + * specified is FFFFFFFFh. The controller may also support requesting the log + * page on a per namespace basis, as indicated by bit 0 of the LPA field in the + * Identify Controller data structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_smart(int fd, __u32 nsid, bool rae, + struct nvme_smart_log *smart_log) +{ + return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_SMART, + nsid, sizeof(*smart_log), smart_log); +} + +/** + * nvme_get_log_fw_slot() - Retrieves the controller firmware log + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @fw_log: User address to store the log page + * + * This log page describes the firmware revision stored in each firmware slot + * supported. The firmware revision is indicated as an ASCII string. The log + * page also indicates the active slot number. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_fw_slot(int fd, bool rae, + struct nvme_firmware_slot *fw_log) +{ + return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_FW_SLOT, + NVME_NSID_ALL, sizeof(*fw_log), fw_log); +} + +/** + * nvme_get_log_changed_ns_list() - Retrieve namespace changed list + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @ns_log: User address to store the log page + * + * This log page describes namespaces attached to this controller that have + * changed since the last time the namespace was identified, been added, or + * deleted. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
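/*
 * Sketch: read the controller-wide SMART / Health log with the wrapper
 * above and decode a few fields. Field names follow
 * &struct nvme_smart_log in types.h; the composite temperature is a
 * little-endian 16-bit value in kelvins spread over temperature[0..1],
 * and <stdio.h> is assumed.
 */
static int example_check_health(int fd)
{
	struct nvme_smart_log smart;
	int ret;

	ret = nvme_get_log_smart(fd, NVME_NSID_ALL, false, &smart);
	if (ret)
		return ret;

	printf("critical warning: 0x%x\n", smart.critical_warning);
	printf("temperature:      %u K\n",
	       smart.temperature[0] | (smart.temperature[1] << 8));
	printf("percentage used:  %u%%\n", smart.percent_used);
	return 0;
}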
+ */ +static inline int nvme_get_log_changed_ns_list(int fd, bool rae, + struct nvme_ns_list *ns_log) +{ + return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_CHANGED_NS, + NVME_NSID_ALL, sizeof(*ns_log), ns_log); +} + +/** + * nvme_get_log_cmd_effects() - Retrieve nvme command effects log + * @fd: File descriptor of nvme device + * @csi: Command Set Identifier + * @effects_log:User address to store the effects log + * + * This log page describes the commands that the controller supports and the + * effects of those commands on the state of the NVM subsystem. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_cmd_effects(int fd, enum nvme_csi csi, + struct nvme_cmd_effects_log *effects_log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = effects_log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_CMD_EFFECTS, + .len = sizeof(*effects_log), + .nsid = NVME_NSID_ALL, + .csi = csi, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_device_self_test() - Retrieve the device self test log + * @fd: File descriptor of nvme device + * @log: Userspace address of the log payload + * + * The log page indicates the status of an in progress self test and the + * percent complete of that operation, and the results of the previous 20 + * self-test operations. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_device_self_test(int fd, + struct nvme_self_test_log *log) +{ + return nvme_get_nsid_log(fd, false, NVME_LOG_LID_DEVICE_SELF_TEST, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_get_log_create_telemetry_host() - Create host telemetry log + * @fd: File descriptor of nvme device + * @log: Userspace address of the log payload + */ +static inline int nvme_get_log_create_telemetry_host(int fd, + struct nvme_telemetry_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_TELEMETRY_HOST, + .len = sizeof(*log), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_TELEM_HOST_LSP_CREATE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_telemetry_host() - + * @fd: File descriptor of nvme device + * @offset: Offset into the telemetry data + * @len: Length of provided user buffer to hold the log data in bytes + * @log: User address for log page data + * + * Retreives the Telemetry Host-Initiated log page at the requested offset + * using the previously existing capture. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +static inline int nvme_get_log_telemetry_host(int fd, __u64 offset, + __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_TELEMETRY_HOST, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_TELEM_HOST_LSP_RETAIN, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_telemetry_ctrl() - + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @offset: Offset into the telemetry data + * @len: Length of provided user buffer to hold the log data in bytes + * @log: User address for log page data + */ +static inline int nvme_get_log_telemetry_ctrl(int fd, bool rae, + __u64 offset, __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_TELEMETRY_CTRL, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_endurance_group() - + * @fd: File descriptor of nvme device + * @endgid: Starting group identifier to return in the list + * @log: User address to store the endurance log + * + * This log page indicates if an Endurance Group Event has occurred for a + * particular Endurance Group. If an Endurance Group Event has occurred, the + * details of the particular event are included in the Endurance Group + * Information log page for that Endurance Group. An asynchronous event is + * generated when an entry for an Endurance Group is newly added to this log + * page. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_endurance_group(int fd, __u16 endgid, + struct nvme_endurance_group_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_ENDURANCE_GROUP, + .len = sizeof(*log), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = endgid, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_predictable_lat_nvmset() - + * @fd: File descriptor of nvme device + * @nvmsetid: NVM set id + * @log: User address to store the predictable latency log + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
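/*
 * Sketch of the two-step host-initiated telemetry flow using the two
 * helpers above: create a capture while reading the 512-byte header,
 * size the payload from the data area 3 last block, then read the whole
 * capture back without generating a new one. The dalb3 member name, the
 * le16_to_cpu() helper and the <stdlib.h> based allocation are
 * assumptions; very large captures may need to be read in smaller
 * chunks rather than one transfer.
 */
static int example_dump_host_telemetry(int fd, void **buf, size_t *len)
{
	struct nvme_telemetry_log hdr;
	int ret;

	ret = nvme_get_log_create_telemetry_host(fd, &hdr);
	if (ret)
		return ret;

	*len = 512 + (size_t)le16_to_cpu(hdr.dalb3) * 512;
	*buf = malloc(*len);
	if (!*buf)
		return -1;

	/* retain the existing capture, start reading from offset 0 */
	ret = nvme_get_log_telemetry_host(fd, 0, *len, *buf);
	if (ret)
		free(*buf);
	return ret;
}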
+ */ +static inline int nvme_get_log_predictable_lat_nvmset(int fd, __u16 nvmsetid, + struct nvme_nvmset_predictable_lat_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_PREDICTABLE_LAT_NVMSET, + .len = sizeof(*log), + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = nvmsetid, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_predictable_lat_event() - + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @offset: Offset into the predictable latency event + * @len: Length of provided user buffer to hold the log data in bytes + * @log: User address for log page data + */ +static inline int nvme_get_log_predictable_lat_event(int fd, bool rae, + __u32 offset, __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_PREDICTABLE_LAT_AGG, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_ana() - + * @fd: File descriptor of nvme device + * @lsp: Log specific, see &enum nvme_get_log_ana_lsp + * @rae: Retain asynchronous events + * @offset: Offset to the start of the log page + * @len: The allocated length of the log page + * @log: User address to store the ana log + * + * This log consists of a header describing the log and descriptors containing + * the asymmetric namespace access information for ANA Groups that contain + * namespaces that are attached to the controller processing the command. + * + * See &struct nvme_ana_rsp_hdr for the defintion of the returned structure. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static int nvme_get_log_ana(int fd, enum nvme_log_ana_lsp lsp, bool rae, + __u64 offset, __u32 len, void *log) +{ + struct nvme_get_log_args args = { + .lpo = offset, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_ANA, + .len = len, + .nsid = NVME_NSID_NONE, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = lsp, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_ana_groups() - + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @len: The allocated length of the log page + * @log: User address to store the ana group log + * + * See &struct nvme_ana_group_desc for the defintion of the returned structure. 
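/*
 * Sketch: the ANA log is variable length, so size it first with the
 * nvme_get_ana_log_len() helper exported elsewhere in the library (its
 * (int fd, size_t *len) signature is assumed here), then fetch the whole
 * log including group descriptors. The RGO-cleared enumerator name and
 * the <stdlib.h> allocation are likewise assumptions.
 */
static int example_read_ana_log(int fd, void **log, size_t *len)
{
	int ret;

	ret = nvme_get_ana_log_len(fd, len);
	if (ret)
		return ret;

	*log = malloc(*len);
	if (!*log)
		return -1;

	ret = nvme_get_log_ana(fd, NVME_LOG_ANA_LSP_RGO_NAMESPACES, false,
			       0, *len, *log);
	if (ret)
		free(*log);
	return ret;
}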
+ */
+static inline int nvme_get_log_ana_groups(int fd, bool rae, __u32 len,
+        struct nvme_ana_group_desc *log)
+{
+    return nvme_get_log_ana(fd, NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY, rae, 0,
+            len, log);
+}
+
+/**
+ * nvme_get_log_lba_status() - Get LBA Status log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset to the start of the log page
+ * @len: The allocated length of the log page
+ * @log: User address to store the log page
+ */
+static inline int nvme_get_log_lba_status(int fd, bool rae,
+        __u64 offset, __u32 len, void *log)
+{
+    struct nvme_get_log_args args = {
+        .lpo = offset,
+        .result = NULL,
+        .log = log,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .lid = NVME_LOG_LID_LBA_STATUS,
+        .len = len,
+        .nsid = NVME_NSID_NONE,
+        .csi = NVME_CSI_NVM,
+        .lsi = NVME_LOG_LSI_NONE,
+        .lsp = NVME_LOG_LSP_NONE,
+        .uuidx = NVME_UUID_NONE,
+        .rae = rae,
+        .ot = false,
+    };
+    return nvme_get_log(&args);
+}
+
+/**
+ * nvme_get_log_endurance_grp_evt() - Get Endurance Group Event Aggregate log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset to the start of the log page
+ * @len: The allocated length of the log page
+ * @log: User address to store the log page
+ */
+static inline int nvme_get_log_endurance_grp_evt(int fd, bool rae,
+        __u32 offset, __u32 len, void *log)
+{
+    struct nvme_get_log_args args = {
+        .lpo = offset,
+        .result = NULL,
+        .log = log,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .lid = NVME_LOG_LID_ENDURANCE_GRP_EVT,
+        .len = len,
+        .nsid = NVME_NSID_NONE,
+        .csi = NVME_CSI_NVM,
+        .lsi = NVME_LOG_LSI_NONE,
+        .lsp = NVME_LOG_LSP_NONE,
+        .uuidx = NVME_UUID_NONE,
+        .rae = rae,
+        .ot = false,
+    };
+    return nvme_get_log(&args);
+}
+
+/**
+ * nvme_get_log_fid_supported_effects() - Get Feature Identifiers Supported and Effects log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @log: FID Supported and Effects data structure
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_get_log_fid_supported_effects(int fd, bool rae,
+        struct nvme_fid_supported_effects_log *log)
+{
+    return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_FID_SUPPORTED_EFFECTS,
+            NVME_NSID_NONE, sizeof(*log), log);
+}
+
+/**
+ * nvme_get_log_mi_cmd_supported_effects() - Get MI Commands Supported and Effects log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @log: MI Command Supported and Effects data structure
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_get_log_mi_cmd_supported_effects(int fd, bool rae,
+        struct nvme_mi_cmd_supported_effects_log *log)
+{
+    return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_MI_CMD_SUPPORTED_EFFECTS,
+            NVME_NSID_NONE, sizeof(*log), log);
+}
+
+/**
+ * nvme_get_log_boot_partition() - Get Boot Partition log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @lsp: The log specified field of LID
+ * @len: The allocated size, minimum sizeof(struct nvme_boot_partition)
+ * @part: User address to store the log page
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_get_log_boot_partition(int fd, bool rae,
+        __u8 lsp, __u32 len, struct nvme_boot_partition *part)
+{
+    struct nvme_get_log_args args = {
+        .lpo = 0,
+        .result = NULL,
+        .log = part,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .lid = NVME_LOG_LID_BOOT_PARTITION,
+        .len = len,
+        .nsid = NVME_NSID_NONE,
+        .csi = NVME_CSI_NVM,
+        .lsi = NVME_LOG_LSI_NONE,
+        .lsp = lsp,
+        .uuidx = NVME_UUID_NONE,
+        .rae = rae,
+        .ot = false,
+    };
+    return nvme_get_log(&args);
+}
+
+/**
+ * nvme_get_log_discovery() - Get Discovery log page
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset of this log to retrieve
+ * @len: The allocated size for this portion of the log
+ * @log: User address to store the discovery log
+ *
+ * Supported only by fabrics discovery controllers, returning discovery
+ * records.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_get_log_discovery(int fd, bool rae,
+        __u32 offset, __u32 len, void *log)
+{
+    struct nvme_get_log_args args = {
+        .lpo = offset,
+        .result = NULL,
+        .log = log,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .lid = NVME_LOG_LID_DISCOVER,
+        .len = len,
+        .nsid = NVME_NSID_NONE,
+        .csi = NVME_CSI_NVM,
+        .lsi = NVME_LOG_LSI_NONE,
+        .lsp = NVME_LOG_LSP_NONE,
+        .uuidx = NVME_UUID_NONE,
+        .rae = rae,
+        .ot = false,
+    };
+    return nvme_get_log(&args);
+}
+
+/**
+ * nvme_get_log_media_unit_stat() - Get Media Unit Status log
+ * @fd: File descriptor of nvme device
+ * @domid: Domain Identifier selection, if supported
+ * @mus: User address to store the Media Unit statistics log
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_get_log_media_unit_stat(int fd, __u16 domid,
+        struct nvme_media_unit_stat_log *mus)
+{
+    struct nvme_get_log_args args = {
+        .lpo = 0,
+        .result = NULL,
+        .log = mus,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .lid = NVME_LOG_LID_MEDIA_UNIT_STATUS,
+        .len = sizeof(*mus),
+        .nsid = NVME_NSID_NONE,
+        .csi = NVME_CSI_NVM,
+        .lsi = domid,
+        .lsp = NVME_LOG_LSP_NONE,
+        .uuidx = NVME_UUID_NONE,
+        .rae = false,
+        .ot = false,
+    };
+    return nvme_get_log(&args);
+}
+
+/**
+ * nvme_get_log_support_cap_config_list() - Get Supported Capacity Configuration List log
+ * @fd: File descriptor of nvme device
+ * @domid: Domain Identifier selection, if supported
+ * @cap: User address to store the supported capacity configuration list log
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_get_log_support_cap_config_list(int fd, __u16 domid,
+        struct nvme_supported_cap_config_list_log *cap)
+{
+    struct nvme_get_log_args args = {
+        .lpo = 0,
+        .result = NULL,
+        .log = cap,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .lid = NVME_LOG_LID_SUPPORTED_CAP_CONFIG_LIST,
+        .len = sizeof(*cap),
+        .nsid = NVME_NSID_NONE,
+        .csi = NVME_CSI_NVM,
+        .lsi = domid,
+        .lsp = NVME_LOG_LSP_NONE,
+        .uuidx = NVME_UUID_NONE,
+        .rae = false,
+        .ot = false,
+    };
+    return nvme_get_log(&args);
+}
+
+/**
+ * nvme_get_log_reservation() - Get Reservation Notification log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @log: User address to store the reservation log
+ */
+static inline int nvme_get_log_reservation(int fd, bool rae,
+        struct nvme_resv_notification_log *log)
+{
+    return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_RESERVATION,
+            NVME_NSID_ALL, sizeof(*log), log);
+}
+
+/**
+ * nvme_get_log_sanitize() - Get Sanitize Status log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
* @log: User address to store the sanitize log + * + * The Sanitize Status log page reports sanitize operation time estimates and + * information about the most recent sanitize operation. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_sanitize(int fd, bool rae, + struct nvme_sanitize_log_page *log) +{ + return nvme_get_nsid_log(fd, rae, NVME_LOG_LID_SANITIZE, + NVME_NSID_ALL, sizeof(*log), log); +} + +/** + * nvme_get_log_zns_changed_zones() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @rae: Retain asynchronous events + * @log: User address to store the changed zone log + * + * The list of zones that have changed state due to an exceptional event. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_get_log_zns_changed_zones(int fd, __u32 nsid, bool rae, + struct nvme_zns_changed_zone_log *log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_ZNS_CHANGED_ZONES, + .len = sizeof(*log), + .nsid = nsid, + .csi = NVME_CSI_ZNS, + .lsi = NVME_LOG_LSI_NONE, + .lsp = NVME_LOG_LSP_NONE, + .uuidx = NVME_UUID_NONE, + .rae = rae, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * nvme_get_log_persistent_event() - + * @fd: File descriptor of nvme device + * @action: Action the controller should take during processing this command + * @size: Size of @pevent_log + * @pevent_log: User address to store the persistent event log + */ +static inline int nvme_get_log_persistent_event(int fd, + enum nvme_pevent_log_action action, + __u32 size, void *pevent_log) +{ + struct nvme_get_log_args args = { + .lpo = 0, + .result = NULL, + .log = pevent_log, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .lid = NVME_LOG_LID_PERSISTENT_EVENT, + .len = size, + .nsid = NVME_NSID_ALL, + .csi = NVME_CSI_NVM, + .lsi = NVME_LOG_LSI_NONE, + .lsp = action, + .uuidx = NVME_UUID_NONE, + .rae = false, + .ot = false, + }; + return nvme_get_log(&args); +} + +/** + * struct nvme_set_features_args - Arguments for the NVMe Admin Set Feature command + * @result: The command completion result from CQE dword0 + * @data: User address of feature data, if applicable + * @args_size: Size of &struct nvme_set_features_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace ID, if applicable + * @cdw11: Value to set the feature to + * @cdw12: Feature specific command dword12 field + * @cdw15: Feature specific command dword15 field + * @data_len: Length of feature data, if applicable, in bytes + * @save: Save value across power states + * @uuidx: UUID Index for differentiating vendor specific encoding + * @fid: Feature identifier + */ +struct nvme_set_features_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw15; + __u32 data_len; + bool save; + __u8 uuidx; + __u8 fid; +}; + +/** + * nvme_set_features() - Set a feature attribute + * @args: &struct nvme_set_features_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
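+ *
+ * A minimal usage sketch (illustrative only; the feature identifier and
+ * value shown here are assumptions, any &enum nvme_features_id value may be
+ * used):
+ *
+ *    __u32 result;
+ *    struct nvme_set_features_args args = {
+ *        .args_size = sizeof(args),
+ *        .fd = fd,
+ *        .fid = NVME_FEAT_FID_POWER_MGMT,
+ *        .nsid = NVME_NSID_NONE,
+ *        .cdw11 = 2,
+ *        .save = false,
+ *        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ *        .result = &result,
+ *    };
+ *    int err = nvme_set_features(&args);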
+ */
+int nvme_set_features(struct nvme_set_features_args *args);
+
+/**
+ * nvme_set_features_data() - Helper function for nvme_set_features()
+ * @fd: File descriptor of nvme device
+ * @fid: Feature identifier
+ * @nsid: Namespace ID, if applicable
+ * @cdw11: Value to set the feature to
+ * @save: Save value across power states
+ * @data_len: Length of feature data, if applicable, in bytes
+ * @data: User address of feature data, if applicable
+ * @result: The command completion result from CQE dword0
+ */
+static inline int nvme_set_features_data(int fd, __u8 fid, __u32 nsid,
+        __u32 cdw11, bool save, __u32 data_len, void *data,
+        __u32 *result)
+{
+    struct nvme_set_features_args args = {
+        .result = result,
+        .data = data,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .nsid = nsid,
+        .cdw11 = cdw11,
+        .cdw12 = 0,
+        .cdw13 = 0,
+        .cdw15 = 0,
+        .data_len = data_len,
+        .save = save,
+        .uuidx = 0,
+        .fid = fid,
+    };
+    return nvme_set_features(&args);
+}
+
+/**
+ * nvme_set_features_simple() - Helper function for nvme_set_features()
+ * @fd: File descriptor of nvme device
+ * @fid: Feature identifier
+ * @nsid: Namespace ID, if applicable
+ * @cdw11: Value to set the feature to
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ */
+static inline int nvme_set_features_simple(int fd, __u8 fid, __u32 nsid,
+        __u32 cdw11, bool save, __u32 *result)
+{
+    return nvme_set_features_data(fd, fid, nsid, cdw11, save, 0, NULL,
+            result);
+}
+
+/**
+ * nvme_set_features_arbitration() - Set arbitration features
+ * @fd: File descriptor of nvme device
+ * @ab: Arbitration Burst
+ * @lpw: Low Priority Weight
+ * @mpw: Medium Priority Weight
+ * @hpw: High Priority Weight
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_arbitration(int fd, __u8 ab, __u8 lpw, __u8 mpw,
+        __u8 hpw, bool save, __u32 *result);
+
+/**
+ * nvme_set_features_power_mgmt() - Set power management feature
+ * @fd: File descriptor of nvme device
+ * @ps: Power State
+ * @wh: Workload Hint
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_power_mgmt(int fd, __u8 ps, __u8 wh, bool save,
+        __u32 *result);
+
+/**
+ * nvme_set_features_lba_range() - Set LBA range feature
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @nr_ranges: Number of ranges in @data
+ * @save: Save value across power states
+ * @data: User address of feature data
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_lba_range(int fd, __u32 nsid, __u32 nr_ranges, bool save,
+        struct nvme_lba_range_type *data, __u32 *result);
+
+/**
+ * nvme_set_features_temp_thresh() - Set temperature threshold feature
+ * @fd: File descriptor of nvme device
+ * @tmpth: Temperature Threshold
+ * @tmpsel: Threshold Temperature Select
+ * @thsel: Threshold Type Select
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
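+ *
+ * A minimal usage sketch (illustrative only; the threshold value of 0x160
+ * Kelvin and the caller-chosen @thsel value of &enum nvme_feat_tmpthresh_thsel
+ * are assumptions):
+ *
+ *    __u32 result;
+ *    int err = nvme_set_features_temp_thresh(fd, 0x160, 0, thsel, false,
+ *                                            &result);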
+ */ +int nvme_set_features_temp_thresh(int fd, __u16 tmpth, __u8 tmpsel, + enum nvme_feat_tmpthresh_thsel thsel, + bool save, __u32 *result); + +/** + * nvme_set_features_err_recovery() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @tler: Time-limited error recovery value + * @dulbe: Deallocated or Unwritten Logical Block Error Enable + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_err_recovery(int fd, __u32 nsid, __u16 tler, + bool dulbe, bool save, __u32 *result); + +/** + * nvme_set_features_volatile_wc() - + * @fd: File descriptor of nvme device + * @wce: Write cache enable + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_volatile_wc(int fd, bool wce, bool save, + __u32 *result); + +/** + * nvme_set_features_irq_coalesce() - + * @fd: File descriptor of nvme device + * @thr: Aggregation Threshold + * @time: Aggregation Time + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_irq_coalesce(int fd, __u8 thr, __u8 time, + bool save, __u32 *result); + +/** + * nvme_set_features_irq_config() - + * @fd: File descriptor of nvme device + * @iv: Interrupt Vector + * @cd: Coalescing Disable + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_irq_config(int fd, __u16 iv, bool cd, bool save, + __u32 *result); + +/** + * nvme_set_features_write_atomic() - + * @fd: File descriptor of nvme device + * @dn: Disable Normal + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_write_atomic(int fd, bool dn, bool save, + __u32 *result); + +/** + * nvme_set_features_async_event() - + * @fd: File descriptor of nvme device + * @events: Events to enable + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_async_event(int fd, __u32 events, bool save, + __u32 *result); + +/** + * nvme_set_features_auto_pst() - + * @fd: File descriptor of nvme device + * @apste: Autonomous Power State Transition Enable + * @apst: Autonomous Power State Transition + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */
+int nvme_set_features_auto_pst(int fd, bool apste, bool save,
+        struct nvme_feat_auto_pst *apst,
+        __u32 *result);
+
+/**
+ * nvme_set_features_timestamp() - Set timestamp feature
+ * @fd: File descriptor of nvme device
+ * @save: Save value across power states
+ * @timestamp: The current timestamp value to assign to this feature
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_timestamp(int fd, bool save, __u64 timestamp);
+
+/**
+ * nvme_set_features_hctm() - Set thermal management feature
+ * @fd: File descriptor of nvme device
+ * @tmt2: Thermal Management Temperature 2
+ * @tmt1: Thermal Management Temperature 1
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_hctm(int fd, __u16 tmt2, __u16 tmt1, bool save,
+        __u32 *result);
+
+/**
+ * nvme_set_features_nopsc() - Set non-operational power state config feature
+ * @fd: File descriptor of nvme device
+ * @noppme: Non-Operational Power State Permissive Mode Enable
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_nopsc(int fd, bool noppme, bool save, __u32 *result);
+
+/**
+ * nvme_set_features_rrl() - Set read recovery level feature
+ * @fd: File descriptor of nvme device
+ * @rrl: Read recovery level setting
+ * @nvmsetid: NVM set id
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_rrl(int fd, __u8 rrl, __u16 nvmsetid, bool save,
+        __u32 *result);
+
+/**
+ * nvme_set_features_plm_config() - Set predictable latency feature
+ * @fd: File descriptor of nvme device
+ * @enable: Predictable Latency Enable
+ * @nvmsetid: NVM Set Identifier
+ * @save: Save value across power states
+ * @data: Pointer to structure nvme_plm_config
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_plm_config(int fd, bool enable, __u16 nvmsetid,
+        bool save, struct nvme_plm_config *data,
+        __u32 *result);
+
+/**
+ * nvme_set_features_plm_window() - Set predictable latency window feature
+ * @fd: File descriptor of nvme device
+ * @sel: Window Select
+ * @nvmsetid: NVM Set Identifier
+ * @save: Save value across power states
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_set_features_plm_window(int fd, enum nvme_feat_plm_window_select sel,
+        __u16 nvmsetid, bool save, __u32 *result);
+
+/**
+ * nvme_set_features_lba_sts_interval() - Set LBA status information report interval feature
+ * @fd: File descriptor of nvme device
+ * @save: Save value across power states
+ * @lsiri: LBA Status Information Report Interval
+ * @lsipi: LBA Status Information Poll Interval
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */ +int nvme_set_features_lba_sts_interval(int fd, __u16 lsiri, __u16 lsipi, + bool save, __u32 *result); + +/** + * nvme_set_features_host_behavior() - + * @fd: File descriptor of nvme device + * @save: Save value across power states + * @data: Pointer to structure nvme_feat_host_behavior + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_host_behavior(int fd, bool save, + struct nvme_feat_host_behavior *data); + +/** + * nvme_set_features_sanitize() - + * @fd: File descriptor of nvme device + * @nodrm: No-Deallocate Response Mode + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_sanitize(int fd, bool nodrm, bool save, __u32 *result); + +/** + * nvme_set_features_endurance_evt_cfg() - + * @fd: File descriptor of nvme device + * @endgid: Endurance Group Identifier + * @egwarn: Flags to enable warning, see &enum nvme_eg_critical_warning_flags + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_endurance_evt_cfg(int fd, __u16 endgid, __u8 egwarn, + bool save, __u32 *result); + +/** + * nvme_set_features_sw_progress() - + * @fd: File descriptor of nvme device + * @pbslc: Pre-boot Software Load Count + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_sw_progress(int fd, __u8 pbslc, bool save, + __u32 *result); + +/** + * nvme_set_features_host_id() - + * @fd: File descriptor of nvme device + * @exhid: Enable Extended Host Identifier + * @save: Save value across power states + * @hostid: Host ID to set + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_host_id(int fd, bool exhid, bool save, __u8 *hostid); + +/** + * nvme_set_features_resv_mask() - + * @fd: File descriptor of nvme device + * @mask: Reservation Notification Mask Field + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_resv_mask(int fd, __u32 mask, bool save, __u32 *result); + +/** + * nvme_set_features_resv_persist() - + * @fd: File descriptor of nvme device + * @ptpl: Persist Through Power Loss + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +int nvme_set_features_resv_persist(int fd, bool ptpl, bool save, __u32 *result); + +/** + * nvme_set_features_write_protect() - + * @fd: File descriptor of nvme device + * @state: Write Protection State + * @save: Save value across power states + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_features_write_protect(int fd, enum nvme_feat_nswpcfg_state state, + bool save, __u32 *result); +/** + * struct nvme_get_features_args - Arguments for the NVMe Admin Get Feature command + * @args_size: Size of &struct nvme_get_features_args + * @fd: File descriptor of nvme device + * @result: The command completion result from CQE dword0 + * @timeout: Timeout in ms + * @nsid: Namespace ID, if applicable + * @sel: Select which type of attribute to return, + * see &enum nvme_get_features_sel + * @cdw11: Feature specific command dword11 field + * @data_len: Length of feature data, if applicable, in bytes + * @data: User address of feature data, if applicable + * @fid: Feature identifier, see &enum nvme_features_id + * @uuidx: UUID Index for differentiating vendor specific encoding + */ +struct nvme_get_features_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + enum nvme_get_features_sel sel; + __u32 cdw11; + __u32 data_len; + __u8 fid; + __u8 uuidx; +}; + +/** + * nvme_get_features() - Retrieve a feature attribute + * @args: &struct nvme_get_features_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features(struct nvme_get_features_args *args); + +/** + * nvme_get_features_data() - Helper function for @nvme_get_features() + * @fd: File descriptor of nvme device + * @fid: Feature identifier + * @nsid: Namespace ID, if applicable + * @data_len: Length of feature data, if applicable, in bytes + * @data: User address of feature data, if applicable + * @result: The command completion result from CQE dword0 + */ +static inline int nvme_get_features_data(int fd, enum nvme_features_id fid, + __u32 nsid, __u32 data_len, void *data, __u32 *result) +{ + struct nvme_get_features_args args = { + .result = result, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .sel = NVME_GET_FEATURES_SEL_CURRENT, + .cdw11 = 0, + .data_len = data_len, + .fid = fid, + .uuidx = NVME_UUID_NONE, + }; + + return nvme_get_features(&args); +} + +/** + * nvme_get_features_simple() - Helper function for @nvme_get_features() + * @fd: File descriptor of nvme device + * @fid: Feature identifier + * @nsid: Namespace ID, if applicable + * @result: The command completion result from CQE dword0 + */ +static inline int nvme_get_features_simple(int fd, enum nvme_features_id fid, + __u32 nsid, __u32 *result) +{ + return nvme_get_features_data(fd, fid, nsid, 0, NULL, result); +} + +/** + * nvme_get_features_arbitration() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
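+ *
+ * A minimal usage sketch (illustrative only; error handling omitted):
+ *
+ *    __u32 result;
+ *    int err = nvme_get_features_arbitration(fd,
+ *                                            NVME_GET_FEATURES_SEL_CURRENT,
+ *                                            &result);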
+ */ +int nvme_get_features_arbitration(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_power_mgmt() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_power_mgmt(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_lba_range() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @data: User address of feature data, if applicable + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_lba_range(int fd, enum nvme_get_features_sel sel, + struct nvme_lba_range_type *data, + __u32 *result); + +/** + * nvme_get_features_temp_thresh() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_temp_thresh(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_err_recovery() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_err_recovery(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_volatile_wc() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_volatile_wc(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_num_queues() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_num_queues(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_irq_coalesce() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +int nvme_get_features_irq_coalesce(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_irq_config() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @iv: + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_irq_config(int fd, enum nvme_get_features_sel sel, + __u16 iv, __u32 *result); + +/** + * nvme_get_features_write_atomic() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_write_atomic(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_async_event() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_async_event(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_auto_pst() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @apst: + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_auto_pst(int fd, enum nvme_get_features_sel sel, + struct nvme_feat_auto_pst *apst, __u32 *result); + +/** + * nvme_get_features_host_mem_buf() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_host_mem_buf(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_timestamp() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @ts: Current timestamp + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_timestamp(int fd, enum nvme_get_features_sel sel, + struct nvme_timestamp *ts); + +/** + * nvme_get_features_kato() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
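+ *
+ * A minimal usage sketch (illustrative only; error handling omitted):
+ *
+ *    __u32 kato;
+ *    int err = nvme_get_features_kato(fd, NVME_GET_FEATURES_SEL_CURRENT,
+ *                                     &kato);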
+ */
+int nvme_get_features_kato(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_hctm() - Get thermal management feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_hctm(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_nopsc() - Get non-operational power state config feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_nopsc(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_rrl() - Get read recovery level feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_rrl(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_plm_config() - Get predictable latency feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @nvmsetid: NVM set id
+ * @data: Pointer to structure nvme_plm_config
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_plm_config(int fd, enum nvme_get_features_sel sel,
+        __u16 nvmsetid, struct nvme_plm_config *data,
+        __u32 *result);
+
+/**
+ * nvme_get_features_plm_window() - Get predictable latency window feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @nvmsetid: NVM set id
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_plm_window(int fd, enum nvme_get_features_sel sel,
+        __u16 nvmsetid, __u32 *result);
+
+/**
+ * nvme_get_features_lba_sts_interval() - Get LBA status information report interval feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_lba_sts_interval(int fd, enum nvme_get_features_sel sel,
+        __u32 *result);
+
+/**
+ * nvme_get_features_host_behavior() - Get host behavior support feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @data: Pointer to structure nvme_feat_host_behavior
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */ +int nvme_get_features_host_behavior(int fd, enum nvme_get_features_sel sel, + struct nvme_feat_host_behavior *data, + __u32 *result); + +/** + * nvme_get_features_sanitize() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_sanitize(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_endurance_event_cfg() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @endgid: Endurance Group Identifier + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_endurance_event_cfg(int fd, enum nvme_get_features_sel sel, + __u16 endgid, __u32 *result); + +/** + * nvme_get_features_sw_progress() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_sw_progress(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_host_id() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @exhid: Enable Extended Host Identifier + * @len: Length of @hostid + * @hostid: Buffer for returned host ID + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_host_id(int fd, enum nvme_get_features_sel sel, + bool exhid, __u32 len, __u8 *hostid); + +/** + * nvme_get_features_resv_mask() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_resv_mask(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_resv_persist() - + * @fd: File descriptor of nvme device + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_features_resv_persist(int fd, enum nvme_get_features_sel sel, + __u32 *result); + +/** + * nvme_get_features_write_protect() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */
+int nvme_get_features_write_protect(int fd, __u32 nsid,
+        enum nvme_get_features_sel sel,
+        __u32 *result);
+
+/**
+ * nvme_get_features_iocs_profile() - Get I/O command set profile feature
+ * @fd: File descriptor of nvme device
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_get_features_iocs_profile(int fd, enum nvme_get_features_sel sel,
+        __u32 *result);
+
+/**
+ * struct nvme_format_nvm_args - Arguments for the NVMe Format NVM command
+ * @result: The command completion result from CQE dword0
+ * @args_size: Size of &struct nvme_format_nvm_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Set to override default timeout to this value in milliseconds;
+ * useful for long running formats. 0 will use system default.
+ * @nsid: Namespace ID to format
+ * @mset: Metadata settings (extended or separated), true if extended
+ * @pi: Protection information type
+ * @pil: Protection information location (beginning or end), true if end
+ * @ses: Secure erase settings
+ * @lbaf: Logical block address format
+ */
+struct nvme_format_nvm_args {
+    __u32 *result;
+    int args_size;
+    int fd;
+    __u32 timeout;
+    __u32 nsid;
+    enum nvme_cmd_format_mset mset;
+    enum nvme_cmd_format_pi pi;
+    enum nvme_cmd_format_pil pil;
+    enum nvme_cmd_format_ses ses;
+    __u8 lbaf;
+};
+
+/**
+ * nvme_format_nvm() - Format nvme namespace(s)
+ * @args: &struct nvme_format_nvm_args argument structure
+ *
+ * The Format NVM command low level formats the NVM media. This command is used
+ * by the host to change the LBA data size and/or metadata size. A low level
+ * format may destroy all data and metadata associated with all namespaces or
+ * only the specific namespace associated with the command.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_format_nvm(struct nvme_format_nvm_args *args);
+
+/**
+ * struct nvme_ns_mgmt_args - Arguments for NVMe Namespace Management command
+ * @result: NVMe command result
+ * @ns: Namespace identification descriptors
+ * @args_size: Size of &struct nvme_ns_mgmt_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Timeout in ms
+ * @nsid: Namespace identifier
+ * @sel: Type of management operation to perform
+ * @csi: Command Set Identifier
+ */
+struct nvme_ns_mgmt_args {
+    __u32 *result;
+    struct nvme_id_ns *ns;
+    int args_size;
+    int fd;
+    __u32 timeout;
+    __u32 nsid;
+    enum nvme_ns_mgmt_sel sel;
+    __u8 csi;
+};
+
+/**
+ * nvme_ns_mgmt() - Issue a Namespace management command
+ * @args: &struct nvme_ns_mgmt_args Argument structure
+ */
+int nvme_ns_mgmt(struct nvme_ns_mgmt_args *args);
+
+/**
+ * nvme_ns_mgmt_create() - Create a namespace
+ * @fd: File descriptor of nvme device
+ * @ns: Namespace identification that defines ns creation parameters
+ * @nsid: On success, set to the namespace id that was created
+ * @timeout: Override the default timeout to this value in milliseconds;
+ * set to 0 to use the system default.
+ * @csi: Command Set Identifier
+ *
+ * On successful creation, the namespace exists in the subsystem, but is not
+ * attached to any controller. Use nvme_ns_attach_ctrls() to assign the
+ * namespace to one or more controllers.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
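+ *
+ * A minimal usage sketch (illustrative only; the namespace geometry is left
+ * to the caller and @ctrlist is an already prepared &struct nvme_ctrl_list,
+ * both assumptions):
+ *
+ *    struct nvme_id_ns ns = { 0 };
+ *    __u32 new_nsid;
+ *    int err;
+ *
+ *    ... fill in ns.nsze, ns.ncap, ns.flbas as needed ...
+ *    err = nvme_ns_mgmt_create(fd, &ns, &new_nsid, 0, NVME_CSI_NVM);
+ *    if (!err)
+ *        err = nvme_ns_attach_ctrls(fd, new_nsid, ctrlist);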
+ */ +static inline int nvme_ns_mgmt_create(int fd, struct nvme_id_ns *ns, + __u32 *nsid, __u32 timeout, __u8 csi) +{ + struct nvme_ns_mgmt_args args = { + .result = nsid, + .ns = ns, + .args_size = sizeof(args), + .fd = fd, + .timeout = timeout, + .nsid = NVME_NSID_NONE, + .sel = NVME_NS_MGMT_SEL_CREATE, + .csi = csi, + }; + + return nvme_ns_mgmt(&args); +} + +/** + * nvme_ns_mgmt_delete() - + * @fd: File descriptor of nvme device + * @nsid: Namespace identifier to delete + * + * It is recommended that a namespace being deleted is not attached to any + * controller. Use the nvme_ns_detach_ctrls() first if the namespace is still + * attached. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_ns_mgmt_delete(int fd, __u32 nsid) +{ + struct nvme_ns_mgmt_args args = { + .result = NULL, + .ns = NULL, + .args_size = sizeof(args), + .fd = fd, + .timeout = 0, + .nsid = nsid, + .sel = NVME_NS_MGMT_SEL_DELETE, + .csi = 0, + }; + + return nvme_ns_mgmt(&args); +} + +/** + * struct nvme_ns_attach_args - Arguments for Nvme Namespace Management command + * @result: NVMe command result + * @ctrlist: Controller list to modify attachment state of nsid + * @args_size: Size of &struct nvme_ns_attach_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace ID to execute attach selection + * @sel: Attachment selection, see &enum nvme_ns_attach_sel + */ +struct nvme_ns_attach_args { + __u32 *result; + struct nvme_ctrl_list *ctrlist; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + enum nvme_ns_attach_sel sel; +}; + +/** + * nvme_ns_attach() - Attach or detach namespace to controller(s) + * @args: &struct nvme_ns_attach_args Argument structure + */ +int nvme_ns_attach(struct nvme_ns_attach_args *args); + +/** + * nvme_ns_attach_ctrls() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID to attach + * @ctrlist: Controller list to modify attachment state of nsid + */ +static inline int nvme_ns_attach_ctrls(int fd, __u32 nsid, + struct nvme_ctrl_list *ctrlist) +{ + struct nvme_ns_attach_args args = { + .result = NULL, + .ctrlist = ctrlist, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .sel = NVME_NS_ATTACH_SEL_CTRL_ATTACH, + }; + + return nvme_ns_attach(&args); +} + +/** + * nvme_ns_detach_ctrls() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID to detach + * @ctrlist: Controller list to modify attachment state of nsid + */ +static inline int nvme_ns_detach_ctrls(int fd, __u32 nsid, + struct nvme_ctrl_list *ctrlist) +{ + struct nvme_ns_attach_args args = { + .result = NULL, + .ctrlist = ctrlist, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .sel = NVME_NS_ATTACH_SEL_CTRL_DEATTACH, + }; + + return nvme_ns_attach(&args); +} + +/** + * struct nvme_fw_download_args - Arguments for the NVMe Firmware Download command + * @args_size: Size of &struct nvme_fw_download_args + * @fd: File descriptor of nvme device + * @result: The command completion result from CQE dword0 + * @timeout: Timeout in ms + * @offset: Offset in the firmware data + * @data: Userspace address of the firmware data + * @data_len: Length of data in this command in bytes + */ +struct nvme_fw_download_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 offset; + __u32 data_len; +}; + +/** + * nvme_fw_download() - Download 
part or all of a firmware image to the + * controller + * @args: &struct nvme_fw_download_args argument structure + * + * The Firmware Image Download command downloads all or a portion of an image + * for a future update to the controller. The Firmware Image Download command + * downloads a new image (in whole or in part) to the controller. + * + * The image may be constructed of multiple pieces that are individually + * downloaded with separate Firmware Image Download commands. Each Firmware + * Image Download command includes a Dword Offset and Number of Dwords that + * specify a dword range. + * + * The new firmware image is not activated as part of the Firmware Image + * Download command. Use the nvme_fw_commit() to activate a newly downloaded + * image. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_fw_download(struct nvme_fw_download_args *args); + +/** + * struct nvme_fw_commit_args - Arguments for the NVMe Firmware Commit command + * @args_size: Size of &struct nvme_fw_commit_args + * @fd: File descriptor of nvme device + * @action: Action to use for the firmware image, see &enum nvme_fw_commit_ca + * @timeout: Timeout in ms + * @result: The command completion result from CQE dword0 + * @slot: Firmware slot to commit the downloaded image + * @bpid: Set to true to select the boot partition id + */ +struct nvme_fw_commit_args { + __u32 *result; + int args_size; + int fd; + __u32 timeout; + enum nvme_fw_commit_ca action; + __u8 slot; + bool bpid; +}; + +/** + * nvme_fw_commit() - Commit firmware using the specified action + * @args: &struct nvme_fw_commit_args argument structure + * + * The Firmware Commit command modifies the firmware image or Boot Partitions. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. The command + * status response may specify additional reset actions required to complete + * the commit process. + */ +int nvme_fw_commit(struct nvme_fw_commit_args *args); + +/** + * struct nvme_security_send_args - Arguments for the NVMe Security Send command + * @result: The command completion result from CQE dword0 + * @data: Security data payload to send + * @args_size: Size of &struct nvme_security_send_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace ID to issue security command on + * @tl: Protocol specific transfer length + * @data_len: Data length of the payload in bytes + * @nssf: NVMe Security Specific field + * @spsp0: Security Protocol Specific field + * @spsp1: Security Protocol Specific field + * @secp: Security Protocol + */ +struct nvme_security_send_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 tl; + __u32 data_len; + __u8 nssf; + __u8 spsp0; + __u8 spsp1; + __u8 secp; +}; + +/** + * nvme_security_send() - + * @args: &struct nvme_security_send argument structure + * + * The Security Send command transfers security protocol data to the + * controller. The data structure transferred to the controller as part of this + * command contains security protocol specific commands to be performed by the + * controller. The data structure transferred may also contain data or + * parameters associated with the security protocol commands. + * + * The security data is protocol specific and is not defined by the NVMe + * specification. 
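+ *
+ * A minimal usage sketch (illustrative only; the security protocol, SPSP
+ * values and payload buffer are caller-provided assumptions):
+ *
+ *    struct nvme_security_send_args args = {
+ *        .result = NULL,
+ *        .data = payload,
+ *        .args_size = sizeof(args),
+ *        .fd = fd,
+ *        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ *        .nsid = NVME_NSID_NONE,
+ *        .tl = payload_len,
+ *        .data_len = payload_len,
+ *        .nssf = 0,
+ *        .spsp0 = spsp0,
+ *        .spsp1 = spsp1,
+ *        .secp = secp,
+ *    };
+ *    int err = nvme_security_send(&args);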
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_security_send(struct nvme_security_send_args *args);
+
+/**
+ * struct nvme_security_receive_args - Arguments for the NVMe Security Receive command
+ * @result: The command completion result from CQE dword0
+ * @data: Security data payload to receive
+ * @args_size: Size of &struct nvme_security_receive_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Timeout in ms
+ * @nsid: Namespace ID to issue security command on
+ * @al: Protocol specific allocation length
+ * @data_len: Data length of the payload in bytes
+ * @nssf: NVMe Security Specific field
+ * @spsp0: Security Protocol Specific field
+ * @spsp1: Security Protocol Specific field
+ * @secp: Security Protocol
+ */
+struct nvme_security_receive_args {
+    __u32 *result;
+    void *data;
+    int args_size;
+    int fd;
+    __u32 timeout;
+    __u32 nsid;
+    __u32 al;
+    __u32 data_len;
+    __u8 nssf;
+    __u8 spsp0;
+    __u8 spsp1;
+    __u8 secp;
+};
+
+/**
+ * nvme_security_receive() - Security Receive command
+ * @args: &struct nvme_security_receive_args argument structure
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_security_receive(struct nvme_security_receive_args *args);
+
+/**
+ * struct nvme_get_lba_status_args - Arguments for the NVMe Get LBA Status command
+ * @lbas: Data payload to return status descriptors
+ * @result: The command completion result from CQE dword0
+ * @slba: Starting logical block address to check statuses
+ * @args_size: Size of &struct nvme_get_lba_status_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Timeout in ms
+ * @nsid: Namespace ID to retrieve LBA status
+ * @mndw: Maximum number of dwords to return
+ * @atype: Action type mechanism to determine LBA status descriptors to
+ * return, see &enum nvme_lba_status_atype
+ * @rl: Range length from slba to perform the action
+ */
+struct nvme_get_lba_status_args {
+    __u64 slba;
+    __u32 *result;
+    struct nvme_lba_status *lbas;
+    int args_size;
+    int fd;
+    __u32 timeout;
+    __u32 nsid;
+    __u32 mndw;
+    enum nvme_lba_status_atype atype;
+    __u16 rl;
+};
+
+/**
+ * nvme_get_lba_status() - Retrieve information on possibly unrecoverable LBAs
+ * @args: &struct nvme_get_lba_status_args argument structure
+ *
+ * The Get LBA Status command requests information about Potentially
+ * Unrecoverable LBAs. Refer to the specification for action type descriptions.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
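+ *
+ * A minimal usage sketch (illustrative only; @lbas, @slba, @nsid, @buf_len,
+ * @atype and @rl are caller-provided assumptions):
+ *
+ *    __u32 result;
+ *    struct nvme_get_lba_status_args args = {
+ *        .lbas = lbas,
+ *        .result = &result,
+ *        .slba = slba,
+ *        .args_size = sizeof(args),
+ *        .fd = fd,
+ *        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ *        .nsid = nsid,
+ *        .mndw = buf_len / 4,
+ *        .atype = atype,
+ *        .rl = rl,
+ *    };
+ *    int err = nvme_get_lba_status(&args);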
+ */
+int nvme_get_lba_status(struct nvme_get_lba_status_args *args);
+
+/**
+ * struct nvme_directive_send_args - Arguments for the NVMe Directive Send command
+ * @result: If successful, the CQE dword0 value
+ * @data: Data payload to be sent
+ * @args_size: Size of &struct nvme_directive_send_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Timeout in ms
+ * @nsid: Namespace ID, if applicable
+ * @doper: Directive send operation, see &enum nvme_directive_send_doper
+ * @dtype: Directive type, see &enum nvme_directive_dtype
+ * @cdw12: Directive specific command dword12
+ * @data_len: Length of data payload in bytes
+ * @dspec: Directive specific field
+ */
+struct nvme_directive_send_args {
+    __u32 *result;
+    void *data;
+    int args_size;
+    int fd;
+    __u32 timeout;
+    __u32 nsid;
+    enum nvme_directive_send_doper doper;
+    enum nvme_directive_dtype dtype;
+    __u32 cdw12;
+    __u32 data_len;
+    __u16 dspec;
+};
+
+/**
+ * nvme_directive_send() - Send directive command
+ * @args: &struct nvme_directive_send_args argument structure
+ *
+ * Directives are a mechanism to enable host and NVM subsystem or controller
+ * information exchange. The Directive Send command transfers data related to a
+ * specific Directive Type from the host to the controller.
+ *
+ * See the NVMe specification for more information.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_directive_send(struct nvme_directive_send_args *args);
+
+/**
+ * nvme_directive_send_id_endir() - Directive Send Enable Directive
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace Identifier
+ * @endir: Enable Directive
+ * @dtype: Directive Type
+ * @id: Pointer to structure nvme_id_directives
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_directive_send_id_endir(int fd, __u32 nsid, bool endir,
+        enum nvme_directive_dtype dtype,
+        struct nvme_id_directives *id);
+
+/**
+ * nvme_directive_send_stream_release_identifier() - Directive Send Streams release identifier
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @stream_id: Stream identifier
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+static inline int nvme_directive_send_stream_release_identifier(int fd,
+        __u32 nsid, __u16 stream_id)
+{
+    struct nvme_directive_send_args args = {
+        .result = NULL,
+        .data = NULL,
+        .args_size = sizeof(args),
+        .fd = fd,
+        .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+        .nsid = nsid,
+        .doper = NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_IDENTIFIER,
+        .dtype = NVME_DIRECTIVE_DTYPE_STREAMS,
+        .cdw12 = 0,
+        .data_len = 0,
+        .dspec = stream_id,
+    };
+
+    return nvme_directive_send(&args);
+}
+
+/**
+ * nvme_directive_send_stream_release_resource() - Directive Send Streams release resource
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */ +static inline int nvme_directive_send_stream_release_resource(int fd, __u32 nsid) +{ + struct nvme_directive_send_args args = { + .result = NULL, + .data = NULL, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .doper = NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_RESOURCE, + .dtype = NVME_DIRECTIVE_DTYPE_STREAMS, + .cdw12 = 0, + .data_len = 0, + .dspec = 0, + }; + + return nvme_directive_send(&args); +} + +/** + * struct nvme_directive_recv_args - Arguments for the NVMe Directive Receive command + * @result: If successful, the CQE dword0 value + * @data: Usespace address of data payload + * @args_size: Size of &struct nvme_directive_recv_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace ID, if applicable + * @doper: Directive send operation, see &enum nvme_directive_send_doper + * @dtype: Directive type, see &enum nvme_directive_dtype + * @cdw12: Directive specific command dword12 + * @data_len: Length of data payload in bytes + * @dspec: Directive specific field + */ +struct nvme_directive_recv_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + enum nvme_directive_receive_doper doper; + enum nvme_directive_dtype dtype; + __u32 cdw12; + __u32 data_len; + __u16 dspec; +}; + +/** + * nvme_directive_recv() - Receive directive specific data + * @args: &struct nvme_directive_recv_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_directive_recv(struct nvme_directive_recv_args *args); + +/** + * nvme_directive_recv_identify_parameters() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @id: Identify parameters buffer + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_directive_recv_identify_parameters(int fd, __u32 nsid, + struct nvme_id_directives *id) +{ + struct nvme_directive_recv_args args = { + .result = NULL, + .data = id, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .doper = NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM, + .dtype = NVME_DIRECTIVE_DTYPE_IDENTIFY, + .cdw12 = 0, + .data_len = sizeof(*id), + .dspec = 0, + }; + + return nvme_directive_recv(&args); +} + +/** + * nvme_directive_recv_stream_parameters() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @parms: Streams directive parameters buffer + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
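A minimal sketch of how the two directive helpers above combine to turn on Streams for a namespace (illustrative only; assumes an already-open nvme fd):

#include <stdbool.h>
#include <libnvme.h>

/* Illustrative only: read the directive identify parameters, then ask the
 * controller to enable the Streams directive type on this namespace. */
static int example_enable_streams(int fd, __u32 nsid)
{
	struct nvme_id_directives id = { 0 };
	int err;

	err = nvme_directive_recv_identify_parameters(fd, nsid, &id);
	if (err)
		return err;

	return nvme_directive_send_id_endir(fd, nsid, true,
					    NVME_DIRECTIVE_DTYPE_STREAMS, &id);
}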
+ */ +static inline int nvme_directive_recv_stream_parameters(int fd, __u32 nsid, + struct nvme_streams_directive_params *parms) +{ + struct nvme_directive_recv_args args = { + .result = NULL, + .data = parms, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .doper = NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM, + .dtype = NVME_DIRECTIVE_DTYPE_STREAMS, + .cdw12 = 0, + .data_len = sizeof(*parms), + .dspec = 0, + }; + + return nvme_directive_recv(&args); +} + +/** + * nvme_directive_recv_stream_status() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @nr_entries: Number of streams to receive + * @id: Stream status buffer + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_directive_recv_stream_status(int fd, __u32 nsid, + unsigned nr_entries, + struct nvme_streams_directive_status *id) +{ + struct nvme_directive_recv_args args = { + .result = NULL, + .data = id, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .doper = NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS, + .dtype = NVME_DIRECTIVE_DTYPE_STREAMS, + .cdw12 = 0, + .data_len = sizeof(*id), + .dspec = 0, + }; + + return nvme_directive_recv(&args); +} + +/** + * nvme_directive_recv_stream_allocate() - + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @nsr: Namespace Streams Requested + * @result: If successful, the CQE dword0 value + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_directive_recv_stream_allocate(int fd, __u32 nsid, + __u16 nsr, __u32 *result) +{ + struct nvme_directive_recv_args args = { + .result = result, + .data = NULL, + .args_size = sizeof(args), + .fd = fd, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .nsid = nsid, + .doper = NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE, + .dtype = NVME_DIRECTIVE_DTYPE_STREAMS, + .cdw12 = nsr, + .data_len = 0, + .dspec = 0, + }; + + return nvme_directive_recv(&args); +} + +/** + * struct nvme_capacity_mgmt_args - Arguments for the NVMe Capacity Management command + * @result: If successful, the CQE dword0 value + * @args_size: Size of &struct nvme_capacity_mgmt_args + * @fd: File descriptor of nvme device + * @cdw11: Least significant 32 bits of the capacity in bytes of the + * Endurance Group or NVM Set to be created + * @cdw12: Most significant 32 bits of the capacity in bytes of the + * Endurance Group or NVM Set to be created + * @timeout: Timeout in ms + * @element_id: Value specific to the value of the Operation field + * @op: Operation to be performed by the controller + */ +struct nvme_capacity_mgmt_args { + __u32 *result; + int args_size; + int fd; + __u32 timeout; + __u32 cdw11; + __u32 cdw12; + __u16 element_id; + __u8 op; +}; + +/** + * nvme_capacity_mgmt() - + * @args: &struct nvme_capacity_mgmt_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +int nvme_capacity_mgmt(struct nvme_capacity_mgmt_args *args); + +/** + * struct nvme_lockdown_args - Arguments for the NVME Lockdown command + * @args_size: Size of &struct nvme_lockdown_args + * @fd: File descriptor of nvme device + * @result: The command completion result from CQE dword0 + * @timeout: Timeout in ms (0 for default timeout) + * @scp: Scope of the command + * @prhbt: Prohibit or allow the command opcode or Set Features command + * @ifc: Affected interface + * @ofi: Opcode or Feature Identifier + * @uuidx: UUID Index if controller supports this id selection method + */ +struct nvme_lockdown_args { + __u32 *result; + int args_size; + int fd; + __u32 timeout; + __u8 scp; + __u8 prhbt; + __u8 ifc; + __u8 ofi; + __u8 uuidx; +}; + +/** + * nvme_lockdown() - Issue lockdown command + * @args: &struct nvme_lockdown_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_lockdown(struct nvme_lockdown_args *args); + +/** + * struct nvme_set_property_args - Arguments for NVMe Set Property command + * @args_size: Size of &struct nvme_set_property_args + * @fd: File descriptor of nvme device + * @result: The command completion result from CQE dword0 + * @timeout: Timeout in ms + * @offset: Property offset from the base to set + * @value: The value to set the property + */ +struct nvme_set_property_args { + __u64 value; + __u32 *result; + int args_size; + int fd; + __u32 timeout; + int offset; +}; + +/** + * nvme_set_property() - Set controller property + * @args: &struct nvme_set_property_args argument structure + * + * This is an NVMe-over-Fabrics specific command, not applicable to PCIe. These + * properties align to the PCI MMIO controller registers. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_set_property(struct nvme_set_property_args *args); + +/** + * struct nvme_get_property_args - Arguments for NVMe Get Property command + * @value: Where the property's value will be stored on success + * @args_size: Size of &struct nvme_get_property_args + * @fd: File descriptor of nvme device + * @offset: Property offset from the base to retrieve + * @timeout: Timeout in ms + */ +struct nvme_get_property_args { + __u64 *value; + int args_size; + int fd; + __u32 timeout; + int offset; +}; + +/** + * nvme_get_property() - Get a controller property + * @args: &struct nvme_get_propert_args argument structure + * + * This is an NVMe-over-Fabrics specific command, not applicable to PCIe. These + * properties align to the PCI MMIO controller registers. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
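A minimal sketch of reading a property over a fabrics connection (illustrative only; offset 0x0 is the Controller Capabilities register per the NVMe specification, and types.h is expected to provide a named constant for it):

#include <stdio.h>
#include <libnvme.h>

/* Illustrative only: read the 64-bit CAP property of a fabrics controller. */
static int example_read_cap(int fd)
{
	__u64 cap = 0;
	struct nvme_get_property_args args = {
		.value = &cap,
		.args_size = sizeof(args),
		.fd = fd,
		.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
		.offset = 0x0,	/* Controller Capabilities (CAP) offset per the spec */
	};
	int err = nvme_get_property(&args);

	if (!err)
		printf("CAP: 0x%llx\n", (unsigned long long)cap);
	return err;
}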
+ */
+int nvme_get_property(struct nvme_get_property_args *args);
+
+/**
+ * struct nvme_sanitize_nvm_args - Arguments for the NVMe Sanitize NVM command
+ * @result: The command completion result from CQE dword0
+ * @args_size: Size of &struct nvme_sanitize_nvm_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Timeout in ms
+ * @ovrpat: Overwrite pattern
+ * @sanact: Sanitize action, see &enum nvme_sanitize_sanact
+ * @ause: Set to allow unrestricted sanitize exit
+ * @owpass: Overwrite pass count
+ * @oipbp: Set to overwrite invert pattern between passes
+ * @nodas: Set to not deallocate blocks after sanitizing
+ */
+struct nvme_sanitize_nvm_args {
+ __u32 *result;
+ int args_size;
+ int fd;
+ __u32 timeout;
+ enum nvme_sanitize_sanact sanact;
+ __u32 ovrpat;
+ bool ause;
+ __u8 owpass;
+ bool oipbp;
+ bool nodas;
+};
+
+/**
+ * nvme_sanitize_nvm() - Start a sanitize operation
+ * @args: &struct nvme_sanitize_nvm_args argument structure
+ *
+ * A sanitize operation alters all user data in the NVM subsystem such that
+ * recovery of any previous user data from any cache, the non-volatile media,
+ * or any Controller Memory Buffer is not possible.
+ *
+ * The Sanitize command starts a sanitize operation or recovers from a
+ * previously failed sanitize operation. The sanitize operation types that may
+ * be supported are Block Erase, Crypto Erase, and Overwrite. All sanitize
+ * operations are processed in the background, i.e., completion of the sanitize
+ * command does not indicate completion of the sanitize operation.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_sanitize_nvm(struct nvme_sanitize_nvm_args *args);
+
+/**
+ * struct nvme_dev_self_test_args - Arguments for the NVMe Device Self Test command
+ * @result: The command completion result from CQE dword0
+ * @args_size: Size of &struct nvme_dev_self_test_args
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to test
+ * @stc: Self test code, see &enum nvme_dst_stc
+ * @timeout: Timeout in ms
+ */
+struct nvme_dev_self_test_args {
+ __u32 *result;
+ int args_size;
+ int fd;
+ __u32 timeout;
+ __u32 nsid;
+ enum nvme_dst_stc stc;
+};
+
+/**
+ * nvme_dev_self_test() - Start or abort a self test
+ * @args: &struct nvme_dev_self_test_args argument structure
+ *
+ * The Device Self-test command starts or aborts a device self-test operation.
+ * A device self-test operation is a diagnostic testing sequence that tests the
+ * integrity and functionality of the controller and may include testing of the
+ * media associated with namespaces. The controller may return a response to
+ * this command immediately while running the self-test in the background.
+ *
+ * Set the 'nsid' field to 0 to exclude namespaces from the test. Set it to
+ * 0xffffffff to test all namespaces. Any other value tests the specified
+ * namespace, if present.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
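A minimal sketch of starting a short self-test across all namespaces (illustrative only; the raw stc value 0x1 is the specification's short self-test code and should normally be spelled with the matching enum nvme_dst_stc constant):

#include <libnvme.h>

/* Illustrative only: kick off a short background self-test on all namespaces. */
static int example_short_self_test(int fd)
{
	struct nvme_dev_self_test_args args = {
		.result = NULL,
		.args_size = sizeof(args),
		.fd = fd,
		.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
		.nsid = 0xffffffff,	/* include all namespaces */
		.stc = 0x1,		/* short self-test code per the spec; see enum nvme_dst_stc */
	};

	return nvme_dev_self_test(&args);
}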
+ */ +int nvme_dev_self_test(struct nvme_dev_self_test_args *args); + +/** + * struct nvme_virtual_mgmt_args - Arguments for the NVMe Virtualization + * resource management command + * @args_size: Size of &struct nvme_virtual_mgmt_args + * @fd: File descriptor of nvme device + * @result: If successful, the CQE dword0 + * @timeout: Timeout in ms + * @act: Virtual resource action, see &enum nvme_virt_mgmt_act + * @rt: Resource type to modify, see &enum nvme_virt_mgmt_rt + * @cntlid: Controller id for which resources are bing modified + * @nr: Number of resources being allocated or assigned + */ +struct nvme_virtual_mgmt_args { + __u32 *result; + int args_size; + int fd; + __u32 timeout; + enum nvme_virt_mgmt_act act; + enum nvme_virt_mgmt_rt rt; + __u16 cntlid; + __u16 nr; +}; + +/** + * nvme_virtual_mgmt() - Virtualization resource management + * @args: &struct nvme_virtual_mgmt_args argument structure + * + * The Virtualization Management command is supported by primary controllers + * that support the Virtualization Enhancements capability. This command is + * used for several functions: + * + * - Modifying Flexible Resource allocation for the primary controller + * - Assigning Flexible Resources for secondary controllers + * - Setting the Online and Offline state for secondary controllers + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_virtual_mgmt(struct nvme_virtual_mgmt_args *args); + +/** + * nvme_flush() - Send an nvme flush command + * @fd: File descriptor of nvme device + * @nsid: Namespace identifier + * + * The Flush command requests that the contents of volatile write cache be made + * non-volatile. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_flush(int fd, __u32 nsid) { + struct nvme_passthru_cmd cmd = {}; + + cmd.opcode = nvme_cmd_flush; + cmd.nsid = nsid; + + return nvme_submit_io_passthru(fd, &cmd, NULL); +} + +/** + * struct nvme_io_args - Arguments for NVMe I/O commands + * @slba: Starting logical block + * @storage_tag: This filed specifies Variable Sized Expected Logical Block + * Storage Tag (ELBST) and Expected Logical Block Reference + * Tag (ELBRT) + * @result: The command completion result from CQE dword0 + * @data: Pointer to user address of the data buffer + * @metadata: Pointer to user address of the metadata buffer + * @args_size: Size of &struct nvme_io_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace ID + * @data_len: Length of user buffer, @data, in bytes + * @metadata_len:Length of user buffer, @metadata, in bytes + * @nlb: Number of logical blocks to send (0's based value) + * @control: Command control flags, see &enum nvme_io_control_flags. + * @apptag: This field specifies the Application Tag Mask expected value. + * Used only if the namespace is formatted to use end-to-end + * protection information. + * @appmask: This field specifies the Application Tag expected value. Used + * only if the namespace is formatted to use end-to-end protection + * information. + * @reftag: This field specifies the Initial Logical Block Reference Tag + * expected value. Used only if the namespace is formatted to use + * end-to-end protection information. 
+ * @dspec: Directive specific value + * @dsm: Data set management attributes, see &enum nvme_io_dsm_flags + */ +struct nvme_io_args { + __u64 slba; + __u64 storage_tag; + __u32 *result; + void *data; + void *metadata; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 reftag; + __u32 data_len; + __u32 metadata_len; + __u16 nlb; + __u16 control; + __u16 apptag; + __u16 appmask; + __u16 dspec; + __u8 dsm; +}; + +/** + * nvme_io() - Submit an nvme user I/O command + * @args: &struct nvme_io_args argument structure + * @opcode: Opcode to execute + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_io(struct nvme_io_args *args, __u8 opcode); + +/** + * nvme_read() - Submit an nvme user read command + * @args: &struct nvme_io_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_read(struct nvme_io_args *args) +{ + return nvme_io(args, nvme_cmd_read); +} + +/** + * nvme_write() - Submit an nvme user write command + * @args: &struct nvme_io_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_write(struct nvme_io_args *args) +{ + return nvme_io(args, nvme_cmd_write); +} + +/** + * nvme_compare() - Submit an nvme user compare command + * @args: &struct nvme_io_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_compare(struct nvme_io_args *args) +{ + return nvme_io(args, nvme_cmd_compare); +} + +/** + * nvme_write_zeros() - Submit an nvme write zeroes command + * @args: &struct nvme_io_args argument structure + * + * The Write Zeroes command sets a range of logical blocks to zero. After + * successful completion of this command, the value returned by subsequent + * reads of logical blocks in this range shall be all bytes cleared to 0h until + * a write occurs to this LBA range. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_write_zeros(struct nvme_io_args *args) +{ + return nvme_io(args, nvme_cmd_write_zeroes); +} + +/** + * nvme_write_uncorrectable() - Submit an nvme write uncorrectable command + * @args: &struct nvme_io_args argument structure + * + * The Write Uncorrectable command marks a range of logical blocks as invalid. + * When the specified logical block(s) are read after this operation, a failure + * is returned with Unrecovered Read Error status. To clear the invalid logical + * block status, a write operation on those logical blocks is required. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_write_uncorrectable(struct nvme_io_args *args) +{ + return nvme_io(args, nvme_cmd_write_uncor); +} + +/** + * nvme_verify() - Send an nvme verify command + * @args: &struct nvme_io_args argument structure + * + * The Verify command verifies integrity of stored information by reading data + * and metadata, if applicable, for the LBAs indicated without transferring any + * data or metadata to the host. 
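A minimal read sketch using the generic I/O argument structure above (illustrative only; the caller supplies the namespace's logical block size, for example from nvme_get_logical_block_size()):

#include <stdlib.h>
#include <libnvme.h>

/* Illustrative only: read 8 logical blocks starting at LBA 0. */
static int example_read(int fd, __u32 nsid, __u32 lba_size)
{
	__u32 nblocks = 8;
	void *buf = calloc(nblocks, lba_size);
	int err;

	if (!buf)
		return -1;

	struct nvme_io_args args = {
		.slba = 0,
		.data = buf,
		.data_len = nblocks * lba_size,
		.args_size = sizeof(args),
		.fd = fd,
		.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
		.nsid = nsid,
		.nlb = nblocks - 1,	/* 0's based block count */
	};

	err = nvme_read(&args);
	free(buf);
	return err;
}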
+ * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_verify(struct nvme_io_args *args) +{ + return nvme_io(args, nvme_cmd_verify); +} + +/** + * struct nvme_dsm_args - Arguments for the NVMe Dataset Management command + * @result: The command completion result from CQE dword0 + * @dsm: The data set management attributes + * @args_size: Size of &struct nvme_dsm_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace identifier + * @attrs: DSM attributes, see &enum nvme_dsm_attributes + * @nr_ranges: Number of block ranges in the data set management attributes + */ +struct nvme_dsm_args { + __u32 *result; + struct nvme_dsm_range *dsm; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 attrs; + __u16 nr_ranges; +}; + +/** + * nvme_dsm() - Send an nvme data set management command + * @args: &struct nvme_dsm_args argument structure + * + * The Dataset Management command is used by the host to indicate attributes + * for ranges of logical blocks. This includes attributes like frequency that + * data is read or written, access size, and other information that may be used + * to optimize performance and reliability, and may be used to + * deallocate/unmap/trim those logical blocks. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_dsm(struct nvme_dsm_args *args); + +/** + * struct nvme_copy_args - Arguments for the NVMe Copy command + * @sdlba: Start destination LBA + * @result: The command completion result from CQE dword0 + * @copy: Range descriptior + * @args_size: Size of &struct nvme_copy_args + * @fd: File descriptor of the nvme device + * @timeout: Timeout in ms + * @nsid: Namespace identifier + * @ilbrt: Initial logical block reference tag + * @lr: Limited retry + * @fua: Force unit access + * @nr: Number of ranges + * @dspec: Directive specific value + * @lbatm: Logical block application tag mask + * @lbat: Logical block application tag + * @prinfor: Protection information field for read + * @prinfow: Protection information field for write + * @dtype: Directive type + * @format: Descriptor format + */ +struct nvme_copy_args { + __u64 sdlba; + __u32 *result; + struct nvme_copy_range *copy; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 ilbrt; + int lr; + int fua; + __u16 nr; + __u16 dspec; + __u16 lbatm; + __u16 lbat; + __u8 prinfor; + __u8 prinfow; + __u8 dtype; + __u8 format; +}; + +/** + * nvme_copy() - + * + * @args: &struct nvme_copy_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
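A minimal deallocate (trim) sketch (illustrative only; the struct nvme_dsm_range field names and the AD attribute bit are assumptions to verify against types.h, and the range fields are little-endian on the wire, so use an endian conversion on big-endian hosts):

#include <libnvme.h>

/* Illustrative only: deallocate 1024 blocks starting at LBA 0. */
static int example_deallocate(int fd, __u32 nsid)
{
	struct nvme_dsm_range range = {
		.cattr = 0,	/* context attributes (assumed field names) */
		.nlb = 1024,
		.slba = 0,
	};
	struct nvme_dsm_args args = {
		.dsm = &range,
		.args_size = sizeof(args),
		.fd = fd,
		.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
		.nsid = nsid,
		.attrs = 1 << 2,	/* Attribute - Deallocate (AD) per the spec */
		.nr_ranges = 1,
	};

	return nvme_dsm(&args);
}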
+ */
+int nvme_copy(struct nvme_copy_args *args);
+
+/**
+ * struct nvme_resv_acquire_args - Arguments for the NVMe Reservation Acquire Command
+ * @nrkey: The reservation key to be unregistered from the namespace if
+ * the action is preempt
+ * @iekey: Set to ignore the existing key
+ * @result: The command completion result from CQE dword0
+ * @args_size: Size of &struct nvme_resv_acquire_args
+ * @fd: File descriptor of nvme device
+ * @timeout: Timeout in ms
+ * @nsid: Namespace identifier
+ * @rtype: The type of reservation to be created, see &enum nvme_resv_rtype
+ * @racqa: The action that is performed by the command, see &enum nvme_resv_racqa
+ * @crkey: The current reservation key associated with the host
+ */
+struct nvme_resv_acquire_args {
+ __u64 crkey;
+ __u64 nrkey;
+ __u32 *result;
+ int args_size;
+ int fd;
+ __u32 timeout;
+ __u32 nsid;
+ enum nvme_resv_rtype rtype;
+ enum nvme_resv_racqa racqa;
+ bool iekey;
+};
+
+/**
+ * nvme_resv_acquire() - Send an nvme reservation acquire
+ * @args: &struct nvme_resv_acquire_args argument structure
+ *
+ * The Reservation Acquire command acquires a reservation on a namespace,
+ * preempts a reservation held on a namespace, or aborts a reservation held on
+ * a namespace.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
+ */
+int nvme_resv_acquire(struct nvme_resv_acquire_args *args);
+
+/**
+ * struct nvme_resv_register_args - Arguments for the NVMe Reservation Register command
+ * @crkey: The current reservation key associated with the host
+ * @nrkey: The new reservation key to be registered if the action is register
+ * or replace
+ * @result: The command completion result from CQE dword0
+ * @args_size: Size of &struct nvme_resv_register_args
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @rrega: The registration action, see &enum nvme_resv_rrega
+ * @cptpl: Change persist through power loss, see &enum nvme_resv_cptpl
+ * @iekey: Set to ignore the existing key
+ * @timeout: Timeout in ms
+ */
+struct nvme_resv_register_args {
+ __u64 crkey;
+ __u64 nrkey;
+ __u32 *result;
+ int args_size;
+ int fd;
+ __u32 timeout;
+ __u32 nsid;
+ enum nvme_resv_rrega rrega;
+ enum nvme_resv_cptpl cptpl;
+ bool iekey;
+};
+
+/**
+ * nvme_resv_register() - Send an nvme reservation register
+ * @args: &struct nvme_resv_register_args argument structure
+ *
+ * The Reservation Register command registers, unregisters, or replaces a
+ * reservation key.
+ *
+ * Return: The nvme command status if a response was received (see
+ * &enum nvme_status_field) or -1 with errno set otherwise.
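A minimal sketch of acquiring a Write Exclusive reservation with a previously registered key (illustrative only; the raw rtype/racqa values follow the specification and should normally be spelled with the enum nvme_resv_rtype and enum nvme_resv_racqa constants from types.h):

#include <stdbool.h>
#include <libnvme.h>

/* Illustrative only: acquire a Write Exclusive reservation on a namespace. */
static int example_resv_acquire(int fd, __u32 nsid, __u64 my_key)
{
	struct nvme_resv_acquire_args args = {
		.crkey = my_key,	/* key previously registered for this host */
		.nrkey = 0,		/* only used by the preempt actions */
		.args_size = sizeof(args),
		.fd = fd,
		.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
		.nsid = nsid,
		.rtype = 1,		/* Write Exclusive */
		.racqa = 0,		/* Acquire */
		.iekey = false,
	};

	return nvme_resv_acquire(&args);
}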
+ */ +int nvme_resv_register(struct nvme_resv_register_args *args); + +/** + * struct nvme_resv_release_args - Arguments for the NVMe Reservation Release Command + * @crkey: The current reservation key to release + * @result: The command completion result from CQE dword0 + * @args_size: Size of &struct nvme_resv_release_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace identifier + * @rtype: The type of reservation to be create, see &enum nvme_resv_rtype + * @rrela: Reservation releast action, see &enum nvme_resv_rrela + * @iekey: Set to ignore the existing key + */ +struct nvme_resv_release_args { + __u64 crkey; + __u32 *result; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + enum nvme_resv_rtype rtype; + enum nvme_resv_rrela rrela; + bool iekey; +}; + +/** + * nvme_resv_release() - Send an nvme reservation release + * @args: &struct nvme_resv_release_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_resv_release(struct nvme_resv_release_args *args); + +/** + * struct nvme_resv_report_args - Arguments for the NVMe Reservation Report command + * @result: The command completion result from CQE dword0 + * @report: The user space destination address to store the reservation + * report + * @args_size: Size of &struct nvme_resv_report_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace identifier + * @len: Number of bytes to request transfered with this command + * @eds: Request extended Data Structure + */ +struct nvme_resv_report_args { + __u32 *result; + struct nvme_resv_status *report; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 len; + bool eds; +}; + +/** + * nvme_resv_report() - Send an nvme reservation report + * @args: struct nvme_resv_report_args argument structure + * + * Returns a Reservation Status data structure to memory that describes the + * registration and reservation status of a namespace. See the defintion for + * the returned structure, &struct nvme_reservation_status, for more details. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_resv_report(struct nvme_resv_report_args *args); + +/** + * struct nvme_zns_mgmt_send_args - Arguments for the NVMe ZNS Management Send command + * @slba: Starting logical block address + * @result: The command completion result from CQE dword0 + * @data: Userspace address of the data + * @args_size: Size of &struct nvme_zns_mgmt_send_args + * @fd: File descriptor of nvme device + * @timeout: timeout in ms + * @nsid: Namespace ID + * @zsa: Zone send action + * @data_len: Length of @data + * @select_all: Select all flag + * @zsaso: Zone Send Action Specific Option + */ +struct nvme_zns_mgmt_send_args { + __u64 slba; + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + enum nvme_zns_send_action zsa; + __u32 data_len; + bool select_all; + __u8 zsaso; +}; + +/** + * nvme_zns_mgmt_send() - + * @args: &struct nvme_zns_mgmt_send_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +int nvme_zns_mgmt_send(struct nvme_zns_mgmt_send_args *args); + + +/** + * struct nvme_zns_mgmt_recv_args - Arguments for the NVMe ZNS Management Receive command + * @slba: Starting logical block address + * @result: The command completion result from CQE dword0 + * @data: Userspace address of the data + * @args_size: Size of &struct nvme_zns_mgmt_recv_args + * @fd: File descriptor of nvme device + * @timeout: timeout in ms + * @nsid: Namespace ID + * @zra: zone receive action + * @data_len: Length of @data + * @zrasf: Zone receive action specific field + * @zras_feat: Zone receive action specific features + */ +struct nvme_zns_mgmt_recv_args { + __u64 slba; + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + enum nvme_zns_recv_action zra; + __u32 data_len; + __u16 zrasf; + bool zras_feat; +}; + +/** + * nvme_zns_mgmt_recv() - + * @args: &struct nvme_zns_mgmt_recv_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_zns_mgmt_recv(struct nvme_zns_mgmt_recv_args *args); + +/** + * nvme_zns_report_zones() - Return the list of zones + * @fd: File descriptor of nvme device + * @nsid: Namespace ID + * @slba: Starting LBA + * @opts: Reporting options + * @extended: Extended report + * @partial: Partial report requested + * @data_len: Length of the data buffer + * @data: Userspace address of the report zones data + * @timeout: timeout in ms + * @result: The command completion result from CQE dword0 + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +static inline int nvme_zns_report_zones(int fd, __u32 nsid, __u64 slba, + enum nvme_zns_report_options opts, + bool extended, bool partial, + __u32 data_len, void *data, + __u32 timeout, __u32 *result) +{ + struct nvme_zns_mgmt_recv_args args = { + .slba = slba, + .result = result, + .data = data, + .args_size = sizeof(args), + .fd = fd, + .timeout = timeout, + .nsid = nsid, + .zra = extended ? NVME_ZNS_ZRA_EXTENDED_REPORT_ZONES : + NVME_ZNS_ZRA_REPORT_ZONES, + .data_len = data_len, + .zrasf = opts, + .zras_feat = partial, + }; + + return nvme_zns_mgmt_recv(&args); +} + +/** + * struct nvme_zns_append_args - Arguments for the NVMe ZNS Append command + * @zslba: Zone start logical block address + * @result: The command completion result from CQE dword0 + * @data: Userspace address of the data + * @metadata: Userspace address of the metadata + * @args_size: Size of &struct nvme_zns_append_args + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @nsid: Namespace ID + * @ilbrt: Initial logical block reference tag + * @data_len: Length of @data + * @metadata_len: Length of @metadata + * @nlb: Number of logical blocks + * @control: + * @lbat: Logical block application tag + * @lbatm: Logical block application tag mask + */ +struct nvme_zns_append_args { + __u64 zslba; + __u64 *result; + void *data; + void *metadata; + int args_size; + int fd; + __u32 timeout; + __u32 nsid; + __u32 ilbrt; + __u32 data_len; + __u32 metadata_len; + __u16 nlb; + __u16 control; + __u16 lbat; + __u16 lbatm; +}; + +/** + * nvme_zns_append() - Append data to a zone + * @args: &struct nvme_zns_append_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
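A minimal sketch built on the nvme_zns_report_zones() helper above (illustrative only; a reporting option of 0 requests all zones per the specification, with named values in enum nvme_zns_report_options):

#include <stdbool.h>
#include <stdlib.h>
#include <libnvme.h>

/* Illustrative only: fetch a partial zone report from the start of the namespace. */
static int example_report_zones(int fd, __u32 nsid)
{
	__u32 data_len = 4096;	/* report header plus a handful of zone descriptors */
	void *report = calloc(1, data_len);
	int err;

	if (!report)
		return -1;

	err = nvme_zns_report_zones(fd, nsid, 0,
				    0,		/* reporting option: all zones */
				    false,	/* regular, not extended, report */
				    true,	/* partial report */
				    data_len, report,
				    NVME_DEFAULT_IOCTL_TIMEOUT, NULL);
	free(report);
	return err;
}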
+ */ +int nvme_zns_append(struct nvme_zns_append_args *args); + +/** + * struct nvme_dim_args - Arguments for the Discovery Information Management (DIM) command + * @result: Set on completion to the command's CQE DWORD 0 controller response. + * @data: Pointer to the DIM data + * @args_size: Length of the structure + * @fd: File descriptor of nvme device + * @timeout: Timeout in ms + * @data_len: Length of @data + * @tas: Task field of the Command Dword 10 (cdw10) + */ +struct nvme_dim_args { + __u32 *result; + void *data; + int args_size; + int fd; + __u32 timeout; + __u32 data_len; + __u8 tas; +}; + +/** + * nvme_dim_send - Send a Discovery Information Management (DIM) command + * @args: &struct nvme_dim_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_dim_send(struct nvme_dim_args *args); + +#endif /* _LIBNVME_IOCTL_H */ diff --git a/src/nvme/json.c b/src/nvme/json.c new file mode 100644 index 0000000..6f0b81c --- /dev/null +++ b/src/nvme/json.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2021 SUSE Software Solutions + * + * Authors: Hannes Reinecke <hare@suse.de> + */ + +#include <stdio.h> +#include <errno.h> +#include <string.h> +#include <unistd.h> +#include <fcntl.h> + +#include <json.h> + +#include "fabrics.h" +#include "log.h" +#include "private.h" + +#define JSON_UPDATE_INT_OPTION(c, k, a, o) \ + if (!strcmp(# a, k ) && !c->a) c->a = json_object_get_int(o); +#define JSON_UPDATE_BOOL_OPTION(c, k, a, o) \ + if (!strcmp(# a, k ) && !c->a) c->a = json_object_get_boolean(o); + +static void json_update_attributes(nvme_ctrl_t c, + struct json_object *ctrl_obj) +{ + struct nvme_fabrics_config *cfg = nvme_ctrl_get_config(c); + + json_object_object_foreach(ctrl_obj, key_str, val_obj) { + JSON_UPDATE_INT_OPTION(cfg, key_str, + nr_io_queues, val_obj); + JSON_UPDATE_INT_OPTION(cfg, key_str, + nr_write_queues, val_obj); + JSON_UPDATE_INT_OPTION(cfg, key_str, + nr_poll_queues, val_obj); + JSON_UPDATE_INT_OPTION(cfg, key_str, + queue_size, val_obj); + JSON_UPDATE_INT_OPTION(cfg, key_str, + keep_alive_tmo, val_obj); + JSON_UPDATE_INT_OPTION(cfg, key_str, + reconnect_delay, val_obj); + if (!strcmp("ctrl_loss_tmo", key_str) && + cfg->ctrl_loss_tmo != NVMF_DEF_CTRL_LOSS_TMO) + cfg->ctrl_loss_tmo = json_object_get_int(val_obj); + JSON_UPDATE_INT_OPTION(cfg, key_str, + fast_io_fail_tmo, val_obj); + if (!strcmp("tos", key_str) && cfg->tos != -1) + cfg->tos = json_object_get_int(val_obj); + JSON_UPDATE_BOOL_OPTION(cfg, key_str, + duplicate_connect, val_obj); + JSON_UPDATE_BOOL_OPTION(cfg, key_str, + disable_sqflow, val_obj); + JSON_UPDATE_BOOL_OPTION(cfg, key_str, + hdr_digest, val_obj); + JSON_UPDATE_BOOL_OPTION(cfg, key_str, + data_digest, val_obj); + JSON_UPDATE_BOOL_OPTION(cfg, key_str, + tls, val_obj); + if (!strcmp("persistent", key_str) && + !nvme_ctrl_is_persistent(c)) + nvme_ctrl_set_persistent(c, true); + if (!strcmp("discovery", key_str) && + !nvme_ctrl_is_discovery_ctrl(c)) + nvme_ctrl_set_discovery_ctrl(c, true); + } +} + +static void json_parse_port(nvme_subsystem_t s, struct json_object *port_obj) +{ + nvme_ctrl_t c; + struct json_object *attr_obj; + const char *transport, *traddr = NULL; + const char *host_traddr = NULL, *host_iface = NULL, *trsvcid = NULL; + + attr_obj = json_object_object_get(port_obj, "transport"); + if (!attr_obj) + return; + transport = json_object_get_string(attr_obj); 
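For readers skimming the stringizing macros above: an invocation such as JSON_UPDATE_INT_OPTION(cfg, key_str, nr_io_queues, val_obj) in json_update_attributes() expands roughly to the following, so a value read from the JSON configuration only fills a fabrics option that is still at its zero default:

if (!strcmp("nr_io_queues", key_str) && !cfg->nr_io_queues)
	cfg->nr_io_queues = json_object_get_int(val_obj);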
+ attr_obj = json_object_object_get(port_obj, "traddr"); + if (attr_obj) + traddr = json_object_get_string(attr_obj); + attr_obj = json_object_object_get(port_obj, "host_traddr"); + if (attr_obj) + host_traddr = json_object_get_string(attr_obj); + attr_obj = json_object_object_get(port_obj, "host_iface"); + if (attr_obj) + host_iface = json_object_get_string(attr_obj); + attr_obj = json_object_object_get(port_obj, "trsvcid"); + if (attr_obj) + trsvcid = json_object_get_string(attr_obj); + c = nvme_lookup_ctrl(s, transport, traddr, host_traddr, + host_iface, trsvcid, NULL); + if (!c) + return; + json_update_attributes(c, port_obj); + attr_obj = json_object_object_get(port_obj, "dhchap_key"); + if (attr_obj) + nvme_ctrl_set_dhchap_key(c, json_object_get_string(attr_obj)); +} + +static void json_parse_subsys(nvme_host_t h, struct json_object *subsys_obj) +{ + struct json_object *nqn_obj, *port_array; + nvme_subsystem_t s; + const char *nqn; + int p; + + nqn_obj = json_object_object_get(subsys_obj, "nqn"); + if (!nqn_obj) + return; + nqn = json_object_get_string(nqn_obj); + s = nvme_lookup_subsystem(h, NULL, nqn); + port_array = json_object_object_get(subsys_obj, "ports"); + if (!port_array) + return; + for (p = 0; p < json_object_array_length(port_array); p++) { + struct json_object *port_obj; + + port_obj = json_object_array_get_idx(port_array, p); + if (port_obj) + json_parse_port(s, port_obj); + } +} + +static void json_parse_host(nvme_root_t r, struct json_object *host_obj) +{ + struct json_object *attr_obj, *subsys_array, *subsys_obj; + nvme_host_t h; + const char *hostnqn, *hostid = NULL; + int s; + + attr_obj = json_object_object_get(host_obj, "hostnqn"); + if (!attr_obj) + return; + hostnqn = json_object_get_string(attr_obj); + attr_obj = json_object_object_get(host_obj, "hostid"); + if (attr_obj) + hostid = json_object_get_string(attr_obj); + h = nvme_lookup_host(r, hostnqn, hostid); + attr_obj = json_object_object_get(host_obj, "dhchap_key"); + if (attr_obj) + nvme_host_set_dhchap_key(h, json_object_get_string(attr_obj)); + attr_obj = json_object_object_get(host_obj, "hostsymname"); + if (attr_obj) + nvme_host_set_hostsymname(h, json_object_get_string(attr_obj)); + subsys_array = json_object_object_get(host_obj, "subsystems"); + if (!subsys_array) + return; + for (s = 0; s < json_object_array_length(subsys_array); s++) { + subsys_obj = json_object_array_get_idx(subsys_array, s); + if (subsys_obj) + json_parse_subsys(h, subsys_obj); + } +} + +int json_read_config(nvme_root_t r, const char *config_file) +{ + struct json_object *json_root, *host_obj; + int fd, h; + + fd = open(config_file, O_RDONLY); + if (fd < 0) { + nvme_msg(r, LOG_DEBUG, "Error opening %s, %s\n", + config_file, strerror(errno)); + return fd; + } + json_root = json_object_from_fd(fd); + if (!json_root) { + nvme_msg(r, LOG_DEBUG, "Failed to read %s, %s\n", + config_file, json_util_get_last_err()); + errno = EPROTO; + close(fd); + return -1; + } + for (h = 0; h < json_object_array_length(json_root); h++) { + host_obj = json_object_array_get_idx(json_root, h); + if (host_obj) + json_parse_host(r, host_obj); + } + json_object_put(json_root); + close(fd); + return 0; +} + +#define JSON_STRING_OPTION(c, p, o) \ + if ((c)->o && strcmp((c)->o, "none")) \ + json_object_object_add((p), # o , \ + json_object_new_string((c)->o)) +#define JSON_INT_OPTION(c, p, o, d) \ + if ((c)->o != d) \ + json_object_object_add((p), # o , \ + json_object_new_int((c)->o)) +#define JSON_BOOL_OPTION(c, p, o) \ + if ((c)->o) \ + 
json_object_object_add((p), # o , \ + json_object_new_boolean((c)->o)) + +static void json_update_port(struct json_object *ctrl_array, nvme_ctrl_t c) +{ + struct nvme_fabrics_config *cfg = nvme_ctrl_get_config(c); + struct json_object *port_obj = json_object_new_object(); + const char *transport, *value; + + transport = nvme_ctrl_get_transport(c); + json_object_object_add(port_obj, "transport", + json_object_new_string(transport)); + value = nvme_ctrl_get_traddr(c); + if (value) + json_object_object_add(port_obj, "traddr", + json_object_new_string(value)); + value = nvme_ctrl_get_host_traddr(c); + if (value) + json_object_object_add(port_obj, "host_traddr", + json_object_new_string(value)); + value = nvme_ctrl_get_host_iface(c); + if (value) + json_object_object_add(port_obj, "host_iface", + json_object_new_string(value)); + value = nvme_ctrl_get_trsvcid(c); + if (value) + json_object_object_add(port_obj, "trsvcid", + json_object_new_string(value)); + value = nvme_ctrl_get_dhchap_key(c); + if (value) + json_object_object_add(port_obj, "dhchap_key", + json_object_new_string(value)); + JSON_INT_OPTION(cfg, port_obj, nr_io_queues, 0); + JSON_INT_OPTION(cfg, port_obj, nr_write_queues, 0); + JSON_INT_OPTION(cfg, port_obj, nr_poll_queues, 0); + JSON_INT_OPTION(cfg, port_obj, queue_size, 0); + JSON_INT_OPTION(cfg, port_obj, keep_alive_tmo, 0); + JSON_INT_OPTION(cfg, port_obj, reconnect_delay, 0); + if (strcmp(transport, "loop")) { + JSON_INT_OPTION(cfg, port_obj, ctrl_loss_tmo, + NVMF_DEF_CTRL_LOSS_TMO); + JSON_INT_OPTION(cfg, port_obj, fast_io_fail_tmo, 0); + } + JSON_INT_OPTION(cfg, port_obj, tos, -1); + JSON_BOOL_OPTION(cfg, port_obj, duplicate_connect); + JSON_BOOL_OPTION(cfg, port_obj, disable_sqflow); + JSON_BOOL_OPTION(cfg, port_obj, hdr_digest); + JSON_BOOL_OPTION(cfg, port_obj, data_digest); + JSON_BOOL_OPTION(cfg, port_obj, tls); + if (nvme_ctrl_is_persistent(c)) + json_object_object_add(port_obj, "persistent", + json_object_new_boolean(true)); + if (nvme_ctrl_is_discovery_ctrl(c)) + json_object_object_add(port_obj, "discovery", + json_object_new_boolean(true)); + json_object_array_add(ctrl_array, port_obj); +} + +static void json_update_subsys(struct json_object *subsys_array, + nvme_subsystem_t s) +{ + nvme_ctrl_t c; + const char *subsysnqn = nvme_subsystem_get_nqn(s); + struct json_object *subsys_obj = json_object_new_object(); + struct json_object *port_array; + + /* Skip discovery subsystems as the nqn is not unique */ + if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) + return; + + json_object_object_add(subsys_obj, "nqn", + json_object_new_string(subsysnqn)); + port_array = json_object_new_array(); + nvme_subsystem_for_each_ctrl(s, c) { + json_update_port(port_array, c); + } + if (json_object_array_length(port_array)) + json_object_object_add(subsys_obj, "ports", port_array); + else + json_object_put(port_array); + json_object_array_add(subsys_array, subsys_obj); +} + +int json_update_config(nvme_root_t r, const char *config_file) +{ + nvme_host_t h; + struct json_object *json_root, *host_obj; + struct json_object *subsys_array; + int ret = 0; + + json_root = json_object_new_array(); + nvme_for_each_host(r, h) { + nvme_subsystem_t s; + const char *hostnqn, *hostid, *dhchap_key, *hostsymname; + + host_obj = json_object_new_object(); + if (!host_obj) + continue; + hostnqn = nvme_host_get_hostnqn(h); + json_object_object_add(host_obj, "hostnqn", + json_object_new_string(hostnqn)); + hostid = nvme_host_get_hostid(h); + if (hostid) + json_object_object_add(host_obj, "hostid", + 
json_object_new_string(hostid)); + dhchap_key = nvme_host_get_dhchap_key(h); + if (dhchap_key) + json_object_object_add(host_obj, "dhchap_key", + json_object_new_string(dhchap_key)); + hostsymname = nvme_host_get_hostsymname(h); + if (hostsymname) + json_object_object_add(host_obj, "hostsymname", + json_object_new_string(hostsymname)); + subsys_array = json_object_new_array(); + nvme_for_each_subsystem(h, s) { + json_update_subsys(subsys_array, s); + } + if (json_object_array_length(subsys_array)) + json_object_object_add(host_obj, "subsystems", + subsys_array); + else + json_object_put(subsys_array); + json_object_array_add(json_root, host_obj); + } + if (!config_file) + ret = json_object_to_fd(1, json_root, JSON_C_TO_STRING_PRETTY); + else + ret = json_object_to_file_ext(config_file, json_root, + JSON_C_TO_STRING_PRETTY); + if (ret < 0) { + nvme_msg(r, LOG_ERR, "Failed to write to %s, %s\n", + config_file ? "stdout" : config_file, + json_util_get_last_err()); + ret = -1; + errno = EIO; + } + json_object_put(json_root); + + return ret; +} + +static void json_dump_ctrl(struct json_object *ctrl_array, nvme_ctrl_t c) +{ + struct nvme_fabrics_config *cfg = nvme_ctrl_get_config(c); + struct json_object *ctrl_obj = json_object_new_object(); + const char *name, *transport, *value; + + name = nvme_ctrl_get_name(c); + if (name && strlen(name)) + json_object_object_add(ctrl_obj, "name", + json_object_new_string(name)); + transport = nvme_ctrl_get_transport(c); + json_object_object_add(ctrl_obj, "transport", + json_object_new_string(transport)); + value = nvme_ctrl_get_traddr(c); + if (value) + json_object_object_add(ctrl_obj, "traddr", + json_object_new_string(value)); + value = nvme_ctrl_get_host_traddr(c); + if (value) + json_object_object_add(ctrl_obj, "host_traddr", + json_object_new_string(value)); + value = nvme_ctrl_get_host_iface(c); + if (value) + json_object_object_add(ctrl_obj, "host_iface", + json_object_new_string(value)); + value = nvme_ctrl_get_trsvcid(c); + if (value) + json_object_object_add(ctrl_obj, "trsvcid", + json_object_new_string(value)); + value = nvme_ctrl_get_dhchap_key(c); + if (value) + json_object_object_add(ctrl_obj, "dhchap_key", + json_object_new_string(value)); + JSON_INT_OPTION(cfg, ctrl_obj, nr_io_queues, 0); + JSON_INT_OPTION(cfg, ctrl_obj, nr_write_queues, 0); + JSON_INT_OPTION(cfg, ctrl_obj, nr_poll_queues, 0); + JSON_INT_OPTION(cfg, ctrl_obj, queue_size, 0); + JSON_INT_OPTION(cfg, ctrl_obj, keep_alive_tmo, 0); + JSON_INT_OPTION(cfg, ctrl_obj, reconnect_delay, 0); + if (strcmp(transport, "loop")) { + JSON_INT_OPTION(cfg, ctrl_obj, ctrl_loss_tmo, + NVMF_DEF_CTRL_LOSS_TMO); + JSON_INT_OPTION(cfg, ctrl_obj, fast_io_fail_tmo, 0); + } + JSON_INT_OPTION(cfg, ctrl_obj, tos, -1); + JSON_BOOL_OPTION(cfg, ctrl_obj, duplicate_connect); + JSON_BOOL_OPTION(cfg, ctrl_obj, disable_sqflow); + JSON_BOOL_OPTION(cfg, ctrl_obj, hdr_digest); + JSON_BOOL_OPTION(cfg, ctrl_obj, data_digest); + JSON_BOOL_OPTION(cfg, ctrl_obj, tls); + if (nvme_ctrl_is_persistent(c)) + json_object_object_add(ctrl_obj, "persistent", + json_object_new_boolean(true)); + if (nvme_ctrl_is_discovery_ctrl(c)) + json_object_object_add(ctrl_obj, "discovery", + json_object_new_boolean(true)); + json_object_array_add(ctrl_array, ctrl_obj); +} + +static void json_dump_subsys(struct json_object *subsys_array, + nvme_subsystem_t s) +{ + nvme_ctrl_t c; + struct json_object *subsys_obj = json_object_new_object(); + struct json_object *ctrl_array; + + json_object_object_add(subsys_obj, "name", + 
json_object_new_string(nvme_subsystem_get_name(s))); + json_object_object_add(subsys_obj, "nqn", + json_object_new_string(nvme_subsystem_get_nqn(s))); + ctrl_array = json_object_new_array(); + nvme_subsystem_for_each_ctrl(s, c) { + json_dump_ctrl(ctrl_array, c); + } + if (json_object_array_length(ctrl_array)) + json_object_object_add(subsys_obj, "controllers", ctrl_array); + else + json_object_put(ctrl_array); + json_object_array_add(subsys_array, subsys_obj); +} + +int json_dump_tree(nvme_root_t r) +{ + nvme_host_t h; + struct json_object *json_root, *host_obj; + struct json_object *host_array, *subsys_array; + int ret = 0; + + json_root = json_object_new_object(); + host_array = json_object_new_array(); + nvme_for_each_host(r, h) { + nvme_subsystem_t s; + const char *hostid, *dhchap_key; + + host_obj = json_object_new_object(); + json_object_object_add(host_obj, "hostnqn", + json_object_new_string(nvme_host_get_hostnqn(h))); + hostid = nvme_host_get_hostid(h); + if (hostid) + json_object_object_add(host_obj, "hostid", + json_object_new_string(hostid)); + dhchap_key = nvme_host_get_dhchap_key(h); + if (dhchap_key) + json_object_object_add(host_obj, "dhchap_key", + json_object_new_string(dhchap_key)); + subsys_array = json_object_new_array(); + nvme_for_each_subsystem(h, s) { + json_dump_subsys(subsys_array, s); + } + if (json_object_array_length(subsys_array)) + json_object_object_add(host_obj, "subsystems", + subsys_array); + else + json_object_put(subsys_array); + json_object_array_add(host_array, host_obj); + } + json_object_object_add(json_root, "hosts", host_array); + + ret = json_object_to_fd(1, json_root, JSON_C_TO_STRING_PRETTY); + if (ret < 0) { + nvme_msg(r, LOG_ERR, "Failed to write to stdout, %s\n", + json_util_get_last_err()); + ret = -1; + errno = EIO; + } + json_object_put(json_root); + + return ret; +} diff --git a/src/nvme/linux.c b/src/nvme/linux.c new file mode 100644 index 0000000..c9b6bbf --- /dev/null +++ b/src/nvme/linux.c @@ -0,0 +1,679 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#include <sys/param.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> + +#ifdef CONFIG_OPENSSL +#include <openssl/engine.h> +#include <openssl/evp.h> +#include <openssl/hmac.h> + +#ifdef CONFIG_OPENSSL_3 +#include <openssl/core_names.h> +#include <openssl/params.h> +#endif +#endif + +#include <ccan/endian/endian.h> + +#include "linux.h" +#include "tree.h" +#include "log.h" +#include "private.h" + +static int __nvme_open(const char *name) +{ + char *path; + int fd, ret; + + ret = asprintf(&path, "%s/%s", "/dev", name); + if (ret < 0) { + errno = ENOMEM; + return -1; + } + + fd = open(path, O_RDONLY); + free(path); + return fd; +} + +int nvme_open(const char *name) +{ + int ret, fd, id, ns; + struct stat stat; + bool c; + + ret = sscanf(name, "nvme%dn%d", &id, &ns); + if (ret != 1 && ret != 2) { + errno = EINVAL; + return -1; + } + c = ret == 1; + + fd = __nvme_open(name); + if (fd < 0) + return fd; + + ret = fstat(fd, &stat); + if (ret < 0) + goto close_fd; + + if (c) { + if (!S_ISCHR(stat.st_mode)) { + errno = EINVAL; + goto close_fd; + } + } else if (!S_ISBLK(stat.st_mode)) { + errno = EINVAL; + goto close_fd; + } + + return fd; + +close_fd: + close(fd); + return -1; +} + +int nvme_fw_download_seq(int fd, __u32 size, __u32 xfer, __u32 offset, + void *buf) +{ + int err = 0; + struct nvme_fw_download_args args = { + .args_size = sizeof(args), + .fd = fd, + .offset = offset, + .data_len = xfer, + .data = buf, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + while (size > 0) { + args.data_len = MIN(xfer, size); + err = nvme_fw_download(&args); + if (err) + break; + + args.data += xfer; + size -= xfer; + args.offset += xfer; + } + + return err; +} + +int nvme_get_log_page(int fd, __u32 xfer_len, struct nvme_get_log_args *args) +{ + __u64 offset = 0, xfer, data_len = args->len; + bool retain = true; + void *ptr = args->log; + int ret; + + /* + * 4k is the smallest possible transfer unit, so restricting to 4k + * avoids having to check the MDTS value of the controller. + */ + do { + xfer = data_len - offset; + if (xfer > xfer_len) + xfer = xfer_len; + + /* + * Always retain regardless of the RAE parameter until the very + * last portion of this log page so the data remains latched + * during the fetch sequence. 
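A minimal sketch combining nvme_open() and nvme_fw_download_seq() (illustrative only; the firmware image is assumed to already sit in a suitably sized buffer):

#include <stdio.h>
#include <unistd.h>
#include <libnvme.h>

/* Illustrative only: open a controller by name and push a firmware image in 4KiB chunks. */
static int example_fw_download(const char *name, void *fw_buf, __u32 fw_size)
{
	int fd, err;

	fd = nvme_open(name);	/* e.g. "nvme0" */
	if (fd < 0) {
		perror("nvme_open");
		return fd;
	}

	err = nvme_fw_download_seq(fd, fw_size, 4096, 0, fw_buf);
	close(fd);
	return err;
}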
+ */ + if (offset + xfer == data_len) + retain = args->rae; + + args->lpo = offset; + args->len = xfer; + args->log = ptr; + args->rae = retain; + ret = nvme_get_log(args); + if (ret) + return ret; + + offset += xfer; + ptr += xfer; + } while (offset < data_len); + + return 0; +} + +static int nvme_get_telemetry_log(int fd, bool create, bool ctrl, bool rae, + struct nvme_telemetry_log **buf, enum nvme_telemetry_da da, + size_t *size) +{ + static const __u32 xfer = NVME_LOG_TELEM_BLOCK_SIZE; + + struct nvme_telemetry_log *telem; + enum nvme_cmd_get_log_lid lid; + struct nvme_id_ctrl id_ctrl; + void *log, *tmp; + int err; + struct nvme_get_log_args args = { + .args_size = sizeof(args), + .fd = fd, + .nsid = NVME_NSID_NONE, + .lsp = NVME_LOG_LSP_NONE, + .lsi = NVME_LOG_LSI_NONE, + .uuidx = NVME_UUID_NONE, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + .csi = NVME_CSI_NVM, + .rae = rae, + .ot = false, + }; + + *size = 0; + + log = malloc(xfer); + if (!log) { + errno = ENOMEM; + return -1; + } + + if (ctrl) { + err = nvme_get_log_telemetry_ctrl(fd, true, 0, xfer, log); + lid = NVME_LOG_LID_TELEMETRY_CTRL; + } else { + lid = NVME_LOG_LID_TELEMETRY_HOST; + if (create) + err = nvme_get_log_create_telemetry_host(fd, log); + else + err = nvme_get_log_telemetry_host(fd, 0, xfer, log); + } + + if (err) + goto free; + + telem = log; + if (ctrl && !telem->ctrlavail) { + *buf = log; + *size = xfer; + return 0; + } + + switch (da) { + case NVME_TELEMETRY_DA_1: + case NVME_TELEMETRY_DA_2: + case NVME_TELEMETRY_DA_3: + /* dalb3 >= dalb2 >= dalb1 */ + *size = (le16_to_cpu(telem->dalb3) + 1) * xfer; + break; + case NVME_TELEMETRY_DA_4: + err = nvme_identify_ctrl(fd, &id_ctrl); + if (err) { + perror("identify-ctrl"); + errno = EINVAL; + goto free; + } + + if (id_ctrl.lpa & 0x40) { + *size = (le32_to_cpu(telem->dalb4) + 1) * xfer; + } else { + fprintf(stderr, "Data area 4 unsupported, bit 6 of Log Page Attributes not set\n"); + errno = EINVAL; + err = -1; + goto free; + } + break; + default: + fprintf(stderr, "Invalid data area parameter - %d\n", da); + errno = EINVAL; + err = -1; + goto free; + } + + tmp = realloc(log, *size); + if (!tmp) { + errno = ENOMEM; + err = -1; + goto free; + } + log = tmp; + + args.lid = lid; + args.log = log; + args.len = *size; + err = nvme_get_log_page(fd, 4096, &args); + if (!err) { + *buf = log; + return 0; + } +free: + free(log); + return err; +} + +int nvme_get_ctrl_telemetry(int fd, bool rae, struct nvme_telemetry_log **log, + enum nvme_telemetry_da da, size_t *size) +{ + return nvme_get_telemetry_log(fd, false, true, rae, log, da, size); +} + +int nvme_get_host_telemetry(int fd, struct nvme_telemetry_log **log, + enum nvme_telemetry_da da, size_t *size) +{ + return nvme_get_telemetry_log(fd, false, false, false, log, da, size); +} + +int nvme_get_new_host_telemetry(int fd, struct nvme_telemetry_log **log, + enum nvme_telemetry_da da, size_t *size) +{ + return nvme_get_telemetry_log(fd, true, false, false, log, da, size); +} + +int nvme_get_lba_status_log(int fd, bool rae, struct nvme_lba_status_log **log) +{ + __u32 size = sizeof(struct nvme_lba_status_log); + void *buf, *tmp; + int err; + struct nvme_get_log_args args = { + .args_size = sizeof(args), + .fd = fd, + .nsid = NVME_NSID_NONE, + .lsp = NVME_LOG_LSP_NONE, + .lsi = NVME_LOG_LSI_NONE, + .uuidx = NVME_UUID_NONE, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + .csi = NVME_CSI_NVM, + .rae = rae, + .ot = false, + }; + + buf = malloc(size); + if (!buf) + return -1; + + *log = buf; + err = 
nvme_get_log_lba_status(fd, true, 0, size, buf); + if (err) + goto free; + + size = le32_to_cpu((*log)->lslplen); + if (!size) + return 0; + + tmp = realloc(buf, size); + if (!tmp) { + err = -1; + goto free; + } + buf = tmp; + *log = buf; + + args.lid = NVME_LOG_LID_LBA_STATUS; + args.log = buf; + args.len = size; + err = nvme_get_log_page(fd, 4096, &args); + if (!err) + return 0; + +free: + *log = NULL; + free(buf); + return err; +} + +static int nvme_ns_attachment(int fd, __u32 nsid, __u16 num_ctrls, + __u16 *ctrlist, bool attach, __u32 timeout) +{ + struct nvme_ctrl_list cntlist = { 0 }; + struct nvme_ns_attach_args args = { + .args_size = sizeof(args), + .fd = fd, + .nsid = nsid, + .sel = NVME_NS_ATTACH_SEL_CTRL_DEATTACH, + .ctrlist = &cntlist, + .timeout = timeout, + }; + + if (attach) + args.sel = NVME_NS_ATTACH_SEL_CTRL_ATTACH; + + nvme_init_ctrl_list(args.ctrlist, num_ctrls, ctrlist); + return nvme_ns_attach(&args); +} + +int nvme_namespace_attach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, + __u16 *ctrlist) +{ + return nvme_ns_attachment(fd, nsid, num_ctrls, ctrlist, true, + NVME_DEFAULT_IOCTL_TIMEOUT); +} + +int nvme_namespace_detach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, + __u16 *ctrlist) +{ + return nvme_ns_attachment(fd, nsid, num_ctrls, ctrlist, false, + NVME_DEFAULT_IOCTL_TIMEOUT); +} + +int nvme_get_ana_log_len(int fd, size_t *analen) +{ + struct nvme_id_ctrl ctrl; + int ret; + + ret = nvme_identify_ctrl(fd, &ctrl); + if (ret) + return ret; + + *analen = sizeof(struct nvme_ana_log) + + le32_to_cpu(ctrl.nanagrpid) * sizeof(struct nvme_ana_group_desc) + + le32_to_cpu(ctrl.mnan) * sizeof(__le32); + return 0; +} + +int nvme_get_logical_block_size(int fd, __u32 nsid, int *blksize) +{ + struct nvme_id_ns ns; + __u8 flbas; + int ret; + + ret = nvme_identify_ns(fd, nsid, &ns); + if (ret) + return ret; + + nvme_id_ns_flbas_to_lbaf_inuse(ns.flbas, &flbas); + *blksize = 1 << ns.lbaf[flbas].ds; + + return 0; +} + +static int __nvme_set_attr(const char *path, const char *value) +{ + int ret, fd; + + fd = open(path, O_WRONLY); + if (fd < 0) { +#if 0 + nvme_msg(LOG_DEBUG, "Failed to open %s: %s\n", path, + strerror(errno)); +#endif + return -1; + } + ret = write(fd, value, strlen(value)); + close(fd); + return ret; +} + +int nvme_set_attr(const char *dir, const char *attr, const char *value) +{ + char *path; + int ret; + + ret = asprintf(&path, "%s/%s", dir, attr); + if (ret < 0) + return -1; + + ret = __nvme_set_attr(path, value); + free(path); + return ret; +} + +static char *__nvme_get_attr(const char *path) +{ + char value[4096] = { 0 }; + int ret, fd; + int saved_errno; + + fd = open(path, O_RDONLY); + if (fd < 0) + return NULL; + + ret = read(fd, value, sizeof(value) - 1); + saved_errno = errno; + close(fd); + if (ret < 0) { + errno = saved_errno; + return NULL; + } + errno = 0; + if (!strlen(value)) + return NULL; + + if (value[strlen(value) - 1] == '\n') + value[strlen(value) - 1] = '\0'; + while (strlen(value) > 0 && value[strlen(value) - 1] == ' ') + value[strlen(value) - 1] = '\0'; + + return strlen(value) ? 
strdup(value) : NULL; +} + +char *nvme_get_attr(const char *dir, const char *attr) +{ + char *path, *value; + int ret; + + ret = asprintf(&path, "%s/%s", dir, attr); + if (ret < 0) { + errno = ENOMEM; + return NULL; + } + + value = __nvme_get_attr(path); + free(path); + return value; +} + +char *nvme_get_subsys_attr(nvme_subsystem_t s, const char *attr) +{ + return nvme_get_attr(nvme_subsystem_get_sysfs_dir(s), attr); +} + +char *nvme_get_ctrl_attr(nvme_ctrl_t c, const char *attr) +{ + return nvme_get_attr(nvme_ctrl_get_sysfs_dir(c), attr); +} + +char *nvme_get_ns_attr(nvme_ns_t n, const char *attr) +{ + return nvme_get_attr(nvme_ns_get_sysfs_dir(n), attr); +} + +char *nvme_get_path_attr(nvme_path_t p, const char *attr) +{ + return nvme_get_attr(nvme_path_get_sysfs_dir(p), attr); +} + +#ifndef CONFIG_OPENSSL +int nvme_gen_dhchap_key(char *hostnqn, enum nvme_hmac_alg hmac, + unsigned int key_len, unsigned char *secret, + unsigned char *key) +{ + if (hmac != NVME_HMAC_ALG_NONE) { + nvme_msg(NULL, LOG_ERR, "HMAC transformation not supported; " \ + "recompile with OpenSSL support.\n"); + errno = -EINVAL; + return -1; + } + + memcpy(key, secret, key_len); + return 0; +} +#endif /* !CONFIG_OPENSSL */ + +#ifdef CONFIG_OPENSSL_1 +int nvme_gen_dhchap_key(char *hostnqn, enum nvme_hmac_alg hmac, + unsigned int key_len, unsigned char *secret, + unsigned char *key) +{ + const char hmac_seed[] = "NVMe-over-Fabrics"; + HMAC_CTX *hmac_ctx; + const EVP_MD *md; + int err = -1; + + ENGINE_load_builtin_engines(); + ENGINE_register_all_complete(); + + hmac_ctx = HMAC_CTX_new(); + if (!hmac_ctx) { + errno = ENOMEM; + return err; + } + + switch (hmac) { + case NVME_HMAC_ALG_NONE: + memcpy(key, secret, key_len); + err = 0; + goto out; + case NVME_HMAC_ALG_SHA2_256: + md = EVP_sha256(); + break; + case NVME_HMAC_ALG_SHA2_384: + md = EVP_sha384(); + break; + case NVME_HMAC_ALG_SHA2_512: + md = EVP_sha512(); + break; + default: + errno = EINVAL; + goto out; + } + + if (!md) { + errno = EINVAL; + goto out; + } + + if (!HMAC_Init_ex(hmac_ctx, secret, key_len, md, NULL)) { + errno = ENOMEM; + goto out; + } + + if (!HMAC_Update(hmac_ctx, (unsigned char *)hostnqn, + strlen(hostnqn))) { + errno = ENOKEY; + goto out; + } + + if (!HMAC_Update(hmac_ctx, (unsigned char *)hmac_seed, + strlen(hmac_seed))) { + errno = ENOKEY; + goto out; + } + + if (!HMAC_Final(hmac_ctx, key, &key_len)) { + errno = ENOKEY; + goto out; + } + + err = 0; + +out: + HMAC_CTX_free(hmac_ctx); + return err; +} +#endif /* !CONFIG_OPENSSL_1 */ + +#ifdef CONFIG_OPENSSL_3 +int nvme_gen_dhchap_key(char *hostnqn, enum nvme_hmac_alg hmac, + unsigned int key_len, unsigned char *secret, + unsigned char *key) +{ + const char hmac_seed[] = "NVMe-over-Fabrics"; + OSSL_PARAM params[2], *p = params; + OSSL_LIB_CTX *lib_ctx; + EVP_MAC_CTX *mac_ctx = NULL; + EVP_MAC *mac = NULL; + char *progq = NULL; + char *digest; + size_t len; + int err = -1; + + lib_ctx = OSSL_LIB_CTX_new(); + if (!lib_ctx) { + errno = ENOMEM; + return err; + } + + mac = EVP_MAC_fetch(lib_ctx, OSSL_MAC_NAME_HMAC, progq); + if (!mac) { + errno = ENOMEM; + goto out; + } + + mac_ctx = EVP_MAC_CTX_new(mac); + if (!mac_ctx) { + errno = ENOMEM; + goto out; + } + + switch (hmac) { + case NVME_HMAC_ALG_NONE: + memcpy(key, secret, key_len); + err = 0; + goto out; + case NVME_HMAC_ALG_SHA2_256: + digest = OSSL_DIGEST_NAME_SHA2_256; + break; + case NVME_HMAC_ALG_SHA2_384: + digest = OSSL_DIGEST_NAME_SHA2_384; + break; + case NVME_HMAC_ALG_SHA2_512: + digest = OSSL_DIGEST_NAME_SHA2_512; + break; + default: + 
errno = EINVAL; + goto out; + } + *p++ = OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST, + digest, + 0); + *p = OSSL_PARAM_construct_end(); + + if (!EVP_MAC_init(mac_ctx, secret, key_len, params)) { + errno = ENOKEY; + goto out; + } + + if (!EVP_MAC_update(mac_ctx, (unsigned char *)hostnqn, + strlen(hostnqn))) { + errno = ENOKEY; + goto out; + } + + if (!EVP_MAC_update(mac_ctx, (unsigned char *)hmac_seed, + strlen(hmac_seed))) { + errno = ENOKEY; + goto out; + } + + if (!EVP_MAC_final(mac_ctx, key, &len, key_len)) { + errno = ENOKEY; + goto out; + } + + if (len != key_len) { + errno = EMSGSIZE; + goto out; + } + + err = 0; + +out: + EVP_MAC_CTX_free(mac_ctx); + EVP_MAC_free(mac); + OSSL_LIB_CTX_free(lib_ctx); + + return err; +} +#endif /* !CONFIG_OPENSSL_3 */ diff --git a/src/nvme/linux.h b/src/nvme/linux.h new file mode 100644 index 0000000..6223c0a --- /dev/null +++ b/src/nvme/linux.h @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ +#ifndef _LIBNVME_LINUX_H +#define _LIBNVME_LINUX_H + +#include <stddef.h> + +#include "ioctl.h" +#include "types.h" + +/** + * DOC: linux.h + * + * linux-specific utility functions + */ + +/** + * nvme_fw_download_seq() - Firmware download sequence + * @fd: File descriptor of nvme device + * @size: Total size of the firmware image to transfer + * @xfer: Maximum size to send with each partial transfer + * @offset: Starting offset to send with this firmware downlaod + * @buf: Address of buffer containing all or part of the firmware image. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_fw_download_seq(int fd, __u32 size, __u32 xfer, __u32 offset, + void *buf); + +/** + * enum nvme_telemetry_da - Telemetry Log Data Area + * @NVME_TELEMETRY_DA_1: Data Area 1 + * @NVME_TELEMETRY_DA_2: Data Area 2 + * @NVME_TELEMETRY_DA_3: Data Area 3 + * @NVME_TELEMETRY_DA_4: Data Area 4 + */ +enum nvme_telemetry_da { + NVME_TELEMETRY_DA_1 = 1, + NVME_TELEMETRY_DA_2 = 2, + NVME_TELEMETRY_DA_3 = 3, + NVME_TELEMETRY_DA_4 = 4, +}; + +/** + * nvme_get_ctrl_telemetry() - Get controller telemetry log + * @fd: File descriptor of nvme device + * @rae: Retain asynchronous events + * @log: On success, set to the value of the allocated and retrieved log. + * @da: Log page data area, valid values: &enum nvme_telemetry_da + * @size: Ptr to the telemetry log size, so it can be returned + * + * The total size allocated can be calculated as: + * (nvme_telemetry_log da size + 1) * NVME_LOG_TELEM_BLOCK_SIZE. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_ctrl_telemetry(int fd, bool rae, struct nvme_telemetry_log **log, + enum nvme_telemetry_da da, size_t *size); + +/** + * nvme_get_host_telemetry() - Get host telemetry log + * @fd: File descriptor of nvme device + * @log: On success, set to the value of the allocated and retrieved log. + * @da: Log page data area, valid values: &enum nvme_telemetry_da + * @size: Ptr to the telemetry log size, so it can be returned + * + * The total size allocated can be calculated as: + * (nvme_telemetry_log da size + 1) * NVME_LOG_TELEM_BLOCK_SIZE. 
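+ * (NVME_LOG_TELEM_BLOCK_SIZE is the 512-byte telemetry data block size.)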
+ * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_host_telemetry(int fd, struct nvme_telemetry_log **log, + enum nvme_telemetry_da da, size_t *size); + +/** + * nvme_get_new_host_telemetry() - Get new host telemetry log + * @fd: File descriptor of nvme device + * @log: On success, set to the value of the allocated and retrieved log. + * @da: Log page data area, valid values: &enum nvme_telemetry_da + * @size: Ptr to the telemetry log size, so it can be returned + * + * The total size allocated can be calculated as: + * (nvme_telemetry_log da size + 1) * NVME_LOG_TELEM_BLOCK_SIZE. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_new_host_telemetry(int fd, struct nvme_telemetry_log **log, + enum nvme_telemetry_da da, size_t *size); + +/** + * nvme_get_log_page() - Get log page data + * @fd: File descriptor of nvme device + * @xfer_len: Max log transfer size per request to split the total. + * @args: &struct nvme_get_log_args argument structure + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_log_page(int fd, __u32 xfer_len, struct nvme_get_log_args *args); + +/** + * nvme_get_ana_log_len() - Retreive size of the current ANA log + * @fd: File descriptor of nvme device + * @analen: Pointer to where the length will be set on success + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_ana_log_len(int fd, size_t *analen); + +/** + * nvme_get_logical_block_size() - Retrieve block size + * @fd: File descriptor of nvme device + * @nsid: Namespace id + * @blksize: Pointer to where the block size will be set on success + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_logical_block_size(int fd, __u32 nsid, int *blksize); + +/** + * nvme_get_lba_status_log() - Retreive the LBA Status log page + * @fd: File descriptor of the nvme device + * @rae: Retain asynchronous events + * @log: On success, set to the value of the allocated and retreived log. + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_get_lba_status_log(int fd, bool rae, struct nvme_lba_status_log **log); + +/** + * nvme_namespace_attach_ctrls() - Attach namespace to controller(s) + * @fd: File descriptor of nvme device + * @nsid: Namespace ID to attach + * @num_ctrls: Number of controllers in ctrlist + * @ctrlist: List of controller IDs to perform the attach action + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. + */ +int nvme_namespace_attach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist); + +/** + * nvme_namespace_detach_ctrls() - Detach namespace from controller(s) + * @fd: File descriptor of nvme device + * @nsid: Namespace ID to detach + * @num_ctrls: Number of controllers in ctrlist + * @ctrlist: List of controller IDs to perform the detach action + * + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. 
+ */ +int nvme_namespace_detach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist); + +/** + * nvme_open() - Open an nvme controller or namespace device + * @name: The basename of the device to open + * + * This will look for the handle in /dev/ and validate the name and filetype + * match linux conventions. + * + * Return: A file descriptor for the device on a successful open, or -1 with + * errno set otherwise. + */ +int nvme_open(const char *name); + +/** + * enum nvme_hmac_alg - HMAC algorithm + * @NVME_HMAC_ALG_NONE: No HMAC algorithm + * @NVME_HMAC_ALG_SHA2_256: SHA2-256 + * @NVME_HMAC_ALG_SHA2_384: SHA2-384 + * @NVME_HMAC_ALG_SHA2_512: SHA2-512 + */ +enum nvme_hmac_alg { + NVME_HMAC_ALG_NONE = 0, + NVME_HMAC_ALG_SHA2_256 = 1, + NVME_HMAC_ALG_SHA2_384 = 2, + NVME_HMAC_ALG_SHA2_512 = 3, +}; + +/** + * nvme_gen_dhchap_key() - DH-HMAC-CHAP key generation + * @hostnqn: Host NVMe Qualified Name + * @hmac: HMAC algorithm + * @key_len: Output key length + * @secret: Secret to used for digest + * @key: Generated DH-HMAC-CHAP key + * + * Return: If key generation was successful the function returns 0 or + * -1 with errno set otherwise. + */ +int nvme_gen_dhchap_key(char *hostnqn, enum nvme_hmac_alg hmac, + unsigned int key_len, unsigned char *secret, + unsigned char *key); + +#endif /* _LIBNVME_LINUX_H */ diff --git a/src/nvme/log.c b/src/nvme/log.c new file mode 100644 index 0000000..1bc44f2 --- /dev/null +++ b/src/nvme/log.c @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2021 SUSE LLC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This file implements basic logging functionality. + */ + +#include <sys/types.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <stdbool.h> +#include <syslog.h> +#include <unistd.h> +#include <time.h> +#include <string.h> +#define LOG_FUNCNAME 1 +#include "private.h" +#include "log.h" +#include "cleanup.h" + +#ifndef LOG_CLOCK +#define LOG_CLOCK CLOCK_MONOTONIC +#endif + +void __attribute__((format(printf, 4, 5))) +__nvme_msg(nvme_root_t r, int lvl, + const char *func, const char *format, ...) +{ + FILE *fp = r ? r->fp : stderr; + va_list ap; + char pidbuf[16]; + char timebuf[32]; + static const char *const formats[] = { + "%s%s%s", + "%s%s%s: ", + "%s<%s>%s ", + "%s<%s> %s: ", + "[%s] %s%s ", + "[%s]%s %s: ", + "[%s] <%s>%s ", + "[%s] <%s> %s: ", + }; + char *header __cleanup__(cleanup_charp) = NULL; + char *message __cleanup__(cleanup_charp) = NULL; + int idx = 0; + + if (r && lvl > r->log_level) + return; + + if (r && r->log_timestamp) { + struct timespec now; + + clock_gettime(LOG_CLOCK, &now); + snprintf(timebuf, sizeof(timebuf), "%6ld.%06ld", + (long)now.tv_sec, now.tv_nsec / 1000); + idx |= 1 << 2; + } else + *timebuf = '\0'; + + if (r && r->log_pid) { + snprintf(pidbuf, sizeof(pidbuf), "%ld", (long)getpid()); + idx |= 1 << 1; + } else + *pidbuf = '\0'; + + if (func) + idx |= 1 << 0; + + if (asprintf(&header, formats[idx], + timebuf, pidbuf, func ? 
func : "") == -1) + header = NULL; + + va_start(ap, format); + if (vasprintf(&message, format, ap) == -1) + message = NULL; + va_end(ap); + + fprintf(fp, "%s%s", + header ? header : "<error>", + message ? message : "<error>"); +} + +void nvme_init_logging(nvme_root_t r, int lvl, bool log_pid, bool log_tstamp) +{ + r->log_level = lvl; + r->log_pid = log_pid; + r->log_timestamp = log_tstamp; +} diff --git a/src/nvme/log.h b/src/nvme/log.h new file mode 100644 index 0000000..1cf797a --- /dev/null +++ b/src/nvme/log.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * Copyright (c) 2021 Martin Wilck, SUSE LLC + */ +#ifndef _LOG_H +#define _LOG_H + +#include <stdbool.h> +#include <syslog.h> + +/* for nvme_root_t */ +#include "tree.h" + +#ifndef MAX_LOGLEVEL +# define MAX_LOGLEVEL LOG_DEBUG +#endif +#ifndef DEFAULT_LOGLEVEL +# define DEFAULT_LOGLEVEL LOG_NOTICE +#endif + +/** + * DOC: log.h + * + * logging functions + */ + +/** + * nvme_init_logging() - Initialize logging + * @r: nvme_root_t context + * @lvl: Logging level to set + * @log_pid: Boolean to enable logging of the PID + * @log_tstamp: Boolean to enable logging of the timestamp + * + * Sets the default logging variables for the library. + */ +void nvme_init_logging(nvme_root_t r, int lvl, bool log_pid, bool log_tstamp); + +#endif /* _LOG_H */ diff --git a/src/nvme/private.h b/src/nvme/private.h new file mode 100644 index 0000000..bea1ae9 --- /dev/null +++ b/src/nvme/private.h @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2021 SUSE Software Solutions + * + * Authors: Hannes Reinecke <hare@suse.de> + */ + +#ifndef _LIBNVME_PRIVATE_H +#define _LIBNVME_PRIVATE_H + +#include <ccan/list/list.h> + +#include "fabrics.h" + +#include <uuid/uuid.h> + + +extern const char *nvme_ctrl_sysfs_dir; +extern const char *nvme_subsys_sysfs_dir; +extern const char *nvme_ns_sysfs_dir; + +struct nvme_path { + struct list_node entry; + struct list_node nentry; + + struct nvme_ctrl *c; + struct nvme_ns *n; + + char *name; + char *sysfs_dir; + char *ana_state; + int grpid; +}; + +struct nvme_ns { + struct list_node entry; + struct list_head paths; + + struct nvme_subsystem *s; + struct nvme_ctrl *c; + + int fd; + __u32 nsid; + char *name; + char *generic_name; + char *sysfs_dir; + + int lba_shift; + int lba_size; + int meta_size; + uint64_t lba_count; + uint64_t lba_util; + + uint8_t eui64[8]; + uint8_t nguid[16]; + uuid_t uuid; + enum nvme_csi csi; +}; + +struct nvme_ctrl { + struct list_node entry; + struct list_head paths; + struct list_head namespaces; + struct nvme_subsystem *s; + + int fd; + char *name; + char *sysfs_dir; + char *address; + char *firmware; + char *model; + char *state; + char *numa_node; + char *queue_count; + char *serial; + char *sqsize; + char *transport; + char *subsysnqn; + char *traddr; + char *trsvcid; + char *dhchap_key; + char *cntrltype; + char *dctype; + bool discovery_ctrl; + bool discovered; + bool persistent; + struct nvme_fabrics_config cfg; +}; + +struct nvme_subsystem { + struct list_node entry; + struct list_head ctrls; + struct list_head namespaces; + struct nvme_host *h; + + char *name; + char *sysfs_dir; + char *subsysnqn; + char *model; + char *serial; + char *firmware; + char *subsystype; +}; + +struct nvme_host { + struct list_node entry; + struct list_head subsystems; + struct nvme_root *r; + + char *hostnqn; + char *hostid; + char *dhchap_key; + char *hostsymname; +}; + +struct nvme_root { + char *config_file; + struct 
list_head hosts; + FILE *fp; + int log_level; + bool log_pid; + bool log_timestamp; + bool modified; +}; + +int nvme_set_attr(const char *dir, const char *attr, const char *value); + +int json_read_config(nvme_root_t r, const char *config_file); + +int json_update_config(nvme_root_t r, const char *config_file); + +int json_dump_tree(nvme_root_t r); + +#if (LOG_FUNCNAME == 1) +#define __nvme_log_func __func__ +#else +#define __nvme_log_func NULL +#endif + +void __attribute__((format(printf, 4, 5))) +__nvme_msg(nvme_root_t r, int lvl, const char *func, const char *format, ...); + +#define nvme_msg(r, lvl, format, ...) \ + do { \ + if ((lvl) <= MAX_LOGLEVEL) \ + __nvme_msg(r, lvl, __nvme_log_func, \ + format, ##__VA_ARGS__); \ + } while (0) + +#endif /* _LIBNVME_PRIVATE_H */ diff --git a/src/nvme/tree.c b/src/nvme/tree.c new file mode 100644 index 0000000..a2cfa6a --- /dev/null +++ b/src/nvme/tree.c @@ -0,0 +1,1909 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ +#include <dirent.h> +#include <errno.h> +#include <stdint.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <fcntl.h> +#include <libgen.h> +#include <unistd.h> + +#include <sys/types.h> +#include <sys/stat.h> +#include <arpa/inet.h> +#include <netdb.h> + +#include <ccan/endian/endian.h> +#include <ccan/list/list.h> + +#include "ioctl.h" +#include "linux.h" +#include "filters.h" +#include "tree.h" +#include "filters.h" +#include "util.h" +#include "fabrics.h" +#include "log.h" +#include "private.h" + +static struct nvme_host *default_host; + +static void __nvme_free_host(nvme_host_t h); +static void __nvme_free_ctrl(nvme_ctrl_t c); +static int nvme_subsystem_scan_namespace(nvme_root_t r, + struct nvme_subsystem *s, char *name, + nvme_scan_filter_t f, void *f_args); +static int nvme_init_subsystem(nvme_subsystem_t s, const char *name); +static int nvme_scan_subsystem(nvme_root_t r, const char *name, + nvme_scan_filter_t f, void *f_args); +static int nvme_ctrl_scan_namespace(nvme_root_t r, struct nvme_ctrl *c, + char *name); +static int nvme_ctrl_scan_path(nvme_root_t r, struct nvme_ctrl *c, char *name); + +static inline void nvme_free_dirents(struct dirent **d, int i) +{ + while (i-- > 0) + free(d[i]); + free(d); +} + +nvme_host_t nvme_default_host(nvme_root_t r) +{ + struct nvme_host *h; + char *hostnqn, *hostid; + + hostnqn = nvmf_hostnqn_from_file(); + if (!hostnqn) + hostnqn = nvmf_hostnqn_generate(); + hostid = nvmf_hostid_from_file(); + + h = nvme_lookup_host(r, hostnqn, hostid); + + nvme_host_set_hostsymname(h, NULL); + + default_host = h; + free(hostnqn); + if (hostid) + free(hostid); + return h; +} + +int nvme_scan_topology(struct nvme_root *r, nvme_scan_filter_t f, void *f_args) +{ + struct dirent **subsys, **ctrls; + int i, num_subsys, num_ctrls, ret; + + if (!r) + return 0; + + num_ctrls = nvme_scan_ctrls(&ctrls); + if (num_ctrls < 0) { + nvme_msg(r, LOG_DEBUG, "failed to scan ctrls: %s\n", + strerror(errno)); + return num_ctrls; + } + + for (i = 0; i < num_ctrls; i++) { + nvme_ctrl_t c = nvme_scan_ctrl(r, ctrls[i]->d_name); + if (!c) { + nvme_msg(r, LOG_DEBUG, "failed to scan ctrl %s: %s\n", + ctrls[i]->d_name, strerror(errno)); + continue; + } + if ((f) && !f(NULL, c, NULL, f_args)) { + nvme_msg(r, LOG_DEBUG, "filter out controller %s\n", + ctrls[i]->d_name); + nvme_free_ctrl(c); + } 
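+ /* controllers that pass the filter stay in the tree; filtered ones were freed above */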
+ } + + nvme_free_dirents(ctrls, i); + + num_subsys = nvme_scan_subsystems(&subsys); + if (num_subsys < 0) { + nvme_msg(r, LOG_DEBUG, "failed to scan subsystems: %s\n", + strerror(errno)); + return num_subsys; + } + + for (i = 0; i < num_subsys; i++) { + ret = nvme_scan_subsystem(r, subsys[i]->d_name, f, f_args); + if (ret < 0) { + nvme_msg(r, LOG_DEBUG, + "failed to scan subsystem %s: %s\n", + subsys[i]->d_name, strerror(errno)); + } + } + + nvme_free_dirents(subsys, i); + + return 0; +} + +nvme_root_t nvme_create_root(FILE *fp, int log_level) +{ + struct nvme_root *r = calloc(1, sizeof(*r)); + + if (!r) { + errno = ENOMEM; + return NULL; + } + r->log_level = log_level; + r->fp = stderr; + if (fp) + r->fp = fp; + list_head_init(&r->hosts); + return r; +} + +int nvme_read_config(nvme_root_t r, const char *config_file) +{ + int err = -1; + + if (!r || !config_file) { + errno = ENODEV; + return err; + } + + r->config_file = strdup(config_file); + if (!r->config_file) { + errno = ENOMEM; + return err; + } + err = json_read_config(r, config_file); + /* + * The json configuration file is optional, + * so ignore errors when opening the file. + */ + if (err < 0 && errno != EPROTO) + err = 0; + + return err; +} + +nvme_root_t nvme_scan(const char *config_file) +{ + nvme_root_t r = nvme_create_root(NULL, DEFAULT_LOGLEVEL); + + nvme_scan_topology(r, NULL, NULL); + nvme_read_config(r, config_file); + return r; +} + +int nvme_update_config(nvme_root_t r) +{ + if (!r->modified || !r->config_file) + return 0; + + return json_update_config(r, r->config_file); +} + +int nvme_dump_config(nvme_root_t r) +{ + return json_update_config(r, NULL); +} + +int nvme_dump_tree(nvme_root_t r) +{ + return json_dump_tree(r); +} + +nvme_host_t nvme_first_host(nvme_root_t r) +{ + return list_top(&r->hosts, struct nvme_host, entry); +} + +nvme_host_t nvme_next_host(nvme_root_t r, nvme_host_t h) +{ + return h ? list_next(&r->hosts, h, entry) : NULL; +} + +nvme_root_t nvme_host_get_root(nvme_host_t h) +{ + return h->r; +} + +const char *nvme_host_get_hostnqn(nvme_host_t h) +{ + return h->hostnqn; +} + +const char *nvme_host_get_hostid(nvme_host_t h) +{ + return h->hostid; +} + +const char *nvme_host_get_hostsymname(nvme_host_t h) +{ + return h->hostsymname; +} + +void nvme_host_set_hostsymname(nvme_host_t h, const char *hostsymname) +{ + if (h->hostsymname) { + free(h->hostsymname); + h->hostsymname = NULL; + } + if (hostsymname) + h->hostsymname = strdup(hostsymname); +} + +const char *nvme_host_get_dhchap_key(nvme_host_t h) +{ + return h->dhchap_key; +} + +void nvme_host_set_dhchap_key(nvme_host_t h, const char *key) +{ + if (h->dhchap_key) { + free(h->dhchap_key); + h->dhchap_key = NULL; + } + if (key) + h->dhchap_key = strdup(key); +} + +nvme_subsystem_t nvme_first_subsystem(nvme_host_t h) +{ + return list_top(&h->subsystems, struct nvme_subsystem, entry); +} + +nvme_subsystem_t nvme_next_subsystem(nvme_host_t h, nvme_subsystem_t s) +{ + return s ? 
list_next(&h->subsystems, s, entry) : NULL; +} + +void nvme_refresh_topology(nvme_root_t r) +{ + struct nvme_host *h, *_h; + + nvme_for_each_host_safe(r, h, _h) + __nvme_free_host(h); + nvme_scan_topology(r, NULL, NULL); +} + +void nvme_free_tree(nvme_root_t r) +{ + struct nvme_host *h, *_h; + + nvme_for_each_host_safe(r, h, _h) + __nvme_free_host(h); + if (r->config_file) + free(r->config_file); + free(r); +} + +const char *nvme_subsystem_get_nqn(nvme_subsystem_t s) +{ + return s->subsysnqn; +} + +const char *nvme_subsystem_get_sysfs_dir(nvme_subsystem_t s) +{ + return s->sysfs_dir; +} + +const char *nvme_subsystem_get_name(nvme_subsystem_t s) +{ + return s->name; +} + +const char *nvme_subsystem_get_type(nvme_subsystem_t s) +{ + return s->subsystype; +} + +nvme_ctrl_t nvme_subsystem_first_ctrl(nvme_subsystem_t s) +{ + return list_top(&s->ctrls, struct nvme_ctrl, entry); +} + +nvme_ctrl_t nvme_subsystem_next_ctrl(nvme_subsystem_t s, nvme_ctrl_t c) +{ + return c ? list_next(&s->ctrls, c, entry) : NULL; +} + +nvme_host_t nvme_subsystem_get_host(nvme_subsystem_t s) +{ + return s->h; +} + +nvme_ns_t nvme_subsystem_first_ns(nvme_subsystem_t s) +{ + return list_top(&s->namespaces, struct nvme_ns, entry); +} + +nvme_ns_t nvme_subsystem_next_ns(nvme_subsystem_t s, nvme_ns_t n) +{ + return n ? list_next(&s->namespaces, n, entry) : NULL; +} + +nvme_path_t nvme_namespace_first_path(nvme_ns_t ns) +{ + return list_top(&ns->paths, struct nvme_path, nentry); +} + +nvme_path_t nvme_namespace_next_path(nvme_ns_t ns, nvme_path_t p) +{ + return p ? list_next(&ns->paths, p, nentry) : NULL; +} + +static void __nvme_free_ns(struct nvme_ns *n) +{ + list_del_init(&n->entry); + close(n->fd); + free(n->generic_name); + free(n->name); + free(n->sysfs_dir); + free(n); +} + +/* Stub for SWIG */ +void nvme_free_ns(struct nvme_ns *n) +{ +} + +static void __nvme_free_subsystem(struct nvme_subsystem *s) +{ + struct nvme_ctrl *c, *_c; + struct nvme_ns *n, *_n; + + list_del_init(&s->entry); + nvme_subsystem_for_each_ctrl_safe(s, c, _c) + __nvme_free_ctrl(c); + + nvme_subsystem_for_each_ns_safe(s, n, _n) + __nvme_free_ns(n); + + if (s->name) + free(s->name); + free(s->sysfs_dir); + free(s->subsysnqn); + if (s->model) + free(s->model); + if (s->serial) + free(s->serial); + if (s->firmware) + free(s->firmware); + if (s->subsystype) + free(s->subsystype); + free(s); +} + +/* + * Stub for SWIG + */ +void nvme_free_subsystem(nvme_subsystem_t s) +{ +} + +struct nvme_subsystem *nvme_alloc_subsystem(struct nvme_host *h, + const char *name, + const char *subsysnqn) +{ + struct nvme_subsystem *s; + + s = calloc(1, sizeof(*s)); + if (!s) + return NULL; + + s->h = h; + s->subsysnqn = strdup(subsysnqn); + if (name) + nvme_init_subsystem(s, name); + list_head_init(&s->ctrls); + list_head_init(&s->namespaces); + list_node_init(&s->entry); + list_add(&h->subsystems, &s->entry); + h->r->modified = true; + return s; +} + +struct nvme_subsystem *nvme_lookup_subsystem(struct nvme_host *h, + const char *name, + const char *subsysnqn) +{ + struct nvme_subsystem *s; + + nvme_for_each_subsystem(h, s) { + if (strcmp(s->subsysnqn, subsysnqn)) + continue; + if (name && s->name && + strcmp(s->name, name)) + continue; + return s; + } + return nvme_alloc_subsystem(h, name, subsysnqn); +} + +static void __nvme_free_host(struct nvme_host *h) +{ + struct nvme_subsystem *s, *_s; + + list_del_init(&h->entry); + nvme_for_each_subsystem_safe(h, s, _s) + __nvme_free_subsystem(s); + free(h->hostnqn); + if (h->hostid) + free(h->hostid); + if (h->dhchap_key) + 
free(h->dhchap_key); + nvme_host_set_hostsymname(h, NULL); + h->r->modified = true; + free(h); +} + +/* Stub for SWIG */ +void nvme_free_host(struct nvme_host *h) +{ +} + +struct nvme_host *nvme_lookup_host(nvme_root_t r, const char *hostnqn, + const char *hostid) +{ + struct nvme_host *h; + + if (!hostnqn) + return NULL; + nvme_for_each_host(r, h) { + if (strcmp(h->hostnqn, hostnqn)) + continue; + if (hostid && (!h->hostid || + strcmp(h->hostid, hostid))) + continue; + return h; + } + h = calloc(1,sizeof(*h)); + if (!h) + return NULL; + h->hostnqn = strdup(hostnqn); + if (hostid) + h->hostid = strdup(hostid); + list_head_init(&h->subsystems); + list_node_init(&h->entry); + h->r = r; + list_add(&r->hosts, &h->entry); + r->modified = true; + + return h; +} + +static int nvme_subsystem_scan_namespaces(nvme_root_t r, nvme_subsystem_t s, + nvme_scan_filter_t f, void *f_args) +{ + struct dirent **namespaces; + int i, num_ns, ret; + + num_ns = nvme_scan_subsystem_namespaces(s, &namespaces); + if (num_ns < 0) { + nvme_msg(r, LOG_DEBUG, + "failed to scan namespaces for subsys %s: %s\n", + s->subsysnqn, strerror(errno)); + return num_ns; + } + + for (i = 0; i < num_ns; i++) { + ret = nvme_subsystem_scan_namespace(r, s, + namespaces[i]->d_name, f, f_args); + if (ret < 0) + nvme_msg(r, LOG_DEBUG, + "failed to scan namespace %s: %s\n", + namespaces[i]->d_name, strerror(errno)); + } + + nvme_free_dirents(namespaces, i); + return 0; +} + +static int nvme_init_subsystem(nvme_subsystem_t s, const char *name) +{ + char *path; + + if (asprintf(&path, "%s/%s", nvme_subsys_sysfs_dir, name) < 0) + return -1; + + s->model = nvme_get_attr(path, "model"); + if (!s->model) + s->model = strdup("undefined"); + s->serial = nvme_get_attr(path, "serial"); + s->firmware = nvme_get_attr(path, "firmware_rev"); + s->subsystype = nvme_get_attr(path, "subsystype"); + if (!s->subsystype) { + if (!strcmp(s->subsysnqn, NVME_DISC_SUBSYS_NAME)) + s->subsystype = strdup("discovery"); + else + s->subsystype = strdup("nvm"); + } + s->name = strdup(name); + s->sysfs_dir = (char *)path; + + return 0; +} + +static int nvme_scan_subsystem(struct nvme_root *r, const char *name, + nvme_scan_filter_t f, void *f_args) +{ + struct nvme_subsystem *s = NULL, *_s; + char *path, *subsysnqn; + nvme_host_t h = NULL; + int ret; + + nvme_msg(r, LOG_DEBUG, "scan subsystem %s\n", name); + ret = asprintf(&path, "%s/%s", nvme_subsys_sysfs_dir, name); + if (ret < 0) + return ret; + + subsysnqn = nvme_get_attr(path, "subsysnqn"); + free(path); + if (!subsysnqn) { + errno = ENODEV; + return -1; + } + nvme_for_each_host(r, h) { + nvme_for_each_subsystem(h, _s) { + /* + * We are always called after nvme_scan_ctrl(), + * so any subsystem we're interested at _must_ + * have a name. + */ + if (!_s->name) + continue; + if (strcmp(_s->name, name)) + continue; + s = _s; + } + } + if (!s) { + /* + * Subsystem with non-matching controller. odd. + * Create a subsystem with the default host + * and hope for the best. 
+ */ + nvme_msg(r, LOG_DEBUG, "creating detached subsystem '%s'\n", + name); + h = nvme_default_host(r); + s = nvme_alloc_subsystem(h, name, subsysnqn); + if (!s) { + errno = ENOMEM; + } + } else if (strcmp(s->subsysnqn, subsysnqn)) { + nvme_msg(r, LOG_WARNING, "NQN mismatch for subsystem '%s'\n", + name); + s = NULL; + errno = EINVAL; + return -1; + } + free(subsysnqn); + if (!s) + return -1; + + if (f && !f(s, NULL, NULL, f_args)) { + nvme_msg(r, LOG_DEBUG, "filter out subsystem %s\n", name); + __nvme_free_subsystem(s); + return 0; + } + + nvme_subsystem_scan_namespaces(r, s, f, f_args); + + return 0; +} + +nvme_ctrl_t nvme_path_get_ctrl(nvme_path_t p) +{ + return p->c; +} + +nvme_ns_t nvme_path_get_ns(nvme_path_t p) +{ + return p->n; +} + +const char *nvme_path_get_sysfs_dir(nvme_path_t p) +{ + return p->sysfs_dir; +} + +const char *nvme_path_get_name(nvme_path_t p) +{ + return p->name; +} + +const char *nvme_path_get_ana_state(nvme_path_t p) +{ + return p->ana_state; +} + +void nvme_free_path(struct nvme_path *p) +{ + list_del_init(&p->entry); + list_del_init(&p->nentry); + free(p->name); + free(p->sysfs_dir); + free(p->ana_state); + free(p); +} + +static void nvme_subsystem_set_path_ns(nvme_subsystem_t s, nvme_path_t p) +{ + char n_name[32] = { }; + int i, c, nsid, ret; + nvme_ns_t n; + + ret = sscanf(nvme_path_get_name(p), "nvme%dc%dn%d", &i, &c, &nsid); + if (ret != 3) + return; + + sprintf(n_name, "nvme%dn%d", i, nsid); + nvme_subsystem_for_each_ns(s, n) { + if (!strcmp(n_name, nvme_ns_get_name(n))) { + list_add(&n->paths, &p->nentry); + p->n = n; + } + } +} + +static int nvme_ctrl_scan_path(nvme_root_t r, struct nvme_ctrl *c, char *name) +{ + struct nvme_path *p; + char *path, *grpid; + int ret; + + nvme_msg(r, LOG_DEBUG, "scan controller %s path %s\n", + c->name, name); + if (!c->s) { + errno = ENXIO; + return -1; + } + ret = asprintf(&path, "%s/%s", c->sysfs_dir, name); + if (ret < 0) { + errno = ENOMEM; + return -1; + } + + p = calloc(1, sizeof(*p)); + if (!p) { + errno = ENOMEM; + goto free_path; + } + + p->c = c; + p->name = strdup(name); + p->sysfs_dir = path; + p->ana_state = nvme_get_path_attr(p, "ana_state"); + if (!p->ana_state) + p->ana_state = strdup("optimized"); + + grpid = nvme_get_path_attr(p, "ana_grpid"); + if (grpid) { + sscanf(grpid, "%d", &p->grpid); + free(grpid); + } + + list_node_init(&p->nentry); + nvme_subsystem_set_path_ns(c->s, p); + list_node_init(&p->entry); + list_add(&c->paths, &p->entry); + return 0; + +free_path: + free(path); + return -1; +} + +int nvme_ctrl_get_fd(nvme_ctrl_t c) +{ + nvme_root_t r = c->s && c->s->h ? c->s->h->r : NULL; + + if (c->fd < 0) { + c->fd = nvme_open(c->name); + if (c->fd < 0) + nvme_msg(r, LOG_ERR, + "Failed to open ctrl %s, errno %d\n", + c->name, errno); + } + return c->fd; +} + +nvme_subsystem_t nvme_ctrl_get_subsystem(nvme_ctrl_t c) +{ + return c->s; +} + +const char *nvme_ctrl_get_name(nvme_ctrl_t c) +{ + return c->name; +} + +const char *nvme_ctrl_get_sysfs_dir(nvme_ctrl_t c) +{ + return c->sysfs_dir; +} + +const char *nvme_ctrl_get_subsysnqn(nvme_ctrl_t c) +{ + return c->s ? c->s->subsysnqn : c->subsysnqn; +} + +const char *nvme_ctrl_get_address(nvme_ctrl_t c) +{ + return c->address ? 
c->address : ""; +} + +const char *nvme_ctrl_get_firmware(nvme_ctrl_t c) +{ + return c->firmware; +} + +const char *nvme_ctrl_get_model(nvme_ctrl_t c) +{ + return c->model; +} + +const char *nvme_ctrl_get_state(nvme_ctrl_t c) +{ + char *state = c->state; + + c->state = nvme_get_ctrl_attr(c, "state"); + if (state) + free(state); + return c->state; +} + +const char *nvme_ctrl_get_numa_node(nvme_ctrl_t c) +{ + return c->numa_node; +} + +const char *nvme_ctrl_get_queue_count(nvme_ctrl_t c) +{ + return c->queue_count; +} + +const char *nvme_ctrl_get_serial(nvme_ctrl_t c) +{ + return c->serial; +} + +const char *nvme_ctrl_get_sqsize(nvme_ctrl_t c) +{ + return c->sqsize; +} + +const char *nvme_ctrl_get_transport(nvme_ctrl_t c) +{ + return c->transport; +} + +const char *nvme_ctrl_get_traddr(nvme_ctrl_t c) +{ + return c->traddr; +} + +const char *nvme_ctrl_get_trsvcid(nvme_ctrl_t c) +{ + return c->trsvcid; +} + +const char *nvme_ctrl_get_host_traddr(nvme_ctrl_t c) +{ + return c->cfg.host_traddr; +} + +const char *nvme_ctrl_get_host_iface(nvme_ctrl_t c) +{ + return c->cfg.host_iface; +} + +struct nvme_fabrics_config *nvme_ctrl_get_config(nvme_ctrl_t c) +{ + return &c->cfg; +} + +const char *nvme_ctrl_get_dhchap_key(nvme_ctrl_t c) +{ + return c->dhchap_key; +} + +void nvme_ctrl_set_dhchap_key(nvme_ctrl_t c, const char *key) +{ + if (c->dhchap_key) { + free(c->dhchap_key); + c->dhchap_key = NULL; + } + if (key) + c->dhchap_key = strdup(key); +} + +void nvme_ctrl_set_discovered(nvme_ctrl_t c, bool discovered) +{ + c->discovered = discovered; +} + +bool nvme_ctrl_is_discovered(nvme_ctrl_t c) +{ + return c->discovered; +} + +void nvme_ctrl_set_persistent(nvme_ctrl_t c, bool persistent) +{ + c->persistent = persistent; +} + +bool nvme_ctrl_is_persistent(nvme_ctrl_t c) +{ + return c->persistent; +} + +void nvme_ctrl_set_discovery_ctrl(nvme_ctrl_t c, bool discovery) +{ + c->discovery_ctrl = discovery; +} + +bool nvme_ctrl_is_discovery_ctrl(nvme_ctrl_t c) +{ + return c->discovery_ctrl; +} + +int nvme_ctrl_identify(nvme_ctrl_t c, struct nvme_id_ctrl *id) +{ + return nvme_identify_ctrl(nvme_ctrl_get_fd(c), id); +} + +nvme_ns_t nvme_ctrl_first_ns(nvme_ctrl_t c) +{ + return list_top(&c->namespaces, struct nvme_ns, entry); +} + +nvme_ns_t nvme_ctrl_next_ns(nvme_ctrl_t c, nvme_ns_t n) +{ + return n ? list_next(&c->namespaces, n, entry) : NULL; +} + +nvme_path_t nvme_ctrl_first_path(nvme_ctrl_t c) +{ + return list_top(&c->paths, struct nvme_path, entry); +} + +nvme_path_t nvme_ctrl_next_path(nvme_ctrl_t c, nvme_path_t p) +{ + return p ? list_next(&c->paths, p, entry) : NULL; +} + +#define FREE_CTRL_ATTR(a) \ + do { if (a) { free(a); (a) = NULL; } } while (0) +void nvme_deconfigure_ctrl(nvme_ctrl_t c) +{ + if (c->fd >= 0) { + close(c->fd); + c->fd = -1; + } + FREE_CTRL_ATTR(c->name); + FREE_CTRL_ATTR(c->sysfs_dir); + FREE_CTRL_ATTR(c->firmware); + FREE_CTRL_ATTR(c->model); + FREE_CTRL_ATTR(c->state); + FREE_CTRL_ATTR(c->numa_node); + FREE_CTRL_ATTR(c->queue_count); + FREE_CTRL_ATTR(c->serial); + FREE_CTRL_ATTR(c->sqsize); + FREE_CTRL_ATTR(c->address); + FREE_CTRL_ATTR(c->dctype); + FREE_CTRL_ATTR(c->cntrltype); +} + +int nvme_disconnect_ctrl(nvme_ctrl_t c) +{ + nvme_root_t r = c->s && c->s->h ? 
c->s->h->r : NULL; + int ret; + + ret = nvme_set_attr(nvme_ctrl_get_sysfs_dir(c), + "delete_controller", "1"); + if (ret < 0) { + nvme_msg(r, LOG_ERR, "%s: failed to disconnect, error %d\n", + c->name, errno); + return ret; + } + nvme_msg(r, LOG_INFO, "%s: disconnected\n", c->name); + nvme_deconfigure_ctrl(c); + return 0; +} + +void nvme_unlink_ctrl(nvme_ctrl_t c) +{ + list_del_init(&c->entry); + c->s = NULL; +} + +static void __nvme_free_ctrl(nvme_ctrl_t c) +{ + struct nvme_path *p, *_p; + struct nvme_ns *n, *_n; + + nvme_unlink_ctrl(c); + + nvme_ctrl_for_each_path_safe(c, p, _p) + nvme_free_path(p); + + nvme_ctrl_for_each_ns_safe(c, n, _n) + __nvme_free_ns(n); + + nvme_deconfigure_ctrl(c); + + FREE_CTRL_ATTR(c->transport); + FREE_CTRL_ATTR(c->subsysnqn); + FREE_CTRL_ATTR(c->traddr); + FREE_CTRL_ATTR(c->cfg.host_traddr); + FREE_CTRL_ATTR(c->cfg.host_iface); + FREE_CTRL_ATTR(c->trsvcid); + free(c); +} + +void nvme_free_ctrl(nvme_ctrl_t c) +{ + __nvme_free_ctrl(c); +} + +static bool traddr_is_hostname(const char *transport, const char *traddr) +{ + char addrstr[NVMF_TRADDR_SIZE]; + + if (!traddr || !transport) + return false; + if (strcmp(transport, "tcp") && + strcmp(transport, "rdma")) + return false; + if (inet_pton(AF_INET, traddr, addrstr) > 0 || + inet_pton(AF_INET6, traddr, addrstr) > 0) + return false; + return true; +} + +struct nvme_ctrl *nvme_create_ctrl(nvme_root_t r, + const char *subsysnqn, const char *transport, + const char *traddr, const char *host_traddr, + const char *host_iface, const char *trsvcid) +{ + struct nvme_ctrl *c; + + if (!transport) { + nvme_msg(r, LOG_ERR, "No transport specified\n"); + errno = EINVAL; + return NULL; + } + if (strncmp(transport, "loop", 4) && + strncmp(transport, "pcie", 4) && !traddr) { + nvme_msg(r, LOG_ERR, "No transport address for '%s'\n", + transport); + errno = EINVAL; + return NULL; + } + if (!subsysnqn) { + nvme_msg(r, LOG_ERR, "No subsystem NQN specified\n"); + errno = EINVAL; + return NULL; + } + c = calloc(1, sizeof(*c)); + if (!c) { + errno = ENOMEM; + return NULL; + } + c->fd = -1; + nvmf_default_config(&c->cfg); + list_head_init(&c->namespaces); + list_head_init(&c->paths); + list_node_init(&c->entry); + c->transport = strdup(transport); + c->subsysnqn = strdup(subsysnqn); + if (traddr) + c->traddr = strdup(traddr); + if (host_traddr) { + if (traddr_is_hostname(transport, host_traddr)) + c->cfg.host_traddr = hostname2traddr(r, host_traddr); + if (!c->cfg.host_traddr) + c->cfg.host_traddr = strdup(host_traddr); + } + if (host_iface) + c->cfg.host_iface = strdup(host_iface); + if (trsvcid) + c->trsvcid = strdup(trsvcid); + + return c; +} + +nvme_ctrl_t nvme_lookup_ctrl(nvme_subsystem_t s, const char *transport, + const char *traddr, const char *host_traddr, + const char *host_iface, const char *trsvcid, + nvme_ctrl_t p) +{ + nvme_root_t r; + struct nvme_ctrl *c; + + if (!s || !transport) + return NULL; + r = s->h ? s->h->r : NULL; + c = p ? 
nvme_subsystem_next_ctrl(s, p) : nvme_subsystem_first_ctrl(s); + for (; c != NULL; c = nvme_subsystem_next_ctrl(s, c)) { + if (strcmp(c->transport, transport)) + continue; + if (traddr && c->traddr && + strcasecmp(c->traddr, traddr)) + continue; + if (host_traddr && c->cfg.host_traddr && + strcmp(c->cfg.host_traddr, host_traddr)) + continue; + if (host_iface && c->cfg.host_iface && + strcmp(c->cfg.host_iface, host_iface)) + continue; + if (trsvcid && c->trsvcid && + strcmp(c->trsvcid, trsvcid)) + continue; + return c; + } + c = nvme_create_ctrl(r, s->subsysnqn, transport, traddr, + host_traddr, host_iface, trsvcid); + if (c) { + c->s = s; + list_add(&s->ctrls, &c->entry); + s->h->r->modified = true; + } + return c; +} + +static int nvme_ctrl_scan_paths(nvme_root_t r, struct nvme_ctrl *c) +{ + struct dirent **paths; + int i, ret; + + ret = nvme_scan_ctrl_namespace_paths(c, &paths); + if (ret < 0) + return ret; + + for (i = 0; i < ret; i++) + nvme_ctrl_scan_path(r, c, paths[i]->d_name); + + nvme_free_dirents(paths, i); + return 0; +} + +static int nvme_ctrl_scan_namespaces(nvme_root_t r, struct nvme_ctrl *c) +{ + struct dirent **namespaces; + int i, ret; + + ret = nvme_scan_ctrl_namespaces(c, &namespaces); + for (i = 0; i < ret; i++) + nvme_ctrl_scan_namespace(r, c, namespaces[i]->d_name); + + nvme_free_dirents(namespaces, i); + return 0; +} + +static char *nvme_ctrl_lookup_subsystem_name(nvme_root_t r, + const char *ctrl_name) +{ + struct dirent **subsys; + char *subsys_name = NULL; + int ret, i; + char path[PATH_MAX]; + + ret = nvme_scan_subsystems(&subsys); + if (ret < 0) + return NULL; + for (i = 0; i < ret; i++) { + struct stat st; + + sprintf(path, "%s/%s/%s", nvme_subsys_sysfs_dir, + subsys[i]->d_name, ctrl_name); + nvme_msg(r, LOG_DEBUG, "lookup subsystem %s\n", path); + if (stat(path, &st) < 0) + continue; + subsys_name = strdup(subsys[i]->d_name); + break; + } + nvme_free_dirents(subsys, ret); + return subsys_name; +} + +static int nvme_configure_ctrl(nvme_root_t r, nvme_ctrl_t c, const char *path, + const char *name) +{ + DIR *d; + + d = opendir(path); + if (!d) { + nvme_msg(r, LOG_ERR, "Failed to open ctrl dir %s, error %d\n", + path, errno); + errno = ENODEV; + return -1; + } + closedir(d); + + c->fd = -1; + c->name = strdup(name); + c->sysfs_dir = (char *)path; + c->firmware = nvme_get_ctrl_attr(c, "firmware_rev"); + c->model = nvme_get_ctrl_attr(c, "model"); + c->state = nvme_get_ctrl_attr(c, "state"); + c->numa_node = nvme_get_ctrl_attr(c, "numa_node"); + c->queue_count = nvme_get_ctrl_attr(c, "queue_count"); + c->serial = nvme_get_ctrl_attr(c, "serial"); + c->sqsize = nvme_get_ctrl_attr(c, "sqsize"); + c->dhchap_key = nvme_get_ctrl_attr(c, "dhchap_ctrl_secret"); + if (c->dhchap_key && !strcmp(c->dhchap_key, "none")) { + free(c->dhchap_key); + c->dhchap_key = NULL; + } + c->cntrltype = nvme_get_ctrl_attr(c, "cntrltype"); + c->dctype = nvme_get_ctrl_attr(c, "dctype"); + + errno = 0; /* cleanup after nvme_get_ctrl_attr() */ + return 0; +} + +int nvme_init_ctrl(nvme_host_t h, nvme_ctrl_t c, int instance) +{ + nvme_subsystem_t s; + char *subsys_name = NULL; + char *path, *name; + int ret; + + ret = asprintf(&name, "nvme%d", instance); + if (ret < 0) { + errno = ENOMEM; + return -1; + } + ret = asprintf(&path, "%s/nvme%d", nvme_ctrl_sysfs_dir, instance); + if (ret < 0) { + errno = ENOMEM; + goto out_free_name; + } + + ret = nvme_configure_ctrl(h->r, c, path, name); + if (ret < 0) { + free(path); + goto out_free_name; + } + + c->address = nvme_get_attr(path, "address"); + if 
(!c->address && strcmp(c->transport, "loop")) { + errno = ENVME_CONNECT_INVAL_TR; + ret = -1; + goto out_free_name; + } + + subsys_name = nvme_ctrl_lookup_subsystem_name(h->r, name); + if (!subsys_name) { + nvme_msg(h->r, LOG_ERR, + "Failed to lookup subsystem name for %s\n", + c->name); + errno = ENVME_CONNECT_LOOKUP_SUBSYS_NAME; + ret = -1; + goto out_free_name; + } + s = nvme_lookup_subsystem(h, subsys_name, c->subsysnqn); + if (!s) { + errno = ENVME_CONNECT_LOOKUP_SUBSYS; + ret = -1; + goto out_free_subsys; + } + if (s->subsystype && !strcmp(s->subsystype, "discovery")) + c->discovery_ctrl = true; + c->s = s; + list_add(&s->ctrls, &c->entry); +out_free_subsys: + free(subsys_name); + out_free_name: + free(name); + return ret; +} + +static nvme_ctrl_t nvme_ctrl_alloc(nvme_root_t r, nvme_subsystem_t s, + const char *path, const char *name) +{ + nvme_ctrl_t c, p; + char *addr = NULL, *address = NULL, *a, *e; + char *transport, *traddr = NULL, *trsvcid = NULL; + char *host_traddr = NULL, *host_iface = NULL; + int ret; + + transport = nvme_get_attr(path, "transport"); + if (!transport) { + errno = ENXIO; + return NULL; + } + /* Parse 'address' string into components */ + addr = nvme_get_attr(path, "address"); + if (!addr) { + char *rpath = NULL, *p = NULL, *_a = NULL; + + /* loop transport might not have an address */ + if (!strcmp(transport, "loop")) + goto skip_address; + + /* Older kernel don't support pcie transport addresses */ + if (strcmp(transport, "pcie")) { + free(transport); + errno = ENXIO; + return NULL; + } + /* Figure out the PCI address from the attribute path */ + rpath = realpath(path, NULL); + if (!rpath) { + free(transport); + errno = ENOMEM; + return NULL; + } + a = strtok_r(rpath, "/", &e); + while(a && strlen(a)) { + if (_a) + p = _a; + _a = a; + if (!strncmp(a, "nvme", 4)) + break; + a = strtok_r(NULL, "/", &e); + } + if (p) + addr = strdup(p); + free(rpath); + } else if (!strcmp(transport, "pcie")) { + /* The 'address' string is the transport address */ + traddr = addr; + } else { + address = strdup(addr); + a = strtok_r(address, ",", &e); + while (a && strlen(a)) { + if (!strncmp(a, "traddr=", 7)) + traddr = a + 7; + else if (!strncmp(a, "trsvcid=", 8)) + trsvcid = a + 8; + else if (!strncmp(a, "host_traddr=", 12)) + host_traddr = a + 12; + else if (!strncmp(a, "host_iface=", 11)) + host_iface = a + 11; + a = strtok_r(NULL, ",", &e); + } + } +skip_address: + p = NULL; + do { + c = nvme_lookup_ctrl(s, transport, traddr, + host_traddr, host_iface, trsvcid, p); + if (c) { + if (!c->name) + break; + if (!strcmp(c->name, name)) { + nvme_msg(r, LOG_DEBUG, + "found existing ctrl %s\n", c->name); + break; + } + nvme_msg(r, LOG_DEBUG, "skipping ctrl %s\n", c->name); + p = c; + } + } while (c); + if (!c) + c = p; + free(transport); + if (address) + free(address); + if (!c) { + if (!p) { + nvme_msg(r, LOG_ERR, "failed to lookup ctrl\n"); + errno = ENODEV; + } else + errno = ENOMEM; + return NULL; + } + c->address = addr; + if (s->subsystype && !strcmp(s->subsystype, "discovery")) + c->discovery_ctrl = true; + ret = nvme_configure_ctrl(r, c, path, name); + return (ret < 0) ? 
NULL : c; +} + +nvme_ctrl_t nvme_scan_ctrl(nvme_root_t r, const char *name) +{ + nvme_host_t h; + nvme_subsystem_t s; + nvme_ctrl_t c; + char *path; + char *hostnqn, *hostid, *subsysnqn, *subsysname; + int ret; + + nvme_msg(r, LOG_DEBUG, "scan controller %s\n", name); + ret = asprintf(&path, "%s/%s", nvme_ctrl_sysfs_dir, name); + if (ret < 0) { + errno = ENOMEM; + return NULL; + } + + hostnqn = nvme_get_attr(path, "hostnqn"); + hostid = nvme_get_attr(path, "hostid"); + h = nvme_lookup_host(r, hostnqn, hostid); + if (hostnqn) + free(hostnqn); + if (hostid) + free(hostid); + if (h) { + if (h->dhchap_key) + free(h->dhchap_key); + h->dhchap_key = nvme_get_attr(path, "dhchap_secret"); + if (h->dhchap_key && !strcmp(h->dhchap_key, "none")) { + free(h->dhchap_key); + h->dhchap_key = NULL; + } + } + if (!h) { + h = nvme_default_host(r); + if (!h) { + free(path); + errno = ENOMEM; + return NULL; + } + } + + subsysnqn = nvme_get_attr(path, "subsysnqn"); + if (!subsysnqn) { + free(path); + errno = ENXIO; + return NULL; + } + subsysname = nvme_ctrl_lookup_subsystem_name(r, name); + if (!subsysname) { + nvme_msg(r, LOG_ERR, + "failed to lookup subsystem for controller %s\n", + name); + free(path); + errno = ENXIO; + return NULL; + } + s = nvme_lookup_subsystem(h, subsysname, subsysnqn); + free(subsysnqn); + free(subsysname); + + if (!s) { + free(path); + errno = ENOMEM; + return NULL; + } + + c = nvme_ctrl_alloc(r, s, path, name); + if (!c) { + free(path); + return NULL; + } + + nvme_ctrl_scan_namespaces(r, c); + nvme_ctrl_scan_paths(r, c); + return c; +} + +void nvme_rescan_ctrl(struct nvme_ctrl *c) +{ + nvme_root_t r = c->s && c->s->h ? c->s->h->r : NULL; + if (!c->s) + return; + nvme_ctrl_scan_namespaces(r, c); + nvme_ctrl_scan_paths(r, c); + nvme_subsystem_scan_namespaces(r, c->s, NULL, NULL); +} + +static int nvme_bytes_to_lba(nvme_ns_t n, off_t offset, size_t count, + __u64 *lba, __u16 *nlb) +{ + int bs; + + bs = nvme_ns_get_lba_size(n); + if (!count || offset & bs || count & bs) { + errno = EINVAL; + return -1; + } + + *lba = offset >> n->lba_shift; + *nlb = (count >> n->lba_shift) - 1; + return 0; +} + +int nvme_ns_get_fd(nvme_ns_t n) +{ + return n->fd; +} + +nvme_subsystem_t nvme_ns_get_subsystem(nvme_ns_t n) +{ + return n->s; +} + +nvme_ctrl_t nvme_ns_get_ctrl(nvme_ns_t n) +{ + return n->c; +} + +int nvme_ns_get_nsid(nvme_ns_t n) +{ + return n->nsid; +} + +const char *nvme_ns_get_sysfs_dir(nvme_ns_t n) +{ + return n->sysfs_dir; +} + +const char *nvme_ns_get_name(nvme_ns_t n) +{ + return n->name; +} + +const char *nvme_ns_get_generic_name(nvme_ns_t n) +{ + return n->generic_name; +} + +const char *nvme_ns_get_model(nvme_ns_t n) +{ + return n->c ? n->c->model : n->s->model; +} + +const char *nvme_ns_get_serial(nvme_ns_t n) +{ + return n->c ? n->c->serial : n->s->serial; +} + +const char *nvme_ns_get_firmware(nvme_ns_t n) +{ + return n->c ? 
n->c->firmware : n->s->firmware; +} + +int nvme_ns_get_lba_size(nvme_ns_t n) +{ + return n->lba_size; +} + +int nvme_ns_get_meta_size(nvme_ns_t n) +{ + return n->meta_size; +} + +uint64_t nvme_ns_get_lba_count(nvme_ns_t n) +{ + return n->lba_count; +} + +uint64_t nvme_ns_get_lba_util(nvme_ns_t n) +{ + return n->lba_util; +} + +enum nvme_csi nvme_ns_get_csi(nvme_ns_t n) +{ + return n->csi; +} + +const uint8_t *nvme_ns_get_eui64(nvme_ns_t n) +{ + return n->eui64; +} + +const uint8_t *nvme_ns_get_nguid(nvme_ns_t n) +{ + return n->nguid; +} + +void nvme_ns_get_uuid(nvme_ns_t n, uuid_t out) +{ + uuid_copy(out, n->uuid); +} + +int nvme_ns_identify(nvme_ns_t n, struct nvme_id_ns *ns) +{ + return nvme_identify_ns(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), ns); +} + +int nvme_ns_identify_descs(nvme_ns_t n, struct nvme_ns_id_desc *descs) +{ + return nvme_identify_ns_descs(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), descs); +} + +int nvme_ns_verify(nvme_ns_t n, off_t offset, size_t count) +{ + struct nvme_io_args args = { + .args_size = sizeof(args), + .fd = nvme_ns_get_fd(n), + .nsid = nvme_ns_get_nsid(n), + .control = 0, + .dsm = 0, + .dspec = 0, + .reftag = 0, + .apptag = 0, + .appmask = 0, + .storage_tag = 0, + .data_len = 0, + .data = NULL, + .metadata_len = 0, + .metadata = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + if (nvme_bytes_to_lba(n, offset, count, &args.slba, &args.nlb)) + return -1; + + return nvme_verify(&args); +} + +int nvme_ns_write_uncorrectable(nvme_ns_t n, off_t offset, size_t count) +{ + struct nvme_io_args args = { + .args_size = sizeof(args), + .fd = nvme_ns_get_fd(n), + .nsid = nvme_ns_get_nsid(n), + .control = 0, + .dsm = 0, + .dspec = 0, + .reftag = 0, + .apptag = 0, + .appmask = 0, + .storage_tag = 0, + .data_len = 0, + .data = NULL, + .metadata_len = 0, + .metadata = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + if (nvme_bytes_to_lba(n, offset, count, &args.slba, &args.nlb)) + return -1; + + return nvme_write_uncorrectable(&args); +} + +int nvme_ns_write_zeros(nvme_ns_t n, off_t offset, size_t count) +{ + struct nvme_io_args args = { + .args_size = sizeof(args), + .fd = nvme_ns_get_fd(n), + .nsid = nvme_ns_get_nsid(n), + .control = 0, + .dsm = 0, + .dspec = 0, + .reftag = 0, + .apptag = 0, + .appmask = 0, + .storage_tag = 0, + .data_len = 0, + .data = NULL, + .metadata_len = 0, + .metadata = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + if (nvme_bytes_to_lba(n, offset, count, &args.slba, &args.nlb)) + return -1; + + return nvme_write_zeros(&args); +} + +int nvme_ns_write(nvme_ns_t n, void *buf, off_t offset, size_t count) +{ + struct nvme_io_args args = { + .args_size = sizeof(args), + .fd = nvme_ns_get_fd(n), + .nsid = nvme_ns_get_nsid(n), + .control = 0, + .dsm = 0, + .dspec = 0, + .reftag = 0, + .apptag = 0, + .appmask = 0, + .storage_tag = 0, + .data_len = count, + .data = buf, + .metadata_len = 0, + .metadata = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + if (nvme_bytes_to_lba(n, offset, count, &args.slba, &args.nlb)) + return -1; + + return nvme_write(&args); +} + +int nvme_ns_read(nvme_ns_t n, void *buf, off_t offset, size_t count) +{ + struct nvme_io_args args = { + .args_size = sizeof(args), + .fd = nvme_ns_get_fd(n), + .nsid = nvme_ns_get_nsid(n), + .control = 0, + .dsm = 0, + .dspec = 0, + .reftag = 0, + .apptag = 0, + .appmask = 0, + .storage_tag = 0, + .data_len = count, + .data = buf, + .metadata_len = 0, + .metadata = NULL, + .timeout = 
NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + if (nvme_bytes_to_lba(n, offset, count, &args.slba, &args.nlb)) + return -1; + + return nvme_read(&args); +} + +int nvme_ns_compare(nvme_ns_t n, void *buf, off_t offset, size_t count) +{ + struct nvme_io_args args = { + .args_size = sizeof(args), + .fd = nvme_ns_get_fd(n), + .nsid = nvme_ns_get_nsid(n), + .control = 0, + .dsm = 0, + .dspec = 0, + .reftag = 0, + .apptag = 0, + .appmask = 0, + .storage_tag = 0, + .data_len = count, + .data = buf, + .metadata_len = 0, + .metadata = NULL, + .timeout = NVME_DEFAULT_IOCTL_TIMEOUT, + .result = NULL, + }; + + if (nvme_bytes_to_lba(n, offset, count, &args.slba, &args.nlb)) + return -1; + + return nvme_compare(&args); +} + +int nvme_ns_flush(nvme_ns_t n) +{ + return nvme_flush(nvme_ns_get_fd(n), nvme_ns_get_nsid(n)); +} + +static void nvme_ns_parse_descriptors(struct nvme_ns *n, + struct nvme_ns_id_desc *descs) +{ + void *d = descs; + int i, len; + + for (i = 0; i < NVME_IDENTIFY_DATA_SIZE; i += len) { + struct nvme_ns_id_desc *desc = d + i; + + if (!desc->nidl) + break; + len = desc->nidl + sizeof(*desc); + + switch (desc->nidt) { + case NVME_NIDT_EUI64: + memcpy(n->eui64, desc->nid, sizeof(n->eui64)); + break; + case NVME_NIDT_NGUID: + memcpy(n->nguid, desc->nid, sizeof(n->nguid)); + break; + case NVME_NIDT_UUID: + memcpy(n->uuid, desc->nid, sizeof(n->uuid)); + break; + case NVME_NIDT_CSI: + memcpy(&n->csi, desc->nid, sizeof(n->csi)); + break; + } + } +} + +static int nvme_ns_init(struct nvme_ns *n) +{ + struct nvme_id_ns ns = { }; + uint8_t buffer[NVME_IDENTIFY_DATA_SIZE] = { }; + struct nvme_ns_id_desc *descs = (void *)buffer; + uint8_t flbas; + int ret; + + ret = nvme_ns_identify(n, &ns); + if (ret) + return ret; + + nvme_id_ns_flbas_to_lbaf_inuse(ns.flbas, &flbas); + n->lba_shift = ns.lbaf[flbas].ds; + n->lba_size = 1 << n->lba_shift; + n->lba_count = le64_to_cpu(ns.nsze); + n->lba_util = le64_to_cpu(ns.nuse); + n->meta_size = le16_to_cpu(ns.lbaf[flbas].ms); + + if (!nvme_ns_identify_descs(n, descs)) + nvme_ns_parse_descriptors(n, descs); + + return 0; +} + +static void nvme_ns_set_generic_name(struct nvme_ns *n, const char *name) +{ + char generic_name[PATH_MAX]; + int instance, head_instance; + int ret; + + ret = sscanf(name, "nvme%dn%d", &instance, &head_instance); + if (ret != 2) + return; + + sprintf(generic_name, "ng%dn%d", instance, head_instance); + n->generic_name = strdup(generic_name); +} + +static nvme_ns_t nvme_ns_open(const char *name) +{ + struct nvme_ns *n; + + n = calloc(1, sizeof(*n)); + if (!n) { + errno = ENOMEM; + return NULL; + } + + n->name = strdup(name); + n->fd = nvme_open(n->name); + if (n->fd < 0) + goto free_ns; + + nvme_ns_set_generic_name(n, name); + + if (nvme_get_nsid(n->fd, &n->nsid) < 0) + goto close_fd; + + if (nvme_ns_init(n) != 0) + goto close_fd; + + list_head_init(&n->paths); + list_node_init(&n->entry); + + return n; + +close_fd: + close(n->fd); +free_ns: + free(n->name); + free(n); + return NULL; +} + +static struct nvme_ns *__nvme_scan_namespace(const char *sysfs_dir, const char *name) +{ + struct nvme_ns *n; + char *path; + int ret; + + ret = asprintf(&path, "%s/%s", sysfs_dir, name); + if (ret < 0) { + errno = ENOMEM; + return NULL; + } + + n = nvme_ns_open(name); + if (!n) + goto free_path; + + n->sysfs_dir = path; + return n; + +free_path: + free(path); + return NULL; +} + +nvme_ns_t nvme_scan_namespace(const char *name) +{ + return __nvme_scan_namespace(nvme_ns_sysfs_dir, name); +} + +static int nvme_ctrl_scan_namespace(nvme_root_t r, struct 
nvme_ctrl *c, + char *name) +{ + struct nvme_ns *n; + + nvme_msg(r, LOG_DEBUG, "scan controller %s namespace %s\n", + c->name, name); + if (!c->s) { + nvme_msg(r, LOG_DEBUG, "no subsystem for %s\n", name); + errno = EINVAL; + return -1; + } + n = __nvme_scan_namespace(c->sysfs_dir, name); + if (!n) { + nvme_msg(r, LOG_DEBUG, "failed to scan namespace %s\n", name); + return -1; + } + + n->s = c->s; + n->c = c; + list_add(&c->namespaces, &n->entry); + return 0; +} + +static void nvme_subsystem_set_ns_path(nvme_subsystem_t s, nvme_ns_t n) +{ + nvme_ctrl_t c; + nvme_path_t p; + int ns_ctrl, ns_nsid, ret; + + ret = sscanf(nvme_ns_get_name(n), "nvme%dn%d", &ns_ctrl, &ns_nsid); + if (ret != 2) + return; + + nvme_subsystem_for_each_ctrl(s, c) { + nvme_ctrl_for_each_path(c, p) { + int p_subsys, p_ctrl, p_nsid; + + ret = sscanf(nvme_path_get_name(p), "nvme%dc%dn%d", + &p_subsys, &p_ctrl, &p_nsid); + if (ret != 3) + continue; + if (ns_ctrl == p_subsys && ns_nsid == p_nsid) { + list_add(&n->paths, &p->nentry); + p->n = n; + } + } + } +} + +static int nvme_subsystem_scan_namespace(nvme_root_t r, nvme_subsystem_t s, + char *name, nvme_scan_filter_t f, void *f_args) +{ + struct nvme_ns *n; + + nvme_msg(r, LOG_DEBUG, "scan subsystem %s namespace %s\n", + s->name, name); + n = __nvme_scan_namespace(s->sysfs_dir, name); + if (!n) { + nvme_msg(r, LOG_DEBUG, "failed to scan namespace %s\n", name); + return -1; + } + if (f && !f(NULL, NULL, n, f_args)) { + nvme_msg(r, LOG_DEBUG, "filter out namespace %s\n", name); + __nvme_free_ns(n); + return 0; + } + n->s = s; + list_add(&s->namespaces, &n->entry); + nvme_subsystem_set_ns_path(s, n); + return 0; +} + +struct nvme_ns *nvme_subsystem_lookup_namespace(struct nvme_subsystem *s, + __u32 nsid) +{ + nvme_root_t r = s->h ? s->h->r : NULL; + struct nvme_ns *n; + char *name; + int ret; + + ret = asprintf(&name, "%sn%u", s->name, nsid); + if (ret < 0) + return NULL; + n = __nvme_scan_namespace(s->sysfs_dir, name); + free(name); + if (!n) { + nvme_msg(r, LOG_DEBUG, "failed to scan namespace %d\n", nsid); + return NULL; + } + + n->s = s; + list_add(&s->namespaces, &n->entry); + return n; +} diff --git a/src/nvme/tree.h b/src/nvme/tree.h new file mode 100644 index 0000000..9f7a621 --- /dev/null +++ b/src/nvme/tree.h @@ -0,0 +1,1231 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#ifndef _LIBNVME_TREE_H +#define _LIBNVME_TREE_H + +#include <stdio.h> +#include <stdbool.h> +#include <stddef.h> + +#include <sys/types.h> +#include <uuid/uuid.h> + +#include "ioctl.h" +#include "util.h" + +/** + * DOC: tree.h + * + * libnvme tree object interface + */ + +typedef struct nvme_ns *nvme_ns_t; +typedef struct nvme_path *nvme_path_t; +typedef struct nvme_ctrl *nvme_ctrl_t; +typedef struct nvme_subsystem *nvme_subsystem_t; +typedef struct nvme_host *nvme_host_t; +typedef struct nvme_root *nvme_root_t; + +typedef bool (*nvme_scan_filter_t)(nvme_subsystem_t, nvme_ctrl_t, + nvme_ns_t, void *); + +/** + * nvme_create_root() - Initialize root object + * @fp: File descriptor for logging messages + * @log_level: Logging level to use + * + * Return: Initialized &nvme_root_t object + */ +nvme_root_t nvme_create_root(FILE *fp, int log_level); + +/** + * nvme_free_tree() - Free root object + * @r: &nvme_root_t object + * + * Free an &nvme_root_t object and all attached objects + */ +void nvme_free_tree(nvme_root_t r); + +/** + * nvme_first_host() - Start host iterator + * @r: &nvme_root_t object + * + * Return: First &nvme_host_t object in an iterator + */ +nvme_host_t nvme_first_host(nvme_root_t r); + +/** + * nvme_next_host() - Next host iterator + * @r: &nvme_root_t object + * @h: Previous &nvme_host_t iterator + * + * Return: Next &nvme_host_t object in an iterator + */ +nvme_host_t nvme_next_host(nvme_root_t r, nvme_host_t h); + +/** + * nvme_host_get_root() - Returns nvme_root_t object + * @h: &nvme_host_t object + * + * Return: &nvme_root_t object from @h + */ +nvme_root_t nvme_host_get_root(nvme_host_t h); + +/** + * nvme_lookup_host() - Lookup nvme_host_t object + * @r: &nvme_root_t object + * @hostnqn: Host NQN + * @hostid: Host ID + * + * Lookup a nvme_host_t object based on @hostnqn and @hostid + * or create one if not found. + * + * Return: &nvme_host_t object + */ +nvme_host_t nvme_lookup_host(nvme_root_t r, const char *hostnqn, + const char *hostid); + +/** + * nvme_host_get_dhchap_key() - Return host key + * @h: Host for which the key should be returned + * + * Return: DH-HMAC-CHAP host key or NULL if not set + */ +const char *nvme_host_get_dhchap_key(nvme_host_t h); + +/** + * nvme_host_set_dhchap_key() - set host key + * @h: Host for which the key should be set + * @key: DH-HMAC-CHAP Key to set or NULL to clear existing key + */ +void nvme_host_set_dhchap_key(nvme_host_t h, const char *key); + +/** + * nvme_default_host() - Initializes the default host + * @r: &nvme_root_t object + * + * Initializes the default host object based on the values in + * /etc/nvme/hostnqn and /etc/nvme/hostid and attaches it to @r. 
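+ * If no hostnqn can be read from /etc/nvme/hostnqn, one is generated via nvmf_hostnqn_generate().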
+ * + * Return: &nvme_host_t object + */ +nvme_host_t nvme_default_host(nvme_root_t r); + +/** + * nvme_first_subsystem() - Start subsystem iterator + * @h: &nvme_host_t object + * + * Return: first &nvme_subsystem_t object in an iterator + */ +nvme_subsystem_t nvme_first_subsystem(nvme_host_t h); + +/** + * nvme_next_subsystem() - Next subsystem iterator + * @h: &nvme_host_t object + * @s: Previous &nvme_subsystem_t iterator + * + * Return: next &nvme_subsystem_t object in an iterator + */ +nvme_subsystem_t nvme_next_subsystem(nvme_host_t h, nvme_subsystem_t s); + +/** + * nvme_lookup_subsystem() - Lookup nvme_subsystem_t object + * @h: &nvme_host_t object + * @name: Name of the subsystem (may be NULL) + * @subsysnqn: Subsystem NQN + * + * Lookup a &nvme_subsystem_t object in @h base on @name (if present) + * and @subsystemnqn or create one if not found. + * + * Return: nvme_subsystme_t object + */ +nvme_subsystem_t nvme_lookup_subsystem(struct nvme_host *h, + const char *name, + const char *subsysnqn); + +/** + * nvme_free_subsystem() - Free a subsystem + * @s: subsystem + * + * Frees @s and all related objects. + */ +void nvme_free_subsystem(struct nvme_subsystem *s); + +/** + * nvme_subsystem_get_host() - Returns nvme_host_t object + * @s: subsystem + * + * Return: &nvme_host_t object from @s + */ +nvme_host_t nvme_subsystem_get_host(nvme_subsystem_t s); + +/** + * nvme_ctrl_first_ns() - Start namespace iterator + * @c: Controller instance + * + * Return: First &nvme_ns_t object of an @c iterator + */ +nvme_ns_t nvme_ctrl_first_ns(nvme_ctrl_t c); + +/** + * nvme_ctrl_next_ns() - Next namespace iterator + * @c: Controller instance + * @n: Previous nvme_ns_t iterator + * + * Return: Next nvme_ns_t object of an @c iterator + */ +nvme_ns_t nvme_ctrl_next_ns(nvme_ctrl_t c, nvme_ns_t n); + +/** + * nvme_ctrl_first_path() - Start path iterator + * @c: Controller instance + * + * Return: First &nvme_path_t object of an @c iterator + */ +nvme_path_t nvme_ctrl_first_path(nvme_ctrl_t c); + +/** + * nvme_ctrl_next_path() - Next path iterator + * @c: Controller instance + * @p: Previous &nvme_path_t object of an @c iterator + * + * Return: Next &nvme_path_t object of an @c iterator + */ +nvme_path_t nvme_ctrl_next_path(nvme_ctrl_t c, nvme_path_t p); + +/** + * nvme_subsystem_first_ctrl() - First ctrl iterator + * @s: &nvme_subsystem_t object + * + * Return: First controller of an @s iterator + */ +nvme_ctrl_t nvme_subsystem_first_ctrl(nvme_subsystem_t s); + +/** + * nvme_subsystem_next_ctrl() - Next ctrl iterator + * @s: &nvme_subsystem_t object + * @c: Previous controller instance of an @s iterator + * + * Return: Next controller of an @s iterator + */ +nvme_ctrl_t nvme_subsystem_next_ctrl(nvme_subsystem_t s, nvme_ctrl_t c); + +/** + * nvme_namespace_first_path() - Start path iterator + * @ns: Namespace instance + * + * Return: First &nvme_path_t object of an @ns iterator + */ +nvme_path_t nvme_namespace_first_path(nvme_ns_t ns); + +/** + * nvme_namespace_next_path() - Next path iterator + * @ns: Namespace instance + * @p: Previous &nvme_path_t object of an @ns iterator + * + * Return: Next &nvme_path_t object of an @ns iterator + */ +nvme_path_t nvme_namespace_next_path(nvme_ns_t c, nvme_path_t p); + +/** + * nvme_lookup_ctrl() - Lookup nvme_ctrl_t object + * @s: &nvme_subsystem_t object + * @transport: Transport name + * @traddr: Transport address + * @host_traddr: Host transport address + * @host_iface: Host interface name + * @trsvcid: Transport service identifier + * @p: Previous controller 
instance + * + * Lookup a controller in @s based on @transport, @traddr, + * @host_traddr, @host_iface, and @trsvcid. @transport must be specified, + * other fields may be required depending on the transport. A new + * object is created if none is found. If @p is specified the lookup + * will start at @p instead of the first controller. + * + * Return: Controller instance + */ +nvme_ctrl_t nvme_lookup_ctrl(nvme_subsystem_t s, const char *transport, + const char *traddr, const char *host_traddr, + const char *host_iface, const char *trsvcid, + nvme_ctrl_t p); + + +/** + * nvme_create_ctrl() - Allocate an unconnected NVMe controller + * @r: NVMe root element + * @subsysnqn: Subsystem NQN + * @transport: Transport type + * @traddr: Transport address + * @host_traddr: Host transport address + * @host_iface: Host interface name + * @trsvcid: Transport service ID + * + * Creates an unconnected controller to be used for nvme_add_ctrl(). + * + * Return: Controller instance + */ +nvme_ctrl_t nvme_create_ctrl(nvme_root_t r, + const char *subsysnqn, const char *transport, + const char *traddr, const char *host_traddr, + const char *host_iface, const char *trsvcid); + + +/** + * nvme_subsystem_first_ns() - Start namespace iterator + * @s: &nvme_subsystem_t object + * + * Return: First &nvme_ns_t object of an @s iterator + */ +nvme_ns_t nvme_subsystem_first_ns(nvme_subsystem_t s); + +/** + * nvme_subsystem_next_ns() - Next namespace iterator + * @s: &nvme_subsystem_t object + * @n: Previous &nvme_ns_t iterator + * + * Return: Next &nvme_ns_t object of an @s iterator + */ +nvme_ns_t nvme_subsystem_next_ns(nvme_subsystem_t s, nvme_ns_t n); + +/** + * nvme_for_each_host_safe() - Traverse host list + * @r: &nvme_root_t object + * @h: &nvme_host_t object + * @_h: Temporary &nvme_host_t object + */ +#define nvme_for_each_host_safe(r, h, _h) \ + for (h = nvme_first_host(r), \ + _h = nvme_next_host(r, h); \ + h != NULL; \ + h = _h, _h = nvme_next_host(r, h)) + +/** + * nvme_for_each_host() - Traverse host list + * @r: &nvme_root_t object + * @h: &nvme_host_t object + */ +#define nvme_for_each_host(r, h) \ + for (h = nvme_first_host(r); h != NULL; \ + h = nvme_next_host(r, h)) + +/** + * nvme_for_each_subsystem_safe() - Traverse subsystems + * @h: &nvme_host_t object + * @s: &nvme_subsystem_t object + * @_s: Temporary &nvme_subsystem_t object + */ +#define nvme_for_each_subsystem_safe(h, s, _s) \ + for (s = nvme_first_subsystem(h), \ + _s = nvme_next_subsystem(h, s); \ + s != NULL; \ + s = _s, _s = nvme_next_subsystem(h, s)) + +/** + * nvme_for_each_subsystem() - Traverse subsystems + * @h: &nvme_host_t object + * @s: &nvme_subsystem_t object + */ +#define nvme_for_each_subsystem(h, s) \ + for (s = nvme_first_subsystem(h); s != NULL; \ + s = nvme_next_subsystem(h, s)) + +/** + * nvme_subsystem_for_each_ctrl_safe() - Traverse controllers + * @s: &nvme_subsystem_t object + * @c: Controller instance + * @_c: A &nvme_ctrl_t_node to use as temporary storage + */ +#define nvme_subsystem_for_each_ctrl_safe(s, c, _c) \ + for (c = nvme_subsystem_first_ctrl(s), \ + _c = nvme_subsystem_next_ctrl(s, c); \ + c != NULL; \ + c = _c, _c = nvme_subsystem_next_ctrl(s, c)) + +/** + * nvme_subsystem_for_each_ctrl() - Traverse controllers + * @s: &nvme_subsystem_t object + * @c: Controller instance + */ +#define nvme_subsystem_for_each_ctrl(s, c) \ + for (c = nvme_subsystem_first_ctrl(s); c != NULL; \ + c = nvme_subsystem_next_ctrl(s, c)) + +/** + * nvme_ctrl_for_each_ns_safe() - Traverse namespaces + * @c: Controller instance + * 
@n: &nvme_ns_t object + * @_n: A &nvme_ns_t_node to use as temporary storage + */ +#define nvme_ctrl_for_each_ns_safe(c, n, _n) \ + for (n = nvme_ctrl_first_ns(c), \ + _n = nvme_ctrl_next_ns(c, n); \ + n != NULL; \ + n = _n, _n = nvme_ctrl_next_ns(c, n)) + +/** + * nvme_ctrl_for_each_ns() - Traverse namespaces + * @c: Controller instance + * @n: &nvme_ns_t object + */ +#define nvme_ctrl_for_each_ns(c, n) \ + for (n = nvme_ctrl_first_ns(c); n != NULL; \ + n = nvme_ctrl_next_ns(c, n)) + +/** + * nvme_ctrl_for_each_path_safe() - Traverse paths + * @c: Controller instance + * @p: &nvme_path_t object + * @_p: A &nvme_path_t_node to use as temporary storage + */ +#define nvme_ctrl_for_each_path_safe(c, p, _p) \ + for (p = nvme_ctrl_first_path(c), \ + _p = nvme_ctrl_next_path(c, p); \ + p != NULL; \ + p = _p, _p = nvme_ctrl_next_path(c, p)) + +/** + * nvme_ctrl_for_each_path() - Traverse paths + * @c: Controller instance + * @p: &nvme_path_t object + */ +#define nvme_ctrl_for_each_path(c, p) \ + for (p = nvme_ctrl_first_path(c); p != NULL; \ + p = nvme_ctrl_next_path(c, p)) + +/** + * nvme_subsystem_for_each_ns_safe() - Traverse namespaces + * @s: &nvme_subsystem_t object + * @n: &nvme_ns_t object + * @_n: A &nvme_ns_t_node to use as temporary storage + */ +#define nvme_subsystem_for_each_ns_safe(s, n, _n) \ + for (n = nvme_subsystem_first_ns(s), \ + _n = nvme_subsystem_next_ns(s, n); \ + n != NULL; \ + n = _n, _n = nvme_subsystem_next_ns(s, n)) + +/** + * nvme_subsystem_for_each_ns() - Traverse namespaces + * @s: &nvme_subsystem_t object + * @n: &nvme_ns_t object + */ +#define nvme_subsystem_for_each_ns(s, n) \ + for (n = nvme_subsystem_first_ns(s); n != NULL; \ + n = nvme_subsystem_next_ns(s, n)) + +/** + * nvme_namespace_for_each_path_safe() - Traverse paths + * @ns: Namespace instance + * @p: &nvme_path_t object + * @_p: A &nvme_path_t_node to use as temporary storage + */ +#define nvme_namespace_for_each_path_safe(n, p, _p) \ + for (p = nvme_namespace_first_path(n), \ + _p = nvme_namespace_next_path(n, p); \ + p != NULL; \ + p = _p, _p = nvme_namespace_next_path(n, p)) + +/** + * nvme_namespace_for_each_path() - Traverse paths + * @ns: Namespace instance + * @p: &nvme_path_t object + */ +#define nvme_namespace_for_each_path(c, p) \ + for (p = nvme_namespace_first_path(c); p != NULL; \ + p = nvme_namespace_next_path(c, p)) + +/** + * nvme_ns_get_fd() - Get associated filedescriptor + * @n: Namespace instance + * + * Return: File descriptor associated with @n or -1 + */ +int nvme_ns_get_fd(nvme_ns_t n); + +/** + * nvme_ns_get_nsid() - NSID of a namespace + * @n: Namespace instance + * + * Return: NSID of @n + */ +int nvme_ns_get_nsid(nvme_ns_t n); + +/** + * nvme_ns_get_lba_size() - LBA size of a namespace + * @n: Namespace instance + * + * Return: LBA size of @n + */ +int nvme_ns_get_lba_size(nvme_ns_t n); + +/** + * nvme_ns_get_meta_size() - Metadata size of a namespace + * @n: Namespace instance + * + * Return: Metadata size of @n + */ +int nvme_ns_get_meta_size(nvme_ns_t n); + +/** + * nvme_ns_get_lba_count() - LBA count of a namespace + * @n: Namespace instance + * + * Return: LBA count of @n + */ +uint64_t nvme_ns_get_lba_count(nvme_ns_t n); + +/** + * nvme_ns_get_lba_util() - LBA utilisation of a namespace + * @n: Namespace instance + * + * Return: LBA utilisation of @n + */ +uint64_t nvme_ns_get_lba_util(nvme_ns_t n); + +/** + * nvme_ns_get_csi() - Command set identifier of a namespace + * @n: Namespace instance + * + * Return: The namespace's command set identifier in use + */ +enum 
nvme_csi nvme_ns_get_csi(nvme_ns_t n); + +/** + * nvme_ns_get_eui64() - 64-bit eui of a namespace + * @n: Namespace instance + * + * Return: A pointer to the 64-bit eui + */ +const uint8_t *nvme_ns_get_eui64(nvme_ns_t n); + +/** + * nvme_ns_get_nguid() - 128-bit nguid of a namespace + * @n: Namespace instance + * + * Return: A pointer to the 128-bit nguid + */ +const uint8_t *nvme_ns_get_nguid(nvme_ns_t n); + +/** + * nvme_ns_get_uuid() - UUID of a namespace + * @n: Namespace instance + * @out: buffer for the UUID + * + * Copies the namespace's uuid into @out + */ +void nvme_ns_get_uuid(nvme_ns_t n, uuid_t out); + +/** + * nvme_ns_get_sysfs_dir() - sysfs directory of a namespace + * @n: Namespace instance + * + * Return: sysfs directory name of @n + */ +const char *nvme_ns_get_sysfs_dir(nvme_ns_t n); + +/** + * nvme_ns_get_name() - sysfs name of a namespace + * @n: Namespace instance + * + * Return: sysfs name of @n + */ +const char *nvme_ns_get_name(nvme_ns_t n); + +/** + * nvme_ns_get_generic_name() - Returns name of generic namespace chardev. + * @n: Namespace instance + * + * Return: Name of generic namespace chardev + */ +const char *nvme_ns_get_generic_name(nvme_ns_t n); + +/** + * nvme_ns_get_firmware() - Firmware string of a namespace + * @n: Namespace instance + * + * Return: Firmware string of @n + */ +const char *nvme_ns_get_firmware(nvme_ns_t n); + +/** + * nvme_ns_get_serial() - Serial number of a namespace + * @n: Namespace instance + * + * Return: Serial number string of @n + */ +const char *nvme_ns_get_serial(nvme_ns_t n); + +/** + * nvme_ns_get_model() - Model of a namespace + * @n: Namespace instance + * + * Return: Model string of @n + */ +const char *nvme_ns_get_model(nvme_ns_t n); + +/** + * nvme_ns_get_subsystem() - &nvme_subsystem_t of a namespace + * @n: Namespace instance + * + * Return: nvme_subsystem_t object of @n + */ +nvme_subsystem_t nvme_ns_get_subsystem(nvme_ns_t n); + +/** + * nvme_ns_get_ctrl() - &nvme_ctrl_t of a namespace + * @n: Namespace instance + * + * nvme_ctrl_t object may be NULL for a multipathed namespace + * + * Return: nvme_ctrl_t object of @n if present + */ +nvme_ctrl_t nvme_ns_get_ctrl(nvme_ns_t n); + +/** + * nvme_free_ns() - Free a namespace object + * @n: Namespace instance + */ +void nvme_free_ns(struct nvme_ns *n); + +/** + * nvme_ns_read() - Read from a namespace + * @n: Namespace instance + * @buf: Buffer into which the data will be transferred + * @offset: LBA offset of @n + * @count: Number of sectors in @buf + * + * Return: Number of sectors read or -1 on error. 
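+ *
+ * A usage sketch (editor's illustration; assumes @n was obtained from a
+ * scanned tree and that <stdlib.h> is available for malloc()):
+ *
+ *	void *buf = malloc(nvme_ns_get_lba_size(n));
+ *
+ *	if (buf && nvme_ns_read(n, buf, 0, 1) < 0)
+ *		fprintf(stderr, "reading LBA 0 of %s failed\n",
+ *			nvme_ns_get_name(n));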
+ */ +int nvme_ns_read(nvme_ns_t n, void *buf, off_t offset, size_t count); + +/** + * nvme_ns_write() - Write to a namespace + * @n: Namespace instance + * @buf: Buffer with data to be written + * @offset: LBA offset of @n + * @count: Number of sectors in @buf + * + * Return: Number of sectors written or -1 on error + */ +int nvme_ns_write(nvme_ns_t n, void *buf, off_t offset, size_t count); + +/** + * nvme_ns_verify() - Verify data on a namespace + * @n: Namespace instance + * @offset: LBA offset of @n + * @count: Number of sectors to be verified + * + * Return: Number of sectors verified + */ +int nvme_ns_verify(nvme_ns_t n, off_t offset, size_t count); + +/** + * nvme_ns_compare() - Compare data on a namespace + * @n: Namespace instance + * @buf: Buffer with data to be compared + * @offset: LBA offset of @n + * @count: Number of sectors in @buf + * + * Return: Number of sectors compared + */ +int nvme_ns_compare(nvme_ns_t n, void *buf, off_t offset, size_t count); + +/** + * nvme_ns_write_zeros() - Write zeros to a namespace + * @n: Namespace instance + * @offset: LBA offset in @n + * @count: Number of sectors to be written + * + * Return: Number of sectors written + */ +int nvme_ns_write_zeros(nvme_ns_t n, off_t offset, size_t count); + +/** + * nvme_ns_write_uncorrectable() - Issus a 'write uncorrectable' command + * @n: Namespace instance + * @offset: LBA offset in @n + * @count: Number of sectors to be written + * + * Return: Number of sectors written + */ +int nvme_ns_write_uncorrectable(nvme_ns_t n, off_t offset, size_t count); + +/** + * nvme_ns_flush() - Flush data to a namespace + * @n: Namespace instance + * + * Return: 0 on success, -1 on error. + */ +int nvme_ns_flush(nvme_ns_t n); + +/** + * nvme_ns_identify() - Issue an 'identify namespace' command + * @n: Namespace instance + * @ns: &nvme_id_ns buffer + * + * Writes the data returned by the 'identify namespace' command + * into @ns. + * + * Return: 0 on success, -1 on error. + */ +int nvme_ns_identify(nvme_ns_t n, struct nvme_id_ns *ns); + +/** + * nvme_ns_identify_descs() - Issue an 'identify descriptors' command + * @n: Namespace instance + * @descs: List of identify descriptors + * + * Writes the data returned by the 'identify descriptors' command + * into @descs. + * + * Return: 0 on success, -1 on error. 
+ */ +int nvme_ns_identify_descs(nvme_ns_t n, struct nvme_ns_id_desc *descs); + +/** + * nvme_path_get_name() - sysfs name of an &nvme_path_t object + * @p: &nvme_path_t object + * + * Return: sysfs name of @p + */ +const char *nvme_path_get_name(nvme_path_t p); + +/** + * nvme_path_get_sysfs_dir() - sysfs directory of an nvme_path_t object + * @p: &nvme_path_t object + * + * Return: sysfs directory of @p + */ +const char *nvme_path_get_sysfs_dir(nvme_path_t p); + +/** + * nvme_path_get_ana_state() - ANA state of an nvme_path_t object + * @p: &nvme_path_t object + * + * Return: ANA (Asynchronous Namespace Access) state of @p + */ +const char *nvme_path_get_ana_state(nvme_path_t p); + +/** + * nvme_path_get_ctrl() - Parent controller of an nvme_path_t object + * @p: &nvme_path_t object + * + * Return: Parent controller if present + */ +nvme_ctrl_t nvme_path_get_ctrl(nvme_path_t p); + +/** + * nvme_path_get_ns() - Parent namespace of an nvme_path_t object + * @p: &nvme_path_t object + * + * Return: Parent namespace if present + */ +nvme_ns_t nvme_path_get_ns(nvme_path_t p); + +/** + * nvme_ctrl_get_fd() - Get associated file descriptor + * @c: Controller instance + * + * Return: File descriptor associated with @c or -1 + */ +int nvme_ctrl_get_fd(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_name() - sysfs name of a controller + * @c: Controller instance + * + * Return: sysfs name of @c + */ +const char *nvme_ctrl_get_name(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_sysfs_dir() - sysfs directory of a controller + * @c: Controller instance + * + * Return: sysfs directory name of @c + */ +const char *nvme_ctrl_get_sysfs_dir(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_address() - Address string of a controller + * @c: Controller instance + * + * Return: NVMe-over-Fabrics address string of @c or empty string + * of no address is present. 
+ */ +const char *nvme_ctrl_get_address(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_firmware() - Firmware string of a controller + * @c: Controller instance + * + * Return: Firmware string of @c + */ +const char *nvme_ctrl_get_firmware(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_model() - Model of a controller + * @c: Controller instance + * + * Return: Model string of @c + */ +const char *nvme_ctrl_get_model(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_state() - Running state of an controller + * @c: Controller instance + * + * Return: String indicating the running state of @c + */ +const char *nvme_ctrl_get_state(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_numa_node() - NUMA node of a controller + * @c: Controller instance + * + * Return: String indicating the NUMA node + */ +const char *nvme_ctrl_get_numa_node(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_queue_count() - Queue count of a controller + * @c: Controller instance + * + * Return: Queue count of @c + */ +const char *nvme_ctrl_get_queue_count(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_serial() - Serial number of a controller + * @c: Conroller instance + * + * Return: Serial number string of @c + */ +const char *nvme_ctrl_get_serial(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_sqsize() - SQ size of a controller + * @c: Controller instance + * + * Return: SQ size (as string) of @c + */ +const char *nvme_ctrl_get_sqsize(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_transport() - Transport type of a controller + * @c: Controller instance + * + * Return: Transport type of @c + */ +const char *nvme_ctrl_get_transport(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_subsysnqn() - Subsystem NQN of a controller + * @c: Controller instance + * + * Return: Subsystem NQN of @c + */ +const char *nvme_ctrl_get_subsysnqn(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_subsystem() - Parent subsystem of a controller + * @c: Controller instance + * + * Return: Parent nvme_subsystem_t object + */ +nvme_subsystem_t nvme_ctrl_get_subsystem(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_traddr() - Transport address of a controller + * @c: Controller instance + * + * Return: Transport address of @c + */ +const char *nvme_ctrl_get_traddr(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_trsvcid() - Transport service identifier of a controller + * @c: Controller instance + * + * Return: Transport service identifier of @c (if present) + */ +const char *nvme_ctrl_get_trsvcid(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_host_traddr() - Host transport address of a controller + * @c: Controller instance + * + * Return: Host transport address of @c (if present) + */ +const char *nvme_ctrl_get_host_traddr(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_host_iface() - Host interface name of a controller + * @c: Controller instance + * + * Return: Host interface name of @c (if present) + */ +const char *nvme_ctrl_get_host_iface(nvme_ctrl_t c); + +/** + * nvme_ctrl_get_dhchap_key() - Return controller key + * @c: Controller for which the key should be set + * + * Return: DH-HMAC-CHAP controller key or NULL if not set + */ +const char *nvme_ctrl_get_dhchap_key(nvme_ctrl_t c); + +/** + * nvme_ctrl_set_dhchap_key() - Set controller key + * @c: Controller for which the key should be set + * @key: DH-HMAC-CHAP Key to set or NULL to clear existing key + */ +void nvme_ctrl_set_dhchap_key(nvme_ctrl_t c, const char *key); + +/** + * nvme_ctrl_get_config() - Fabrics configuration of a controller + * @c: Controller instance + * + * Return: Fabrics configuration of @c + */ +struct nvme_fabrics_config *nvme_ctrl_get_config(nvme_ctrl_t c); + 
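+/*
+ * Example (editor's sketch, not part of the upstream API surface): look up a
+ * controller within a subsystem and inspect its fabrics configuration before
+ * connecting. The transport, address and service id below are placeholder
+ * values only.
+ *
+ *	nvme_ctrl_t c = nvme_lookup_ctrl(s, "tcp", "192.168.0.2",
+ *					 NULL, NULL, "4420", NULL);
+ *	struct nvme_fabrics_config *cfg = c ? nvme_ctrl_get_config(c) : NULL;
+ */
+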
+/** + * nvme_ctrl_set_discovered() - Set the 'discovered' flag + * @c: nvme_ctrl_t object + * @discovered: Value of the 'discovered' flag + * + * Set the 'discovered' flag of @c to @discovered + */ +void nvme_ctrl_set_discovered(nvme_ctrl_t c, bool discovered); + +/** + * nvme_ctrl_is_discovered() - Returns the value of the 'discovered' flag + * @c: Controller instance + * + * Return: Value of the 'discovered' flag of @c + */ +bool nvme_ctrl_is_discovered(nvme_ctrl_t c); + +/** + * nvme_ctrl_set_persistent() - Set the 'persistent' flag + * @c: Controller instance + * @persistent: value of the 'persistent' flag + * + * Set the 'persistent' flag of @c to @persistent + */ +void nvme_ctrl_set_persistent(nvme_ctrl_t c, bool persistent); + +/** + * nvme_ctrl_is_persistent() - Returns the value of the 'persistent' flag + * @c: Controller instance + * + * Return: Value of the 'persistent' flag of @c + */ +bool nvme_ctrl_is_persistent(nvme_ctrl_t c); + +/** + * nvme_ctrl_set_discovery_ctrl() - Set the 'discovery_ctrl' flag + * @c: Controller to be modified + * @discovery: value of the discovery_ctrl flag + * + * Sets the 'discovery_ctrl' flag in @c to specify whether + * @c connects to a discovery subsystem. + * + */ +void nvme_ctrl_set_discovery_ctrl(nvme_ctrl_t c, bool discovery); + +/** + * nvme_ctrl_is_discovery_ctrl() - Check the 'discovery_ctrl' flag + * @c: Controller to be checked + * + * Returns the value of the 'discovery_ctrl' flag which specifies whether + * @c connects to a discovery subsystem. + * + * Return: Value of the 'discover_ctrl' flag + */ +bool nvme_ctrl_is_discovery_ctrl(nvme_ctrl_t c); + +/** + * nvme_ctrl_identify() - Issues an 'identify controller' command + * @c: Controller instance + * @id: Identify controller data structure + * + * Issues an 'identify controller' command to @c and copies the + * data into @id. + * + * Return: 0 on success or -1 on failure. + */ +int nvme_ctrl_identify(nvme_ctrl_t c, struct nvme_id_ctrl *id); + +/** + * nvme_disconnect_ctrl() - Disconnect a controller + * @c: Controller instance + * + * Issues a 'disconnect' fabrics command to @c + * + * Return: 0 on success, -1 on failure. + */ +int nvme_disconnect_ctrl(nvme_ctrl_t c); + +/** + * nvme_scan_ctrl() - Scan on a controller + * @r: nvme_root_t object + * @name: Name of the controller + * + * Scans a controller with sysfs name @name and add it to @r. + * + * Return: nvme_ctrl_t object + */ +nvme_ctrl_t nvme_scan_ctrl(nvme_root_t r, const char *name); + +/** + * nvme_rescan_ctrl() - Rescan an existing controller + * @c: Controller instance + */ +void nvme_rescan_ctrl(nvme_ctrl_t c); + +/** + * nvme_init_ctrl() - Initialize nvme_ctrl_t object for an existing controller. + * @h: nvme_host_t object + * @c: nvme_ctrl_t object + * @instance: Instance number (e.g. 1 for nvme1) + * + * Return: The ioctl() return code. Typically 0 on success. 
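+ *
+ * Example (editor's sketch): bind a previously allocated controller object
+ * @c to the existing controller instance nvme0 for host @h:
+ *
+ *	if (nvme_init_ctrl(h, c, 0) != 0)
+ *		fprintf(stderr, "failed to initialize nvme0\n");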
+ */ +int nvme_init_ctrl(nvme_host_t h, nvme_ctrl_t c, int instance); + +/** + * nvme_free_ctrl() - Free controller + * @c: Controller instance + */ +void nvme_free_ctrl(struct nvme_ctrl *c); + +/** + * nvme_unlink_ctrl() - Unlink controller + * @c: Controller instance + */ +void nvme_unlink_ctrl(struct nvme_ctrl *c); + +/** + * nvme_subsystem_get_nqn() - Retrieve NQN from subsystem + * @s: nvme_subsystem_t object + * + * Return: NQN of systemstem + */ +const char *nvme_subsystem_get_nqn(nvme_subsystem_t s); + +/** + * nvme_subsystem_get_sysfs_dir() - sysfs directory of an nvme_subsystem_t object + * @s: nvme_subsystem_t object + * + * Return: sysfs directory name of @s + */ +const char *nvme_subsystem_get_sysfs_dir(nvme_subsystem_t s); + +/** + * nvme_subsystem_get_name() - sysfs name of an nvme_subsystem_t object + * @s: nvme_subsystem_t object + * + * Return: sysfs name of @s + */ +const char *nvme_subsystem_get_name(nvme_subsystem_t s); + +/** + * nvme_subsystem_get_type() - Returns the type of a subsystem + * @s: nvme_subsystem_t object + * + * Returns the subsystem type of @s. + * + * Return: 'nvm' or 'discovery' + */ +const char *nvme_subsystem_get_type(nvme_subsystem_t s); + +/** + * nvme_scan_topology() - Scan NVMe topology and apply filter + * @r: nvme_root_t object + * @f: filter to apply + * @f_args: user-specified argument to @f + * + * Scans the NVMe topology and filters out the resulting elements + * by applying @f. + * + * Return: Number of elements scanned + */ +int nvme_scan_topology(nvme_root_t r, nvme_scan_filter_t f, void *f_args); + +/** + * nvme_host_get_hostnqn() - Host NQN of an nvme_host_t object + * @h: nvme_host_t object + * + * Return: Host NQN of @h + */ +const char *nvme_host_get_hostnqn(nvme_host_t h); + +/** + * nvme_host_get_hostid() - Host ID of an nvme_host_t object + * @h: nvme_host_t object + * + * Return: Host ID of @h + */ +const char *nvme_host_get_hostid(nvme_host_t h); + +/** + * nvme_free_host() - Free nvme_host_t object + * @h: nvme_host_t object + */ +void nvme_free_host(nvme_host_t h); + +/** + * nvme_scan() - Scan NVMe topology + * @config_file: Configuration file + * + * Return: nvme_root_t object of found elements + */ +nvme_root_t nvme_scan(const char *config_file); + +/** + * nvme_read_config() - Read NVMe JSON configuration file + * @r: nvme_root_t object + * @config_file: JSON configuration file + * + * Read in the contents of @config_file and merge them with + * the elements in @r. + * + * Returns: 0 on success, -1 on failure with errno set. + */ +int nvme_read_config(nvme_root_t r, const char *config_file); + +/** + * nvme_refresh_topology() - Refresh nvme_root_t object contents + * @r: nvme_root_t object + * + * Removes all elements in @r and rescans the existing topology. + */ +void nvme_refresh_topology(nvme_root_t r); + +/** + * nvme_update_config() - Update JSON configuration + * @r: nvme_root_t object + * + * Updates the JSON configuration file with the contents of @r. + * + * Return: 0 on success, -1 on failure. + */ +int nvme_update_config(nvme_root_t r); + +/** + * nvme_dump_config() - Print the JSON configuration + * @r: nvme_root_t object + * + * Prints the current contents of the JSON configuration + * file to stdout. + * + * Return: 0 on success, -1 on failure. + */ +int nvme_dump_config(nvme_root_t r); + +/** + * nvme_dump_tree() - Dump internal object tree + * @r: nvme_root_t object + * + * Prints the internal object tree in JSON format + * to stdout. + * + * Return: 0 on success, -1 on failure. 
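+ *
+ * Example (editor's illustration): scan the default topology, print it as
+ * JSON and release it again:
+ *
+ *	nvme_root_t r = nvme_scan(NULL);
+ *
+ *	if (r) {
+ *		nvme_dump_tree(r);
+ *		nvme_free_tree(r);
+ *	}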
+ */ +int nvme_dump_tree(nvme_root_t r); + +/** + * nvme_get_attr() - Read sysfs attribute + * @d: sysfs directory + * @attr: sysfs attribute name + * + * Return: String with the contents of @attr or %NULL in case of an empty value + * or in case of an error (indicated by non-zero errno code). + */ +char *nvme_get_attr(const char *d, const char *attr); + +/** + * nvme_get_subsys_attr() - Read subsystem sysfs attribute + * @s: nvme_subsystem_t object + * @attr: sysfs attribute name + * + * Return: String with the contents of @attr or %NULL in case of an empty value + * or in case of an error (indicated by non-zero errno code). + */ +char *nvme_get_subsys_attr(nvme_subsystem_t s, const char *attr); + +/** + * nvme_get_ctrl_attr() - Read controller sysfs attribute + * @c: Controller instance + * @attr: sysfs attribute name + * + * Return: String with the contents of @attr or %NULL in case of an empty value + * or in case of an error (indicated by non-zero errno code). + */ +char *nvme_get_ctrl_attr(nvme_ctrl_t c, const char *attr); + +/** + * nvme_get_ns_attr() - Read namespace sysfs attribute + * @n: nvme_ns_t object + * @attr: sysfs attribute name + * + * Return: String with the contents of @attr or %NULL in case of an empty value + * or in case of an error (indicated by non-zero errno code). + */ +char *nvme_get_ns_attr(nvme_ns_t n, const char *attr); + +/** + * nvme_subsystem_lookup_namespace() - lookup namespace by NSID + * @s: nvme_subsystem_t object + * @nsid: Namespace id + * + * Return: nvme_ns_t of the namespace with id @nsid in subsystem @s + */ +nvme_ns_t nvme_subsystem_lookup_namespace(struct nvme_subsystem *s, + __u32 nsid); + +/** + * nvme_get_path_attr() - Read path sysfs attribute + * @p: nvme_path_t object + * @attr: sysfs attribute name + * + * Return: String with the contents of @attr or %NULL in case of an empty value + * or in case of an error (indicated by non-zero errno code). + */ +char *nvme_get_path_attr(nvme_path_t p, const char *attr); + +/** + * nvme_scan_namespace() - scan namespace based on sysfs name + * @name: sysfs name of the namespace to scan + * + * Return: nvme_ns_t object or NULL if not found. + */ +nvme_ns_t nvme_scan_namespace(const char *name); + +/** + * nvme_host_get_hostsymname() - Get the host's symbolic name + * @h: Host for which the symbolic name should be returned. + * + * Return: The symbolic name or NULL if a symbolic name hasn't been + * configure. + */ +const char *nvme_host_get_hostsymname(nvme_host_t h); + +/** + * nvme_host_set_hostsymname() - Set the host's symbolic name + * @h: Host for which the symbolic name should be set. + * @hostsymname: Symbolic name + */ +void nvme_host_set_hostsymname(nvme_host_t h, const char *hostsymname); + +#endif /* _LIBNVME_TREE_H */ diff --git a/src/nvme/types.h b/src/nvme/types.h new file mode 100644 index 0000000..84acb01 --- /dev/null +++ b/src/nvme/types.h @@ -0,0 +1,7069 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. 
+ * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#ifndef _LIBNVME_TYPES_H +#define _LIBNVME_TYPES_H + +#include <stdbool.h> +#include <stdint.h> + +#include <linux/types.h> + +/** + * DOC: types.h + * + * NVMe standard definitions + */ + +/** + * NVME_GET() - extract field from complex value + * @value: The original value of a complex field + * @name: The name of the sub-field within an nvme value + * + * By convention, this library defines _SHIFT and _MASK such that mask can be + * applied after the shift to isolate a specific set of bits that decode to a + * sub-field. + * + * Returns: The 'name' field from 'value' + */ +#define NVME_GET(value, name) \ + (((value) >> NVME_##name##_SHIFT) & NVME_##name##_MASK) + +/** + * NVME_SET() - set field into complex value + * @value: The value to be set in its completed position + * @name: The name of the sub-field within an nvme value + * + * Returns: The 'name' field from 'value' + */ +#define NVME_SET(value, name) \ + (((value) & NVME_##name##_MASK) << NVME_##name##_SHIFT) + +/** + * enum nvme_constants - A place to stash various constant nvme values + * @NVME_NSID_ALL: A broadcast value that is used to specify all + * namespaces + * @NVME_NSID_NONE: The invalid namespace id, for when the nsid + * parameter is not used in a command + * @NVME_UUID_NONE: Use to omit a uuid command parameter + * @NVME_CNTLID_NONE: Use to omit a cntlid command parameter + * @NVME_CNSSPECID_NONE: Use to omit a cns_specific_id command parameter + * @NVME_LOG_LSP_NONE: Use to omit a log lsp command parameter + * @NVME_LOG_LSI_NONE: Use to omit a log lsi command parameter + * @NVME_LOG_LPO_NONE: Use to omit a log lpo command parameter + * @NVME_IDENTIFY_DATA_SIZE: The transfer size for nvme identify commands + * @NVME_LOG_SUPPORTED_LOG_PAGES_MAX: The lagest possible index in the supported + * log pages log. + * @NVME_ID_NVMSET_LIST_MAX: The largest possible nvmset index in identify + * nvmeset + * @NVME_ID_UUID_LIST_MAX: The largest possible uuid index in identify + * uuid list + * @NVME_ID_CTRL_LIST_MAX: The largest possible controller index in + * identify controller list + * @NVME_ID_NS_LIST_MAX: The largest possible namespace index in + * identify namespace list + * @NVME_ID_SECONDARY_CTRL_MAX: The largest possible secondary controller index + * in identify secondary controller + * @NVME_ID_DOMAIN_LIST_MAX: The largest possible domain index in the + * in domain list + * @NVME_ID_ENDURANCE_GROUP_LIST_MAX: The largest possible endurance group + * index in the endurance group list + * @NVME_ID_ND_DESCRIPTOR_MAX: The largest possible namespace granularity + * index in the namespace granularity descriptor + * list + * @NVME_FEAT_LBA_RANGE_MAX: The largest possible LBA range index in feature + * lba range type + * @NVME_LOG_ST_MAX_RESULTS: The largest possible self test result index in the + * device self test log + * @NVME_LOG_FID_SUPPORTED_EFFECTS_MAX: The largest possible FID index in the + * feature identifiers effects log. + * @NVME_LOG_MI_CMD_SUPPORTED_EFFECTS_MAX: The largest possible MI Command index + * in the MI Command effects log. + * @NVME_LOG_MI_CMD_SUPPORTED_EFFECTS_RESERVED: The reserved space in the MI Command + * effects log. 
+ * @NVME_LOG_TELEM_BLOCK_SIZE: Specification defined size of Telemetry Data Blocks + * @NVME_DSM_MAX_RANGES: The largest possible range index in a data-set + * management command + * @NVME_NQN_LENGTH: Max length for NVMe Qualified Name + * @NVMF_TRADDR_SIZE: Max Transport Address size + * @NVMF_TSAS_SIZE: Max Transport Specific Address Subtype size + * @NVME_ZNS_CHANGED_ZONES_MAX: Max number of zones in the changed zones log + * page + */ +enum nvme_constants { + NVME_NSID_ALL = 0xffffffff, + NVME_NSID_NONE = 0, + NVME_UUID_NONE = 0, + NVME_CNTLID_NONE = 0, + NVME_CNSSPECID_NONE = 0, + NVME_LOG_LSP_NONE = 0, + NVME_LOG_LSI_NONE = 0, + NVME_LOG_LPO_NONE = 0, + NVME_IDENTIFY_DATA_SIZE = 4096, + NVME_LOG_SUPPORTED_LOG_PAGES_MAX = 256, + NVME_ID_NVMSET_LIST_MAX = 31, + NVME_ID_UUID_LIST_MAX = 127, + NVME_ID_CTRL_LIST_MAX = 2047, + NVME_ID_NS_LIST_MAX = 1024, + NVME_ID_SECONDARY_CTRL_MAX = 127, + NVME_ID_DOMAIN_LIST_MAX = 31, + NVME_ID_ENDURANCE_GROUP_LIST_MAX = 2047, + NVME_ID_ND_DESCRIPTOR_MAX = 16, + NVME_FEAT_LBA_RANGE_MAX = 64, + NVME_LOG_ST_MAX_RESULTS = 20, + NVME_LOG_TELEM_BLOCK_SIZE = 512, + NVME_LOG_FID_SUPPORTED_EFFECTS_MAX = 256, + NVME_LOG_MI_CMD_SUPPORTED_EFFECTS_MAX = 256, + NVME_LOG_MI_CMD_SUPPORTED_EFFECTS_RESERVED = 768, + NVME_DSM_MAX_RANGES = 256, + NVME_NQN_LENGTH = 256, + NVMF_TRADDR_SIZE = 256, + NVMF_TSAS_SIZE = 256, + NVME_ZNS_CHANGED_ZONES_MAX = 511, +}; + +/** + * enum nvme_csi - Defined command set indicators + * @NVME_CSI_NVM: NVM Command Set Indicator + * @NVME_CSI_KV: Key Value Command Set + * @NVME_CSI_ZNS: Zoned Namespace Command Set + */ +enum nvme_csi { + NVME_CSI_NVM = 0, + NVME_CSI_KV = 1, + NVME_CSI_ZNS = 2, +}; + +/** + * enum nvme_register_offsets - controller registers for all transports. This + * is the layout of BAR0/1 for PCIe, and + * properties for fabrics. 
+ * @NVME_REG_CAP: Controller Capabilities + * @NVME_REG_VS: Version + * @NVME_REG_INTMS: Interrupt Mask Set + * @NVME_REG_INTMC: Interrupt Mask Clear + * @NVME_REG_CC: Controller Configuration + * @NVME_REG_CSTS: Controller Status + * @NVME_REG_NSSR: NVM Subsystem Reset + * @NVME_REG_AQA: Admin Queue Attributes + * @NVME_REG_ASQ: Admin SQ Base Address + * @NVME_REG_ACQ: Admin CQ Base Address + * @NVME_REG_CMBLOC: Controller Memory Buffer Location + * @NVME_REG_CMBSZ: Controller Memory Buffer Size + * @NVME_REG_BPINFO: Boot Partition Information + * @NVME_REG_BPRSEL: Boot Partition Read Select + * @NVME_REG_BPMBL: Boot Partition Memory Buffer Location + * @NVME_REG_CMBMSC: Controller Memory Buffer Memory Space Control + * @NVME_REG_CMBSTS: Controller Memory Buffer Status + * @NVME_REG_PMRCAP: Persistent Memory Capabilities + * @NVME_REG_PMRCTL: Persistent Memory Region Control + * @NVME_REG_PMRSTS: Persistent Memory Region Status + * @NVME_REG_PMREBS: Persistent Memory Region Elasticity Buffer Size + * @NVME_REG_PMRSWTP: Memory Region Sustained Write Throughput + * @NVME_REG_PMRMSCL: Persistent Memory Region Controller Memory Space Control Lower + * @NVME_REG_PMRMSCU: Persistent Memory Region Controller Memory Space Control Upper + */ +enum nvme_register_offsets { + NVME_REG_CAP = 0x0000, + NVME_REG_VS = 0x0008, + NVME_REG_INTMS = 0x000c, + NVME_REG_INTMC = 0x0010, + NVME_REG_CC = 0x0014, + NVME_REG_CSTS = 0x001c, + NVME_REG_NSSR = 0x0020, + NVME_REG_AQA = 0x0024, + NVME_REG_ASQ = 0x0028, + NVME_REG_ACQ = 0x0030, + NVME_REG_CMBLOC = 0x0038, + NVME_REG_CMBSZ = 0x003c, + NVME_REG_BPINFO = 0x0040, + NVME_REG_BPRSEL = 0x0044, + NVME_REG_BPMBL = 0x0048, + NVME_REG_CMBMSC = 0x0050, + NVME_REG_CMBSTS = 0x0058, + NVME_REG_PMRCAP = 0x0e00, + NVME_REG_PMRCTL = 0x0e04, + NVME_REG_PMRSTS = 0x0e08, + NVME_REG_PMREBS = 0x0e0c, + NVME_REG_PMRSWTP = 0x0e10, + NVME_REG_PMRMSCL = 0x0e14, + NVME_REG_PMRMSCU = 0x0e18, +}; + +/** + * nvme_is_64bit_reg() - Checks if offset of the controller register is a know + * 64bit value. + * @offset: Offset of controller register field in bytes + * + * This function does not care about transport so that the offset is not going + * to be checked inside of this function for the unsupported fields in a + * specific transport. For example, BPMBL(Boot Partition Memory Buffer + * Location) register is not supported by fabrics, but it can be checked here. + * + * Returns true if given offset is 64bit register, otherwise it returns false. 
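+ *
+ * Example (editor's sketch; @regs is assumed to be an __u8 pointer to the
+ * mapped register block, and endianness handling is omitted for brevity):
+ *
+ *	__u64 cap;
+ *
+ *	if (nvme_is_64bit_reg(NVME_REG_CAP))
+ *		cap = *(volatile __u64 *)(regs + NVME_REG_CAP);
+ *	else
+ *		cap = *(volatile __u32 *)(regs + NVME_REG_CAP);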
+ */ +static inline bool nvme_is_64bit_reg(__u32 offset) +{ + switch (offset) { + case NVME_REG_CAP: + case NVME_REG_ASQ: + case NVME_REG_ACQ: + case NVME_REG_BPMBL: + case NVME_REG_CMBMSC: + return true; + default: + return false; + } +} + +enum nvme_cap { + NVME_CAP_MQES_SHIFT = 0, + NVME_CAP_CQR_SHIFT = 16, + NVME_CAP_AMS_SHIFT = 17, + NVME_CAP_TO_SHIFT = 24, + NVME_CAP_DSTRD_SHIFT = 32, + NVME_CAP_NSSRC_SHIFT = 36, + NVME_CAP_CSS_SHIFT = 37, + NVME_CAP_BPS_SHIFT = 45, + NVME_CAP_MPSMIN_SHIFT = 48, + NVME_CAP_MPSMAX_SHIFT = 52, + NVME_CAP_PMRS_SHIFT = 56, + NVME_CAP_CMBS_SHIFT = 57, + NVME_CAP_MQES_MASK = 0xffff, + NVME_CAP_CQR_MASK = 0x1, + NVME_CAP_AMS_MASK = 0x3, + NVME_CAP_TO_MASK = 0xff, + NVME_CAP_DSTRD_MASK = 0xf, + NVME_CAP_NSSRC_MASK = 0x1, + NVME_CAP_CSS_MASK = 0xff, + NVME_CAP_BPS_MASK = 0x1, + NVME_CAP_MPSMIN_MASK = 0xf, + NVME_CAP_MPSMAX_MASK = 0xf, + NVME_CAP_PMRS_MASK = 0x1, + NVME_CAP_CMBS_MASK = 0x1, + NVME_CAP_AMS_WRR = 1 << 0, + NVME_CAP_AMS_VS = 1 << 1, + NVME_CAP_CSS_NVM = 1 << 0, + NVME_CAP_CSS_CSI = 1 << 6, + NVME_CAP_CSS_ADMIN = 1 << 7, +}; + +#define NVME_CAP_MQES(cap) NVME_GET(cap, CAP_MQES) +#define NVME_CAP_CQR(cap) NVME_GET(cap, CAP_CQR) +#define NVME_CAP_AMS(cap) NVME_GET(cap, CAP_AMS) +#define NVME_CAP_TO(cap) NVME_GET(cap, CAP_TO) +#define NVME_CAP_DSTRD(cap) NVME_GET(cap, CAP_DSTRD) +#define NVME_CAP_NSSRC(cap) NVME_GET(cap, CAP_NSSRC) +#define NVME_CAP_CSS(cap) NVME_GET(cap, CAP_CSS) +#define NVME_CAP_BPS(cap) NVME_GET(cap, CAP_BPS) +#define NVME_CAP_MPSMIN(cap) NVME_GET(cap, CAP_MPSMIN) +#define NVME_CAP_MPSMAX(cap) NVME_GET(cap, CAP_MPSMAX) +#define NVME_CAP_CMBS(cap) NVME_GET(cap, CAP_CMBS) +#define NVME_CAP_PMRS(cap) NVME_GET(cap, CAP_PMRS) + +enum nvme_vs { + NVME_VS_TER_SHIFT = 0, + NVME_VS_MNR_SHIFT = 8, + NVME_VS_MJR_SHIFT = 16, + NVME_VS_TER_MASK = 0xff, + NVME_VS_MNR_MASK = 0xff, + NVME_VS_MJR_MASK = 0xffff, +}; + +#define NVME_VS_TER(vs) NVME_GET(vs, VS_TER) +#define NVME_VS_MNR(vs) NVME_GET(vs, VS_MNR) +#define NVME_VS_MJR(vs) NVME_GET(vs, VS_MJR) + +#define NVME_MAJOR(ver) NVME_VS_MJR(ver) +#define NVME_MINOR(ver) NVME_VS_MNR(ver) +#define NVME_TERTIARY(ver) NVME_VS_TER(ver) + +enum nvme_cc { + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_EN_MASK = 0x1, + NVME_CC_CSS_MASK = 0x7, + NVME_CC_MPS_MASK = 0xf, + NVME_CC_AMS_MASK = 0x7, + NVME_CC_SHN_MASK = 0x3, + NVME_CC_IOSQES_MASK = 0xf, + NVME_CC_IOCQES_MASK = 0xf, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 6, + NVME_CC_CSS_ADMIN = 7, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 1, + NVME_CC_AMS_VS = 7, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 1, + NVME_CC_SHN_ABRUPT = 2, +}; + +#define NVME_CC_EN(cc) NVME_GET(cc, CC_EN) +#define NVME_CC_CSS(cc) NVME_GET(cc, CC_CSS) +#define NVME_CC_MPS(cc) NVME_GET(cc, CC_MPS) +#define NVME_CC_AMS(cc) NVME_GET(cc, CC_AMS) +#define NVME_CC_SHN(cc) NVME_GET(cc, CC_SHN) +#define NVME_CC_IOSQES(cc) NVME_GET(cc, CC_IOSQES) +#define NVME_CC_IOCQES(cc) NVME_GET(cc, CC_IOCQES) + +enum nvme_csts { + NVME_CSTS_RDY_SHIFT = 0, + NVME_CSTS_CFS_SHIFT = 1, + NVME_CSTS_SHST_SHIFT = 2, + NVME_CSTS_NSSRO_SHIFT = 4, + NVME_CSTS_PP_SHIFT = 5, + NVME_CSTS_RDY_MASK = 0x1, + NVME_CSTS_CFS_MASK = 0x1, + NVME_CSTS_SHN_MASK = 0x3, + NVME_CSTS_NSSRO_MASK = 0x1, + NVME_CSTS_PP_MASK = 0x1, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 1, + NVME_CSTS_SHST_CMPLT = 2, + NVME_CSTS_SHST_MASK = 3, +}; + +#define NVME_CSTS_RDY(csts) 
NVME_GET(csts, CSTS_RDY) +#define NVME_CSTS_CFS(csts) NVME_GET(csts, CSTS_CFS) +#define NVME_CSTS_SHST(csts) NVME_GET(csts, CSTS_SHST) +#define NVME_CSTS_NSSRO(csts) NVME_GET(csts, CSTS_NSSRO) +#define NVME_CSTS_PP(csts) NVME_GET(csts, CSTS_PP) + +enum nvme_aqa { + NVME_AQA_ASQS_SHIFT = 0, + NVME_AQA_ACQS_SHIFT = 16, + NVME_AQA_ASQS_MASK = 0xfff, + NVME_AQA_ACQS_MASK = 0xfff, +}; + +#define NVME_AQA_ASQS(aqa) NVME_GET(aqa, AQA_ASQS) +#define NVME_AQA_ACQS(aqa) NVME_GET(aqa, AQA_ACQS) + +enum nvme_cmbloc { + NVME_CMBLOC_BIR_SHIFT = 0, + NVME_CMBLOC_CQMMS_SHIFT = 3, + NVME_CMBLOC_CQPDS_SHIFT = 4, + NVME_CMBLOC_CDPLMS_SHIFT = 5, + NVME_CMBLOC_CDPCILS_SHIFT = 6, + NVME_CMBLOC_CDMMMS_SHIFT = 7, + NVME_CMBLOC_CQDA_SHIFT = 8, + NVME_CMBLOC_OFST_SHIFT = 12, + NVME_CMBLOC_BIR_MASK = 0x7, + NVME_CMBLOC_CQMMS_MASK = 0x1, + NVME_CMBLOC_CQPDS_MASK = 0x1, + NVME_CMBLOC_CDPLMS_MASK = 0x1, + NVME_CMBLOC_CDPCILS_MASK = 0x1, + NVME_CMBLOC_CDMMMS_MASK = 0x1, + NVME_CMBLOC_CQDA_MASK = 0x1, + NVME_CMBLOC_OFST_MASK = 0xfffff, +}; + +#define NVME_CMBLOC_BIR(cmbloc) NVME_GET(cmbloc, CMBLOC_BIR) +#define NVME_CMBLOC_CQMMS(cmbloc) NVME_GET(cmbloc, CMBLOC_CQMMS) +#define NVME_CMBLOC_CQPDS(cmbloc) NVME_GET(cmbloc, CMBLOC_CQPDS) +#define NVME_CMBLOC_CDPLMS(cmbloc) NVME_GET(cmbloc, CMBLOC_CDPLMS) +#define NVME_CMBLOC_CDPCILS(cmbloc) NVME_GET(cmbloc, CMBLOC_CDPCILS) +#define NVME_CMBLOC_CDMMMS(cmbloc) NVME_GET(cmbloc, CMBLOC_CDMMMS) +#define NVME_CMBLOC_CQDA(cmbloc) NVME_GET(cmbloc, CMBLOC_CQDA) +#define NVME_CMBLOC_OFST(cmbloc) NVME_GET(cmbloc, CMBLOC_OFST) + +enum nvme_cmbsz { + NVME_CMBSZ_SQS_SHIFT = 0, + NVME_CMBSZ_CQS_SHIFT = 1, + NVME_CMBSZ_LISTS_SHIFT = 2, + NVME_CMBSZ_RDS_SHIFT = 3, + NVME_CMBSZ_WDS_SHIFT = 4, + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SQS_MASK = 0x1, + NVME_CMBSZ_CQS_MASK = 0x1, + NVME_CMBSZ_LISTS_MASK = 0x1, + NVME_CMBSZ_RDS_MASK = 0x1, + NVME_CMBSZ_WDS_MASK = 0x1, + NVME_CMBSZ_SZU_MASK = 0xf, + NVME_CMBSZ_SZ_MASK = 0xfffff, + NVME_CMBSZ_SZU_4K = 0, + NVME_CMBSZ_SZU_64K = 1, + NVME_CMBSZ_SZU_1M = 2, + NVME_CMBSZ_SZU_16M = 3, + NVME_CMBSZ_SZU_256M = 4, + NVME_CMBSZ_SZU_4G = 5, + NVME_CMBSZ_SZU_64G = 6, +}; + +#define NVME_CMBSZ_SQS(cmbsz) NVME_GET(cmbsz, CMBSZ_SQS) +#define NVME_CMBSZ_CQS(cmbsz) NVME_GET(cmbsz, CMBSZ_CQS) +#define NVME_CMBSZ_LISTS(cmbsz) NVME_GET(cmbsz, CMBSZ_LISTS) +#define NVME_CMBSZ_RDS(cmbsz) NVME_GET(cmbsz, CMBSZ_RDS) +#define NVME_CMBSZ_WDS(cmbsz) NVME_GET(cmbsz, CMBSZ_WDS) +#define NVME_CMBSZ_SZU(cmbsz) NVME_GET(cmbsz, CMBSZ_SZU) +#define NVME_CMBSZ_SZ(cmbsz) NVME_GET(cmbsz, CMBSZ_SZ) + +/** + * nvme_cmb_size() - Calculate size of the controller memory buffer + * @cmbsz: Value from controller register %NVME_REG_CMBSZ + * + * Returns size of controller memory buffer in bytes + */ +static inline __u64 nvme_cmb_size(__u32 cmbsz) +{ + return ((__u64)NVME_CMBSZ_SZ(cmbsz)) * + (1ULL << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))); +} + +enum nvme_bpinfo { + NVME_BPINFO_BPSZ_SHIFT = 0, + NVME_BPINFO_BRS_SHIFT = 24, + NVME_BPINFO_ABPID_SHIFT = 31, + NVME_BPINFO_BPSZ_MASK = 0x7fff, + NVME_BPINFO_BRS_MASK = 0x3, + NVME_BPINFO_ABPID_MASK = 0x1, + NVME_BPINFO_BRS_NONE = 0, + NVME_BPINFO_BRS_READ_IN_PROGRESS = 1, + NVME_BPINFO_BRS_READ_SUCCESS = 2, + NVME_BPINFO_BRS_READ_ERROR = 3, +}; + +#define NVME_BPINFO_BPSZ(bpinfo) NVME_GET(bpinfo, BPINFO_BPSZ) +#define NVME_BPINFO_BRS(bpinfo) NVME_GET(bpinfo, BPINFO_BRS) +#define NVME_BPINFO_ABPID(bpinfo) NVME_GET(bpinfo, BPINFO_ABPID) + +enum nvme_bprsel { + NVME_BPRSEL_BPRSZ_SHIFT = 0, + NVME_BPRSEL_BPROF_SHIFT = 10, + 
NVME_BPRSEL_BPID_SHIFT = 31, + NVME_BPRSEL_BPRSZ_MASK = 0x3ff, + NVME_BPRSEL_BPROF_MASK = 0xfff, + NVME_BPRSEL_BPID_MASK = 0x1, +}; + +#define NVME_BPRSEL_BPRSZ(bprsel) NVME_GET(bprsel, BPRSEL_BPRSZ) +#define NVME_BPRSEL_BPROF(bprsel) NVME_GET(bprsel, BPRSEL_BPROF) +#define NVME_BPRSEL_BPID(bprsel) NVME_GET(bprsel, BPRSEL_BPID) + +enum nvme_cmbmsc { + NVME_CMBMSC_CRE_SHIFT = 0, + NVME_CMBMSC_CMSE_SHIFT = 1, + NVME_CMBMSC_CBA_SHIFT = 12, + NVME_CMBMSC_CRE_MASK = 0x1, + NVME_CMBMSC_CMSE_MASK = 0x1, +}; +static const __u64 NVME_CMBMSC_CBA_MASK = 0xfffffffffffffull; + +#define NVME_CMBMSC_CRE(cmbmsc) NVME_GET(cmbmsc, CMBMSC_CRE) +#define NVME_CMBMSC_CMSE(cmbmsc) NVME_GET(cmbmsc, CMBMSC_CMSE) +#define NVME_CMBMSC_CBA(cmbmsc) NVME_GET(cmbmsc, CMBMSC_CBA) + +enum nvme_cmbsts { + NVME_CMBSTS_CBAI_SHIFT = 0, + NVME_CMBSTS_CBAI_MASK = 0x1, +}; + +#define NVME_CMBSTS_CBAI(cmbsts) NVME_GET(cmbsts, CMBSTS_CBAI) + +enum nvme_pmrcap { + NVME_PMRCAP_RDS_SHIFT = 3, + NVME_PMRCAP_WDS_SHIFT = 4, + NVME_PMRCAP_BIR_SHIFT = 5, + NVME_PMRCAP_PMRTU_SHIFT = 8, + NVME_PMRCAP_PMRWMB_SHIFT = 10, + NVME_PMRCAP_PMRTO_SHIFT = 16, + NVME_PMRCAP_CMSS_SHIFT = 24, + NVME_PMRCAP_RDS_MASK = 0x1, + NVME_PMRCAP_WDS_MASK = 0x1, + NVME_PMRCAP_BIR_MASK = 0x7, + NVME_PMRCAP_PMRTU_MASK = 0x3, + NVME_PMRCAP_PMRWMB_MASK = 0xf, + NVME_PMRCAP_PMRTO_MASK = 0xff, + NVME_PMRCAP_CMSS_MASK = 0x1, + NVME_PMRCAP_PMRTU_500MS = 0, + NVME_PMRCAP_PMRTU_60S = 1, +}; + +#define NVME_PMRCAP_RDS(pmrcap) NVME_GET(pmrcap, PMRCAP_RDS) +#define NVME_PMRCAP_WDS(pmrcap) NVME_GET(pmrcap, PMRCAP_WDS) +#define NVME_PMRCAP_BIR(pmrcap) NVME_GET(pmrcap, PMRCAP_BIR) +#define NVME_PMRCAP_PMRTU(pmrcap) NVME_GET(pmrcap, PMRCAP_PMRTU) +#define NVME_PMRCAP_PMRWMB(pmrcap) NVME_GET(pmrcap, PMRCAP_PMRWMB) +#define NVME_PMRCAP_PMRTO(pmrcap) NVME_GET(pmrcap, PMRCAP_PMRTO) +#define NVME_PMRCAP_CMSS(pmrcap) NVME_GET(pmrcap, PMRCAP_CMSS) + +enum nvme_pmrctl { + NVME_PMRCTL_EN_SHIFT = 0, + NVME_PMRCTL_EN_MASK = 0x1, +}; + +#define NVME_PMRCTL_EN(pmrctl) NVME_GET(pmrctl, PMRCTL_EN) + +enum nvme_pmrsts { + NVME_PMRSTS_ERR_SHIFT = 0, + NVME_PMRSTS_NRDY_SHIFT = 8, + NVME_PMRSTS_HSTS_SHIFT = 9, + NVME_PMRSTS_CBAI_SHIFT = 12, + NVME_PMRSTS_ERR_MASK = 0xff, + NVME_PMRSTS_NRDY_MASK = 0x1, + NVME_PMRSTS_HSTS_MASK = 0x7, + NVME_PMRSTS_CBAI_MASK = 0x1, +}; + +#define NVME_PMRSTS_ERR(pmrsts) NVME_GET(pmrsts, PMRSTS_ERR) +#define NVME_PMRSTS_NRDY(pmrsts) NVME_GET(pmrsts, PMRSTS_NRDY) +#define NVME_PMRSTS_HSTS(pmrsts) NVME_GET(pmrsts, PMRSTS_HSTS) +#define NVME_PMRSTS_CBAI(pmrsts) NVME_GET(pmrsts, PMRSTS_CBAI) + +enum nvme_pmrebs { + NVME_PMREBS_PMRSZU_SHIFT = 0, + NVME_PMREBS_RBB_SHIFT = 4, + NVME_PMREBS_PMRWBZ_SHIFT = 8, + NVME_PMREBS_PMRSZU_MASK = 0xf, + NVME_PMREBS_RBB_MASK = 0x1, + NVME_PMREBS_PMRWBZ_MASK = 0xffffff, + NVME_PMREBS_PMRSZU_B = 0, + NVME_PMREBS_PMRSZU_1K = 1, + NVME_PMREBS_PMRSZU_1M = 2, + NVME_PMREBS_PMRSZU_1G = 3, +}; + +#define NVME_PMREBS_PMRSZU(pmrebs) NVME_GET(pmrebs, PMREBS_PMRSZU) +#define NVME_PMREBS_RBB(pmrebs) NVME_GET(pmrebs, PMREBS_RBB) +#define NVME_PMREBS_PMRWBZ(pmrebs) NVME_GET(pmrebs, PMREBS_PMRWBZ) + +/** + * nvme_pmr_size() - Calculate size of persistent memory region elasticity + * buffer + * @pmrebs: Value from controller register %NVME_REG_PMREBS + * + * Returns size of controller persistent memory buffer in bytes + */ +static inline __u64 nvme_pmr_size(__u32 pmrebs) +{ + return ((__u64)NVME_PMREBS_PMRWBZ(pmrebs)) * + (1ULL << (10 * NVME_PMREBS_PMRSZU(pmrebs))); +} + +enum nvme_pmrswtp { + NVME_PMRSWTP_PMRSWTU_SHIFT = 0, + NVME_PMRSWTP_PMRSWTV_SHIFT = 
8, + NVME_PMRSWTP_PMRSWTU_MASK = 0xf, + NVME_PMRSWTP_PMRSWTV_MASK = 0xffffff, + NVME_PMRSWTP_PMRSWTU_BPS = 0, + NVME_PMRSWTP_PMRSWTU_KBPS = 1, + NVME_PMRSWTP_PMRSWTU_MBPS = 2, + NVME_PMRSWTP_PMRSWTU_GBPS = 3, +}; + +#define NVME_PMRSWTP_PMRSWTU(pmrswtp) NVME_GET(pmrswtp, PMRSWTP_PMRSWTU) +#define NVME_PMRSWTP_PMRSWTV(pmrswtp) NVME_GET(pmrswtp, PMRSWTP_PMRSWTU) + +/** + * nvme_pmr_throughput() - Calculate throughput of persistent memory buffer + * @pmrswtp: Value from controller register %NVME_REG_PMRSWTP + * + * Returns throughput of controller persistent memory buffer in bytes/second + */ +static inline __u64 nvme_pmr_throughput(__u32 pmrswtp) +{ + return ((__u64)NVME_PMRSWTP_PMRSWTV(pmrswtp)) * + (1ULL << (10 * NVME_PMRSWTP_PMRSWTU(pmrswtp))); +} + +enum nvme_pmrmsc { + NVME_PMRMSC_CMSE_SHIFT = 1, + NVME_PMRMSC_CBA_SHIFT = 12, + NVME_PMRMSC_CMSE_MASK = 0x1, +}; +static const __u64 NVME_PMRMSC_CBA_MASK = 0xfffffffffffffull; + +#define NVME_PMRMSC_CMSE(pmrmsc) NVME_GET(pmrmsc, PMRMSC_CMSE) +#define NVME_PMRMSC_CBA(pmrmsc) NVME_GET(pmrmsc, PMRMSC_CBA) + +/** + * enum nvme_psd_flags - Possible flag values in nvme power state descriptor + * @NVME_PSD_FLAGS_MXPS: Indicates the scale for the Maximum Power + * field. If this bit is cleared, then the scale of the + * Maximum Power field is in 0.01 Watts. If this bit is + * set, then the scale of the Maximum Power field is in + * 0.0001 Watts. + * @NVME_PSD_FLAGS_NOPS: Indicates whether the controller processes I/O + * commands in this power state. If this bit is cleared, + * then the controller processes I/O commands in this + * power state. If this bit is set, then the controller + * does not process I/O commands in this power state. + */ +enum nvme_psd_flags { + NVME_PSD_FLAGS_MXPS = 1 << 0, + NVME_PSD_FLAGS_NOPS = 1 << 1, +}; + +/** + * enum nvme_psd_ps - Known values for &struct nvme_psd %ips and %aps. Use with + * nvme_psd_power_scale() to extract the power scale field + * to match this enum. + * @NVME_PSD_PS_100_MICRO_WATT: 0.0001 watt scale + * @NVME_PSD_PS_10_MILLI_WATT: 0.01 watt scale + */ +enum nvme_psd_ps { + NVME_PSD_PS_100_MICRO_WATT = 1, + NVME_PSD_PS_10_MILLI_WATT = 2, +}; + +/** + * nvme_psd_power_scale() - power scale occupies the upper 3 bits + * @ps: power scale value + */ +static inline unsigned int nvme_psd_power_scale(__u8 ps) +{ + return ps >> 6; +} + +/** + * enum nvme_psd_workload - Specifies a workload hint in the Power Management + * Feature (see &struct nvme_psd.apw) to inform the + * NVM subsystem or indicate the conditions for the + * active power level. + * @NVME_PSD_WORKLOAD_1: Extended Idle Period with a Burst of Random Write + * consists of five minutes of idle followed by + * thirty-two random write commands of size 1 MiB + * submitted to a single controller while all other + * controllers in the NVM subsystem are idle, and then + * thirty (30) seconds of idle. + * @NVME_PSD_WORKLOAD_2: Heavy Sequential Writes consists of 80,000 + * sequential write commands of size 128 KiB submitted to + * a single controller while all other controllers in the + * NVM subsystem are idle. The submission queue(s) + * should be sufficiently large allowing the host to + * ensure there are multiple commands pending at all + * times during the workload. + */ +enum nvme_psd_workload { + NVME_PSD_WORKLOAD_1 = 1, + NVME_PSD_WORKLOAD_2 = 2, +}; + +/** + * struct nvme_id_psd - + * @mp: Maximum Power indicates the sustained maximum power consumed by the + * NVM subsystem in this power state. 
The power in Watts is equal to + * the value in this field multiplied by the scale specified in the Max + * Power Scale bit (see &enum nvme_psd_flags). A value of 0 indicates + * Maximum Power is not reported. + * @rsvd2: Reserved + * @flags: Additional decoding flags, see &enum nvme_psd_flags. + * @enlat: Entry Latency indicates the maximum latency in microseconds + * associated with entering this power state. A value of 0 indicates + * Entry Latency is not reported. + * @exlat: Exit Latency indicates the maximum latency in microseconds + * associated with exiting this power state. A value of 0 indicates + * Exit Latency is not reported. + * @rrt: Relative Read Throughput indicates the read throughput rank + * associated with this power state relative to others. The value in + * this is less than the number of supported power states. + * @rrl: Relative Reade Latency indicates the read latency rank associated + * with this power state relative to others. The value in this field is + * less than the number of supported power states. + * @rwt: Relative Write Throughput indicates write throughput rank associated + * with this power state relative to others. The value in this field is + * less than the number of supported power states + * @rwl: Relative Write Latency indicates the write latency rank associated + * with this power state relative to others. The value in this field is + * less than the number of supported power states + * @idlp: Idle Power indicates the typical power consumed by the NVM + * subsystem over 30 seconds in this power state when idle. + * @ips: Idle Power Scale indicates the scale for &struct nvme_id_psd.idlp, + * see &enum nvme_psd_ps for decoding this field. + * @rsvd19: Reserved + * @actp: Active Power indicates the largest average power consumed by the + * NVM subsystem over a 10 second period in this power state with + * the workload indicated in the Active Power Workload field. + * @apws: Bits 7-6: Active Power Scale(APS) indicates the scale for the &struct + * nvme_id_psd.actp, see &enum nvme_psd_ps for decoding this value. + * Bits 2-0: Active Power Workload(APW) indicates the workload + * used to calculate maximum power for this power state. + * See &enum nvme_psd_workload for decoding this field. + * @rsvd23: Reserved + */ +struct nvme_id_psd { + __le16 mp; + __u8 rsvd2; + __u8 flags; + __le32 enlat; + __le32 exlat; + __u8 rrt; + __u8 rrl; + __u8 rwt; + __u8 rwl; + __le16 idlp; + __u8 ips; + __u8 rsvd19; + __le16 actp; + __u8 apws; + __u8 rsvd23[9]; +}; + +/** + * struct nvme_id_ctrl - Identify Controller data structure + * @vid: PCI Vendor ID, the company vendor identifier that is assigned by + * the PCI SIG. + * @ssvid: PCI Subsystem Vendor ID, the company vendor identifier that is + * assigned by the PCI SIG for the subsystem. + * @sn: Serial Number in ascii + * @mn: Model Number in ascii + * @fr: Firmware Revision in ascii, the currently active firmware + * revision for the NVM subsystem + * @rab: Recommended Arbitration Burst, reported as a power of two + * @ieee: IEEE assigned Organization Unique Identifier + * @cmic: Controller Multipath IO and Namespace Sharing Capabilities of + * the controller and NVM subsystem. See &enum nvme_id_ctrl_cmic. + * @mdts: Max Data Transfer Size is the largest data transfer size. The + * host should not submit a command that exceeds this maximum data + * transfer size. 
The value is in units of the minimum memory page + * size (CAP.MPSMIN) and is reported as a power of two + * @cntlid: Controller ID, the NVM subsystem unique controller identifier + * associated with the controller. + * @ver: Version, this field contains the value reported in the Version + * register, or property (see &enum nvme_registers %NVME_REG_VS). + * @rtd3r: RTD3 Resume Latency, the expected latency in microseconds to resume + * from Runtime D3 + * @rtd3e: RTD3 Exit Latency, the typical latency in microseconds to enter + * Runtime D3. + * @oaes: Optional Async Events Supported, see @enum nvme_id_ctrl_oaes. + * @ctratt: Controller Attributes, see @enum nvme_id_ctrl_ctratt. + * @rrls: Read Recovery Levels. If a bit is set, then the corresponding + * Read Recovery Level is supported. If a bit is cleared, then the + * corresponding Read Recovery Level is not supported. + * @rsvd102: Reserved + * @cntrltype: Controller Type, see &enum nvme_id_ctrl_cntrltype + * @fguid: FRU GUID, a 128-bit value that is globally unique for a given + * Field Replaceable Unit + * @crdt1: Controller Retry Delay time in 100 millisecod units if CQE CRD + * field is 1 + * @crdt2: Controller Retry Delay time in 100 millisecod units if CQE CRD + * field is 2 + * @crdt3: Controller Retry Delay time in 100 millisecod units if CQE CRD + * field is 3 + * @rsvd134: Reserved + * @nvmsr: NVM Subsystem Report, see &enum nvme_id_ctrl_nvmsr + * @vwci: VPD Write Cycle Information, see &enum nvme_id_ctrl_vwci + * @mec: Management Endpoint Capabilities, see &enum nvme_id_ctrl_mec + * @oacs: Optional Admin Command Support,the optional Admin commands and + * features supported by the controller, see &enum nvme_id_ctrl_oacs. + * @acl: Abort Command Limit, the maximum number of concurrently + * executing Abort commands supported by the controller. This is a + * 0's based value. + * @aerl: Async Event Request Limit, the maximum number of concurrently + * outstanding Asynchronous Event Request commands supported by the + * controller This is a 0's based value. + * @frmw: Firmware Updates indicates capabilities regarding firmware + * updates. See &enum nvme_id_ctrl_frmw. + * @lpa: Log Page Attributes, see &enum nvme_id_ctrl_lpa. + * @elpe: Error Log Page Entries, the maximum number of Error Information + * log entries that are stored by the controller. This field is a + * 0's based value. + * @npss: Number of Power States Supported, the number of NVM Express + * power states supported by the controller, indicating the number + * of valid entries in &struct nvme_id_ctrl.psd. This is a 0's + * based value. + * @avscc: Admin Vendor Specific Command Configuration, see + * &enum nvme_id_ctrl_avscc. + * @apsta: Autonomous Power State Transition Attributes, see + * &enum nvme_id_ctrl_apsta. + * @wctemp: Warning Composite Temperature Threshold indicates + * the minimum Composite Temperature field value (see &struct + * nvme_smart_log.critical_comp_time) that indicates an overheating + * condition during which controller operation continues. + * @cctemp: Critical Composite Temperature Threshold, field indicates the + * minimum Composite Temperature field value (see &struct + * nvme_smart_log.critical_comp_time) that indicates a critical + * overheating condition. + * @mtfa: Maximum Time for Firmware Activation indicates the maximum time + * the controller temporarily stops processing commands to activate + * the firmware image, specified in 100 millisecond units. 
This + * field is always valid if the controller supports firmware + * activation without a reset. + * @hmpre: Host Memory Buffer Preferred Size indicates the preferred size + * that the host is requested to allocate for the Host Memory + * Buffer feature in 4 KiB units. + * @hmmin: Host Memory Buffer Minimum Size indicates the minimum size that + * the host is requested to allocate for the Host Memory Buffer + * feature in 4 KiB units. + * @tnvmcap: Total NVM Capacity, the total NVM capacity in the NVM subsystem. + * The value is in bytes. + * @unvmcap: Unallocated NVM Capacity, the unallocated NVM capacity in the + * NVM subsystem. The value is in bytes. + * @rpmbs: Replay Protected Memory Block Support, see + * &enum nvme_id_ctrl_rpmbs. + * @edstt: Extended Device Self-test Time, if Device Self-test command is + * supported (see &struct nvme_id_ctrl.oacs, %NVME_CTRL_OACS_SELF_TEST), + * then this field indicates the nominal amount of time in one + * minute units that the controller takes to complete an extended + * device self-test operation when in power state 0. + * @dsto: Device Self-test Options, see &enum nvme_id_ctrl_dsto. + * @fwug: Firmware Update Granularity indicates the granularity and + * alignment requirement of the firmware image being updated by the + * Firmware Image Download command. The value is reported in 4 KiB + * units. A value of 0h indicates no information on granularity is + * provided. A value of FFh indicates no restriction + * @kas: Keep Alive Support indicates the granularity of the Keep Alive + * Timer in 100 millisecond units. + * @hctma: Host Controlled Thermal Management Attributes, see + * &enum nvme_id_ctrl_hctm. + * @mntmt: Minimum Thermal Management Temperature indicates the minimum + * temperature, in degrees Kelvin, that the host may request in the + * Thermal Management Temperature 1 field and Thermal Management + * Temperature 2 field of a Set Features command with the Feature + * Identifier field set to %NVME_FEAT_FID_HCTM. + * @mxtmt: Maximum Thermal Management Temperature indicates the maximum + * temperature, in degrees Kelvin, that the host may request in the + * Thermal Management Temperature 1 field and Thermal Management + * Temperature 2 field of the Set Features command with the Feature + * Identifier set to %NVME_FEAT_FID_HCTM. + * @sanicap: Sanitize Capabilities, see &enum nvme_id_ctrl_sanicap + * @hmminds: Host Memory Buffer Minimum Descriptor Entry Size indicates the + * minimum usable size of a Host Memory Buffer Descriptor Entry in + * 4 KiB units. + * @hmmaxd: Host Memory Maximum Descriptors Entries indicates the number of + * usable Host Memory Buffer Descriptor Entries. + * @nsetidmax: NVM Set Identifier Maximum, defines the maximum value of a valid + * NVM Set Identifier for any controller in the NVM subsystem. + * @endgidmax: Endurance Group Identifier Maximum, defines the maximum value of + * a valid Endurance Group Identifier for any controller in the NVM + * subsystem. + * @anatt: ANA Transition Time indicates the maximum amount of time, in + * seconds, for a transition between ANA states or the maximum + * amount of time, in seconds, that the controller reports the ANA + * change state. + * @anacap: Asymmetric Namespace Access Capabilities, see + * &enum nvme_id_ctrl_anacap. + * @anagrpmax: ANA Group Identifier Maximum indicates the maximum value of a + * valid ANA Group Identifier for any controller in the NVM + * subsystem. 
+ * @nanagrpid: Number of ANA Group Identifiers indicates the number of ANA + * groups supported by this controller. + * @pels: Persistent Event Log Size indicates the maximum reportable size + * for the Persistent Event Log. + * @domainid: Domain Identifier indicates the identifier of the domain + * that contains this controller. + * @rsvd358: Reserved + * @megcap: Max Endurance Group Capacity indicates the maximum capacity + * of a single Endurance Group. + * @rsvd384: Reserved + * @sqes: Submission Queue Entry Size, see &enum nvme_id_ctrl_sqes. + * @cqes: Completion Queue Entry Size, see &enum nvme_id_ctrl_cqes. + * @maxcmd: Maximum Outstanding Commands indicates the maximum number of + * commands that the controller processes at one time for a + * particular queue. + * @nn: Number of Namespaces indicates the maximum value of a valid + * nsid for the NVM subsystem. If the MNAN (&struct nvme_id_ctrl.mnan) + * field is cleared to 0h, then this field also indicates the + * maximum number of namespaces supported by the NVM subsystem. + * @oncs: Optional NVM Command Support, see &enum nvme_id_ctrl_oncs. + * @fuses: Fused Operation Support, see &enum nvme_id_ctrl_fuses. + * @fna: Format NVM Attributes, see &enum nvme_id_ctrl_fna. + * @vwc: Volatile Write Cache, see &enum nvme_id_ctrl_vwc. + * @awun: Atomic Write Unit Normal indicates the size of the write + * operation guaranteed to be written atomically to the NVM across + * all namespaces with any supported namespace format during normal + * operation. This field is specified in logical blocks and is a + * 0's based value. + * @awupf: Atomic Write Unit Power Fail indicates the size of the write + * operation guaranteed to be written atomically to the NVM across + * all namespaces with any supported namespace format during a + * power fail or error condition. This field is specified in + * logical blocks and is a 0's based value. + * @icsvscc: NVM Vendor Specific Command Configuration, see + * &enum nvme_id_ctrl_nvscc. + * @nwpc: Namespace Write Protection Capabilities, see + * &enum nvme_id_ctrl_nwpc. + * @acwu: Atomic Compare & Write Unit indicates the size of the write + * operation guaranteed to be written atomically to the NVM across + * all namespaces with any supported namespace format for a Compare + * and Write fused operation. This field is specified in logical + * blocks and is a 0's based value. + * @ocfs: Optional Copy Formats Supported, each bit n means the controller + * supports Copy Format n. + * @sgls: SGL Support, see &enum nvme_id_ctrl_sgls + * @mnan: Maximum Number of Allowed Namespaces indicates the maximum + * number of namespaces supported by the NVM subsystem. + * @maxdna: Maximum Domain Namespace Attachments indicates the maximum + * of the sum of the number of namespaces attached to each I/O + * controller in the Domain. + * @maxcna: Maximum I/O Controller Namespace Attachments indicates the + * maximum number of namespaces that are allowed to be attached to + * this I/O controller. + * @rsvd564: Reserved + * @subnqn: NVM Subsystem NVMe Qualified Name, UTF-8 null terminated string + * @rsvd1024: Reserved + * @ioccsz: I/O Queue Command Capsule Supported Size, defines the maximum + * I/O command capsule size in 16 byte units. + * @iorcsz: I/O Queue Response Capsule Supported Size, defines the maximum + * I/O response capsule size in 16 byte units. + * @icdoff: In Capsule Data Offset, defines the offset where data starts + * within a capsule. This value is applicable to I/O Queues only. 
+ * @fcatt: Fabrics Controller Attributes, see &enum nvme_id_ctrl_fcatt. + * @msdbd: Maximum SGL Data Block Descriptors indicates the maximum + * number of SGL Data Block or Keyed SGL Data Block descriptors + * that a host is allowed to place in a capsule. A value of 0h + * indicates no limit. + * @ofcs: Optional Fabric Commands Support, see &enum nvme_id_ctrl_ofcs. + * @dctype: Discovery Controller Type (DCTYPE). This field indicates what + * type of Discovery controller the controller is (see enum + * nvme_id_ctrl_dctype) + * @rsvd1807: Reserved + * @psd: Power State Descriptors, see &struct nvme_id_psd. + * @vs: Vendor Specific + */ +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __le16 rrls; + __u8 rsvd102[9]; + __u8 cntrltype; + __u8 fguid[16]; + __le16 crdt1; + __le16 crdt2; + __le16 crdt3; + __u8 rsvd134[119]; + __u8 nvmsr; + __u8 vwci; + __u8 mec; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __le16 nsetidmax; + __le16 endgidmax; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __le32 pels; + __le16 domainid; + __u8 rsvd358[10]; + __u8 megcap[16]; + __u8 rsvd384[128]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 icsvscc; + __u8 nwpc; + __le16 acwu; + __le16 ocfs; + __le32 sgls; + __le32 mnan; + __u8 maxdna[16]; + __le32 maxcna; + __u8 rsvd564[204]; + char subnqn[NVME_NQN_LENGTH]; + __u8 rsvd1024[768]; + + /* Fabrics Only */ + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 fcatt; + __u8 msdbd; + __le16 ofcs; + __u8 dctype; + __u8 rsvd1807[241]; + + struct nvme_id_psd psd[32]; + __u8 vs[1024]; +}; + +/** + * enum nvme_id_ctrl_cmic - Controller Multipath IO and Namespace Sharing + * Capabilities of the controller and NVM subsystem. + * @NVME_CTRL_CMIC_MULTI_PORT: If set, then the NVM subsystem may contain + * more than one NVM subsystem port, otherwise + * the NVM subsystem contains only a single + * NVM subsystem port. + * @NVME_CTRL_CMIC_MULTI_CTRL: If set, then the NVM subsystem may contain + * two or more controllers, otherwise the + * NVM subsystem contains only a single + * controller. An NVM subsystem that contains + * multiple controllers may be used by + * multiple hosts, or may provide multiple + * paths for a single host. + * @NVME_CTRL_CMIC_MULTI_SRIOV: If set, then the controller is associated + * with an SR-IOV Virtual Function, otherwise + * it is associated with a PCI Function + * or a Fabrics connection. + * @NVME_CTRL_CMIC_MULTI_ANA_REPORTING: If set, then the NVM subsystem supports + * Asymmetric Namespace Access Reporting. 
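+ *
+ * Example (editor's illustrative sketch, not part of the upstream API; the
+ * helper name is made up): code that has already fetched an Identify
+ * Controller data structure can test these bits directly, e.g. to decide
+ * whether the ANA log is worth reading:
+ *
+ *	static inline int subsys_reports_ana(const struct nvme_id_ctrl *id)
+ *	{
+ *		return !!(id->cmic & NVME_CTRL_CMIC_MULTI_ANA_REPORTING);
+ *	}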
+ */ +enum nvme_id_ctrl_cmic { + NVME_CTRL_CMIC_MULTI_PORT = 1 << 0, + NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1, + NVME_CTRL_CMIC_MULTI_SRIOV = 1 << 2, + NVME_CTRL_CMIC_MULTI_ANA_REPORTING = 1 << 3, +}; + +/** + * enum nvme_id_ctrl_oaes - Optional Asynchronous Events Supported + * @NVME_CTRL_OAES_NA: Namespace Attribute Notices event supported + * @NVME_CTRL_OAES_FA: Firmware Activation Notices event supported + * @NVME_CTRL_OAES_ANA: ANA Change Notices supported + * @NVME_CTRL_OAES_PLEA: Predictable Latency Event Aggregate Log + * Change Notices event supported + * @NVME_CTRL_OAES_LBAS: LBA Status Information Notices event supported + * @NVME_CTRL_OAES_EGE: Endurance Group Events Aggregate Log Change + * Notices event supported + * @NVME_CTRL_OAES_NS: Normal NVM Subsystem Shutdown event supported + * @NVME_CTRL_OAES_ZD: Zone Descriptor Change Notifications supported + * @NVME_CTRL_OAES_DL: Discovery Log Page Change Notifications supported + */ +enum nvme_id_ctrl_oaes { + NVME_CTRL_OAES_NA = 1 << 8, + NVME_CTRL_OAES_FA = 1 << 9, + NVME_CTRL_OAES_ANA = 1 << 11, + NVME_CTRL_OAES_PLEA = 1 << 12, + NVME_CTRL_OAES_LBAS = 1 << 13, + NVME_CTRL_OAES_EGE = 1 << 14, + NVME_CTRL_OAES_NS = 1 << 15, + NVME_CTRL_OAES_ZD = 1 << 27, + NVME_CTRL_OAES_DL = 1 << 31, +}; + +/** + * enum nvme_id_ctrl_ctratt - Controller attributes + * @NVME_CTRL_CTRATT_128_ID: 128-bit Host Identifier supported + * @NVME_CTRL_CTRATT_NON_OP_PSP: Non-Operational Power State Permissive Mode + * supported + * @NVME_CTRL_CTRATT_NVM_SETS: NVM Sets supported + * @NVME_CTRL_CTRATT_READ_RECV_LVLS: Read Recovery Levels supported + * @NVME_CTRL_CTRATT_ENDURANCE_GROUPS: Endurance Groups supported + * @NVME_CTRL_CTRATT_PREDICTABLE_LAT: Predictable Latency Mode supported + * @NVME_CTRL_CTRATT_TBKAS: Traffic Based Keep Alive Support + * @NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY: Namespace Granularity reporting + * supported + * @NVME_CTRL_CTRATT_SQ_ASSOCIATIONS: SQ Associations supported + * @NVME_CTRL_CTRATT_UUID_LIST: UUID List reporting supported + * @NVME_CTRL_CTRATT_MDS: Multi-Domain Subsystem supported + * @NVME_CTRL_CTRATT_FIXED_CAP: Fixed Capacity Management supported + * @NVME_CTRL_CTRATT_VARIABLE_CAP: Variable Capacity Management supported + * @NVME_CTRL_CTRATT_DEL_ENDURANCE_GROUPS: Delete Endurance Groups supported + * @NVME_CTRL_CTRATT_DEL_NVM_SETS: Delete NVM Sets supported + * @NVME_CTRL_CTRATT_ELBAS: Extended LBA Formats supported + */ +enum nvme_id_ctrl_ctratt { + NVME_CTRL_CTRATT_128_ID = 1 << 0, + NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1, + NVME_CTRL_CTRATT_NVM_SETS = 1 << 2, + NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3, + NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4, + NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5, + NVME_CTRL_CTRATT_TBKAS = 1 << 6, + NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7, + NVME_CTRL_CTRATT_SQ_ASSOCIATIONS = 1 << 8, + NVME_CTRL_CTRATT_UUID_LIST = 1 << 9, + NVME_CTRL_CTRATT_MDS = 1 << 10, + NVME_CTRL_CTRATT_FIXED_CAP = 1 << 11, + NVME_CTRL_CTRATT_VARIABLE_CAP = 1 << 12, + NVME_CTRL_CTRATT_DEL_ENDURANCE_GROUPS = 1 << 13, + NVME_CTRL_CTRATT_DEL_NVM_SETS = 1 << 14, + NVME_CTRL_CTRATT_ELBAS = 1 << 15, +}; + +/** + * enum nvme_id_ctrl_cntrltype - Controller types + * @NVME_CTRL_CNTRLTYPE_IO: NVM I/O controller + * @NVME_CTRL_CNTRLTYPE_DISCOVERY: Discovery controller + * @NVME_CTRL_CNTRLTYPE_ADMIN: Admin controller + */ +enum nvme_id_ctrl_cntrltype { + NVME_CTRL_CNTRLTYPE_IO = 1, + NVME_CTRL_CNTRLTYPE_DISCOVERY = 2, + NVME_CTRL_CNTRLTYPE_ADMIN = 3, +}; + +/** + * enum nvme_id_ctrl_dctype - Discovery Controller types + * 
@NVME_CTRL_DCTYPE_NOT_REPORTED: Not reported (I/O, Admin, and pre-TP8010) + * @NVME_CTRL_DCTYPE_DDC: Direct Discovery controller + * @NVME_CTRL_DCTYPE_CDC: Central Discovery controller + */ +enum nvme_id_ctrl_dctype { + NVME_CTRL_DCTYPE_NOT_REPORTED = 0, + NVME_CTRL_DCTYPE_DDC = 1, + NVME_CTRL_DCTYPE_CDC = 2, +}; + +/** + * enum nvme_id_ctrl_nvmsr - This field reports information associated with the + * NVM Subsystem, see &struct nvme_id_ctrl.nvmsr. + * @NVME_CTRL_NVMSR_NVMESD: If set, then the NVM Subsystem is part of an NVMe + * Storage Device; if cleared, then the NVM Subsystem + * is not part of an NVMe Storage Device. + * @NVME_CTRL_NVMSR_NVMEE: If set, then the NVM Subsystem is part of an NVMe + * Enclosure; if cleared, then the NVM Subsystem is + * not part of an NVMe Enclosure. + */ +enum nvme_id_ctrl_nvmsr { + NVME_CTRL_NVMSR_NVMESD = 1 << 0, + NVME_CTRL_NVMSR_NVMEE = 1 << 1, +}; + +/** + * enum nvme_id_ctrl_vwci - This field indicates information about remaining + * number of times that VPD contents are able to be + * updated using the VPD Write command, see &struct + * nvme_id_ctrl.vwci. + * @NVME_CTRL_VWCI_VWCR: Mask to get the value of VPD Write Cycles Remaining. If + * the VPD Write Cycle Remaining Valid bit is set, then + * this field contains a value indicating the remaining + * number of times that VPD contents are able to be + * updated using the VPD Write command. If this field is + * set to 7Fh, then the remaining number of times that + * VPD contents are able to be updated using the VPD + * Write command is greater than or equal to 7Fh. + * @NVME_CTRL_VWCI_VWCRV: VPD Write Cycle Remaining Valid. If this bit is set, + * then the VPD Write Cycle Remaining field is valid. If + * this bit is cleared, then the VPD Write Cycles + * Remaining field is invalid and cleared to 0h. + */ +enum nvme_id_ctrl_vwci { + NVME_CTRL_VWCI_VWCR = 0x7f << 0, + NVME_CTRL_VWCI_VWCRV = 1 << 7, +}; + +/** + * enum nvme_id_ctrl_mec - Flags indicating the capabilities of the Management + * Endpoint in the Controller, &struct nvme_id_ctrl.mec. + * @NVME_CTRL_MEC_SMBUSME: If set, then the NVM Subsystem contains a Management + * Endpoint on an SMBus/I2C port. + * @NVME_CTRL_MEC_PCIEME: If set, then the NVM Subsystem contains a Management + * Endpoint on a PCIe port. + */ +enum nvme_id_ctrl_mec { + NVME_CTRL_MEC_SMBUSME = 1 << 0, + NVME_CTRL_MEC_PCIEME = 1 << 1, +}; + +/** + * enum nvme_id_ctrl_oacs - Flags indicating the optional Admin commands and + * features supported by the controller, see + * &struct nvme_id_ctrl.oacs. + * @NVME_CTRL_OACS_SECURITY: If set, then the controller supports the + * Security Send and Security Receive commands. + * @NVME_CTRL_OACS_FORMAT: If set, then the controller supports the Format + * NVM command. + * @NVME_CTRL_OACS_FW: If set, then the controller supports the + * Firmware Commit and Firmware Image Download commands. + * @NVME_CTRL_OACS_NS_MGMT: If set, then the controller supports the + * Namespace Management capability. + * @NVME_CTRL_OACS_SELF_TEST: If set, then the controller supports the Device + * Self-test command. + * @NVME_CTRL_OACS_DIRECTIVES: If set, then the controller supports Directives + * and the Directive Send and Directive Receive + * commands. + * @NVME_CTRL_OACS_NVME_MI: If set, then the controller supports the NVMe-MI + * Send and NVMe-MI Receive commands. + * @NVME_CTRL_OACS_VIRT_MGMT: If set, then the controller supports the + * Virtualization Management command. 
+ * @NVME_CTRL_OACS_DBBUF_CFG: If set, then the controller supports the + * Doorbell Buffer Config command. + * @NVME_CTRL_OACS_LBA_STATUS: If set, then the controller supports the Get LBA + * Status capability. + */ +enum nvme_id_ctrl_oacs { + NVME_CTRL_OACS_SECURITY = 1 << 0, + NVME_CTRL_OACS_FORMAT = 1 << 1, + NVME_CTRL_OACS_FW = 1 << 2, + NVME_CTRL_OACS_NS_MGMT = 1 << 3, + NVME_CTRL_OACS_SELF_TEST = 1 << 4, + NVME_CTRL_OACS_DIRECTIVES = 1 << 5, + NVME_CTRL_OACS_NVME_MI = 1 << 6, + NVME_CTRL_OACS_VIRT_MGMT = 1 << 7, + NVME_CTRL_OACS_DBBUF_CFG = 1 << 8, + NVME_CTRL_OACS_LBA_STATUS = 1 << 9, +}; + +/** + * enum nvme_id_ctrl_frmw - Flags and values indicates capabilities regarding + * firmware updates from &struct nvme_id_ctrl.frmw. + * @NVME_CTRL_FRMW_1ST_RO: If set, the first firmware slot is readonly + * @NVME_CTRL_FRMW_NR_SLOTS: Mask to get the value of the number of + * firmware slots that the controller supports. + * @NVME_CTRL_FRMW_FW_ACT_NO_RESET: If set, the controller supports firmware + * activation without a reset. + */ +enum nvme_id_ctrl_frmw { + NVME_CTRL_FRMW_1ST_RO = 1 << 0, + NVME_CTRL_FRMW_NR_SLOTS = 3 << 1, + NVME_CTRL_FRMW_FW_ACT_NO_RESET = 1 << 4, +}; + +/** + * enum nvme_id_ctrl_lpa - Flags indicating optional attributes for log pages + * that are accessed via the Get Log Page command. + * @NVME_CTRL_LPA_SMART_PER_NS: + * @NVME_CTRL_LPA_CMD_EFFECTS: + * @NVME_CTRL_LPA_EXTENDED: + * @NVME_CTRL_LPA_TELEMETRY: + * @NVME_CTRL_LPA_PERSETENT_EVENT: + */ +enum nvme_id_ctrl_lpa { + NVME_CTRL_LPA_SMART_PER_NS = 1 << 0, + NVME_CTRL_LPA_CMD_EFFECTS = 1 << 1, + NVME_CTRL_LPA_EXTENDED = 1 << 2, + NVME_CTRL_LPA_TELEMETRY = 1 << 3, + NVME_CTRL_LPA_PERSETENT_EVENT = 1 << 4, +}; + +/** + * enum nvme_id_ctrl_avscc - Flags indicating the configuration settings for + * Admin Vendor Specific command handling. + * @NVME_CTRL_AVSCC_AVS: If set, all Admin Vendor Specific Commands use the + * optional vendor specific command format with NDT and + * NDM fields. + */ +enum nvme_id_ctrl_avscc { + NVME_CTRL_AVSCC_AVS = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_apsta - Flags indicating the attributes of the autonomous + * power state transition feature. + * @NVME_CTRL_APSTA_APST: If set, then the controller supports autonomous power + * state transitions. + */ +enum nvme_id_ctrl_apsta { + NVME_CTRL_APSTA_APST = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_rpmbs - This field indicates if the controller supports + * one or more Replay Protected Memory Blocks, from + * &struct nvme_id_ctrl.rpmbs. + * @NVME_CTRL_RPMBS_NR_UNITS: Mask to get the value of the Number of RPMB Units + * @NVME_CTRL_RPMBS_AUTH_METHOD: Mask to get the value of the Authentication Method + * @NVME_CTRL_RPMBS_TOTAL_SIZE: Mask to get the value of Total Size + * @NVME_CTRL_RPMBS_ACCESS_SIZE: Mask to get the value of Access Size + */ +enum nvme_id_ctrl_rpmbs { + NVME_CTRL_RPMBS_NR_UNITS = 7 << 0, + NVME_CTRL_RPMBS_AUTH_METHOD = 7 << 3, + NVME_CTRL_RPMBS_TOTAL_SIZE = 0xff << 16, + NVME_CTRL_RPMBS_ACCESS_SIZE = 0xff << 24, +}; + +/** + * enum nvme_id_ctrl_dsto - Flags indicating the optional Device Self-test + * command or operation behaviors supported by the + * controller or NVM subsystem. + * @NVME_CTRL_DSTO_ONE_DST: If set, then the NVM subsystem supports only one + * device self-test operation in progress at a time. 
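+ *
+ * Example (editor's illustrative sketch, not part of the upstream API; the
+ * helper name and the le16_to_cpu()-style endian conversion are assumptions):
+ * a host would normally gate a self-test on the OACS capability bit and then
+ * consult this field for the one-at-a-time restriction:
+ *
+ *	static inline int can_start_self_test(const struct nvme_id_ctrl *id)
+ *	{
+ *		if (!(le16_to_cpu(id->oacs) & NVME_CTRL_OACS_SELF_TEST))
+ *			return 0;	/* Device Self-test command not supported */
+ *		return 1;	/* NVME_CTRL_DSTO_ONE_DST only limits concurrency */
+ *	}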
+ */ +enum nvme_id_ctrl_dsto { + NVME_CTRL_DSTO_ONE_DST = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_hctm - Flags indicate the attributes of the host + * controlled thermal management feature + * @NVME_CTRL_HCTMA_HCTM: then the controller supports host controlled thermal + * management, and the Set Features command and Get + * Features command with the Feature Identifier field + * set to %NVME_FEAT_FID_HCTM. + */ +enum nvme_id_ctrl_hctm { + NVME_CTRL_HCTMA_HCTM = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_sanicap - Indicates attributes for sanitize operations. + * @NVME_CTRL_SANICAP_CES: Crypto Erase Support. If set, then the + * controller supports the Crypto Erase sanitize operation. + * @NVME_CTRL_SANICAP_BES: Block Erase Support. If set, then the controller + * supports the Block Erase sanitize operation. + * @NVME_CTRL_SANICAP_OWS: Overwrite Support. If set, then the controller + * supports the Overwrite sanitize operation. + * @NVME_CTRL_SANICAP_NDI: No-Deallocate Inhibited. If set and the No- + * Deallocate Response Mode bit is set, then the + * controller deallocates after the sanitize + * operation even if the No-Deallocate After + * Sanitize bit is set in a Sanitize command. + * @NVME_CTRL_SANICAP_NODMMAS: No-Deallocate Modifies Media After Sanitize, + * mask to extract value. + */ +enum nvme_id_ctrl_sanicap { + NVME_CTRL_SANICAP_CES = 1 << 0, + NVME_CTRL_SANICAP_BES = 1 << 1, + NVME_CTRL_SANICAP_OWS = 1 << 2, + NVME_CTRL_SANICAP_NDI = 1 << 29, + NVME_CTRL_SANICAP_NODMMAS = 3 << 30, +}; + +/** + * enum nvme_id_ctrl_anacap - This field indicates the capabilities associated + * with Asymmetric Namespace Access Reporting. + * @NVME_CTRL_ANACAP_OPT: If set, then the controller is able to + * report ANA Optimized state. + * @NVME_CTRL_ANACAP_NON_OPT: If set, then the controller is able to + * report ANA Non-Optimized state. + * @NVME_CTRL_ANACAP_INACCESSIBLE: If set, then the controller is able to + * report ANA Inaccessible state. + * @NVME_CTRL_ANACAP_PERSISTENT_LOSS: If set, then the controller is able to + * report ANA Persistent Loss state. + * @NVME_CTRL_ANACAP_CHANGE: If set, then the controller is able to + * report ANA Change state. + * @NVME_CTRL_ANACAP_GRPID_NO_CHG: If set, then the ANAGRPID field in the + * Identify Namespace data structure + * (&struct nvme_id_ns.anagrpid), does not + * change while the namespace is attached to + * any controller. + * @NVME_CTRL_ANACAP_GRPID_MGMT: If set, then the controller supports a + * non-zero value in the ANAGRPID field of + * the Namespace Management command. + */ +enum nvme_id_ctrl_anacap { + NVME_CTRL_ANACAP_OPT = 1 << 0, + NVME_CTRL_ANACAP_NON_OPT = 1 << 1, + NVME_CTRL_ANACAP_INACCESSIBLE = 1 << 2, + NVME_CTRL_ANACAP_PERSISTENT_LOSS = 1 << 3, + NVME_CTRL_ANACAP_CHANGE = 1 << 4, + NVME_CTRL_ANACAP_GRPID_NO_CHG = 1 << 6, + NVME_CTRL_ANACAP_GRPID_MGMT = 1 << 7, +}; + +/** + * enum nvme_id_ctrl_sqes - Defines the required and maximum Submission Queue + * entry size when using the NVM Command Set. + * @NVME_CTRL_SQES_MIN: Mask to get the value of the required Submission Queue + * Entry size when using the NVM Command Set. + * @NVME_CTRL_SQES_MAX: Mask to get the value of the maximum Submission Queue + * entry size when using the NVM Command Set. + */ +enum nvme_id_ctrl_sqes { + NVME_CTRL_SQES_MIN = 0xf << 0, + NVME_CTRL_SQES_MAX = 0xf << 4, +}; + +/** + * enum nvme_id_ctrl_cqes - Defines the required and maximum Completion Queue + * entry size when using the NVM Command Set. 
+ * @NVME_CTRL_CQES_MIN: Mask to get the value of the required Completion Queue + * Entry size when using the NVM Command Set. + * @NVME_CTRL_CQES_MAX: Mask to get the value of the maximum Completion Queue + * entry size when using the NVM Command Set. + */ +enum nvme_id_ctrl_cqes { + NVME_CTRL_CQES_MIN = 0xf << 0, + NVME_CTRL_CQES_MAX = 0xf << 4, +}; + +/** + * enum nvme_id_ctrl_oncs - This field indicates the optional NVM commands and + * features supported by the controller. + * @NVME_CTRL_ONCS_COMPARE: If set, then the controller supports + * the Compare command. + * @NVME_CTRL_ONCS_WRITE_UNCORRECTABLE: If set, then the controller supports + * the Write Uncorrectable command. + * @NVME_CTRL_ONCS_DSM: If set, then the controller supports + * the Dataset Management command. + * @NVME_CTRL_ONCS_WRITE_ZEROES: If set, then the controller supports + * the Write Zeroes command. + * @NVME_CTRL_ONCS_SAVE_FEATURES: If set, then the controller supports + * the Save field set to a non-zero value + * in the Set Features command and the + * Select field set to a non-zero value in + * the Get Features command. + * @NVME_CTRL_ONCS_RESERVATIONS: If set, then the controller supports + * reservations. + * @NVME_CTRL_ONCS_TIMESTAMP: If set, then the controller supports + * the Timestamp feature. + * @NVME_CTRL_ONCS_VERIFY: If set, then the controller supports + * the Verify command. + */ +enum nvme_id_ctrl_oncs { + NVME_CTRL_ONCS_COMPARE = 1 << 0, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, + NVME_CTRL_ONCS_DSM = 1 << 2, + NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, + NVME_CTRL_ONCS_SAVE_FEATURES = 1 << 4, + NVME_CTRL_ONCS_RESERVATIONS = 1 << 5, + NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, + NVME_CTRL_ONCS_VERIFY = 1 << 7, +}; + +/** + * enum nvme_id_ctrl_fuses - This field indicates the fused operations that the + * controller supports. + * @NVME_CTRL_FUSES_COMPARE_AND_WRITE: If set, then the controller supports the + * Compare and Write fused operation. + */ +enum nvme_id_ctrl_fuses { + NVME_CTRL_FUSES_COMPARE_AND_WRITE = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_fna - This field indicates attributes for the Format NVM + * command. + * @NVME_CTRL_FNA_FMT_ALL_NAMESPACES: If set, then all namespaces in an NVM + * subsystem shall be configured with the + * same attributes and a format (excluding + * secure erase) of any namespace results in + * a format of all namespaces in an NVM + * subsystem. If cleared, then the + * controller supports format on a per + * namespace basis. + * @NVME_CTRL_FNA_SEC_ALL_NAMESPACES: If set, then any secure erase performed + * as part of a format operation results in + * a secure erase of all namespaces in the + * NVM subsystem. If cleared, then any + * secure erase performed as part of a + * format results in a secure erase of the + * particular namespace specified. + * @NVME_CTRL_FNA_CRYPTO_ERASE: If set, then cryptographic erase is + * supported. If cleared, then cryptographic + * erase is not supported. + */ +enum nvme_id_ctrl_fna { + NVME_CTRL_FNA_FMT_ALL_NAMESPACES = 1 << 0, + NVME_CTRL_FNA_SEC_ALL_NAMESPACES = 1 << 1, + NVME_CTRL_FNA_CRYPTO_ERASE = 1 << 2, +}; + +/** + * enum nvme_id_ctrl_vwc - + * @NVME_CTRL_VWC_PRESENT: If set, indicates a volatile write cache is present. + * If a volatile write cache is present, then the host + * controls whether the volatile write cache is enabled + * with a Set Features command specifying the value + * %NVME_FEAT_FID_VOLATILE_WC. + * @NVME_CTRL_VWC_FLUSH: Mask to get the value of the flush command behavior. 
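+ *
+ * Example (editor's illustrative sketch, not part of the upstream API; the
+ * helper name is made up): only attempt to toggle the cache with a Set
+ * Features command using %NVME_FEAT_FID_VOLATILE_WC when it is reported as
+ * present:
+ *
+ *	static inline int vwc_present(const struct nvme_id_ctrl *id)
+ *	{
+ *		return !!(id->vwc & NVME_CTRL_VWC_PRESENT);
+ *	}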
+ */ +enum nvme_id_ctrl_vwc { + NVME_CTRL_VWC_PRESENT = 1 << 0, + NVME_CTRL_VWC_FLUSH = 3 << 1, +}; + +/** + * enum nvme_id_ctrl_nvscc - This field indicates the configuration settings + * for NVM Vendor Specific command handling. + * @NVME_CTRL_NVSCC_FMT: If set, all NVM Vendor Specific Commands use the + * format format with NDT and NDM fields. + */ +enum nvme_id_ctrl_nvscc { + NVME_CTRL_NVSCC_FMT = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_nwpc - This field indicates the optional namespace write + * protection capabilities supported by the + * controller. + * @NVME_CTRL_NWPC_WRITE_PROTECT: If set, then the controller shall + * support the No Write Protect and + * Write Protect namespace write + * protection states and may support + * the Write Protect Until Power + * Cycle state and Permanent Write + * Protect namespace write + * protection states. + * @NVME_CTRL_NWPC_WRITE_PROTECT_POWER_CYCLE: If set, then the controller + * supports the Write Protect Until + * Power Cycle state. + * @NVME_CTRL_NWPC_WRITE_PROTECT_PERMANENT: If set, then the controller + * supports the Permanent Write + * Protect state. + */ +enum nvme_id_ctrl_nwpc { + NVME_CTRL_NWPC_WRITE_PROTECT = 1 << 0, + NVME_CTRL_NWPC_WRITE_PROTECT_POWER_CYCLE= 1 << 1, + NVME_CTRL_NWPC_WRITE_PROTECT_PERMANENT = 1 << 2, +}; + +/** + * enum nvme_id_ctrl_sgls - This field indicates if SGLs are supported for the + * NVM Command Set and the particular SGL types supported. + * @NVME_CTRL_SGLS_SUPPORTED: + * @NVME_CTRL_SGLS_KEYED: + * @NVME_CTRL_SGLS_BIT_BUCKET: + * @NVME_CTRL_SGLS_MPTR_BYTE_ALIGNED: + * @NVME_CTRL_SGLS_OVERSIZE: + * @NVME_CTRL_SGLS_MPTR_SGL: + * @NVME_CTRL_SGLS_OFFSET: + * @NVME_CTRL_SGLS_TPORT: + */ +enum nvme_id_ctrl_sgls { + NVME_CTRL_SGLS_SUPPORTED = 3 << 0, + NVME_CTRL_SGLS_KEYED = 1 << 2, + NVME_CTRL_SGLS_BIT_BUCKET = 1 << 16, + NVME_CTRL_SGLS_MPTR_BYTE_ALIGNED = 1 << 17, + NVME_CTRL_SGLS_OVERSIZE = 1 << 18, + NVME_CTRL_SGLS_MPTR_SGL = 1 << 19, + NVME_CTRL_SGLS_OFFSET = 1 << 20, + NVME_CTRL_SGLS_TPORT = 1 << 21, +}; + +/** + * enum nvme_id_ctrl_fcatt - This field indicates attributes of the controller + * that are specific to NVMe over Fabrics. + * @NVME_CTRL_FCATT_DYNAMIC: If cleared, then the NVM subsystem uses a dynamic + * controller model. If set, then the NVM subsystem + * uses a static controller model. + */ +enum nvme_id_ctrl_fcatt { + NVME_CTRL_FCATT_DYNAMIC = 1 << 0, +}; + +/** + * enum nvme_id_ctrl_ofcs - Indicate whether the controller supports optional + * fabric commands. + * @NVME_CTRL_OFCS_DISCONNECT: If set, then the controller supports the + * Disconnect command and deletion of individual + * I/O Queues. + */ +enum nvme_id_ctrl_ofcs { + NVME_CTRL_OFCS_DISCONNECT = 1 << 0, +}; + +/** + * struct nvme_lbaf - LBA Format Data Structure + * @ms: Metadata Size indicates the number of metadata bytes provided per LBA + * based on the LBA Data Size indicated. + * @ds: LBA Data Size indicates the LBA data size supported, reported as a + * power of two. + * @rp: Relative Performance, see &enum nvme_lbaf_rp. + */ +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +/** + * enum nvme_lbaf_rp - This field indicates the relative performance of the LBA + * format indicated relative to other LBA formats supported + * by the controller. 
+ * @NVME_LBAF_RP_BEST: Best performance + * @NVME_LBAF_RP_BETTER: Better performance + * @NVME_LBAF_RP_GOOD: Good performance + * @NVME_LBAF_RP_DEGRADED: Degraded performance + * @NVME_LBAF_RP_MASK: Mask to get the relative performance value from the + * field + */ +enum nvme_lbaf_rp { + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_LBAF_RP_MASK = 3, +}; + +/** + * struct nvme_id_ns - Identify Namespace data structure + * @nsze: Namespace Size indicates the total size of the namespace in + * logical blocks. The number of logical blocks is based on the + * formatted LBA size. + * @ncap: Namespace Capacity indicates the maximum number of logical blocks + * that may be allocated in the namespace at any point in time. The + * number of logical blocks is based on the formatted LBA size. + * @nuse: Namespace Utilization indicates the current number of logical + * blocks allocated in the namespace. This field is smaller than or + * equal to the Namespace Capacity. The number of logical blocks is + * based on the formatted LBA size. + * @nsfeat: Namespace Features, see &enum nvme_id_nsfeat. + * @nlbaf: Number of LBA Formats defines the number of supported LBA data + * size and metadata size combinations supported by the namespace + * and the highest possible index to &struct nvme_id_ns.lbaf. + * @flbas: Formatted LBA Size, see &enum nvme_id_ns_flbas. + * @mc: Metadata Capabilities, see &enum nvme_id_ns_mc. + * @dpc: End-to-end Data Protection Capabilities, see + * &enum nvme_id_ns_dpc. + * @dps: End-to-end Data Protection Type Settings, see + * &enum nvme_id_ns_dps. + * @nmic: Namespace Multi-path I/O and Namespace Sharing Capabilities, see + * &enum nvme_id_ns_nmic. + * @rescap: Reservation Capabilities, see &enum nvme_id_ns_rescap. + * @fpi: Format Progress Indicator, see &enum nvme_nd_ns_fpi. + * @dlfeat: Deallocate Logical Block Features, see &enum nvme_id_ns_dlfeat. + * @nawun: Namespace Atomic Write Unit Normal indicates the + * namespace specific size of the write operation guaranteed to be + * written atomically to the NVM during normal operation. + * @nawupf: Namespace Atomic Write Unit Power Fail indicates the + * namespace specific size of the write operation guaranteed to be + * written atomically to the NVM during a power fail or error + * condition. + * @nacwu: Namespace Atomic Compare & Write Unit indicates the namespace + * specific size of the write operation guaranteed to be written + * atomically to the NVM for a Compare and Write fused command. + * @nabsn: Namespace Atomic Boundary Size Normal indicates the atomic + * boundary size for this namespace for the NAWUN value. This field + * is specified in logical blocks. + * @nabo: Namespace Atomic Boundary Offset indicates the LBA on this + * namespace where the first atomic boundary starts. + * @nabspf: Namespace Atomic Boundary Size Power Fail indicates the atomic + * boundary size for this namespace specific to the Namespace Atomic + * Write Unit Power Fail value. This field is specified in logical + * blocks. + * @noiob: Namespace Optimal I/O Boundary indicates the optimal I/O boundary + * for this namespace. This field is specified in logical blocks. + * The host should construct Read and Write commands that do not + * cross the I/O boundary to achieve optimal performance. + * @nvmcap: NVM Capacity indicates the total size of the NVM allocated to + * this namespace. The value is in bytes. 
+ * @npwg: Namespace Preferred Write Granularity indicates the smallest + * recommended write granularity in logical blocks for this + * namespace. This is a 0's based value. + * @npwa: Namespace Preferred Write Alignment indicates the recommended + * write alignment in logical blocks for this namespace. This is a + * 0's based value. + * @npdg: Namespace Preferred Deallocate Granularity indicates the + * recommended granularity in logical blocks for the Dataset + * Management command with the Attribute - Deallocate bit. + * @npda: Namespace Preferred Deallocate Alignment indicates the + * recommended alignment in logical blocks for the Dataset + * Management command with the Attribute - Deallocate bit + * @nows: Namespace Optimal Write Size indicates the size in logical blocks + * for optimal write performance for this namespace. This is a 0's + * based value. + * @mssrl: Maximum Single Source Range Length indicates the maximum number + * of logical blocks that may be specified in each valid Source Range + * field of a Copy command. + * @mcl: Maximum Copy Length indicates the maximum number of logical + * blocks that may be specified in a Copy command. + * @msrc: Maximum Source Range Count indicates the maximum number of Source + * Range entries that may be used to specify source data in a Copy + * command. This is a 0’s based value. + * @rsvd81: Reserved + * @nulbaf: Number of Unique Capability LBA Formats defines the number of + * supported user data size and metadata size combinations supported + * by the namespace that may not share the same capabilities. LBA + * formats shall be allocated in order and packed sequentially. + * @rsvd83: Reserved + * @anagrpid: ANA Group Identifier indicates the ANA Group Identifier of the + * ANA group of which the namespace is a member. + * @rsvd96: Reserved + * @nsattr: Namespace Attributes, see &enum nvme_id_ns_attr. + * @nvmsetid: NVM Set Identifier indicates the NVM Set with which this + * namespace is associated. + * @endgid: Endurance Group Identifier indicates the Endurance Group with + * which this namespace is associated. + * @nguid: Namespace Globally Unique Identifier contains a 128-bit value + * that is globally unique and assigned to the namespace when the + * namespace is created. This field remains fixed throughout the + * life of the namespace and is preserved across namespace and + * controller operations + * @eui64: IEEE Extended Unique Identifier contains a 64-bit IEEE Extended + * Unique Identifier (EUI-64) that is globally unique and assigned + * to the namespace when the namespace is created. This field + * remains fixed throughout the life of the namespace and is + * preserved across namespace and controller operations + * @lbaf: LBA Format, see &struct nvme_lbaf. 
+ * @lbstm: Logical Block Storage Tag Mask for end-to-end protection + * @vs: Vendor Specific + */ +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 dlfeat; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __le16 mssrl; + __le32 mcl; + __u8 msrc; + __u8 rsvd81; + __u8 nulbaf; + __u8 rsvd83[9]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __le16 nvmsetid; + __le16 endgid; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[64]; + __le64 lbstm; + __u8 vs[3704]; +}; + +/** + * enum nvme_id_nsfeat - This field defines features of the namespace. + * @NVME_NS_FEAT_THIN: If set, indicates that the namespace supports thin + * provisioning. Specifically, the Namespace Capacity + * reported may be less than the Namespace Size. + * @NVME_NS_FEAT_NATOMIC: If set, indicates that the fields NAWUN, NAWUPF, and + * NACWU are defined for this namespace and should be + * used by the host for this namespace instead of the + * AWUN, AWUPF, and ACWU fields in the Identify + * Controller data structure. + * @NVME_NS_FEAT_DULBE: If set, indicates that the controller supports the + * Deallocated or Unwritten Logical Block error for + * this namespace. + * @NVME_NS_FEAT_ID_REUSE: If set, indicates that the value in the NGUID field + * for this namespace, if non-zero, is never reused by + * the controller and that the value in the EUI64 field + * for this namespace, if non-zero, is never reused by + * the controller. + * @NVME_NS_FEAT_IO_OPT: If set, indicates that the fields NPWG, NPWA, NPDG, + * NPDA, and NOWS are defined for this namespace and + * should be used by the host for I/O optimization. + */ +enum nvme_id_nsfeat { + NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FEAT_NATOMIC = 1 << 1, + NVME_NS_FEAT_DULBE = 1 << 2, + NVME_NS_FEAT_ID_REUSE = 1 << 3, + NVME_NS_FEAT_IO_OPT = 1 << 4, +}; + +/** + * enum nvme_id_ns_flbas - This field indicates the LBA data size & metadata + * size combination that the namespace has been + * formatted with + * @NVME_NS_FLBAS_LOWER_MASK: Mask to get the least significant 4 bits of the + * index of one of the supported LBA Formats + * indicated in + * :c:type:`struct nvme_id_ns <nvme_id_ns>`.lbaf. + * @NVME_NS_FLBAS_META_EXT: Applicable only if format contains metadata. If + * this bit is set, indicates that the metadata is + * transferred at the end of the data LBA, creating an + * extended data LBA. If cleared, indicates that all + * of the metadata for a command is transferred as a + * separate contiguous buffer of data. + * @NVME_NS_FLBAS_HIGHER_MASK: Mask to get the most significant 2 bits of the + * index of one of the supported LBA Formats + * indicated in + * :c:type:`struct nvme_id_ns <nvme_id_ns>`.lbaf. + */ +enum nvme_id_ns_flbas { + NVME_NS_FLBAS_LOWER_MASK = 15 << 0, + NVME_NS_FLBAS_META_EXT = 1 << 4, + NVME_NS_FLBAS_HIGHER_MASK = 3 << 5, +}; + +/** + * enum nvme_id_ns_mc - This field indicates the capabilities for metadata. + * @NVME_NS_MC_EXTENDED: If set, indicates that the namespace supports the + * metadata being transferred as part of an extended data LBA. + * @NVME_NS_MC_SEPARATE: If set, indicates that the namespace supports the + * metadata being transferred as part of a separate buffer + * that is specified in the Metadata Pointer. 
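+ *
+ * Example (editor's illustrative sketch, not part of the upstream API; helper
+ * names are made up and the format index is assumed to fit in the lower FLBAS
+ * bits): the in-use LBA data size and the extended-metadata layout can be
+ * derived from the FLBAS bits documented above:
+ *
+ *	static inline __u32 ns_lba_size(const struct nvme_id_ns *ns)
+ *	{
+ *		__u8 idx = ns->flbas & NVME_NS_FLBAS_LOWER_MASK;
+ *
+ *		return 1 << ns->lbaf[idx].ds;	/* ds is a power-of-two exponent */
+ *	}
+ *
+ *	static inline int ns_meta_is_extended(const struct nvme_id_ns *ns)
+ *	{
+ *		return !!(ns->flbas & NVME_NS_FLBAS_META_EXT);
+ *	}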
+ */ +enum nvme_id_ns_mc { + NVME_NS_MC_EXTENDED = 1 << 0, + NVME_NS_MC_SEPARATE = 1 << 1, +}; + +/** + * enum nvme_id_ns_dpc - This field indicates the capabilities for the + * end-to-end data protection feature. + * @NVME_NS_DPC_PI_TYPE1: If set, indicates that the namespace supports + * Protection Information Type 1. + * @NVME_NS_DPC_PI_TYPE2: If set, indicates that the namespace supports + * Protection Information Type 2. + * @NVME_NS_DPC_PI_TYPE3: If set, indicates that the namespace supports + * Protection Information Type 3. + * @NVME_NS_DPC_PI_FIRST: If set, indicates that the namespace supports + * protection information transferred as the first eight + * bytes of metadata. + * @NVME_NS_DPC_PI_LAST: If set, indicates that the namespace supports + * protection information transferred as the last eight + * bytes of metadata. + */ +enum nvme_id_ns_dpc { + NVME_NS_DPC_PI_TYPE1 = 1 << 0, + NVME_NS_DPC_PI_TYPE2 = 1 << 1, + NVME_NS_DPC_PI_TYPE3 = 1 << 2, + NVME_NS_DPC_PI_FIRST = 1 << 3, + NVME_NS_DPC_PI_LAST = 1 << 4, +}; + +/** + * enum nvme_id_ns_dps - This field indicates the Type settings for the + * end-to-end data protection feature. + * @NVME_NS_DPS_PI_NONE: Protection information is not enabled + * @NVME_NS_DPS_PI_TYPE1: Protection information is enabled, Type 1 + * @NVME_NS_DPS_PI_TYPE2: Protection information is enabled, Type 2 + * @NVME_NS_DPS_PI_TYPE3: Protection information is enabled, Type 3 + * @NVME_NS_DPS_PI_MASK: Mask to get the value of the PI type + * @NVME_NS_DPS_PI_FIRST: If set, indicates that the protection information, if + * enabled, is transferred as the first eight bytes of + * metadata. + */ +enum nvme_id_ns_dps { + NVME_NS_DPS_PI_NONE = 0, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, + NVME_NS_DPS_PI_MASK = 7 << 0, + NVME_NS_DPS_PI_FIRST = 1 << 3, +}; + +/** + * enum nvme_id_ns_nmic - This field specifies multi-path I/O and namespace + * sharing capabilities of the namespace. + * @NVME_NS_NMIC_SHARED: If set, then the namespace may be attached to two or + * more controllers in the NVM subsystem concurrently + */ +enum nvme_id_ns_nmic { + NVME_NS_NMIC_SHARED = 1 << 0, +}; + +/** + * enum nvme_id_ns_rescap - This field indicates the reservation capabilities + * of the namespace. + * @NVME_NS_RESCAP_PTPL: If set, indicates that the namespace supports the + * Persist Through Power Loss capability. + * @NVME_NS_RESCAP_WE: If set, indicates that the namespace supports the + * Write Exclusive reservation type. + * @NVME_NS_RESCAP_EA: If set, indicates that the namespace supports the + * Exclusive Access reservation type. + * @NVME_NS_RESCAP_WERO: If set, indicates that the namespace supports the + * Write Exclusive - Registrants Only reservation type. + * @NVME_NS_RESCAP_EARO: If set, indicates that the namespace supports the + * Exclusive Access - Registrants Only reservation type. + * @NVME_NS_RESCAP_WEAR: If set, indicates that the namespace supports the + * Write Exclusive - All Registrants reservation type. + * @NVME_NS_RESCAP_EAAR: If set, indicates that the namespace supports the + * Exclusive Access - All Registrants reservation type. + * @NVME_NS_RESCAP_IEK_13: If set, indicates that Ignore Existing Key is used + * as defined in revision 1.3 or later of this specification. 
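+ *
+ * Example (editor's illustrative sketch, not part of the upstream API; the
+ * helper name is made up): check that reservations can persist across power
+ * loss before registering with the Persist Through Power Loss option:
+ *
+ *	static inline int ns_supports_ptpl(const struct nvme_id_ns *ns)
+ *	{
+ *		return !!(ns->rescap & NVME_NS_RESCAP_PTPL);
+ *	}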
+ */ +enum nvme_id_ns_rescap { + NVME_NS_RESCAP_PTPL = 1 << 0, + NVME_NS_RESCAP_WE = 1 << 1, + NVME_NS_RESCAP_EA = 1 << 2, + NVME_NS_RESCAP_WERO = 1 << 3, + NVME_NS_RESCAP_EARO = 1 << 4, + NVME_NS_RESCAP_WEAR = 1 << 5, + NVME_NS_RESCAP_EAAR = 1 << 6, + NVME_NS_RESCAP_IEK_13 = 1 << 7, +}; + +/** + * enum nvme_nd_ns_fpi - If a format operation is in progress, this field + * indicates the percentage of the namespace that remains + * to be formatted. + * @NVME_NS_FPI_REMAINING: Mask to get the format percent remaining value + * @NVME_NS_FPI_SUPPORTED: If set, indicates that the namespace supports the + * Format Progress Indicator defined for the field. + */ +enum nvme_nd_ns_fpi { + NVME_NS_FPI_REMAINING = 0x7f << 0, + NVME_NS_FPI_SUPPORTED = 1 << 7, +}; + +/** + * enum nvme_id_ns_dlfeat - This field indicates information about features + * that affect deallocating logical blocks for this + * namespace. + * @NVME_NS_DLFEAT_RB: Mask to get the value of the read behavior + * @NVME_NS_DLFEAT_RB_NR: Read behavior is not reported + * @NVME_NS_DLFEAT_RB_ALL_0S: A deallocated logical block returns all bytes + * cleared to 0h. + * @NVME_NS_DLFEAT_RB_ALL_FS: A deallocated logical block returns all bytes + * set to FFh. + * @NVME_NS_DLFEAT_WRITE_ZEROES: If set, indicates that the controller supports + * the Deallocate bit in the Write Zeroes command + * for this namespace. + * @NVME_NS_DLFEAT_CRC_GUARD: If set, indicates that the Guard field for + * deallocated logical blocks that contain + * protection information is set to the CRC for + * the value read from the deallocated logical + * block and its metadata. + */ +enum nvme_id_ns_dlfeat { + NVME_NS_DLFEAT_RB = 7 << 0, + NVME_NS_DLFEAT_RB_NR = 0, + NVME_NS_DLFEAT_RB_ALL_0S = 1, + NVME_NS_DLFEAT_RB_ALL_FS = 2, + NVME_NS_DLFEAT_WRITE_ZEROES = 1 << 3, + NVME_NS_DLFEAT_CRC_GUARD = 1 << 4, +}; + +/** + * enum nvme_id_ns_attr - Specifies attributes of the namespace. + * @NVME_NS_NSATTR_WRITE_PROTECTED: If set, then the namespace is currently + * write protected and all write access to the + * namespace shall fail. + */ +enum nvme_id_ns_attr { + NVME_NS_NSATTR_WRITE_PROTECTED = 1 << 0 +}; + +/** + * struct nvme_ns_id_desc - Namespace Identification Descriptor + * @nidt: Namespace Identifier Type, see &enum nvme_ns_id_desc_nidt + * @nidl: Namespace Identifier Length contains the length in bytes of the + * &struct nvme_ns_id_desc.nid. + * @rsvd: Reserved + * @nid: Namespace Identifier contains a value that is globally unique and + * assigned to the namespace when the namespace is created. The length + * is defined in &struct nvme_ns_id_desc.nidl. + */ +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 rsvd; + __u8 nid[]; +}; + +/** + * enum nvme_ns_id_desc_nidt - Known namespace identifier types + * @NVME_NIDT_EUI64: IEEE Extended Unique Identifier, the NID field contains a + * copy of the EUI64 field in the struct nvme_id_ns.eui64. + * @NVME_NIDT_NGUID: Namespace Globally Unique Identifier, the NID field + * contains a copy of the NGUID field in struct nvme_id_ns.nguid. + * @NVME_NIDT_UUID: The NID field contains a 128-bit Universally Unique + * Identifier (UUID) as specified in RFC 4122. + * @NVME_NIDT_CSI: The NID field contains the command set identifier. 
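+ *
+ * Example (editor's illustrative sketch, not part of the upstream API; the
+ * function name and buffer length handling are assumptions): walking the
+ * descriptor list returned by the Identify command, which is terminated by a
+ * zeroed descriptor:
+ *
+ *	static inline void for_each_ns_desc(void *buf, unsigned int len,
+ *				void (*fn)(struct nvme_ns_id_desc *))
+ *	{
+ *		__u8 *p = buf, *end = p + len;
+ *
+ *		while (p + sizeof(struct nvme_ns_id_desc) <= end) {
+ *			struct nvme_ns_id_desc *d = (struct nvme_ns_id_desc *)p;
+ *
+ *			if (!d->nidl)
+ *				break;
+ *			fn(d);
+ *			p += sizeof(*d) + d->nidl;
+ *		}
+ *	}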
+ */ +enum nvme_ns_id_desc_nidt { + NVME_NIDT_EUI64 = 1, + NVME_NIDT_NGUID = 2, + NVME_NIDT_UUID = 3, + NVME_NIDT_CSI = 4, +}; + +enum nvme_ns_id_desc_nidt_lens { + NVME_NIDT_EUI64_LEN = 8, + NVME_NIDT_NGUID_LEN = 16, + NVME_NIDT_UUID_LEN = 16, + NVME_NIDT_CSI_LEN = 1, +}; + +/** + * struct nvme_nvmset_attr - NVM Set Attributes Entry + * @nvmsetid: NVM Set Identifier + * @endgid: Endurance Group Identifier + * @rsvd4: Reserved + * @rr4kt: Random 4 KiB Read Typical indicates the typical + * time to complete a 4 KiB random read in 100 nanosecond units + * when the NVM Set is in a Predictable Latency Mode Deterministic + * Window and there is 1 outstanding command per NVM Set. + * @ows: Optimal Write Size + * @tnvmsetcap: Total NVM Set Capacity + * @unvmsetcap: Unallocated NVM Set Capacity + * @rsvd48: Reserved + */ +struct nvme_nvmset_attr { + __le16 nvmsetid; + __le16 endgid; + __u8 rsvd4[4]; + __le32 rr4kt; + __le32 ows; + __u8 tnvmsetcap[16]; + __u8 unvmsetcap[16]; + __u8 rsvd48[80]; +}; + +/** + * struct nvme_id_nvmset_list - + * @nid: Nvmset id + * @rsvd1: Reserved + * @ent: nvmset id list + */ +struct nvme_id_nvmset_list { + __u8 nid; + __u8 rsvd1[127]; + struct nvme_nvmset_attr ent[NVME_ID_NVMSET_LIST_MAX]; +}; + +/** + * struct nvme_id_independent_id_ns - + * @nsfeat: common namespace features + * @nmic: Namespace Multi-path I/O and Namespace + * Sharing Capabilities + * @rescap: Reservation Capabilities + * @fpi: Format Progress Indicator + * @anagrpid: ANA Group Identifier + * @nsattr: Namespace Attributes + * @rsvd9: reserved + * @nvmsetid: NVM Set Identifier + * @endgid: Endurance Group Identifier + * @nstat: Namespace Status + * @rsvd15: reserved + */ +struct nvme_id_independent_id_ns { + __u8 nsfeat; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __le32 anagrpid; + __u8 nsattr; + __u8 rsvd9; + __le16 nvmsetid; + __le16 endgid; + __u8 nstat; + __u8 rsvd15[4081]; +}; + +/** + * struct nvme_id_ns_granularity_desc - + * @nszegran: Namespace Size Granularity + * @ncapgran: Namespace Capacity Granularity + */ +struct nvme_id_ns_granularity_desc { + __le64 nszegran; + __le64 ncapgran; +}; + +/** + * struct nvme_id_ns_granularity_list - + * @attributes: Namespace Granularity Attributes + * @num_descriptors: Number of Descriptors + * @rsvd5: reserved + * @entry: Namespace Granularity Descriptor + * @rsvd288: reserved + */ +struct nvme_id_ns_granularity_list { + __le32 attributes; + __u8 num_descriptors; + __u8 rsvd5[27]; + struct nvme_id_ns_granularity_desc entry[NVME_ID_ND_DESCRIPTOR_MAX]; + __u8 rsvd288[3808]; +}; + +/** + * struct nvme_id_uuid_list_entry - + * @header: UUID Lists Entry Header + * @rsvd1: reserved + * @uuid: 128-bit Universally Unique Identifier + */ +struct nvme_id_uuid_list_entry { + __u8 header; + __u8 rsvd1[15]; + __u8 uuid[16]; +}; + +/** + * enum nvme_id_uuid - + * @NVME_ID_UUID_HDR_ASSOCIATION_MASK: + * @NVME_ID_UUID_ASSOCIATION_NONE: + * @NVME_ID_UUID_ASSOCIATION_VENDOR: + * @NVME_ID_UUID_ASSOCIATION_SUBSYSTEM_VENDOR: + */ +enum nvme_id_uuid { + NVME_ID_UUID_HDR_ASSOCIATION_MASK = 0x3, + NVME_ID_UUID_ASSOCIATION_NONE = 0, + NVME_ID_UUID_ASSOCIATION_VENDOR = 1, + NVME_ID_UUID_ASSOCIATION_SUBSYSTEM_VENDOR = 2, +}; + +/** + * struct nvme_id_uuid_list - + * @rsvd0: reserved + * @entry: UUID list entry + */ +struct nvme_id_uuid_list { + __u8 rsvd0[32]; + struct nvme_id_uuid_list_entry entry[NVME_ID_UUID_LIST_MAX]; +}; + +/** + * struct nvme_ctrl_list - + * @num: Number of Identifiers + * @identifier: NVM subsystem unique controller identifier + */ +struct nvme_ctrl_list 
{ + __le16 num; + __le16 identifier[NVME_ID_CTRL_LIST_MAX]; +}; + +/** + * struct nvme_ns_list - + * @ns: Namespace Identifier + */ +struct nvme_ns_list { + __le32 ns[NVME_ID_NS_LIST_MAX]; +}; + +/** + * struct nvme_id_ctrl_nvm - + * @vsl: Verify Size Limit + * @wzsl: Write Zeroes Size Limit + * @wusl: Write Uncorrectable Size Limit + * @dmrl: Dataset Management Ranges Limit + * @dmrsl: Dataset Management Range Size Limit + * @dmsl: Dataset Management Size Limit + * @rsvd16: reserved + */ +struct nvme_id_ctrl_nvm { + __u8 vsl; + __u8 wzsl; + __u8 wusl; + __u8 dmrl; + __u32 dmrsl; + __u64 dmsl; + __u8 rsvd16[4080]; +}; + +/** + * struct nvme_nvm_id_ns - + * @lbstm: Logical Block Storage Tag Mask + * @pic: Protection Information Capabilities + * @rsvd9: Reserved + * @elbaf: List of Extended LBA Format Support + * @rsvd268: Reserved + */ +struct nvme_nvm_id_ns { + __le64 lbstm; + __u8 pic; + __u8 rsvd9[3]; + __le32 elbaf[64]; + __u8 rsvd268[3828]; +}; + +/** + * struct nvme_zns_lbafe - + * @zsze: Zone Size + * @zdes: Zone Descriptor Extension Size + * @rsvd9: reserved + */ +struct nvme_zns_lbafe { + __le64 zsze; + __u8 zdes; + __u8 rsvd9[7]; +}; + +/** + * struct nvme_zns_id_ns - Zoned Namespace Command Set Specific + * Identify Namespace Data Structure + * @zoc: Zone Operation Characteristics + * @ozcs: Optional Zoned Command Support + * @mar: Maximum Active Resources + * @mor: Maximum Open Resources + * @rrl: Reset Recommended Limit + * @frl: Finish Recommended Limit + * @rrl1: Reset Recommended Limit 1 + * @rrl2: Reset Recommended Limit 2 + * @rrl3: Reset Recommended Limit 3 + * @frl1: Finish Recommended Limit 1 + * @frl2: Finish Recommended Limit 2 + * @frl3: Finish Recommended Limit 3 + * @numzrwa: Number of ZRWA Resources + * @zrwafg: ZRWA Flush Granularity + * @zrwasz: ZRWA Size + * @zrwacap: ZRWA Capability + * @rsvd53: Reserved + * @lbafe: LBA Format Extension + * @vs: Vendor Specific + */ +struct nvme_zns_id_ns { + __le16 zoc; + __le16 ozcs; + __le32 mar; + __le32 mor; + __le32 rrl; + __le32 frl; + __le32 rrl1; + __le32 rrl2; + __le32 rrl3; + __le32 frl1; + __le32 frl2; + __le32 frl3; + __le32 numzrwa; + __le16 zrwafg; + __le16 zrwasz; + __u8 zrwacap; + __u8 rsvd53[2763]; + struct nvme_zns_lbafe lbafe[64]; + __u8 vs[256]; +}; + +/** + * struct nvme_zns_id_ctrl - + * @zasl: + * @rsvd1: Reserved + */ +struct nvme_zns_id_ctrl { + __u8 zasl; + __u8 rsvd1[4095]; +}; + +/** + * struct nvme_primary_ctrl_cap - + * @cntlid: Controller Identifier + * @portid: Port Identifier + * @crt: Controller Resource Types + * @rsvd5: reserved + * @vqfrt: VQ Resources Flexible Total + * @vqrfa: VQ Resources Flexible Assigned + * @vqrfap: VQ Resources Flexible Allocated to Primary + * @vqprt: VQ Resources Private Total + * @vqfrsm: VQ Resources Flexible Secondary Maximum + * @vqgran: VQ Flexible Resource Preferred Granularity + * @rsvd48: reserved + * @vifrt: VI Resources Flexible Total + * @virfa: VI Resources Flexible Assigned + * @virfap: VI Resources Flexible Allocated to Primary + * @viprt: VI Resources Private Total + * @vifrsm: VI Resources Flexible Secondary Maximum + * @vigran: VI Flexible Resource Preferred Granularity + * @rsvd80: reserved + */ +struct nvme_primary_ctrl_cap { + __le16 cntlid; + __le16 portid; + __u8 crt; + __u8 rsvd5[27]; + __le32 vqfrt; + __le32 vqrfa; + __le16 vqrfap; + __le16 vqprt; + __le16 vqfrsm; + __le16 vqgran; + __u8 rsvd48[16]; + __le32 vifrt; + __le32 virfa; + __le16 virfap; + __le16 viprt; + __le16 vifrsm; + __le16 vigran; + __u8 rsvd80[4016]; +}; + +/** + * struct 
nvme_secondary_ctrl - + * @scid: Secondary Controller Identifier + * @pcid: Primary Controller Identifier + * @scs: Secondary Controller State + * @rsvd5: Reserved + * @vfn: Virtual Function Number + * @nvq: Number of VQ Flexible Resources Assigned + * @nvi: Number of VI Flexible Resources Assigned + * @rsvd14: Reserved + */ +struct nvme_secondary_ctrl { + __le16 scid; + __le16 pcid; + __u8 scs; + __u8 rsvd5[3]; + __le16 vfn; + __le16 nvq; + __le16 nvi; + __u8 rsvd14[18]; +}; + +/** + * struct nvme_secondary_ctrl_list - + * @num: Number of Identifiers + * @rsvd: Reserved + * @sc_entry: Secondary Controller Entry + */ +struct nvme_secondary_ctrl_list { + __u8 num; + __u8 rsvd[31]; + struct nvme_secondary_ctrl sc_entry[NVME_ID_SECONDARY_CTRL_MAX]; +}; + +/** + * struct nvme_id_iocs - NVMe Identify IO Command Set data structure + * @iocsc: List of supported IO Command Set Combination vectors + */ +struct nvme_id_iocs { + __u64 iocsc[512]; +}; + +/** + * struct nvme_id_domain_attr - Domain Attributes Entry + * @dom_id: Domain Identifier + * @rsvd2: Reserved + * @dom_cap: Total Domain Capacity + * @unalloc_dom_cap: Unallocated Domain Capacity + * @max_egrp_dom_cap: Max Endurance Group Domain Capacity + * @rsvd64: Reserved + */ +struct nvme_id_domain_attr { + __le16 dom_id; + __u8 rsvd2[14]; + __u8 dom_cap[16]; + __u8 unalloc_dom_cap[16]; + __u8 max_egrp_dom_cap[16]; + __u8 rsvd64[64]; +}; + +/** + * struct nvme_id_domain_list - + * @num: Number of domain attributes + * @rsvd: Reserved + * @domain_attr: List of domain attributes + */ +struct nvme_id_domain_list { + __u8 num; + __u8 rsvd[127]; + struct nvme_id_domain_attr domain_attr[NVME_ID_DOMAIN_LIST_MAX]; +}; + +/** + * struct nvme_id_endurance_group_list - + * @num: Number of Identifiers + * @identifier: Endurance Group Identifier + */ +struct nvme_id_endurance_group_list { + __le16 num; + __le16 identifier[NVME_ID_ENDURANCE_GROUP_LIST_MAX]; +}; + +/** + * struct nvme_supported_log_pages - + * @lid_support: Log Page Identifier Supported + * + * Supported Log Pages (Log Identifier 00h) + */ +struct nvme_supported_log_pages { + __le32 lid_support[NVME_LOG_SUPPORTED_LOG_PAGES_MAX]; +}; + +/** + * struct nvme_error_log_page - Error Information Log Entry (Log Identifier 01h) + * @error_count: Error Count: a 64-bit incrementing error count, + * indicating a unique identifier for this error. The error + * count starts at %1h, is incremented for each unique error + * log entry, and is retained across power off conditions. + * A value of %0h indicates an invalid entry; this value + * is used when there are lost entries or when there are + * fewer errors than the maximum number of entries the + * controller supports. If the value of this field is + * %FFFFFFFFh, then the field shall be set to 1h when + * incremented (i.e., rolls over to %1h). Prior to NVMe + * 1.4, processing of incrementing beyond %FFFFFFFFh is + * unspecified. + * @sqid: Submission Queue ID: indicates the Submission Queue + * Identifier of the command that the error information is + * associated with. If the error is not specific to + * a particular command, then this field shall be set to + * %FFFFh. + * @cmdid: Command ID: indicates the Command Identifier of the + * command that the error is associated with. If the error + * is not specific to a particular command, then this field + * shall be set to %FFFFh. + * @status_field: Bits 15-1: Status Field: indicates the Status Field for + * the command that completed. 
If the error is not specific + * to a particular command, then this field reports the most + * applicable status value. + * Bit 0: Phase Tag: may indicate the Phase Tag posted for + * the command. + * @parm_error_location: Parameter Error Location: indicates the byte and bit of + * the command parameter that the error is associated with, + * if applicable. If the parameter spans multiple bytes or + * bits, then the location indicates the first byte and bit + * of the parameter. + * Bits 10-8: Bit in command that contained the error. + * Valid values are 0 to 7. + * Bits 7-0: Byte in command that contained the error. + * Valid values are 0 to 63. + * @lba: LBA: This field indicates the first LBA that experienced + * the error condition, if applicable. + * @nsid: Namespace: This field indicates the NSID of the namespace + * that the error is associated with, if applicable. + * @vs: Vendor Specific Information Available: If there is + * additional vendor specific error information available, + * this field provides the log page identifier associated + * with that page. A value of %0h indicates that no additional + * information is available. Valid values are in the range + * of %80h to %FFh. + * @trtype: Transport Type (TRTYPE): indicates the Transport Type of + * the transport associated with the error. The values in + * this field are the same as the TRTYPE values in the + * Discovery Log Page Entry. If the error is not transport + * related, this field shall be cleared to %0h. If the error + * is transport related, this field shall be set to the type + * of the transport - see &enum nvme_trtype. + * @rsvd: Reserved + * @cs: Command Specific Information: This field contains command + * specific information. If used, the command definition + * specifies the information returned. + * @trtype_spec_info: Transport Type Specific Information + * @rsvd2: Reserved + */ +struct nvme_error_log_page { + __le64 error_count; + __le16 sqid; + __le16 cmdid; + __le16 status_field; + __le16 parm_error_location; + __le64 lba; + __le32 nsid; + __u8 vs; + __u8 trtype; + __u8 rsvd[2]; + __le64 cs; + __le16 trtype_spec_info; + __u8 rsvd2[22]; +}; + +/** + * enum nvme_err_pel - + * @NVME_ERR_PEL_BYTE_MASK: + * @NVME_ERR_PEL_BIT_MASK: + */ +enum nvme_err_pel { + NVME_ERR_PEL_BYTE_MASK = 0xf, + NVME_ERR_PEL_BIT_MASK = 0x70, +}; + +/** + * struct nvme_smart_log - SMART / Health Information Log (Log Identifier 02h) + * @critical_warning: This field indicates critical warnings for the state + * of the controller. Critical warnings may result in an + * asynchronous event notification to the host. Bits in + * this field represent the current associated state and + * are not persistent (see &enum nvme_smart_crit). + * @temperature: Composite Temperature: Contains a value corresponding + * to a temperature in Kelvins that represents the current + * composite temperature of the controller and namespace(s) + * associated with that controller. The manner in which + * this value is computed is implementation specific and + * may not represent the actual temperature of any physical + * point in the NVM subsystem. Warning and critical + * overheating composite temperature threshold values are + * reported by the WCTEMP and CCTEMP fields in the Identify + * Controller data structure. + * @avail_spare: Available Spare: Contains a normalized percentage (0% + * to 100%) of the remaining spare capacity available. 
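A minimal usage sketch for the error log entries defined above — an illustration only, not part of the upstream header; it assumes the struct nvme_error_log_page definition above is in scope and that glibc's <endian.h> byte-order helpers are available:

#include <endian.h>
#include <stdio.h>

// Print one error log entry. An error_count of 0 marks an unused slot,
// and 0xFFFF in sqid/cmdid means the error is not tied to one command.
static void print_error_entry(const struct nvme_error_log_page *e)
{
    if (!le64toh(e->error_count))
        return;
    printf("err %llu sqid 0x%04x cmdid 0x%04x status 0x%04x nsid %u lba %llu\n",
           (unsigned long long)le64toh(e->error_count),
           le16toh(e->sqid), le16toh(e->cmdid),
           le16toh(e->status_field) >> 1, // bit 0 is the phase tag
           le32toh(e->nsid),
           (unsigned long long)le64toh(e->lba));
}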
+ * @spare_thresh: Available Spare Threshold: When the Available Spare + * falls below the threshold indicated in this field, an + * asynchronous event completion may occur. The value is + * indicated as a normalized percentage (0% to 100%). + * The values 101 to 255 are reserved. + * @percent_used: Percentage Used: Contains a vendor specific estimate + * of the percentage of NVM subsystem life used based on + * the actual usage and the manufacturer's prediction of + * NVM life. A value of 100 indicates that the estimated + * endurance of the NVM in the NVM subsystem has been + * consumed, but may not indicate an NVM subsystem failure. + * The value is allowed to exceed 100. Percentages greater + * than 254 shall be represented as 255. This value shall + * be updated once per power-on hour (when the controller + * is not in a sleep state). + * @endu_grp_crit_warn_sumry: Endurance Group Critical Warning Summary: This field + * indicates critical warnings for the state of Endurance + * Groups. Bits in this field represent the current associated + * state and are not persistent (see &enum nvme_smart_egcw). + * @rsvd7: Reserved + * @data_units_read: Data Units Read: Contains the number of 512 byte data + * units the host has read from the controller; this value + * does not include metadata. This value is reported in + * thousands (i.e., a value of 1 corresponds to 1000 + * units of 512 bytes read) and is rounded up (e.g., one + * indicates the that number of 512 byte data units read + * is from 1 to 1000, three indicates that the number of + * 512 byte data units read is from 2001 to 3000). When + * the LBA size is a value other than 512 bytes, the + * controller shall convert the amount of data read to + * 512 byte units. For the NVM command set, logical blocks + * read as part of Compare, Read, and Verify operations + * shall be included in this value. A value of %0h in + * this field indicates that the number of Data Units Read + * is not reported. + * @data_units_written: Data Units Written: Contains the number of 512 byte + * data units the host has written to the controller; + * this value does not include metadata. This value is + * reported in thousands (i.e., a value of 1 corresponds + * to 1000 units of 512 bytes written) and is rounded up + * (e.g., one indicates that the number of 512 byte data + * units written is from 1 to 1,000, three indicates that + * the number of 512 byte data units written is from 2001 + * to 3000). When the LBA size is a value other than 512 + * bytes, the controller shall convert the amount of data + * written to 512 byte units. For the NVM command set, + * logical blocks written as part of Write operations shall + * be included in this value. Write Uncorrectable commands + * and Write Zeroes commands shall not impact this value. + * A value of %0h in this field indicates that the number + * of Data Units Written is not reported. + * @host_reads: Host Read Commands: Contains the number of read commands + * completed by the controller. For the NVM command set, + * this value is the sum of the number of Compare commands + * and the number of Read commands. + * @host_writes: Host Write Commands: Contains the number of write + * commands completed by the controller. For the NVM + * command set, this is the number of Write commands. + * @ctrl_busy_time: Controller Busy Time: Contains the amount of time the + * controller is busy with I/O commands. 
The controller + * is busy when there is a command outstanding to an I/O + * Queue (specifically, a command was issued via an I/O + * Submission Queue Tail doorbell write and the corresponding + * completion queue entry has not been posted yet to the + * associated I/O Completion Queue). This value is + * reported in minutes. + * @power_cycles: Power Cycles: Contains the number of power cycles. + * @power_on_hours: Power On Hours: Contains the number of power-on hours. + * This may not include time that the controller was + * powered and in a non-operational power state. + * @unsafe_shutdowns: Unsafe Shutdowns: Contains the number of unsafe + * shutdowns. This count is incremented when a Shutdown + * Notification (CC.SHN) is not received prior to loss of power. + * @media_errors: Media and Data Integrity Errors: Contains the number + * of occurrences where the controller detected an + * unrecovered data integrity error. Errors such as + * uncorrectable ECC, CRC checksum failure, or LBA tag + * mismatch are included in this field. Errors introduced + * as a result of a Write Uncorrectable command may or + * may not be included in this field. + * @num_err_log_entries: Number of Error Information Log Entries: Contains the + * number of Error Information log entries over the life + * of the controller. + * @warning_temp_time: Warning Composite Temperature Time: Contains the amount + * of time in minutes that the controller is operational + * and the Composite Temperature is greater than or equal + * to the Warning Composite Temperature Threshold (WCTEMP) + * field and less than the Critical Composite Temperature + * Threshold (CCTEMP) field in the Identify Controller + * data structure. If the value of the WCTEMP or CCTEMP + * field is %0h, then this field is always cleared to %0h + * regardless of the Composite Temperature value. + * @critical_comp_time: Critical Composite Temperature Time: Contains the amount + * of time in minutes that the controller is operational + * and the Composite Temperature is greater than or equal + * to the Critical Composite Temperature Threshold (CCTEMP) + * field in the Identify Controller data structure. If + * the value of the CCTEMP field is %0h, then this field + * is always cleared to 0h regardless of the Composite + * Temperature value. + * @temp_sensor: Temperature Sensor 1-8: Contains the current temperature + * in degrees Kelvin reported by temperature sensors 1-8. + * The physical point in the NVM subsystem whose temperature + * is reported by the temperature sensor and the temperature + * accuracy is implementation specific. An implementation + * that does not implement the temperature sensor reports + * a value of %0h. + * @thm_temp1_trans_count: Thermal Management Temperature 1 Transition Count: + * Contains the number of times the controller transitioned + * to lower power active power states or performed vendor + * specific thermal management actions while minimizing + * the impact on performance in order to attempt to reduce + * the Composite Temperature because of the host controlled + * thermal management feature (i.e., the Composite + * Temperature rose above the Thermal Management + * Temperature 1). This counter shall not wrap once the + * value %FFFFFFFFh is reached. A value of %0h, indicates + * that this transition has never occurred or this field + * is not implemented. 
+ * @thm_temp2_trans_count: Thermal Management Temperature 2 Transition Count + * @thm_temp1_total_time: Total Time For Thermal Management Temperature 1: + * Contains the number of seconds that the controller + * had transitioned to lower power active power states or + * performed vendor specific thermal management actions + * while minimizing the impact on performance in order to + * attempt to reduce the Composite Temperature because of + * the host controlled thermal management feature. This + * counter shall not wrap once the value %FFFFFFFFh is + * reached. A value of %0h, indicates that this transition + * has never occurred or this field is not implemented. + * @thm_temp2_total_time: Total Time For Thermal Management Temperature 2 + * @rsvd232: Reserved + */ +struct nvme_smart_log { + __u8 critical_warning; + __u8 temperature[2]; + __u8 avail_spare; + __u8 spare_thresh; + __u8 percent_used; + __u8 endu_grp_crit_warn_sumry; + __u8 rsvd7[25]; + __u8 data_units_read[16]; + __u8 data_units_written[16]; + __u8 host_reads[16]; + __u8 host_writes[16]; + __u8 ctrl_busy_time[16]; + __u8 power_cycles[16]; + __u8 power_on_hours[16]; + __u8 unsafe_shutdowns[16]; + __u8 media_errors[16]; + __u8 num_err_log_entries[16]; + __le32 warning_temp_time; + __le32 critical_comp_time; + __le16 temp_sensor[8]; + __le32 thm_temp1_trans_count; + __le32 thm_temp2_trans_count; + __le32 thm_temp1_total_time; + __le32 thm_temp2_total_time; + __u8 rsvd232[280]; +}; + +/** + * enum nvme_smart_crit - Critical Warning + * @NVME_SMART_CRIT_SPARE: If set, then the available spare capacity has fallen + * below the threshold. + * @NVME_SMART_CRIT_TEMPERATURE: If set, then a temperature is either greater + * than or equal to an over temperature threshold; or + * less than or equal to an under temperature threshold. + * @NVME_SMART_CRIT_DEGRADED: If set, then the NVM subsystem reliability has + * been degraded due to significant media related errors + * or any internal error that degrades NVM subsystem + * reliability. + * @NVME_SMART_CRIT_MEDIA: If set, then all of the media has been placed in read + * only mode. The controller shall not set this bit if + * the read-only condition on the media is a result of + * a change in the write protection state of a namespace. + * @NVME_SMART_CRIT_VOLATILE_MEMORY: If set, then the volatile memory backup + * device has failed. This field is only valid if the + * controller has a volatile memory backup solution. + * @NVME_SMART_CRIT_PMR_RO: If set, then the Persistent Memory Region has become + * read-only or unreliable. + */ +enum nvme_smart_crit { + NVME_SMART_CRIT_SPARE = 1 << 0, + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, + NVME_SMART_CRIT_DEGRADED = 1 << 2, + NVME_SMART_CRIT_MEDIA = 1 << 3, + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, + NVME_SMART_CRIT_PMR_RO = 1 << 5, +}; + +/** + * enum nvme_smart_egcw - Endurance Group Critical Warning Summary + * @NVME_SMART_EGCW_SPARE: If set, then the available spare capacity of one or + * more Endurance Groups has fallen below the threshold. + * @NVME_SMART_EGCW_DEGRADED: If set, then the reliability of one or more + * Endurance Groups has been degraded due to significant + * media related errors or any internal error that + * degrades NVM subsystem reliability. + * @NVME_SMART_EGCW_RO: If set, then the namespaces in one or more Endurance + * Groups have been placed in read only mode not as + * a result of a change in the write protection state + * of a namespace. 
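A short, hedged decode of the SMART / Health log defined above — an illustrative sketch rather than libnvme API, assuming the struct nvme_smart_log and enum nvme_smart_crit definitions above are in scope and approximating the Kelvin-to-Celsius conversion with an integer offset:

#include <stdio.h>

static void smart_summary(const struct nvme_smart_log *s)
{
    // Composite temperature is a 16-bit little-endian value in Kelvins.
    int temp_k = s->temperature[0] | (s->temperature[1] << 8);

    printf("composite temperature: %d C\n", temp_k - 273);
    printf("available spare: %u%% (threshold %u%%)\n",
           s->avail_spare, s->spare_thresh);
    printf("percentage used: %u%%\n", s->percent_used);

    if (s->critical_warning & NVME_SMART_CRIT_SPARE)
        printf("critical: spare capacity below threshold\n");
    if (s->critical_warning & NVME_SMART_CRIT_TEMPERATURE)
        printf("critical: temperature outside thresholds\n");
    if (s->critical_warning & NVME_SMART_CRIT_MEDIA)
        printf("critical: media placed in read-only mode\n");
}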
+ */
+enum nvme_smart_egcw {
+ NVME_SMART_EGCW_SPARE = 1 << 0,
+ NVME_SMART_EGCW_DEGRADED = 1 << 2,
+ NVME_SMART_EGCW_RO = 1 << 3,
+};
+
+/**
+ * struct nvme_firmware_slot -
+ * @afi: Active Firmware Info
+ * @rsvd1: Reserved
+ * @frs: Firmware Revision for Slot
+ * @rsvd2: Reserved
+ */
+struct nvme_firmware_slot {
+ __u8 afi;
+ __u8 rsvd1[7];
+ char frs[7][8];
+ __u8 rsvd2[448];
+};
+
+/**
+ * struct nvme_cmd_effects_log -
+ * @acs: Admin Command Supported
+ * @iocs: I/O Command Supported
+ * @rsvd: Reserved
+ */
+struct nvme_cmd_effects_log {
+ __le32 acs[256];
+ __le32 iocs[256];
+ __u8 rsvd[2048];
+};
+
+/**
+ * enum nvme_cmd_effects -
+ * @NVME_CMD_EFFECTS_CSUPP: Command Supported
+ * @NVME_CMD_EFFECTS_LBCC: Logical Block Content Change
+ * @NVME_CMD_EFFECTS_NCC: Namespace Capability Change
+ * @NVME_CMD_EFFECTS_NIC: Namespace Inventory Change
+ * @NVME_CMD_EFFECTS_CCC: Controller Capability Change
+ * @NVME_CMD_EFFECTS_CSE_MASK: Command Submission and Execution
+ * @NVME_CMD_EFFECTS_UUID_SEL: UUID Selection Supported
+ */
+enum nvme_cmd_effects {
+ NVME_CMD_EFFECTS_CSUPP = 1 << 0,
+ NVME_CMD_EFFECTS_LBCC = 1 << 1,
+ NVME_CMD_EFFECTS_NCC = 1 << 2,
+ NVME_CMD_EFFECTS_NIC = 1 << 3,
+ NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+};
+
+/**
+ * struct nvme_st_result - Self-test Result
+ * @dsts: Device Self-test Status: Indicates the device self-test code and the
+ * status of the operation (see &enum nvme_status_result and &enum nvme_st_code).
+ * @seg: Segment Number: Indicates the segment number where the first self-test
+ * failure occurred. If Device Self-test Status (@dsts) is not set to
+ * #NVME_ST_RESULT_KNOWN_SEG_FAIL, then this field should be ignored.
+ * @vdi: Valid Diagnostic Information: Indicates the diagnostic failure
+ * information that is reported. See &enum nvme_st_valid_diag_info.
+ * @rsvd: Reserved
+ * @poh: Power On Hours (POH): Indicates the number of power-on hours at the
+ * time the device self-test operation was completed or aborted. This
+ * does not include time that the controller was powered and in a low
+ * power state condition.
+ * @nsid: Namespace Identifier (NSID): Indicates the namespace that the Failing
+ * LBA occurred on. Valid only when the NSID Valid bit
+ * (#NVME_ST_VALID_DIAG_INFO_NSID) is set in the Valid Diagnostic
+ * Information (@vdi) field.
+ * @flba: Failing LBA: indicates the LBA of the logical block that caused the
+ * test to fail. If the device encountered more than one failed logical
+ * block during the test, then this field only indicates one of those
+ * failed logical blocks. Valid only when the FLBA Valid bit
+ * (#NVME_ST_VALID_DIAG_INFO_FLBA) is set in the Valid Diagnostic
+ * Information (@vdi) field.
+ * @sct: Status Code Type: This field may contain additional information related
+ * to errors or conditions. Bits 2:0 may contain additional information
+ * relating to errors or conditions that occurred during the device
+ * self-test operation represented in the same format used in the Status
+ * Code Type field of the completion queue entry (refer to &enum nvme_status_field).
+ * Valid only when the SCT Valid bit (#NVME_ST_VALID_DIAG_INFO_SCT) is
+ * set in the Valid Diagnostic Information (@vdi) field.
+ * @sc: Status Code: This field may contain additional information relating + * to errors or conditions that occurred during the device self-test + * operation represented in the same format used in the Status Code field + * of the completion queue entry. Valid only when the SCT Valid bit + * (#NVME_ST_VALID_DIAG_INFO_SC) is set in the Valid Diagnostic + * Information (@vdi) field. + * @vs: Vendor Specific. + */ +struct nvme_st_result { + __u8 dsts; + __u8 seg; + __u8 vdi; + __u8 rsvd; + __le64 poh; + __le32 nsid; + __le64 flba; + __u8 sct; + __u8 sc; + __u8 vs[2]; +} __attribute__((packed)); + +/** + * enum nvme_status_result - Result of the device self-test operation + * @NVME_ST_RESULT_NO_ERR: Operation completed without error. + * @NVME_ST_RESULT_ABORTED: Operation was aborted by a Device Self-test command. + * @NVME_ST_RESULT_CLR: Operation was aborted by a Controller Level Reset. + * @NVME_ST_RESULT_NS_REMOVED: Operation was aborted due to a removal of + * a namespace from the namespace inventory. + * @NVME_ST_RESULT_ABORTED_FORMAT: Operation was aborted due to the processing + * of a Format NVM command. + * @NVME_ST_RESULT_FATAL_ERR: A fatal error or unknown test error occurred + * while the controller was executing the device + * self-test operation and the operation did + * not complete. + * @NVME_ST_RESULT_UNKNOWN_SEG_FAIL: Operation completed with a segment that failed + * and the segment that failed is not known. + * @NVME_ST_RESULT_KNOWN_SEG_FAIL: Operation completed with one or more failed + * segments and the first segment that failed + * is indicated in the Segment Number field. + * @NVME_ST_RESULT_ABORTED_UNKNOWN: Operation was aborted for unknown reason. + * @NVME_ST_RESULT_ABORTED_SANITIZE: Operation was aborted due to a sanitize operation. + * @NVME_ST_RESULT_NOT_USED: Entry not used (does not contain a test result). + * @NVME_ST_RESULT_MASK: Mask to get the status result value from + * the &struct nvme_st_result.dsts field. + */ +enum nvme_status_result { + NVME_ST_RESULT_NO_ERR = 0x0, + NVME_ST_RESULT_ABORTED = 0x1, + NVME_ST_RESULT_CLR = 0x2, + NVME_ST_RESULT_NS_REMOVED = 0x3, + NVME_ST_RESULT_ABORTED_FORMAT = 0x4, + NVME_ST_RESULT_FATAL_ERR = 0x5, + NVME_ST_RESULT_UNKNOWN_SEG_FAIL = 0x6, + NVME_ST_RESULT_KNOWN_SEG_FAIL = 0x7, + NVME_ST_RESULT_ABORTED_UNKNOWN = 0x8, + NVME_ST_RESULT_ABORTED_SANITIZE = 0x9, + NVME_ST_RESULT_NOT_USED = 0xf, + NVME_ST_RESULT_MASK = 0xf, +}; + +/** + * enum nvme_st_code - Self-test Code value + * @NVME_ST_CODE_RESERVED: Reserved. + * @NVME_ST_CODE_SHORT: Short device self-test operation. + * @NVME_ST_CODE_EXTENDED: Extended device self-test operation. + * @NVME_ST_CODE_VS: Vendor specific. + * @NVME_ST_CODE_SHIFT: Shift amount to get the code value from the + * &struct nvme_st_result.dsts field. + */ +enum nvme_st_code { + NVME_ST_CODE_RESERVED = 0x0, + NVME_ST_CODE_SHORT = 0x1, + NVME_ST_CODE_EXTENDED = 0x2, + NVME_ST_CODE_VS = 0xe, + NVME_ST_CODE_SHIFT = 4, +}; + +/** + * enum nvme_st_curr_op - Current Device Self-Test Operation + * @NVME_ST_CURR_OP_NOT_RUNNING: No device self-test operation in progress. + * @NVME_ST_CURR_OP_SHORT: Short device self-test operation in progress. + * @NVME_ST_CURR_OP_EXTENDED: Extended device self-test operation in progress. + * @NVME_ST_CURR_OP_VS: Vendor specific. + * @NVME_ST_CURR_OP_RESERVED: Reserved. + * @NVME_ST_CURR_OP_MASK: Mask to get the current operation value from the + * &struct nvme_self_test_log.current_operation field. 
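A hedged sketch of unpacking one of these self-test result entries — assuming the struct nvme_st_result and the enums above, plus the NVME_ST_VALID_DIAG_INFO_* flags defined a little further below, are in scope, with glibc's <endian.h> supplying the byte-order helpers:

#include <endian.h>
#include <stdio.h>

static void print_st_result(const struct nvme_st_result *r)
{
    __u8 result = r->dsts & NVME_ST_RESULT_MASK;
    __u8 code = r->dsts >> NVME_ST_CODE_SHIFT;

    if (result == NVME_ST_RESULT_NOT_USED)
        return; // slot holds no test result

    printf("self-test code %u, result %u, power-on hours %llu",
           code, result, (unsigned long long)le64toh(r->poh));
    if (r->vdi & NVME_ST_VALID_DIAG_INFO_NSID)
        printf(", nsid %u", le32toh(r->nsid));
    if (r->vdi & NVME_ST_VALID_DIAG_INFO_FLBA)
        printf(", failing lba %llu", (unsigned long long)le64toh(r->flba));
    printf("\n");
}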
+ * @NVME_ST_CURR_OP_CMPL_MASK: Mask to get the current operation completion value
+ * from the &struct nvme_self_test_log.completion field.
+ */
+enum nvme_st_curr_op {
+ NVME_ST_CURR_OP_NOT_RUNNING = 0x0,
+ NVME_ST_CURR_OP_SHORT = 0x1,
+ NVME_ST_CURR_OP_EXTENDED = 0x2,
+ NVME_ST_CURR_OP_VS = 0xe,
+ NVME_ST_CURR_OP_RESERVED = 0xf,
+ NVME_ST_CURR_OP_MASK = 0xf,
+ NVME_ST_CURR_OP_CMPL_MASK = 0x7f,
+};
+
+/**
+ * enum nvme_st_valid_diag_info - Valid Diagnostic Information
+ * @NVME_ST_VALID_DIAG_INFO_NSID: NSID Valid: if set, then the contents of
+ * the Namespace Identifier field are valid.
+ * @NVME_ST_VALID_DIAG_INFO_FLBA: FLBA Valid: if set, then the contents of
+ * the Failing LBA field are valid.
+ * @NVME_ST_VALID_DIAG_INFO_SCT: SCT Valid: if set, then the contents of
+ * the Status Code Type field are valid.
+ * @NVME_ST_VALID_DIAG_INFO_SC: SC Valid: if set, then the contents of
+ * the Status Code field are valid.
+ */
+enum nvme_st_valid_diag_info {
+ NVME_ST_VALID_DIAG_INFO_NSID = 1 << 0,
+ NVME_ST_VALID_DIAG_INFO_FLBA = 1 << 1,
+ NVME_ST_VALID_DIAG_INFO_SCT = 1 << 2,
+ NVME_ST_VALID_DIAG_INFO_SC = 1 << 3,
+};
+
+/**
+ * struct nvme_self_test_log - Device Self-test (Log Identifier 06h)
+ * @current_operation: Current Device Self-Test Operation: indicates the status
+ * of the current device self-test operation. If a device
+ * self-test operation is in process (i.e., this field is set
+ * to #NVME_ST_CURR_OP_SHORT or #NVME_ST_CURR_OP_EXTENDED),
+ * then the controller shall not set this field to
+ * #NVME_ST_CURR_OP_NOT_RUNNING until a new Self-test Result
+ * Data Structure is created (i.e., if a device self-test
+ * operation completes or is aborted, then the controller
+ * shall create a Self-test Result Data Structure prior to
+ * setting this field to #NVME_ST_CURR_OP_NOT_RUNNING).
+ * See &enum nvme_st_curr_op.
+ * @completion: Current Device Self-Test Completion: indicates the percentage
+ * of the device self-test operation that is complete (e.g.,
+ * a value of 25 indicates that 25% of the device self-test
+ * operation is complete and 75% remains to be tested).
+ * If the @current_operation field is cleared to
+ * #NVME_ST_CURR_OP_NOT_RUNNING (indicating there is no device
+ * self-test operation in progress), then this field is ignored.
+ * @rsvd: Reserved
+ * @result: Self-test Result Data Structures, see &struct nvme_st_result.
+ */
+struct nvme_self_test_log {
+ __u8 current_operation;
+ __u8 completion;
+ __u8 rsvd[2];
+ struct nvme_st_result result[NVME_LOG_ST_MAX_RESULTS];
+} __attribute__((packed));
+
+/**
+ * enum nvme_cmd_get_log_telemetry_host_lsp -
+ * @NVME_LOG_TELEM_HOST_LSP_RETAIN:
+ * @NVME_LOG_TELEM_HOST_LSP_CREATE:
+ */
+enum nvme_cmd_get_log_telemetry_host_lsp {
+ NVME_LOG_TELEM_HOST_LSP_RETAIN = 0,
+ NVME_LOG_TELEM_HOST_LSP_CREATE = 1,
+};
+
+/**
+ * struct nvme_telemetry_log - Retrieve internal data specific to the
+ * manufacturer.
+ * @lpi: Log Identifier, either %NVME_LOG_LID_TELEMETRY_HOST or
+ * %NVME_LOG_LID_TELEMETRY_CTRL
+ * @rsvd1: Reserved
+ * @ieee: IEEE OUI Identifier is the Organization Unique Identifier (OUI)
+ * for the controller vendor that is able to interpret the data.
+ * @dalb1: Telemetry Controller-Initiated Data Area 1 Last Block is
+ * the value of the last block in this area.
+ * @dalb2: Telemetry Controller-Initiated Data Area 2 Last Block is
+ * the value of the last block in this area.
+ * @dalb3: Telemetry Controller-Initiated Data Area 3 Last Block is
+ * the value of the last block in this area.
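The data-area last-block fields above are indexes of 512-byte blocks; as a rough illustration (assuming the struct nvme_telemetry_log layout defined just below and glibc's <endian.h>), the number of bytes covered through data area 3 can be computed like this:

#include <endian.h>
#include <stddef.h>

// Block 0 is the 512-byte log header, so "last block" N means N + 1 blocks.
static size_t telemetry_bytes_through_da3(const struct nvme_telemetry_log *log)
{
    return ((size_t)le16toh(log->dalb3) + 1) * 512;
}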
+ * @rsvd14: Reserved
+ * @dalb4: Telemetry Controller-Initiated Data Area 4 Last Block is
+ * the value of the last block in this area.
+ * @rsvd20: Reserved
+ * @hostdgn: Telemetry Host-Initiated Data Generation Number is a
+ * value that is incremented each time the host initiates a
+ * capture of its internal controller state in the controller.
+ * @ctrlavail: Telemetry Controller-Initiated Data Available, if cleared,
+ * then the controller telemetry log does not contain saved
+ * internal controller state. If this field is set to 1h, the
+ * controller log contains saved internal controller state. If
+ * this field is set to 1h, the data will be latched until the
+ * host releases it by reading the log with RAE cleared.
+ * @ctrldgn: Telemetry Controller-Initiated Data Generation Number is
+ * a value that is incremented each time the controller initiates a
+ * capture of its internal controller state in the controller.
+ * @rsnident: Reason Identifier is a vendor specific identifier that describes
+ * the operating conditions of the controller at the time of
+ * capture.
+ * @data_area: Telemetry data blocks, vendor specific information data.
+ *
+ * This log consists of a header describing the log and zero or more Telemetry
+ * Data Blocks. All Telemetry Data Blocks are %NVME_LOG_TELEM_BLOCK_SIZE, 512
+ * bytes, in size. This log captures the controller’s internal state.
+ */
+struct nvme_telemetry_log {
+ __u8 lpi;
+ __u8 rsvd1[4];
+ __u8 ieee[3];
+ __le16 dalb1;
+ __le16 dalb2;
+ __le16 dalb3;
+ __u8 rsvd14[2];
+ __le32 dalb4;
+ __u8 rsvd20[361];
+ __u8 hostdgn;
+ __u8 ctrlavail;
+ __u8 ctrldgn;
+ __u8 rsnident[128];
+ __u8 data_area[];
+};
+
+/**
+ * struct nvme_endurance_group_log -
+ * @critical_warning: Critical Warning
+ * @rsvd1: Reserved
+ * @avl_spare: Available Spare
+ * @avl_spare_threshold: Available Spare Threshold
+ * @percent_used: Percentage Used
+ * @rsvd6: Reserved
+ * @endurance_estimate: Endurance Estimate
+ * @data_units_read: Data Units Read
+ * @data_units_written: Data Units Written
+ * @media_units_written: Media Units Written
+ * @host_read_cmds: Host Read Commands
+ * @host_write_cmds: Host Write Commands
+ * @media_data_integrity_err: Media and Data Integrity Errors
+ * @num_err_info_log_entries: Number of Error Information Log Entries
+ * @rsvd160: Reserved
+ */
+struct nvme_endurance_group_log {
+ __u8 critical_warning;
+ __u8 rsvd1[2];
+ __u8 avl_spare;
+ __u8 avl_spare_threshold;
+ __u8 percent_used;
+ __u8 rsvd6[26];
+ __u8 endurance_estimate[16];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 media_units_written[16];
+ __u8 host_read_cmds[16];
+ __u8 host_write_cmds[16];
+ __u8 media_data_integrity_err[16];
+ __u8 num_err_info_log_entries[16];
+ __u8 rsvd160[352];
+};
+
+/**
+ * enum nvme_eg_critical_warning_flags -
+ * @NVME_EG_CRITICAL_WARNING_SPARE:
+ * @NVME_EG_CRITICAL_WARNING_DEGRADED:
+ * @NVME_EG_CRITICAL_WARNING_READ_ONLY:
+ */
+enum nvme_eg_critical_warning_flags {
+ NVME_EG_CRITICAL_WARNING_SPARE = 1 << 0,
+ NVME_EG_CRITICAL_WARNING_DEGRADED = 1 << 2,
+ NVME_EG_CRITICAL_WARNING_READ_ONLY = 1 << 3,
+};
+
+/**
+ * struct nvme_aggregate_endurance_group_event -
+ * @num_entries: Number of entries
+ * @entries: List of entries
+ */
+struct nvme_aggregate_endurance_group_event {
+ __le64 num_entries;
+ __le16 entries[];
+};
+
+/**
+ * struct nvme_nvmset_predictable_lat_log -
+ * @status: Status
+ * @rsvd1: Reserved
+ * @event_type: Event Type
+ * @rsvd4: Reserved
+ * @dtwin_rt: DTWIN Reads Typical
+ * @dtwin_wt: DTWIN
Writes Typical + * @dtwin_tmax: DTWIN Time Maximum + * @ndwin_tmin_hi: NDWIN Time Minimum High + * @ndwin_tmin_lo: NDWIN Time Minimum Low + * @rsvd72: Reserved + * @dtwin_re: DTWIN Reads Estimate + * @dtwin_we: DTWIN Writes Estimate + * @dtwin_te: DTWIN Time Estimate + * @rsvd152: Reserved + */ +struct nvme_nvmset_predictable_lat_log { + __u8 status; + __u8 rsvd1; + __le16 event_type; + __u8 rsvd4[28]; + __le64 dtwin_rt; + __le64 dtwin_wt; + __le64 dtwin_tmax; + __le64 ndwin_tmin_hi; + __le64 ndwin_tmin_lo; + __u8 rsvd72[56]; + __le64 dtwin_re; + __le64 dtwin_we; + __le64 dtwin_te; + __u8 rsvd152[360]; +}; + +/** + * enum nvme_nvmeset_pl_status - + * @NVME_NVMSET_PL_STATUS_DISABLED: + * @NVME_NVMSET_PL_STATUS_DTWIN: + * @NVME_NVMSET_PL_STATUS_NDWIN: + */ +enum nvme_nvmeset_pl_status { + NVME_NVMSET_PL_STATUS_DISABLED = 0, + NVME_NVMSET_PL_STATUS_DTWIN = 1, + NVME_NVMSET_PL_STATUS_NDWIN = 2, +}; + +/** + * enum nvme_nvmset_pl_events - + * @NVME_NVMSET_PL_EVENT_DTWIN_READ_WARN: + * @NVME_NVMSET_PL_EVENT_DTWIN_WRITE_WARN: + * @NVME_NVMSET_PL_EVENT_DTWIN_TIME_WARN: + * @NVME_NVMSET_PL_EVENT_DTWIN_EXCEEDED: + * @NVME_NVMSET_PL_EVENT_DTWIN_EXCURSION: + */ +enum nvme_nvmset_pl_events { + NVME_NVMSET_PL_EVENT_DTWIN_READ_WARN = 1 << 0, + NVME_NVMSET_PL_EVENT_DTWIN_WRITE_WARN = 1 << 1, + NVME_NVMSET_PL_EVENT_DTWIN_TIME_WARN = 1 << 2, + NVME_NVMSET_PL_EVENT_DTWIN_EXCEEDED = 1 << 14, + NVME_NVMSET_PL_EVENT_DTWIN_EXCURSION = 1 << 15, +}; + +/** + * struct nvme_aggregate_predictable_lat_event - + * @num_entries: Number of entries + * @entries: Entry list + */ +struct nvme_aggregate_predictable_lat_event { + __le64 num_entries; + __le16 entries[]; +}; + +/** + * struct nvme_ana_group_desc - + * @grpid: ANA group id + * @nnsids: Number of namespaces in @nsids + * @chgcnt: Change counter + * @state: ANA state + * @rsvd17: Reserved + * @nsids: List of namespaces + */ +struct nvme_ana_group_desc { + __le32 grpid; + __le32 nnsids; + __le64 chgcnt; + __u8 state; + __u8 rsvd17[15]; + __le32 nsids[]; +}; + +/** + * enum nvme_ana_state - + * @NVME_ANA_STATE_OPTIMIZED: + * @NVME_ANA_STATE_NONOPTIMIZED: + * @NVME_ANA_STATE_INACCESSIBLE: + * @NVME_ANA_STATE_PERSISTENT_LOSS: + * @NVME_ANA_STATE_CHANGE: + */ +enum nvme_ana_state { + NVME_ANA_STATE_OPTIMIZED = 0x1, + NVME_ANA_STATE_NONOPTIMIZED = 0x2, + NVME_ANA_STATE_INACCESSIBLE = 0x3, + NVME_ANA_STATE_PERSISTENT_LOSS = 0x4, + NVME_ANA_STATE_CHANGE = 0xf, +}; + +/** + * struct nvme_ana_log - + * @chgcnt: Change Count + * @ngrps: Number of ANA Group Descriptors + * @rsvd10: Reserved + * @descs: ANA Group Descriptor + */ +struct nvme_ana_log { + __le64 chgcnt; + __le16 ngrps; + __u8 rsvd10[6]; + struct nvme_ana_group_desc descs[]; +}; + +/** + * struct nvme_persistent_event_log - + * @lid: Log Identifier + * @rsvd1: Reserved + * @tnev: Total Number of Events + * @tll: Total Log Length + * @rv: Log Revision + * @rsvd17: Reserved + * @lhl: Log Header Length + * @ts: Timestamp + * @poh: Power on Hours + * @pcc: Power Cycle Count + * @vid: PCI Vendor ID + * @ssvid: PCI Subsystem Vendor ID + * @sn: Serial Number + * @mn: Model Number + * @subnqn: NVM Subsystem NVMe Qualified Name + * @gen_number: Generation Number + * @rci: Reporting Context Information + * @rsvd378: Reserved + * @seb: Supported Events Bitmap + */ +struct nvme_persistent_event_log { + __u8 lid; + __u8 rsvd1[3]; + __le32 tnev; + __le64 tll; + __u8 rv; + __u8 rsvd17; + __le16 lhl; + __le64 ts; + __u8 poh[16]; + __le64 pcc; + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char 
subnqn[NVME_NQN_LENGTH]; + __le16 gen_number; + __le32 rci; + __u8 rsvd378[102]; + __u8 seb[32]; +} __attribute__((packed)); + +/** + * struct nvme_persistent_event_entry - + * @etype: Event Type + * @etype_rev: Event Type Revision + * @ehl: Event Header Length + * @ehai: Event Header Additional Info + * @cntlid: Controller Identifier + * @ets: Event Timestamp + * @pelpid: Port Identifier + * @rsvd16: Reserved + * @vsil: Vendor Specific Information Length + * @el: Event Length + */ +struct nvme_persistent_event_entry { + __u8 etype; + __u8 etype_rev; + __u8 ehl; + __u8 ehai; + __le16 cntlid; + __le64 ets; + __le16 pelpid; + __u8 rsvd16[4]; + __le16 vsil; + __le16 el; +} __attribute__((packed)); + +/** + * enum nvme_persistent_event_types - + * @NVME_PEL_SMART_HEALTH_EVENT: + * @NVME_PEL_FW_COMMIT_EVENT: + * @NVME_PEL_TIMESTAMP_EVENT: + * @NVME_PEL_POWER_ON_RESET_EVENT: + * @NVME_PEL_NSS_HW_ERROR_EVENT: + * @NVME_PEL_CHANGE_NS_EVENT: + * @NVME_PEL_FORMAT_START_EVENT: + * @NVME_PEL_FORMAT_COMPLETION_EVENT: + * @NVME_PEL_SANITIZE_START_EVENT: + * @NVME_PEL_SANITIZE_COMPLETION_EVENT: + * @NVME_PEL_SET_FEATURE_EVENT: + * @NVME_PEL_TELEMETRY_CRT: + * @NVME_PEL_THERMAL_EXCURSION_EVENT: + */ +enum nvme_persistent_event_types { + NVME_PEL_SMART_HEALTH_EVENT = 0x01, + NVME_PEL_FW_COMMIT_EVENT = 0x02, + NVME_PEL_TIMESTAMP_EVENT = 0x03, + NVME_PEL_POWER_ON_RESET_EVENT = 0x04, + NVME_PEL_NSS_HW_ERROR_EVENT = 0x05, + NVME_PEL_CHANGE_NS_EVENT = 0x06, + NVME_PEL_FORMAT_START_EVENT = 0x07, + NVME_PEL_FORMAT_COMPLETION_EVENT = 0x08, + NVME_PEL_SANITIZE_START_EVENT = 0x09, + NVME_PEL_SANITIZE_COMPLETION_EVENT = 0x0a, + NVME_PEL_SET_FEATURE_EVENT = 0x0b, + NVME_PEL_TELEMETRY_CRT = 0x0c, + NVME_PEL_THERMAL_EXCURSION_EVENT = 0x0d, +}; + +/** + * struct nvme_fw_commit_event - + * @old_fw_rev: Old Firmware Revision + * @new_fw_rev: New Firmware Revision + * @fw_commit_action: Firmware Commit Action + * @fw_slot: Firmware Slot + * @sct_fw: Status Code Type for Firmware Commit Command + * @sc_fw: Status Returned for Firmware Commit Command + * @vndr_assign_fw_commit_rc: Vendor Assigned Firmware Commit Result Code + */ +struct nvme_fw_commit_event { + __le64 old_fw_rev; + __le64 new_fw_rev; + __u8 fw_commit_action; + __u8 fw_slot; + __u8 sct_fw; + __u8 sc_fw; + __le16 vndr_assign_fw_commit_rc; +} __attribute__((packed)); + +/** + * struct nvme_time_stamp_change_event - + * @previous_timestamp: Previous Timestamp + * @ml_secs_since_reset: Milliseconds Since Reset + */ +struct nvme_time_stamp_change_event { + __le64 previous_timestamp; + __le64 ml_secs_since_reset; +}; + +/** + * struct nvme_power_on_reset_info_list - + * @cid: Controller ID + * @fw_act: Firmware Activation + * @op_in_prog: Operation in Progress + * @rsvd4: Reserved + * @ctrl_power_cycle: Controller Power Cycle + * @power_on_ml_seconds: Power on milliseconds + * @ctrl_time_stamp: Controller Timestamp + */ +struct nvme_power_on_reset_info_list { + __le16 cid; + __u8 fw_act; + __u8 op_in_prog; + __u8 rsvd4[12]; + __le32 ctrl_power_cycle; + __le64 power_on_ml_seconds; + __le64 ctrl_time_stamp; +} __attribute__((packed)); + +/** + * struct nvme_nss_hw_err_event - + * @nss_hw_err_event_code: NVM Subsystem Hardware Error Event Code + * @rsvd2: Reserved + * @add_hw_err_info: Additional Hardware Error Information + */ +struct nvme_nss_hw_err_event { + __le16 nss_hw_err_event_code; + __u8 rsvd2[2]; + __u8 *add_hw_err_info; +}; + +/** + * struct nvme_change_ns_event - + * @nsmgt_cdw10: Namespace Management CDW10 + * @rsvd4: Reserved + * @nsze: Namespace Size + 
* @rsvd16: Reserved + * @nscap: Namespace Capacity + * @flbas: Formatted LBA Size + * @dps: End-to-end Data Protection Type Settings + * @nmic: Namespace Multi-path I/O and Namespace Sharing Capabilities + * @rsvd35: Reserved + * @ana_grp_id: ANA Group Identifier + * @nvmset_id: NVM Set Identifier + * @rsvd42: Reserved + * @nsid: Namespace ID + */ +struct nvme_change_ns_event { + __le32 nsmgt_cdw10; + __u8 rsvd4[4]; + __le64 nsze; + __u8 rsvd16[8]; + __le64 nscap; + __u8 flbas; + __u8 dps; + __u8 nmic; + __u8 rsvd35; + __le32 ana_grp_id; + __le16 nvmset_id; + __le16 rsvd42; + __le32 nsid; +}; + +/** + * struct nvme_format_nvm_start_event - + * @nsid: Namespace Identifier + * @fna: Format NVM Attributes + * @rsvd5: Reserved + * @format_nvm_cdw10: Format NVM CDW10 + */ +struct nvme_format_nvm_start_event { + __le32 nsid; + __u8 fna; + __u8 rsvd5[3]; + __le32 format_nvm_cdw10; +}; + +/** + * struct nvme_format_nvm_compln_event - + * @nsid: Namespace Identifier + * @smallest_fpi: Smallest Format Progress Indicator + * @format_nvm_status: Format NVM Status + * @compln_info: Completion Information + * @status_field: Status Field + */ +struct nvme_format_nvm_compln_event { + __le32 nsid; + __u8 smallest_fpi; + __u8 format_nvm_status; + __le16 compln_info; + __le32 status_field; +}; + +/** + * struct nvme_sanitize_start_event - + * @sani_cap: SANICAP + * @sani_cdw10: Sanitize CDW10 + * @sani_cdw11: Sanitize CDW11 + */ +struct nvme_sanitize_start_event { + __le32 sani_cap; + __le32 sani_cdw10; + __le32 sani_cdw11; +}; + +/** + * struct nvme_sanitize_compln_event - + * @sani_prog: Sanitize Progress + * @sani_status: Sanitize Status + * @cmpln_info: Completion Information + * @rsvd6: Reserved + */ +struct nvme_sanitize_compln_event { + __le16 sani_prog; + __le16 sani_status; + __le16 cmpln_info; + __u8 rsvd6[2]; +}; + +/** + * struct nvme_set_feature_event - + * @layout: Set Feature Event Layout + * @cdw_mem: Command Dwords Memory buffer + */ +struct nvme_set_feature_event { + __le32 layout; + __le32 cdw_mem[0]; +}; + +/** + * struct nvme_thermal_exc_event - + * @over_temp: Over Temperature + * @threshold: temperature threshold + */ +struct nvme_thermal_exc_event { + __u8 over_temp; + __u8 threshold; +}; + +/** + * struct nvme_lba_rd - + * @rslba: Range Starting LBA + * @rnlb: Range Number of Logical Blocks + * @rsvd12: Reserved + */ +struct nvme_lba_rd { + __le64 rslba; + __le32 rnlb; + __u8 rsvd12[4]; +}; + +/** + * struct nvme_lbas_ns_element - + * @neid: Namespace Element Identifier + * @nlrd: Number of LBA Range Descriptors + * @ratype: Recommended Action Type + * @rsvd8: Reserved + * @lba_rd: LBA Range Descriptor + */ +struct nvme_lbas_ns_element { + __le32 neid; + __le32 nlrd; + __u8 ratype; + __u8 rsvd8[7]; + struct nvme_lba_rd lba_rd[]; +}; + +/** + * enum nvme_lba_status_atype - + * @NVME_LBA_STATUS_ATYPE_SCAN_UNTRACKED: + * @NVME_LBA_STATUS_ATYPE_SCAN_TRACKED: + */ +enum nvme_lba_status_atype { + NVME_LBA_STATUS_ATYPE_SCAN_UNTRACKED = 0x10, + NVME_LBA_STATUS_ATYPE_SCAN_TRACKED = 0x11, +}; + +/** + * struct nvme_lba_status_log - + * @lslplen: LBA Status Log Page Length + * @nlslne: Number of LBA Status Log Namespace Elements + * @estulb: Estimate of Unrecoverable Logical Blocks + * @rsvd12: Reserved + * @lsgc: LBA Status Generation Counter + * @elements: LBA Status Log Namespace Element List + */ +struct nvme_lba_status_log { + __le32 lslplen; + __le32 nlslne; + __le32 estulb; + __u8 rsvd12[2]; + __le16 lsgc; + struct nvme_lbas_ns_element elements[]; +}; + +/** + * struct 
nvme_eg_event_aggregate_log -
+ * @nr_entries: Number of Entries
+ * @egids: Endurance Group Identifier
+ */
+struct nvme_eg_event_aggregate_log {
+ __le64 nr_entries;
+ __le16 egids[];
+};
+
+/**
+ * enum nvme_fid_supported_effects -
+ * @NVME_FID_SUPPORTED_EFFECTS_FSUPP:
+ * @NVME_FID_SUPPORTED_EFFECTS_UDCC:
+ * @NVME_FID_SUPPORTED_EFFECTS_NCC:
+ * @NVME_FID_SUPPORTED_EFFECTS_NIC:
+ * @NVME_FID_SUPPORTED_EFFECTS_CCC:
+ * @NVME_FID_SUPPORTED_EFFECTS_UUID_SEL:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_SHIFT:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_MASK:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_NS:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_CTRL:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_NVM_SET:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_ENDGRP:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_DOMAIN:
+ * @NVME_FID_SUPPORTED_EFFECTS_SCOPE_NSS:
+ *
+ * FID Supported and Effects Data Structure definitions
+ */
+enum nvme_fid_supported_effects {
+ NVME_FID_SUPPORTED_EFFECTS_FSUPP = 1 << 0,
+ NVME_FID_SUPPORTED_EFFECTS_UDCC = 1 << 1,
+ NVME_FID_SUPPORTED_EFFECTS_NCC = 1 << 2,
+ NVME_FID_SUPPORTED_EFFECTS_NIC = 1 << 3,
+ NVME_FID_SUPPORTED_EFFECTS_CCC = 1 << 4,
+ NVME_FID_SUPPORTED_EFFECTS_UUID_SEL = 1 << 19,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_SHIFT = 20,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_MASK = 0xfff,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_NS = 1 << 0,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_CTRL = 1 << 1,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_NVM_SET = 1 << 2,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_ENDGRP = 1 << 3,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_DOMAIN = 1 << 4,
+ NVME_FID_SUPPORTED_EFFECTS_SCOPE_NSS = 1 << 5,
+};
+
+/**
+ * struct nvme_fid_supported_effects_log -
+ * @fid_support: Feature Identifier Supported
+ *
+ * Feature Identifiers Supported and Effects (Log Identifier 12h)
+ */
+struct nvme_fid_supported_effects_log {
+ __le32 fid_support[NVME_LOG_FID_SUPPORTED_EFFECTS_MAX];
+};
+
+/**
+ * enum nvme_mi_cmd_supported_effects - bit field definitions
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_CSUPP: Command Supported
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_UDCC: User Data Content Change
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_NCC: Namespace Capability Change
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_NIC: Namespace Inventory Change
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_CCC: Controller Capability Change
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_SHIFT: 20 bit shift
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_MASK: 12 bit mask - 0xfff
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_NS: Namespace Scope
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_CTRL: Controller Scope
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_NVM_SET: NVM Set Scope
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_ENDGRP: Endurance Group Scope
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_DOMAIN: Domain Scope
+ * @NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_NSS: NVM Subsystem Scope
+ *
+ * MI Command Supported and Effects Data Structure definitions
+ */
+enum nvme_mi_cmd_supported_effects {
+ NVME_MI_CMD_SUPPORTED_EFFECTS_CSUPP = 1 << 0,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_UDCC = 1 << 1,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_NCC = 1 << 2,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_NIC = 1 << 3,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_CCC = 1 << 4,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_SHIFT = 20,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_MASK = 0xfff,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_NS = 1 << 0,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_CTRL = 1 << 1,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_NVM_SET = 1 << 2,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_ENDGRP = 1 << 3,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_DOMAIN = 1 << 4,
+ NVME_MI_CMD_SUPPORTED_EFFECTS_SCOPE_NSS = 1 << 5,
+};
+
+/**
+ * struct nvme_mi_cmd_supported_effects_log -
+ * @mi_cmd_support: NVMe-MI Commands Supported
+ *
+ * NVMe-MI Commands Supported and Effects (Log Identifier 13h)
+ */
+struct nvme_mi_cmd_supported_effects_log {
+ __le32 mi_cmd_support[NVME_LOG_MI_CMD_SUPPORTED_EFFECTS_MAX];
+ __le32 reserved1[NVME_LOG_MI_CMD_SUPPORTED_EFFECTS_RESERVED];
+};
+
+/**
+ * struct nvme_boot_partition -
+ * @lid: Boot Partition Identifier
+ * @rsvd1: Reserved
+ * @bpinfo: Boot Partition Information
+ * @rsvd8: Reserved
+ * @boot_partition_data: Contains the contents of the
+ * specified Boot Partition
+ */
+struct nvme_boot_partition {
+ __u8 lid;
+ __u8 rsvd1[3];
+ __le32 bpinfo;
+ __u8 rsvd8[8];
+ __u8 boot_partition_data[];
+};
+
+/**
+ * struct nvme_media_unit_stat_desc -
+ * @muid: Media Unit Identifier
+ * @domainid: Domain Identifier
+ * @endgid: Endurance Group Identifier
+ * @nvmsetid: NVM Set Identifier
+ * @cap_adj_fctr: Capacity Adjustment Factor
+ * @avl_spare: Available Spare
+ * @percent_used: Percentage Used
+ * @mucs: Number of Channels attached to media units
+ * @cio: Channel Identifiers Offset
+ */
+struct nvme_media_unit_stat_desc {
+ __le16 muid;
+ __le16 domainid;
+ __le16 endgid;
+ __le16 nvmsetid;
+ __le16 cap_adj_fctr;
+ __u8 avl_spare;
+ __u8 percent_used;
+ __u8 mucs;
+ __u8 cio;
+};
+
+/**
+ * struct nvme_media_unit_stat_log -
+ * @nmu: Number of media unit status descriptors
+ * @cchans: Number of Channels
+ * @sel_config: Selected Configuration
+ * @rsvd6: Reserved
+ * @mus_desc: Media unit statistic descriptors
+ */
+struct nvme_media_unit_stat_log {
+ __le16 nmu;
+ __le16 cchans;
+ __le16 sel_config;
+ __u8 rsvd6[10];
+ struct nvme_media_unit_stat_desc mus_desc[];
+};
+
+/**
+ * struct nvme_media_unit_config_desc -
+ * @muid: Media Unit Identifier
+ * @rsvd2: Reserved
+ * @mudl: Media Unit Descriptor Length
+ *
+ * Media Unit Configuration Descriptor
+ * Structure Definitions
+ */
+struct nvme_media_unit_config_desc {
+ __le16 muid;
+ __u8 rsvd2[4];
+ __le16 mudl;
+};
+
+/**
+ * struct nvme_channel_config_desc -
+ * @chanid: Channel Identifier
+ * @chmus: Number Channel Media Units
+ * @mu_config_desc: Media unit configuration descriptors
+ *
+ * Channel Configuration Descriptor
+ * Structure Definitions
+ */
+struct nvme_channel_config_desc {
+ __le16 chanid;
+ __le16 chmus;
+ struct nvme_media_unit_config_desc mu_config_desc[];
+};
+
+/**
+ * struct nvme_end_grp_chan_desc -
+ * @egchans: Number of Channels
+ * @chan_config_desc: Channel configuration descriptors
+ *
+ * Endurance group Channel Configuration Descriptor
+ * Structure Definitions
+ */
+struct nvme_end_grp_chan_desc {
+ __le16 egchans;
+ struct nvme_channel_config_desc chan_config_desc[];
+};
+
+/**
+ * struct nvme_end_grp_config_desc -
+ * @endgid: Endurance Group Identifier
+ * @cap_adj_factor: Capacity Adjustment Factor
+ * @rsvd4: Reserved
+ * @tegcap: Total Endurance Group Capacity
+ * @segcap: Spare Endurance Group Capacity
+ * @end_est: Endurance Estimate
+ * @rsvd64: Reserved
+ * @egsets: Number of NVM Sets
+ * @nvmsetid: NVM Set Identifier
+ *
+ * Endurance Group Configuration Descriptor
+ * Structure Definitions
+ */
+struct nvme_end_grp_config_desc {
+ __le16 endgid;
+ __le16 cap_adj_factor;
+ __u8 rsvd4[12];
+ __u8 tegcap[16];
+ __u8 segcap[16];
+ __u8 end_est[16];
+ __u8 rsvd64[16];
+ __le16 egsets;
+ __le16 nvmsetid[];
+};
+
+/**
+ * struct nvme_capacity_config_desc -
+ * @cap_config_id: Capacity Configuration Identifier
+ * @domainid: Domain Identifier
+ * @egcn: Number of Endurance Group Configuration
+ * Descriptors
+ * @rsvd6: Reserved
+ * @egcd: Endurance Group Configuration Descriptors
+ *
+ * Capacity Configuration structure definitions
+ */
+struct nvme_capacity_config_desc {
+ __le16 cap_config_id;
+ __le16 domainid;
+ __le16 egcn;
+ __u8
rsvd6[26]; + struct nvme_end_grp_config_desc egcd[]; +}; + +/** + * struct nvme_supported_cap_config_list_log - + * @sccn: number of capacity configuration + * + * Supported Capacity Configuration list log page + * structure definitions + */ +struct nvme_supported_cap_config_list_log { + __u8 sccn; + __u8 rsvd1[15]; + struct nvme_capacity_config_desc cap_config_desc[]; +}; + +/** + * struct nvme_resv_notification_log - + * @lpc: Log Page Count + * @rnlpt: See &enum nvme_resv_notify_rnlpt. + * @nalp: Number of Available Log Pages + * @rsvd9: Reserved + * @nsid: Namespace ID + * @rsvd16: Reserved + */ +struct nvme_resv_notification_log { + __le64 lpc; + __u8 rnlpt; + __u8 nalp; + __u8 rsvd9[2]; + __le32 nsid; + __u8 rsvd16[48]; +}; + +/** + * enum nvme_resv_notify_rnlpt - + * @NVME_RESV_NOTIFY_RNLPT_EMPTY: + * @NVME_RESV_NOTIFY_RNLPT_REGISTRATION_PREEMPTED: + * @NVME_RESV_NOTIFY_RNLPT_RESERVATION_RELEASED: + * @NVME_RESV_NOTIFY_RNLPT_RESERVATION_PREEMPTED: + */ +enum nvme_resv_notify_rnlpt { + NVME_RESV_NOTIFY_RNLPT_EMPTY = 0, + NVME_RESV_NOTIFY_RNLPT_REGISTRATION_PREEMPTED = 1, + NVME_RESV_NOTIFY_RNLPT_RESERVATION_RELEASED = 2, + NVME_RESV_NOTIFY_RNLPT_RESERVATION_PREEMPTED = 3, +}; + +/** + * struct nvme_sanitize_log_page - Sanitize Status (Log Identifier 81h) + * @sprog: Sanitize Progress (SPROG): indicates the fraction complete of the + * sanitize operation. The value is a numerator of the fraction + * complete that has 65,536 (10000h) as its denominator. This value + * shall be set to FFFFh if the @sstat field is not set to + * %NVME_SANITIZE_SSTAT_STATUS_IN_PROGESS. + * @sstat: Sanitize Status (SSTAT): indicates the status associated with + * the most recent sanitize operation. See &enum nvme_sanitize_sstat. + * @scdw10: Sanitize Command Dword 10 Information (SCDW10): contains the value + * of the Command Dword 10 field of the Sanitize command that started + * the sanitize operation. + * @eto: Estimated Time For Overwrite: indicates the number of seconds required + * to complete an Overwrite sanitize operation with 16 passes in + * the background when the No-Deallocate Modifies Media After Sanitize + * field is not set to 10b. A value of 0h indicates that the sanitize + * operation is expected to be completed in the background when the + * Sanitize command that started that operation is completed. A value + * of FFFFFFFFh indicates that no time period is reported. + * @etbe: Estimated Time For Block Erase: indicates the number of seconds + * required to complete a Block Erase sanitize operation in the + * background when the No-Deallocate Modifies Media After Sanitize + * field is not set to 10b. A value of 0h indicates that the sanitize + * operation is expected to be completed in the background when the + * Sanitize command that started that operation is completed. + * A value of FFFFFFFFh indicates that no time period is reported. + * @etce: Estimated Time For Crypto Erase: indicates the number of seconds + * required to complete a Crypto Erase sanitize operation in the + * background when the No-Deallocate Modifies Media After Sanitize + * field is not set to 10b. A value of 0h indicates that the sanitize + * operation is expected to be completed in the background when the + * Sanitize command that started that operation is completed. + * A value of FFFFFFFFh indicates that no time period is reported. 
+ * @etond: Estimated Time For Overwrite With No-Deallocate Media Modification:
+ * indicates the number of seconds required to complete an Overwrite
+ * sanitize operation and the associated additional media modification
+ * after the Overwrite sanitize operation in the background when
+ * the No-Deallocate After Sanitize bit was set to 1 in the Sanitize
+ * command that requested the Overwrite sanitize operation; and
+ * the No-Deallocate Modifies Media After Sanitize field is set to 10b.
+ * A value of 0h indicates that the sanitize operation is expected
+ * to be completed in the background when the Sanitize command that
+ * started that operation is completed. A value of FFFFFFFFh indicates
+ * that no time period is reported.
+ * @etbend: Estimated Time For Block Erase With No-Deallocate Media Modification:
+ * indicates the number of seconds required to complete a Block Erase
+ * sanitize operation and the associated additional media modification
+ * after the Block Erase sanitize operation in the background when
+ * the No-Deallocate After Sanitize bit was set to 1 in the Sanitize
+ * command that requested the Block Erase sanitize operation; and
+ * the No-Deallocate Modifies Media After Sanitize field is set to 10b.
+ * A value of 0h indicates that the sanitize operation is expected
+ * to be completed in the background when the Sanitize command that
+ * started that operation is completed. A value of FFFFFFFFh indicates
+ * that no time period is reported.
+ * @etcend: Estimated Time For Crypto Erase With No-Deallocate Media Modification:
+ * indicates the number of seconds required to complete a Crypto Erase
+ * sanitize operation and the associated additional media modification
+ * after the Crypto Erase sanitize operation in the background when
+ * the No-Deallocate After Sanitize bit was set to 1 in the Sanitize
+ * command that requested the Crypto Erase sanitize operation; and
+ * the No-Deallocate Modifies Media After Sanitize field is set to 10b.
+ * A value of 0h indicates that the sanitize operation is expected
+ * to be completed in the background when the Sanitize command that
+ * started that operation is completed. A value of FFFFFFFFh indicates
+ * that no time period is reported.
+ * @rsvd32: Reserved
+ */
+struct nvme_sanitize_log_page {
+ __le16 sprog;
+ __le16 sstat;
+ __le32 scdw10;
+ __le32 eto;
+ __le32 etbe;
+ __le32 etce;
+ __le32 etond;
+ __le32 etbend;
+ __le32 etcend;
+ __u8 rsvd32[480];
+};
+
+/**
+ * enum nvme_sanitize_sstat - Sanitize Status (SSTAT)
+ * @NVME_SANITIZE_SSTAT_STATUS_SHIFT: Shift amount to get the status value of
+ * the most recent sanitize operation from
+ * the &struct nvme_sanitize_log_page.sstat
+ * field.
+ * @NVME_SANITIZE_SSTAT_STATUS_MASK: Mask to get the status value of the most
+ * recent sanitize operation.
+ * @NVME_SANITIZE_SSTAT_STATUS_NEVER_SANITIZED: The NVM subsystem has never been
+ * sanitized.
+ * @NVME_SANITIZE_SSTAT_STATUS_COMPLETE_SUCCESS: The most recent sanitize operation
+ * completed successfully including any
+ * additional media modification.
+ * @NVME_SANITIZE_SSTAT_STATUS_IN_PROGESS: A sanitize operation is currently in progress.
+ * @NVME_SANITIZE_SSTAT_STATUS_COMPLETED_FAILED: The most recent sanitize operation
+ * failed.
+ * @NVME_SANITIZE_SSTAT_STATUS_ND_COMPLETE_SUCCESS: The most recent sanitize operation
+ * for which No-Deallocate After Sanitize was
+ * requested has completed successfully with
+ * deallocation of all user data.
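A hedged sketch of reading the sanitize log defined above — it assumes the struct nvme_sanitize_log_page above and the NVME_SANITIZE_SSTAT_* values defined just below are in scope, with glibc's <endian.h> supplying the byte-order helpers:

#include <endian.h>
#include <stdio.h>

static void sanitize_progress(const struct nvme_sanitize_log_page *log)
{
    __u16 sstat = le16toh(log->sstat);
    __u8 status = sstat & NVME_SANITIZE_SSTAT_STATUS_MASK;

    if (status == NVME_SANITIZE_SSTAT_STATUS_IN_PROGESS)
        // sprog is a fraction with 65536 as its denominator
        printf("sanitize %.1f%% complete\n",
               le16toh(log->sprog) * 100.0 / 65536.0);
    else if (status == NVME_SANITIZE_SSTAT_STATUS_COMPLETE_SUCCESS)
        printf("last sanitize operation completed successfully\n");
    else if (status == NVME_SANITIZE_SSTAT_STATUS_NEVER_SANITIZED)
        printf("never sanitized\n");
}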
+ * @NVME_SANITIZE_SSTAT_COMPLETED_PASSES_SHIFT: Shift amount to get the number + * of completed passes if the most recent + * sanitize operation was an Overwrite. This + * value shall be cleared to 0h if the most + * recent sanitize operation was not + * an Overwrite. + * @NVME_SANITIZE_SSTAT_COMPLETED_PASSES_MASK: Mask to get the number of completed + * passes. + * @NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED_SHIFT: Shift amount to get the Global + * Data Erased value from the + * &struct nvme_sanitize_log_page.sstat field. + * @NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED_MASK: Mask to get the Global Data Erased + * value. + * @NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED: Global Data Erased: if set, then no + * namespace user data in the NVM subsystem + * has been written to and no Persistent + * Memory Region in the NVM subsystem has + * been enabled since being manufactured and + * the NVM subsystem has never been sanitized; + * or since the most recent successful sanitize + * operation. + */ +enum nvme_sanitize_sstat { + NVME_SANITIZE_SSTAT_STATUS_SHIFT = 0, + NVME_SANITIZE_SSTAT_STATUS_MASK = 0x7, + NVME_SANITIZE_SSTAT_STATUS_NEVER_SANITIZED = 0, + NVME_SANITIZE_SSTAT_STATUS_COMPLETE_SUCCESS = 1, + NVME_SANITIZE_SSTAT_STATUS_IN_PROGESS = 2, + NVME_SANITIZE_SSTAT_STATUS_COMPLETED_FAILED = 3, + NVME_SANITIZE_SSTAT_STATUS_ND_COMPLETE_SUCCESS = 4, + NVME_SANITIZE_SSTAT_COMPLETED_PASSES_SHIFT = 3, + NVME_SANITIZE_SSTAT_COMPLETED_PASSES_MASK = 0x1f, + NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED_SHIFT = 8, + NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED_MASK = 0x1, + NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED = 1 << NVME_SANITIZE_SSTAT_GLOBAL_DATA_ERASED_SHIFT, +}; + +/** + * struct nvme_zns_changed_zone_log - ZNS Changed Zone List log + * @nrzid: Number of Zone Identifiers + * @rsvd2: Reserved + * @zid: Zone Identifier + */ +struct nvme_zns_changed_zone_log { + __le16 nrzid; + __u8 rsvd2[6]; + __le64 zid[NVME_ZNS_CHANGED_ZONES_MAX]; +}; + +/** + * enum nvme_zns_zt - + * @NVME_ZONE_TYPE_SEQWRITE_REQ: + */ +enum nvme_zns_zt { + NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2, +}; + +/** + * enum nvme_zns_za - + * @NVME_ZNS_ZA_ZFC: Zone Finished by Controller + * @NVME_ZNS_ZA_FZR: Finish Zone Recommended + * @NVME_ZNS_ZA_RZR: Reset Zone Recommended + * @NVME_ZNS_ZA_ZRWAV: + * @NVME_ZNS_ZA_ZDEV: Zone Descriptor Extension Valid + */ +enum nvme_zns_za { + NVME_ZNS_ZA_ZFC = 1 << 0, + NVME_ZNS_ZA_FZR = 1 << 1, + NVME_ZNS_ZA_RZR = 1 << 2, + NVME_ZNS_ZA_ZRWAV = 1 << 3, + NVME_ZNS_ZA_ZDEV = 1 << 7, +}; + +/** + * enum nvme_zns_zs - + * @NVME_ZNS_ZS_EMPTY: Empty state + * @NVME_ZNS_ZS_IMPL_OPEN: Implicitly open state + * @NVME_ZNS_ZS_EXPL_OPEN: Explicitly open state + * @NVME_ZNS_ZS_CLOSED: Closed state + * @NVME_ZNS_ZS_READ_ONLY: Read only state + * @NVME_ZNS_ZS_FULL: Full state + * @NVME_ZNS_ZS_OFFLINE: Offline state + */ +enum nvme_zns_zs { + NVME_ZNS_ZS_EMPTY = 0x1, + NVME_ZNS_ZS_IMPL_OPEN = 0x2, + NVME_ZNS_ZS_EXPL_OPEN = 0x3, + NVME_ZNS_ZS_CLOSED = 0x4, + NVME_ZNS_ZS_READ_ONLY = 0xd, + NVME_ZNS_ZS_FULL = 0xe, + NVME_ZNS_ZS_OFFLINE = 0xf, +}; + +/** + * struct nvme_zns_desc - + * @zt: Zone Type + * @zs: Zone State + * @za: Zone Attributes + * @zai: Zone Attributes Information + * @rsvd4: Reserved + * @zcap: Zone Capacity + * @zslba: Zone Start Logical Block Address + * @wp: Write Pointer + * @rsvd32: Reserved + */ +struct nvme_zns_desc { + __u8 zt; + __u8 zs; + __u8 za; + __u8 zai; + __u8 rsvd4[4]; + __le64 zcap; + __le64 zslba; + __le64 wp; + __u8 rsvd32[32]; +}; + +/** + * struct nvme_zone_report - + * @nr_zones: Number of descriptors in 
@entries
+ * @rsvd8: Reserved
+ * @entries: Zoned namespace descriptors
+ */
+struct nvme_zone_report {
+ __le64 nr_zones;
+ __u8 rsvd8[56];
+ struct nvme_zns_desc entries[];
+};
+
+/**
+ * struct nvme_lba_status_desc -
+ * @dslba: Descriptor Starting LBA
+ * @nlb: Number of Logical Blocks
+ * @rsvd12: Reserved
+ * @status: Additional status about this LBA range
+ * @rsvd14: Reserved
+ */
+struct nvme_lba_status_desc {
+ __le64 dslba;
+ __le32 nlb;
+ __u8 rsvd12;
+ __u8 status;
+ __u8 rsvd14[2];
+};
+
+/**
+ * struct nvme_lba_status -
+ * @nlsd: Number of LBA Status Descriptors
+ * @cmpc: Completion Condition
+ * @rsvd5: Reserved
+ * @descs: LBA status descriptor Entry
+ */
+struct nvme_lba_status {
+ __le32 nlsd;
+ __u8 cmpc;
+ __u8 rsvd5[3];
+ struct nvme_lba_status_desc descs[];
+};
+
+/**
+ * struct nvme_feat_auto_pst -
+ * @apst_entry: See &enum nvme_apst_entry
+ */
+struct nvme_feat_auto_pst {
+ __le64 apst_entry[32];
+};
+
+/**
+ * enum nvme_apst_entry -
+ * @NVME_APST_ENTRY_ITPS_SHIFT:
+ * @NVME_APST_ENTRY_ITPT_SHIFT:
+ * @NVME_APST_ENTRY_ITPS_MASK:
+ * @NVME_APST_ENTRY_ITPT_MASK:
+ */
+enum nvme_apst_entry {
+ NVME_APST_ENTRY_ITPS_SHIFT = 3,
+ NVME_APST_ENTRY_ITPT_SHIFT = 8,
+ NVME_APST_ENTRY_ITPS_MASK = 0x1f,
+ NVME_APST_ENTRY_ITPT_MASK = 0xffffff,
+};
+
+/**
+ * struct nvme_metadata_element_desc - Metadata Element Descriptor
+ * @type: Element Type (ET)
+ * @rev: Element Revision (ER)
+ * @len: Element Length (ELEN)
+ * @val: Element Value (EVAL), UTF-8 string
+ */
+struct nvme_metadata_element_desc {
+ __u8 type;
+ __u8 rev;
+ __u16 len;
+ __u8 val[0];
+};
+
+/**
+ * struct nvme_host_metadata - Host Metadata Data Structure
+ * @ndesc: Number of metadata element descriptors
+ * @rsvd1: Reserved
+ * @descs: Metadata element descriptors
+ * @descs_buf: Metadata element descriptor buffer
+ */
+struct nvme_host_metadata {
+ __u8 ndesc;
+ __u8 rsvd1;
+ union {
+ struct nvme_metadata_element_desc descs[0];
+ __u8 descs_buf[4094];
+ };
+};
+
+/**
+ * enum nvme_ctrl_metadata_type - Controller Metadata Element Types
+ * @NVME_CTRL_METADATA_OS_CTRL_NAME: Name of the controller in
+ * the operating system.
+ * @NVME_CTRL_METADATA_OS_DRIVER_NAME: Name of the driver in the
+ * operating system.
+ * @NVME_CTRL_METADATA_OS_DRIVER_VER: Version of the driver in
+ * the operating system.
+ * @NVME_CTRL_METADATA_PRE_BOOT_CTRL_NAME: Name of the controller in
+ * the pre-boot environment.
+ * @NVME_CTRL_METADATA_PRE_BOOT_DRIVER_NAME: Name of the driver in the
+ * pre-boot environment.
+ * @NVME_CTRL_METADATA_PRE_BOOT_DRIVER_VER: Version of the driver in the
+ * pre-boot environment.
+ * @NVME_CTRL_METADATA_SYS_PROC_MODEL: Model of the processor.
+ * @NVME_CTRL_METADATA_CHIPSET_DRV_NAME: Chipset driver name.
+ * @NVME_CTRL_METADATA_CHIPSET_DRV_VERSION: Chipset driver version.
+ * @NVME_CTRL_METADATA_OS_NAME_AND_BUILD: Operating system name and build.
+ * @NVME_CTRL_METADATA_SYS_PROD_NAME: System product name.
+ * @NVME_CTRL_METADATA_FIRMWARE_VERSION: Host firmware (e.g. UEFI) version.
+ * @NVME_CTRL_METADATA_OS_DRIVER_FILENAME: Operating system driver filename.
+ * @NVME_CTRL_METADATA_DISPLAY_DRV_NAME: Display driver name.
+ * @NVME_CTRL_METADATA_DISPLAY_DRV_VERSION: Display driver version.
+ * @NVME_CTRL_METADATA_HOST_DET_FAIL_REC: Failure record.
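An illustrative decode of the autonomous power state transition table defined above — a sketch only, assuming struct nvme_feat_auto_pst and enum nvme_apst_entry are in scope, that ITPT is expressed in milliseconds, and that an entry with a zero transition time can be treated as unused:

#include <endian.h>
#include <stdio.h>

static void print_apst_table(const struct nvme_feat_auto_pst *apst)
{
    for (int i = 0; i < 32; i++) {
        __u64 entry = le64toh(apst->apst_entry[i]);
        __u32 itpt = (entry >> NVME_APST_ENTRY_ITPT_SHIFT) &
                     NVME_APST_ENTRY_ITPT_MASK;
        __u8 itps = (entry >> NVME_APST_ENTRY_ITPS_SHIFT) &
                    NVME_APST_ENTRY_ITPS_MASK;

        if (!itpt)
            continue; // treat a zero transition time as an unused entry
        printf("entry %d: go to power state %u after %u ms idle\n",
               i, itps, itpt);
    }
}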
+ */ +enum nvme_ctrl_metadata_type { + NVME_CTRL_METADATA_OS_CTRL_NAME = 0x01, + NVME_CTRL_METADATA_OS_DRIVER_NAME = 0x02, + NVME_CTRL_METADATA_OS_DRIVER_VER = 0x03, + NVME_CTRL_METADATA_PRE_BOOT_CTRL_NAME = 0x04, + NVME_CTRL_METADATA_PRE_BOOT_DRIVER_NAME = 0x05, + NVME_CTRL_METADATA_PRE_BOOT_DRIVER_VER = 0x06, + NVME_CTRL_METADATA_SYS_PROC_MODEL = 0x07, + NVME_CTRL_METADATA_CHIPSET_DRV_NAME = 0x08, + NVME_CTRL_METADATA_CHIPSET_DRV_VERSION = 0x09, + NVME_CTRL_METADATA_OS_NAME_AND_BUILD = 0x0a, + NVME_CTRL_METADATA_SYS_PROD_NAME = 0x0b, + NVME_CTRL_METADATA_FIRMWARE_VERSION = 0x0c, + NVME_CTRL_METADATA_OS_DRIVER_FILENAME = 0x0d, + NVME_CTRL_METADATA_DISPLAY_DRV_NAME = 0x0e, + NVME_CTRL_METADATA_DISPLAY_DRV_VERSION = 0x0f, + NVME_CTRL_METADATA_HOST_DET_FAIL_REC = 0x10, +}; + +/** + * enum nvme_ns_metadata_type - Namespace Metadata Element Types + * @NVME_NS_METADATA_OS_NS_NAME: Name of the namespace in the the + * operating system + * @NVME_NS_METADATA_PRE_BOOT_NS_NAME: Name of the namespace in the pre-boot + * environment. + * @NVME_NS_METADATA_OS_NS_QUAL_1: First qualifier of the Operating System + * Namespace Name. + * @NVME_NS_METADATA_OS_NS_QUAL_2: Second qualifier of the Operating System + * Namespace Name. + */ +enum nvme_ns_metadata_type { + NVME_NS_METADATA_OS_NS_NAME = 0x01, + NVME_NS_METADATA_PRE_BOOT_NS_NAME = 0x02, + NVME_NS_METADATA_OS_NS_QUAL_1 = 0x03, + NVME_NS_METADATA_OS_NS_QUAL_2 = 0x04, +}; + +/** + * struct nvme_timestamp - + * @timestamp: Timestamp value based on origin and synch field + * @attr: Attribute + * @rsvd: Reserved + */ +struct nvme_timestamp { + __u8 timestamp[6]; + __u8 attr; + __u8 rsvd; +}; + +/** + * struct nvme_lba_range_type_entry - + * @type: Specifies the Type of the LBA range + * @attributes: Specifies attributes of the LBA range + * @rsvd2: Reserved + * @slba: Starting LBA + * @nlb: Number of Logical Blocks + * @guid: Unique Identifier + * @rsvd48: Reserved + */ +struct nvme_lba_range_type_entry { + __u8 type; + __u8 attributes; + __u8 rsvd2[14]; + __u64 slba; + __u64 nlb; + __u8 guid[16]; + __u8 rsvd48[16]; +}; + +/** + * enum nvme_lbart - + * @NVME_LBART_TYPE_GP: General Purpose + * @NVME_LBART_TYPE_FS: Filesystem + * @NVME_LBART_TYPE_RAID: RAID + * @NVME_LBART_TYPE_CACHE: Cache + * @NVME_LBART_TYPE_SWAP: Page / swap file + * @NVME_LBART_ATTRIB_TEMP: Temp + * @NVME_LBART_ATTRIB_HIDE: Hidden + */ +enum nvme_lbart { + NVME_LBART_TYPE_GP = 0, + NVME_LBART_TYPE_FS = 1, + NVME_LBART_TYPE_RAID = 2, + NVME_LBART_TYPE_CACHE = 3, + NVME_LBART_TYPE_SWAP = 4, + NVME_LBART_ATTRIB_TEMP = 1 << 0, + NVME_LBART_ATTRIB_HIDE = 1 << 1, +}; + +/** + * struct nvme_lba_range_type - + * @entry: LBA range type entry + */ +struct nvme_lba_range_type { + struct nvme_lba_range_type_entry entry[NVME_FEAT_LBA_RANGE_MAX]; +}; + +/** + * struct nvme_plm_config - + * @ee: Enable Event + * @rsvd2: Reserved + * @dtwinrt: DTWIN Reads Threshold + * @dtwinwt: DTWIN Writes Threshold + * @dtwintt: DTWIN Time Threshold + * @rsvd56: Reserved + */ +struct nvme_plm_config { + __le16 ee; + __u8 rsvd2[30]; + __le64 dtwinrt; + __le64 dtwinwt; + __le64 dtwintt; + __u8 rsvd56[456]; +}; + +/** + * struct nvme_feat_host_behavior - + * @acre: Advanced Command Retry Enable + * @rsvd1: Reserved + */ +struct nvme_feat_host_behavior { + __u8 acre; + __u8 rsvd1[511]; +}; + +/** + * enum nvme_host_behavior_support - + * @NVME_ENABLE_ACRE: Enable Advanced Command Retry Enable + */ +enum nvme_host_behavior_support { + NVME_ENABLE_ACRE = 1 << 0, +}; + +/** + * struct nvme_dsm_range - + * @cattr: Context 
Attributes + * @nlb: Length in logical blocks + * @slba: Starting LBA + */ +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +/** + * struct nvme_copy_range - + * @rsvd0: Reserved + * @slba: Starting LBA + * @nlb: Number of Logical Blocks + * @rsvd18: Reserved + * @eilbrt: Expected Initial Logical Block Reference Tag + * @elbatm: Expected Logical Block Application Tag Mask + * @elbat: Expected Logical Block Application Tag + */ +struct nvme_copy_range { + __u8 rsvd0[8]; + __le64 slba; + __le16 nlb; + __u8 rsvd18[6]; + __le32 eilbrt; + __le16 elbatm; + __le16 elbat; +}; + +/** + * struct nvme_registered_ctrl - + * @cntlid: Controller ID + * @rcsts: Reservation Status + * @rsvd3: Reserved + * @hostid: Host Identifier + * @rkey: Reservation Key + */ +struct nvme_registered_ctrl { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 hostid; + __le64 rkey; +}; + +/** + * struct nvme_registered_ctrl_ext - + * @cntlid: Controller ID + * @rcsts: Reservation Status + * @rsvd3: Reserved + * @rkey: Reservation Key + * @hostid: Host Identifier + * @rsvd32: Reserved + */ +struct nvme_registered_ctrl_ext { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 rkey; + __u8 hostid[16]; + __u8 rsvd32[32]; +}; + +/** + * struct nvme_resv_status - + * @gen: Generation + * @rtype: Reservation Type + * @regctl: Number of Registered Controllers + * @rsvd7: Reserved + * @ptpls: Persist Through Power Loss State + * @rsvd10: Reserved + * @rsvd24: Reserved + * @regctl_eds: Registered Controller Extended Data Structure + * @regctl_ds: Registered Controller Data Structure + */ +struct nvme_resv_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 rsvd7[2]; + __u8 ptpls; + __u8 rsvd10[14]; + union { + struct { + __u8 rsvd24[40]; + struct nvme_registered_ctrl_ext regctl_eds[0]; + }; + struct nvme_registered_ctrl regctl_ds[0]; + }; +}; + +/** + * struct nvme_streams_directive_params - + * @msl: Max Streams Limit + * @nssa: NVM Subsystem Streams Available + * @nsso: NVM Subsystem Streams Open + * @nssc: NVM Subsystem Stream Capability + * @rsvd: Reserved + * @sws: Stream Write Size + * @sgs: Stream Granularity Size + * @nsa: Namespace Streams Allocated + * @nso: Namespace Streams Open + * @rsvd2: Reserved + */ +struct nvme_streams_directive_params { + __le16 msl; + __le16 nssa; + __le16 nsso; + __u8 nssc; + __u8 rsvd[9]; + __le32 sws; + __le16 sgs; + __le16 nsa; + __le16 nso; + __u8 rsvd2[6]; +}; + +/** + * struct nvme_streams_directive_status - + * @osc: Open Stream Count + * @sid: Stream Identifier + */ +struct nvme_streams_directive_status { + __le16 osc; + __le16 sid[]; +}; + +/** + * struct nvme_id_directives - + * @supported: Identify directive is supported + * @enabled: Identify directive is Enabled + * @rsvd64: Reserved + */ +struct nvme_id_directives { + __u8 supported[32]; + __u8 enabled[32]; + __u8 rsvd64[4032]; +}; + +/** + * enum nvme_directive_types - + * @NVME_ID_DIR_ID_BIT: Identify directive is supported + * @NVME_ID_DIR_SD_BIT: Streams directive is supported + */ +enum nvme_directive_types { + NVME_ID_DIR_ID_BIT = 0, + NVME_ID_DIR_SD_BIT = 1, +}; + +/** + * struct nvme_host_mem_buf_attrs - + * @hsize: Host Memory Buffer Size + * @hmdlal: Host Memory Descriptor List Lower Address + * @hmdlau: Host Memory Descriptor List Upper Address + * @hmdlec: Host Memory Descriptor List Entry Count + * @rsvd16: Reserved + */ +struct nvme_host_mem_buf_attrs { + __le32 hsize; + __le32 hmdlal; + __le32 hmdlau; + __le32 hmdlec; + __u8 rsvd16[4080]; + +}; + +/** + * enum 
nvme_ae_type - + * @NVME_AER_ERROR: Error event + * @NVME_AER_SMART: SMART / Health Status event + * @NVME_AER_NOTICE: Notice event + * @NVME_AER_CSS: NVM Command Set Specific events + * @NVME_AER_VS: Vendor Specific event + */ +enum nvme_ae_type { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +/** + * enum nvme_ae_info_error - + * @NVME_AER_ERROR_INVALID_DB_REG: Write to Invalid Doorbell Register + * @NVME_AER_ERROR_INVALID_DB_VAL: Invalid Doorbell Write Value + * @NVME_AER_ERROR_DIAG_FAILURE: Diagnostic Failure + * @NVME_AER_ERROR_PERSISTENT_INTERNAL_ERROR: Persistent Internal Error + * @NVME_AER_ERROR_TRANSIENT_INTERNAL_ERROR: Transient Internal Error + * @NVME_AER_ERROR_FW_IMAGE_LOAD_ERROR: Firmware Image Load Error + */ +enum nvme_ae_info_error { + NVME_AER_ERROR_INVALID_DB_REG = 0x00, + NVME_AER_ERROR_INVALID_DB_VAL = 0x01, + NVME_AER_ERROR_DIAG_FAILURE = 0x02, + NVME_AER_ERROR_PERSISTENT_INTERNAL_ERROR = 0x03, + NVME_AER_ERROR_TRANSIENT_INTERNAL_ERROR = 0x04, + NVME_AER_ERROR_FW_IMAGE_LOAD_ERROR = 0x05, +}; + +/** + * enum nvme_ae_info_smart - + * @NVME_AER_SMART_SUBSYSTEM_RELIABILITY: NVM subsystem Reliability + * @NVME_AER_SMART_TEMPERATURE_THRESHOLD: Temperature Threshold + * @NVME_AER_SMART_SPARE_THRESHOLD: Spare Below Threshold + */ +enum nvme_ae_info_smart { + NVME_AER_SMART_SUBSYSTEM_RELIABILITY = 0x00, + NVME_AER_SMART_TEMPERATURE_THRESHOLD = 0x01, + NVME_AER_SMART_SPARE_THRESHOLD = 0x02, +}; + +/** + * enum nvme_ae_info_css_nvm - + * @NVME_AER_CSS_NVM_RESERVATION: Reservation Log Page Available + * @NVME_AER_CSS_NVM_SANITIZE_COMPLETED: Sanitize Operation Completed + * @NVME_AER_CSS_NVM_UNEXPECTED_SANITIZE_DEALLOC: Sanitize Operation Completed + * With Unexpected Deallocation + */ +enum nvme_ae_info_css_nvm { + NVME_AER_CSS_NVM_RESERVATION = 0x00, + NVME_AER_CSS_NVM_SANITIZE_COMPLETED = 0x01, + NVME_AER_CSS_NVM_UNEXPECTED_SANITIZE_DEALLOC = 0x02, +}; + +/** + * enum nvme_ae_info_notice - + * @NVME_AER_NOTICE_NS_CHANGED: Namespace Attribute Changed + * @NVME_AER_NOTICE_FW_ACT_STARTING: Firmware Activation Starting + * @NVME_AER_NOTICE_TELEMETRY: Telemetry Log Changed + * @NVME_AER_NOTICE_ANA: Asymmetric Namespace Access Change + * @NVME_AER_NOTICE_PL_EVENT: Predictable Latency Event Aggregate Log Change + * @NVME_AER_NOTICE_LBA_STATUS_ALERT: LBA Status Information Alert + * @NVME_AER_NOTICE_EG_EVENT: Endurance Group Event Aggregate Log Page Change + * @NVME_AER_NOTICE_DISC_CHANGED: Discovery Log Page Change + */ +enum nvme_ae_info_notice { + NVME_AER_NOTICE_NS_CHANGED = 0x00, + NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, + NVME_AER_NOTICE_TELEMETRY = 0x02, + NVME_AER_NOTICE_ANA = 0x03, + NVME_AER_NOTICE_PL_EVENT = 0x04, + NVME_AER_NOTICE_LBA_STATUS_ALERT = 0x05, + NVME_AER_NOTICE_EG_EVENT = 0x06, + NVME_AER_NOTICE_DISC_CHANGED = 0xf0, +}; + +/** + * enum nvme_subsys_type - Type of the NVM subsystem. + * @NVME_NQN_DISC: Discovery type target subsystem. Describes a referral to another + * Discovery Service composed of Discovery controllers that provide + * additional discovery records. Multiple Referral entries may + * be reported for each Discovery Service (if that Discovery Service + * has multiple NVM subsystem ports or supports multiple protocols). + * @NVME_NQN_NVME: NVME type target subsystem. Describes an NVM subsystem whose + * controllers may have attached namespaces (an NVM subsystem + * that is not composed of Discovery controllers). 
Multiple NVM + * Subsystem entries may be reported for each NVM subsystem if + * that NVM subsystem has multiple NVM subsystem ports. + * @NVME_NQN_CURR: Current Discovery type target subsystem. Describes this Discovery + * subsystem (the Discovery Service that contains the controller + * processing the Get Log Page command). Multiple Current Discovery + * Subsystem entries may be reported for this Discovery subsystem + * if the current Discovery subsystem has multiple NVM subsystem + * ports. + */ +enum nvme_subsys_type { + NVME_NQN_DISC = 1, + NVME_NQN_NVME = 2, + NVME_NQN_CURR = 3, +}; + +#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" +#define NVME_RDMA_IP_PORT 4420 +#define NVME_DISC_IP_PORT 8009 + +/* However the max length of a qualified name is another size */ +#define NVMF_NQN_SIZE 223 +#define NVMF_TRSVCID_SIZE 32 + +/** + * enum nvmf_disc_eflags - Discovery Log Page entry flags. + * @NVMF_DISC_EFLAGS_NONE: Indicates that none of the DUPRETINFO or EPCSD + * features are supported. + * @NVMF_DISC_EFLAGS_DUPRETINFO: Duplicate Returned Information (DUPRETINFO): + * Indicates that using the content of this entry + * to access this Discovery Service returns the same + * information that is returned by using the content + * of other entries in this log page that also have + * this flag set. + * @NVMF_DISC_EFLAGS_EPCSD: Explicit Persistent Connection Support for Discovery (EPCSD): + * Indicates that Explicit Persistent Connections are + * supported for the Discovery controller. + * @NVMF_DISC_EFLAGS_BOTH: Indicates that both the DUPRETINFO and EPCSD + * features are supported. + */ +enum nvmf_disc_eflags { + NVMF_DISC_EFLAGS_NONE = 0, + NVMF_DISC_EFLAGS_DUPRETINFO = 1, + NVMF_DISC_EFLAGS_EPCSD = 2, + NVMF_DISC_EFLAGS_BOTH = 3, +}; + +/** + * union nvmf_tsas - Transport Specific Address Subtype + * @common: Common transport specific attributes + * @rdma: RDMA transport specific attribute settings + * @qptype: RDMA QP Service Type (RDMA_QPTYPE): Specifies the type of RDMA + * Queue Pair. See &enum nvmf_rdma_qptype. + * @prtype: RDMA Provider Type (RDMA_PRTYPE): Specifies the type of RDMA + * provider. See &enum nvmf_rdma_prtype. + * @cms: RDMA Connection Management Service (RDMA_CMS): Specifies the type + * of RDMA IP Connection Management Service. See &enum nvmf_rdma_cms. + * @pkey: RDMA_PKEY: Specifies the Partition Key when AF_IB (InfiniBand) + * address family type is used. + * @tcp: TCP transport specific attribute settings + * @sectype: Security Type (SECTYPE): Specifies the type of security used by the + * NVMe/TCP port. If SECTYPE is a value of 0h (No Security), then the + * host shall set up a normal TCP connection. See &enum nvmf_tcp_sectype. + */ +union nvmf_tsas { + char common[NVMF_TSAS_SIZE]; + struct rdma { + __u8 qptype; + __u8 prtype; + __u8 cms; + __u8 rsvd3[5]; + __u16 pkey; + __u8 rsvd10[246]; + } rdma; + struct tcp { + __u8 sectype; + } tcp; +}; + +/** + * struct nvmf_disc_log_entry - Discovery Log Page entry + * @trtype: Transport Type (TRTYPE): Specifies the NVMe Transport type. + * See &enum nvmf_trtype. + * @adrfam: Address Family (ADRFAM): Specifies the address family. + * See &enum nvmf_addr_family. + * @subtype: Subsystem Type (SUBTYPE): Specifies the type of the NVM subsystem + * that is indicated in this entry. See &enum nvme_subsys_type. + * @treq: Transport Requirements (TREQ): Indicates requirements for the NVMe + * Transport. See &enum nvmf_treq. + * @portid: Port ID (PORTID): Specifies a particular NVM subsystem port. 
+ * Different NVMe Transports or address families may utilize the same + * Port ID value (e.g. a Port ID may support both iWARP and RoCE). + * @cntlid: Controller ID (CNTLID): Specifies the controller ID. If the NVM + * subsystem uses a dynamic controller model, then this field shall + * be set to FFFFh. If the NVM subsystem uses a static controller model, + * then this field may be set to a specific controller ID (values 0h + * to FFEFh are valid). If the NVM subsystem uses a static controller + * model and the value indicated is FFFEh, then the host should remember + * the Controller ID returned as part of the Fabrics Connect command + * in order to re-establish an association in the future with the same + * controller. + * @asqsz: Admin Max SQ Size (ASQSZ): Specifies the maximum size of an Admin + * Submission Queue. This applies to all controllers in the NVM + * subsystem. The value shall be a minimum of 32 entries. + * @eflags: Entry Flags (EFLAGS): Indicates additional information related to + * the current entry. See &enum nvmf_disc_eflags. + * @rsvd12: Reserved + * @trsvcid: Transport Service Identifier (TRSVCID): Specifies the NVMe Transport + * service identifier as an ASCII string. The NVMe Transport service + * identifier is specified by the associated NVMe Transport binding + * specification. + * @rsvd64: Reserved + * @subnqn: NVM Subsystem Qualified Name (SUBNQN): NVMe Qualified Name (NQN) + * that uniquely identifies the NVM subsystem. For a subsystem, if that + * Discovery subsystem has a unique NQN (i.e., the NVM Subsystem NVMe + * Qualified Name (SUBNQN) field in that Discovery subsystem's Identify + * Controller data structure contains a unique NQN value), then the + * value returned shall be that unique NQN. If the Discovery subsystem + * does not have a unique NQN, then the value returned shall be the + * well-known Discovery Service NQN (nqn.2014-08.org.nvmexpress.discovery). + * @traddr: Transport Address (TRADDR): Specifies the address of the NVM subsystem + * that may be used for a Connect command as an ASCII string. The + * Address Family field describes the reference for parsing this field. + * @tsas: Transport specific attribute settings + */ +struct nvmf_disc_log_entry { + __u8 trtype; + __u8 adrfam; + __u8 subtype; + __u8 treq; + __le16 portid; + __le16 cntlid; + __le16 asqsz; + __le16 eflags; + __u8 rsvd12[20]; + char trsvcid[NVMF_TRSVCID_SIZE]; + __u8 rsvd64[192]; + char subnqn[NVME_NQN_LENGTH]; + char traddr[NVMF_TRADDR_SIZE]; + union nvmf_tsas tsas; +}; + +/** + * enum nvmf_trtype - Transport Type codes for Discovery Log Page entry TRTYPE field + * @NVMF_TRTYPE_UNSPECIFIED: Not indicated + * @NVMF_TRTYPE_RDMA: RDMA + * @NVMF_TRTYPE_FC: Fibre Channel + * @NVMF_TRTYPE_TCP: TCP + * @NVMF_TRTYPE_LOOP: Intra-host Transport (i.e., loopback), reserved + * for host usage. + * @NVMF_TRTYPE_MAX: Maximum value for &enum nvmf_trtype + */ +enum nvmf_trtype { + NVMF_TRTYPE_UNSPECIFIED = 0, + NVMF_TRTYPE_RDMA = 1, + NVMF_TRTYPE_FC = 2, + NVMF_TRTYPE_TCP = 3, + NVMF_TRTYPE_LOOP = 254, + NVMF_TRTYPE_MAX, +}; + +/** + * enum nvmf_addr_family - Address Family codes for Discovery Log Page entry ADRFAM field + * @NVMF_ADDR_FAMILY_PCI: PCIe + * @NVMF_ADDR_FAMILY_IP4: AF_INET: IPv4 address family. + * @NVMF_ADDR_FAMILY_IP6: AF_INET6: IPv6 address family. + * @NVMF_ADDR_FAMILY_IB: AF_IB: InfiniBand address family. + * @NVMF_ADDR_FAMILY_FC: Fibre Channel address family. + * @NVMF_ADDR_FAMILY_LOOP: Intra-host Transport (i.e., loopback), reserved + * for host usage. 
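+ *
+ * A minimal sketch (illustrative only; the helper name and the AF_UNSPEC
+ * fallback are assumptions, not upstream code) of mapping an ADRFAM code
+ * from a discovery record to a socket address family for the IP cases:
+ *
+ *     #include <sys/socket.h>
+ *
+ *     static int nvmf_adrfam_to_af(__u8 adrfam)
+ *     {
+ *             switch (adrfam) {
+ *             case NVMF_ADDR_FAMILY_IP4:
+ *                     return AF_INET;
+ *             case NVMF_ADDR_FAMILY_IP6:
+ *                     return AF_INET6;
+ *             default:
+ *                     return AF_UNSPEC;
+ *             }
+ *     }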
+ */ +enum nvmf_addr_family { + NVMF_ADDR_FAMILY_PCI = 0, + NVMF_ADDR_FAMILY_IP4 = 1, + NVMF_ADDR_FAMILY_IP6 = 2, + NVMF_ADDR_FAMILY_IB = 3, + NVMF_ADDR_FAMILY_FC = 4, + NVMF_ADDR_FAMILY_LOOP = 254, +}; + +/** + * enum nvmf_treq - Transport Requirements codes for Discovery Log Page entry TREQ field + * @NVMF_TREQ_NOT_SPECIFIED: Not specified + * @NVMF_TREQ_REQUIRED: Required + * @NVMF_TREQ_NOT_REQUIRED: Not Required + * @NVMF_TREQ_DISABLE_SQFLOW: SQ flow control disable supported + */ +enum nvmf_treq { + NVMF_TREQ_NOT_SPECIFIED = 0, + NVMF_TREQ_REQUIRED = 1, + NVMF_TREQ_NOT_REQUIRED = 2, + NVMF_TREQ_DISABLE_SQFLOW = 4, +}; + +/** + * enum nvmf_rdma_qptype - RDMA QP Service Type codes for Discovery Log Page + * entry TSAS RDMA_QPTYPE field + * @NVMF_RDMA_QPTYPE_CONNECTED: Reliable Connected + * @NVMF_RDMA_QPTYPE_DATAGRAM: Reliable Datagram + */ +enum nvmf_rdma_qptype { + NVMF_RDMA_QPTYPE_CONNECTED = 1, + NVMF_RDMA_QPTYPE_DATAGRAM = 2, +}; + +/** + * enum nvmf_rdma_prtype - RDMA Provider Type codes for Discovery Log Page + * entry TSAS RDMA_PRTYPE field + * @NVMF_RDMA_PRTYPE_NOT_SPECIFIED: No Provider Specified + * @NVMF_RDMA_PRTYPE_IB: InfiniBand + * @NVMF_RDMA_PRTYPE_ROCE: InfiniBand RoCE + * @NVMF_RDMA_PRTYPE_ROCEV2: InfiniBand RoCEV2 + * @NVMF_RDMA_PRTYPE_IWARP: iWARP + */ +enum nvmf_rdma_prtype { + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, + NVMF_RDMA_PRTYPE_IB = 2, + NVMF_RDMA_PRTYPE_ROCE = 3, + NVMF_RDMA_PRTYPE_ROCEV2 = 4, + NVMF_RDMA_PRTYPE_IWARP = 5, +}; + +/** + * enum nvmf_rdma_cms - RDMA Connection Management Service Type codes for + * Discovery Log Page entry TSAS RDMA_CMS field + * @NVMF_RDMA_CMS_RDMA_CM: Sockets based endpoint addressing + * + */ +enum nvmf_rdma_cms { + NVMF_RDMA_CMS_RDMA_CM = 1, +}; + +/** + * enum nvmf_tcp_sectype - Transport Specific Address Subtype Definition for + * NVMe/TCP Transport + * @NVMF_TCP_SECTYPE_NONE: No Security + * @NVMF_TCP_SECTYPE_TLS: Transport Layer Security version 1.2 + * @NVMF_TCP_SECTYPE_TLS13: Transport Layer Security version 1.3 or a subsequent + * version. The TLS protocol negotiates the version and + * cipher suite for each TCP connection. + */ +enum nvmf_tcp_sectype { + NVMF_TCP_SECTYPE_NONE = 0, + NVMF_TCP_SECTYPE_TLS = 1, + NVMF_TCP_SECTYPE_TLS13 = 2, +}; + +/** + * struct nvmf_discovery_log - Discovery Log Page (Log Identifier 70h) + * @genctr: Generation Counter (GENCTR): Indicates the version of the discovery + * information, starting at a value of 0h. For each change in the + * Discovery Log Page, this counter is incremented by one. If the value + * of this field is FFFFFFFF_FFFFFFFFh, then the field shall be cleared + * to 0h when incremented (i.e., rolls over to 0h). + * @numrec: Number of Records (NUMREC): Indicates the number of records + * contained in the log. + * @recfmt: Record Format (RECFMT): Specifies the format of the Discovery Log + * Page. If a new format is defined, this value is incremented by one. + * The format of the record specified in this definition shall be 0h. + * @rsvd14: Reserved + * @entries: Discovery Log Page Entries - see &struct nvmf_disc_log_entry. + */ +struct nvmf_discovery_log { + __le64 genctr; + __le64 numrec; + __le16 recfmt; + __u8 rsvd14[1006]; + struct nvmf_disc_log_entry entries[]; +}; + +/* + * Discovery Information Management (DIM) command. This is sent by a + * host to a Discovery Controller (DC) to perform explicit registration. 
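+ *
+ * A hedged sketch of preparing the registration payload defined below as
+ * struct nvmf_dim_data (the field names come from this header; the zero
+ * entry count, the sizing via sizeof(), the "host_nqn" string and the
+ * <endian.h> htole*() helpers are assumptions, not upstream code):
+ *
+ *     struct nvmf_dim_data *dim = calloc(1, sizeof(*dim));
+ *
+ *     dim->tdl = htole32(sizeof(*dim));
+ *     dim->nument = htole64(0);
+ *     dim->entfmt = htole16(NVMF_DIM_ENTFMT_BASIC);
+ *     dim->etype = htole16(NVMF_DIM_ETYPE_HOST);
+ *     snprintf(dim->eid, sizeof(dim->eid), "%s", host_nqn);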
+ */ +#define NVMF_ENAME_LEN 256 +#define NVMF_EVER_LEN 64 + +/** + * enum nvmf_dim_tas - Discovery Information Management Task + * @NVMF_DIM_TAS_REGISTER: Register + * @NVMF_DIM_TAS_DEREGISTER: Deregister + * @NVMF_DIM_TAS_UPDATE: Update + */ +enum nvmf_dim_tas { + NVMF_DIM_TAS_REGISTER = 0x00, + NVMF_DIM_TAS_DEREGISTER = 0x01, + NVMF_DIM_TAS_UPDATE = 0x02, +}; + +/** + * enum nvmf_dim_entfmt - Discovery Information Management Entry Format + * @NVMF_DIM_ENTFMT_BASIC: Basic discovery information entry + * @NVMF_DIM_ENTFMT_EXTENDED: Extended discovery information entry + */ +enum nvmf_dim_entfmt +{ + NVMF_DIM_ENTFMT_BASIC = 0x01, + NVMF_DIM_ENTFMT_EXTENDED = 0x02, +}; + +/** + * enum nvmf_dim_etype -Discovery Information Management Entity Type + * @NVMF_DIM_ETYPE_HOST: Host + * @NVMF_DIM_ETYPE_DDC: Direct Discovery controller + * @NVMF_DIM_ETYPE_CDC: Centralized Discovery controller + */ +enum nvmf_dim_etype +{ + NVMF_DIM_ETYPE_HOST = 0x01, + NVMF_DIM_ETYPE_DDC = 0x02, + NVMF_DIM_ETYPE_CDC = 0x03, +}; + +/** + * enum nvmf_exattype - Extended Attribute Type + * @NVMF_EXATTYPE_HOSTID: Host Identifier + * @NVMF_EXATTYPE_SYMNAME: Symblic Name + */ +enum nvmf_exattype +{ + NVMF_EXATTYPE_HOSTID = 0x01, + NVMF_EXATTYPE_SYMNAME = 0x02, +}; + +/** + * struct nvmf_ext_attr - Extended Attribute (EXAT) + * @exattype: Extended Attribute Type (EXATTYPE) - see @enum nvmf_exattype + * @exatlen: Extended Attribute Length (EXATLEN) + * @exatval: Extended Attribute Value (EXATVAL) - size allocated for array + * must be a multiple of 4 bytes + */ +struct nvmf_ext_attr +{ + __le16 exattype; + __le16 exatlen; + __u8 exatval[]; +}; + +/** + * struct nvmf_ext_die - Extended Discovery Information Entry (DIE) + * @trtype: Transport Type (&enum nvmf_trtype) + * @adrfam: Address Family (&enum nvmf_addr_family) + * @subtype: Subsystem Type (&enum nvme_subsys_type) + * @treq: Transport Requirements (&enum nvmf_treq) + * @portid: Port ID + * @cntlid: Controller ID + * @asqsz: Admin Max SQ Size + * @rsvd10: Reserved + * @trsvcid: Transport Service Identifier + * @resv64: Reserved + * @nqn: NVM Qualified Name + * @traddr: Transport Address + * @tsas: Transport Specific Address Subtype (&union nvmf_tsas) + * @tel: Total Entry Length + * @numexat: Number of Extended Attributes + * @resv1030: Reserved + * @exat: Extented Attributes 0 (&struct nvmf_ext_attr) + */ +struct nvmf_ext_die +{ + __u8 trtype; + __u8 adrfam; + __u8 subtype; + __u8 treq; + __le16 portid; + __le16 cntlid; + __le16 asqsz; + __u8 rsvd10[22]; + char trsvcid[NVMF_TRSVCID_SIZE]; + __u8 resv64[192]; + char nqn[NVME_NQN_LENGTH]; + char traddr[NVMF_TRADDR_SIZE]; + union nvmf_tsas tsas; + __le32 tel; + __le16 numexat; + __u8 resv1030[2]; + struct nvmf_ext_attr exat[]; +}; + +/** + * union nvmf_die - Discovery Information Entry (DIE) + * @basic: Basic format (&struct nvmf_disc_log_entry) + * @extended: Extended format (&struct nvmf_ext_die) + * + * Depending on the ENTFMT specified in the DIM, DIEs can be entered + * with the Basic or Extended formats. For Basic format, each entry + * has a fixed length. Therefore, the "basic" field defined below can + * be accessed as a C array. For the Extended format, however, each + * entry is of variable length (TEL). Therefore, the "extended" field + * defined below cannot be accessed as a C array. Instead, the + * "extended" field is akin to a linked-list, where one can "walk" + * through the list. To move to the next entry, one simply adds the + * current entry's length (TEL) to the "walk" pointer. 
The number of + * entries in the list is specified by NUMENT. Although extended + * entries are of a variable lengths (TEL), TEL is always a mutiple of + * 4 bytes. + */ +union nvmf_die +{ + struct nvmf_disc_log_entry basic[0]; + struct nvmf_ext_die extended; +}; + +/** + * struct nvmf_dim_data - Discovery Information Management (DIM) - Data + * @tdl: Total Data Length + * @rsvd4: Reserved + * @nument: Number of entries + * @entfmt: Entry Format (&enum nvmf_dim_entfmt) + * @etype: Entity Type (&enum nvmf_dim_etype) + * @portlcl: Port Local + * @rsvd21: Reserved + * @ektype: Entry Key Type + * @eid: Entity Identifier (e.g. Host NQN) + * @ename: Entity Name (e.g. hostname) + * @ever: Entity Version (e.g. OS Name/Version) + * @rsvd600: Reserved + * @die: Discovery Information Entry (see @nument above) + */ +struct nvmf_dim_data +{ + __le32 tdl; + __u8 rsvd4[4]; + __le64 nument; + __le16 entfmt; + __le16 etype; + __u8 portlcl; + __u8 rsvd21; + __le16 ektype; + char eid[NVME_NQN_LENGTH]; + char ename[NVMF_ENAME_LEN]; + char ever[NVMF_EVER_LEN]; + __u8 rsvd600[424]; + union nvmf_die die[]; +}; + +/** + * struct nvmf_connect_data - Data payload for the 'connect' command + * @hostid: Host ID of the connecting host + * @cntlid: Requested controller ID + * @rsvd4: Reserved + * @subsysnqn: Subsystem NQN to connect to + * @hostnqn: Host NQN of the connecting host + * @rsvd5: Reserved + */ +struct nvmf_connect_data { + __u8 hostid[16]; + __le16 cntlid; + char rsvd4[238]; + char subsysnqn[NVME_NQN_LENGTH]; + char hostnqn[NVME_NQN_LENGTH]; + char rsvd5[256]; +}; + +/** + * struct nvme_mi_read_nvm_ss_info - + * @nump: Number of Ports + * @mjr: NVMe-MI Major Version Number + * @mnr: NVMe-MI Minor Version Number + * @rsvd3: Reserved + */ +struct nvme_mi_read_nvm_ss_info { + __u8 nump; + __u8 mjr; + __u8 mnr; + __u8 rsvd3[29]; +}; + +/** + * struct nvme_mi_port_pcie - + * @mps: PCIe Maximum Payload Size + * @sls: PCIe Supported Link Speeds Vector + * @cls: PCIe Current Link Speed + * @mlw: PCIe Maximum Link Width + * @nlw: PCIe Negotiated Link Width + * @pn: PCIe Port Number + * @rsvd14: Reserved + */ +struct nvme_mi_port_pcie { + __u8 mps; + __u8 sls; + __u8 cls; + __u8 mlw; + __u8 nlw; + __u8 pn; + __u8 rsvd14[18]; +}; + +/** + * struct nvme_mi_port_smb - + * @vpd_addr: Current VPD SMBus/I2C Address + * @mvpd_freq: Maximum VPD Access SMBus/I2C Frequency + * @mme_addr: Current Management Endpoint SMBus/I2C Address + * @mme_freq: Maximum Management Endpoint SMBus/I2C Frequency + * @nvmebm: NVMe Basic Management + * @rsvd13: Reserved + */ +struct nvme_mi_port_smb { + __u8 vpd_addr; + __u8 mvpd_freq; + __u8 mme_addr; + __u8 mme_freq; + __u8 nvmebm; + __u8 rsvd13[19]; +}; + +/** + * struct nvme_mi_read_port_info - + * @portt: Port Type + * @rsvd1: Reserved + * @mmctptus: Maximum MCTP Transmission Unit Size + * @meb: Management Endpoint Buffer Size + * @pcie: PCIe Port Specific Data + * @smb: SMBus Port Specific Data + */ +struct nvme_mi_read_port_info { + __u8 portt; + __u8 rsvd1; + __le16 mmctptus; + __le32 meb; + union { + struct nvme_mi_port_pcie pcie; + struct nvme_mi_port_smb smb; + }; +}; + +/** + * struct nvme_mi_read_ctrl_info - + * @portid: Port Identifier + * @rsvd1: Reserved + * @prii: PCIe Routing ID Information + * @pri: PCIe Routing ID + * @vid: PCI Vendor ID + * @did: PCI Device ID + * @ssvid: PCI Subsystem Vendor ID + * @ssid: PCI Subsystem Device ID + * @rsvd16: Reserved + */ +struct nvme_mi_read_ctrl_info { + __u8 portid; + __u8 rsvd1[4]; + __u8 prii; + __le16 pri; + __le16 vid; + __le16 did; 
+ __le16 ssvid; + __le16 ssid; + __u8 rsvd16[16]; +}; + +/** + * struct nvme_mi_osc - + * @type: Command Type + * @opc: Opcode + */ +struct nvme_mi_osc { + __u8 type; + __u8 opc; +}; + +/** + * struct nvme_mi_read_sc_list - + * @numcmd: Number of Commands + * @cmds: MEB supported Command Data Structure + */ +struct nvme_mi_read_sc_list { + __le16 numcmd; + struct nvme_mi_osc cmds[]; +}; + +/** + * struct nvme_mi_nvm_ss_health_status - + * @nss: NVM Subsystem Status + * @sw: Smart Warnings + * @ctemp: Composite Temperature + * @pdlu: Percentage Drive Life Used + * @ccs: Composite Controller Status + * @rsvd8: Reserved + */ +struct nvme_mi_nvm_ss_health_status { + __u8 nss; + __u8 sw; + __u8 ctemp; + __u8 pdlu; + __le16 ccs; + __u8 rsvd8[2]; +}; + +/** + * enum nvme_mi_css - + * @NVME_MI_CCS_RDY: Ready + * @NVME_MI_CSS_CFS: Controller Fatal Status + * @NVME_MI_CSS_SHST: Shutdown Status + * @NVME_MI_CSS_NSSRO: NVM Subsystem Reset Occurred + * @NVME_MI_CSS_CECO: Controller Enable Change Occurred + * @NVME_MI_CSS_NAC: Namespace Attribute Changed + * @NVME_MI_CSS_FA: Firmware Activated + * @NVME_MI_CSS_CSTS: Controller Status Change + * @NVME_MI_CSS_CTEMP: Composite Temperature Change + * @NVME_MI_CSS_PDLU: Percentage Used + * @NVME_MI_CSS_SPARE: Available Spare + * @NVME_MI_CSS_CCWARN: Critical Warning + */ +enum nvme_mi_css { + NVME_MI_CCS_RDY = 1 << 0, + NVME_MI_CSS_CFS = 1 << 1, + NVME_MI_CSS_SHST = 1 << 2, + NVME_MI_CSS_NSSRO = 1 << 4, + NVME_MI_CSS_CECO = 1 << 5, + NVME_MI_CSS_NAC = 1 << 6, + NVME_MI_CSS_FA = 1 << 7, + NVME_MI_CSS_CSTS = 1 << 8, + NVME_MI_CSS_CTEMP = 1 << 9, + NVME_MI_CSS_PDLU = 1 << 10, + NVME_MI_CSS_SPARE = 1 << 11, + NVME_MI_CSS_CCWARN = 1 << 12, +}; + +/** + * struct nvme_mi_ctrl_health_status - + * @ctlid: Controller Identifier + * @csts: Controller Status + * @ctemp: Composite Temperature + * @pdlu: Percentage Used + * @spare: Available Spare + * @cwarn: Critical Warning + * @rsvd9: Reserved + */ +struct nvme_mi_ctrl_health_status { + __le16 ctlid; + __le16 csts; + __le16 ctemp; + __u8 pdlu; + __u8 spare; + __u8 cwarn; + __u8 rsvd9[7]; +}; + +/** + * enum nvme_mi_csts - + * @NVME_MI_CSTS_RDY: Ready + * @NVME_MI_CSTS_CFS: Controller Fatal Status + * @NVME_MI_CSTS_SHST: Shutdown Status + * @NVME_MI_CSTS_NSSRO: NVM Subsystem Reset Occurred + * @NVME_MI_CSTS_CECO: Controller Enable Change Occurred + * @NVME_MI_CSTS_NAC: Namespace Attribute Changed + * @NVME_MI_CSTS_FA: Firmware Activated + */ +enum nvme_mi_csts { + NVME_MI_CSTS_RDY = 1 << 0, + NVME_MI_CSTS_CFS = 1 << 1, + NVME_MI_CSTS_SHST = 1 << 2, + NVME_MI_CSTS_NSSRO = 1 << 4, + NVME_MI_CSTS_CECO = 1 << 5, + NVME_MI_CSTS_NAC = 1 << 6, + NVME_MI_CSTS_FA = 1 << 7, +}; + +/** + * enum nvme_mi_cwarn - + * @NVME_MI_CWARN_ST: Spare Threshold + * @NVME_MI_CWARN_TAUT: Temperature Above or Under Threshold + * @NVME_MI_CWARN_RD: Reliability Degraded + * @NVME_MI_CWARN_RO: Read Only + * @NVME_MI_CWARN_VMBF: Volatile Memory Backup Failed + */ +enum nvme_mi_cwarn { + NVME_MI_CWARN_ST = 1 << 0, + NVME_MI_CWARN_TAUT = 1 << 1, + NVME_MI_CWARN_RD = 1 << 2, + NVME_MI_CWARN_RO = 1 << 3, + NVME_MI_CWARN_VMBF = 1 << 4, +}; + +/** + * struct nvme_mi_vpd_mra - + * @nmravn: NVMe MultiRecord Area Version Number + * @ff: Form Factor + * @rsvd7: Reserved + * @i18vpwr: Initial 1.8 V Power Supply Requirements + * @m18vpwr: Maximum 1.8 V Power Supply Requirements + * @i33vpwr: Initial 3.3 V Power Supply Requirements + * @m33vpwr: Maximum 3.3 V Power Supply Requirements + * @rsvd17: Reserved + * @m33vapsr: Maximum 3.3 V aux Power Supply 
Requirements + * @i5vapsr: Initial 5 V Power Supply Requirements + * @m5vapsr: Maximum 5 V Power Supply Requirements + * @i12vapsr: Initial 12 V Power Supply Requirements + * @m12vapsr: Maximum 12 V Power Supply Requirements + * @mtl: Maximum Thermal Load + * @tnvmcap: Total NVM Capacity + * @rsvd37: Reserved + */ +struct nvme_mi_vpd_mra { + __u8 nmravn; + __u8 ff; + __u8 rsvd7[6]; + __u8 i18vpwr; + __u8 m18vpwr; + __u8 i33vpwr; + __u8 m33vpwr; + __u8 rsvd17; + __u8 m33vapsr; + __u8 i5vapsr; + __u8 m5vapsr; + __u8 i12vapsr; + __u8 m12vapsr; + __u8 mtl; + __u8 tnvmcap[16]; + __u8 rsvd37[27]; +}; + +/** + * struct nvme_mi_vpd_ppmra - + * @nppmravn: NVMe PCIe Port MultiRecord Area Version Number + * @pn: PCIe Port Number + * @ppi: Port Information + * @ls: PCIe Link Speed + * @mlw: PCIe Maximum Link Width + * @mctp: MCTP Support + * @refccap: Ref Clk Capability + * @pi: Port Identifier + * @rsvd13: Reserved + */ +struct nvme_mi_vpd_ppmra { + __u8 nppmravn; + __u8 pn; + __u8 ppi; + __u8 ls; + __u8 mlw; + __u8 mctp; + __u8 refccap; + __u8 pi; + __u8 rsvd13[3]; +}; + +/** + * struct nvme_mi_vpd_telem - + * @type: Type of the Element Descriptor + * @rev: Revision of the Element Descriptor + * @len: Number of bytes in the Element Descriptor + * @data: Type-specific information associated with + * the Element Descriptor + */ +struct nvme_mi_vpd_telem { + __u8 type; + __u8 rev; + __u8 len; + __u8 data[0]; +}; + +/** + * enum nvme_mi_elem - + * @NVME_MI_ELEM_EED: Extended Element Descriptor + * @NVME_MI_ELEM_USCE: Upstream Connector Element Descriptor + * @NVME_MI_ELEM_ECED: Expansion Connector Element Descriptor + * @NVME_MI_ELEM_LED: Label Element Descriptor + * @NVME_MI_ELEM_SMBMED: SMBus/I2C Mux Element Descriptor + * @NVME_MI_ELEM_PCIESED: PCIe Switch Element Descriptor + * @NVME_MI_ELEM_NVMED: NVM Subsystem Element Descriptor + */ +enum nvme_mi_elem { + NVME_MI_ELEM_EED = 1, + NVME_MI_ELEM_USCE = 2, + NVME_MI_ELEM_ECED = 3, + NVME_MI_ELEM_LED = 4, + NVME_MI_ELEM_SMBMED = 5, + NVME_MI_ELEM_PCIESED = 6, + NVME_MI_ELEM_NVMED = 7, +}; + +/** + * struct nvme_mi_vpd_tra - + * @vn: Version Number + * @rsvd6: Reserved + * @ec: Element Count + * @elems: Element Descriptor + */ +struct nvme_mi_vpd_tra { + __u8 vn; + __u8 rsvd6; + __u8 ec; + struct nvme_mi_vpd_telem elems[0]; +}; + +/** + * struct nvme_mi_vpd_mr_common - + * @type: NVMe Record Type ID + * @rf: Record Format + * @rlen: Record Length + * @rchksum: Record Checksum + * @hchksum: Header Checksum + * @nmra: NVMe MultiRecord Area + * @ppmra: NVMe PCIe Port MultiRecord Area + * @tmra: Topology MultiRecord Area + */ +struct nvme_mi_vpd_mr_common { + __u8 type; + __u8 rf; + __u8 rlen; + __u8 rchksum; + __u8 hchksum; + + union { + struct nvme_mi_vpd_mra nmra; + struct nvme_mi_vpd_ppmra ppmra; + struct nvme_mi_vpd_tra tmra; + }; +}; + +/** + * struct nvme_mi_vpd_hdr - + * @ipmiver: IPMI Format Version Number + * @iuaoff: Internal Use Area Starting Offset + * @ciaoff: Chassis Info Area Starting Offset + * @biaoff: Board Info Area Starting Offset + * @piaoff: Product Info Area Starting Offset + * @mrioff: MultiRecord Info Area Starting Offset + * @rsvd6: Reserved + * @chchk: Common Header Checksum + * @vpd: Vital Product Data + */ +struct nvme_mi_vpd_hdr { + __u8 ipmiver; + __u8 iuaoff; + __u8 ciaoff; + __u8 biaoff; + __u8 piaoff; + __u8 mrioff; + __u8 rsvd6; + __u8 chchk; + __u8 vpd[]; +}; + +/** + * enum nvme_status_field - Defines all parts of the nvme status field: status + * code, status code type, and additional flags. 
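+ *
+ * As a worked example (illustrative only), a completion status field value
+ * of 0x4004 decodes with these definitions to SCT 0h (generic), SC 04h
+ * (Data Transfer Error) and the DNR bit set, i.e. a data transfer error
+ * that should not be retried.
+ *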
+ * @NVME_SCT_GENERIC: Generic errors applicable to multiple opcodes + * @NVME_SCT_CMD_SPECIFIC: Errors associated to a specific opcode + * @NVME_SCT_MEDIA: Errors associated with media and data integrity + * @NVME_SCT_PATH: Errors associated with the paths connection + * @NVME_SCT_VS: Vendor specific errors + * @NVME_SCT_MASK: Mask to get the value of the Status Code Type + * @NVME_SC_MASK: Mask to get the value of the status code. + * @NVME_SC_SUCCESS: Successful Completion: The command + * completed without error. + * @NVME_SC_INVALID_OPCODE: Invalid Command Opcode: A reserved coded + * value or an unsupported value in the + * command opcode field. + * @NVME_SC_INVALID_FIELD: Invalid Field in Command: A reserved + * coded value or an unsupported value in a + * defined field. + * @NVME_SC_CMDID_CONFLICT: Command ID Conflict: The command + * identifier is already in use. + * @NVME_SC_DATA_XFER_ERROR: Data Transfer Error: Transferring the + * data or metadata associated with a + * command experienced an error. + * @NVME_SC_POWER_LOSS: Commands Aborted due to Power Loss + * Notification: Indicates that the command + * was aborted due to a power loss + * notification. + * @NVME_SC_INTERNAL: Internal Error: The command was not + * completed successfully due to an internal error. + * @NVME_SC_ABORT_REQ: Command Abort Requested: The command was + * aborted due to an Abort command being + * received that specified the Submission + * Queue Identifier and Command Identifier + * of this command. + * @NVME_SC_ABORT_QUEUE: Command Aborted due to SQ Deletion: The + * command was aborted due to a Delete I/O + * Submission Queue request received for the + * Submission Queue to which the command was + * submitted. + * @NVME_SC_FUSED_FAIL: Command Aborted due to Failed Fused Command: + * The command was aborted due to the other + * command in a fused operation failing. + * @NVME_SC_FUSED_MISSING: Aborted due to Missing Fused Command: The + * fused command was aborted due to the + * adjacent submission queue entry not + * containing a fused command that is the + * other command. + * @NVME_SC_INVALID_NS: Invalid Namespace or Format: The + * namespace or the format of that namespace + * is invalid. + * @NVME_SC_CMD_SEQ_ERROR: Command Sequence Error: The command was + * aborted due to a protocol violation in a + * multi-command sequence. + * @NVME_SC_SGL_INVALID_LAST: Invalid SGL Segment Descriptor: The + * command includes an invalid SGL Last + * Segment or SGL Segment descriptor. + * @NVME_SC_SGL_INVALID_COUNT: Invalid Number of SGL Descriptors: There + * is an SGL Last Segment descriptor or an + * SGL Segment descriptor in a location + * other than the last descriptor of a + * segment based on the length indicated. + * @NVME_SC_SGL_INVALID_DATA: Data SGL Length Invalid: This may occur + * if the length of a Data SGL is too short. + * This may occur if the length of a Data + * SGL is too long and the controller does + * not support SGL transfers longer than the + * amount of data to be transferred as + * indicated in the SGL Support field of the + * Identify Controller data structure. + * @NVME_SC_SGL_INVALID_METADATA: Metadata SGL Length Invalid: This may + * occur if the length of a Metadata SGL is + * too short. This may occur if the length + * of a Metadata SGL is too long and the + * controller does not support SGL transfers + * longer than the amount of data to be + * transferred as indicated in the SGL + * Support field of the Identify Controller + * data structure. 
+ * @NVME_SC_SGL_INVALID_TYPE: SGL Descriptor Type Invalid: The type of + * an SGL Descriptor is a type that is not + * supported by the controller. + * @NVME_SC_CMB_INVALID_USE: Invalid Use of Controller Memory Buffer: + * The attempted use of the Controller + * Memory Buffer is not supported by the + * controller. + * @NVME_SC_PRP_INVALID_OFFSET: PRP Offset Invalid: The Offset field for + * a PRP entry is invalid. + * @NVME_SC_AWU_EXCEEDED: Atomic Write Unit Exceeded: The length + * specified exceeds the atomic write unit size. + * @NVME_SC_OP_DENIED: Operation Denied: The command was denied + * due to lack of access rights. Refer to + * the appropriate security specification. + * @NVME_SC_SGL_INVALID_OFFSET: SGL Offset Invalid: The offset specified + * in a descriptor is invalid. This may + * occur when using capsules for data + * transfers in NVMe over Fabrics + * implementations and an invalid offset in + * the capsule is specified. + * @NVME_SC_HOSTID_FORMAT: Host Identifier Inconsistent Format: The + * NVM subsystem detected the simultaneous + * use of 64- bit and 128-bit Host + * Identifier values on different + * controllers. + * @NVME_SC_KAT_EXPIRED: Keep Alive Timer Expired: The Keep Alive + * Timer expired. + * @NVME_SC_KAT_INVALID: Keep Alive Timeout Invalid: The Keep + * Alive Timeout value specified is invalid. + * @NVME_SC_CMD_ABORTED_PREMEPT: Command Aborted due to Preempt and Abort: + * The command was aborted due to a + * Reservation Acquire command. + * @NVME_SC_SANITIZE_FAILED: Sanitize Failed: The most recent sanitize + * operation failed and no recovery action + * has been successfully completed. + * @NVME_SC_SANITIZE_IN_PROGRESS: Sanitize In Progress: The requested + * function (e.g., command) is prohibited + * while a sanitize operation is in + * progress. + * @NVME_SC_SGL_INVALID_GRANULARITY: SGL Data Block Granularity Invalid: The + * Address alignment or Length granularity + * for an SGL Data Block descriptor is + * invalid. + * @NVME_SC_CMD_IN_CMBQ_NOT_SUPP: Command Not Supported for Queue in CMB: + * The implementation does not support + * submission of the command to a Submission + * Queue in the Controller Memory Buffer or + * command completion to a Completion Queue + * in the Controller Memory Buffer. + * @NVME_SC_NS_WRITE_PROTECTED: Namespace is Write Protected: The command + * is prohibited while the namespace is + * write protected as a result of a change + * in the namespace write protection state + * as defined by the Namespace Write + * Protection State Machine. + * @NVME_SC_CMD_INTERRUPTED: Command Interrupted: Command processing + * was interrupted and the controller is + * unable to successfully complete the + * command. The host should retry the + * command. + * @NVME_SC_TRAN_TPORT_ERROR: Transient Transport Error: A transient + * transport error was detected. If the + * command is retried on the same + * controller, the command is likely to + * succeed. A command that fails with a + * transient transport error four or more + * times should be treated as a persistent + * transport error that is not likely to + * succeed if retried on the same + * controller. + * @NVME_SC_PROHIBITED_BY_CMD_AND_FEAT: Command Prohibited by Command and Feature + * Lockdown: The command was aborted due to + * command execution being prohibited by + * the Command and Feature Lockdown. + * @NVME_SC_ADMIN_CMD_MEDIA_NOT_READY: Admin Command Media Not Ready: The Admin + * command requires access to media and + * the media is not ready. 
+ * @NVME_SC_LBA_RANGE: LBA Out of Range: The command references
+ * an LBA that exceeds the size of the namespace.
+ * @NVME_SC_CAP_EXCEEDED: Capacity Exceeded: Execution of the
+ * command has caused the capacity of the
+ * namespace to be exceeded.
+ * @NVME_SC_NS_NOT_READY: Namespace Not Ready: The namespace is not
+ * ready to be accessed as a result of a
+ * condition other than a condition that is
+ * reported as an Asymmetric Namespace
+ * Access condition.
+ * @NVME_SC_RESERVATION_CONFLICT: Reservation Conflict: The command was
+ * aborted due to a conflict with a
+ * reservation held on the accessed
+ * namespace.
+ * @NVME_SC_FORMAT_IN_PROGRESS: Format In Progress: A Format NVM command
+ * is in progress on the namespace.
+ * @NVME_SC_CQ_INVALID: Completion Queue Invalid: The Completion
+ * Queue identifier specified in the command
+ * does not exist.
+ * @NVME_SC_QID_INVALID: Invalid Queue Identifier: The creation of
+ * the I/O Completion Queue failed due to an
+ * invalid queue identifier specified as
+ * part of the command. An invalid queue
+ * identifier is one that is currently in
+ * use or one that is outside the range
+ * supported by the controller.
+ * @NVME_SC_QUEUE_SIZE: Invalid Queue Size: The host attempted to
+ * create an I/O Completion Queue with an
+ * invalid number of entries.
+ * @NVME_SC_ABORT_LIMIT: Abort Command Limit Exceeded: The number
+ * of concurrently outstanding Abort commands
+ * has exceeded the limit indicated in the
+ * Identify Controller data structure.
+ * @NVME_SC_ABORT_MISSING: Abort Command is missing: The abort
+ * command is missing.
+ * @NVME_SC_ASYNC_LIMIT: Asynchronous Event Request Limit
+ * Exceeded: The number of concurrently
+ * outstanding Asynchronous Event Request
+ * commands has been exceeded.
+ * @NVME_SC_FIRMWARE_SLOT: Invalid Firmware Slot: The firmware slot
+ * indicated is invalid or read only. This
+ * error is indicated if the firmware slot
+ * exceeds the number supported.
+ * @NVME_SC_FIRMWARE_IMAGE: Invalid Firmware Image: The firmware
+ * image specified for activation is invalid
+ * and not loaded by the controller.
+ * @NVME_SC_INVALID_VECTOR: Invalid Interrupt Vector: The creation of
+ * the I/O Completion Queue failed due to an
+ * invalid interrupt vector specified as
+ * part of the command.
+ * @NVME_SC_INVALID_LOG_PAGE: Invalid Log Page: The log page indicated
+ * is invalid. This error condition is also
+ * returned if a reserved log page is
+ * requested.
+ * @NVME_SC_INVALID_FORMAT: Invalid Format: The LBA Format specified
+ * is not supported.
+ * @NVME_SC_FW_NEEDS_CONV_RESET: Firmware Activation Requires Conventional Reset:
+ * The firmware commit was successful,
+ * however, activation of the firmware image
+ * requires a conventional reset.
+ * @NVME_SC_INVALID_QUEUE: Invalid Queue Deletion: Invalid I/O
+ * Completion Queue specified to delete.
+ * @NVME_SC_FEATURE_NOT_SAVEABLE: Feature Identifier Not Saveable: The
+ * Feature Identifier specified does not
+ * support a saveable value.
+ * @NVME_SC_FEATURE_NOT_CHANGEABLE: Feature Not Changeable: The Feature
+ * Identifier is not able to be changed.
+ * @NVME_SC_FEATURE_NOT_PER_NS: Feature Not Namespace Specific: The
+ * Feature Identifier specified is not
+ * namespace specific. The Feature
+ * Identifier settings apply across all
+ * namespaces.
+ * @NVME_SC_FW_NEEDS_SUBSYS_RESET: Firmware Activation Requires NVM
+ * Subsystem Reset: The firmware commit was
+ * successful, however, activation of the
+ * firmware image requires an NVM Subsystem Reset.
+ * @NVME_SC_FW_NEEDS_RESET: Firmware Activation Requires Controller + * Level Reset: The firmware commit was + * successful; however, the image specified + * does not support being activated without + * a reset. + * @NVME_SC_FW_NEEDS_MAX_TIME: Firmware Activation Requires Maximum Time + * Violation: The image specified if + * activated immediately would exceed the + * Maximum Time for Firmware Activation + * (MTFA) value reported in Identify + * Controller. + * @NVME_SC_FW_ACTIVATE_PROHIBITED: Firmware Activation Prohibited: The image + * specified is being prohibited from + * activation by the controller for vendor + * specific reasons. + * @NVME_SC_OVERLAPPING_RANGE: Overlapping Range: The downloaded + * firmware image has overlapping ranges. + * @NVME_SC_NS_INSUFFICIENT_CAP: Namespace Insufficient Capacity: Creating + * the namespace requires more free space + * than is currently available. + * @NVME_SC_NS_ID_UNAVAILABLE: Namespace Identifier Unavailable: The + * number of namespaces supported has been + * exceeded. + * @NVME_SC_NS_ALREADY_ATTACHED: Namespace Already Attached: The + * controller is already attached to the + * namespace specified. + * @NVME_SC_NS_IS_PRIVATE: Namespace Is Private: The namespace is + * private and is already attached to one + * controller. + * @NVME_SC_NS_NOT_ATTACHED: Namespace Not Attached: The request to + * detach the controller could not be + * completed because the controller is not + * attached to the namespace. + * @NVME_SC_THIN_PROV_NOT_SUPP: Thin Provisioning Not Supported: Thin + * provisioning is not supported by the + * controller. + * @NVME_SC_CTRL_LIST_INVALID: Controller List Invalid: The controller + * list provided contains invalid controller + * ids. + * @NVME_SC_SELF_TEST_IN_PROGRESS: Device Self-test In Progress: The controller + * or NVM subsystem already has a device + * self-test operation in process. + * @NVME_SC_BP_WRITE_PROHIBITED: Boot Partition Write Prohibited: The + * command is trying to modify a locked Boot + * Partition. + * @NVME_SC_INVALID_CTRL_ID: Invalid Controller Identifier: + * @NVME_SC_INVALID_SEC_CTRL_STATE: Invalid Secondary Controller State + * @NVME_SC_INVALID_CTRL_RESOURCES: Invalid Number of Controller Resources + * @NVME_SC_INVALID_RESOURCE_ID: Invalid Resource Identifier + * @NVME_SC_PMR_SAN_PROHIBITED: Sanitize Prohibited While Persistent + * Memory Region is Enabled + * @NVME_SC_ANA_GROUP_ID_INVALID: ANA Group Identifier Invalid: The specified + * ANA Group Identifier (ANAGRPID) is not + * supported in the submitted command. + * @NVME_SC_ANA_ATTACH_FAILED: ANA Attach Failed: The controller is not + * attached to the namespace as a result + * of an ANA condition. + * @NVME_SC_INSUFFICIENT_CAP: Insufficient Capacity: Requested operation + * requires more free space than is currently + * available. + * @NVME_SC_NS_ATTACHMENT_LIMIT_EXCEEDED: Namespace Attachment Limit Exceeded: + * Attaching the ns to a controller causes + * max number of ns attachments allowed + * to be exceeded. 
+ * @NVME_SC_PROHIBIT_CMD_EXEC_NOT_SUPPORTED: Prohibition of Command Execution
+ * Not Supported
+ * @NVME_SC_IOCS_NOT_SUPPORTED: I/O Command Set Not Supported
+ * @NVME_SC_IOCS_NOT_ENABLED: I/O Command Set Not Enabled
+ * @NVME_SC_IOCS_COMBINATION_REJECTED: I/O Command Set Combination Rejected
+ * @NVME_SC_INVALID_IOCS: Invalid I/O Command Set
+ * @NVME_SC_ID_UNAVAILABLE: Identifier Unavailable
+ * @NVME_SC_INVALID_DISCOVERY_INFO: The discovery information provided in
+ * one or more extended discovery
+ * information entries is not applicable
+ * for the type of entity selected in
+ * the Entity Type (ETYPE) field of the
+ * Discovery Information Management
+ * command data portion’s header.
+ * @NVME_SC_ZONING_DATA_STRUCT_LOCKED: The requested Zoning data structure
+ * is locked on the CDC.
+ * @NVME_SC_ZONING_DATA_STRUCT_NOTFND: The requested Zoning data structure
+ * does not exist on the CDC.
+ * @NVME_SC_INSUFFICIENT_DISC_RES: The number of discovery information
+ * entries provided in the data portion
+ * of the Discovery Information
+ * Management command for a registration
+ * task (i.e., TAS field cleared to 0h)
+ * exceeds the available capacity for
+ * new discovery information entries on
+ * the CDC or DDC. This may be a
+ * transient condition.
+ * @NVME_SC_REQSTD_FUNCTION_DISABLED: Fabric Zoning is not enabled on the
+ * CDC.
+ * @NVME_SC_ZONEGRP_ORIGINATOR_INVLD: The NQN contained in the ZoneGroup
+ * Originator field does not match the
+ * Host NQN used by the DDC to connect
+ * to the CDC.
+ * @NVME_SC_BAD_ATTRIBUTES: Conflicting Dataset Management Attributes
+ * @NVME_SC_INVALID_PI: Invalid Protection Information
+ * @NVME_SC_READ_ONLY: Attempted Write to Read Only Range
+ * @NVME_SC_CMD_SIZE_LIMIT_EXCEEDED: Command Size Limit Exceeded
+ * @NVME_SC_CONNECT_FORMAT: Incompatible Format: The NVM subsystem
+ * does not support the record format
+ * specified by the host.
+ * @NVME_SC_CONNECT_CTRL_BUSY: Controller Busy: The controller is
+ * already associated with a host.
+ * @NVME_SC_CONNECT_INVALID_PARAM: Connect Invalid Parameters: One or more
+ * of the command parameters are invalid.
+ * @NVME_SC_CONNECT_RESTART_DISC: Connect Restart Discovery: The NVM
+ * subsystem requested is not available.
+ * @NVME_SC_CONNECT_INVALID_HOST: Connect Invalid Host: The host is either
+ * not allowed to establish an association
+ * to any controller in the NVM subsystem or
+ * the host is not allowed to establish an
+ * association to the specified controller.
+ * @NVME_SC_DISCONNECT_INVALID_QTYPE: Invalid Queue Type: The command was sent
+ * on the wrong queue type.
+ * @NVME_SC_DISCOVERY_RESTART: Discover Restart: The snapshot of the
+ * records is now invalid or out of date.
+ * @NVME_SC_AUTH_REQUIRED: Authentication Required: NVMe in-band
+ * authentication is required and the queue
+ * has not yet been authenticated.
+ * @NVME_SC_WRITE_FAULT: Write Fault: The write data could not be
+ * committed to the media.
+ * @NVME_SC_READ_ERROR: Unrecovered Read Error: The read data
+ * could not be recovered from the media.
+ * @NVME_SC_GUARD_CHECK: End-to-end Guard Check Error: The command
+ * was aborted due to an end-to-end guard
+ * check failure.
+ * @NVME_SC_APPTAG_CHECK: End-to-end Application Tag Check Error:
+ * The command was aborted due to an
+ * end-to-end application tag check failure.
+ * @NVME_SC_REFTAG_CHECK: End-to-end Reference Tag Check Error: The
+ * command was aborted due to an end-to-end
+ * reference tag check failure.
+ * @NVME_SC_COMPARE_FAILED: Compare Failure: The command failed due + * to a miscompare during a Compare command. + * @NVME_SC_ACCESS_DENIED: Access Denied: Access to the namespace + * and/or LBA range is denied due to lack of + * access rights. + * @NVME_SC_UNWRITTEN_BLOCK: Deallocated or Unwritten Logical Block: + * The command failed due to an attempt to + * read from or verify an LBA range + * containing a deallocated or unwritten + * logical block. + * @NVME_SC_STORAGE_TAG_CHECK: End-to-End Storage Tag Check Error: The + * command was aborted due to an end-to-end + * storage tag check failure. + * @NVME_SC_ANA_INTERNAL_PATH_ERROR: Internal Path Error: The command was not + * completed as the result of a controller + * internal error that is specific to the + * controller processing the command. + * @NVME_SC_ANA_PERSISTENT_LOSS: Asymmetric Access Persistent Loss: The + * requested function (e.g., command) is not + * able to be performed as a result of the + * relationship between the controller and + * the namespace being in the ANA Persistent + * Loss state. + * @NVME_SC_ANA_INACCESSIBLE: Asymmetric Access Inaccessible: The + * requested function (e.g., command) is not + * able to be performed as a result of the + * relationship between the controller and + * the namespace being in the ANA + * Inaccessible state. + * @NVME_SC_ANA_TRANSITION: Asymmetric Access Transition: The + * requested function (e.g., command) is not + * able to be performed as a result of the + * relationship between the controller and + * the namespace transitioning between + * Asymmetric Namespace Access states. + * @NVME_SC_CTRL_PATH_ERROR: Controller Pathing Error: A pathing error + * was detected by the controller. + * @NVME_SC_HOST_PATH_ERROR: Host Pathing Error: A pathing error was + * detected by the host. + * @NVME_SC_CMD_ABORTED_BY_HOST: Command Aborted By Host: The command was + * aborted as a result of host action. + * @NVME_SC_CRD: Mask to get value of Command Retry Delay + * index + * @NVME_SC_MORE: More bit. If set, more status information + * for this command as part of the Error + * Information log that may be retrieved with + * the Get Log Page command. + * @NVME_SC_DNR: Do Not Retry bit. If set, if the same + * command is re-submitted to any controller + * in the NVM subsystem, then that + * re-submitted command is expected to fail. + * @NVME_SC_ZNS_INVALID_OP_REQUEST: Invalid Zone Operation Request: + * The operation requested is invalid. This may be due to + * various conditions, including: attempting to allocate a + * ZRWA when a zone is not in the ZSE:Empty state; or + * invalid Flush Explicit ZRWA Range Send Zone Action + * operation. + * @NVME_SC_ZNS_ZRWA_RESOURCES_UNAVAILABLE: ZRWA Resources Unavailable: + * No ZRWAs are available. + * @NVME_SC_ZNS_BOUNDARY_ERROR: Zone Boundary Error: The command specifies + * logical blocks in more than one zone. + * @NVME_SC_ZNS_FULL: Zone Is Full: The accessed zone is in the + * ZSF:Full state. + * @NVME_SC_ZNS_READ_ONLY: Zone Is Read Only: The accessed zone is + * in the ZSRO:Read Only state. + * @NVME_SC_ZNS_OFFLINE: Zone Is Offline: The accessed zone is + * in the ZSO:Offline state. + * @NVME_SC_ZNS_INVALID_WRITE: Zone Invalid Write: The write to a zone + * was not at the write pointer. + * @NVME_SC_ZNS_TOO_MANY_ACTIVE: Too Many Active Zones: The controller + * does not allow additional active zones. + * @NVME_SC_ZNS_TOO_MANY_OPENS: Too Many Open Zones: The controller does + * not allow additional open zones. 
+ * @NVME_SC_ZNS_INVAL_TRANSITION: Invalid Zone State Transition: The request + * is not a valid zone state transition. + */ +enum nvme_status_field { + /* + * Status Code Type indicators + */ + NVME_SCT_GENERIC = 0x0, + NVME_SCT_CMD_SPECIFIC = 0x1, + NVME_SCT_MEDIA = 0x2, + NVME_SCT_PATH = 0x3, + NVME_SCT_VS = 0x7, + NVME_SCT_MASK = 0x7, + NVME_SCT_SHIFT = 0x8, + + /* + * Status Code inidicators + */ + NVME_SC_MASK = 0xff, + NVME_SC_SHIFT = 0x0, + + /* + * Generic Command Status Codes: + */ + NVME_SC_SUCCESS = 0x0, + NVME_SC_INVALID_OPCODE = 0x1, + NVME_SC_INVALID_FIELD = 0x2, + NVME_SC_CMDID_CONFLICT = 0x3, + NVME_SC_DATA_XFER_ERROR = 0x4, + NVME_SC_POWER_LOSS = 0x5, + NVME_SC_INTERNAL = 0x6, + NVME_SC_ABORT_REQ = 0x7, + NVME_SC_ABORT_QUEUE = 0x8, + NVME_SC_FUSED_FAIL = 0x9, + NVME_SC_FUSED_MISSING = 0xa, + NVME_SC_INVALID_NS = 0xb, + NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, + NVME_SC_CMB_INVALID_USE = 0x12, + NVME_SC_PRP_INVALID_OFFSET = 0x13, + NVME_SC_AWU_EXCEEDED = 0x14, + NVME_SC_OP_DENIED = 0x15, + NVME_SC_SGL_INVALID_OFFSET = 0x16, + NVME_SC_HOSTID_FORMAT = 0x18, + NVME_SC_KAT_EXPIRED = 0x19, + NVME_SC_KAT_INVALID = 0x1a, + NVME_SC_CMD_ABORTED_PREMEPT = 0x1b, + NVME_SC_SANITIZE_FAILED = 0x1c, + NVME_SC_SANITIZE_IN_PROGRESS = 0x1d, + NVME_SC_SGL_INVALID_GRANULARITY = 0x1e, + NVME_SC_CMD_IN_CMBQ_NOT_SUPP = 0x1f, + NVME_SC_NS_WRITE_PROTECTED = 0x20, + NVME_SC_CMD_INTERRUPTED = 0x21, + NVME_SC_TRAN_TPORT_ERROR = 0x22, + NVME_SC_PROHIBITED_BY_CMD_AND_FEAT = 0x23, + NVME_SC_ADMIN_CMD_MEDIA_NOT_READY = 0x24, + NVME_SC_LBA_RANGE = 0x80, + NVME_SC_CAP_EXCEEDED = 0x81, + NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, + NVME_SC_FORMAT_IN_PROGRESS = 0x84, + + /* + * Command Specific Status Codes: + */ + NVME_SC_CQ_INVALID = 0x00, + NVME_SC_QID_INVALID = 0x01, + NVME_SC_QUEUE_SIZE = 0x02, + NVME_SC_ABORT_LIMIT = 0x03, + NVME_SC_ABORT_MISSING = 0x04, + NVME_SC_ASYNC_LIMIT = 0x05, + NVME_SC_FIRMWARE_SLOT = 0x06, + NVME_SC_FIRMWARE_IMAGE = 0x07, + NVME_SC_INVALID_VECTOR = 0x08, + NVME_SC_INVALID_LOG_PAGE = 0x09, + NVME_SC_INVALID_FORMAT = 0x0a, + NVME_SC_FW_NEEDS_CONV_RESET = 0x0b, + NVME_SC_INVALID_QUEUE = 0x0c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x0d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x0e, + NVME_SC_FEATURE_NOT_PER_NS = 0x0f, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x10, + NVME_SC_FW_NEEDS_RESET = 0x11, + NVME_SC_FW_NEEDS_MAX_TIME = 0x12, + NVME_SC_FW_ACTIVATE_PROHIBITED = 0x13, + NVME_SC_OVERLAPPING_RANGE = 0x14, + NVME_SC_NS_INSUFFICIENT_CAP = 0x15, + NVME_SC_NS_ID_UNAVAILABLE = 0x16, + NVME_SC_NS_ALREADY_ATTACHED = 0x18, + NVME_SC_NS_IS_PRIVATE = 0x19, + NVME_SC_NS_NOT_ATTACHED = 0x1a, + NVME_SC_THIN_PROV_NOT_SUPP = 0x1b, + NVME_SC_CTRL_LIST_INVALID = 0x1c, + NVME_SC_SELF_TEST_IN_PROGRESS = 0x1d, + NVME_SC_BP_WRITE_PROHIBITED = 0x1e, + NVME_SC_INVALID_CTRL_ID = 0x1f, + NVME_SC_INVALID_SEC_CTRL_STATE = 0x20, + NVME_SC_INVALID_CTRL_RESOURCES = 0x21, + NVME_SC_INVALID_RESOURCE_ID = 0x22, + NVME_SC_PMR_SAN_PROHIBITED = 0x23, + NVME_SC_ANA_GROUP_ID_INVALID = 0x24, + NVME_SC_ANA_ATTACH_FAILED = 0x25, + NVME_SC_INSUFFICIENT_CAP = 0x26, + NVME_SC_NS_ATTACHMENT_LIMIT_EXCEEDED = 0x27, + NVME_SC_PROHIBIT_CMD_EXEC_NOT_SUPPORTED = 0x28, + + /* + * Command Set Specific - Namespace Types commands: + */ + NVME_SC_IOCS_NOT_SUPPORTED = 0x29, + NVME_SC_IOCS_NOT_ENABLED = 0x2a, + NVME_SC_IOCS_COMBINATION_REJECTED = 0x2b, + 
NVME_SC_INVALID_IOCS = 0x2c, + NVME_SC_ID_UNAVAILABLE = 0x2d, + + /* + * Discovery Information Management + */ + NVME_SC_INVALID_DISCOVERY_INFO = 0x2f, + NVME_SC_ZONING_DATA_STRUCT_LOCKED = 0x30, + NVME_SC_ZONING_DATA_STRUCT_NOTFND = 0x31, + NVME_SC_INSUFFICIENT_DISC_RES = 0x32, + NVME_SC_REQSTD_FUNCTION_DISABLED = 0x33, + NVME_SC_ZONEGRP_ORIGINATOR_INVLD = 0x34, + + /* + * I/O Command Set Specific - NVM commands: + */ + NVME_SC_BAD_ATTRIBUTES = 0x80, + NVME_SC_INVALID_PI = 0x81, + NVME_SC_READ_ONLY = 0x82, + NVME_SC_CMD_SIZE_LIMIT_EXCEEDED = 0x83, + + /* + * I/O Command Set Specific - Fabrics commands: + */ + NVME_SC_CONNECT_FORMAT = 0x80, + NVME_SC_CONNECT_CTRL_BUSY = 0x81, + NVME_SC_CONNECT_INVALID_PARAM = 0x82, + NVME_SC_CONNECT_RESTART_DISC = 0x83, + NVME_SC_CONNECT_INVALID_HOST = 0x84, + NVME_SC_DISCONNECT_INVALID_QTYPE= 0x85, + NVME_SC_DISCOVERY_RESTART = 0x90, + NVME_SC_AUTH_REQUIRED = 0x91, + + /* + * I/O Command Set Specific - ZNS commands: + */ + NVME_SC_ZNS_INVALID_OP_REQUEST = 0xb6, + NVME_SC_ZNS_ZRWA_RESOURCES_UNAVAILABLE = 0xb7, + NVME_SC_ZNS_BOUNDARY_ERROR = 0xb8, + NVME_SC_ZNS_FULL = 0xb9, + NVME_SC_ZNS_READ_ONLY = 0xba, + NVME_SC_ZNS_OFFLINE = 0xbb, + NVME_SC_ZNS_INVALID_WRITE = 0xbc, + NVME_SC_ZNS_TOO_MANY_ACTIVE = 0xbd, + NVME_SC_ZNS_TOO_MANY_OPENS = 0xbe, + NVME_SC_ZNS_INVAL_TRANSITION = 0xbf, + + /* + * Media and Data Integrity Errors: + */ + NVME_SC_WRITE_FAULT = 0x80, + NVME_SC_READ_ERROR = 0x81, + NVME_SC_GUARD_CHECK = 0x82, + NVME_SC_APPTAG_CHECK = 0x83, + NVME_SC_REFTAG_CHECK = 0x84, + NVME_SC_COMPARE_FAILED = 0x85, + NVME_SC_ACCESS_DENIED = 0x86, + NVME_SC_UNWRITTEN_BLOCK = 0x87, + NVME_SC_STORAGE_TAG_CHECK = 0x88, + + /* + * Path-related Errors: + */ + NVME_SC_ANA_INTERNAL_PATH_ERROR = 0x00, + NVME_SC_ANA_PERSISTENT_LOSS = 0x01, + NVME_SC_ANA_INACCESSIBLE = 0x02, + NVME_SC_ANA_TRANSITION = 0x03, + NVME_SC_CTRL_PATH_ERROR = 0x60, + NVME_SC_HOST_PATH_ERROR = 0x70, + NVME_SC_CMD_ABORTED_BY_HOST = 0x71, + + /* + * Additional status field flags + */ + NVME_SC_CRD = 0x1800, + NVME_SC_MORE = 0x2000, + NVME_SC_DNR = 0x4000, +}; + +/** + * nvme_status_code_type() - Returns the NVMe Status Code Type + * @status_field: The NVMe Completion Queue Entry's Status Field + * + * See &enum nvme_status_field + */ +static inline __u16 nvme_status_code_type(__u16 status_field) +{ + return NVME_GET(status_field, SCT); +} + +/** + * nvme_status_code() - Returns the NVMe Status Code + * @status_field: The NVMe Completion Queue Entry's Status Field + * + * See &enum nvme_status_field + */ +static inline __u16 nvme_status_code(__u16 status_field) +{ + return NVME_GET(status_field, SC); +} + +/** + * enum nvme_admin_opcode - Known NVMe admin opcodes + * @nvme_admin_delete_sq: Delete I/O Submission Queue + * @nvme_admin_create_sq: Create I/O Submission Queue + * @nvme_admin_get_log_page: Get Log Page + * @nvme_admin_delete_cq: Delete I/O Completion Queue + * @nvme_admin_create_cq: Create I/O Completion Queue + * @nvme_admin_identify: Identify + * @nvme_admin_abort_cmd: Abort + * @nvme_admin_set_features: Set Features + * @nvme_admin_get_features: Get Features + * @nvme_admin_async_event: Asynchronous Event Request + * @nvme_admin_ns_mgmt: Namespace Management + * @nvme_admin_fw_activate: Firmware Commit + * @nvme_admin_fw_commit: Firmware Commit + * @nvme_admin_fw_download: Firmware Image Download + * @nvme_admin_dev_self_test: Device Self-test + * @nvme_admin_ns_attach: Namespace Attachment + * @nvme_admin_keep_alive: Keep Alive + * @nvme_admin_directive_send: Directive Send + * 
@nvme_admin_directive_recv:	Directive Receive
+ * @nvme_admin_virtual_mgmt:	Virtualization Management
+ * @nvme_admin_nvme_mi_send:	NVMe-MI Send
+ * @nvme_admin_nvme_mi_recv:	NVMe-MI Receive
+ * @nvme_admin_capacity_mgmt:	Capacity Management
+ * @nvme_admin_discovery_info_mgmt: Discovery Information Management (DIM)
+ * @nvme_admin_fabric_zoning_recv: Fabric Zoning Receive
+ * @nvme_admin_lockdown:	Lockdown
+ * @nvme_admin_fabric_zoning_lookup: Fabric Zoning Lookup
+ * @nvme_admin_fabric_zoning_send: Fabric Zoning Send
+ * @nvme_admin_dbbuf:		Doorbell Buffer Config
+ * @nvme_admin_fabrics:		Fabrics Commands
+ * @nvme_admin_format_nvm:	Format NVM
+ * @nvme_admin_security_send:	Security Send
+ * @nvme_admin_security_recv:	Security Receive
+ * @nvme_admin_sanitize_nvm:	Sanitize
+ * @nvme_admin_get_lba_status:	Get LBA Status
+ */
+enum nvme_admin_opcode {
+	nvme_admin_delete_sq = 0x00,
+	nvme_admin_create_sq = 0x01,
+	nvme_admin_get_log_page = 0x02,
+	nvme_admin_delete_cq = 0x04,
+	nvme_admin_create_cq = 0x05,
+	nvme_admin_identify = 0x06,
+	nvme_admin_abort_cmd = 0x08,
+	nvme_admin_set_features = 0x09,
+	nvme_admin_get_features = 0x0a,
+	nvme_admin_async_event = 0x0c,
+	nvme_admin_ns_mgmt = 0x0d,
+	nvme_admin_fw_commit = 0x10,
+	nvme_admin_fw_activate = nvme_admin_fw_commit,
+	nvme_admin_fw_download = 0x11,
+	nvme_admin_dev_self_test = 0x14,
+	nvme_admin_ns_attach = 0x15,
+	nvme_admin_keep_alive = 0x18,
+	nvme_admin_directive_send = 0x19,
+	nvme_admin_directive_recv = 0x1a,
+	nvme_admin_virtual_mgmt = 0x1c,
+	nvme_admin_nvme_mi_send = 0x1d,
+	nvme_admin_nvme_mi_recv = 0x1e,
+	nvme_admin_capacity_mgmt = 0x20,
+	nvme_admin_discovery_info_mgmt = 0x21,
+	nvme_admin_fabric_zoning_recv = 0x22,
+	nvme_admin_lockdown = 0x24,
+	nvme_admin_fabric_zoning_lookup = 0x25,
+	nvme_admin_fabric_zoning_send = 0x29,
+	nvme_admin_dbbuf = 0x7c,
+	nvme_admin_fabrics = 0x7f,
+	nvme_admin_format_nvm = 0x80,
+	nvme_admin_security_send = 0x81,
+	nvme_admin_security_recv = 0x82,
+	nvme_admin_sanitize_nvm = 0x84,
+	nvme_admin_get_lba_status = 0x86,
+};
+
+/**
+ * enum nvme_identify_cns -
+ * @NVME_IDENTIFY_CNS_NS:		Identify Namespace data structure
+ * @NVME_IDENTIFY_CNS_CTRL:		Identify Controller data structure
+ * @NVME_IDENTIFY_CNS_NS_ACTIVE_LIST:	Active Namespace ID list
+ * @NVME_IDENTIFY_CNS_NS_DESC_LIST:	Namespace Identification Descriptor list
+ * @NVME_IDENTIFY_CNS_NVMSET_LIST:	NVM Set List
+ * @NVME_IDENTIFY_CNS_CSI_NS:		I/O Command Set specific Identify
+ *					Namespace data structure
+ * @NVME_IDENTIFY_CNS_CSI_CTRL:		I/O Command Set specific Identify
+ *					Controller data structure
+ * @NVME_IDENTIFY_CNS_CSI_NS_ACTIVE_LIST: Active Namespace ID list associated
+ *					with the specified I/O Command Set
+ * @NVME_IDENTIFY_CNS_CSI_INDEPENDENT_ID_NS: I/O Command Set Independent
+ *					Identify Namespace data structure
+ * @NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST: Allocated Namespace ID list
+ * @NVME_IDENTIFY_CNS_ALLOCATED_NS:	Identify Namespace data structure for
+ *					the specified allocated NSID
+ * @NVME_IDENTIFY_CNS_NS_CTRL_LIST:	Controller List of controllers attached
+ *					to the specified NSID
+ * @NVME_IDENTIFY_CNS_CTRL_LIST:	Controller List of controllers that
+ *					exist in the NVM subsystem
+ * @NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP:	Primary Controller Capabilities data
+ *					structure for the specified primary
+ *					controller
+ * @NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST: Secondary Controller list of
+ *					controllers associated with the primary
+ *					controller processing the command
+ * @NVME_IDENTIFY_CNS_NS_GRANULARITY:	A Namespace Granularity List
+ * @NVME_IDENTIFY_CNS_UUID_LIST:	A UUID List
+ * @NVME_IDENTIFY_CNS_DOMAIN_LIST:	Domain List
+ * @NVME_IDENTIFY_CNS_ENDURANCE_GROUP_ID: Endurance Group List
+ * @NVME_IDENTIFY_CNS_CSI_ALLOCATED_NS_LIST: I/O Command Set specific Allocated
+ *					Namespace ID list
+ * @NVME_IDENTIFY_CNS_CSI_ID_NS_DATA_STRUCTURE: I/O Command Set specific ID
+ *					Namespace Data Structure for Allocated
+ *					Namespace ID
+ * @NVME_IDENTIFY_CNS_COMMAND_SET_STRUCTURE: Base Specification 2.0a section
+ *					5.17.2.21
+ */
+enum nvme_identify_cns {
+	NVME_IDENTIFY_CNS_NS = 0x00,
+	NVME_IDENTIFY_CNS_CTRL = 0x01,
+	NVME_IDENTIFY_CNS_NS_ACTIVE_LIST = 0x02,
+	NVME_IDENTIFY_CNS_NS_DESC_LIST = 0x03,
+	NVME_IDENTIFY_CNS_NVMSET_LIST = 0x04,
+	NVME_IDENTIFY_CNS_CSI_NS = 0x05,
+	NVME_IDENTIFY_CNS_CSI_CTRL = 0x06,
+	NVME_IDENTIFY_CNS_CSI_NS_ACTIVE_LIST = 0x07,
+	NVME_IDENTIFY_CNS_CSI_INDEPENDENT_ID_NS = 0x08,
+	NVME_IDENTIFY_CNS_NS_USER_DATA_FORMAT = 0x09,
+	NVME_IDENTIFY_CNS_CSI_NS_USER_DATA_FORMAT = 0x0A,
+	NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST = 0x10,
+	NVME_IDENTIFY_CNS_ALLOCATED_NS = 0x11,
+	NVME_IDENTIFY_CNS_NS_CTRL_LIST = 0x12,
+	NVME_IDENTIFY_CNS_CTRL_LIST = 0x13,
+	NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP = 0x14,
+	NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST = 0x15,
+	NVME_IDENTIFY_CNS_NS_GRANULARITY = 0x16,
+	NVME_IDENTIFY_CNS_UUID_LIST = 0x17,
+	NVME_IDENTIFY_CNS_DOMAIN_LIST = 0x18,
+	NVME_IDENTIFY_CNS_ENDURANCE_GROUP_ID = 0x19,
+	NVME_IDENTIFY_CNS_CSI_ALLOCATED_NS_LIST = 0x1A,
+	NVME_IDENTIFY_CNS_CSI_ID_NS_DATA_STRUCTURE = 0x1B,
+	NVME_IDENTIFY_CNS_COMMAND_SET_STRUCTURE = 0x1C,
+};
+
+/**
+ * enum nvme_cmd_get_log_lid -
+ * @NVME_LOG_LID_SUPPORTED_LOG_PAGES:	Supported Log Pages
+ * @NVME_LOG_LID_ERROR:			Error Information
+ * @NVME_LOG_LID_SMART:			SMART / Health Information
+ * @NVME_LOG_LID_FW_SLOT:		Firmware Slot Information
+ * @NVME_LOG_LID_CHANGED_NS:		Changed Namespace List
+ * @NVME_LOG_LID_CMD_EFFECTS:		Commands Supported and Effects
+ * @NVME_LOG_LID_DEVICE_SELF_TEST:	Device Self-test
+ * @NVME_LOG_LID_TELEMETRY_HOST:	Telemetry Host-Initiated
+ * @NVME_LOG_LID_TELEMETRY_CTRL:	Telemetry Controller-Initiated
+ * @NVME_LOG_LID_ENDURANCE_GROUP:	Endurance Group Information
+ * @NVME_LOG_LID_PREDICTABLE_LAT_NVMSET: Predictable Latency Per NVM Set
+ * @NVME_LOG_LID_PREDICTABLE_LAT_AGG:	Predictable Latency Event Aggregate
+ * @NVME_LOG_LID_ANA:			Asymmetric Namespace Access
+ * @NVME_LOG_LID_PERSISTENT_EVENT:	Persistent Event Log
+ * @NVME_LOG_LID_LBA_STATUS:		LBA Status Information
+ * @NVME_LOG_LID_ENDURANCE_GRP_EVT:	Endurance Group Event Aggregate
+ * @NVME_LOG_LID_MEDIA_UNIT_STATUS:	Media Unit Status
+ * @NVME_LOG_LID_SUPPORTED_CAP_CONFIG_LIST: Supported Capacity Configuration List
+ * @NVME_LOG_LID_FID_SUPPORTED_EFFECTS:	Feature Identifiers Supported and Effects
+ * @NVME_LOG_LID_MI_CMD_SUPPORTED_EFFECTS: NVMe-MI Commands Supported and Effects
+ * @NVME_LOG_LID_BOOT_PARTITION:	Boot Partition
+ * @NVME_LOG_LID_DISCOVER:		Discovery
+ * @NVME_LOG_LID_RESERVATION:		Reservation Notification
+ * @NVME_LOG_LID_SANITIZE:		Sanitize Status
+ * @NVME_LOG_LID_ZNS_CHANGED_ZONES:	Changed Zone List
+ */
+enum nvme_cmd_get_log_lid {
+	NVME_LOG_LID_SUPPORTED_LOG_PAGES = 0x00,
+	NVME_LOG_LID_ERROR = 0x01,
+	NVME_LOG_LID_SMART = 0x02,
+	NVME_LOG_LID_FW_SLOT = 0x03,
+	NVME_LOG_LID_CHANGED_NS = 0x04,
+	NVME_LOG_LID_CMD_EFFECTS = 0x05,
+	NVME_LOG_LID_DEVICE_SELF_TEST = 0x06,
+	NVME_LOG_LID_TELEMETRY_HOST = 0x07,
+	NVME_LOG_LID_TELEMETRY_CTRL = 0x08,
+	NVME_LOG_LID_ENDURANCE_GROUP = 0x09,
+	NVME_LOG_LID_PREDICTABLE_LAT_NVMSET = 0x0a,
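+	/*
+	 * Illustrative note, not part of the upstream header: a log page
+	 * identifier from enum nvme_cmd_get_log_lid goes into bits 07:00 of
+	 * CDW10 of the Get Log Page command, with the number of dwords to
+	 * transfer (NUMDL/NUMDU) split across CDW10 and CDW11. A SMART /
+	 * Health query, for instance, requests NVME_LOG_LID_SMART (0x02)
+	 * for a 512-byte log page.
+	 */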
+	NVME_LOG_LID_PREDICTABLE_LAT_AGG = 0x0b,
+	NVME_LOG_LID_ANA = 0x0c,
+	NVME_LOG_LID_PERSISTENT_EVENT = 0x0d,
+	NVME_LOG_LID_LBA_STATUS = 0x0e,
+	NVME_LOG_LID_ENDURANCE_GRP_EVT = 0x0f,
+	NVME_LOG_LID_MEDIA_UNIT_STATUS = 0x10,
+	NVME_LOG_LID_SUPPORTED_CAP_CONFIG_LIST = 0x11,
+	NVME_LOG_LID_FID_SUPPORTED_EFFECTS = 0x12,
+	NVME_LOG_LID_MI_CMD_SUPPORTED_EFFECTS = 0x13,
+	NVME_LOG_LID_BOOT_PARTITION = 0x15,
+	NVME_LOG_LID_DISCOVER = 0x70,
+	NVME_LOG_LID_RESERVATION = 0x80,
+	NVME_LOG_LID_SANITIZE = 0x81,
+	NVME_LOG_LID_ZNS_CHANGED_ZONES = 0xbf,
+};
+
+/**
+ * enum nvme_features_id -
+ * @NVME_FEAT_FID_ARBITRATION:		Arbitration
+ * @NVME_FEAT_FID_POWER_MGMT:		Power Management
+ * @NVME_FEAT_FID_LBA_RANGE:		LBA Range Type
+ * @NVME_FEAT_FID_TEMP_THRESH:		Temperature Threshold
+ * @NVME_FEAT_FID_ERR_RECOVERY:		Error Recovery
+ * @NVME_FEAT_FID_VOLATILE_WC:		Volatile Write Cache
+ * @NVME_FEAT_FID_NUM_QUEUES:		Number of Queues
+ * @NVME_FEAT_FID_IRQ_COALESCE:		Interrupt Coalescing
+ * @NVME_FEAT_FID_IRQ_CONFIG:		Interrupt Vector Configuration
+ * @NVME_FEAT_FID_WRITE_ATOMIC:		Write Atomicity Normal
+ * @NVME_FEAT_FID_ASYNC_EVENT:		Asynchronous Event Configuration
+ * @NVME_FEAT_FID_AUTO_PST:		Autonomous Power State Transition
+ * @NVME_FEAT_FID_HOST_MEM_BUF:		Host Memory Buffer
+ * @NVME_FEAT_FID_TIMESTAMP:		Timestamp
+ * @NVME_FEAT_FID_KATO:			Keep Alive Timer
+ * @NVME_FEAT_FID_HCTM:			Host Controlled Thermal Management
+ * @NVME_FEAT_FID_NOPSC:		Non-Operational Power State Config
+ * @NVME_FEAT_FID_RRL:			Read Recovery Level Config
+ * @NVME_FEAT_FID_PLM_CONFIG:		Predictable Latency Mode Config
+ * @NVME_FEAT_FID_PLM_WINDOW:		Predictable Latency Mode Window
+ * @NVME_FEAT_FID_LBA_STS_INTERVAL:	LBA Status Information Report Interval
+ * @NVME_FEAT_FID_HOST_BEHAVIOR:	Host Behavior Support
+ * @NVME_FEAT_FID_SANITIZE:		Sanitize Config
+ * @NVME_FEAT_FID_ENDURANCE_EVT_CFG:	Endurance Group Event Configuration
+ * @NVME_FEAT_FID_IOCS_PROFILE:		I/O Command Set Profile
+ * @NVME_FEAT_FID_SPINUP_CONTROL:	Spinup Control
+ * @NVME_FEAT_FID_ENH_CTRL_METADATA:	Enhanced Controller Metadata
+ * @NVME_FEAT_FID_CTRL_METADATA:	Controller Metadata
+ * @NVME_FEAT_FID_NS_METADATA:		Namespace Metadata
+ * @NVME_FEAT_FID_SW_PROGRESS:		Software Progress Marker
+ * @NVME_FEAT_FID_HOST_ID:		Host Identifier
+ * @NVME_FEAT_FID_RESV_MASK:		Reservation Notification Mask
+ * @NVME_FEAT_FID_RESV_PERSIST:		Reservation Persistence
+ * @NVME_FEAT_FID_WRITE_PROTECT:	Namespace Write Protection Config
+ */
+enum nvme_features_id {
+	NVME_FEAT_FID_ARBITRATION = 0x01,
+	NVME_FEAT_FID_POWER_MGMT = 0x02,
+	NVME_FEAT_FID_LBA_RANGE = 0x03,
+	NVME_FEAT_FID_TEMP_THRESH = 0x04,
+	NVME_FEAT_FID_ERR_RECOVERY = 0x05,
+	NVME_FEAT_FID_VOLATILE_WC = 0x06,
+	NVME_FEAT_FID_NUM_QUEUES = 0x07,
+	NVME_FEAT_FID_IRQ_COALESCE = 0x08,
+	NVME_FEAT_FID_IRQ_CONFIG = 0x09,
+	NVME_FEAT_FID_WRITE_ATOMIC = 0x0a,
+	NVME_FEAT_FID_ASYNC_EVENT = 0x0b,
+	NVME_FEAT_FID_AUTO_PST = 0x0c,
+	NVME_FEAT_FID_HOST_MEM_BUF = 0x0d,
+	NVME_FEAT_FID_TIMESTAMP = 0x0e,
+	NVME_FEAT_FID_KATO = 0x0f,
+	NVME_FEAT_FID_HCTM = 0x10,
+	NVME_FEAT_FID_NOPSC = 0x11,
+	NVME_FEAT_FID_RRL = 0x12,
+	NVME_FEAT_FID_PLM_CONFIG = 0x13,
+	NVME_FEAT_FID_PLM_WINDOW = 0x14,
+	NVME_FEAT_FID_LBA_STS_INTERVAL = 0x15,
+	NVME_FEAT_FID_HOST_BEHAVIOR = 0x16,
+	NVME_FEAT_FID_SANITIZE = 0x17,
+	NVME_FEAT_FID_ENDURANCE_EVT_CFG = 0x18,
+	NVME_FEAT_FID_IOCS_PROFILE = 0x19, /* XXX: Placeholder until assigned */
+	NVME_FEAT_FID_SPINUP_CONTROL = 0x1a,
+	NVME_FEAT_FID_ENH_CTRL_METADATA = 0x7d,
+	NVME_FEAT_FID_CTRL_METADATA = 0x7e,
+
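+	/*
+	 * Illustrative note, not part of the upstream header: a feature
+	 * identifier from enum nvme_features_id is placed in bits 07:00 of
+	 * CDW10 of the Get Features and Set Features commands; for Get
+	 * Features, bits 10:08 of CDW10 select which value is returned
+	 * (current, default, saved, or supported capabilities; see enum
+	 * nvme_get_features_sel further below).
+	 */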
NVME_FEAT_FID_NS_METADATA = 0x7f, + NVME_FEAT_FID_SW_PROGRESS = 0x80, + NVME_FEAT_FID_HOST_ID = 0x81, + NVME_FEAT_FID_RESV_MASK = 0x82, + NVME_FEAT_FID_RESV_PERSIST = 0x83, + NVME_FEAT_FID_WRITE_PROTECT = 0x84, +}; + +/** + * enum nvme_feat - + * @NVME_FEAT_ARBITRATION_BURST_SHIFT: + * @NVME_FEAT_ARBITRATION_BURST_MASK: + * @NVME_FEAT_ARBITRATION_LPW_SHIFT: + * @NVME_FEAT_ARBITRATION_LPW_MASK: + * @NVME_FEAT_ARBITRATION_MPW_SHIFT: + * @NVME_FEAT_ARBITRATION_MPW_MASK: + * @NVME_FEAT_ARBITRATION_HPW_SHIFT: + * @NVME_FEAT_ARBITRATION_HPW_MASK: + * @NVME_FEAT_PWRMGMT_PS_SHIFT: + * @NVME_FEAT_PWRMGMT_PS_MASK: + * @NVME_FEAT_PWRMGMT_WH_SHIFT: + * @NVME_FEAT_PWRMGMT_WH_MASK: + * @NVME_FEAT_LBAR_NR_SHIFT: + * @NVME_FEAT_LBAR_NR_MASK: + * @NVME_FEAT_TT_TMPTH_SHIFT: + * @NVME_FEAT_TT_TMPTH_MASK: + * @NVME_FEAT_TT_TMPSEL_SHIFT: + * @NVME_FEAT_TT_TMPSEL_MASK: + * @NVME_FEAT_TT_THSEL_SHIFT: + * @NVME_FEAT_TT_THSEL_MASK: + * @NVME_FEAT_ERROR_RECOVERY_TLER_SHIFT: + * @NVME_FEAT_ERROR_RECOVERY_TLER_MASK: + * @NVME_FEAT_ERROR_RECOVERY_DULBE_SHIFT: + * @NVME_FEAT_ERROR_RECOVERY_DULBE_MASK: + * @NVME_FEAT_VWC_WCE_SHIFT: + * @NVME_FEAT_VWC_WCE_MASK: + * @NVME_FEAT_NRQS_NSQR_SHIFT: + * @NVME_FEAT_NRQS_NSQR_MASK: + * @NVME_FEAT_NRQS_NCQR_SHIFT: + * @NVME_FEAT_NRQS_NCQR_MASK: + * @NVME_FEAT_IRQC_THR_SHIFT: + * @NVME_FEAT_IRQC_THR_MASK: + * @NVME_FEAT_IRQC_TIME_SHIFT: + * @NVME_FEAT_IRQC_TIME_MASK: + * @NVME_FEAT_ICFG_IV_SHIFT: + * @NVME_FEAT_ICFG_IV_MASK: + * @NVME_FEAT_ICFG_CD_SHIFT: + * @NVME_FEAT_ICFG_CD_MASK: + * @NVME_FEAT_WA_DN_SHIFT: + * @NVME_FEAT_WA_DN_MASK: + * @NVME_FEAT_AE_SMART_SHIFT: + * @NVME_FEAT_AE_SMART_MASK: + * @NVME_FEAT_AE_NAN_SHIFT: + * @NVME_FEAT_AE_NAN_MASK: + * @NVME_FEAT_AE_FW_SHIFT: + * @NVME_FEAT_AE_FW_MASK: + * @NVME_FEAT_AE_TELEM_SHIFT: + * @NVME_FEAT_AE_TELEM_MASK: + * @NVME_FEAT_AE_ANA_SHIFT: + * @NVME_FEAT_AE_ANA_MASK: + * @NVME_FEAT_AE_PLA_SHIFT: + * @NVME_FEAT_AE_PLA_MASK: + * @NVME_FEAT_AE_LBAS_SHIFT: + * @NVME_FEAT_AE_LBAS_MASK: + * @NVME_FEAT_AE_EGA_SHIFT: + * @NVME_FEAT_AE_EGA_MASK: + * @NVME_FEAT_APST_APSTE_SHIFT: + * @NVME_FEAT_APST_APSTE_MASK: + * @NVME_FEAT_HMEM_EHM_SHIFT: + * @NVME_FEAT_HMEM_EHM_MASK: + * @NVME_FEAT_HCTM_TMT2_SHIFT: + * @NVME_FEAT_HCTM_TMT2_MASK: + * @NVME_FEAT_HCTM_TMT1_SHIFT: + * @NVME_FEAT_HCTM_TMT1_MASK: + * @NVME_FEAT_NOPS_NOPPME_SHIFT: + * @NVME_FEAT_NOPS_NOPPME_MASK: + * @NVME_FEAT_RRL_RRL_SHIFT: + * @NVME_FEAT_RRL_RRL_MASK: + * @NVME_FEAT_PLM_PLME_SHIFT: + * @NVME_FEAT_PLM_PLME_MASK: + * @NVME_FEAT_PLMW_WS_SHIFT: + * @NVME_FEAT_PLMW_WS_MASK: + * @NVME_FEAT_LBAS_LSIRI_SHIFT: + * @NVME_FEAT_LBAS_LSIRI_MASK: + * @NVME_FEAT_LBAS_LSIPI_SHIFT: + * @NVME_FEAT_LBAS_LSIPI_MASK: + * @NVME_FEAT_SC_NODRM_SHIFT: + * @NVME_FEAT_SC_NODRM_MASK: + * @NVME_FEAT_EG_ENDGID_SHIFT: + * @NVME_FEAT_EG_ENDGID_MASK: + * @NVME_FEAT_EG_EGCW_SHIFT: + * @NVME_FEAT_EG_EGCW_MASK: + * @NVME_FEAT_SPM_PBSLC_SHIFT: + * @NVME_FEAT_SPM_PBSLC_MASK: + * @NVME_FEAT_HOSTID_EXHID_SHIFT: + * @NVME_FEAT_HOSTID_EXHID_MASK: + * @NVME_FEAT_RM_REGPRE_SHIFT: + * @NVME_FEAT_RM_REGPRE_MASK: + * @NVME_FEAT_RM_RESREL_SHIFT: + * @NVME_FEAT_RM_RESREL_MASK: + * @NVME_FEAT_RM_RESPRE_SHIFT: + * @NVME_FEAT_RM_RESPRE_MASK: + * @NVME_FEAT_RP_PTPL_SHIFT: + * @NVME_FEAT_RP_PTPL_MASK: + * @NVME_FEAT_WP_WPS_SHIFT: + * @NVME_FEAT_WP_WPS_MASK: + * @NVME_FEAT_IOCSP_IOCSCI_SHIFT: + * @NVME_FEAT_IOCSP_IOCSCI_MASK: + */ +enum nvme_feat { + NVME_FEAT_ARBITRATION_BURST_SHIFT = 0, + NVME_FEAT_ARBITRATION_BURST_MASK = 0x7, + NVME_FEAT_ARBITRATION_LPW_SHIFT = 8, + NVME_FEAT_ARBITRATION_LPW_MASK = 0xff, + 
NVME_FEAT_ARBITRATION_MPW_SHIFT = 16, + NVME_FEAT_ARBITRATION_MPW_MASK = 0xff, + NVME_FEAT_ARBITRATION_HPW_SHIFT = 24, + NVME_FEAT_ARBITRATION_HPW_MASK = 0xff, + NVME_FEAT_PWRMGMT_PS_SHIFT = 0, + NVME_FEAT_PWRMGMT_PS_MASK = 0x1f, + NVME_FEAT_PWRMGMT_WH_SHIFT = 5, + NVME_FEAT_PWRMGMT_WH_MASK = 0x7, + NVME_FEAT_LBAR_NR_SHIFT = 0, + NVME_FEAT_LBAR_NR_MASK = 0x3f, + NVME_FEAT_TT_TMPTH_SHIFT = 0, + NVME_FEAT_TT_TMPTH_MASK = 0xffff, + NVME_FEAT_TT_TMPSEL_SHIFT = 16, + NVME_FEAT_TT_TMPSEL_MASK = 0xf, + NVME_FEAT_TT_THSEL_SHIFT = 20, + NVME_FEAT_TT_THSEL_MASK = 0x3, + NVME_FEAT_ERROR_RECOVERY_TLER_SHIFT = 0, + NVME_FEAT_ERROR_RECOVERY_TLER_MASK = 0xffff, + NVME_FEAT_ERROR_RECOVERY_DULBE_SHIFT = 16, + NVME_FEAT_ERROR_RECOVERY_DULBE_MASK = 0x1, + NVME_FEAT_VWC_WCE_SHIFT = 0, + NVME_FEAT_VWC_WCE_MASK = 0x1, + NVME_FEAT_NRQS_NSQR_SHIFT = 0, + NVME_FEAT_NRQS_NSQR_MASK = 0xffff, + NVME_FEAT_NRQS_NCQR_SHIFT = 16, + NVME_FEAT_NRQS_NCQR_MASK = 0xffff, + NVME_FEAT_IRQC_THR_SHIFT = 0, + NVME_FEAT_IRQC_THR_MASK = 0xff, + NVME_FEAT_IRQC_TIME_SHIFT = 8, + NVME_FEAT_IRQC_TIME_MASK = 0xff, + NVME_FEAT_ICFG_IV_SHIFT = 0, + NVME_FEAT_ICFG_IV_MASK = 0xffff, + NVME_FEAT_ICFG_CD_SHIFT = 16, + NVME_FEAT_ICFG_CD_MASK = 0x1, + NVME_FEAT_WA_DN_SHIFT = 0, + NVME_FEAT_WA_DN_MASK = 0x1, + NVME_FEAT_AE_SMART_SHIFT = 0, + NVME_FEAT_AE_SMART_MASK = 0xff, + NVME_FEAT_AE_NAN_SHIFT = 8, + NVME_FEAT_AE_NAN_MASK = 0x1, + NVME_FEAT_AE_FW_SHIFT = 9, + NVME_FEAT_AE_FW_MASK = 0x1, + NVME_FEAT_AE_TELEM_SHIFT = 10, + NVME_FEAT_AE_TELEM_MASK = 0x1, + NVME_FEAT_AE_ANA_SHIFT = 11, + NVME_FEAT_AE_ANA_MASK = 0x1, + NVME_FEAT_AE_PLA_SHIFT = 12, + NVME_FEAT_AE_PLA_MASK = 0x1, + NVME_FEAT_AE_LBAS_SHIFT = 13, + NVME_FEAT_AE_LBAS_MASK = 0x1, + NVME_FEAT_AE_EGA_SHIFT = 14, + NVME_FEAT_AE_EGA_MASK = 0x1, + NVME_FEAT_APST_APSTE_SHIFT = 0, + NVME_FEAT_APST_APSTE_MASK = 0x1, + NVME_FEAT_HMEM_EHM_SHIFT = 0, + NVME_FEAT_HMEM_EHM_MASK = 0x1, + NVME_FEAT_HCTM_TMT2_SHIFT = 0, + NVME_FEAT_HCTM_TMT2_MASK = 0xffff, + NVME_FEAT_HCTM_TMT1_SHIFT = 16, + NVME_FEAT_HCTM_TMT1_MASK = 0xffff, + NVME_FEAT_NOPS_NOPPME_SHIFT = 0, + NVME_FEAT_NOPS_NOPPME_MASK = 0x1, + NVME_FEAT_RRL_RRL_SHIFT = 0, + NVME_FEAT_RRL_RRL_MASK = 0xff, + NVME_FEAT_PLM_PLME_SHIFT = 0, + NVME_FEAT_PLM_PLME_MASK = 0x1, + NVME_FEAT_PLMW_WS_SHIFT = 0, + NVME_FEAT_PLMW_WS_MASK = 0x7, + NVME_FEAT_LBAS_LSIRI_SHIFT = 0, + NVME_FEAT_LBAS_LSIRI_MASK = 0xffff, + NVME_FEAT_LBAS_LSIPI_SHIFT = 16, + NVME_FEAT_LBAS_LSIPI_MASK = 0xffff, + NVME_FEAT_SC_NODRM_SHIFT = 0, + NVME_FEAT_SC_NODRM_MASK = 0x1, + NVME_FEAT_EG_ENDGID_SHIFT = 0, + NVME_FEAT_EG_ENDGID_MASK = 0xffff, + NVME_FEAT_EG_EGCW_SHIFT = 16, + NVME_FEAT_EG_EGCW_MASK = 0xff, + NVME_FEAT_SPM_PBSLC_SHIFT = 0, + NVME_FEAT_SPM_PBSLC_MASK = 0xff, + NVME_FEAT_HOSTID_EXHID_SHIFT = 0, + NVME_FEAT_HOSTID_EXHID_MASK = 0x1, + NVME_FEAT_RM_REGPRE_SHIFT = 1, + NVME_FEAT_RM_REGPRE_MASK = 0x1, + NVME_FEAT_RM_RESREL_SHIFT = 2, + NVME_FEAT_RM_RESREL_MASK = 0x1, + NVME_FEAT_RM_RESPRE_SHIFT = 0x3, + NVME_FEAT_RM_RESPRE_MASK = 0x1, + NVME_FEAT_RP_PTPL_SHIFT = 0, + NVME_FEAT_RP_PTPL_MASK = 0x1, + NVME_FEAT_WP_WPS_SHIFT = 0, + NVME_FEAT_WP_WPS_MASK = 0x7, + NVME_FEAT_IOCSP_IOCSCI_SHIFT = 0, + NVME_FEAT_IOCSP_IOCSCI_MASK = 0xff, +}; + +/** + * enum nvme_get_features_sel - + * @NVME_GET_FEATURES_SEL_CURRENT: Current value + * @NVME_GET_FEATURES_SEL_DEFAULT: Default value + * @NVME_GET_FEATURES_SEL_SAVED: Saved value + * @NVME_GET_FEATURES_SEL_SUPPORTED: Supported capabilities + */ +enum nvme_get_features_sel { + NVME_GET_FEATURES_SEL_CURRENT = 0, + NVME_GET_FEATURES_SEL_DEFAULT 
= 1, + NVME_GET_FEATURES_SEL_SAVED = 2, + NVME_GET_FEATURES_SEL_SUPPORTED = 3, +}; + +/** + * enum nvme_cmd_format_mset - Format NVM - Metadata Settings + * @NVME_FORMAT_MSET_SEPARATE: indicates that the metadata is transferred + * as part of a separate buffer. + * @NVME_FORMAT_MSET_EXTENDED: indicates that the metadata is transferred + * as part of an extended data LBA. + */ +enum nvme_cmd_format_mset { + NVME_FORMAT_MSET_SEPARATE = 0, + NVME_FORMAT_MSET_EXTENDED = 1, +}; + +/** + * enum nvme_cmd_format_pi - Format NVM - Protection Information + * @NVME_FORMAT_PI_DISABLE: Protection information is not enabled. + * @NVME_FORMAT_PI_TYPE1: Protection information is enabled, Type 1. + * @NVME_FORMAT_PI_TYPE2: Protection information is enabled, Type 2. + * @NVME_FORMAT_PI_TYPE3: Protection information is enabled, Type 3. + */ +enum nvme_cmd_format_pi { + NVME_FORMAT_PI_DISABLE = 0, + NVME_FORMAT_PI_TYPE1 = 1, + NVME_FORMAT_PI_TYPE2 = 2, + NVME_FORMAT_PI_TYPE3 = 3, +}; + +/** + * enum nvme_cmd_format_pil - Format NVM - Protection Information Location + * @NVME_FORMAT_PIL_LAST: Protection information is transferred as the last + * bytes of metadata. + * @NVME_FORMAT_PIL_FIRST: Protection information is transferred as the first + * bytes of metadata. + */ +enum nvme_cmd_format_pil { + NVME_FORMAT_PIL_LAST = 0, + NVME_FORMAT_PIL_FIRST = 1, +}; + +/** + * enum nvme_cmd_format_ses - Format NVM - Secure Erase Settings + * @NVME_FORMAT_SES_NONE: No secure erase operation requested. + * @NVME_FORMAT_SES_USER_DATA_ERASE: User Data Erase: All user data shall be erased, + * contents of the user data after the erase is + * indeterminate (e.g. the user data may be zero + * filled, one filled, etc.). If a User Data Erase + * is requested and all affected user data is + * encrypted, then the controller is allowed + * to use a cryptographic erase to perform + * the requested User Data Erase. + * @NVME_FORMAT_SES_CRYPTO_ERASE: Cryptographic Erase: All user data shall + * be erased cryptographically. This is + * accomplished by deleting the encryption key. + */ +enum nvme_cmd_format_ses { + NVME_FORMAT_SES_NONE = 0, + NVME_FORMAT_SES_USER_DATA_ERASE = 1, + NVME_FORMAT_SES_CRYPTO_ERASE = 2, +}; + +/** + * enum nvme_ns_mgmt_sel - + * @NVME_NS_MGMT_SEL_CREATE: Namespace Create selection + * @NVME_NS_MGMT_SEL_DELETE: Namespace Delete selection + */ +enum nvme_ns_mgmt_sel { + NVME_NS_MGMT_SEL_CREATE = 0, + NVME_NS_MGMT_SEL_DELETE = 1, +}; + +/** + * enum nvme_ns_attach_sel - + * @NVME_NS_ATTACH_SEL_CTRL_ATTACH: Namespace attach selection + * @NVME_NS_ATTACH_SEL_CTRL_DEATTACH: Namespace detach selection + */ +enum nvme_ns_attach_sel { + NVME_NS_ATTACH_SEL_CTRL_ATTACH = 0, + NVME_NS_ATTACH_SEL_CTRL_DEATTACH = 1, +}; + +/** + * enum nvme_fw_commit_ca - + * @NVME_FW_COMMIT_CA_REPLACE: Downloaded image replaces the existing + * image, if any, in the specified Firmware + * Slot. The newly placed image is not + * activated. + * @NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE: Downloaded image replaces the existing + * image, if any, in the specified Firmware + * Slot. The newly placed image is activated + * at the next Controller Level Reset. + * @NVME_FW_COMMIT_CA_SET_ACTIVE: The existing image in the specified + * Firmware Slot is activated at the + * next Controller Level Reset. + * @NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE_IMMEDIATE: Downloaded image replaces the existing + * image, if any, in the specified Firmware + * Slot and is then activated immediately. 
+ * If there is not a newly downloaded image, + * then the existing image in the specified + * firmware slot is activated immediately. + * @NVME_FW_COMMIT_CA_REPLACE_BOOT_PARTITION: Downloaded image replaces the Boot + * Partition specified by the Boot + * Partition ID field. + * @NVME_FW_COMMIT_CA_ACTIVATE_BOOT_PARTITION: Mark the Boot Partition specified in + * the BPID field as active and update + * BPINFO.ABPID. + */ +enum nvme_fw_commit_ca { + NVME_FW_COMMIT_CA_REPLACE = 0, + NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE = 1, + NVME_FW_COMMIT_CA_SET_ACTIVE = 2, + NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE_IMMEDIATE = 3, + NVME_FW_COMMIT_CA_REPLACE_BOOT_PARTITION = 6, + NVME_FW_COMMIT_CA_ACTIVATE_BOOT_PARTITION = 7, +}; + +/** + * enum nvme_directive_dtype - + * @NVME_DIRECTIVE_DTYPE_IDENTIFY: Identify directive type + * @NVME_DIRECTIVE_DTYPE_STREAMS: Streams directive type + */ +enum nvme_directive_dtype { + NVME_DIRECTIVE_DTYPE_IDENTIFY = 0, + NVME_DIRECTIVE_DTYPE_STREAMS = 1, +}; + +/** + * enum nvme_directive_receive_doper - + * @NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM: + * @NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM: + * @NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS: + * @NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE: + */ +enum nvme_directive_receive_doper { + NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM = 0x01, + NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM = 0x01, + NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS = 0x02, + NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE = 0x03, +}; + +/** + * enum nvme_directive_send_doper - + * @NVME_DIRECTIVE_SEND_IDENTIFY_DOPER_ENDIR: + * @NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_IDENTIFIER: + * @NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_RESOURCE: + */ +enum nvme_directive_send_doper { + NVME_DIRECTIVE_SEND_IDENTIFY_DOPER_ENDIR = 0x01, + NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_IDENTIFIER = 0x01, + NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_RESOURCE = 0x02, +}; + +/** + * enum nvme_directive_send_identify_endir - + * @NVME_DIRECTIVE_SEND_IDENTIFY_ENDIR_DISABLE: + * @NVME_DIRECTIVE_SEND_IDENTIFY_ENDIR_ENABLE: + */ +enum nvme_directive_send_identify_endir { + NVME_DIRECTIVE_SEND_IDENTIFY_ENDIR_DISABLE = 0, + NVME_DIRECTIVE_SEND_IDENTIFY_ENDIR_ENABLE = 1, +}; + +/** + * enum nvme_sanitize_sanact - Sanitize Action + * @NVME_SANITIZE_SANACT_EXIT_FAILURE: Exit Failure Mode. + * @NVME_SANITIZE_SANACT_START_BLOCK_ERASE: Start a Block Erase sanitize operation. + * @NVME_SANITIZE_SANACT_START_OVERWRITE: Start an Overwrite sanitize operation. + * @NVME_SANITIZE_SANACT_START_CRYPTO_ERASE: Start a Crypto Erase sanitize operation. 
+ */ +enum nvme_sanitize_sanact { + NVME_SANITIZE_SANACT_EXIT_FAILURE = 1, + NVME_SANITIZE_SANACT_START_BLOCK_ERASE = 2, + NVME_SANITIZE_SANACT_START_OVERWRITE = 3, + NVME_SANITIZE_SANACT_START_CRYPTO_ERASE = 4, +}; + +/** + * enum nvme_dst_stc - Action taken by the Device Self-test command + * @NVME_DST_STC_SHORT: Start a short device self-test operation + * @NVME_DST_STC_LONG: Start an extended device self-test operation + * @NVME_DST_STC_VS: Start a vendor specific device self-test operation + * @NVME_DST_STC_ABORT: Abort device self-test operation + */ +enum nvme_dst_stc { + NVME_DST_STC_SHORT = 0x1, + NVME_DST_STC_LONG = 0x2, + NVME_DST_STC_VS = 0xe, + NVME_DST_STC_ABORT = 0xf, +}; + +/** + * enum nvme_virt_mgmt_act - + * @NVME_VIRT_MGMT_ACT_PRIM_CTRL_FLEX_ALLOC: Primary Controller Flexible + * Allocation + * @NVME_VIRT_MGMT_ACT_OFFLINE_SEC_CTRL: Secondary Controller Offline + * @NVME_VIRT_MGMT_ACT_ASSIGN_SEC_CTRL: Secondary Controller Assign + * @NVME_VIRT_MGMT_ACT_ONLINE_SEC_CTRL: Secondary Controller Online + */ +enum nvme_virt_mgmt_act { + NVME_VIRT_MGMT_ACT_PRIM_CTRL_FLEX_ALLOC = 1, + NVME_VIRT_MGMT_ACT_OFFLINE_SEC_CTRL = 7, + NVME_VIRT_MGMT_ACT_ASSIGN_SEC_CTRL = 8, + NVME_VIRT_MGMT_ACT_ONLINE_SEC_CTRL = 9, +}; + +/** + * enum nvme_virt_mgmt_rt - + * @NVME_VIRT_MGMT_RT_VQ_RESOURCE: VQ Resources + * @NVME_VIRT_MGMT_RT_VI_RESOURCE: VI Resources + */ +enum nvme_virt_mgmt_rt { + NVME_VIRT_MGMT_RT_VQ_RESOURCE = 0, + NVME_VIRT_MGMT_RT_VI_RESOURCE = 1, +}; + +/** + * enum nvme_ns_write_protect_cfg - + * @NVME_NS_WP_CFG_NONE: No Write Protect + * @NVME_NS_WP_CFG_PROTECT: Write Protect + * @NVME_NS_WP_CFG_PROTECT_POWER_CYCLE: Write Protect Until Power Cycle + * @NVME_NS_WP_CFG_PROTECT_PERMANENT: Permanent Write Protect + */ +enum nvme_ns_write_protect_cfg { + NVME_NS_WP_CFG_NONE = 0, + NVME_NS_WP_CFG_PROTECT = 1, + NVME_NS_WP_CFG_PROTECT_POWER_CYCLE = 2, + NVME_NS_WP_CFG_PROTECT_PERMANENT = 3, +}; + +/** + * enum nvme_log_ana_lsp - + * @NVME_LOG_ANA_LSP_RGO_NAMESPACES: + * @NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY: + */ +enum nvme_log_ana_lsp { + NVME_LOG_ANA_LSP_RGO_NAMESPACES = 0, + NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY = 1, +}; + +/** + * enum nvme_pevent_log_action - + * @NVME_PEVENT_LOG_READ: Read Log Data + * @NVME_PEVENT_LOG_EST_CTX_AND_READ: Establish Context and Read Log Data + * @NVME_PEVENT_LOG_RELEASE_CTX: Release Context + */ +enum nvme_pevent_log_action { + NVME_PEVENT_LOG_READ = 0x0, + NVME_PEVENT_LOG_EST_CTX_AND_READ = 0x1, + NVME_PEVENT_LOG_RELEASE_CTX = 0x2, +}; + +/** + * enum nvme_feat_tmpthresh_thsel - + * @NVME_FEATURE_TEMPTHRESH_THSEL_OVER: Over temperature threshold select + * @NVME_FEATURE_TEMPTHRESH_THSEL_UNDER: Under temperature threshold select + */ +enum nvme_feat_tmpthresh_thsel { + NVME_FEATURE_TEMPTHRESH_THSEL_OVER = 0, + NVME_FEATURE_TEMPTHRESH_THSEL_UNDER = 1, +}; + +/** + * enum nvme_features_async_event_config_flags - + * @NVME_FEATURE_AENCFG_SMART_CRIT_SPARE: + * @NVME_FEATURE_AENCFG_SMART_CRIT_TEMPERATURE: + * @NVME_FEATURE_AENCFG_SMART_CRIT_DEGRADED: + * @NVME_FEATURE_AENCFG_SMART_CRIT_READ_ONLY: + * @NVME_FEATURE_AENCFG_SMART_CRIT_VOLATILE_BACKUP: + * @NVME_FEATURE_AENCFG_SMART_CRIT_READ_ONLY_PMR: + * @NVME_FEATURE_AENCFG_NOTICE_NAMESPACE_ATTRIBUTES: + * @NVME_FEATURE_AENCFG_NOTICE_FIRMWARE_ACTIVATION: + * @NVME_FEATURE_AENCFG_NOTICE_TELEMETRY_LOG: + * @NVME_FEATURE_AENCFG_NOTICE_ANA_CHANGE: + * @NVME_FEATURE_AENCFG_NOTICE_PL_EVENT: + * @NVME_FEATURE_AENCFG_NOTICE_LBA_STATUS: + * @NVME_FEATURE_AENCFG_NOTICE_EG_EVENT: + * 
@NVME_FEATURE_AENCFG_NOTICE_DISCOVERY_CHANGE:
+ */
+enum nvme_features_async_event_config_flags {
+	NVME_FEATURE_AENCFG_SMART_CRIT_SPARE		= 1 << 0,
+	NVME_FEATURE_AENCFG_SMART_CRIT_TEMPERATURE	= 1 << 1,
+	NVME_FEATURE_AENCFG_SMART_CRIT_DEGRADED		= 1 << 2,
+	NVME_FEATURE_AENCFG_SMART_CRIT_READ_ONLY	= 1 << 3,
+	NVME_FEATURE_AENCFG_SMART_CRIT_VOLATILE_BACKUP	= 1 << 4,
+	NVME_FEATURE_AENCFG_SMART_CRIT_READ_ONLY_PMR	= 1 << 5,
+	NVME_FEATURE_AENCFG_NOTICE_NAMESPACE_ATTRIBUTES	= 1 << 8,
+	NVME_FEATURE_AENCFG_NOTICE_FIRMWARE_ACTIVATION	= 1 << 9,
+	NVME_FEATURE_AENCFG_NOTICE_TELEMETRY_LOG	= 1 << 10,
+	NVME_FEATURE_AENCFG_NOTICE_ANA_CHANGE		= 1 << 11,
+	NVME_FEATURE_AENCFG_NOTICE_PL_EVENT		= 1 << 12,
+	NVME_FEATURE_AENCFG_NOTICE_LBA_STATUS		= 1 << 13,
+	NVME_FEATURE_AENCFG_NOTICE_EG_EVENT		= 1 << 14,
+	NVME_FEATURE_AENCFG_NOTICE_DISCOVERY_CHANGE	= 1 << 31,
+};
+
+/**
+ * enum nvme_feat_plm_window_select -
+ * @NVME_FEATURE_PLM_DTWIN:	Deterministic Window select
+ * @NVME_FEATURE_PLM_NDWIN:	Non-Deterministic Window select
+ */
+enum nvme_feat_plm_window_select {
+	NVME_FEATURE_PLM_DTWIN = 1,
+	NVME_FEATURE_PLM_NDWIN = 2,
+};
+
+/**
+ * enum nvme_feat_resv_notify_flags -
+ * @NVME_FEAT_RESV_NOTIFY_REGPRE:	Mask Registration Preempted Notification
+ * @NVME_FEAT_RESV_NOTIFY_RESREL:	Mask Reservation Released Notification
+ * @NVME_FEAT_RESV_NOTIFY_RESPRE:	Mask Reservation Preempted Notification
+ */
+enum nvme_feat_resv_notify_flags {
+	NVME_FEAT_RESV_NOTIFY_REGPRE = 1 << 1,
+	NVME_FEAT_RESV_NOTIFY_RESREL = 1 << 2,
+	NVME_FEAT_RESV_NOTIFY_RESPRE = 1 << 3,
+};
+
+/**
+ * enum nvme_feat_nswpcfg_state -
+ * @NVME_FEAT_NS_NO_WRITE_PROTECT:	No Write Protect
+ * @NVME_FEAT_NS_WRITE_PROTECT:		Write Protect
+ * @NVME_FEAT_NS_WRITE_PROTECT_PWR_CYCLE: Write Protect Until Power Cycle
+ * @NVME_FEAT_NS_WRITE_PROTECT_PERMANENT: Permanent Write Protect
+ */
+enum nvme_feat_nswpcfg_state {
+	NVME_FEAT_NS_NO_WRITE_PROTECT = 0,
+	NVME_FEAT_NS_WRITE_PROTECT = 1,
+	NVME_FEAT_NS_WRITE_PROTECT_PWR_CYCLE = 2,
+	NVME_FEAT_NS_WRITE_PROTECT_PERMANENT = 3,
+};
+
+/**
+ * enum nvme_fctype -
+ * @nvme_fabrics_type_property_set:	Property set
+ * @nvme_fabrics_type_connect:		Connect
+ * @nvme_fabrics_type_property_get:	Property Get
+ * @nvme_fabrics_type_auth_send:	Authentication Send
+ * @nvme_fabrics_type_auth_receive:	Authentication Receive
+ * @nvme_fabrics_type_disconnect:	Disconnect
+ */
+enum nvme_fctype {
+	nvme_fabrics_type_property_set = 0x00,
+	nvme_fabrics_type_connect = 0x01,
+	nvme_fabrics_type_property_get = 0x04,
+	nvme_fabrics_type_auth_send = 0x05,
+	nvme_fabrics_type_auth_receive = 0x06,
+	nvme_fabrics_type_disconnect = 0x08,
+};
+
+/**
+ * enum nvme_io_opcode -
+ * @nvme_cmd_flush:		Flush
+ * @nvme_cmd_write:		Write
+ * @nvme_cmd_read:		Read
+ * @nvme_cmd_write_uncor:	Write Uncorrectable
+ * @nvme_cmd_compare:		Compare
+ * @nvme_cmd_write_zeroes:	Write Zeroes
+ * @nvme_cmd_dsm:		Dataset Management
+ * @nvme_cmd_verify:		Verify
+ * @nvme_cmd_resv_register:	Reservation Register
+ * @nvme_cmd_resv_report:	Reservation Report
+ * @nvme_cmd_resv_acquire:	Reservation Acquire
+ * @nvme_cmd_resv_release:	Reservation Release
+ * @nvme_cmd_copy:		Copy
+ * @nvme_zns_cmd_mgmt_send:	Zone Management Send
+ * @nvme_zns_cmd_mgmt_recv:	Zone Management Receive
+ * @nvme_zns_cmd_append:	Zone Append
+ */
+enum nvme_io_opcode {
+	nvme_cmd_flush = 0x00,
+	nvme_cmd_write = 0x01,
+	nvme_cmd_read = 0x02,
+	nvme_cmd_write_uncor = 0x04,
+	nvme_cmd_compare = 0x05,
+	nvme_cmd_write_zeroes = 0x08,
+	nvme_cmd_dsm = 0x09,
+	nvme_cmd_verify = 0x0c,
+
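+	/*
+	 * Illustrative note, not part of the upstream header: these opcodes
+	 * populate the OPC field (bits 07:00 of command dword 0) of an I/O
+	 * submission queue entry, and the passthru-style helpers in ioctl.h
+	 * accept them directly, e.g. nvme_cmd_read (0x02) for a Read.
+	 */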
nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, + nvme_cmd_copy = 0x19, + nvme_zns_cmd_mgmt_send = 0x79, + nvme_zns_cmd_mgmt_recv = 0x7a, + nvme_zns_cmd_append = 0x7d, +}; + +/** + * enum nvme_io_control_flags - + * @NVME_IO_DTYPE_STREAMS: Directive Type Streams + * @NVME_IO_DEAC: Deallocate + * @NVME_IO_ZNS_APPEND_PIREMAP: Protection Information Remap + * @NVME_IO_PRINFO_PRCHK_REF: Protection Information Check Reference Tag + * @NVME_IO_PRINFO_PRCHK_APP: Protection Information Check Application Tag + * @NVME_IO_PRINFO_PRCHK_GUARD: Protection Information Check Guard field + * @NVME_IO_PRINFO_PRACT: Protection Information Action + * @NVME_IO_FUA: Force Unit Access + * @NVME_IO_LR: Limited Retry + */ +enum nvme_io_control_flags { + NVME_IO_DTYPE_STREAMS = 1 << 4, + NVME_IO_DEAC = 1 << 9, + NVME_IO_ZNS_APPEND_PIREMAP = 1 << 9, + NVME_IO_PRINFO_PRCHK_REF = 1 << 10, + NVME_IO_PRINFO_PRCHK_APP = 1 << 11, + NVME_IO_PRINFO_PRCHK_GUARD = 1 << 12, + NVME_IO_PRINFO_PRACT = 1 << 13, + NVME_IO_FUA = 1 << 14, + NVME_IO_LR = 1 << 15, +}; + +/** + * enum nvme_io_dsm_flags - + * @NVME_IO_DSM_FREQ_UNSPEC: No frequency information provided + * @NVME_IO_DSM_FREQ_TYPICAL: Typical number of reads and writes + * expected for this LBA range + * @NVME_IO_DSM_FREQ_RARE: Infrequent writes and infrequent + * reads to the LBA range indicated + * @NVME_IO_DSM_FREQ_READS: Infrequent writes and frequent + * reads to the LBA range indicated + * @NVME_IO_DSM_FREQ_WRITES: Frequent writes and infrequent + * reads to the LBA range indicated + * @NVME_IO_DSM_FREQ_RW: Frequent writes and frequent reads + * to the LBA range indicated + * @NVME_IO_DSM_FREQ_ONCE: + * @NVME_IO_DSM_FREQ_PREFETCH: + * @NVME_IO_DSM_FREQ_TEMP: + * @NVME_IO_DSM_LATENCY_NONE: No latency information provided + * @NVME_IO_DSM_LATENCY_IDLE: Longer latency acceptable + * @NVME_IO_DSM_LATENCY_NORM: Typical latency + * @NVME_IO_DSM_LATENCY_LOW: Smallest possible latency + * @NVME_IO_DSM_SEQ_REQ: + * @NVME_IO_DSM_COMPRESSED: + */ +enum nvme_io_dsm_flags { + NVME_IO_DSM_FREQ_UNSPEC = 0, + NVME_IO_DSM_FREQ_TYPICAL = 1, + NVME_IO_DSM_FREQ_RARE = 2, + NVME_IO_DSM_FREQ_READS = 3, + NVME_IO_DSM_FREQ_WRITES = 4, + NVME_IO_DSM_FREQ_RW = 5, + NVME_IO_DSM_FREQ_ONCE = 6, + NVME_IO_DSM_FREQ_PREFETCH = 7, + NVME_IO_DSM_FREQ_TEMP = 8, + NVME_IO_DSM_LATENCY_NONE = 0 << 4, + NVME_IO_DSM_LATENCY_IDLE = 1 << 4, + NVME_IO_DSM_LATENCY_NORM = 2 << 4, + NVME_IO_DSM_LATENCY_LOW = 3 << 4, + NVME_IO_DSM_SEQ_REQ = 1 << 6, + NVME_IO_DSM_COMPRESSED = 1 << 7, +}; + +/** + * enum nvme_dsm_attributes - + * @NVME_DSMGMT_IDR: Attribute – Integral Dataset for Read + * @NVME_DSMGMT_IDW: Attribute – Integral Dataset for Write + * @NVME_DSMGMT_AD: Attribute – Deallocate + */ +enum nvme_dsm_attributes { + NVME_DSMGMT_IDR = 1 << 0, + NVME_DSMGMT_IDW = 1 << 1, + NVME_DSMGMT_AD = 1 << 2, +}; + +/** + * enum nvme_resv_rtype - + * @NVME_RESERVATION_RTYPE_WE: Write Exclusive Reservation + * @NVME_RESERVATION_RTYPE_EA: Exclusive Access Reservation + * @NVME_RESERVATION_RTYPE_WERO: Write Exclusive - Registrants Only Reservation + * @NVME_RESERVATION_RTYPE_EARO: Exclusive Access - Registrants Only Reservation + * @NVME_RESERVATION_RTYPE_WEAR: Write Exclusive - All Registrants Reservation + * @NVME_RESERVATION_RTYPE_EAAR: Exclusive Access - All Registrants Reservation + */ +enum nvme_resv_rtype { + NVME_RESERVATION_RTYPE_WE = 1, + NVME_RESERVATION_RTYPE_EA = 2, + NVME_RESERVATION_RTYPE_WERO = 3, + NVME_RESERVATION_RTYPE_EARO = 4, + 
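+	/*
+	 * Illustrative note, not part of the upstream header: for a
+	 * deallocate (TRIM-like) request, the NVME_DSMGMT_AD bit above is
+	 * set in CDW11 of the Dataset Management command, while the access
+	 * hints from enum nvme_io_dsm_flags describe each range and belong
+	 * in the per-range Context Attributes field (see nvme_dsm_range and
+	 * nvme_init_dsm_range() in util.c).
+	 */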
NVME_RESERVATION_RTYPE_WEAR = 5, + NVME_RESERVATION_RTYPE_EAAR = 6, +}; + +/** + * enum nvme_resv_racqa - + * @NVME_RESERVATION_RACQA_ACQUIRE: Acquire + * @NVME_RESERVATION_RACQA_PREEMPT: Preempt + * @NVME_RESERVATION_RACQA_PREEMPT_AND_ABORT: Preempt and Abort + */ +enum nvme_resv_racqa { + NVME_RESERVATION_RACQA_ACQUIRE = 0, + NVME_RESERVATION_RACQA_PREEMPT = 1, + NVME_RESERVATION_RACQA_PREEMPT_AND_ABORT = 2, +}; + +/** + * enum nvme_resv_rrega - + * @NVME_RESERVATION_RREGA_REGISTER_KEY: Register Reservation Key + * @NVME_RESERVATION_RREGA_UNREGISTER_KEY: Unregister Reservation Key + * @NVME_RESERVATION_RREGA_REPLACE_KEY: Replace Reservation Key + */ +enum nvme_resv_rrega { + NVME_RESERVATION_RREGA_REGISTER_KEY = 0, + NVME_RESERVATION_RREGA_UNREGISTER_KEY = 1, + NVME_RESERVATION_RREGA_REPLACE_KEY = 2, +}; + +/** + * enum nvme_resv_cptpl - + * @NVME_RESERVATION_CPTPL_NO_CHANGE: No change to PTPL state + * @NVME_RESERVATION_CPTPL_CLEAR: Reservations are released and + * registrants are cleared on a power on + * @NVME_RESERVATION_CPTPL_PERSIST: Reservations and registrants persist + * across a power loss + */ +enum nvme_resv_cptpl { + NVME_RESERVATION_CPTPL_NO_CHANGE = 0, + NVME_RESERVATION_CPTPL_CLEAR = 2, + NVME_RESERVATION_CPTPL_PERSIST = 3, +}; + +/** + * enum nvme_resv_rrela - + * @NVME_RESERVATION_RRELA_RELEASE: Release + * @NVME_RESERVATION_RRELA_CLEAR: Clear + */ +enum nvme_resv_rrela { + NVME_RESERVATION_RRELA_RELEASE = 0, + NVME_RESERVATION_RRELA_CLEAR = 1 +}; + +/** + * enum nvme_zns_send_action - + * @NVME_ZNS_ZSA_CLOSE: Close Zone + * @NVME_ZNS_ZSA_FINISH: Finish Zone + * @NVME_ZNS_ZSA_OPEN: Open Zone + * @NVME_ZNS_ZSA_RESET: Reset Zone + * @NVME_ZNS_ZSA_OFFLINE: Offline Zone + * @NVME_ZNS_ZSA_SET_DESC_EXT: Set Zone Descriptor Extension + * @NVME_ZNS_ZSA_ZRWA_FLUSH: Flush + */ +enum nvme_zns_send_action { + NVME_ZNS_ZSA_CLOSE = 0x1, + NVME_ZNS_ZSA_FINISH = 0x2, + NVME_ZNS_ZSA_OPEN = 0x3, + NVME_ZNS_ZSA_RESET = 0x4, + NVME_ZNS_ZSA_OFFLINE = 0x5, + NVME_ZNS_ZSA_SET_DESC_EXT = 0x10, + NVME_ZNS_ZSA_ZRWA_FLUSH = 0x11, +}; + +/** + * enum nvme_zns_recv_action - + * @NVME_ZNS_ZRA_REPORT_ZONES: Report Zones + * @NVME_ZNS_ZRA_EXTENDED_REPORT_ZONES: Extended Report Zones + */ +enum nvme_zns_recv_action { + NVME_ZNS_ZRA_REPORT_ZONES = 0x0, + NVME_ZNS_ZRA_EXTENDED_REPORT_ZONES = 0x1, +}; + +/** + * enum nvme_zns_report_options - + * @NVME_ZNS_ZRAS_REPORT_ALL: List all zones + * @NVME_ZNS_ZRAS_REPORT_EMPTY: List the zones in the ZSE:Empty state + * @NVME_ZNS_ZRAS_REPORT_IMPL_OPENED: List the zones in the ZSIO:Implicitly Opened state + * @NVME_ZNS_ZRAS_REPORT_EXPL_OPENED: List the zones in the ZSEO:Explicitly Opened state + * @NVME_ZNS_ZRAS_REPORT_CLOSED: List the zones in the ZSC:Closed state + * @NVME_ZNS_ZRAS_REPORT_FULL: List the zones in the ZSF:Full state + * @NVME_ZNS_ZRAS_REPORT_READ_ONLY: List the zones in the ZSRO:Read Only state + * @NVME_ZNS_ZRAS_REPORT_OFFLINE: List the zones in the ZSO:Offline state + */ +enum nvme_zns_report_options { + NVME_ZNS_ZRAS_REPORT_ALL = 0x0, + NVME_ZNS_ZRAS_REPORT_EMPTY = 0x1, + NVME_ZNS_ZRAS_REPORT_IMPL_OPENED = 0x2, + NVME_ZNS_ZRAS_REPORT_EXPL_OPENED = 0x3, + NVME_ZNS_ZRAS_REPORT_CLOSED = 0x4, + NVME_ZNS_ZRAS_REPORT_FULL = 0x5, + NVME_ZNS_ZRAS_REPORT_READ_ONLY = 0x6, + NVME_ZNS_ZRAS_REPORT_OFFLINE = 0x7, +}; + +#endif /* _LIBNVME_TYPES_H */ diff --git a/src/nvme/util.c b/src/nvme/util.c new file mode 100644 index 0000000..799c0cf --- /dev/null +++ b/src/nvme/util.c @@ -0,0 +1,777 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is 
part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ + +#include <stdio.h> +#include <string.h> +#include <errno.h> + +#include <sys/param.h> +#include <sys/types.h> +#include <arpa/inet.h> +#include <netdb.h> +#include <unistd.h> + +#include <ccan/endian/endian.h> + +#include "private.h" +#include "util.h" +#include "log.h" + +static inline __u8 nvme_generic_status_to_errno(__u16 status) +{ + switch (status) { + case NVME_SC_INVALID_OPCODE: + case NVME_SC_INVALID_FIELD: + case NVME_SC_INVALID_NS: + case NVME_SC_SGL_INVALID_LAST: + case NVME_SC_SGL_INVALID_COUNT: + case NVME_SC_SGL_INVALID_DATA: + case NVME_SC_SGL_INVALID_METADATA: + case NVME_SC_SGL_INVALID_TYPE: + case NVME_SC_SGL_INVALID_OFFSET: + case NVME_SC_PRP_INVALID_OFFSET: + case NVME_SC_CMB_INVALID_USE: + case NVME_SC_KAT_INVALID: + return EINVAL; + case NVME_SC_CMDID_CONFLICT: + return EADDRINUSE; + case NVME_SC_DATA_XFER_ERROR: + case NVME_SC_INTERNAL: + case NVME_SC_SANITIZE_FAILED: + return EIO; + case NVME_SC_POWER_LOSS: + case NVME_SC_ABORT_REQ: + case NVME_SC_ABORT_QUEUE: + case NVME_SC_FUSED_FAIL: + case NVME_SC_FUSED_MISSING: + return EWOULDBLOCK; + case NVME_SC_CMD_SEQ_ERROR: + return EILSEQ; + case NVME_SC_SANITIZE_IN_PROGRESS: + case NVME_SC_FORMAT_IN_PROGRESS: + return EINPROGRESS; + case NVME_SC_NS_WRITE_PROTECTED: + case NVME_SC_NS_NOT_READY: + case NVME_SC_RESERVATION_CONFLICT: + case NVME_SC_OP_DENIED: + case NVME_SC_ADMIN_CMD_MEDIA_NOT_READY: + return EACCES; + case NVME_SC_LBA_RANGE: + return EREMOTEIO; + case NVME_SC_CAP_EXCEEDED: + case NVME_SC_AWU_EXCEEDED: + return ENOSPC; + } + return EIO; +} + +static inline __u8 nvme_cmd_specific_status_to_errno(__u16 status) +{ + switch (status) { + case NVME_SC_CQ_INVALID: + case NVME_SC_QID_INVALID: + case NVME_SC_QUEUE_SIZE: + case NVME_SC_FIRMWARE_SLOT: + case NVME_SC_FIRMWARE_IMAGE: + case NVME_SC_INVALID_VECTOR: + case NVME_SC_INVALID_LOG_PAGE: + case NVME_SC_INVALID_FORMAT: + case NVME_SC_INVALID_QUEUE: + case NVME_SC_NS_INSUFFICIENT_CAP: + case NVME_SC_NS_ID_UNAVAILABLE: + case NVME_SC_CTRL_LIST_INVALID: + case NVME_SC_BAD_ATTRIBUTES: + case NVME_SC_INVALID_PI: + case NVME_SC_INVALID_CTRL_ID: + case NVME_SC_INVALID_SEC_CTRL_STATE: + case NVME_SC_INVALID_CTRL_RESOURCES: + case NVME_SC_INVALID_RESOURCE_ID: + case NVME_SC_ANA_GROUP_ID_INVALID: + case NVME_SC_INSUFFICIENT_CAP: + case NVME_SC_INVALID_IOCS: + case NVME_SC_ID_UNAVAILABLE: + return EINVAL; + case NVME_SC_ABORT_LIMIT: + case NVME_SC_ASYNC_LIMIT: + case NVME_SC_NS_ATTACHMENT_LIMIT_EXCEEDED: + return EDQUOT; + case NVME_SC_FW_NEEDS_CONV_RESET: + case NVME_SC_FW_NEEDS_SUBSYS_RESET: + case NVME_SC_FW_NEEDS_MAX_TIME: + case NVME_SC_FW_NEEDS_RESET: + return ERESTART; + case NVME_SC_FEATURE_NOT_SAVEABLE: + case NVME_SC_FEATURE_NOT_CHANGEABLE: + case NVME_SC_FEATURE_NOT_PER_NS: + case NVME_SC_FW_ACTIVATE_PROHIBITED: + case NVME_SC_NS_IS_PRIVATE: + case NVME_SC_BP_WRITE_PROHIBITED: + case NVME_SC_READ_ONLY: + case NVME_SC_PMR_SAN_PROHIBITED: + return EPERM; + case NVME_SC_OVERLAPPING_RANGE: + case NVME_SC_NS_NOT_ATTACHED: + return ENOSPC; + case NVME_SC_NS_ALREADY_ATTACHED: + return EALREADY; + case NVME_SC_THIN_PROV_NOT_SUPP: + case NVME_SC_PROHIBIT_CMD_EXEC_NOT_SUPPORTED: + return EOPNOTSUPP; + case NVME_SC_ABORT_MISSING: + return EWOULDBLOCK; + case NVME_SC_SELF_TEST_IN_PROGRESS: + return EINPROGRESS; + } + + return EIO; +} + +static inline __u8 
nvme_fabrics_status_to_errno(__u16 status) +{ + switch (status) { + case NVME_SC_CONNECT_FORMAT: + case NVME_SC_CONNECT_INVALID_PARAM: + case NVME_SC_DISCONNECT_INVALID_QTYPE: + return EINVAL; + case NVME_SC_CONNECT_CTRL_BUSY: + return EBUSY; + case NVME_SC_CONNECT_RESTART_DISC: + return ERESTART; + case NVME_SC_CONNECT_INVALID_HOST: + return ECONNREFUSED; + case NVME_SC_DISCOVERY_RESTART: + return EAGAIN; + case NVME_SC_AUTH_REQUIRED: + return EPERM; + } + + return EIO; +} + +__u8 nvme_status_to_errno(int status, bool fabrics) +{ + __u16 sc; + + if (!status) + return 0; + if (status < 0) + return errno; + + sc = nvme_status_code(status); + switch (nvme_status_code_type(status)) { + case NVME_SCT_GENERIC: + return nvme_generic_status_to_errno(sc); + case NVME_SCT_CMD_SPECIFIC: + if (fabrics) + return nvme_fabrics_status_to_errno(sc); + return nvme_cmd_specific_status_to_errno(sc); + default: + return EIO; + } +} + +static const char * const generic_status[] = { + [NVME_SC_SUCCESS] = "Successful Completion: The command completed without error", + [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode: A reserved coded value or an unsupported value in the command opcode field", + [NVME_SC_INVALID_FIELD] = "Invalid Field in Command: A reserved coded value or an unsupported value in a defined field", + [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict: The command identifier is already in use", + [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error: Transferring the data or metadata associated with a command experienced an error", + [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification: Indicates that the command was aborted due to a power loss notification", + [NVME_SC_INTERNAL] = "Internal Error: The command was not completed successfully due to an internal error", + [NVME_SC_ABORT_REQ] = "Command Abort Requested: The command was aborted due to an Abort command", + [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion: The command was aborted due to a Delete I/O Submission Queue", + [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command: The command was aborted due to the other command in a fused operation failing", + [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command: The fused command was aborted due to the adjacent submission queue entry not containing a fused command", + [NVME_SC_INVALID_NS] = "Invalid Namespace or Format: The namespace or the format of that namespace is invalid", + [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error: The command was aborted due to a protocol violation in a multi- command sequence", + [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor: The command includes an invalid SGL Last Segment or SGL Segment descriptor", + [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors: There is an SGL Last Segment descriptor or an SGL Segment descriptor in a location other than the last descriptor of a segment based on the length indicated", + [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid: The length of a Data SGL is too short or too long and the controller does not support SGL transfers longer than the amount of data to be transferred", + [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid: The length of a Metadata SGL is too short or too long and the controller does not support SGL transfers longer than the amount of data to be transferred", + [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid: The type of an SGL Descriptor is a type that is not supported by the controller", + 
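+	/*
+	 * Illustrative note, not part of the upstream source: callers of the
+	 * ioctl wrappers typically turn a positive return value (an NVMe
+	 * status word) into something printable with the helpers in this
+	 * file, along these lines:
+	 *
+	 *	if (err > 0)
+	 *		fprintf(stderr, "NVMe status: %s\n",
+	 *			nvme_status_to_string(err, false));
+	 *	else if (err < 0)
+	 *		perror("ioctl");
+	 *
+	 * nvme_status_to_errno() maps the same word onto a POSIX errno value
+	 * when an integer error code is more convenient.
+	 */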
[NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer: The attempted use of the Controller Memory Buffer is not supported by the controller", + [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid: The Offset field for a PRP entry is invalid", + [NVME_SC_AWU_EXCEEDED] = "Atomic Write Unit Exceeded: The length specified exceeds the atomic write unit size", + [NVME_SC_OP_DENIED] = "Operation Denied: The command was denied due to lack of access rights", + [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid: The offset specified in a descriptor is invalid", + [NVME_SC_HOSTID_FORMAT] = "Host Identifier Inconsistent Format: The NVM subsystem detected the simultaneous use of 64- bit and 128-bit Host Identifier values on different controllers", + [NVME_SC_KAT_EXPIRED] = "Keep Alive Timer Expired: The Keep Alive Timer expired", + [NVME_SC_KAT_INVALID] = "Keep Alive Timeout Invalid: The Keep Alive Timeout value specified is invalid", + [NVME_SC_CMD_ABORTED_PREMEPT] = "Command Aborted due to Preempt and Abort: The command was aborted due to a Reservation Acquire command", + [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed: The most recent sanitize operation failed and no recovery action has been successfully completed", + [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress: The requested function is prohibited while a sanitize operation is in progress", + [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid: The Address alignment or Length granularity for an SGL Data Block descriptor is invalid", + [NVME_SC_CMD_IN_CMBQ_NOT_SUPP] = "Command Not Supported for Queue in CMB: The controller does not support Submission Queue in the Controller Memory Buffer or Completion Queue in the Controller Memory Buffer", + [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected: The command is prohibited while the namespace is write protected", + [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted: Command processing was interrupted and the controller is unable to successfully complete the command", + [NVME_SC_TRAN_TPORT_ERROR] = "Transient Transport Error: A transient transport error was detected", + [NVME_SC_PROHIBITED_BY_CMD_AND_FEAT] = "Command Prohibited by Command and Feature Lockdown: The command was aborted due to command execution being prohibited by the Command and Feature Lockdown", + [NVME_SC_ADMIN_CMD_MEDIA_NOT_READY] = "Admin Command Media Not Ready: The Admin command requires access to media and the media is not ready", + [NVME_SC_LBA_RANGE] = "LBA Out of Range: The command references an LBA that exceeds the size of the namespace", + [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded: Execution of the command has caused the capacity of the namespace to be exceeded", + [NVME_SC_NS_NOT_READY] = "Namespace Not Ready: The namespace is not ready to be accessed", + [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict: The command was aborted due to a conflict with a reservation held on the accessed namespace", + [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress: A Format NVM command is in progress on the namespace", +}; + +static const char * const cmd_spec_status[] = { + [NVME_SC_CQ_INVALID] = "Completion Queue Invalid: The Completion Queue identifier specified in the command does not exist", + [NVME_SC_QID_INVALID] = "Invalid Queue Identifier: The creation of the I/O Completion Queue failed due to an invalid queue identifier specified as part of the command", + [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size: The host attempted to create an I/O Completion Queue with an invalid number 
of entries", + [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded: The number of concurrently outstanding Abort commands has exceeded the limit indicated in the Identify Controller data structure", + [NVME_SC_ABORT_MISSING] = "Abort Command Is Missing: The abort command is missing", + [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded: The number of concurrently outstanding Asynchronous Event Request commands has been exceeded", + [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot: The firmware slot indicated is invalid or read only", + [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image: The firmware image specified for activation is invalid and not loaded by the controller", + [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector: The creation of the I/O Completion Queue failed due to an invalid interrupt vector specified as part of the command", + [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page: The log page indicated is invalid", + [NVME_SC_INVALID_FORMAT] = "Invalid Format: The LBA Format specified is not supported", + [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset: The firmware commit was successful, however, activation of the firmware image requires a conventional reset", + [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion: Invalid I/O Completion Queue specified to delete", + [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable: The Feature Identifier specified does not support a saveable value", + [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable: The Feature Identifier is not able to be changed", + [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific: The Feature Identifier specified is not namespace specific", + [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset: The firmware commit was successful, however, activation of the firmware image requires an NVM Subsystem", + [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Controller Level Reset: The firmware commit was successful; however, the image specified does not support being activated without a reset", + [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation: The image specified if activated immediately would exceed the Maximum Time for Firmware Activation (MTFA) value reported in Identify Controller", + [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited: The image specified is being prohibited from activation by the controller for vendor specific reasons", + [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range: The downloaded firmware image has overlapping ranges", + [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity: Creating the namespace requires more free space than is currently available", + [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable: The number of namespaces supported has been exceeded", + [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached: The controller is already attached to the namespace specified", + [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private: The namespace is private and is already attached to one controller", + [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached: The request to detach the controller could not be completed because the controller is not attached to the namespace", + [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported: Thin provisioning is not supported by the controller", + [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid: The controller list provided contains invalid 
controller ids", + [NVME_SC_SELF_TEST_IN_PROGRESS] = "Device Self-test In Progress: The controller or NVM subsystem already has a device self-test operation in process", + [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited: The command tried to modify a locked Boot Partition", + [NVME_SC_INVALID_CTRL_ID] = "Invalid Controller Identifier: An invalid controller id was specified", + [NVME_SC_INVALID_SEC_CTRL_STATE] = "Invalid Secondary Controller State: The requested secondary controller action is invalid based on the secondary and primary controllers current states", + [NVME_SC_INVALID_CTRL_RESOURCES] = "Invalid Number of Controller Resources: The specified number of Flexible Resources is invalid", + [NVME_SC_INVALID_RESOURCE_ID] = "Invalid Resource Identifier: At least one of the specified resource identifiers was invalid", + [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited While Persistent Memory Region is Enabled", + [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid: The specified ANA Group Identifier (ANAGRPID) is not supported in the submitted command", + [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed: The controller is not attached to the namespace as a result of an ANA condition", + [NVME_SC_INSUFFICIENT_CAP] = "Insufficient Capacity: Requested operation requires more free space than is currently available", + [NVME_SC_NS_ATTACHMENT_LIMIT_EXCEEDED] = "Namespace Attachment Limit Exceeded: Attaching the ns to a controller causes max number of ns attachments allowed to be exceeded", + [NVME_SC_PROHIBIT_CMD_EXEC_NOT_SUPPORTED] = "Prohibition of Command Execution Not Supported", + [NVME_SC_IOCS_NOT_SUPPORTED] = "The I/O command set is not supported", + [NVME_SC_IOCS_NOT_ENABLED] = "The I/O command set is not enabled", + [NVME_SC_IOCS_COMBINATION_REJECTED] = "The I/O command set combination is rejected", + [NVME_SC_INVALID_IOCS] = "The I/O command set is invalid", + [NVME_SC_ID_UNAVAILABLE] = "Identifier Unavailable: The number of Endurance Groups or NVM Sets supported has been exceeded", + [NVME_SC_INVALID_DISCOVERY_INFO] = "Discovery Info Entry not applicable to selected entity", + [NVME_SC_ZONING_DATA_STRUCT_LOCKED] = "The requested Zoning data structure is locked on the CDC", + [NVME_SC_ZONING_DATA_STRUCT_NOTFND] = "The requested Zoning data structure does not exist on the CDC", + [NVME_SC_INSUFFICIENT_DISC_RES] = "Discovery Info entries exceed Discovery Controller's capacity", + [NVME_SC_REQSTD_FUNCTION_DISABLED] = "Fabric Zoning is not enabled on the CDC", + [NVME_SC_ZONEGRP_ORIGINATOR_INVLD] = "The NQN contained in the ZoneGroup Originator field does not match the Host NQN used by the DDC to connect to the CDC", +}; + +static const char * const nvm_status[] = { + [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes: The attributes specified in the command are conflicting", + [NVME_SC_INVALID_PI] = "Invalid Protection Information: The command's Protection Information Field settings are invalid for the namespace's Protection Information format", + [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range: The LBA range specified contains read-only blocks", + [NVME_SC_CMD_SIZE_LIMIT_EXCEEDED] = "Command Size Limit Exceeded", + [NVME_SC_ZNS_INVALID_OP_REQUEST] = "Invalid Zone Operation Request: The operation requested is invalid", + [NVME_SC_ZNS_ZRWA_RESOURCES_UNAVAILABLE] = "ZRWA Resources Unavailable: No ZRWAs are available", + [NVME_SC_ZNS_BOUNDARY_ERROR] = "Zoned Boundary Error: Invalid Zone Boundary crossing", + [NVME_SC_ZNS_FULL] = "Zone Is Full: The 
accessed zone is in ZSF:Full state", + [NVME_SC_ZNS_READ_ONLY] = "Zone Is Read Only: The accessed zone is in ZSRO:Read Only state", + [NVME_SC_ZNS_OFFLINE] = "Zone Is Offline: The access zone is in ZSO:Offline state", + [NVME_SC_ZNS_INVALID_WRITE] = "Zone Invalid Write: The write to zone was not at the write pointer offset", + [NVME_SC_ZNS_TOO_MANY_ACTIVE] = "Too Many Active Zones: The controller does not allow additional active zones", + [NVME_SC_ZNS_TOO_MANY_OPENS] = "Too Many Open Zones: The controller does not allow additional open zones", + [NVME_SC_ZNS_INVAL_TRANSITION] = "Invalid Zone State Transition: The request is not a valid zone state transition", +}; + +static const char * const nvmf_status[] = { + [NVME_SC_CONNECT_FORMAT] = "Incompatible Format: The NVM subsystem does not support the record format specified by the host", + [NVME_SC_CONNECT_CTRL_BUSY] = "Controller Busy: The controller is already associated with a host", + [NVME_SC_CONNECT_INVALID_PARAM] = "Connect Invalid Parameters: One or more of the command parameters", + [NVME_SC_CONNECT_RESTART_DISC] = "Connect Restart Discovery: The NVM subsystem requested is not available", + [NVME_SC_CONNECT_INVALID_HOST] = "Connect Invalid Host: The host is not allowed to establish an association to either any controller in the NVM subsystem or the specified controller", + [NVME_SC_DISCONNECT_INVALID_QTYPE] = "Invalid Queue Type: The command was sent on the wrong queue type", + [NVME_SC_DISCOVERY_RESTART] = "Discover Restart: The snapshot of the records is now invalid or out of date", + [NVME_SC_AUTH_REQUIRED] = "Authentication Required: NVMe in-band authentication is required and the queue has not yet been authenticated", +}; + +static const char * const media_status[] = { + [NVME_SC_WRITE_FAULT] = "Write Fault: The write data could not be committed to the media", + [NVME_SC_READ_ERROR] = "Unrecovered Read Error: The read data could not be recovered from the media", + [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error: The command was aborted due to an end-to-end guard check failure", + [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error: The command was aborted due to an end-to-end application tag check failure", + [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error: The command was aborted due to an end-to-end reference tag check failure", + [NVME_SC_COMPARE_FAILED] = "Compare Failure: The command failed due to a miscompare during a Compare command", + [NVME_SC_ACCESS_DENIED] = "Access Denied: Access to the namespace and/or LBA range is denied due to lack of access rights", + [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block: The command failed due to an attempt to read from or verify an LBA range containing a deallocated or unwritten logical block", + [NVME_SC_STORAGE_TAG_CHECK] = "End-to-End Storage Tag Check Error: The command was aborted due to an end-to-end storage tag check failure", +}; + +static const char * const path_status[] = { + [NVME_SC_ANA_INTERNAL_PATH_ERROR] = "Internal Path Error: An internal error specific to the controller processing the commmand prevented completion", + [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss: The controller is in a persistent loss state with the requested namespace", + [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible: The controller is in an inaccessible state with the requested namespace", + [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition: The controller is currently transitioning states with the 
requested namespace", + [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error: A pathing error was detected by the controller", + [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error: A pathing error was detected by the host", + [NVME_SC_CMD_ABORTED_BY_HOST] = "Command Aborted By Host: The command was aborted as a result of host action", +}; + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#define ARGSTR(s, i) arg_str(s, ARRAY_SIZE(s), i) + +static const char *arg_str(const char * const *strings, + size_t array_size, size_t idx) +{ + if (idx < array_size && strings[idx]) + return strings[idx]; + return "unrecognized"; +} + +const char *nvme_status_to_string(int status, bool fabrics) +{ + const char *s = "Unknown status"; + __u16 sc, sct; + + if (status < 0) + return strerror(errno); + + sc = nvme_status_code(status); + sct = nvme_status_code_type(status); + + switch (sct) { + case NVME_SCT_GENERIC: + s = ARGSTR(generic_status, sc); + break; + case NVME_SCT_CMD_SPECIFIC: + if (sc < ARRAY_SIZE(cmd_spec_status)) + s = ARGSTR(cmd_spec_status, sc); + else if (fabrics) + s = ARGSTR(nvmf_status, sc); + else + s = ARGSTR(nvm_status, sc); + break; + case NVME_SCT_MEDIA: + s = ARGSTR(media_status, sc); + break; + case NVME_SCT_PATH: + s = ARGSTR(path_status, sc); + break; + case NVME_SCT_VS: + s = "Vendor Specific Status"; + break; + default: + break; + } + + return s; +} + +void nvme_init_copy_range(struct nvme_copy_range *copy, __u16 *nlbs, + __u64 *slbas, __u32 *eilbrts, __u32 *elbatms, + __u32 *elbats, __u16 nr) +{ + int i; + + for (i = 0; i < nr; i++) { + copy[i].nlb = cpu_to_le16(nlbs[i]); + copy[i].slba = cpu_to_le64(slbas[i]); + copy[i].eilbrt = cpu_to_le32(eilbrts[i]); + copy[i].elbatm = cpu_to_le16(elbatms[i]); + copy[i].elbat = cpu_to_le16(elbats[i]); + } +} + +void nvme_init_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs, + __u32 *llbas, __u64 *slbas, __u16 nr_ranges) +{ + int i; + + for (i = 0; i < nr_ranges; i++) { + dsm[i].cattr = cpu_to_le32(ctx_attrs[i]); + dsm[i].nlb = cpu_to_le32(llbas[i]); + dsm[i].slba = cpu_to_le64(slbas[i]); + } +} + +void nvme_init_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls, + __u16 *ctrlist) +{ + int i; + + cntlist->num = cpu_to_le16(num_ctrls); + for (i = 0; i < num_ctrls; i++) + cntlist->identifier[i] = cpu_to_le16(ctrlist[i]); +} + +int nvme_get_feature_length(int fid, __u32 cdw11, __u32 *len) +{ + switch (fid) { + case NVME_FEAT_FID_LBA_RANGE: + *len = sizeof(struct nvme_lba_range_type); + break; + case NVME_FEAT_FID_AUTO_PST: + *len = sizeof(struct nvme_feat_auto_pst); + break; + case NVME_FEAT_FID_PLM_CONFIG: + *len = sizeof(struct nvme_plm_config); + break; + case NVME_FEAT_FID_TIMESTAMP: + *len = sizeof(struct nvme_timestamp); + break; + case NVME_FEAT_FID_HOST_BEHAVIOR: + *len = sizeof(struct nvme_feat_host_behavior); + break; + case NVME_FEAT_FID_HOST_ID: + *len = (cdw11 & 0x1) ? 
16 : 8; + break; + case NVME_FEAT_FID_ARBITRATION: + case NVME_FEAT_FID_POWER_MGMT: + case NVME_FEAT_FID_TEMP_THRESH: + case NVME_FEAT_FID_ERR_RECOVERY: + case NVME_FEAT_FID_VOLATILE_WC: + case NVME_FEAT_FID_NUM_QUEUES: + case NVME_FEAT_FID_IRQ_COALESCE: + case NVME_FEAT_FID_IRQ_CONFIG: + case NVME_FEAT_FID_WRITE_ATOMIC: + case NVME_FEAT_FID_ASYNC_EVENT: + case NVME_FEAT_FID_HOST_MEM_BUF: + case NVME_FEAT_FID_KATO: + case NVME_FEAT_FID_HCTM: + case NVME_FEAT_FID_NOPSC: + case NVME_FEAT_FID_RRL: + case NVME_FEAT_FID_PLM_WINDOW: + case NVME_FEAT_FID_LBA_STS_INTERVAL: + case NVME_FEAT_FID_SANITIZE: + case NVME_FEAT_FID_ENDURANCE_EVT_CFG: + case NVME_FEAT_FID_SW_PROGRESS: + case NVME_FEAT_FID_RESV_MASK: + case NVME_FEAT_FID_RESV_PERSIST: + case NVME_FEAT_FID_WRITE_PROTECT: + *len = 0; + break; + case NVME_FEAT_FID_ENH_CTRL_METADATA: + case NVME_FEAT_FID_CTRL_METADATA: + case NVME_FEAT_FID_NS_METADATA: + *len = sizeof(struct nvme_host_metadata); + break; + default: + errno = EINVAL; + return -1; + } + return 0; +} + +int nvme_get_directive_receive_length(enum nvme_directive_dtype dtype, + enum nvme_directive_receive_doper doper, __u32 *len) +{ + switch (dtype) { + case NVME_DIRECTIVE_DTYPE_IDENTIFY: + switch (doper) { + case NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM: + *len = sizeof(struct nvme_id_directives); + return 0; + default: + errno = EINVAL; + return -1; + } + case NVME_DIRECTIVE_DTYPE_STREAMS: + switch (doper) { + case NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM: + *len = sizeof(struct nvme_streams_directive_params); + return 0; + case NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS: + *len = (128 * 1024) * sizeof(__le16); + return 0; + case NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE: + *len = 0; + return 0; + default: + errno = EINVAL; + return -1; + } + default: + errno = EINVAL; + return -1; + } +} + +static const char * const libnvme_status[] = { + [ENVME_CONNECT_RESOLVE] = "failed to resolve host", + [ENVME_CONNECT_ADDRFAM] = "unrecognized address family", + [ENVME_CONNECT_TRADDR] = "failed to get transport address", + [ENVME_CONNECT_TARG] = "no transport specified", + [ENVME_CONNECT_AARG] = "no transport address specified", + [ENVME_CONNECT_OPEN] = "failed to open nvme-fabrics device", + [ENVME_CONNECT_WRITE] = "failed to write to nvme-fabrics device", + [ENVME_CONNECT_READ] = "failed to read from nvme-fabrics device", + [ENVME_CONNECT_PARSE] = "failed to parse ctrl info", + [ENVME_CONNECT_INVAL_TR] = "invalid transport type", + [ENVME_CONNECT_LOOKUP_SUBSYS_NAME] = "failed to lookup subsystem name", + [ENVME_CONNECT_LOOKUP_SUBSYS] = "failed to lookup subsystem", +}; + +const char *nvme_errno_to_string(int status) +{ + const char *s = ARGSTR(libnvme_status, status); + + return s; +} + +char *hostname2traddr(struct nvme_root *r, const char *traddr) +{ + struct addrinfo *host_info, hints = {.ai_family = AF_UNSPEC}; + char addrstr[NVMF_TRADDR_SIZE]; + const char *p; + char *ret_traddr = NULL; + int ret; + + ret = getaddrinfo(traddr, NULL, &hints, &host_info); + if (ret) { + nvme_msg(r, LOG_ERR, "failed to resolve host %s info\n", + traddr); + return NULL; + } + + switch (host_info->ai_family) { + case AF_INET: + p = inet_ntop(host_info->ai_family, + &(((struct sockaddr_in *)host_info->ai_addr)->sin_addr), + addrstr, NVMF_TRADDR_SIZE); + break; + case AF_INET6: + p = inet_ntop(host_info->ai_family, + &(((struct sockaddr_in6 *)host_info->ai_addr)->sin6_addr), + addrstr, NVMF_TRADDR_SIZE); + break; + default: + nvme_msg(r, LOG_ERR, "unrecognized address family (%d) %s\n", + 
host_info->ai_family, traddr); + goto free_addrinfo; + } + + if (!p) { + nvme_msg(r, LOG_ERR, "failed to get traddr for %s\n", + traddr); + goto free_addrinfo; + } + ret_traddr = strdup(addrstr); + +free_addrinfo: + freeaddrinfo(host_info); + return ret_traddr; +} + +char *startswith(const char *s, const char *prefix) +{ + size_t l; + + l = strlen(prefix); + if (!strncmp(s, prefix, l)) + return (char *)s + l; + + return NULL; +} + +char *kv_strip(char *kv) +{ + char *s; + + kv[strcspn(kv, "\n\r")] = '\0'; + + /* Remove leading newline and spaces */ + kv += strspn(kv, " \t\n\r"); + + /* Skip comments and empty lines */ + if (*kv == '#' || *kv == '\0') { + *kv = '\0'; + return kv; + } + + /* Remove trailing newline chars */ + kv[strcspn(kv, "\n\r")] = '\0'; + + /* Delete trailing comments (including spaces/tabs that precede the #)*/ + s = &kv[strcspn(kv, "#")]; + *s-- = '\0'; + while ((s >= kv) && ((*s == ' ') || (*s == '\t'))) { + *s-- = '\0'; + } + + return kv; +} + +char *kv_keymatch(const char *kv, const char *key) +{ + char *value; + + value = startswith(kv, key); + if (value) { + /* Make sure key is a whole word. I.e. it should be + * followed by spaces, tabs, or a equal sign. Skip + * leading spaces, tabs, and equal sign (=) */ + switch (*value) { + case ' ': + case '\t': + case '=': + value += strspn(value, " \t="); + return value; + default: ; + } + } + + return NULL; +} + +/** + * read_file - read contents of file into @buffer. + * @fname: File name + * @buffer: Where to save file's contents + * @bufsz: Size of @buffer. On success, @bufsz gets decremented by the + * number of characters that were writtent to @buffer. + * + * Return: The number of characters read. If the file cannot be opened or + * nothing is read from the file, then this function returns 0. + */ +static size_t read_file(const char * fname, char *buffer, size_t *bufsz) +{ + char *p; + FILE *file; + size_t len; + + file = fopen(fname, "re"); + if (!file) + return 0; + + p = fgets(buffer, *bufsz, file); + fclose(file); + + if (!p) + return 0; + + /* Strip unwanted trailing chars */ + len = strcspn(buffer, " \t\n\r"); + *bufsz -= len; + + return len; +} + +static size_t copy_value(char *buf, size_t buflen, const char *value) +{ + size_t val_len; + + memset(buf, 0, buflen); + + /* Remove leading " */ + if (value[0] == '"') + value++; + + /* Remove trailing " */ + val_len = strcspn(value, "\""); + + memcpy(buf, value, MIN(val_len, buflen-1)); + + return val_len; +} + +size_t get_entity_name(char *buffer, size_t bufsz) +{ + size_t len = !gethostname(buffer, bufsz) ? strlen(buffer) : 0; + + /* Fill the rest of buffer with zeros */ + memset(&buffer[len], '\0', bufsz-len); + + return len; +} + +size_t get_entity_version(char *buffer, size_t bufsz) +{ + FILE *file; + size_t num_bytes = 0; + + /* /proc/sys/kernel/ostype typically contains the string "Linux" */ + num_bytes += read_file("/proc/sys/kernel/ostype", + &buffer[num_bytes], &bufsz); + + /* /proc/sys/kernel/osrelease contains the Linux + * version (e.g. 5.8.0-63-generic) + */ + buffer[num_bytes++] = ' '; /* Append a space */ + num_bytes += read_file("/proc/sys/kernel/osrelease", + &buffer[num_bytes], &bufsz); + + /* /etc/os-release contains Key-Value pairs. We only care about the key + * PRETTY_NAME, which contains the Distro's version. 
For example: + * "SUSE Linux Enterprise Server 15 SP4", "Ubuntu 20.04.3 LTS", or + * "Fedora Linux 35 (Server Edition)" + */ + file = fopen("/etc/os-release", "re"); + if (file) { + char name[64] = {0}; + size_t name_len = 0; + char ver_id[64] = {0}; + size_t ver_id_len = 0; + char line[LINE_MAX]; + char *p; + char *s; + + /* Read key-value pairs one line at a time */ + while ((!name_len || !ver_id_len) && + (p = fgets(line, sizeof(line), file)) != NULL) { + /* Clean up string by removing leading/trailing blanks + * and new line characters. Also eliminate trailing + * comments, if any. + */ + p = kv_strip(p); + + /* Empty string? */ + if (*p == '\0') + continue; + + s = kv_keymatch(p, "NAME"); + if (s) + name_len = copy_value(name, sizeof(name), s); + + s = kv_keymatch(p, "VERSION_ID"); + if (s) + ver_id_len = copy_value(ver_id, sizeof(ver_id), s); + } + fclose(file); + + if (name_len) { + /* Append a space */ + buffer[num_bytes++] = ' '; + name_len = MIN(name_len, bufsz); + memcpy(&buffer[num_bytes], name, name_len); + bufsz -= name_len; + num_bytes += name_len; + } + + if (ver_id_len) { + /* Append a space */ + buffer[num_bytes++] = ' '; + ver_id_len = MIN(ver_id_len, bufsz); + memcpy(&buffer[num_bytes], ver_id, ver_id_len); + bufsz -= ver_id_len; + num_bytes += ver_id_len; + } + } + + /* Fill the rest of buffer with zeros */ + memset(&buffer[num_bytes], '\0', bufsz); + + return num_bytes; +} + +struct nvmf_ext_attr *nvmf_exat_ptr_next(struct nvmf_ext_attr *p) +{ + return (struct nvmf_ext_attr *) + ((uintptr_t)p + (ptrdiff_t)nvmf_exat_size(le16_to_cpu(p->exatlen))); +} diff --git a/src/nvme/util.h b/src/nvme/util.h new file mode 100644 index 0000000..277b857 --- /dev/null +++ b/src/nvme/util.h @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch <keith.busch@wdc.com> + * Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> + */ +#ifndef _LIBNVME_UTIL_H +#define _LIBNVME_UTIL_H + +#include "types.h" + +/** + * DOC: util.h + * + * libnvme utility functions + */ + +/** + * enum nvme_connect_err - nvme connect error codes + * @ENVME_CONNECT_RESOLVE: failed to resolve host + * @ENVME_CONNECT_ADDRFAM: unrecognized address family + * @ENVME_CONNECT_TRADDR: failed to get traddr + * @ENVME_CONNECT_TARG: need a transport (-t) argument + * @ENVME_CONNECT_AARG: need a address (-a) argument + * @ENVME_CONNECT_OPEN: failed to open nvme-fabrics device + * @ENVME_CONNECT_WRITE: failed to write to nvme-fabrics device + * @ENVME_CONNECT_READ: failed to read from nvme-fabrics device + * @ENVME_CONNECT_PARSE: failed to parse ctrl info + * @ENVME_CONNECT_INVAL_TR: invalid transport type + * @ENVME_CONNECT_LOOKUP_SUBSYS_NAME: failed to lookup subsystem name + * @ENVME_CONNECT_LOOKUP_SUBSYS: failed to lookup subsystem + */ +enum nvme_connect_err { + ENVME_CONNECT_RESOLVE = 1000, + ENVME_CONNECT_ADDRFAM, + ENVME_CONNECT_TRADDR, + ENVME_CONNECT_TARG, + ENVME_CONNECT_AARG, + ENVME_CONNECT_OPEN, + ENVME_CONNECT_WRITE, + ENVME_CONNECT_READ, + ENVME_CONNECT_PARSE, + ENVME_CONNECT_INVAL_TR, + ENVME_CONNECT_LOOKUP_SUBSYS_NAME, + ENVME_CONNECT_LOOKUP_SUBSYS, +}; + +/** + * nvme_status_to_errno() - Converts nvme return status to errno + * @status: Return status from an nvme passthrough commmand + * @fabrics: Set to true if &status is to a fabrics target. 
+ * + * Return: An errno representing the nvme status if it is an nvme status field, + * or the status unchanged if status is < 0, since errno is already set. + */ +__u8 nvme_status_to_errno(int status, bool fabrics); + +/** + * nvme_status_to_string() - Returns string describing nvme return status. + * @status: Return status from an nvme passthrough command + * @fabrics: Set to true if &status is from a fabrics target. + * + * Return: String representation of the nvme status if it is an nvme status field, + * or a standard errno string if status is < 0. + */ +const char *nvme_status_to_string(int status, bool fabrics); + +/** + * nvme_errno_to_string() - Returns string describing nvme connect failures + * @err: Returned error code from nvme_add_ctrl() + * + * Return: String representation of the nvme connect error codes + */ +const char *nvme_errno_to_string(int err); + +/** + * nvme_init_ctrl_list() - Initialize an nvme_ctrl_list structure from an array. + * @cntlist: The controller list structure to initialize + * @num_ctrls: The number of controllers in the array, &ctrlist. + * @ctrlist: An array of controller identifiers in CPU native endian. + * + * This is intended to be used with any command that takes a controller list + * argument. See nvme_ns_attach_ctrls() and nvme_ns_detach(). + */ +void nvme_init_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls, + __u16 *ctrlist); + +/** + * nvme_init_dsm_range() - Constructs a data set range structure + * @dsm: DSM range array + * @ctx_attrs: Array of context attributes + * @llbas: Array of length in logical blocks + * @slbas: Array of starting logical blocks + * @nr_ranges: The size of the dsm arrays + * + * Each array must be the same size, 'nr_ranges'. This is intended to be + * used for constructing a payload for nvme_dsm(). + */ +void nvme_init_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs, + __u32 *llbas, __u64 *slbas, __u16 nr_ranges); + +/** + * nvme_init_copy_range() - Constructs a copy range structure + * @copy: Copy range array + * @nlbs: Number of logical blocks + * @slbas: Starting LBA + * @eilbrts: Expected initial logical block reference tag + * @elbatms: Expected logical block application tag mask + * @elbats: Expected logical block application tag + * @nr: Number of descriptors to construct + */ +void nvme_init_copy_range(struct nvme_copy_range *copy, __u16 *nlbs, + __u64 *slbas, __u32 *eilbrts, __u32 *elbatms, + __u32 *elbats, __u16 nr); + +/** + * nvme_get_feature_length() - Retrieve the command payload length for a + * specific feature identifier + * @fid: Feature identifier, see &enum nvme_features_id. + * @cdw11: The cdw11 value may affect the transfer (only known fid is + * %NVME_FEAT_FID_HOST_ID) + * @len: On success, set to this feature's payload length in bytes. + * + * Return: 0 on success, -1 with errno set to EINVAL if the function did not + * recognize &fid. + */ +int nvme_get_feature_length(int fid, __u32 cdw11, __u32 *len); + +/** + * nvme_get_directive_receive_length() - Get directive receive command payload length + * @dtype: Directive type, see &enum nvme_directive_dtype + * @doper: Directive receive operation, see &enum nvme_directive_receive_doper + * @len: On success, set to this directive's payload length in bytes. + * + * Return: 0 on success, -1 with errno set to EINVAL if the function did not + * recognize &dtype or &doper. 
+ */ +int nvme_get_directive_receive_length(enum nvme_directive_dtype dtype, + enum nvme_directive_receive_doper doper, __u32 *len); + +#define NVME_FEAT_ARB_BURST(v) NVME_GET(v, FEAT_ARBITRATION_BURST) +#define NVME_FEAT_ARB_LPW(v) NVME_GET(v, FEAT_ARBITRATION_LPW) +#define NVME_FEAT_ARB_MPW(v) NVME_GET(v, FEAT_ARBITRATION_MPW) +#define NVME_FEAT_ARB_HPW(v) NVME_GET(v, FEAT_ARBITRATION_HPW) + +static inline void nvme_feature_decode_arbitration(__u32 value, __u8 *ab, + __u8 *lpw, __u8 *mpw, + __u8 *hpw) +{ + *ab = NVME_FEAT_ARB_BURST(value); + *lpw = NVME_FEAT_ARB_LPW(value); + *mpw = NVME_FEAT_ARB_MPW(value); + *hpw = NVME_FEAT_ARB_HPW(value); +}; + +#define NVME_FEAT_PM_PS(v) NVME_GET(v, FEAT_PWRMGMT_PS) +#define NVME_FEAT_PM_WH(v) NVME_GET(v, FEAT_PWRMGMT_WH) + +static inline void nvme_feature_decode_power_mgmt(__u32 value, __u8 *ps, + __u8 *wh) +{ + *ps = NVME_FEAT_PM_PS(value); + *wh = NVME_FEAT_PM_WH(value); +} + +#define NVME_FEAT_LBAR_NR(v) NVME_GET(v, FEAT_LBAR_NR) + +static inline void nvme_feature_decode_lba_range(__u32 value, __u8 *num) +{ + *num = NVME_FEAT_LBAR_NR(value); +} + +#define NVME_FEAT_TT_TMPTH(v) NVME_GET(v, FEAT_TT_TMPTH) +#define NVME_FEAT_TT_TMPSEL(v) NVME_GET(v, FEAT_TT_TMPSEL) +#define NVME_FEAT_TT_THSEL(v) NVME_GET(v, FEAT_TT_THSEL) + +static inline void nvme_feature_decode_temp_threshold(__u32 value, __u16 *tmpth, + __u8 *tmpsel, __u8 *thsel) +{ + *tmpth = NVME_FEAT_TT_TMPTH(value); + *tmpsel = NVME_FEAT_TT_TMPSEL(value); + *thsel = NVME_FEAT_TT_THSEL(value); +} + +#define NVME_FEAT_ER_TLER(v) NVME_GET(v, FEAT_ERROR_RECOVERY_TLER) +#define NVME_FEAT_ER_DULBE(v) NVME_GET(v, FEAT_ERROR_RECOVERY_DULBE) + +static inline void nvme_feature_decode_error_recovery(__u32 value, __u16 *tler, + bool *dulbe) +{ + *tler = NVME_FEAT_ER_TLER(value); + *dulbe = NVME_FEAT_ER_DULBE(value); +} + +#define NVME_FEAT_VWC_WCE(v) NVME_GET(v, FEAT_VWC_WCE) + +static inline void nvme_feature_decode_volatile_write_cache(__u32 value, + bool *wce) +{ + *wce = NVME_FEAT_VWC_WCE(value); +} + +#define NVME_FEAT_NRQS_NSQR(v) NVME_GET(v, FEAT_NRQS_NSQR) +#define NVME_FEAT_NRQS_NCQR(v) NVME_GET(v, FEAT_NRQS_NCQR) + +static inline void nvme_feature_decode_number_of_queues(__u32 value, + __u16 *nsqr, + __u16 *ncqr) +{ + *nsqr = NVME_FEAT_NRQS_NSQR(value); + *ncqr = NVME_FEAT_NRQS_NCQR(value); +} + +#define NVME_FEAT_IRQC_THR(v) NVME_GET(v, FEAT_IRQC_THR) +#define NVME_FEAT_IRQC_TIME(v) NVME_GET(v, FEAT_IRQC_TIME) + +static inline void nvme_feature_decode_interrupt_coalescing(__u32 value, + __u8 *thr, + __u8 *time) +{ + *thr = NVME_FEAT_IRQC_THR(value); + *time = NVME_FEAT_IRQC_TIME(value); +} + +#define NVME_FEAT_ICFG_IV(v) NVME_GET(v, FEAT_ICFG_IV) +#define NVME_FEAT_ICFG_CD(v) NVME_GET(v, FEAT_ICFG_CD) + +static inline void nvme_feature_decode_interrupt_config(__u32 value, __u16 *iv, + bool *cd) +{ + *iv = NVME_FEAT_ICFG_IV(value); + *cd = NVME_FEAT_ICFG_CD(value); +} + +#define NVME_FEAT_WA_DN(v) NVME_GET(v, FEAT_WA_DN) + +static inline void nvme_feature_decode_write_atomicity(__u32 value, bool *dn) +{ + *dn = NVME_FEAT_WA_DN(value); +} + +#define NVME_FEAT_AE_SMART(v) NVME_GET(v, FEAT_AE_SMART) +#define NVME_FEAT_AE_NAN(v) NVME_GET(v, FEAT_AE_NAN) +#define NVME_FEAT_AE_FW(v) NVME_GET(v, FEAT_AE_FW) +#define NVME_FEAT_AE_TELEM(v) NVME_GET(v, FEAT_AE_TELEM) +#define NVME_FEAT_AE_ANA(v) NVME_GET(v, FEAT_AE_ANA) +#define NVME_FEAT_AE_PLA(v) NVME_GET(v, FEAT_AE_PLA) +#define NVME_FEAT_AE_LBAS(v) NVME_GET(v, FEAT_AE_LBAS) +#define NVME_FEAT_AE_EGA(v) NVME_GET(v, FEAT_AE_EGA) + +static inline void 
nvme_feature_decode_async_event_config(__u32 value, + __u8 *smart, bool *nan, bool *fw, bool *telem, + bool *ana, bool *pla, bool *lbas, bool *ega) +{ + *smart = NVME_FEAT_AE_SMART(value); + *nan = NVME_FEAT_AE_NAN(value); + *fw = NVME_FEAT_AE_FW(value); + *telem = NVME_FEAT_AE_TELEM(value); + *ana = NVME_FEAT_AE_ANA(value); + *pla = NVME_FEAT_AE_PLA(value); + *lbas = NVME_FEAT_AE_LBAS(value); + *ega = NVME_FEAT_AE_EGA(value); +} + +#define NVME_FEAT_APST_APSTE(v) NVME_GET(v, FEAT_APST_APSTE) + +static inline void nvme_feature_decode_auto_power_state(__u32 value, + bool *apste) +{ + *apste = NVME_FEAT_APST_APSTE(value); +} + +#define NVME_FEAT_HMEM_EHM(v) NVME_GET(v, FEAT_HMEM_EHM) + +static inline void nvme_feature_decode_host_memory_buffer(__u32 value, bool *ehm) +{ + *ehm = NVME_FEAT_HMEM_EHM(value); +} + +#define NVME_FEAT_HCTM_TMT2(v) NVME_GET(v, FEAT_HCTM_TMT2) +#define NVME_FEAT_HCTM_TMT1(v) NVME_GET(v, FEAT_HCTM_TMT1) + +static inline void nvme_feature_decode_host_thermal_mgmt(__u32 value, + __u16 *tmt2, + __u16 *tmt1) +{ + *tmt2 = NVME_FEAT_HCTM_TMT2(value); + *tmt1 = NVME_FEAT_HCTM_TMT1(value); +} + +#define NVME_FEAT_NOPS_NOPPME(v) NVME_GET(v, FEAT_NOPS_NOPPME) + +static inline void nvme_feature_decode_non_op_power_config(__u32 value, + bool *noppme) +{ + *noppme = NVME_FEAT_NOPS_NOPPME(value); +} + +#define NVME_FEAT_RRL_RRL(v) NVME_GET(v, FEAT_RRL_RRL) + +static inline void nvme_feature_decode_read_recovery_level_config(__u32 value, + __u8 *rrl) +{ + *rrl = NVME_FEAT_RRL_RRL(value); +} + +#define NVME_FEAT_PLM_PLME(v) NVME_GET(v, FEAT_PLM_PLME) + +static inline void nvme_feature_decode_predictable_latency_mode_config(__u32 value, + bool *plme) +{ + *plme = NVME_FEAT_PLM_PLME(value); +} + +#define NVME_FEAT_PLMW_WS(v) NVME_GET(v, FEAT_PLMW_WS) + +static inline void nvme_feature_decode_predictable_latency_mode_window(__u32 value, + __u8 *ws) +{ + *ws = NVME_FEAT_PLMW_WS(value); +} + +#define NVME_FEAT_LBAS_LSIRI(v) NVME_GET(v, FEAT_LBAS_LSIRI) +#define NVME_FEAT_LBAS_LSIPI(v) NVME_GET(v, FEAT_LBAS_LSIPI) + +static inline void nvme_feature_decode_lba_status_attributes(__u32 value, + __u16 *lsiri, + __u16 *lsipi) +{ + *lsiri = NVME_FEAT_LBAS_LSIRI(value); + *lsipi = NVME_FEAT_LBAS_LSIPI(value); +} + +#define NVME_FEAT_SC_NODRM(v) NVME_GET(v, FEAT_SC_NODRM) + +static inline void nvme_feature_decode_sanitize_config(__u32 value, bool *nodrm) +{ + *nodrm = NVME_FEAT_SC_NODRM(value); +} + +#define NVME_FEAT_EG_ENDGID(v) NVME_GET(v, FEAT_EG_ENDGID) +#define NVME_FEAT_EG_EGCW(v) NVME_GET(v, FEAT_EG_EGCW) + +static inline void nvme_feature_decode_endurance_group_event_config(__u32 value, + __u16 *endgid, __u8 *endgcw) +{ + *endgid = NVME_FEAT_EG_ENDGID(value); + *endgcw = NVME_FEAT_EG_EGCW(value); +} + +#define NVME_FEAT_SPM_PBSLC(v) NVME_GET(v, FEAT_SPM_PBSLC) + +static inline void nvme_feature_decode_software_progress_marker(__u32 value, + __u8 *pbslc) +{ + *pbslc = NVME_FEAT_SPM_PBSLC(value); +} + +#define NVME_FEAT_HOSTID_EXHID(v) NVME_GET(v, FEAT_HOSTID_EXHID) + +static inline void nvme_feature_decode_host_identifier(__u32 value, bool *exhid) +{ + *exhid = NVME_FEAT_HOSTID_EXHID(value); +} + +#define NVME_FEAT_RM_REGPRE(v) NVME_GET(v, FEAT_RM_REGPRE) +#define NVME_FEAT_RM_RESREL(v) NVME_GET(v, FEAT_RM_RESREL) +#define NVME_FEAT_RM_RESPRE(v) NVME_GET(v, FEAT_RM_RESPRE) + +static inline void nvme_feature_decode_reservation_notification(__u32 value, + bool *regpre, + bool *resrel, + bool *respre) +{ + *regpre = NVME_FEAT_RM_REGPRE(value); + *resrel = NVME_FEAT_RM_RESREL(value); + 
*respre = NVME_FEAT_RM_RESPRE(value); +} + +#define NVME_FEAT_RP_PTPL(v) NVME_GET(v, FEAT_RP_PTPL) + +static inline void nvme_feature_decode_reservation_persistance(__u32 value, + bool *ptpl) +{ + *ptpl = NVME_FEAT_RP_PTPL(value); +} + +#define NVME_FEAT_WP_WPS(v) NVME_GET(v, FEAT_WP_WPS) + +static inline void nvme_feature_decode_namespace_write_protect(__u32 value, + __u8 *wps) +{ + *wps = NVME_FEAT_WP_WPS(value); +} + +static inline void nvme_id_ns_flbas_to_lbaf_inuse(__u8 flbas, __u8 *lbaf_inuse) +{ + *lbaf_inuse = (((flbas & NVME_NS_FLBAS_HIGHER_MASK) >> 1) \ + | (flbas & NVME_NS_FLBAS_LOWER_MASK)); +} + +struct nvme_root; + +char *hostname2traddr(struct nvme_root *r, const char *traddr); + +/** + * get_entity_name - Get Entity Name (ENAME). + * @buffer: The buffer where the ENAME will be saved as an ASCII string. + * @bufsz: The size of @buffer. + * + * Per TP8010, ENAME is defined as the name associated with the host (i.e. + * hostname). + * + * Return: Number of characters copied to @buffer. + */ +size_t get_entity_name(char *buffer, size_t bufsz); + +/** + * get_entity_version - Get Entity Version (EVER). + * @buffer: The buffer where the EVER will be saved as an ASCII string. + * @bufsz: The size of @buffer. + * + * EVER is defined as the operating system name and version as an ASCII + * string. This function reads different files from the file system and + * builds a string as follows: [os type] [os release] [distro release] + * + * E.g. "Linux 5.17.0-rc1 SLES 15.4" + * + * Return: Number of characters copied to @buffer. + */ +size_t get_entity_version(char *buffer, size_t bufsz); + +/** + * kv_strip - Strip blanks from key value string + * @kv: The key-value string to strip + * + * Strip leading/trailing blanks as well as trailing comments from the + * Key=Value string pointed to by @kv. + * + * Return: A pointer to the stripped string. Note that the original string, + * @kv, gets modified. + */ +char *kv_strip(char *kv); + +/** + * kv_keymatch - Look for key in key value string + * @kv: The key=value string to search for the presence of @key + * @key: The key to look for + * + * Look for @key in the Key=Value pair pointed to by @k and return a + * pointer to the Value if @key is found. + * + * Check if @kv starts with @key. If it does then make sure that we + * have a whole-word match on the @key, and if we do, return a pointer + * to the first character of value (i.e. skip leading spaces, tabs, + * and equal sign) + * + * Return: A pointer to the first character of "value" if a match is found. + * NULL otherwise. + */ +char *kv_keymatch(const char *kv, const char *key); + +/** + * startswith - Checks that a string starts with a given prefix. + * @s: The string to check + * @prefix: A string that @s could be starting with + * + * Return: If @s starts with @prefix, then return a pointer within @s at + * the first character after the matched @prefix. NULL otherwise. + */ +char* startswith(const char *s, const char *prefix); + +#define __round_mask(val, mult) ((__typeof__(val))((mult)-1)) + +/** + * round_up - Round a value @val to the next multiple specified by @mult. + * @val: Value to round + * @mult: Multiple to round to. 
+ * + * usage: int x = round_up(13, sizeof(__u32)); // 13 -> 16 + */ +#define round_up(val, mult) ((((val)-1) | __round_mask((val), (mult)))+1) + +/** + * nvmf_exat_len() - Return length rounded up to a multiple of 4 + * @val_len: Value length + * + * Return the size in bytes, rounded to a multiple of 4 (e.g., size of + * __u32), of the buffer needed to hold the exat value of size + * @val_len. + * + * Return: Length rounded up to a multiple of 4 + */ +static inline __u16 nvmf_exat_len(size_t val_len) +{ + return (__u16)round_up(val_len, sizeof(__u32)); +} + +/** + * nvmf_exat_size - Return minimum aligned size to hold value + * @val_len: This is the length of the data to be copied to the "exatval" + * field of a "struct nvmf_ext_attr". + * + * Return the size of the "struct nvmf_ext_attr" needed to hold + * a value of size @val_len. + * + * Return: The size in bytes, rounded to a multiple of 4 (i.e. size of + * __u32), of the "struct nvmf_ext_attr" required to hold a string of + * length @val_len. + */ +static inline __u16 nvmf_exat_size(size_t val_len) +{ + return (__u16)(sizeof(struct nvmf_ext_attr) + nvmf_exat_len(val_len)); +} + +/** + * nvmf_exat_ptr_next - Increment @p to the next element in the array. + * @p: Pointer to an element of an array of "struct nvmf_ext_attr". + * + * Extended attributes are saved to an array of "struct nvmf_ext_attr" + * where each element of the array is of variable size. In order to + * move to the next element in the array one must increment the + * pointer to the current element (@p) by the size of the current + * element. + * + * Return: Pointer to the next element in the array. + */ +struct nvmf_ext_attr *nvmf_exat_ptr_next(struct nvmf_ext_attr *p); + +#endif /* _LIBNVME_UTIL_H */
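Editor's usage note on the status helpers declared above: nvme_status_to_string() decodes the completion status of a passthrough command, and nvme_errno_to_string() covers the ENVME_CONNECT_* codes. The following is a compile-only sketch of how a caller might report a result; the report_status() wrapper and the build setup (compiling against this source tree) are assumptions for illustration, not part of the commit itself.

#include <stdio.h>
#include <stdbool.h>
#include "nvme/util.h"	/* assumes the example is built within the library tree */

/* Hypothetical helper: print a readable result for a passthrough command.
 * A negative status means the ioctl itself failed (errno case), a positive
 * value is an NVMe status field, and zero is success. */
void report_status(int status, bool fabrics)
{
	if (status == 0)
		printf("command completed successfully\n");
	else
		fprintf(stderr, "command failed: %s\n",
			nvme_status_to_string(status, fabrics));
}

Connect failures returned by the fabrics helpers would be reported the same way, only through nvme_errno_to_string().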
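Similarly, nvme_init_dsm_range() packs caller-provided CPU-endian arrays into the little-endian struct nvme_dsm_range payload of a Data Set Management command. Below is a minimal sketch of building a two-range payload; the LBA numbers are invented for illustration, and the submission step is only indicated in a comment because nvme_dsm() is declared elsewhere.

#include "nvme/util.h"	/* assumes the example is built within the library tree */

int main(void)
{
	struct nvme_dsm_range ranges[2];
	__u32 ctx_attrs[2] = { 0, 0 };		/* no per-range context attributes */
	__u32 nlbs[2]      = { 8, 16 };		/* example lengths in logical blocks */
	__u64 slbas[2]     = { 0, 1024 };	/* example starting LBAs */

	/* Pack the three arrays into the on-the-wire descriptor format. */
	nvme_init_dsm_range(ranges, ctx_attrs, nlbs, slbas, 2);

	/* 'ranges' would now be handed to nvme_dsm() together with the
	 * device handle, namespace id, and the deallocate attribute. */
	return 0;
}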
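Finally, the kv_strip()/kv_keymatch() pair implements the Key=Value parsing that get_entity_version() applies to /etc/os-release, and the same pattern works for any simple Key=Value line. A hedged sketch on a hand-written input follows; note that the returned value still carries any surrounding quotes, which the static copy_value() helper in util.c strips for the library's own use.

#include <stdio.h>
#include "nvme/util.h"	/* assumes the example is built within the library tree */

int main(void)
{
	/* Hypothetical line as it could appear in /etc/os-release. */
	char line[] = "  NAME=\"Ubuntu\"   # distro name\n";
	char *p, *value;

	p = kv_strip(line);		/* drop leading blanks, newlines, trailing comments */
	if (*p == '\0')
		return 0;		/* empty or comment-only line */

	value = kv_keymatch(p, "NAME");	/* whole-word key match; skips spaces, tabs, '=' */
	if (value)
		printf("NAME is %s\n", value);	/* prints "Ubuntu" including the quotes */

	return 0;
}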