path: root/drivers/nvme
Diffstat
-rw-r--r--  drivers/nvme/Kconfig | 8
-rw-r--r--  drivers/nvme/Makefile | 5
-rw-r--r--  drivers/nvme/common/Kconfig | 4
-rw-r--r--  drivers/nvme/common/Makefile | 7
-rw-r--r--  drivers/nvme/common/auth.c | 483
-rw-r--r--  drivers/nvme/host/Kconfig | 121
-rw-r--r--  drivers/nvme/host/Makefile | 32
-rw-r--r--  drivers/nvme/host/apple.c | 1606
-rw-r--r--  drivers/nvme/host/auth.c | 1043
-rw-r--r--  drivers/nvme/host/constants.c | 203
-rw-r--r--  drivers/nvme/host/core.c | 4803
-rw-r--r--  drivers/nvme/host/fabrics.c | 1428
-rw-r--r--  drivers/nvme/host/fabrics.h | 230
-rw-r--r--  drivers/nvme/host/fault_inject.c | 82
-rw-r--r--  drivers/nvme/host/fc.c | 4006
-rw-r--r--  drivers/nvme/host/fc.h | 227
-rw-r--r--  drivers/nvme/host/hwmon.c | 281
-rw-r--r--  drivers/nvme/host/ioctl.c | 985
-rw-r--r--  drivers/nvme/host/multipath.c | 964
-rw-r--r--  drivers/nvme/host/nvme.h | 1141
-rw-r--r--  drivers/nvme/host/pci.c | 3543
-rw-r--r--  drivers/nvme/host/pr.c | 315
-rw-r--r--  drivers/nvme/host/rdma.c | 2397
-rw-r--r--  drivers/nvme/host/sysfs.c | 668
-rw-r--r--  drivers/nvme/host/tcp.c | 2673
-rw-r--r--  drivers/nvme/host/trace.c | 357
-rw-r--r--  drivers/nvme/host/trace.h | 172
-rw-r--r--  drivers/nvme/host/zns.c | 249
-rw-r--r--  drivers/nvme/target/Kconfig | 100
-rw-r--r--  drivers/nvme/target/Makefile | 22
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 1045
-rw-r--r--  drivers/nvme/target/auth.c | 528
-rw-r--r--  drivers/nvme/target/configfs.c | 2059
-rw-r--r--  drivers/nvme/target/core.c | 1711
-rw-r--r--  drivers/nvme/target/discovery.c | 404
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c | 527
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 373
-rw-r--r--  drivers/nvme/target/fc.c | 2947
-rw-r--r--  drivers/nvme/target/fcloop.c | 1656
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 473
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 382
-rw-r--r--  drivers/nvme/target/loop.c | 688
-rw-r--r--  drivers/nvme/target/nvmet.h | 741
-rw-r--r--  drivers/nvme/target/passthru.c | 659
-rw-r--r--  drivers/nvme/target/rdma.c | 2095
-rw-r--r--  drivers/nvme/target/tcp.c | 1936
-rw-r--r--  drivers/nvme/target/trace.c | 235
-rw-r--r--  drivers/nvme/target/trace.h | 164
-rw-r--r--  drivers/nvme/target/zns.c | 628
-rw-r--r--  drivers/nvmem/Kconfig | 420
-rw-r--r--  drivers/nvmem/Makefile | 83
-rw-r--r--  drivers/nvmem/apple-efuses.c | 80
-rw-r--r--  drivers/nvmem/bcm-ocotp.c | 313
-rw-r--r--  drivers/nvmem/brcm_nvram.c | 257
-rw-r--r--  drivers/nvmem/core.c | 2148
-rw-r--r--  drivers/nvmem/imx-iim.c | 143
-rw-r--r--  drivers/nvmem/imx-ocotp-ele.c | 175
-rw-r--r--  drivers/nvmem/imx-ocotp-scu.c | 274
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 645
-rw-r--r--  drivers/nvmem/jz4780-efuse.c | 237
-rw-r--r--  drivers/nvmem/lan9662-otpc.c | 222
-rw-r--r--  drivers/nvmem/layerscape-sfp.c | 109
-rw-r--r--  drivers/nvmem/layouts/Kconfig | 23
-rw-r--r--  drivers/nvmem/layouts/Makefile | 7
-rw-r--r--  drivers/nvmem/layouts/onie-tlv.c | 244
-rw-r--r--  drivers/nvmem/layouts/sl28vpd.c | 153
-rw-r--r--  drivers/nvmem/lpc18xx_eeprom.c | 280
-rw-r--r--  drivers/nvmem/lpc18xx_otp.c | 105
-rw-r--r--  drivers/nvmem/meson-efuse.c | 120
-rw-r--r--  drivers/nvmem/meson-mx-efuse.c | 242
-rw-r--r--  drivers/nvmem/microchip-otpc.c | 288
-rw-r--r--  drivers/nvmem/mtk-efuse.c | 146
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 197
-rw-r--r--  drivers/nvmem/nintendo-otp.c | 122
-rw-r--r--  drivers/nvmem/qcom-spmi-sdam.c | 181
-rw-r--r--  drivers/nvmem/qfprom.c | 463
-rw-r--r--  drivers/nvmem/qoriq-efuse.c | 78
-rw-r--r--  drivers/nvmem/rave-sp-eeprom.c | 361
-rw-r--r--  drivers/nvmem/rmem.c | 98
-rw-r--r--  drivers/nvmem/rockchip-efuse.c | 300
-rw-r--r--  drivers/nvmem/rockchip-otp.c | 365
-rw-r--r--  drivers/nvmem/sc27xx-efuse.c | 277
-rw-r--r--  drivers/nvmem/sec-qfprom.c | 96
-rw-r--r--  drivers/nvmem/snvs_lpgpr.c | 157
-rw-r--r--  drivers/nvmem/sprd-efuse.c | 441
-rw-r--r--  drivers/nvmem/stm32-bsec-optee-ta.c | 298
-rw-r--r--  drivers/nvmem/stm32-bsec-optee-ta.h | 80
-rw-r--r--  drivers/nvmem/stm32-romem.c | 294
-rw-r--r--  drivers/nvmem/sunplus-ocotp.c | 231
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 231
-rw-r--r--  drivers/nvmem/u-boot-env.c | 259
-rw-r--r--  drivers/nvmem/uniphier-efuse.c | 77
-rw-r--r--  drivers/nvmem/vf610-ocotp.c | 254
-rw-r--r--  drivers/nvmem/zynqmp_nvmem.c | 81
94 files changed, 59061 insertions, 0 deletions
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
new file mode 100644
index 0000000000..656e46d938
--- /dev/null
+++ b/drivers/nvme/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "NVME Support"
+
+source "drivers/nvme/common/Kconfig"
+source "drivers/nvme/host/Kconfig"
+source "drivers/nvme/target/Kconfig"
+
+endmenu
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
new file mode 100644
index 0000000000..eedca8c720
--- /dev/null
+++ b/drivers/nvme/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_NVME_COMMON) += common/
+obj-y += host/
+obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
new file mode 100644
index 0000000000..4514f44362
--- /dev/null
+++ b/drivers/nvme/common/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_COMMON
+ tristate
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
new file mode 100644
index 0000000000..720c625b8a
--- /dev/null
+++ b/drivers/nvme/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+
+nvme-common-y += auth.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
new file mode 100644
index 0000000000..d90e4f0c08
--- /dev/null
+++ b/drivers/nvme/common/auth.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <linux/nvme.h>
+#include <linux/nvme-auth.h>
+
+static u32 nvme_dhchap_seqnum;
+static DEFINE_MUTEX(nvme_dhchap_mutex);
+
+u32 nvme_auth_get_seqnum(void)
+{
+ u32 seqnum;
+
+ mutex_lock(&nvme_dhchap_mutex);
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum = get_random_u32();
+ else {
+ nvme_dhchap_seqnum++;
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum++;
+ }
+ seqnum = nvme_dhchap_seqnum;
+ mutex_unlock(&nvme_dhchap_mutex);
+ return seqnum;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
+
+static struct nvme_auth_dhgroup_map {
+ const char name[16];
+ const char kpp[16];
+} dhgroup_map[] = {
+ [NVME_AUTH_DHGROUP_NULL] = {
+ .name = "null", .kpp = "null" },
+ [NVME_AUTH_DHGROUP_2048] = {
+ .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
+ [NVME_AUTH_DHGROUP_3072] = {
+ .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
+ [NVME_AUTH_DHGROUP_4096] = {
+ .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
+ [NVME_AUTH_DHGROUP_6144] = {
+ .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
+ [NVME_AUTH_DHGROUP_8192] = {
+ .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
+};
+
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].name;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].kpp;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+ int i;
+
+ if (!dhgroup_name || !strlen(dhgroup_name))
+ return NVME_AUTH_DHGROUP_INVALID;
+ for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+ if (!strlen(dhgroup_map[i].name))
+ continue;
+ if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+ strlen(dhgroup_map[i].name)))
+ return i;
+ }
+ return NVME_AUTH_DHGROUP_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+ int len;
+ const char hmac[15];
+ const char digest[8];
+} hash_map[] = {
+ [NVME_AUTH_HASH_SHA256] = {
+ .len = 32,
+ .hmac = "hmac(sha256)",
+ .digest = "sha256",
+ },
+ [NVME_AUTH_HASH_SHA384] = {
+ .len = 48,
+ .hmac = "hmac(sha384)",
+ .digest = "sha384",
+ },
+ [NVME_AUTH_HASH_SHA512] = {
+ .len = 64,
+ .hmac = "hmac(sha512)",
+ .digest = "sha512",
+ },
+};
+
+const char *nvme_auth_hmac_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].hmac;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].digest;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+u8 nvme_auth_hmac_id(const char *hmac_name)
+{
+ int i;
+
+ if (!hmac_name || !strlen(hmac_name))
+ return NVME_AUTH_HASH_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+ if (!strlen(hash_map[i].hmac))
+ continue;
+ if (!strncmp(hash_map[i].hmac, hmac_name,
+ strlen(hash_map[i].hmac)))
+ return i;
+ }
+ return NVME_AUTH_HASH_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+size_t nvme_auth_hmac_hash_len(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return 0;
+ return hash_map[hmac_id].len;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash)
+{
+ struct nvme_dhchap_key *key;
+ unsigned char *p;
+ u32 crc;
+ int ret, key_len;
+ size_t allocated_len = strlen(secret);
+
+ /* Secret might be affixed with a ':' */
+ p = strrchr(secret, ':');
+ if (p)
+ allocated_len = p - secret;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+ key->key = kzalloc(allocated_len, GFP_KERNEL);
+ if (!key->key) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_len = base64_decode(secret, allocated_len, key->key);
+ if (key_len < 0) {
+ pr_debug("base64 key decoding error %d\n",
+ key_len);
+ ret = key_len;
+ goto out_free_secret;
+ }
+
+ if (key_len != 36 && key_len != 52 &&
+ key_len != 68) {
+ pr_err("Invalid key len %d\n", key_len);
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ if (key_hash > 0 &&
+ (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+ pr_err("Mismatched key len %d for %s\n", key_len,
+ nvme_auth_hmac_name(key_hash));
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ /* The last four bytes are the CRC in little-endian format */
+ key_len -= 4;
+ /*
+ * The Linux crc32() implementation doesn't do the pre- and post-inversion,
+ * so we have to do it manually.
+ */
+ crc = ~crc32(~0, key->key, key_len);
+
+ if (get_unaligned_le32(key->key + key_len) != crc) {
+ pr_err("key crc mismatch (key %08x, crc %08x)\n",
+ get_unaligned_le32(key->key + key_len), crc);
+ ret = -EKEYREJECTED;
+ goto out_free_secret;
+ }
+ key->len = key_len;
+ key->hash = key_hash;
+ return key;
+out_free_secret:
+ kfree_sensitive(key->key);
+out_free_key:
+ kfree(key);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+
+void nvme_auth_free_key(struct nvme_dhchap_key *key)
+{
+ if (!key)
+ return;
+ kfree_sensitive(key->key);
+ kfree(key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free_key);
+
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+{
+ const char *hmac_name;
+ struct crypto_shash *key_tfm;
+ struct shash_desc *shash;
+ u8 *transformed_key;
+ int ret;
+
+ if (!key || !key->key) {
+ pr_warn("No key specified\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ if (key->hash == 0) {
+ transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
+ return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ }
+ hmac_name = nvme_auth_hmac_name(key->hash);
+ if (!hmac_name) {
+ pr_warn("Invalid key hash id %d\n", key->hash);
+ return ERR_PTR(-EINVAL);
+ }
+
+ key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(key_tfm))
+ return (u8 *)key_tfm;
+
+ shash = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(key_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ if (!transformed_key) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ shash->tfm = key_tfm;
+ ret = crypto_shash_setkey(key_tfm, key->key, key->len);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_init(shash);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, nqn, strlen(nqn));
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_final(shash, transformed_key);
+ if (ret < 0)
+ goto out_free_transformed_key;
+
+ kfree(shash);
+ crypto_free_shash(key_tfm);
+
+ return transformed_key;
+
+out_free_transformed_key:
+ kfree_sensitive(transformed_key);
+out_free_shash:
+ kfree(shash);
+out_free_key:
+ crypto_free_shash(key_tfm);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
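
For reference, the transformation implemented above is simply HMAC(secret, NQN || "NVMe-over-Fabrics"), keyed with the decoded secret and using the hash named in the secret's header (a hash id of 0 means the key is used unmodified). Below is a minimal userspace sketch of the same computation; the use of OpenSSL and the fixed choice of SHA-256 are assumptions made purely for illustration and are not part of this kernel code.

/* Illustrative only; build with: cc transform.c -lcrypto */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

/* HMAC-SHA256(secret, nqn || "NVMe-over-Fabrics"), mirroring nvme_auth_transform_key() */
static int transform_key_sha256(const unsigned char *secret, size_t secret_len,
                                const char *nqn, unsigned char out[32])
{
    unsigned char msg[512];
    size_t nqn_len = strlen(nqn);
    unsigned int out_len = 0;

    if (nqn_len + 17 > sizeof(msg))
        return -1;
    memcpy(msg, nqn, nqn_len);
    memcpy(msg + nqn_len, "NVMe-over-Fabrics", 17); /* 17 bytes, no trailing NUL */

    if (!HMAC(EVP_sha256(), secret, (int)secret_len,
              msg, nqn_len + 17, out, &out_len) || out_len != 32)
        return -1;
    return 0;
}

int main(void)
{
    /* dummy all-zero secret and an example NQN, for demonstration only */
    unsigned char secret[32] = { 0 };
    unsigned char out[32];

    if (transform_key_sha256(secret, sizeof(secret),
                             "nqn.2014-08.org.nvmexpress:uuid:example", out) == 0)
        printf("transformed key starts with %02x%02x...\n", out[0], out[1]);
    return 0;
}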
+
+static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
+{
+ const char *digest_name;
+ struct crypto_shash *tfm;
+ int ret;
+
+ digest_name = nvme_auth_digest_name(hmac_id);
+ if (!digest_name) {
+ pr_debug("%s: failed to get digest for %d\n", __func__,
+ hmac_id);
+ return -EINVAL;
+ }
+ tfm = crypto_alloc_shash(digest_name, 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
+ if (ret < 0)
+ pr_debug("%s: Failed to hash digest len %zu\n", __func__,
+ skey_len);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ u8 *hashed_key;
+ const char *hmac_name;
+ int ret;
+
+ hashed_key = kmalloc(hlen, GFP_KERNEL);
+ if (!hashed_key)
+ return -ENOMEM;
+
+ ret = nvme_auth_hash_skey(hmac_id, skey,
+ skey_len, hashed_key);
+ if (ret < 0)
+ goto out_free_key;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ ret = -EINVAL;
+ goto out_free_key;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_key;
+ }
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_free_hash;
+ }
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, hashed_key, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_update(desc, challenge, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_final(desc, aug);
+out_free_desc:
+ kfree_sensitive(desc);
+out_free_hash:
+ crypto_free_shash(tfm);
+out_free_key:
+ kfree_sensitive(hashed_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
+
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
+{
+ int ret;
+
+ ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
+ if (ret)
+ pr_debug("failed to set private key, error %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
+
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, host_key, host_key_len);
+ kpp_request_set_output(req, &dst, host_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
+
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist src, dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ sg_init_one(&src, ctrl_key, ctrl_key_len);
+ kpp_request_set_input(req, &src, ctrl_key_len);
+ sg_init_one(&dst, sess_key, sess_key_len);
+ kpp_request_set_output(req, &dst, sess_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
+
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
+{
+ struct nvme_dhchap_key *key;
+ u8 key_hash;
+
+ if (!secret) {
+ *ret_key = NULL;
+ return 0;
+ }
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+ return -EINVAL;
+
+ /* Pass in the secret without the 'DHHC-1:XX:' prefix */
+ key = nvme_auth_extract_key(secret + 10, key_hash);
+ if (IS_ERR(key)) {
+ *ret_key = NULL;
+ return PTR_ERR(key);
+ }
+
+ *ret_key = key;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+MODULE_LICENSE("GPL v2");
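
As the parsing in nvme_auth_generate_key() and nvme_auth_extract_key() above shows, a DH-HMAC-CHAP secret has the form "DHHC-1:XX:<base64>:", where XX selects the hash and the base64 payload decodes to a 32-, 48- or 64-byte key followed by a 4-byte little-endian CRC-32. The following is a minimal userspace sketch of the trailing-CRC check only, using zlib as an illustrative stand-in for the kernel's crc32() (zlib already applies the initial/final bit inversion the kernel code does by hand); base64 decoding is omitted.

/* Illustrative only; build with: cc check_crc.c -lz */
#include <stdint.h>
#include <stdio.h>
#include <zlib.h>

static int dhchap_key_crc_ok(const unsigned char *key, size_t decoded_len)
{
    uint32_t stored, computed;
    size_t key_len;

    /* valid decoded lengths: 32-, 48- or 64-byte key plus 4-byte CRC */
    if (decoded_len != 36 && decoded_len != 52 && decoded_len != 68)
        return 0;
    key_len = decoded_len - 4;

    /* the CRC is stored little-endian right after the key bytes */
    stored = (uint32_t)key[key_len] |
             ((uint32_t)key[key_len + 1] << 8) |
             ((uint32_t)key[key_len + 2] << 16) |
             ((uint32_t)key[key_len + 3] << 24);

    computed = (uint32_t)crc32(0L, key, (uInt)key_len);
    return stored == computed;
}

int main(void)
{
    unsigned char buf[36] = { 0 };      /* 32-byte dummy key + 4-byte CRC */
    uint32_t crc = (uint32_t)crc32(0L, buf, 32);

    buf[32] = crc & 0xff;
    buf[33] = (crc >> 8) & 0xff;
    buf[34] = (crc >> 16) & 0xff;
    buf[35] = (crc >> 24) & 0xff;

    printf("crc ok: %d\n", dhchap_key_crc_ok(buf, sizeof(buf)));
    return 0;
}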
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
new file mode 100644
index 0000000000..2f6a7f8c94
--- /dev/null
+++ b/drivers/nvme/host/Kconfig
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NVME_CORE
+ tristate
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
+
+config BLK_DEV_NVME
+ tristate "NVM Express block device"
+ depends on PCI && BLOCK
+ select NVME_CORE
+ help
+ The NVM Express driver is for solid state drives directly
+ connected to the PCI or PCI Express bus. If you know you
+ don't have one of these, it is safe to answer N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme.
+
+config NVME_MULTIPATH
+ bool "NVMe multipath support"
+ depends on NVME_CORE
+ help
+ This option enables support for multipath access to NVMe
+ subsystems. If this option is enabled only a single
+ /dev/nvmeXnY device will show up for each NVMe namespace,
+ even if it is accessible through multiple controllers.
+
+config NVME_VERBOSE_ERRORS
+ bool "NVMe verbose error reporting"
+ depends on NVME_CORE
+ help
+ This option enables verbose reporting for NVMe errors. The
+ error translation table will grow the kernel image size by
+ about 4 KB.
+
+config NVME_HWMON
+ bool "NVMe hardware monitoring"
+ depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
+ help
+ This provides support for NVMe hardware monitoring. If enabled,
+ a hardware monitoring device will be created for each NVMe drive
+ in the system.
+
+config NVME_FABRICS
+ select NVME_CORE
+ tristate
+
+config NVME_RDMA
+ tristate "NVM Express over Fabrics RDMA host driver"
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the RDMA (InfiniBand, RoCE, iWARP) transport. This allows you
+ to use remote block devices exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
+
+config NVME_FC
+ tristate "NVM Express over Fabrics FC host driver"
+ depends on BLOCK
+ depends on HAS_DMA
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the FC transport. This allows you to use remote block devices
+ exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
+
+config NVME_TCP
+ tristate "NVM Express over Fabrics TCP host driver"
+ depends on INET
+ depends on BLOCK
+ select NVME_FABRICS
+ select CRYPTO
+ select CRYPTO_CRC32C
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the TCP transport. This allows you to use remote block devices
+ exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
+
+config NVME_AUTH
+ bool "NVM Express over Fabrics In-Band Authentication"
+ depends on NVME_CORE
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This provides support for NVMe over Fabrics In-Band Authentication.
+
+ If unsure, say N.
+
+config NVME_APPLE
+ tristate "Apple ANS2 NVM Express host driver"
+ depends on OF && BLOCK
+ depends on APPLE_RTKIT && APPLE_SART
+ depends on ARCH_APPLE || COMPILE_TEST
+ select NVME_CORE
+ help
+ This provides support for the NVMe controller embedded in Apple SoCs
+ such as the M1.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme-apple.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
new file mode 100644
index 0000000000..c7c3cf202d
--- /dev/null
+++ b/drivers/nvme/host/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_CORE) += nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
+obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
+obj-$(CONFIG_NVME_FC) += nvme-fc.o
+obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
+obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
+
+nvme-core-y += core.o ioctl.o sysfs.o pr.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
+nvme-core-$(CONFIG_TRACING) += trace.o
+nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
+nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
+nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
+
+nvme-y += pci.o
+
+nvme-fabrics-y += fabrics.o
+
+nvme-rdma-y += rdma.o
+
+nvme-fc-y += fc.o
+
+nvme-tcp-y += tcp.o
+
+nvme-apple-y += apple.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
new file mode 100644
index 0000000000..596bb11eeb
--- /dev/null
+++ b/drivers/nvme/host/apple.c
@@ -0,0 +1,1606 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple ANS NVM Express device driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on the pci.c NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * and on the rdma.c NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/once.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/soc/apple/sart.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+
+#include "nvme.h"
+
+#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
+#define APPLE_ANS_MAX_QUEUE_DEPTH 64
+
+#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
+#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define APPLE_ANS_ACQ_DB 0x1004
+#define APPLE_ANS_IOCQ_DB 0x100c
+
+#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
+
+#define APPLE_ANS_BOOT_STATUS 0x1300
+#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
+
+#define APPLE_ANS_UNKNOWN_CTRL 0x24008
+#define APPLE_ANS_PRP_NULL_CHECK BIT(11)
+
+#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
+#define APPLE_ANS_LINEAR_SQ_EN BIT(0)
+
+#define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
+#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
+
+#define APPLE_NVMMU_NUM_TCBS 0x28100
+#define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
+#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
+#define APPLE_NVMMU_TCB_INVAL 0x28118
+#define APPLE_NVMMU_TCB_STAT 0x28120
+
+/*
+ * This controller is a bit weird in the way command tags work: both the
+ * admin and the IO queue share the same tag space. Additionally, tags
+ * cannot be higher than 0x40, which effectively limits the combined
+ * queue depth to 0x40. Instead of wasting half of that on the admin queue,
+ * which gets much less traffic, we reduce its size here.
+ * The controller also doesn't support async events, so no space needs to
+ * be reserved for NVME_NR_AEN_COMMANDS.
+ */
+#define APPLE_NVME_AQ_DEPTH 2
+#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+
+/*
+ * These can be higher, but we need to ensure that no command requires
+ * an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS 127
+
+/*
+ * This controller comes with an embedded IOMMU known as NVMMU.
+ * The NVMMU is pointed to an array of TCBs indexed by the command tag.
+ * Each command must be configured inside this structure before it's allowed
+ * to execute, including commands that don't require DMA transfers.
+ *
+ * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the
+ * admin queue): Those commands must still be added to the NVMMU but the DMA
+ * buffers cannot be represented as PRPs and must instead be allowed using SART.
+ *
+ * Programming the PRPs to the same values as those in the submission queue
+ * looks rather silly at first. This hardware is however designed for a kernel
+ * that runs the NVMMU code in a higher exception level than the NVMe driver.
+ * In that setting the NVMe driver first programs the submission queue entry
+ * and then executes a hypercall to the code that is allowed to program the
+ * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
+ * verifying that they don't point to kernel text, data, pagetables, or similar
+ * protected areas before programming the TCB to point to this shadow copy.
+ * Since Linux doesn't do any of that we may as well just point both the queue
+ * and the TCB PRP pointer to the same memory.
+ */
+struct apple_nvmmu_tcb {
+ u8 opcode;
+
+#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
+#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1)
+ u8 dma_flags;
+
+ u8 command_id;
+ u8 _unk0;
+ __le16 length;
+ u8 _unk1[18];
+ __le64 prp1;
+ __le64 prp2;
+ u8 _unk2[16];
+ u8 aes_iv[8];
+ u8 _aes_unk[64];
+};
+
+/*
+ * The Apple NVMe controller only supports a single admin and a single IO queue
+ * which are both limited to 64 entries and share a single interrupt.
+ *
+ * The completion queue works as usual. The submission "queue" instead is
+ * an array indexed by the command tag on this hardware. Commands must also be
+ * present in the NVMMU's tcb array. They are triggered by writing their tag to
+ * a MMIO register.
+ */
+struct apple_nvme_queue {
+ struct nvme_command *sqes;
+ struct nvme_completion *cqes;
+ struct apple_nvmmu_tcb *tcbs;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ dma_addr_t tcb_dma_addr;
+
+ u32 __iomem *sq_db;
+ u32 __iomem *cq_db;
+
+ u16 cq_head;
+ u8 cq_phase;
+
+ bool is_adminq;
+ bool enabled;
+};
+
+/*
+ * The apple_nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP chunk allocations in addition
+ * to the actual struct scatterlist.
+ */
+struct apple_nvme_iod {
+ struct nvme_request req;
+ struct nvme_command cmd;
+ struct apple_nvme_queue *q;
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int nents; /* Used in scatterlist */
+ dma_addr_t first_dma;
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ struct scatterlist *sg;
+};
+
+struct apple_nvme {
+ struct device *dev;
+
+ void __iomem *mmio_coproc;
+ void __iomem *mmio_nvme;
+
+ struct device **pd_dev;
+ struct device_link **pd_link;
+ int pd_count;
+
+ struct apple_sart *sart;
+ struct apple_rtkit *rtk;
+ struct reset_control *reset;
+
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ mempool_t *iod_mempool;
+
+ struct nvme_ctrl ctrl;
+ struct work_struct remove_work;
+
+ struct apple_nvme_queue adminq;
+ struct apple_nvme_queue ioq;
+
+ struct blk_mq_tag_set admin_tagset;
+ struct blk_mq_tag_set tagset;
+
+ int irq;
+ spinlock_t lock;
+};
+
+static_assert(sizeof(struct nvme_command) == 64);
+static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
+
+static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct apple_nvme, ctrl);
+}
+
+static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return container_of(q, struct apple_nvme, adminq);
+
+ return container_of(q, struct apple_nvme, ioq);
+}
+
+static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return APPLE_NVME_AQ_DEPTH;
+
+ return APPLE_ANS_MAX_QUEUE_DEPTH;
+}
+
+static void apple_nvme_rtkit_crashed(void *cookie)
+{
+ struct apple_nvme *anv = cookie;
+
+ dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
+ nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_sart_dma_setup(void *cookie,
+ struct apple_rtkit_shmem *bfr)
+{
+ struct apple_nvme *anv = cookie;
+ int ret;
+
+ if (bfr->iova)
+ return -EINVAL;
+ if (!bfr->size)
+ return -EINVAL;
+
+ bfr->buffer =
+ dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
+ if (ret) {
+ dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+ bfr->buffer = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void apple_nvme_sart_dma_destroy(void *cookie,
+ struct apple_rtkit_shmem *bfr)
+{
+ struct apple_nvme *anv = cookie;
+
+ apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
+ dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
+ .crashed = apple_nvme_rtkit_crashed,
+ .shmem_setup = apple_nvme_sart_dma_setup,
+ .shmem_destroy = apple_nvme_sart_dma_destroy,
+};
+
+static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
+ if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
+ dev_warn_ratelimited(anv->dev,
+ "NVMMU TCB invalidation failed\n");
+}
+
+static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+ struct nvme_command *cmd)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ u32 tag = nvme_tag_from_cid(cmd->common.command_id);
+ struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
+
+ tcb->opcode = cmd->common.opcode;
+ tcb->prp1 = cmd->common.dptr.prp1;
+ tcb->prp2 = cmd->common.dptr.prp2;
+ tcb->length = cmd->rw.length;
+ tcb->command_id = tag;
+
+ if (nvme_is_write(cmd))
+ tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
+ else
+ tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
+
+ memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
+
+ /*
+ * This lock here doesn't make much sense at first glance, but
+ * removing it will result in occasional missed completion
+ * interrupts even though the commands still appear on the CQ.
+ * It's unclear why this happens but our best guess is that
+ * there is a bug in the firmware triggered when a new command
+ * is issued while we're inside the irq handler between the
+ * NVMMU invalidation (and making the tag available again)
+ * and the final CQ update.
+ */
+ spin_lock_irq(&anv->lock);
+ writel(tag, q->sq_db);
+ spin_unlock_irq(&anv->lock);
+}
+
+/*
+ * From pci.c:
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static inline size_t apple_nvme_iod_alloc_size(void)
+{
+ const unsigned int nprps = DIV_ROUND_UP(
+ NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
+ const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ const size_t alloc_size = sizeof(__le64 *) * npages +
+ sizeof(struct scatterlist) * NVME_MAX_SEGS;
+
+ return alloc_size;
+}
+
+static void **apple_nvme_iod_list(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+}
+
+static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
+{
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = apple_nvme_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+}
+
+static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (iod->dma_len) {
+ dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
+ }
+
+ WARN_ON_ONCE(!iod->nents);
+
+ dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+ if (iod->npages == 0)
+ dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
+ iod->first_dma);
+ else
+ apple_nvme_free_prps(anv, req);
+ mempool_free(iod->sg, anv->iod_mempool);
+}
+
+static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
+ i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+}
+
+static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_rw_command *cmnd)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct dma_pool *pool;
+ int length = blk_rq_payload_bytes(req);
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+ __le64 *prp_list;
+ void **list = apple_nvme_iod_list(req);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ goto done;
+ }
+
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= NVME_CTRL_PAGE_SIZE) {
+ iod->first_dma = dma_addr;
+ goto done;
+ }
+
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = anv->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = anv->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return BLK_STS_RESOURCE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+ __le64 *old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+ goto free_prps;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+ return BLK_STS_OK;
+free_prps:
+ apple_nvme_free_prps(anv, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
+ WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
+ "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
+ iod->nents);
+ return BLK_STS_IOERR;
+}
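
The loop above chains additional PRP list pages whenever a transfer needs more than two page pointers. The following self-contained sketch reduces that walk to the underlying accounting, assuming a 4 KiB controller page purely for the example: given the payload length and the offset of its first byte within a page, it reports how many page pointers are needed and whether PRP2 ends up as a direct pointer or a pointer to a PRP list.

/* Illustrative only; not part of this driver. */
#include <stdio.h>

#define CTRL_PAGE 4096u   /* assumed NVME_CTRL_PAGE_SIZE for this example */

struct prp_plan {
    unsigned int entries;   /* data pointers needed, including PRP1 */
    int prp2_is_list;       /* 0: PRP2 unused or a direct pointer, 1: PRP list */
};

static struct prp_plan plan_prps(unsigned int payload_len, unsigned int first_offset)
{
    struct prp_plan p = { .entries = 1, .prp2_is_list = 0 };
    unsigned int covered = CTRL_PAGE - (first_offset & (CTRL_PAGE - 1));

    if (payload_len <= covered)
        return p;               /* fits behind PRP1, PRP2 stays zero */

    /* every remaining controller page needs one more pointer */
    p.entries += (payload_len - covered + CTRL_PAGE - 1) / CTRL_PAGE;

    /* one extra pointer fits directly in PRP2; more requires a PRP list */
    if (p.entries > 2)
        p.prp2_is_list = 1;
    return p;
}

int main(void)
{
    /* e.g. a 16 KiB read starting 512 bytes into a page: 5 pointers, PRP list */
    struct prp_plan p = plan_prps(16384, 512);

    printf("entries=%u list=%d\n", p.entries, p.prp2_is_list);
    return 0;
}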
+
+static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
+
+ iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(anv->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ return BLK_STS_OK;
+}
+
+static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int nr_mapped;
+
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct bio_vec bv = req_bvec(req);
+
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
+ &bv);
+ }
+
+ iod->dma_len = 0;
+ iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return BLK_STS_RESOURCE;
+ sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ if (!iod->nents)
+ goto out_free_sg;
+
+ nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
+ goto out_free_sg;
+
+ ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
+ if (ret != BLK_STS_OK)
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+out_free_sg:
+ mempool_free(iod->sg, anv->iod_mempool);
+ return ret;
+}
+
+static __always_inline void apple_nvme_unmap_rq(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
+
+ if (blk_rq_nr_phys_segments(req))
+ apple_nvme_unmap_data(anv, req);
+}
+
+static void apple_nvme_complete_rq(struct request *req)
+{
+ apple_nvme_unmap_rq(req);
+ nvme_complete_rq(req);
+}
+
+static void apple_nvme_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, apple_nvme_unmap_rq);
+}
+
+static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
+{
+ struct nvme_completion *hcqe = &q->cqes[q->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
+}
+
+static inline struct blk_mq_tags *
+apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return anv->admin_tagset.tags[0];
+ else
+ return anv->tagset.tags[0];
+}
+
+static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
+ struct io_comp_batch *iob, u16 idx)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct nvme_completion *cqe = &q->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
+ struct request *req;
+
+ apple_nvmmu_inval(q, command_id);
+
+ req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
+ if (unlikely(!req)) {
+ dev_warn(anv->dev, "invalid id %d completed", command_id);
+ return;
+ }
+
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ apple_nvme_complete_batch))
+ apple_nvme_complete_rq(req);
+}
+
+static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
+{
+ u32 tmp = q->cq_head + 1;
+
+ if (tmp == apple_nvme_queue_depth(q)) {
+ q->cq_head = 0;
+ q->cq_phase ^= 1;
+ } else {
+ q->cq_head = tmp;
+ }
+}
+
+static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
+ struct io_comp_batch *iob)
+{
+ bool found = false;
+
+ while (apple_nvme_cqe_pending(q)) {
+ found = true;
+
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
+ apple_nvme_handle_cqe(q, iob, q->cq_head);
+ apple_nvme_update_cq_head(q);
+ }
+
+ if (found)
+ writel(q->cq_head, q->cq_db);
+
+ return found;
+}
+
+static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
+{
+ bool found;
+ DEFINE_IO_COMP_BATCH(iob);
+
+ if (!READ_ONCE(q->enabled) && !force)
+ return false;
+
+ found = apple_nvme_poll_cq(q, &iob);
+
+ if (!rq_list_empty(iob.req_list))
+ apple_nvme_complete_batch(&iob);
+
+ return found;
+}
+
+static irqreturn_t apple_nvme_irq(int irq, void *data)
+{
+ struct apple_nvme *anv = data;
+ bool handled = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&anv->lock, flags);
+ if (apple_nvme_handle_cq(&anv->ioq, false))
+ handled = true;
+ if (apple_nvme_handle_cq(&anv->adminq, false))
+ handled = true;
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ if (handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static int apple_nvme_create_cq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(1);
+ c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
+ c.create_cq.irq_vector = cpu_to_le16(0);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_cq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ c.delete_queue.opcode = nvme_admin_delete_cq;
+ c.delete_queue.qid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_create_sq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(1);
+ c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
+ c.create_sq.cqid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_sq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ c.delete_queue.opcode = nvme_admin_delete_sq;
+ c.delete_queue.qid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct apple_nvme_queue *q = hctx->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct request *req = bd->rq;
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_command *cmnd = &iod->cmd;
+ blk_status_t ret;
+
+ iod->npages = -1;
+ iod->nents = 0;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!READ_ONCE(q->enabled)))
+ return BLK_STS_IOERR;
+
+ if (!nvme_check_ready(&anv->ctrl, req, true))
+ return nvme_fail_nonready_command(&anv->ctrl, req);
+
+ ret = nvme_setup_cmd(ns, req);
+ if (ret)
+ return ret;
+
+ if (blk_rq_nr_phys_segments(req)) {
+ ret = apple_nvme_map_data(anv, req, cmnd);
+ if (ret)
+ goto out_free_cmd;
+ }
+
+ nvme_start_request(req);
+ apple_nvme_submit_cmd(q, cmnd);
+ return BLK_STS_OK;
+
+out_free_cmd:
+ nvme_cleanup_cmd(req);
+ return ret;
+}
+
+static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ hctx->driver_data = data;
+ return 0;
+}
+
+static int apple_nvme_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct apple_nvme_queue *q = set->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_request *nreq = nvme_req(req);
+
+ iod->q = q;
+ nreq->ctrl = &anv->ctrl;
+ nreq->cmd = &iod->cmd;
+
+ return 0;
+}
+
+static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
+{
+ u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+ bool dead = false, freeze = false;
+ unsigned long flags;
+
+ if (apple_rtkit_is_crashed(anv->rtk))
+ dead = true;
+ if (!(csts & NVME_CSTS_RDY))
+ dead = true;
+ if (csts & NVME_CSTS_CFS)
+ dead = true;
+
+ if (anv->ctrl.state == NVME_CTRL_LIVE ||
+ anv->ctrl.state == NVME_CTRL_RESETTING) {
+ freeze = true;
+ nvme_start_freeze(&anv->ctrl);
+ }
+
+ /*
+ * Give the controller a chance to complete all entered requests if
+ * doing a safe shutdown.
+ */
+ if (!dead && shutdown && freeze)
+ nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
+
+ nvme_quiesce_io_queues(&anv->ctrl);
+
+ if (!dead) {
+ if (READ_ONCE(anv->ioq.enabled)) {
+ apple_nvme_remove_sq(anv);
+ apple_nvme_remove_cq(anv);
+ }
+
+ /*
+ * Always disable the NVMe controller after shutdown.
+ * We need to do this to bring it back up later anyway, and we
+ * can't do it while the firmware is not running (e.g. in the
+ * resume reset path before RTKit is initialized), so for Apple
+ * controllers it makes sense to unconditionally do it here.
+ * Additionally, this sequence of events is reliable, while
+ * others (like disabling after bringing back the firmware on
+ * resume) seem to run into trouble under some circumstances.
+ *
+ * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+ * NVMe controller is handed off with firmware shut down, in an
+ * NVMe disabled state, after a clean shutdown).
+ */
+ if (shutdown)
+ nvme_disable_ctrl(&anv->ctrl, shutdown);
+ nvme_disable_ctrl(&anv->ctrl, false);
+ }
+
+ WRITE_ONCE(anv->ioq.enabled, false);
+ WRITE_ONCE(anv->adminq.enabled, false);
+ mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
+ nvme_quiesce_admin_queue(&anv->ctrl);
+
+ /* last chance to complete any requests before nvme_cancel_request */
+ spin_lock_irqsave(&anv->lock, flags);
+ apple_nvme_handle_cq(&anv->ioq, true);
+ apple_nvme_handle_cq(&anv->adminq, true);
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ nvme_cancel_tagset(&anv->ctrl);
+ nvme_cancel_admin_tagset(&anv->ctrl);
+
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+ * deadlocking blk-mq hot-cpu notifier.
+ */
+ if (shutdown) {
+ nvme_unquiesce_io_queues(&anv->ctrl);
+ nvme_unquiesce_admin_queue(&anv->ctrl);
+ }
+}
+
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct apple_nvme_queue *q = iod->q;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ unsigned long flags;
+ u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+
+ if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ /*
+ * From rdma.c:
+ * If we are resetting, connecting or deleting we should
+ * complete immediately because we may block controller
+ * teardown or setup sequence
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
+ */
+ dev_warn(anv->dev,
+ "I/O %d(aq:%d) timeout while not in live state\n",
+ req->tag, q->is_adminq);
+ if (blk_mq_request_started(req) &&
+ !blk_mq_request_completed(req)) {
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ blk_mq_complete_request(req);
+ }
+ return BLK_EH_DONE;
+ }
+
+ /* check if we just missed an interrupt if we're still alive */
+ if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
+ spin_lock_irqsave(&anv->lock, flags);
+ apple_nvme_handle_cq(q, false);
+ spin_unlock_irqrestore(&anv->lock, flags);
+ if (blk_mq_request_completed(req)) {
+ dev_warn(anv->dev,
+ "I/O %d(aq:%d) timeout: completion polled\n",
+ req->tag, q->is_adminq);
+ return BLK_EH_DONE;
+ }
+ }
+
+ /*
+ * aborting commands isn't supported which leaves a full reset as our
+ * only option here
+ */
+ dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
+ req->tag, q->is_adminq);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ apple_nvme_disable(anv, false);
+ nvme_reset_ctrl(&anv->ctrl);
+ return BLK_EH_DONE;
+}
+
+static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
+ struct io_comp_batch *iob)
+{
+ struct apple_nvme_queue *q = hctx->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ bool found;
+ unsigned long flags;
+
+ spin_lock_irqsave(&anv->lock, flags);
+ found = apple_nvme_poll_cq(q, iob);
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ return found;
+}
+
+static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
+ .queue_rq = apple_nvme_queue_rq,
+ .complete = apple_nvme_complete_rq,
+ .init_hctx = apple_nvme_init_hctx,
+ .init_request = apple_nvme_init_request,
+ .timeout = apple_nvme_timeout,
+};
+
+static const struct blk_mq_ops apple_nvme_mq_ops = {
+ .queue_rq = apple_nvme_queue_rq,
+ .complete = apple_nvme_complete_rq,
+ .init_hctx = apple_nvme_init_hctx,
+ .init_request = apple_nvme_init_request,
+ .timeout = apple_nvme_timeout,
+ .poll = apple_nvme_poll,
+};
+
+static void apple_nvme_init_queue(struct apple_nvme_queue *q)
+{
+ unsigned int depth = apple_nvme_queue_depth(q);
+
+ q->cq_head = 0;
+ q->cq_phase = 1;
+ memset(q->tcbs, 0,
+ APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+ memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
+ WRITE_ONCE(q->enabled, true);
+ wmb(); /* ensure the first interrupt sees the initialization */
+}
+
+static void apple_nvme_reset_work(struct work_struct *work)
+{
+ unsigned int nr_io_queues = 1;
+ int ret;
+ u32 boot_status, aqa;
+ struct apple_nvme *anv =
+ container_of(work, struct apple_nvme, ctrl.reset_work);
+
+ if (anv->ctrl.state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
+ anv->ctrl.state);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* there's unfortunately no known way to recover if RTKit crashed :( */
+ if (apple_rtkit_is_crashed(anv->rtk)) {
+ dev_err(anv->dev,
+ "RTKit has crashed without any way to recover.");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* RTKit must be shut down cleanly for the (soft)-reset to work */
+ if (apple_rtkit_is_running(anv->rtk)) {
+ /* reset the controller if it is enabled */
+ if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+ apple_nvme_disable(anv, false);
+ dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
+ ret = apple_rtkit_shutdown(anv->rtk);
+ if (ret)
+ goto out;
+ }
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ ret = reset_control_assert(anv->reset);
+ if (ret)
+ goto out;
+
+ ret = apple_rtkit_reinit(anv->rtk);
+ if (ret)
+ goto out;
+
+ ret = reset_control_deassert(anv->reset);
+ if (ret)
+ goto out;
+
+ writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+ anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ ret = apple_rtkit_boot(anv->rtk);
+ if (ret) {
+ dev_err(anv->dev, "ANS did not boot");
+ goto out;
+ }
+
+ ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
+ boot_status,
+ boot_status == APPLE_ANS_BOOT_STATUS_OK,
+ USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
+ if (ret) {
+ dev_err(anv->dev, "ANS did not initialize");
+ goto out;
+ }
+
+ dev_dbg(anv->dev, "ANS booted successfully.");
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
+ dma_max_mapping_size(anv->dev) >> 9);
+ anv->ctrl.max_segments = NVME_MAX_SEGS;
+
+ dma_set_max_seg_size(anv->dev, 0xffffffff);
+
+ /*
+ * Enable NVMMU and linear submission queues.
+ * While we could keep those disabled and pretend this is a slightly
+ * more common NVMe controller, we'd still need some quirks (e.g.
+ * SQ entries would be 128 bytes), and Apple might drop support for
+ * that mode in the future.
+ */
+ writel(APPLE_ANS_LINEAR_SQ_EN,
+ anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+
+ /* Allow as many pending commands as possible for both queues */
+ writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
+ anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+
+ /* Setup the NVMMU for the maximum admin and IO queue depth */
+ writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
+ anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+
+ /*
+ * This is probably a chicken bit: without it all commands where any PRP
+ * is set to zero (including those that don't use that field) fail and
+ * the co-processor complains about "completed with err BAD_CMD-" or
+ * a "NULL_PRP_PTR_ERR" in the syslog
+ */
+ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+ ~APPLE_ANS_PRP_NULL_CHECK,
+ anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+
+ /* Setup the admin queue */
+ aqa = APPLE_NVME_AQ_DEPTH - 1;
+ aqa |= aqa << 16;
+ writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
+ writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
+ writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
+
+ /* Setup NVMMU for both queues */
+ writeq(anv->adminq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+ writeq(anv->ioq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+
+ anv->ctrl.sqsize =
+ APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
+
+ dev_dbg(anv->dev, "Enabling controller now");
+ ret = nvme_enable_ctrl(&anv->ctrl);
+ if (ret)
+ goto out;
+
+ dev_dbg(anv->dev, "Starting admin queue");
+ apple_nvme_init_queue(&anv->adminq);
+ nvme_unquiesce_admin_queue(&anv->ctrl);
+
+ if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(anv->ctrl.device,
+ "failed to mark controller CONNECTING\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = nvme_init_ctrl_finish(&anv->ctrl, false);
+ if (ret)
+ goto out;
+
+ dev_dbg(anv->dev, "Creating IOCQ");
+ ret = apple_nvme_create_cq(anv);
+ if (ret)
+ goto out;
+ dev_dbg(anv->dev, "Creating IOSQ");
+ ret = apple_nvme_create_sq(anv);
+ if (ret)
+ goto out_remove_cq;
+
+ apple_nvme_init_queue(&anv->ioq);
+ nr_io_queues = 1;
+ ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
+ if (ret)
+ goto out_remove_sq;
+ if (nr_io_queues != 1) {
+ ret = -ENXIO;
+ goto out_remove_sq;
+ }
+
+ anv->ctrl.queue_count = nr_io_queues + 1;
+
+ nvme_unquiesce_io_queues(&anv->ctrl);
+ nvme_wait_freeze(&anv->ctrl);
+ blk_mq_update_nr_hw_queues(&anv->tagset, 1);
+ nvme_unfreeze(&anv->ctrl);
+
+ if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(anv->ctrl.device,
+ "failed to mark controller live state\n");
+ ret = -ENODEV;
+ goto out_remove_sq;
+ }
+
+ nvme_start_ctrl(&anv->ctrl);
+
+ dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
+ return;
+
+out_remove_sq:
+ apple_nvme_remove_sq(anv);
+out_remove_cq:
+ apple_nvme_remove_cq(anv);
+out:
+ dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
+ nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+ nvme_get_ctrl(&anv->ctrl);
+ apple_nvme_disable(anv, false);
+ nvme_mark_namespaces_dead(&anv->ctrl);
+ if (!queue_work(nvme_wq, &anv->remove_work))
+ nvme_put_ctrl(&anv->ctrl);
+}
+
+static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
+{
+ struct apple_nvme *anv =
+ container_of(work, struct apple_nvme, remove_work);
+
+ nvme_put_ctrl(&anv->ctrl);
+ device_release_driver(anv->dev);
+}
+
+static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
+
+ return snprintf(buf, size, "%s\n", dev_name(dev));
+}
+
+static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
+{
+ struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
+
+ if (anv->ctrl.admin_q)
+ blk_put_queue(anv->ctrl.admin_q);
+ put_device(anv->dev);
+}
+
+static const struct nvme_ctrl_ops nvme_ctrl_ops = {
+ .name = "apple-nvme",
+ .module = THIS_MODULE,
+ .flags = 0,
+ .reg_read32 = apple_nvme_reg_read32,
+ .reg_write32 = apple_nvme_reg_write32,
+ .reg_read64 = apple_nvme_reg_read64,
+ .free_ctrl = apple_nvme_free_ctrl,
+ .get_address = apple_nvme_get_address,
+};
+
+static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
+{
+ struct apple_nvme *anv = data;
+
+ flush_work(&anv->ctrl.reset_work);
+ flush_work(&anv->ctrl.scan_work);
+ nvme_put_ctrl(&anv->ctrl);
+}
+
+static void devm_apple_nvme_put_tag_set(void *data)
+{
+ blk_mq_free_tag_set(data);
+}
+
+static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
+{
+ int ret;
+
+ anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
+ anv->admin_tagset.nr_hw_queues = 1;
+ anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
+ anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
+ anv->admin_tagset.numa_node = NUMA_NO_NODE;
+ anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
+ anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
+ anv->admin_tagset.driver_data = &anv->adminq;
+
+ ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->admin_tagset);
+ if (ret)
+ return ret;
+
+ anv->tagset.ops = &apple_nvme_mq_ops;
+ anv->tagset.nr_hw_queues = 1;
+ anv->tagset.nr_maps = 1;
+ /*
+ * Tags are used as an index to the NVMMU and must be unique across
+ * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
+ * must be marked as reserved in the IO queue.
+ */
+ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+ anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+ anv->tagset.timeout = NVME_IO_TIMEOUT;
+ anv->tagset.numa_node = NUMA_NO_NODE;
+ anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
+ anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
+ anv->tagset.driver_data = &anv->ioq;
+
+ ret = blk_mq_alloc_tag_set(&anv->tagset);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->tagset);
+ if (ret)
+ return ret;
+
+ anv->ctrl.admin_tagset = &anv->admin_tagset;
+ anv->ctrl.tagset = &anv->tagset;
+
+ return 0;
+}
+
+static int apple_nvme_queue_alloc(struct apple_nvme *anv,
+ struct apple_nvme_queue *q)
+{
+ unsigned int depth = apple_nvme_queue_depth(q);
+
+ q->cqes = dmam_alloc_coherent(anv->dev,
+ depth * sizeof(struct nvme_completion),
+ &q->cq_dma_addr, GFP_KERNEL);
+ if (!q->cqes)
+ return -ENOMEM;
+
+ q->sqes = dmam_alloc_coherent(anv->dev,
+ depth * sizeof(struct nvme_command),
+ &q->sq_dma_addr, GFP_KERNEL);
+ if (!q->sqes)
+ return -ENOMEM;
+
+ /*
+ * We need the maximum queue depth here because the NVMMU only has a
+ * single depth configuration shared between both queues.
+ */
+ q->tcbs = dmam_alloc_coherent(anv->dev,
+ APPLE_ANS_MAX_QUEUE_DEPTH *
+ sizeof(struct apple_nvmmu_tcb),
+ &q->tcb_dma_addr, GFP_KERNEL);
+ if (!q->tcbs)
+ return -ENOMEM;
+
+ /*
+	 * Initialize the phase bit so the freshly allocated (zeroed) memory
+	 * doesn't look like a full CQ already.
+ */
+ q->cq_phase = 1;
+ return 0;
+}
+
+static void apple_nvme_detach_genpd(struct apple_nvme *anv)
+{
+ int i;
+
+ if (anv->pd_count <= 1)
+ return;
+
+ for (i = anv->pd_count - 1; i >= 0; i--) {
+ if (anv->pd_link[i])
+ device_link_del(anv->pd_link[i]);
+ if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
+ dev_pm_domain_detach(anv->pd_dev[i], true);
+ }
+}
+
+static int apple_nvme_attach_genpd(struct apple_nvme *anv)
+{
+ struct device *dev = anv->dev;
+ int i;
+
+ anv->pd_count = of_count_phandle_with_args(
+ dev->of_node, "power-domains", "#power-domain-cells");
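+	/*
+	 * A single power domain is attached automatically by the driver core;
+	 * explicit handling is only needed when there are multiple domains.
+	 */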
+ if (anv->pd_count <= 1)
+ return 0;
+
+ anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
+ GFP_KERNEL);
+ if (!anv->pd_dev)
+ return -ENOMEM;
+
+ anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
+ GFP_KERNEL);
+ if (!anv->pd_link)
+ return -ENOMEM;
+
+ for (i = 0; i < anv->pd_count; i++) {
+ anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(anv->pd_dev[i])) {
+ apple_nvme_detach_genpd(anv);
+ return PTR_ERR(anv->pd_dev[i]);
+ }
+
+ anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!anv->pd_link[i]) {
+ apple_nvme_detach_genpd(anv);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void devm_apple_nvme_mempool_destroy(void *data)
+{
+ mempool_destroy(data);
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_nvme *anv;
+ int ret;
+
+ anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
+ if (!anv)
+ return -ENOMEM;
+
+ anv->dev = get_device(dev);
+ anv->adminq.is_adminq = true;
+ platform_set_drvdata(pdev, anv);
+
+ ret = apple_nvme_attach_genpd(anv);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to attach power domains");
+ goto put_dev;
+ }
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ ret = -ENXIO;
+ goto put_dev;
+ }
+
+ anv->irq = platform_get_irq(pdev, 0);
+ if (anv->irq < 0) {
+ ret = anv->irq;
+ goto put_dev;
+ }
+ if (!anv->irq) {
+ ret = -ENXIO;
+ goto put_dev;
+ }
+
+ anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
+ if (IS_ERR(anv->mmio_coproc)) {
+ ret = PTR_ERR(anv->mmio_coproc);
+ goto put_dev;
+ }
+ anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
+ if (IS_ERR(anv->mmio_nvme)) {
+ ret = PTR_ERR(anv->mmio_nvme);
+ goto put_dev;
+ }
+
+ anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+
+ anv->sart = devm_apple_sart_get(dev);
+ if (IS_ERR(anv->sart)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->sart),
+ "Failed to initialize SART");
+ goto put_dev;
+ }
+
+ anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
+ if (IS_ERR(anv->reset)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->reset),
+ "Failed to get reset control");
+ goto put_dev;
+ }
+
+ INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
+ INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
+ spin_lock_init(&anv->lock);
+
+ ret = apple_nvme_queue_alloc(anv, &anv->adminq);
+ if (ret)
+ goto put_dev;
+ ret = apple_nvme_queue_alloc(anv, &anv->ioq);
+ if (ret)
+ goto put_dev;
+
+ anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
+ if (!anv->prp_page_pool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ anv->prp_small_pool =
+ dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
+ if (!anv->prp_small_pool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
+ anv->iod_mempool =
+ mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
+ if (!anv->iod_mempool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ret = devm_add_action_or_reset(anv->dev,
+ devm_apple_nvme_mempool_destroy, anv->iod_mempool);
+ if (ret)
+ goto put_dev;
+
+ ret = apple_nvme_alloc_tagsets(anv);
+ if (ret)
+ goto put_dev;
+
+ ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
+ "nvme-apple", anv);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request IRQ");
+ goto put_dev;
+ }
+
+ anv->rtk =
+ devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
+ if (IS_ERR(anv->rtk)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
+ "Failed to initialize RTKit");
+ goto put_dev;
+ }
+
+ ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
+ NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
+ goto put_dev;
+ }
+
+ anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ if (IS_ERR(anv->ctrl.admin_q)) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ nvme_reset_ctrl(&anv->ctrl);
+ async_schedule(apple_nvme_async_probe, anv);
+
+ return 0;
+
+put_dev:
+ put_device(anv->dev);
+ return ret;
+}
+
+static int apple_nvme_remove(struct platform_device *pdev)
+{
+ struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+ nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+ flush_work(&anv->ctrl.reset_work);
+ nvme_stop_ctrl(&anv->ctrl);
+ nvme_remove_namespaces(&anv->ctrl);
+ apple_nvme_disable(anv, true);
+ nvme_uninit_ctrl(&anv->ctrl);
+
+ if (apple_rtkit_is_running(anv->rtk))
+ apple_rtkit_shutdown(anv->rtk);
+
+ apple_nvme_detach_genpd(anv);
+
+ return 0;
+}
+
+static void apple_nvme_shutdown(struct platform_device *pdev)
+{
+ struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+ apple_nvme_disable(anv, true);
+ if (apple_rtkit_is_running(anv->rtk))
+ apple_rtkit_shutdown(anv->rtk);
+}
+
+static int apple_nvme_resume(struct device *dev)
+{
+ struct apple_nvme *anv = dev_get_drvdata(dev);
+
+ return nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_suspend(struct device *dev)
+{
+ struct apple_nvme *anv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ apple_nvme_disable(anv, true);
+
+ if (apple_rtkit_is_running(anv->rtk))
+ ret = apple_rtkit_shutdown(anv->rtk);
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
+ apple_nvme_resume);
+
+static const struct of_device_id apple_nvme_of_match[] = {
+ { .compatible = "apple,nvme-ans2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
+
+static struct platform_driver apple_nvme_driver = {
+ .driver = {
+ .name = "nvme-apple",
+ .of_match_table = apple_nvme_of_match,
+ .pm = pm_sleep_ptr(&apple_nvme_pm_ops),
+ },
+ .probe = apple_nvme_probe,
+ .remove = apple_nvme_remove,
+ .shutdown = apple_nvme_shutdown,
+};
+module_platform_driver(apple_nvme_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 0000000000..811541ce20
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+#define CHAP_BUF_SIZE 4096
+static struct kmem_cache *nvme_chap_buf_cache;
+static mempool_t *nvme_chap_buf_pool;
+
+struct nvme_dhchap_queue_context {
+ struct list_head entry;
+ struct work_struct auth_work;
+ struct nvme_ctrl *ctrl;
+ struct crypto_shash *shash_tfm;
+ struct crypto_kpp *dh_tfm;
+ void *buf;
+ int qid;
+ int error;
+ u32 s1;
+ u32 s2;
+ u16 transaction;
+ u8 status;
+ u8 dhgroup_id;
+ u8 hash_id;
+ size_t hash_len;
+ u8 c1[64];
+ u8 c2[64];
+ u8 response[64];
+ u8 *host_response;
+ u8 *ctrl_key;
+ u8 *host_key;
+ u8 *sess_key;
+ int ctrl_key_len;
+ int host_key_len;
+ int sess_key_len;
+};
+
+static struct workqueue_struct *nvme_auth_wq;
+
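+/*
+ * Authentication commands for the admin queue (qid 0) are sent over the
+ * fabrics queue and may block; commands for I/O queues use reserved tags
+ * on the connect queue and must not wait for a tag.
+ */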
+#define nvme_auth_flags_from_qid(qid) \
+	((qid) == 0 ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+	((qid) == 0 ? (ctrl)->fabrics_q : (ctrl)->connect_q)
+
+static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
+ ctrl->opts->nr_poll_queues + 1;
+}
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+ void *data, size_t data_len, bool auth_send)
+{
+ struct nvme_command cmd = {};
+ blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+ struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ int ret;
+
+ cmd.auth_common.opcode = nvme_fabrics_command;
+ cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+ cmd.auth_common.spsp0 = 0x01;
+ cmd.auth_common.spsp1 = 0x01;
+ if (auth_send) {
+ cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+ cmd.auth_send.tl = cpu_to_le32(data_len);
+ } else {
+ cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+ cmd.auth_receive.al = cpu_to_le32(data_len);
+ }
+
+ ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+ qid == 0 ? NVME_QID_ANY : qid,
+ 0, flags);
+	if (ret > 0)
+		dev_warn(ctrl->device,
+			"qid %d auth_%s failed with status %d\n",
+			qid, auth_send ? "send" : "receive", ret);
+	else if (ret < 0)
+		dev_err(ctrl->device,
+			"qid %d auth_%s failed with error %d\n",
+			qid, auth_send ? "send" : "receive", ret);
+ return ret;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+ struct nvmf_auth_dhchap_failure_data *data,
+ u16 transaction, u8 expected_msg)
+{
+ dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+ __func__, qid, data->auth_type, data->auth_id);
+
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ return data->rescode_exp;
+ }
+ if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+ data->auth_id != expected_msg) {
+ dev_warn(ctrl->device,
+ "qid %d invalid message %02x/%02x\n",
+ qid, data->auth_type, data->auth_id);
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ if (le16_to_cpu(data->t_id) != transaction) {
+ dev_warn(ctrl->device,
+ "qid %d invalid transaction ID %d\n",
+ qid, le16_to_cpu(data->t_id));
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+ size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+ if (size > CHAP_BUF_SIZE) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+ memset((u8 *)chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->sc_c = 0; /* No secure channel concatenation */
+ data->napd = 1;
+ data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+ data->auth_protocol[0].dhchap.halen = 3;
+ data->auth_protocol[0].dhchap.dhlen = 6;
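+	/*
+	 * idlist carries the hash identifiers at the start and the DH group
+	 * identifiers beginning at offset 30.
+	 */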
+ data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
+ data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
+ data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
+ data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
+ data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
+ data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
+ data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
+ data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
+ data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ size_t size = sizeof(*data) + data->hl + dhvlen;
+ const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
+ const char *hmac_name, *kpp_name;
+
+ if (size > CHAP_BUF_SIZE) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ hmac_name = nvme_auth_hmac_name(data->hashid);
+ if (!hmac_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid HASH ID %d\n",
+ chap->qid, data->hashid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return -EPROTO;
+ }
+
+ if (chap->hash_id == data->hashid && chap->shash_tfm &&
+ !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+ crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing hash %s\n",
+ chap->qid, hmac_name);
+ goto select_kpp;
+ }
+
+ /* Reset if hash cannot be reused */
+ if (chap->shash_tfm) {
+ crypto_free_shash(chap->shash_tfm);
+ chap->hash_id = 0;
+ chap->hash_len = 0;
+ }
+ chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(chap->shash_tfm)) {
+ dev_warn(ctrl->device,
+ "qid %d: failed to allocate hash %s, error %ld\n",
+ chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %d\n",
+ chap->qid, data->hl);
+ crypto_free_shash(chap->shash_tfm);
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return -EPROTO;
+ }
+
+ chap->hash_id = data->hashid;
+ chap->hash_len = data->hl;
+ dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+ chap->qid, hmac_name);
+
+select_kpp:
+ kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+ if (!kpp_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH group id %d\n",
+ chap->qid, data->dhgid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ /* Leave previous dh_tfm intact */
+ return -EPROTO;
+ }
+
+ if (chap->dhgroup_id == data->dhgid &&
+ (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing DH group %s\n",
+ chap->qid, gid_name);
+ goto skip_kpp;
+ }
+
+ /* Reset dh_tfm if it can't be reused */
+ if (chap->dh_tfm) {
+ crypto_free_kpp(chap->dh_tfm);
+ chap->dh_tfm = NULL;
+ }
+
+ if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
+ if (dhvlen == 0) {
+ dev_warn(ctrl->device,
+ "qid %d: empty DH value\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ return -EPROTO;
+ }
+
+ chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
+ if (IS_ERR(chap->dh_tfm)) {
+ int ret = PTR_ERR(chap->dh_tfm);
+
+ dev_warn(ctrl->device,
+ "qid %d: error %d initializing DH group %s\n",
+ chap->qid, ret, gid_name);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ chap->dh_tfm = NULL;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
+ chap->qid, gid_name);
+ } else if (dhvlen != 0) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH value for NULL DH\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EPROTO;
+ }
+ chap->dhgroup_id = data->dhgid;
+
+skip_kpp:
+ chap->s1 = le32_to_cpu(data->seqnum);
+ memcpy(chap->c1, data->cval, chap->hash_len);
+ if (dhvlen) {
+ chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
+ if (!chap->ctrl_key) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ chap->ctrl_key_len = dhvlen;
+ memcpy(chap->ctrl_key, data->cval + chap->hash_len,
+ dhvlen);
+ dev_dbg(ctrl->device, "ctrl public key %*ph\n",
+ (int)chap->ctrl_key_len, chap->ctrl_key);
+ }
+
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ size += 2 * chap->hash_len;
+
+ if (chap->host_key_len)
+ size += chap->host_key_len;
+
+ if (size > CHAP_BUF_SIZE) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->hl = chap->hash_len;
+ data->dhvlen = cpu_to_le16(chap->host_key_len);
+ memcpy(data->rval, chap->response, chap->hash_len);
+ if (ctrl->ctrl_key) {
+ get_random_bytes(chap->c2, chap->hash_len);
+ data->cvalid = 1;
+ chap->s2 = nvme_auth_get_seqnum();
+ memcpy(data->rval + chap->hash_len, chap->c2,
+ chap->hash_len);
+ dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, chap->c2);
+ } else {
+ memset(chap->c2, 0, chap->hash_len);
+ chap->s2 = 0;
+ }
+ data->seqnum = cpu_to_le32(chap->s2);
+ if (chap->host_key_len) {
+ dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
+ __func__, chap->qid,
+ chap->host_key_len, chap->host_key);
+ memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
+ chap->host_key_len);
+ }
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ if (chap->s2)
+ size += chap->hash_len;
+
+ if (size > CHAP_BUF_SIZE) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ if (data->hl != chap->hash_len) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %u\n",
+ chap->qid, data->hl);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return -EPROTO;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: authenticated with hash %s dhgroup %s\n",
+ nvme_auth_hmac_name(chap->hash_id),
+ nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+ if (!data->rvalid)
+ return 0;
+
+ /* Validate controller response */
+ if (memcmp(chap->response, data->rval, data->hl)) {
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, data->rval);
+ dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len,
+ chap->response);
+ dev_warn(ctrl->device,
+ "qid %d: controller authentication failed\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ECONNREFUSED;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: controller authenticated\n");
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ data->t_id = cpu_to_le16(chap->transaction);
+
+ return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = chap->status;
+
+ return size;
+}
+
+static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 buf[4], *challenge = chap->c1;
+ int ret;
+
+ dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s1, chap->transaction);
+
+ if (!chap->host_response) {
+ chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->opts->host->nqn);
+ if (IS_ERR(chap->host_response)) {
+ ret = PTR_ERR(chap->host_response);
+ chap->host_response = NULL;
+ return ret;
+ }
+ } else {
+ dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+ __func__, chap->qid);
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ chap->host_response, ctrl->host_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+
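+	/*
+	 * The host response is an HMAC over the (possibly augmented)
+	 * challenge, the sequence number, the transaction id, a separator
+	 * byte, the "HostHost" role string, the host NQN, another separator
+	 * byte and the subsystem NQN.
+	 */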
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c1)
+ kfree(challenge);
+ return ret;
+}
+
+static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 *ctrl_response;
+ u8 buf[4], *challenge = chap->c2;
+ int ret;
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->opts->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ return ret;
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ ctrl_response, ctrl->ctrl_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c2, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
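+	/*
+	 * The expected controller response mirrors the host response
+	 * calculation, with the "Controller" role string and the host and
+	 * subsystem NQNs swapped.
+	 */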
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s2, chap->transaction);
+ dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, challenge);
+ dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+ __func__, chap->qid, ctrl->opts->subsysnqn);
+ dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+ __func__, chap->qid, ctrl->opts->host->nqn);
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+	memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c2)
+ kfree(challenge);
+ kfree(ctrl_response);
+ return ret;
+}
+
+static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ int ret;
+
+ if (chap->host_key && chap->host_key_len) {
+ dev_dbg(ctrl->device,
+ "qid %d: reusing host key\n", chap->qid);
+ goto gen_sesskey;
+ }
+ ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
+ if (ret < 0) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+ chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
+
+ chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
+ if (!chap->host_key) {
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(chap->dh_tfm,
+ chap->host_key, chap->host_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate public key, error %d\n", ret);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
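+	/*
+	 * Derive the DH session key from our private key and the controller's
+	 * public value; it is later used to augment both challenges.
+	 */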
+gen_sesskey:
+ chap->sess_key_len = chap->host_key_len;
+ chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
+ if (!chap->sess_key) {
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
+ chap->ctrl_key, chap->ctrl_key_len,
+ chap->sess_key, chap->sess_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate shared secret, error %d\n", ret);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "shared secret %*ph\n",
+ (int)chap->sess_key_len, chap->sess_key);
+ return 0;
+}
+
+static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
+{
+ kfree_sensitive(chap->host_response);
+ chap->host_response = NULL;
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = 0;
+ chap->error = 0;
+ chap->s1 = 0;
+ chap->s2 = 0;
+ chap->transaction = 0;
+ memset(chap->c1, 0, sizeof(chap->c1));
+ memset(chap->c2, 0, sizeof(chap->c2));
+ mempool_free(chap->buf, nvme_chap_buf_pool);
+ chap->buf = NULL;
+}
+
+static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
+{
+ nvme_auth_reset_dhchap(chap);
+ if (chap->shash_tfm)
+ crypto_free_shash(chap->shash_tfm);
+ if (chap->dh_tfm)
+ crypto_free_kpp(chap->dh_tfm);
+}
+
+static void nvme_queue_auth_work(struct work_struct *work)
+{
+ struct nvme_dhchap_queue_context *chap =
+ container_of(work, struct nvme_dhchap_queue_context, auth_work);
+ struct nvme_ctrl *ctrl = chap->ctrl;
+ size_t tl;
+ int ret = 0;
+
+ /*
+ * Allocate a large enough buffer for the entire negotiation:
+	 * 4k is enough even for ffdhe8192.
+ */
+ chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
+ if (!chap->buf) {
+ chap->error = -ENOMEM;
+ return;
+ }
+
+ chap->transaction = ctrl->transaction++;
+
+ /* DH-HMAC-CHAP Step 1: send negotiate */
+ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ return;
+ }
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ return;
+ }
+
+ /* DH-HMAC-CHAP Step 2: receive challenge */
+ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, CHAP_BUF_SIZE);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
+ false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive challenge, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+ if (ret) {
+ chap->status = ret;
+ chap->error = -ECONNREFUSED;
+ return;
+ }
+
+ ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+ if (ret) {
+ /* Invalid challenge parameters */
+ chap->error = ret;
+ goto fail2;
+ }
+
+ if (chap->ctrl_key_len) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d DH exponential\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_exponential(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ dev_dbg(ctrl->device, "%s: qid %d host response\n",
+ __func__, chap->qid);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ if (ret) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ chap->error = ret;
+ goto fail2;
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+
+ /* DH-HMAC-CHAP Step 3: send reply */
+ dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 4: receive success1 */
+ dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, CHAP_BUF_SIZE);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
+ false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive success1, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid,
+ chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+ if (ret) {
+ chap->status = ret;
+ chap->error = -ECONNREFUSED;
+ return;
+ }
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ if (ctrl->ctrl_key) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d controller response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
+ if (ret) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+
+ ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+ if (ret) {
+ /* Controller authentication failed */
+ chap->error = -ECONNREFUSED;
+ goto fail2;
+ }
+
+ if (chap->s2) {
+ /* DH-HMAC-CHAP Step 5: send success2 */
+ dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ __func__, chap->qid);
+ tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret)
+ chap->error = ret;
+ }
+ if (!ret) {
+ chap->error = 0;
+ return;
+ }
+
+fail2:
+ if (chap->status == 0)
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ /*
+ * only update error if send failure2 failed and no other
+ * error had been set during authentication.
+ */
+ if (ret && !chap->error)
+ chap->error = ret;
+}
+
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ if (!ctrl->host_key) {
+ dev_warn(ctrl->device, "qid %d: no key\n", qid);
+ return -ENOKEY;
+ }
+
+ if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
+ dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
+ return -ENOKEY;
+ }
+
+ chap = &ctrl->dhchap_ctxs[qid];
+ cancel_work_sync(&chap->auth_work);
+ queue_work(nvme_auth_wq, &chap->auth_work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int ret;
+
+ chap = &ctrl->dhchap_ctxs[qid];
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ /* clear sensitive info */
+ nvme_auth_reset_dhchap(chap);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+static void nvme_ctrl_auth_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, dhchap_auth_work);
+ int ret, q;
+
+ /*
+	 * If the ctrl is not connected, bail as reconnect will handle
+ * authentication.
+ */
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
+ /* Authenticate admin queue first */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: error %d setting up authentication\n", ret);
+ return;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ return;
+ }
+
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_negotiate(ctrl, q);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: error %d setting up authentication\n",
+ q, ret);
+ break;
+ }
+ }
+
+ /*
+ * Failure is a soft-state; credentials remain valid until
+ * the controller terminates the connection.
+ */
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_wait(ctrl, q);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %d: authentication failed\n", q);
+ }
+}
+
+int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int i, ret;
+
+ mutex_init(&ctrl->dhchap_auth_mutex);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
+ if (!ctrl->opts)
+ return 0;
+ ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
+ &ctrl->host_key);
+ if (ret)
+ return ret;
+ ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
+ &ctrl->ctrl_key);
+ if (ret)
+ goto err_free_dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
+ return 0;
+
+ ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
+ sizeof(*chap), GFP_KERNEL);
+ if (!ctrl->dhchap_ctxs) {
+ ret = -ENOMEM;
+ goto err_free_dhchap_ctrl_secret;
+ }
+
+ for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
+ chap = &ctrl->dhchap_ctxs[i];
+ chap->qid = i;
+ chap->ctrl = ctrl;
+ INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
+ }
+
+ return 0;
+err_free_dhchap_ctrl_secret:
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+err_free_dhchap_secret:
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+ cancel_work_sync(&ctrl->dhchap_auth_work);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+ int i;
+
+ if (ctrl->dhchap_ctxs) {
+ for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
+ nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
+		kvfree(ctrl->dhchap_ctxs);
+ }
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
+
+int __init nvme_init_auth(void)
+{
+ nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_auth_wq)
+ return -ENOMEM;
+
+ nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
+ CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!nvme_chap_buf_cache)
+ goto err_destroy_workqueue;
+
+ nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
+ mempool_free_slab, nvme_chap_buf_cache);
+ if (!nvme_chap_buf_pool)
+ goto err_destroy_chap_buf_cache;
+
+ return 0;
+err_destroy_chap_buf_cache:
+ kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+ destroy_workqueue(nvme_auth_wq);
+ return -ENOMEM;
+}
+
+void __exit nvme_exit_auth(void)
+{
+ mempool_destroy(nvme_chap_buf_pool);
+ kmem_cache_destroy(nvme_chap_buf_cache);
+ destroy_workqueue(nvme_auth_wq);
+}
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
new file mode 100644
index 0000000000..20f46c2308
--- /dev/null
+++ b/drivers/nvme/host/constants.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver verbose errors
+ * Copyright (c) 2022, Oracle and/or its affiliates
+ */
+
+#include "nvme.h"
+
+static const char * const nvme_ops[] = {
+ [nvme_cmd_flush] = "Flush",
+ [nvme_cmd_write] = "Write",
+ [nvme_cmd_read] = "Read",
+ [nvme_cmd_write_uncor] = "Write Uncorrectable",
+ [nvme_cmd_compare] = "Compare",
+ [nvme_cmd_write_zeroes] = "Write Zeroes",
+ [nvme_cmd_dsm] = "Dataset Management",
+ [nvme_cmd_verify] = "Verify",
+ [nvme_cmd_resv_register] = "Reservation Register",
+ [nvme_cmd_resv_report] = "Reservation Report",
+ [nvme_cmd_resv_acquire] = "Reservation Acquire",
+ [nvme_cmd_resv_release] = "Reservation Release",
+ [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
+ [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
+ [nvme_cmd_zone_append] = "Zone Append",
+};
+
+static const char * const nvme_admin_ops[] = {
+ [nvme_admin_delete_sq] = "Delete SQ",
+ [nvme_admin_create_sq] = "Create SQ",
+ [nvme_admin_get_log_page] = "Get Log Page",
+ [nvme_admin_delete_cq] = "Delete CQ",
+ [nvme_admin_create_cq] = "Create CQ",
+ [nvme_admin_identify] = "Identify",
+ [nvme_admin_abort_cmd] = "Abort Command",
+ [nvme_admin_set_features] = "Set Features",
+ [nvme_admin_get_features] = "Get Features",
+ [nvme_admin_async_event] = "Async Event",
+ [nvme_admin_ns_mgmt] = "Namespace Management",
+ [nvme_admin_activate_fw] = "Activate Firmware",
+ [nvme_admin_download_fw] = "Download Firmware",
+ [nvme_admin_dev_self_test] = "Device Self Test",
+ [nvme_admin_ns_attach] = "Namespace Attach",
+ [nvme_admin_keep_alive] = "Keep Alive",
+ [nvme_admin_directive_send] = "Directive Send",
+ [nvme_admin_directive_recv] = "Directive Receive",
+ [nvme_admin_virtual_mgmt] = "Virtual Management",
+ [nvme_admin_nvme_mi_send] = "NVMe Send MI",
+ [nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
+ [nvme_admin_dbbuf] = "Doorbell Buffer Config",
+ [nvme_admin_format_nvm] = "Format NVM",
+ [nvme_admin_security_send] = "Security Send",
+ [nvme_admin_security_recv] = "Security Receive",
+ [nvme_admin_sanitize_nvm] = "Sanitize NVM",
+ [nvme_admin_get_lba_status] = "Get LBA Status",
+};
+
+static const char * const nvme_fabrics_ops[] = {
+ [nvme_fabrics_type_property_set] = "Property Set",
+ [nvme_fabrics_type_property_get] = "Property Get",
+ [nvme_fabrics_type_connect] = "Connect",
+ [nvme_fabrics_type_auth_send] = "Authentication Send",
+ [nvme_fabrics_type_auth_receive] = "Authentication Receive",
+};
+
+static const char * const nvme_statuses[] = {
+ [NVME_SC_SUCCESS] = "Success",
+ [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
+ [NVME_SC_INVALID_FIELD] = "Invalid Field in Command",
+ [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict",
+ [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error",
+ [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification",
+ [NVME_SC_INTERNAL] = "Internal Error",
+ [NVME_SC_ABORT_REQ] = "Command Abort Requested",
+ [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion",
+ [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command",
+ [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command",
+ [NVME_SC_INVALID_NS] = "Invalid Namespace or Format",
+ [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error",
+ [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor",
+ [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors",
+ [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid",
+ [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer",
+ [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid",
+ [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded",
+ [NVME_SC_OP_DENIED] = "Operation Denied",
+ [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid",
+ [NVME_SC_RESERVED] = "Reserved",
+ [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format",
+ [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired",
+ [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid",
+ [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort",
+ [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed",
+ [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress",
+ [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid",
+ [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB",
+ [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
+ [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
+ [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
+ [NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY] = "Admin Command Media Not Ready",
+ [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
+ [NVME_SC_LBA_RANGE] = "LBA Out of Range",
+ [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
+ [NVME_SC_NS_NOT_READY] = "Namespace Not Ready",
+ [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict",
+ [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress",
+ [NVME_SC_CQ_INVALID] = "Completion Queue Invalid",
+ [NVME_SC_QID_INVALID] = "Invalid Queue Identifier",
+ [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size",
+ [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded",
+ [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */
+ [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded",
+ [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot",
+ [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image",
+ [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector",
+ [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page",
+ [NVME_SC_INVALID_FORMAT] = "Invalid Format",
+ [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset",
+ [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion",
+ [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable",
+ [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable",
+ [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific",
+ [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset",
+ [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset",
+ [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation",
+ [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited",
+ [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range",
+ [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity",
+ [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable",
+ [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached",
+ [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private",
+ [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
+ [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
+ [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
+ [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
+ [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
+ [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
+ [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
+ [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources",
+ [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier",
+ [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited",
+ [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid",
+ [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed",
+ [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
+ [NVME_SC_INVALID_PI] = "Invalid Protection Information",
+ [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
+ [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
+ [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
+ [NVME_SC_ZONE_FULL] = "Zone Is Full",
+ [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
+ [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline",
+ [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write",
+ [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones",
+ [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones",
+ [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition",
+ [NVME_SC_WRITE_FAULT] = "Write Fault",
+ [NVME_SC_READ_ERROR] = "Unrecovered Read Error",
+ [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error",
+ [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error",
+ [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error",
+ [NVME_SC_COMPARE_FAILED] = "Compare Failure",
+ [NVME_SC_ACCESS_DENIED] = "Access Denied",
+ [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+ [NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error",
+ [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
+ [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
+ [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+ [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error",
+ [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
+ [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
+};
+
+const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ status &= 0x7ff;
+ if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
+		return nvme_statuses[status];
+ return "Unknown";
+}
+
+const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
+ return nvme_ops[opcode];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
+
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
+ return nvme_admin_ops[opcode];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
+
+const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
+ return nvme_fabrics_ops[opcode];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(nvme_get_fabrics_opcode_str);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
new file mode 100644
index 0000000000..d4564a2517
--- /dev/null
+++ b/drivers/nvme/host/core.c
@@ -0,0 +1,4803 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pr.h>
+#include <linux/ptrace.h>
+#include <linux/nvme_ioctl.h>
+#include <linux/pm_qos.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#define NVME_MINORS (1U << MINORBITS)
+
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+ bool is_removed;
+};
+
+unsigned int admin_timeout = 60;
+module_param(admin_timeout, uint, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+EXPORT_SYMBOL_GPL(admin_timeout);
+
+unsigned int nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+EXPORT_SYMBOL_GPL(nvme_io_timeout);
+
+static unsigned char shutdown_timeout = 5;
+module_param(shutdown_timeout, byte, 0644);
+MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
+
+static u8 nvme_max_retries = 5;
+module_param_named(max_retries, nvme_max_retries, byte, 0644);
+MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
+
+static unsigned long default_ps_max_latency_us = 100000;
+module_param(default_ps_max_latency_us, ulong, 0644);
+MODULE_PARM_DESC(default_ps_max_latency_us,
+ "max power saving latency for new devices; use PM QOS to change per device");
+
+static bool force_apst;
+module_param(force_apst, bool, 0644);
+MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
+
+static unsigned long apst_primary_timeout_ms = 100;
+module_param(apst_primary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_timeout_ms,
+ "primary APST timeout in ms");
+
+static unsigned long apst_secondary_timeout_ms = 2000;
+module_param(apst_secondary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_timeout_ms,
+ "secondary APST timeout in ms");
+
+static unsigned long apst_primary_latency_tol_us = 15000;
+module_param(apst_primary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_latency_tol_us,
+ "primary APST latency tolerance in us");
+
+static unsigned long apst_secondary_latency_tol_us = 100000;
+module_param(apst_secondary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_latency_tol_us,
+ "secondary APST latency tolerance in us");
+
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq hosts works such as scan, AEN handling, FW activation,
+ * keep-alive and periodic reconnects. nvme_reset_wq runs reset works,
+ * which also flush works hosted on nvme_wq for serialization purposes.
+ * nvme_delete_wq hosts controller deletion works, which flush reset
+ * works for serialization.
+ */
+struct workqueue_struct *nvme_wq;
+EXPORT_SYMBOL_GPL(nvme_wq);
+
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
+static LIST_HEAD(nvme_subsystems);
+static DEFINE_MUTEX(nvme_subsystems_lock);
+
+static DEFINE_IDA(nvme_instance_ida);
+static dev_t nvme_ctrl_base_chr_devt;
+static struct class *nvme_class;
+static struct class *nvme_subsys_class;
+
+static DEFINE_IDA(nvme_ns_chr_minor_ida);
+static dev_t nvme_ns_chr_devt;
+static struct class *nvme_ns_chr_class;
+
+static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd);
+
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+ /*
+	 * Only queue new scan work when admin and IO queues are both alive.
+ */
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
+ queue_work(nvme_wq, &ctrl->scan_work);
+}
+
+/*
+ * Use this function to proceed with scheduling reset_work for a controller
+ * that had previously been set to the resetting state. This is intended for
+ * code paths that can't be interrupted by other reset attempts. A hot removal
+ * may prevent this from succeeding.
+ */
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+{
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
+ return -EBUSY;
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+
+static void nvme_failfast_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, failfast_work);
+
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
+ return;
+
+ set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+ dev_info(ctrl->device, "failfast expired\n");
+ nvme_kick_requeue_lists(ctrl);
+}
+
+static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
+ return;
+
+ schedule_delayed_work(&ctrl->failfast_work,
+ ctrl->opts->fast_io_fail_tmo * HZ);
+}
+
+static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->opts)
+ return;
+
+ cancel_delayed_work_sync(&ctrl->failfast_work);
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+}
+
+
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
+
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ ret = nvme_reset_ctrl(ctrl);
+ if (!ret) {
+ flush_work(&ctrl->reset_work);
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
+ ret = -ENETRESET;
+ }
+
+ return ret;
+}
+
+static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ dev_info(ctrl->device,
+ "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
+
+ flush_work(&ctrl->reset_work);
+ nvme_stop_ctrl(ctrl);
+ nvme_remove_namespaces(ctrl);
+ ctrl->ops->delete_ctrl(ctrl);
+ nvme_uninit_ctrl(ctrl);
+}
+
+static void nvme_delete_ctrl_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, delete_work);
+
+ nvme_do_delete_ctrl(ctrl);
+}
+
+int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+ if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
+
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+ /*
+	 * Keep a reference until nvme_do_delete_ctrl() completes,
+ * since ->delete_ctrl can free the controller.
+ */
+ nvme_get_ctrl(ctrl);
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
+ nvme_do_delete_ctrl(ctrl);
+ nvme_put_ctrl(ctrl);
+}
+
+static blk_status_t nvme_error_status(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_SUCCESS:
+ return BLK_STS_OK;
+ case NVME_SC_CAP_EXCEEDED:
+ return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ case NVME_SC_CMD_INTERRUPTED:
+ case NVME_SC_NS_NOT_READY:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ return BLK_STS_NOTSUPP;
+ case NVME_SC_WRITE_FAULT:
+ case NVME_SC_READ_ERROR:
+ case NVME_SC_UNWRITTEN_BLOCK:
+ case NVME_SC_ACCESS_DENIED:
+ case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
+ return BLK_STS_MEDIUM;
+ case NVME_SC_GUARD_CHECK:
+ case NVME_SC_APPTAG_CHECK:
+ case NVME_SC_REFTAG_CHECK:
+ case NVME_SC_INVALID_PI:
+ return BLK_STS_PROTECTION;
+ case NVME_SC_RESERVATION_CONFLICT:
+ return BLK_STS_RESV_CONFLICT;
+ case NVME_SC_HOST_PATH_ERROR:
+ return BLK_STS_TRANSPORT;
+ case NVME_SC_ZONE_TOO_MANY_ACTIVE:
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+ case NVME_SC_ZONE_TOO_MANY_OPEN:
+ return BLK_STS_ZONE_OPEN_RESOURCE;
+ default:
+ return BLK_STS_IOERR;
+ }
+}
+
+static void nvme_retry_req(struct request *req)
+{
+ unsigned long delay = 0;
+ u16 crd;
+
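+	/*
+	 * CRD selects one of the controller's Command Retry Delay Times;
+	 * CRDT values are in units of 100 ms, so convert to milliseconds
+	 * for the requeue delay below.
+	 */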
+ /* The mask and shift result must be <= 3 */
+ crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
+ if (crd)
+ delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
+
+ nvme_req(req)->retries++;
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q, delay);
+}
+
+static void nvme_log_error(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ if (ns) {
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+ ns->disk ? ns->disk->disk_name : "?",
+ nvme_get_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
+ (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+ return;
+ }
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
+ dev_name(nr->ctrl->device),
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+}
+
+enum nvme_disposition {
+ COMPLETE,
+ RETRY,
+ FAILOVER,
+ AUTHENTICATE,
+};
+
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
+{
+ if (likely(nvme_req(req)->status == 0))
+ return COMPLETE;
+
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
+ if (blk_noretry_request(req) ||
+ (nvme_req(req)->status & NVME_SC_DNR) ||
+ nvme_req(req)->retries >= nvme_max_retries)
+ return COMPLETE;
+
+ if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (nvme_is_path_error(nvme_req(req)->status) ||
+ blk_queue_dying(req->q))
+ return FAILOVER;
+ } else {
+ if (blk_queue_dying(req->q))
+ return COMPLETE;
+ }
+
+ return RETRY;
+}
+
+static inline void nvme_end_req_zoned(struct request *req)
+{
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = nvme_lba_to_sect(req->q->queuedata,
+ le64_to_cpu(nvme_req(req)->result.u64));
+}
+
+static inline void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
+
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
+ nvme_log_error(req);
+ nvme_end_req_zoned(req);
+ nvme_trace_bio_complete(req);
+ if (req->cmd_flags & REQ_NVME_MPATH)
+ nvme_mpath_end_request(req);
+ blk_mq_end_request(req, status);
+}
+
+void nvme_complete_rq(struct request *req)
+{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
+ trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
+ /*
+ * Completions of long-running commands should not be able to
+ * defer sending of periodic keep alives, since the controller
+ * may have completed processing such commands a long time ago
+ * (arbitrarily close to command submission time).
+	 * req->deadline was set to jiffies + req->timeout when the command
+	 * was started, so req->deadline - req->timeout is the command
+	 * submission time in jiffies.
+ */
+ if (ctrl->kas &&
+ req->deadline - req->timeout >= ctrl->ka_last_check_time)
+ ctrl->comp_seen = true;
+
+ switch (nvme_decide_disposition(req)) {
+ case COMPLETE:
+ nvme_end_req(req);
+ return;
+ case RETRY:
+ nvme_retry_req(req);
+ return;
+ case FAILOVER:
+ nvme_failover_req(req);
+ return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_complete_rq);
+
+void nvme_complete_batch_req(struct request *req)
+{
+ trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+ nvme_end_req_zoned(req);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
+
+/*
+ * Called to unwind from ->queue_rq on a failed command submission so that the
+ * multipathing code gets called to potentially failover to another path.
+ * The caller needs to unwind all transport specific resource allocations and
+ * must propagate the return value.
+ */
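+/*
+ * A minimal usage sketch (illustrative only, not part of this file): a fabrics
+ * transport's ->queue_rq() error path, after releasing its own per-request
+ * resources, can simply do:
+ *
+ *	return nvme_host_path_error(req);
+ */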
+blk_status_t nvme_host_path_error(struct request *req)
+{
+ nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_set_request_complete(req);
+ nvme_complete_rq(req);
+ return BLK_STS_OK;
+}
+EXPORT_SYMBOL_GPL(nvme_host_path_error);
+
+bool nvme_cancel_request(struct request *req, void *data)
+{
+ dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
+ "Cancelling I/O %d", req->tag);
+
+ /* don't abort one completed or idle request */
+ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
+ return true;
+
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ blk_mq_complete_request(req);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_request);
+
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->tagset) {
+ blk_mq_tagset_busy_iter(ctrl->tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->admin_tagset) {
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
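+/*
+ * Overview (derived from the switch below) of the accepted state transitions;
+ * anything else is rejected and the state is left unchanged:
+ *
+ *	NEW / RESETTING / CONNECTING	-> LIVE
+ *	NEW / LIVE			-> RESETTING
+ *	NEW / RESETTING			-> CONNECTING
+ *	LIVE / RESETTING / CONNECTING	-> DELETING
+ *	DELETING / DEAD			-> DELETING_NOIO
+ *	DELETING			-> DEAD
+ */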
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ enum nvme_ctrl_state new_state)
+{
+ enum nvme_ctrl_state old_state;
+ unsigned long flags;
+ bool changed = false;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ old_state = nvme_ctrl_state(ctrl);
+ switch (new_state) {
+ case NVME_CTRL_LIVE:
+ switch (old_state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_RESETTING:
+ switch (old_state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_CONNECTING:
+ switch (old_state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RESETTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DELETING:
+ switch (old_state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DELETING_NOIO:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DEAD:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (changed) {
+ WRITE_ONCE(ctrl->state, new_state);
+ wake_up_all(&ctrl->state_wq);
+ }
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (!changed)
+ return false;
+
+ if (new_state == NVME_CTRL_LIVE) {
+ if (old_state == NVME_CTRL_CONNECTING)
+ nvme_stop_failfast_work(ctrl);
+ nvme_kick_requeue_lists(ctrl);
+ } else if (new_state == NVME_CTRL_CONNECTING &&
+ old_state == NVME_CTRL_RESETTING) {
+ nvme_start_failfast_work(ctrl);
+ }
+ return changed;
+}
+EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
+/*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+ */
+bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+{
+ wait_event(ctrl->state_wq,
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+ nvme_state_terminal(ctrl));
+ return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_reset);
+
+static void nvme_free_ns_head(struct kref *ref)
+{
+ struct nvme_ns_head *head =
+ container_of(ref, struct nvme_ns_head, ref);
+
+ nvme_mpath_remove_disk(head);
+ ida_free(&head->subsys->ns_ida, head->instance);
+ cleanup_srcu_struct(&head->srcu);
+ nvme_put_subsystem(head->subsys);
+ kfree(head);
+}
+
+bool nvme_tryget_ns_head(struct nvme_ns_head *head)
+{
+ return kref_get_unless_zero(&head->ref);
+}
+
+void nvme_put_ns_head(struct nvme_ns_head *head)
+{
+ kref_put(&head->ref, nvme_free_ns_head);
+}
+
+static void nvme_free_ns(struct kref *kref)
+{
+ struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
+
+ put_disk(ns->disk);
+ nvme_put_ns_head(ns->head);
+ nvme_put_ctrl(ns->ctrl);
+ kfree(ns);
+}
+
+static inline bool nvme_get_ns(struct nvme_ns *ns)
+{
+ return kref_get_unless_zero(&ns->kref);
+}
+
+void nvme_put_ns(struct nvme_ns *ns)
+{
+ kref_put(&ns->kref, nvme_free_ns);
+}
+EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
+
+static inline void nvme_clear_nvme_request(struct request *req)
+{
+ nvme_req(req)->status = 0;
+ nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
+ req->rq_flags |= RQF_DONTPREP;
+}
+
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
+{
+ if (req->q->queuedata)
+ req->timeout = NVME_IO_TIMEOUT;
+ else /* no queuedata implies admin queue */
+ req->timeout = NVME_ADMIN_TIMEOUT;
+
+ /* passthru commands should let the driver set the SGL flags */
+ cmd->common.flags &= ~NVME_CMD_SGL_ALL;
+
+ req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ if (req->mq_hctx->type == HCTX_TYPE_POLL)
+ req->cmd_flags |= REQ_POLLED;
+ nvme_clear_nvme_request(req);
+ req->rq_flags |= RQF_QUIET;
+ memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+}
+EXPORT_SYMBOL_GPL(nvme_init_request);
+
+/*
+ * For a command that we are not in a state to send to the device, the default
+ * action is to busy it and retry it after the controller state is recovered.
+ * However, if the controller is deleting, or if the request is marked for
+ * failfast or nvme multipath, it is failed immediately.
+ *
+ * Note: commands used to initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq)
+{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state != NVME_CTRL_DELETING_NOIO &&
+ state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DEAD &&
+ !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
+ !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
+ return nvme_host_path_error(rq);
+}
+EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
+
+bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
+{
+ struct nvme_request *req = nvme_req(rq);
+
+ /*
+	 * Currently we have a problem sending passthru commands
+	 * on the admin_q if the controller is not LIVE, because we can't
+	 * make sure that they are going out after the admin connect,
+	 * controller enable and/or other commands in the initialization
+	 * sequence. Until the controller is LIVE, fail with
+	 * BLK_STS_RESOURCE so that they will be rescheduled.
+ */
+ if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
+ return false;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+ * Only allow commands on a live queue, except for the connect
+		 * command, which is required to set the queue live in the
+		 * appropriate states.
+ */
+ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_CONNECTING:
+ if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
+ return true;
+ break;
+ default:
+ break;
+ case NVME_CTRL_DEAD:
+ return false;
+ }
+ }
+
+ return queue_live;
+}
+EXPORT_SYMBOL_GPL(__nvme_check_ready);
+
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+ struct nvme_command *cmnd)
+{
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
+}
+
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd)
+{
+ unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
+ struct nvme_dsm_range *range;
+ struct bio *bio;
+
+ /*
+ * Some devices do not consider the DSM 'Number of Ranges' field when
+	 * determining how much data to DMA. Always allocate memory for the
+	 * maximum number of segments to prevent the device from reading
+	 * beyond the end of the buffer.
+ */
+ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
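+	/*
+	 * Note (assuming the usual NVME_DSM_MAX_RANGES of 256 and 16-byte
+	 * ranges): alloc_size is a fixed 4 KiB, which is why a single
+	 * pre-allocated controller discard page can serve as the fallback
+	 * below.
+	 */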
+
+ range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!range) {
+ /*
+		 * If we fail to allocate our range, fall back to the controller
+ * discard page. If that's also busy, it's safe to return
+ * busy, as we know we can make progress once that's freed.
+ */
+ if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
+ return BLK_STS_RESOURCE;
+
+ range = page_address(ns->ctrl->discard_page);
+ }
+
+ if (queue_max_discard_segments(req->q) == 1) {
+ u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+ u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+ range[0].cattr = cpu_to_le32(0);
+ range[0].nlb = cpu_to_le32(nlb);
+ range[0].slba = cpu_to_le64(slba);
+ n = 1;
+ } else {
+ __rq_for_each_bio(bio, req) {
+ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+ if (n < segments) {
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ }
+ n++;
+ }
+ }
+
+ if (WARN_ON_ONCE(n != segments)) {
+ if (virt_to_page(range) == ns->ctrl->discard_page)
+ clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ else
+ kfree(range);
+ return BLK_STS_IOERR;
+ }
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->dsm.opcode = nvme_cmd_dsm;
+ cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->dsm.nr = cpu_to_le32(segments - 1);
+ cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ bvec_set_virt(&req->special_vec, range, alloc_size);
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+ return BLK_STS_OK;
+}
+
+static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
+ struct request *req)
+{
+ u32 upper, lower;
+ u64 ref48;
+
+ /* both rw and write zeroes share the same reftag format */
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+ cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
+ break;
+ case NVME_NVM_NS_64B_GUARD:
+ ref48 = ext_pi_ref_tag(req);
+ lower = lower_32_bits(ref48);
+ upper = upper_32_bits(ref48);
+
+ cmnd->rw.reftag = cpu_to_le32(lower);
+ cmnd->rw.cdw3 = cpu_to_le32(upper);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd)
+{
+ memset(cmnd, 0, sizeof(*cmnd));
+
+ if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+ return nvme_setup_discard(ns, req, cmnd);
+
+ cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
+ cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->write_zeroes.slba =
+ cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ cmnd->write_zeroes.length =
+ cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+ if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
+ cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
+
+ if (nvme_ns_has_pi(ns)) {
+ cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ nvme_set_ref_tag(ns, cmnd, req);
+ break;
+ }
+ }
+
+ return BLK_STS_OK;
+}
+
+static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_opcode op)
+{
+ u16 control = 0;
+ u32 dsmgmt = 0;
+
+ if (req->cmd_flags & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ if (req->cmd_flags & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ cmnd->rw.opcode = op;
+ cmnd->rw.flags = 0;
+ cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->rw.cdw2 = 0;
+ cmnd->rw.cdw3 = 0;
+ cmnd->rw.metadata = 0;
+ cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
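+	/*
+	 * The length (NLB) field is 0's based. Worked example: a 4 KiB read
+	 * on a namespace formatted with 512 byte blocks covers 8 blocks and
+	 * is therefore encoded as length = 7.
+	 */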
+ cmnd->rw.reftag = 0;
+ cmnd->rw.apptag = 0;
+ cmnd->rw.appmask = 0;
+
+ if (ns->ms) {
+ /*
+		 * If formatted with metadata, the block layer always provides a
+ * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
+ * we enable the PRACT bit for protection information or set the
+ * namespace capacity to zero to prevent any I/O.
+ */
+ if (!blk_integrity_rq(req)) {
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+ return BLK_STS_NOTSUPP;
+ control |= NVME_RW_PRINFO_PRACT;
+ }
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE3:
+ control |= NVME_RW_PRINFO_PRCHK_GUARD;
+ break;
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ control |= NVME_RW_PRINFO_PRCHK_GUARD |
+ NVME_RW_PRINFO_PRCHK_REF;
+ if (op == nvme_cmd_zone_append)
+ control |= NVME_RW_APPEND_PIREMAP;
+ nvme_set_ref_tag(ns, cmnd, req);
+ break;
+ }
+ }
+
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	return BLK_STS_OK;
+}
+
+void nvme_cleanup_cmd(struct request *req)
+{
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
+ if (req->special_vec.bv_page == ctrl->discard_page)
+ clear_bit_unlock(0, &ctrl->discard_page_busy);
+ else
+ kfree(bvec_virt(&req->special_vec));
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
+{
+ struct nvme_command *cmd = nvme_req(req)->cmd;
+ blk_status_t ret = BLK_STS_OK;
+
+ if (!(req->rq_flags & RQF_DONTPREP))
+ nvme_clear_nvme_request(req);
+
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ /* these are setup prior to execution in nvme_init_request() */
+ break;
+ case REQ_OP_FLUSH:
+ nvme_setup_flush(ns, cmd);
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_RESET:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ ret = nvme_setup_write_zeroes(ns, req, cmd);
+ break;
+ case REQ_OP_DISCARD:
+ ret = nvme_setup_discard(ns, req, cmd);
+ break;
+ case REQ_OP_READ:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
+ break;
+ case REQ_OP_WRITE:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
+ break;
+ case REQ_OP_ZONE_APPEND:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_STS_IOERR;
+ }
+
+ cmd->common.command_id = nvme_cid(req);
+ trace_nvme_setup_cmd(req, cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+
+/*
+ * Return values:
+ * 0: success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+int nvme_execute_rq(struct request *rq, bool at_head)
+{
+ blk_status_t status;
+
+ status = blk_execute_rq(rq, at_head);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ return -EINTR;
+ if (nvme_req(rq)->status)
+ return nvme_req(rq)->status;
+ return blk_status_to_errno(status);
+}
+EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
+
+/*
+ * Returns 0 on success. If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code.
+ */
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ union nvme_result *result, void *buffer, unsigned bufflen,
+ int qid, int at_head, blk_mq_req_flags_t flags)
+{
+ struct request *req;
+ int ret;
+
+ if (qid == NVME_QID_ANY)
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ else
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ qid - 1);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ nvme_init_request(req, cmd);
+
+ if (buffer && bufflen) {
+ ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+ if (ret)
+ goto out;
+ }
+
+ ret = nvme_execute_rq(req, at_head);
+ if (result && ret >= 0)
+ *result = nvme_req(req)->result;
+ out:
+ blk_mq_free_request(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
+
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ void *buffer, unsigned bufflen)
+{
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
+ NVME_QID_ANY, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+ u32 effects = 0;
+
+ if (ns) {
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ dev_warn_once(ctrl->device,
+ "IO command:%02x has unusual effects:%08x\n",
+ opcode, effects);
+
+ /*
+		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
+		 * which would deadlock when done on an I/O command.  Note that
+		 * we already warn about an unusual effect above.
+ */
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
+ } else {
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+ }
+
+ return effects;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
+
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+ u32 effects = nvme_command_effects(ctrl, ns, opcode);
+
+ /*
+ * For simplicity, IO to all namespaces is quiesced even if the command
+ * effects say only one namespace is affected.
+ */
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+ mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+ return effects;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
+
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+ struct nvme_command *cmd, int status)
+{
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+ nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
+ mutex_unlock(&ctrl->scan_lock);
+ }
+ if (effects & NVME_CMD_EFFECTS_CCC) {
+ if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
+ &ctrl->flags)) {
+ dev_info(ctrl->device,
+"controller capabilities changed, reset may be required to take effect.\n");
+ }
+ }
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
+ nvme_queue_scan(ctrl);
+ flush_work(&ctrl->scan_work);
+ }
+ if (ns)
+ return;
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
+ case NVME_FEAT_KATO:
+ /*
+			 * The keep alive interval on the host should be
+			 * updated when KATO is modified by a Set Features
+			 * command.
+ */
+ if (!status)
+ nvme_update_keep_alive(ctrl, cmd);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
+
+/*
+ * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
+ *
+ * The host should send Keep Alive commands at half of the Keep Alive Timeout
+ * accounting for transport roundtrip times [..].
+ */
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+ unsigned long delay = ctrl->kato * HZ / 2;
+
+ /*
+ * When using Traffic Based Keep Alive, we need to run
+ * nvme_keep_alive_work at twice the normal frequency, as one
+ * command completion can postpone sending a keep alive command
+ * by up to twice the delay between runs.
+ */
+ if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+ delay /= 2;
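+	/*
+	 * Worked example (illustrative): kato = 10 seconds gives a 5 second
+	 * work period, halved again to 2.5 seconds when TBKAS is in use.
+	 */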
+ return delay;
+}
+
+static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+{
+ queue_delayed_work(nvme_wq, &ctrl->ka_work,
+ nvme_keep_alive_work_period(ctrl));
+}
+
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
+{
+ struct nvme_ctrl *ctrl = rq->end_io_data;
+ unsigned long flags;
+ bool startka = false;
+ unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+ /*
+ * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+ * at the desired frequency.
+ */
+ if (rtt <= delay) {
+ delay -= rtt;
+ } else {
+ dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+ jiffies_to_msecs(rtt));
+ delay = 0;
+ }
+
+ blk_mq_free_request(rq);
+
+ if (status) {
+ dev_err(ctrl->device,
+ "failed nvme_keep_alive_end_io error=%d\n",
+ status);
+ return RQ_END_IO_NONE;
+ }
+
+ ctrl->ka_last_check_time = jiffies;
+ ctrl->comp_seen = false;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->state == NVME_CTRL_LIVE ||
+ ctrl->state == NVME_CTRL_CONNECTING)
+ startka = true;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (startka)
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
+ return RQ_END_IO_NONE;
+}
+
+static void nvme_keep_alive_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, ka_work);
+ bool comp_seen = ctrl->comp_seen;
+ struct request *rq;
+
+ ctrl->ka_last_check_time = jiffies;
+
+ if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
+ dev_dbg(ctrl->device,
+ "reschedule traffic based keep-alive timer\n");
+ ctrl->comp_seen = false;
+ nvme_queue_keep_alive_work(ctrl);
+ return;
+ }
+
+ rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(rq)) {
+ /* allocation failure, reset the controller */
+ dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
+ nvme_reset_ctrl(ctrl);
+ return;
+ }
+ nvme_init_request(rq, &ctrl->ka_cmd);
+
+ rq->timeout = ctrl->kato * HZ;
+ rq->end_io = nvme_keep_alive_end_io;
+ rq->end_io_data = ctrl;
+ blk_execute_rq_nowait(rq, false);
+}
+
+static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ nvme_queue_keep_alive_work(ctrl);
+}
+
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd)
+{
+ unsigned int new_kato =
+ DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
+
+ dev_info(ctrl->device,
+ "keep alive interval updated from %u ms to %u ms\n",
+ ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
+
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = new_kato;
+ nvme_start_keep_alive(ctrl);
+}
+
+/*
+ * In NVMe 1.0 the CNS field was just a binary controller or namespace
+ * flag, thus sending any new CNS opcodes has a big chance of not working.
+ * Qemu unfortunately had that bug after reporting a 1.1 version compliance
+ * (but not for any later version).
+ */
+static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
+ return ctrl->vs < NVME_VS(1, 2, 0);
+ return ctrl->vs < NVME_VS(1, 1, 0);
+}
+
+static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+{
+ struct nvme_command c = { };
+ int error;
+
+ /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CTRL;
+
+ *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
+ if (!*id)
+ return -ENOMEM;
+
+ error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+ sizeof(struct nvme_id_ctrl));
+ if (error)
+ kfree(*id);
+ return error;
+}
+
+static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ struct nvme_ns_id_desc *cur, bool *csi_seen)
+{
+ const char *warn_str = "ctrl returned bogus length:";
+ void *data = cur;
+
+ switch (cur->nidt) {
+ case NVME_NIDT_EUI64:
+ if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_EUI64_LEN;
+ memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
+ return NVME_NIDT_EUI64_LEN;
+ case NVME_NIDT_NGUID:
+ if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_NGUID_LEN;
+ memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
+ return NVME_NIDT_NGUID_LEN;
+ case NVME_NIDT_UUID:
+ if (cur->nidl != NVME_NIDT_UUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_UUID_LEN;
+ uuid_copy(&ids->uuid, data + sizeof(*cur));
+ return NVME_NIDT_UUID_LEN;
+ case NVME_NIDT_CSI:
+ if (cur->nidl != NVME_NIDT_CSI_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
+ *csi_seen = true;
+ return NVME_NIDT_CSI_LEN;
+ default:
+ /* Skip unknown types */
+ return cur->nidl;
+ }
+}
+
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_command c = { };
+ bool csi_seen = false;
+ int status, pos, len;
+ void *data;
+
+ if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
+ return 0;
+ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
+ return 0;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(info->nsid);
+ c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
+
+ data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
+ NVME_IDENTIFY_DATA_SIZE);
+ if (status) {
+ dev_warn(ctrl->device,
+ "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
+ info->nsid, status);
+ goto free_data;
+ }
+
+ for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+ struct nvme_ns_id_desc *cur = data + pos;
+
+ if (cur->nidl == 0)
+ break;
+
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
+ if (len < 0)
+ break;
+
+ len += sizeof(*cur);
+ }
+
+ if (nvme_multi_css(ctrl) && !csi_seen) {
+ dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
+ info->nsid);
+ status = -EINVAL;
+ }
+
+free_data:
+ kfree(data);
+ return status;
+}
+
+static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_id_ns **id)
+{
+ struct nvme_command c = { };
+ int error;
+
+ /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.cns = NVME_ID_CNS_NS;
+
+ *id = kmalloc(sizeof(**id), GFP_KERNEL);
+ if (!*id)
+ return -ENOMEM;
+
+ error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
+ if (error) {
+ dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
+ kfree(*id);
+ }
+ return error;
+}
+
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+ ret = -ENODEV;
+ goto error;
+ }
+
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+ dev_info(ctrl->device,
+ "Ignoring bogus Namespace Identifiers\n");
+ } else {
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
+ }
+
+error:
+ kfree(id);
+ return ret;
+}
+
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns_cs_indep *id;
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(info->nsid),
+ .identify.cns = NVME_ID_CNS_NS_CS_INDEP,
+ };
+ int ret;
+
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
+ }
+ kfree(id);
+ return ret;
+}
+
+static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen, u32 *result)
+{
+ union nvme_result res = { 0 };
+ struct nvme_command c = { };
+ int ret;
+
+ c.features.opcode = op;
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
+ buffer, buflen, NVME_QID_ANY, 0, 0);
+ if (ret >= 0 && result)
+ *result = le32_to_cpu(res.u32);
+ return ret;
+}
+
+int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result)
+{
+ return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
+ buflen, result);
+}
+EXPORT_SYMBOL_GPL(nvme_set_features);
+
+int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result)
+{
+ return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
+ buflen, result);
+}
+EXPORT_SYMBOL_GPL(nvme_get_features);
+
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
+{
+ u32 q_count = (*count - 1) | ((*count - 1) << 16);
+ u32 result;
+ int status, nr_io_queues;
+
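+	/*
+	 * The Number of Queues feature encodes NSQR and NCQR as 0's based
+	 * values, hence (*count - 1) packed into the low and high 16 bits
+	 * above, and the "min(...) + 1" conversion back below.
+	 */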
+ status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
+ &result);
+ if (status < 0)
+ return status;
+
+ /*
+ * Degraded controllers might return an error when setting the queue
+ * count. We still want to be able to bring them online and offer
+	 * access to the admin queue, as that might be the only way to fix them up.
+ */
+ if (status > 0) {
+ dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
+ *count = 0;
+ } else {
+ nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+ *count = min(*count, nr_io_queues);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_set_queue_count);
+
+#define NVME_AEN_SUPPORTED \
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
+ NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
+
+static void nvme_enable_aen(struct nvme_ctrl *ctrl)
+{
+ u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
+ int status;
+
+ if (!supported_aens)
+ return;
+
+ status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+ NULL, 0, &result);
+ if (status)
+ dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
+ supported_aens);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
+}
+
+static int nvme_ns_open(struct nvme_ns *ns)
+{
+ /* should never be called due to GENHD_FL_HIDDEN */
+ if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
+ goto fail;
+ if (!nvme_get_ns(ns))
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+
+ return 0;
+
+fail_put_ns:
+ nvme_put_ns(ns);
+fail:
+ return -ENXIO;
+}
+
+static void nvme_ns_release(struct nvme_ns *ns)
+{
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
+}
+
+static int nvme_open(struct gendisk *disk, blk_mode_t mode)
+{
+ return nvme_ns_open(disk->private_data);
+}
+
+static void nvme_release(struct gendisk *disk)
+{
+ nvme_ns_release(disk->private_data);
+}
+
+int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ /* some standard values */
+ geo->heads = 1 << 6;
+ geo->sectors = 1 << 5;
+ geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
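+	/*
+	 * With 64 heads and 32 sectors per track, a cylinder holds 2048
+	 * sectors, hence the shift by 11 above.
+	 */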
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
+ u32 max_integrity_segments)
+{
+ struct blk_integrity integrity = { };
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE3:
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+ integrity.profile = &t10_pi_type3_crc;
+ integrity.tag_size = sizeof(u16) + sizeof(u32);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ case NVME_NVM_NS_64B_GUARD:
+ integrity.profile = &ext_pi_type3_crc64;
+ integrity.tag_size = sizeof(u16) + 6;
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ default:
+ integrity.profile = NULL;
+ break;
+ }
+ break;
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+ integrity.profile = &t10_pi_type1_crc;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ case NVME_NVM_NS_64B_GUARD:
+ integrity.profile = &ext_pi_type1_crc64;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ default:
+ integrity.profile = NULL;
+ break;
+ }
+ break;
+ default:
+ integrity.profile = NULL;
+ break;
+ }
+
+ integrity.tuple_size = ns->ms;
+ blk_integrity_register(disk, &integrity);
+ blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
+}
+#else
+static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
+ u32 max_integrity_segments)
+{
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct request_queue *queue = disk->queue;
+ u32 size = queue_logical_block_size(queue);
+
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+ ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
+ if (ctrl->max_discard_sectors == 0) {
+ blk_queue_max_discard_sectors(queue, 0);
+ return;
+ }
+
+ BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
+ NVME_DSM_MAX_RANGES);
+
+ queue->limits.discard_granularity = size;
+
+ /* If discard is already enabled, don't reset queue limits */
+ if (queue->limits.max_discard_sectors)
+ return;
+
+ blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
+ blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+
+ if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+ blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
+}
+
+static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
+{
+ return uuid_equal(&a->uuid, &b->uuid) &&
+ memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
+ memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
+ a->csi == b->csi;
+}
+
+static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ bool first = id->dps & NVME_NS_DPS_PI_FIRST;
+ unsigned lbaf = nvme_lbaf_index(id->flbas);
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct nvme_command c = { };
+ struct nvme_id_ns_nvm *nvm;
+ int ret = 0;
+ u32 elbaf;
+
+ ns->pi_size = 0;
+ ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+ if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
+ ns->pi_size = sizeof(struct t10_pi_tuple);
+ ns->guard_type = NVME_NVM_NS_16B_GUARD;
+ goto set_pi;
+ }
+
+ nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!nvm)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.cns = NVME_ID_CNS_CS_NS;
+ c.identify.csi = NVME_CSI_NVM;
+
+ ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
+ if (ret)
+ goto free_data;
+
+ elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
+
+ /* no support for storage tag formats right now */
+ if (nvme_elbaf_sts(elbaf))
+ goto free_data;
+
+ ns->guard_type = nvme_elbaf_guard_type(elbaf);
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_64B_GUARD:
+ ns->pi_size = sizeof(struct crc64_pi_tuple);
+ break;
+ case NVME_NVM_NS_16B_GUARD:
+ ns->pi_size = sizeof(struct t10_pi_tuple);
+ break;
+ default:
+ break;
+ }
+
+free_data:
+ kfree(nvm);
+set_pi:
+ if (ns->pi_size && (first || ns->ms == ns->pi_size))
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ else
+ ns->pi_type = 0;
+
+ return ret;
+}
+
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ ret = nvme_init_ms(ns, id);
+ if (ret)
+ return ret;
+
+ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ return 0;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+ * The NVMe over Fabrics specification only supports metadata as
+ * part of the extended data LBA. We rely on HCA/HBA support to
+ * remap the separate metadata buffer from the block layer.
+ */
+ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+ return 0;
+
+ ns->features |= NVME_NS_EXT_LBAS;
+
+ /*
+ * The current fabrics transport drivers support namespace
+ * metadata formats only if nvme_ns_has_pi() returns true.
+ * Suppress support for all other formats so the namespace will
+ * have a 0 capacity and not be usable through the block stack.
+ *
+ * Note, this check will need to be modified if any drivers
+ * gain the ability to use other metadata formats.
+ */
+ if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ } else {
+ /*
+ * For PCIe controllers, we can't easily remap the separate
+ * metadata buffer from the block layer and thus require a
+ * separate metadata buffer for block layer metadata/PI support.
+ * We allow extended LBAs for the passthrough interface, though.
+ */
+ if (id->flbas & NVME_NS_FLBAS_META_EXT)
+ ns->features |= NVME_NS_EXT_LBAS;
+ else
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ }
+ return 0;
+}
+
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+ struct request_queue *q)
+{
+ bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+
+ if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
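+		/*
+		 * Rough reasoning: max_hw_sectors is in 512-byte units, so
+		 * dividing by (NVME_CTRL_PAGE_SIZE >> 9) gives the number of
+		 * controller pages in a maximal transfer; the extra segment
+		 * allows for a first page that is not page aligned.
+		 */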
+
+ max_segments = min_not_zero(max_segments, ctrl->max_segments);
+ blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ }
+ blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, 3);
+ blk_queue_write_cache(q, vwc, vwc);
+}
+
+static void nvme_update_disk_info(struct gendisk *disk,
+ struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
+ u32 bs = 1U << ns->lba_shift;
+ u32 atomic_bs, phys_bs, io_opt = 0;
+
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+ * or smaller than a sector size yet, so catch this early and don't
+ * allow block I/O.
+ */
+ if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
+ capacity = 0;
+ bs = (1 << 9);
+ }
+
+ blk_integrity_unregister(disk);
+
+ atomic_bs = phys_bs = bs;
+ if (id->nabo == 0) {
+ /*
+ * Bit 1 indicates whether NAWUPF is defined for this namespace
+ * and whether it should be used instead of AWUPF. If NAWUPF ==
+ * 0 then AWUPF must be used instead.
+ */
+ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
+ atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
+ else
+ atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+ }
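+	/*
+	 * NAWUPF, AWUPF, NPWG and NOWS are all 0's based values, hence the
+	 * "1 +" in the conversions above and below.
+	 */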
+
+ if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
+ /* NPWG = Namespace Preferred Write Granularity */
+ phys_bs = bs * (1 + le16_to_cpu(id->npwg));
+ /* NOWS = Namespace Optimal Write Size */
+ io_opt = bs * (1 + le16_to_cpu(id->nows));
+ }
+
+ blk_queue_logical_block_size(disk->queue, bs);
+ /*
+ * Linux filesystems assume writing a single physical block is
+ * an atomic operation. Hence limit the physical block size to the
+ * value of the Atomic Write Unit Power Fail parameter.
+ */
+ blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
+ blk_queue_io_min(disk->queue, phys_bs);
+ blk_queue_io_opt(disk->queue, io_opt);
+
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+	 * metadata masquerading as Type 0 if supported; otherwise reject block
+	 * I/O to namespaces with metadata, except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (ns->ms) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ (ns->features & NVME_NS_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns,
+ ns->ctrl->max_integrity_segments);
+ else if (!nvme_ns_has_pi(ns))
+ capacity = 0;
+ }
+
+ set_capacity_and_notify(disk, capacity);
+
+ nvme_config_discard(disk, ns);
+ blk_queue_max_write_zeroes_sectors(disk->queue,
+ ns->ctrl->max_zeroes_sectors);
+}
+
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
+
+static inline bool nvme_first_scan(struct gendisk *disk)
+{
+ /* nvme_alloc_ns() scans the disk prior to adding it */
+ return !disk_live(disk);
+}
+
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ u32 iob;
+
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
+ iob = ctrl->max_hw_sectors;
+ else
+ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+
+ if (!iob)
+ return;
+
+ if (!is_power_of_2(iob)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring unaligned IO boundary:%u\n",
+ ns->disk->disk_name, iob);
+ return;
+ }
+
+ if (blk_queue_is_zoned(ns->disk->queue)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring zoned namespace IO boundary\n",
+ ns->disk->disk_name);
+ return;
+ }
+
+ blk_queue_chunk_sectors(ns->queue, iob);
+}
+
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ /* Hide the block-interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
+ int ret;
+
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+ ret = -ENODEV;
+ goto error;
+ }
+
+ blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
+ ns->lba_shift = id->lbaf[lbaf].ds;
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+
+ ret = nvme_configure_metadata(ns, id);
+ if (ret < 0) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+
+ if (ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_update_zone_info(ns, lbaf);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
+ }
+
+ /*
+ * Only set the DEAC bit if the device guarantees that reads from
+ * deallocated data return zeroes. While the DEAC bit does not
+ * require that, it must be a no-op if reads from deallocated data
+ * do not return zeroes.
+ */
+ if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
+ ns->features |= NVME_NS_DEAC;
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ set_bit(NVME_NS_READY, &ns->flags);
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (blk_queue_is_zoned(ns->queue)) {
+ ret = nvme_revalidate_zones(ns);
+ if (ret && !nvme_first_scan(ns->disk))
+ goto out;
+ }
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ disk_update_readahead(ns->head->disk);
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ ret = 0;
+out:
+ /*
+	 * If probing fails due to an unsupported feature, hide the block device,
+ * but still allow other access.
+ */
+ if (ret == -ENODEV) {
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+ ret = 0;
+ }
+
+error:
+ kfree(id);
+ return ret;
+}
+
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
+#ifdef CONFIG_BLK_SED_OPAL
+static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
+ bool send)
+{
+ struct nvme_ctrl *ctrl = data;
+ struct nvme_command cmd = { };
+
+ if (send)
+ cmd.common.opcode = nvme_admin_security_send;
+ else
+ cmd.common.opcode = nvme_admin_security_recv;
+ cmd.common.nsid = 0;
+ cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
+ cmd.common.cdw11 = cpu_to_le32(len);
+
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
+ NVME_QID_ANY, 1, 0);
+}
+
+static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
+{
+ if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
+ if (!ctrl->opal_dev)
+ ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
+ else if (was_suspended)
+ opal_unlock_from_suspend(ctrl->opal_dev);
+ } else {
+ free_opal_dev(ctrl->opal_dev);
+ ctrl->opal_dev = NULL;
+ }
+}
+#else
+static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
+{
+}
+#endif /* CONFIG_BLK_SED_OPAL */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
+ data);
+}
+#else
+#define nvme_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+const struct block_device_operations nvme_bdev_ops = {
+ .owner = THIS_MODULE,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
+ .getgeo = nvme_getgeo,
+ .report_zones = nvme_report_zones,
+ .pr_ops = &nvme_pr_ops,
+};
+
+static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
+ u32 timeout, const char *op)
+{
+ unsigned long timeout_jiffies = jiffies + timeout * HZ;
+ u32 csts;
+ int ret;
+
+ while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if (csts == ~0)
+ return -ENODEV;
+ if ((csts & mask) == val)
+ break;
+
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout_jiffies)) {
+ dev_err(ctrl->device,
+ "Device not ready; aborting %s, CSTS=0x%x\n",
+ op, csts);
+ return -ENODEV;
+ }
+ }
+
+ return ret;
+}
+
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
+{
+ int ret;
+
+ ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+ if (shutdown)
+ ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
+ else
+ ctrl->ctrl_config &= ~NVME_CC_ENABLE;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ if (shutdown) {
+ return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
+ NVME_CSTS_SHST_CMPLT,
+ ctrl->shutdown_timeout, "shutdown");
+ }
+ if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
+ msleep(NVME_QUIRK_DELAY_AMOUNT);
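+	/*
+	 * CAP.TO is in units of 500 ms; (TO + 1) / 2 converts it to whole
+	 * seconds (rounding up), which is what nvme_wait_ready() expects.
+	 */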
+ return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
+ (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
+}
+EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
+
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+{
+ unsigned dev_page_min;
+ u32 timeout;
+ int ret;
+
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
+ return ret;
+ }
+ dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
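+	/*
+	 * CAP.MPSMIN is an exponent relative to 4 KiB, so the minimum device
+	 * page size is 2^(12 + MPSMIN) bytes; 12 is added here so it can be
+	 * compared directly against NVME_CTRL_PAGE_SHIFT.
+	 */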
+
+ if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
+ dev_err(ctrl->device,
+ "Minimum device page size %u too large for host (%u)\n",
+ 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
+ return -ENODEV;
+ }
+
+ if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
+ ctrl->ctrl_config = NVME_CC_CSS_CSI;
+ else
+ ctrl->ctrl_config = NVME_CC_CSS_NVM;
+
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
+ ctrl->ctrl_config |= NVME_CC_CRIME;
+
+ ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+ ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ /* Flush write to device (required if transport is PCI) */
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ /* CAP value may change after initial CC write */
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (ret)
+ return ret;
+
+ timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+ u32 crto, ready_timeout;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /*
+		 * CRTO should always be greater than or equal to CAP.TO, but some
+ * devices are known to get this wrong. Use the larger of the
+ * two values.
+ */
+ if (ctrl->ctrl_config & NVME_CC_CRIME)
+ ready_timeout = NVME_CRTO_CRIMT(crto);
+ else
+ ready_timeout = NVME_CRTO_CRWMT(crto);
+
+ if (ready_timeout < timeout)
+ dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
+ crto, ctrl->cap);
+ else
+ timeout = ready_timeout;
+ }
+
+ ctrl->ctrl_config |= NVME_CC_ENABLE;
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+ return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
+ (timeout + 1) / 2, "initialisation");
+}
+EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
+
+static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
+{
+ __le64 ts;
+ int ret;
+
+ if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
+ return 0;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
+ NULL);
+ if (ret)
+ dev_warn_once(ctrl->device,
+ "could not set timestamp (%d)\n", ret);
+ return ret;
+}
+
+static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
+{
+ struct nvme_feat_host_behavior *host;
+ u8 acre = 0, lbafee = 0;
+ int ret;
+
+ /* Don't bother enabling the feature if retry delay is not reported */
+ if (ctrl->crdt[0])
+ acre = NVME_ENABLE_ACRE;
+ if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
+ lbafee = NVME_ENABLE_LBAFEE;
+
+ if (!acre && !lbafee)
+ return 0;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return 0;
+
+ host->acre = acre;
+ host->lbafee = lbafee;
+ ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
+ host, sizeof(*host), NULL);
+ kfree(host);
+ return ret;
+}
+
+/*
+ * This function checks whether the given total (exlat + enlat) latency of a
+ * power state allows that state to be used as an APST transition target.
+ * It does so by comparing the latency to the primary and secondary latency
+ * tolerances defined by module params. If there's a match, the corresponding
+ * timeout value is returned through *transition_time and the matching
+ * tolerance index (1 or 2) is reported through *last_index.
+ */
+static bool nvme_apst_get_transition_time(u64 total_latency,
+ u64 *transition_time, unsigned *last_index)
+{
+ if (total_latency <= apst_primary_latency_tol_us) {
+ if (*last_index == 1)
+ return false;
+ *last_index = 1;
+ *transition_time = apst_primary_timeout_ms;
+ return true;
+ }
+ if (apst_secondary_timeout_ms &&
+ total_latency <= apst_secondary_latency_tol_us) {
+ if (*last_index <= 2)
+ return false;
+ *last_index = 2;
+ *transition_time = apst_secondary_timeout_ms;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * APST (Autonomous Power State Transition) lets us program a table of power
+ * state transitions that the controller will perform automatically.
+ *
+ * Depending on module params, one of the two supported techniques will be used:
+ *
+ * - If the parameters provide explicit timeouts and tolerances, they will be
+ * used to build a table with up to 2 non-operational states to transition to.
+ * The default parameter values were selected based on the values used by
+ * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
+ * regeneration of the APST table in the event of switching between external
+ * and battery power, the timeouts and tolerances reflect a compromise
+ * between values used by Microsoft for AC and battery scenarios.
+ * - If not, we'll configure the table with a simple heuristic: we are willing
+ * to spend at most 2% of the time transitioning between power states.
+ * Therefore, when running in any given state, we will enter the next
+ * lower-power non-operational state after waiting 50 * (enlat + exlat)
+ * microseconds, as long as that state's exit latency is under the requested
+ * maximum latency.
+ *
+ * We will not autonomously enter any non-operational state for which the total
+ * latency exceeds ps_max_latency_us.
+ *
+ * Users can set ps_max_latency_us to zero to turn off APST.
+ */
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
+{
+ struct nvme_feat_auto_pst *table;
+ unsigned apste = 0;
+ u64 max_lat_us = 0;
+ __le64 target = 0;
+ int max_ps = -1;
+ int state;
+ int ret;
+ unsigned last_lt_index = UINT_MAX;
+
+ /*
+ * If APST isn't supported or if we haven't been initialized yet,
+ * then don't do anything.
+ */
+ if (!ctrl->apsta)
+ return 0;
+
+ if (ctrl->npss > 31) {
+ dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
+ return 0;
+ }
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return 0;
+
+ if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
+ /* Turn off APST. */
+ dev_dbg(ctrl->device, "APST disabled\n");
+ goto done;
+ }
+
+ /*
+ * Walk through all states from lowest- to highest-power.
+ * According to the spec, lower-numbered states use more power. NPSS,
+ * despite the name, is the index of the lowest-power state, not the
+ * number of states.
+ */
+ for (state = (int)ctrl->npss; state >= 0; state--) {
+ u64 total_latency_us, exit_latency_us, transition_ms;
+
+ if (target)
+ table->entries[state] = target;
+
+ /*
+ * Don't allow transitions to the deepest state if it's quirked
+ * off.
+ */
+ if (state == ctrl->npss &&
+ (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+ continue;
+
+ /*
+ * Is this state a useful non-operational state for higher-power
+ * states to autonomously transition to?
+ */
+ if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
+ continue;
+
+ exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+ if (exit_latency_us > ctrl->ps_max_latency_us)
+ continue;
+
+ total_latency_us = exit_latency_us +
+ le32_to_cpu(ctrl->psd[state].entry_lat);
+
+ /*
+ * This state is good. It can be used as the APST idle target
+ * for higher power states.
+ */
+ if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
+ if (!nvme_apst_get_transition_time(total_latency_us,
+ &transition_ms, &last_lt_index))
+ continue;
+ } else {
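+ /*
+ * 2% heuristic: waiting 50 * (enlat + exlat) us is the same as
+ * total latency / 20 in ms; round up and clamp to the 24-bit
+ * ITPT field.
+ */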
+ transition_ms = total_latency_us + 19;
+ do_div(transition_ms, 20);
+ if (transition_ms > (1 << 24) - 1)
+ transition_ms = (1 << 24) - 1;
+ }
+
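+ /*
+ * APST entry format: Idle Transition Power State in bits 7:3,
+ * Idle Time Prior to Transition (in ms) in bits 31:8.
+ */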
+ target = cpu_to_le64((state << 3) | (transition_ms << 8));
+ if (max_ps == -1)
+ max_ps = state;
+ if (total_latency_us > max_lat_us)
+ max_lat_us = total_latency_us;
+ }
+
+ if (max_ps == -1)
+ dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
+ else
+ dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
+ max_ps, max_lat_us, (int)sizeof(*table), table);
+ apste = 1;
+
+done:
+ ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
+ table, sizeof(*table), NULL);
+ if (ret)
+ dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
+ kfree(table);
+ return ret;
+}
+
+static void nvme_set_latency_tolerance(struct device *dev, s32 val)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ u64 latency;
+
+ switch (val) {
+ case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
+ case PM_QOS_LATENCY_ANY:
+ latency = U64_MAX;
+ break;
+
+ default:
+ latency = val;
+ }
+
+ if (ctrl->ps_max_latency_us != latency) {
+ ctrl->ps_max_latency_us = latency;
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
+ nvme_configure_apst(ctrl);
+ }
+}
+
+struct nvme_core_quirk_entry {
+ /*
+ * NVMe model and firmware strings are padded with spaces. For
+ * simplicity, strings in the quirk table are padded with NULLs
+ * instead.
+ */
+ u16 vid;
+ const char *mn;
+ const char *fr;
+ unsigned long quirks;
+};
+
+static const struct nvme_core_quirk_entry core_quirks[] = {
+ {
+ /*
+ * This Toshiba device seems to die using any APST states. See:
+ * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
+ */
+ .vid = 0x1179,
+ .mn = "THNSF5256GPUK TOSHIBA",
+ .quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * This LiteON CL1-3D*-Q11 firmware version has a race
+ * condition associated with actions related to suspend to idle.
+ * LiteON has resolved the problem in future firmware.
+ */
+ .vid = 0x14a4,
+ .fr = "22301111",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kioxia CD6-V Series / HPE PE8030 device times out and
+ * aborts I/O during any load, but more easily reproducible
+ * with discards (fstrim).
+ *
+ * The device is left in a state where it is also not possible
+ * to use "nvme set-feature" to disable APST, but booting with
+ * nvme_core.default_ps_max_latency=0 works.
+ */
+ .vid = 0x1e0f,
+ .mn = "KCD6XVUL6T40",
+ .quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * The external Samsung X5 SSD fails initialization without a
+ * delay before checking if it is ready and has a whole set of
+ * other problems. To make this even more interesting, it
+ * shares the PCI ID with internal Samsung 970 Evo Plus that
+ * does not need or want these quirks.
+ */
+ .vid = 0x144d,
+ .mn = "Samsung Portable SSD X5",
+ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN,
+ }
+};
+
+/* match is null-terminated but idstr is space-padded. */
+static bool string_matches(const char *idstr, const char *match, size_t len)
+{
+ size_t matchlen;
+
+ if (!match)
+ return true;
+
+ matchlen = strlen(match);
+ WARN_ON_ONCE(matchlen > len);
+
+ if (memcmp(idstr, match, matchlen))
+ return false;
+
+ for (; matchlen < len; matchlen++)
+ if (idstr[matchlen] != ' ')
+ return false;
+
+ return true;
+}
+
+static bool quirk_matches(const struct nvme_id_ctrl *id,
+ const struct nvme_core_quirk_entry *q)
+{
+ return q->vid == le16_to_cpu(id->vid) &&
+ string_matches(id->mn, q->mn, sizeof(id->mn)) &&
+ string_matches(id->fr, q->fr, sizeof(id->fr));
+}
+
+static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
+ struct nvme_id_ctrl *id)
+{
+ size_t nqnlen;
+ int off;
+
+ if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
+ nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
+ if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
+ strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ return;
+ }
+
+ if (ctrl->vs >= NVME_VS(1, 2, 1))
+ dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+ }
+
+ /*
+ * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
+ * Base Specification 2.0. It is slightly different from the format
+ * specified there due to historic reasons, and we can't change it now.
+ */
+ off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
+ "nqn.2014.08.org.nvmexpress:%04x%04x",
+ le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
+ memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
+ off += sizeof(id->sn);
+ memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
+ off += sizeof(id->mn);
+ memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
+}
+
+static void nvme_release_subsystem(struct device *dev)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ if (subsys->instance >= 0)
+ ida_free(&nvme_instance_ida, subsys->instance);
+ kfree(subsys);
+}
+
+static void nvme_destroy_subsystem(struct kref *ref)
+{
+ struct nvme_subsystem *subsys =
+ container_of(ref, struct nvme_subsystem, ref);
+
+ mutex_lock(&nvme_subsystems_lock);
+ list_del(&subsys->entry);
+ mutex_unlock(&nvme_subsystems_lock);
+
+ ida_destroy(&subsys->ns_ida);
+ device_del(&subsys->dev);
+ put_device(&subsys->dev);
+}
+
+static void nvme_put_subsystem(struct nvme_subsystem *subsys)
+{
+ kref_put(&subsys->ref, nvme_destroy_subsystem);
+}
+
+static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
+{
+ struct nvme_subsystem *subsys;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ /*
+ * Fail matches for discovery subsystems. This results
+ * in each discovery controller being bound to a unique subsystem.
+ * This avoids issues with validating controller values
+ * that can only be true when there is a single unique subsystem.
+ * There may be multiple and completely independent entities
+ * that provide discovery controllers.
+ */
+ if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
+ return NULL;
+
+ list_for_each_entry(subsys, &nvme_subsystems, entry) {
+ if (strcmp(subsys->subnqn, subsysnqn))
+ continue;
+ if (!kref_get_unless_zero(&subsys->ref))
+ continue;
+ return subsys;
+ }
+
+ return NULL;
+}
+
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
+static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
+ struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ struct nvme_ctrl *tmp;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
+ if (nvme_state_terminal(tmp))
+ continue;
+
+ if (tmp->cntlid == ctrl->cntlid) {
+ dev_err(ctrl->device,
+ "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device),
+ subsys->subnqn);
+ return false;
+ }
+
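+ /*
+ * Sharing a subsystem between controllers requires CMIC
+ * multi-controller support, except for discovery controllers.
+ */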
+ if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ nvme_discovery_ctrl(ctrl))
+ continue;
+
+ dev_err(ctrl->device,
+ "Subsystem does not support multiple controllers\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ struct nvme_subsystem *subsys, *found;
+ int ret;
+
+ subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+ if (!subsys)
+ return -ENOMEM;
+
+ subsys->instance = -1;
+ mutex_init(&subsys->lock);
+ kref_init(&subsys->ref);
+ INIT_LIST_HEAD(&subsys->ctrls);
+ INIT_LIST_HEAD(&subsys->nsheads);
+ nvme_init_subnqn(subsys, ctrl, id);
+ memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
+ memcpy(subsys->model, id->mn, sizeof(subsys->model));
+ subsys->vendor_id = le16_to_cpu(id->vid);
+ subsys->cmic = id->cmic;
+
+ /* Versions prior to 1.4 don't necessarily report a valid type */
+ if (id->cntrltype == NVME_CTRL_DISC ||
+ !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
+ subsys->subtype = NVME_NQN_DISC;
+ else
+ subsys->subtype = NVME_NQN_NVME;
+
+ if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
+ dev_err(ctrl->device,
+ "Subsystem %s is not a discovery controller",
+ subsys->subnqn);
+ kfree(subsys);
+ return -EINVAL;
+ }
+ subsys->awupf = le16_to_cpu(id->awupf);
+ nvme_mpath_default_iopolicy(subsys);
+
+ subsys->dev.class = nvme_subsys_class;
+ subsys->dev.release = nvme_release_subsystem;
+ subsys->dev.groups = nvme_subsys_attrs_groups;
+ dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
+ device_initialize(&subsys->dev);
+
+ mutex_lock(&nvme_subsystems_lock);
+ found = __nvme_find_get_subsystem(subsys->subnqn);
+ if (found) {
+ put_device(&subsys->dev);
+ subsys = found;
+
+ if (!nvme_validate_cntlid(subsys, ctrl, id)) {
+ ret = -EINVAL;
+ goto out_put_subsystem;
+ }
+ } else {
+ ret = device_add(&subsys->dev);
+ if (ret) {
+ dev_err(ctrl->device,
+ "failed to register subsystem device.\n");
+ put_device(&subsys->dev);
+ goto out_unlock;
+ }
+ ida_init(&subsys->ns_ida);
+ list_add_tail(&subsys->entry, &nvme_subsystems);
+ }
+
+ ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+ dev_name(ctrl->device));
+ if (ret) {
+ dev_err(ctrl->device,
+ "failed to create sysfs link from subsystem.\n");
+ goto out_put_subsystem;
+ }
+
+ if (!found)
+ subsys->instance = ctrl->instance;
+ ctrl->subsys = subsys;
+ list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ mutex_unlock(&nvme_subsystems_lock);
+ return 0;
+
+out_put_subsystem:
+ nvme_put_subsystem(subsys);
+out_unlock:
+ mutex_unlock(&nvme_subsystems_lock);
+ return ret;
+}
+
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
+ void *log, size_t size, u64 offset)
+{
+ struct nvme_command c = { };
+ u32 dwlen = nvme_bytes_to_numd(size);
+
+ c.get_log_page.opcode = nvme_admin_get_log_page;
+ c.get_log_page.nsid = cpu_to_le32(nsid);
+ c.get_log_page.lid = log_page;
+ c.get_log_page.lsp = lsp;
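+ /* NUMD is a 0-based dword count, split across NUMDL/NUMDU */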
+ c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
+ c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
+ c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
+ c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+ c.get_log_page.csi = csi;
+
+ return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
+}
+
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ struct nvme_effects_log **log)
+{
+ struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
+ int ret;
+
+ if (cel)
+ goto out;
+
+ cel = kzalloc(sizeof(*cel), GFP_KERNEL);
+ if (!cel)
+ return -ENOMEM;
+
+ ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
+ cel, sizeof(*cel), 0);
+ if (ret) {
+ kfree(cel);
+ return ret;
+ }
+
+ xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+out:
+ *log = cel;
+ return 0;
+}
+
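+/*
+ * Convert a power-of-two (2^units) multiple of the controller's minimum
+ * memory page size (2^(12 + CAP.MPSMIN) bytes) into 512-byte sectors.
+ */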
+static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
+{
+ u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
+
+ if (check_shl_overflow(1U, units + page_shift - 9, &val))
+ return UINT_MAX;
+ return val;
+}
+
+static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c = { };
+ struct nvme_id_ctrl_nvm *id;
+ int ret;
+
+ if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
+ ctrl->max_discard_sectors = UINT_MAX;
+ ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
+ } else {
+ ctrl->max_discard_sectors = 0;
+ ctrl->max_discard_segments = 0;
+ }
+
+ /*
+ * Even though the NVMe spec explicitly states that MDTS is not applicable
+ * to the Write Zeroes command, we are cautious and limit the size to the
+ * controller's max_hw_sectors value, which is based on the MDTS field
+ * and possibly other limiting factors.
+ */
+ if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
+ !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
+ ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
+ else
+ ctrl->max_zeroes_sectors = 0;
+
+ if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+ nvme_ctrl_limited_cns(ctrl) ||
+ test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
+ return 0;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+ c.identify.csi = NVME_CSI_NVM;
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (ret)
+ goto free_data;
+
+ if (id->dmrl)
+ ctrl->max_discard_segments = id->dmrl;
+ ctrl->dmrsl = le32_to_cpu(id->dmrsl);
+ if (id->wzsl)
+ ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
+
+free_data:
+ if (ret > 0)
+ set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
+ kfree(id);
+ return ret;
+}
+
+static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
+{
+ struct nvme_effects_log *log = ctrl->effects;
+
+ log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_NCC |
+ NVME_CMD_EFFECTS_CSE_MASK);
+ log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_CSE_MASK);
+
+ /*
+ * The spec says the result of a security receive command depends on
+ * the previous security send command. As such, many vendors log this
+ * command as one to submitted only when no other commands to the same
+ * namespace are outstanding. The intention is to tell the host to
+ * prevent mixing security send and receive.
+ *
+ * This driver can only enforce such exclusive access against IO
+ * queues, though. We are not readily able to enforce such a rule for
+ * two commands to the admin queue, which is the only queue that
+ * matters for this command.
+ *
+ * Rather than blindly freezing the IO queues for this effect that
+ * doesn't even apply to IO, mask it off.
+ */
+ log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
+
+ log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+}
+
+static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ int ret = 0;
+
+ if (ctrl->effects)
+ return 0;
+
+ if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
+ ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!ctrl->effects) {
+ ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+ if (!ctrl->effects)
+ return -ENOMEM;
+ xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+ }
+
+ nvme_init_known_nvm_effects(ctrl);
+ return 0;
+}
+
+static int nvme_init_identify(struct nvme_ctrl *ctrl)
+{
+ struct nvme_id_ctrl *id;
+ u32 max_hw_sectors;
+ bool prev_apst_enabled;
+ int ret;
+
+ ret = nvme_identify_ctrl(ctrl, &id);
+ if (ret) {
+ dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
+ return -EIO;
+ }
+
+ if (!(ctrl->ops->flags & NVME_F_FABRICS))
+ ctrl->cntlid = le16_to_cpu(id->cntlid);
+
+ if (!ctrl->identified) {
+ unsigned int i;
+
+ /*
+ * Check for quirks. Quirk can depend on firmware version,
+ * so, in principle, the set of quirks present can change
+ * across a reset. As a possible future enhancement, we
+ * could re-scan for quirks every time we reinitialize
+ * the device, but we'd have to make sure that the driver
+ * behaves intelligently if the quirks change.
+ */
+ for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
+ if (quirk_matches(id, &core_quirks[i]))
+ ctrl->quirks |= core_quirks[i].quirks;
+ }
+
+ ret = nvme_init_subsystem(ctrl, id);
+ if (ret)
+ goto out_free;
+
+ ret = nvme_init_effects(ctrl, id);
+ if (ret)
+ goto out_free;
+ }
+ memcpy(ctrl->subsys->firmware_rev, id->fr,
+ sizeof(ctrl->subsys->firmware_rev));
+
+ if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
+ dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
+ ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
+ }
+
+ ctrl->crdt[0] = le16_to_cpu(id->crdt1);
+ ctrl->crdt[1] = le16_to_cpu(id->crdt2);
+ ctrl->crdt[2] = le16_to_cpu(id->crdt3);
+
+ ctrl->oacs = le16_to_cpu(id->oacs);
+ ctrl->oncs = le16_to_cpu(id->oncs);
+ ctrl->mtfa = le16_to_cpu(id->mtfa);
+ ctrl->oaes = le32_to_cpu(id->oaes);
+ ctrl->wctemp = le16_to_cpu(id->wctemp);
+ ctrl->cctemp = le16_to_cpu(id->cctemp);
+
+ atomic_set(&ctrl->abort_limit, id->acl + 1);
+ ctrl->vwc = id->vwc;
+ if (id->mdts)
+ max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
+ else
+ max_hw_sectors = UINT_MAX;
+ ctrl->max_hw_sectors =
+ min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
+
+ nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ ctrl->sgls = le32_to_cpu(id->sgls);
+ ctrl->kas = le16_to_cpu(id->kas);
+ ctrl->max_namespaces = le32_to_cpu(id->mnan);
+ ctrl->ctratt = le32_to_cpu(id->ctratt);
+
+ ctrl->cntrltype = id->cntrltype;
+ ctrl->dctype = id->dctype;
+
+ if (id->rtd3e) {
+ /* us -> s */
+ u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
+
+ ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
+ shutdown_timeout, 60);
+
+ if (ctrl->shutdown_timeout != shutdown_timeout)
+ dev_info(ctrl->device,
+ "Shutdown timeout set to %u seconds\n",
+ ctrl->shutdown_timeout);
+ } else
+ ctrl->shutdown_timeout = shutdown_timeout;
+
+ ctrl->npss = id->npss;
+ ctrl->apsta = id->apsta;
+ prev_apst_enabled = ctrl->apst_enabled;
+ if (ctrl->quirks & NVME_QUIRK_NO_APST) {
+ if (force_apst && id->apsta) {
+ dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
+ ctrl->apst_enabled = true;
+ } else {
+ ctrl->apst_enabled = false;
+ }
+ } else {
+ ctrl->apst_enabled = id->apsta;
+ }
+ memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->icdoff = le16_to_cpu(id->icdoff);
+ ctrl->ioccsz = le32_to_cpu(id->ioccsz);
+ ctrl->iorcsz = le32_to_cpu(id->iorcsz);
+ ctrl->maxcmd = le16_to_cpu(id->maxcmd);
+
+ /*
+ * In fabrics we need to verify the cntlid matches the
+ * admin connect
+ */
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+ dev_err(ctrl->device,
+ "Mismatching cntlid: Connect %u vs Identify "
+ "%u, rejecting\n",
+ ctrl->cntlid, le16_to_cpu(id->cntlid));
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
+ dev_err(ctrl->device,
+ "keep-alive support is mandatory for fabrics\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ } else {
+ ctrl->hmpre = le32_to_cpu(id->hmpre);
+ ctrl->hmmin = le32_to_cpu(id->hmmin);
+ ctrl->hmminds = le32_to_cpu(id->hmminds);
+ ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
+ }
+
+ ret = nvme_mpath_init_identify(ctrl, id);
+ if (ret < 0)
+ goto out_free;
+
+ if (ctrl->apst_enabled && !prev_apst_enabled)
+ dev_pm_qos_expose_latency_tolerance(ctrl->device);
+ else if (!ctrl->apst_enabled && prev_apst_enabled)
+ dev_pm_qos_hide_latency_tolerance(ctrl->device);
+
+out_free:
+ kfree(id);
+ return ret;
+}
+
+/*
+ * Initialize the cached copies of the Identify data and various controller
+ * registers in our nvme_ctrl structure. This should be called as soon as
+ * the admin queue is fully up and running.
+ */
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
+{
+ int ret;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
+ if (ret) {
+ dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
+ return ret;
+ }
+
+ ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
+
+ if (ctrl->vs >= NVME_VS(1, 1, 0))
+ ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
+
+ ret = nvme_init_identify(ctrl);
+ if (ret)
+ return ret;
+
+ ret = nvme_configure_apst(ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = nvme_configure_timestamp(ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = nvme_configure_host_options(ctrl);
+ if (ret < 0)
+ return ret;
+
+ nvme_configure_opal(ctrl, was_suspended);
+
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+ /*
+ * Do not return errors unless we are in a controller reset,
+ * the controller works perfectly fine without hwmon.
+ */
+ ret = nvme_hwmon_init(ctrl);
+ if (ret == -EINTR)
+ return ret;
+ }
+
+ clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
+ ctrl->identified = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
+
+static int nvme_dev_open(struct inode *inode, struct file *file)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
+
+ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_LIVE:
+ break;
+ default:
+ return -EWOULDBLOCK;
+ }
+
+ nvme_get_ctrl(ctrl);
+ if (!try_module_get(ctrl->ops->module)) {
+ nvme_put_ctrl(ctrl);
+ return -EINVAL;
+ }
+
+ file->private_data = ctrl;
+ return 0;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *file)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
+
+ module_put(ctrl->ops->module);
+ nvme_put_ctrl(ctrl);
+ return 0;
+}
+
+static const struct file_operations nvme_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_dev_open,
+ .release = nvme_dev_release,
+ .unlocked_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_dev_uring_cmd,
+};
+
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
+ unsigned nsid)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&ctrl->subsys->lock);
+
+ list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
+ /*
+ * Private namespaces can share NSIDs under some conditions.
+ * In that case we can't use the same ns_head for namespaces
+ * with the same NSID.
+ */
+ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
+ continue;
+ if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
+ return h;
+ }
+
+ return NULL;
+}
+
+static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_ids *ids)
+{
+ bool has_uuid = !uuid_is_null(&ids->uuid);
+ bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
+ bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+
+ list_for_each_entry(h, &subsys->nsheads, entry) {
+ if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
+ return -EINVAL;
+ if (has_nguid &&
+ memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
+ return -EINVAL;
+ if (has_eui64 &&
+ memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void nvme_cdev_rel(struct device *dev)
+{
+ ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
+
+void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
+{
+ cdev_device_del(cdev, cdev_device);
+ put_device(cdev_device);
+}
+
+int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
+ const struct file_operations *fops, struct module *owner)
+{
+ int minor, ret;
+
+ minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+ cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
+ cdev_device->class = nvme_ns_chr_class;
+ cdev_device->release = nvme_cdev_rel;
+ device_initialize(cdev_device);
+ cdev_init(cdev, fops);
+ cdev->owner = owner;
+ ret = cdev_device_add(cdev, cdev_device);
+ if (ret)
+ put_device(cdev_device);
+
+ return ret;
+}
+
+static int nvme_ns_chr_open(struct inode *inode, struct file *file)
+{
+ return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
+}
+
+static int nvme_ns_chr_release(struct inode *inode, struct file *file)
+{
+ nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
+ return 0;
+}
+
+static const struct file_operations nvme_ns_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_chr_open,
+ .release = nvme_ns_chr_release,
+ .unlocked_ioctl = nvme_ns_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_ns_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
+};
+
+static int nvme_add_ns_cdev(struct nvme_ns *ns)
+{
+ int ret;
+
+ ns->cdev_device.parent = ns->ctrl->device;
+ ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
+ ns->ctrl->instance, ns->head->instance);
+ if (ret)
+ return ret;
+
+ return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+ ns->ctrl->ops->module);
+}
+
+static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_head *head;
+ size_t size = sizeof(*head);
+ int ret = -ENOMEM;
+
+#ifdef CONFIG_NVME_MULTIPATH
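+ /* room for the per-node current_path[] array used by multipath */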
+ size += num_possible_nodes() * sizeof(struct nvme_ns *);
+#endif
+
+ head = kzalloc(size, GFP_KERNEL);
+ if (!head)
+ goto out;
+ ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_head;
+ head->instance = ret;
+ INIT_LIST_HEAD(&head->list);
+ ret = init_srcu_struct(&head->srcu);
+ if (ret)
+ goto out_ida_remove;
+ head->subsys = ctrl->subsys;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
+ kref_init(&head->ref);
+
+ if (head->ids.csi) {
+ ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
+ if (ret)
+ goto out_cleanup_srcu;
+ } else
+ head->effects = ctrl->effects;
+
+ ret = nvme_mpath_alloc_disk(ctrl, head);
+ if (ret)
+ goto out_cleanup_srcu;
+
+ list_add_tail(&head->entry, &ctrl->subsys->nsheads);
+
+ kref_get(&ctrl->subsys->ref);
+
+ return head;
+out_cleanup_srcu:
+ cleanup_srcu_struct(&head->srcu);
+out_ida_remove:
+ ida_free(&ctrl->subsys->ns_ida, head->instance);
+out_free_head:
+ kfree(head);
+out:
+ if (ret > 0)
+ ret = blk_status_to_errno(nvme_error_status(ret));
+ return ERR_PTR(ret);
+}
+
+static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
+ struct nvme_ns_ids *ids)
+{
+ struct nvme_subsystem *s;
+ int ret = 0;
+
+ /*
+ * Note that this check is racy as we try to avoid holding the global
+ * lock over the whole ns_head creation. But it is only intended as
+ * a sanity check anyway.
+ */
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(s, &nvme_subsystems, entry) {
+ if (s == this)
+ continue;
+ mutex_lock(&s->lock);
+ ret = nvme_subsys_check_duplicate_ids(s, ids);
+ mutex_unlock(&s->lock);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&nvme_subsystems_lock);
+
+ return ret;
+}
+
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct nvme_ns_head *head = NULL;
+ int ret;
+
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
+ if (ret) {
+ /*
+ * We've found two different namespaces on two different
+ * subsystems that report the same ID. This is pretty nasty
+ * for anything that actually requires unique device
+ * identification. In the kernel we need this for multipathing,
+ * and in user space the /dev/disk/by-id/ links rely on it.
+ *
+ * If the device also claims to be multi-path capable back off
+ * here now and refuse to probe the second device as this is a
+ * recipe for data corruption. If not, this is probably a
+ * cheap consumer device if on the PCIe bus, so let the user
+ * proceed and use the shiny toy, but warn that with changing
+ * probing order (which, due to our async probing, could just be a
+ * device taking longer to start up) the other device could show
+ * up at any time.
+ */
+ nvme_print_device_info(ctrl);
+ if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
+ ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
+ info->is_shared)) {
+ dev_err(ctrl->device,
+ "ignoring nsid %d because of duplicate IDs\n",
+ info->nsid);
+ return ret;
+ }
+
+ dev_err(ctrl->device,
+ "clearing duplicate IDs for nsid %d\n", info->nsid);
+ dev_err(ctrl->device,
+ "use of /dev/disk/by-id/ may cause data corruption\n");
+ memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
+ memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
+ memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
+ ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
+ }
+
+ mutex_lock(&ctrl->subsys->lock);
+ head = nvme_find_ns_head(ctrl, info->nsid);
+ if (!head) {
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "duplicate IDs in subsystem for nsid %d\n",
+ info->nsid);
+ goto out_unlock;
+ }
+ head = nvme_alloc_ns_head(ctrl, info);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
+ goto out_unlock;
+ }
+ } else {
+ ret = -EINVAL;
+ if (!info->is_shared || !head->shared) {
+ dev_err(ctrl->device,
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
+ goto out_put_ns_head;
+ }
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
+ dev_err(ctrl->device,
+ "IDs don't match for shared namespace %d\n",
+ info->nsid);
+ goto out_put_ns_head;
+ }
+
+ if (!multipath) {
+ dev_warn(ctrl->device,
+ "Found shared namespace %d, but multipathing not supported.\n",
+ info->nsid);
+ dev_warn_once(ctrl->device,
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+ }
+ }
+
+ list_add_tail_rcu(&ns->siblings, &head->list);
+ ns->head = head;
+ mutex_unlock(&ctrl->subsys->lock);
+ return 0;
+
+out_put_ns_head:
+ nvme_put_ns_head(head);
+out_unlock:
+ mutex_unlock(&ctrl->subsys->lock);
+ return ret;
+}
+
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+ struct nvme_ns *ns, *ret = NULL;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->head->ns_id == nsid) {
+ if (!nvme_get_ns(ns))
+ continue;
+ ret = ns;
+ break;
+ }
+ if (ns->head->ns_id > nsid)
+ break;
+ }
+ up_read(&ctrl->namespaces_rwsem);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
+
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+{
+ struct nvme_ns *tmp;
+
+ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+ if (tmp->head->ns_id < ns->head->ns_id) {
+ list_add(&ns->list, &tmp->list);
+ return;
+ }
+ }
+ list_add(&ns->list, &ns->ctrl->namespaces);
+}
+
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
+{
+ struct nvme_ns *ns;
+ struct gendisk *disk;
+ int node = ctrl->numa_node;
+
+ ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
+ if (!ns)
+ return;
+
+ disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+ if (IS_ERR(disk))
+ goto out_free_ns;
+ disk->fops = &nvme_bdev_ops;
+ disk->private_data = ns;
+
+ ns->disk = disk;
+ ns->queue = disk->queue;
+
+ if (ctrl->opts && ctrl->opts->data_digest)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
+ blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+
+ ns->ctrl = ctrl;
+ kref_init(&ns->kref);
+
+ if (nvme_init_ns_head(ns, info))
+ goto out_cleanup_disk;
+
+ /*
+ * If multipathing is enabled, the device name for all disks and not
+ * just those that represent shared namespaces needs to be based on the
+ * subsystem instance. Using the controller instance for private
+ * namespaces could lead to naming collisions between shared and private
+ * namespaces if they don't use a common numbering scheme.
+ *
+ * If multipathing is not enabled, disk names must use the controller
+ * instance as shared namespaces will show up as multiple block
+ * devices.
+ */
+ if (nvme_ns_head_multipath(ns->head)) {
+ sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->instance, ns->head->instance);
+ disk->flags |= GENHD_FL_HIDDEN;
+ } else if (multipath) {
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ } else {
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
+ ns->head->instance);
+ }
+
+ if (nvme_update_ns_info(ns, info))
+ goto out_unlink_ns;
+
+ down_write(&ctrl->namespaces_rwsem);
+ /*
+ * Ensure that no namespaces are added to the ctrl list after the queues
+ * are frozen, thereby avoiding a deadlock between scan and reset.
+ */
+ if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
+ up_write(&ctrl->namespaces_rwsem);
+ goto out_unlink_ns;
+ }
+ nvme_ns_add_to_ctrl_list(ns);
+ up_write(&ctrl->namespaces_rwsem);
+ nvme_get_ctrl(ctrl);
+
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ goto out_cleanup_ns_from_list;
+
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_add_ns_cdev(ns);
+
+ nvme_mpath_add_disk(ns, info->anagrpid);
+ nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
+
+ return;
+
+ out_cleanup_ns_from_list:
+ nvme_put_ctrl(ctrl);
+ down_write(&ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ctrl->namespaces_rwsem);
+ out_unlink_ns:
+ mutex_lock(&ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list))
+ list_del_init(&ns->head->entry);
+ mutex_unlock(&ctrl->subsys->lock);
+ nvme_put_ns_head(ns->head);
+ out_cleanup_disk:
+ put_disk(disk);
+ out_free_ns:
+ kfree(ns);
+}
+
+static void nvme_ns_remove(struct nvme_ns *ns)
+{
+ bool last_path = false;
+
+ if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
+ return;
+
+ clear_bit(NVME_NS_READY, &ns->flags);
+ set_capacity(ns->disk, 0);
+ nvme_fault_inject_fini(&ns->fault_inject);
+
+ /*
+ * Ensure that !NVME_NS_READY is seen by other threads to prevent
+ * this ns going back into current_path.
+ */
+ synchronize_srcu(&ns->head->srcu);
+
+ /* wait for concurrent submissions */
+ if (nvme_mpath_clear_current_path(ns))
+ synchronize_srcu(&ns->head->srcu);
+
+ mutex_lock(&ns->ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
+ mutex_unlock(&ns->ctrl->subsys->lock);
+
+ /* guarantee not available in head->list */
+ synchronize_srcu(&ns->head->srcu);
+
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+ del_gendisk(ns->disk);
+
+ down_write(&ns->ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ns->ctrl->namespaces_rwsem);
+
+ if (last_path)
+ nvme_mpath_shutdown_disk(ns->head);
+ nvme_put_ns(ns);
+}
+
+static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
+{
+ struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
+
+ if (ns) {
+ nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
+}
+
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
+ dev_err(ns->ctrl->device,
+ "identifiers changed for nsid %d\n", ns->head->ns_id);
+ goto out;
+ }
+
+ ret = nvme_update_ns_info(ns, info);
+out:
+ /*
+ * Only remove the namespace if we got a fatal error back from the
+ * device; otherwise ignore the error and just move on.
+ *
+ * TODO: we should probably schedule a delayed retry here.
+ */
+ if (ret > 0 && (ret & NVME_SC_DNR))
+ nvme_ns_remove(ns);
+}
+
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+ struct nvme_ns_info info = { .nsid = nsid };
+ struct nvme_ns *ns;
+ int ret;
+
+ if (nvme_identify_ns_descs(ctrl, &info))
+ return;
+
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
+ /*
+ * If available, try to use the Command Set Independent Identify
+ * Namespace data structure to find all the generic information that is
+ * needed to set up a namespace. If not, fall back to the legacy version.
+ */
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
+ ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
+ else
+ ret = nvme_ns_info_from_identify(ctrl, &info);
+
+ if (info.is_removed)
+ nvme_ns_remove_by_nsid(ctrl, nsid);
+
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (ret || !info.is_ready)
+ return;
+
+ ns = nvme_find_get_ns(ctrl, nsid);
+ if (ns) {
+ nvme_validate_ns(ns, &info);
+ nvme_put_ns(ns);
+ } else {
+ nvme_alloc_ns(ctrl, &info);
+ }
+}
+
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid)
+{
+ struct nvme_ns *ns, *next;
+ LIST_HEAD(rm_list);
+
+ down_write(&ctrl->namespaces_rwsem);
+ list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
+ if (ns->head->ns_id > nsid)
+ list_move_tail(&ns->list, &rm_list);
+ }
+ up_write(&ctrl->namespaces_rwsem);
+
+ list_for_each_entry_safe(ns, next, &rm_list, list)
+ nvme_ns_remove(ns);
+}
+
+static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
+{
+ const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
+ __le32 *ns_list;
+ u32 prev = 0;
+ int ret = 0, i;
+
+ ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!ns_list)
+ return -ENOMEM;
+
+ for (;;) {
+ struct nvme_command cmd = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
+ .identify.nsid = cpu_to_le32(prev),
+ };
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
+ NVME_IDENTIFY_DATA_SIZE);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "Identify NS List failed (status=0x%x)\n", ret);
+ goto free;
+ }
+
+ for (i = 0; i < nr_entries; i++) {
+ u32 nsid = le32_to_cpu(ns_list[i]);
+
+ if (!nsid) /* end of the list? */
+ goto out;
+ nvme_scan_ns(ctrl, nsid);
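+ /*
+ * NSIDs between the previous entry and this one are no
+ * longer in the active list; remove their namespaces.
+ */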
+ while (++prev < nsid)
+ nvme_ns_remove_by_nsid(ctrl, prev);
+ }
+ }
+ out:
+ nvme_remove_invalid_namespaces(ctrl, prev);
+ free:
+ kfree(ns_list);
+ return ret;
+}
+
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
+{
+ struct nvme_id_ctrl *id;
+ u32 nn, i;
+
+ if (nvme_identify_ctrl(ctrl, &id))
+ return;
+ nn = le32_to_cpu(id->nn);
+ kfree(id);
+
+ for (i = 1; i <= nn; i++)
+ nvme_scan_ns(ctrl, i);
+
+ nvme_remove_invalid_namespaces(ctrl, nn);
+}
+
+static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
+{
+ size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
+ __le32 *log;
+ int error;
+
+ log = kzalloc(log_size, GFP_KERNEL);
+ if (!log)
+ return;
+
+ /*
+ * We need to read the log to clear the AEN, but we don't want to rely
+ * on it for the changed namespace information as userspace could have
+ * raced with us in reading the log page, which could cause us to miss
+ * updates.
+ */
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
+ NVME_CSI_NVM, log, log_size, 0);
+ if (error)
+ dev_warn(ctrl->device,
+ "reading changed ns log failed: %d\n", error);
+
+ kfree(log);
+}
+
+static void nvme_scan_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, scan_work);
+ int ret;
+
+ /* No tagset on a live ctrl means IO queues could not be created */
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
+ return;
+
+ /*
+ * Identify controller limits can change at controller reset due to a
+ * new firmware download; even though this is not common, we cannot
+ * ignore such a scenario. The controller's non-MDTS limits are reported
+ * in units of logical blocks, which depend on the format of the attached
+ * namespace. Hence re-read the limits at the time of ns allocation.
+ */
+ ret = nvme_init_non_mdts_limits(ctrl);
+ if (ret < 0) {
+ dev_warn(ctrl->device,
+ "reading non-mdts-limits failed: %d\n", ret);
+ return;
+ }
+
+ if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
+ dev_info(ctrl->device, "rescanning namespaces.\n");
+ nvme_clear_changed_ns_log(ctrl);
+ }
+
+ mutex_lock(&ctrl->scan_lock);
+ if (nvme_ctrl_limited_cns(ctrl)) {
+ nvme_scan_ns_sequential(ctrl);
+ } else {
+ /*
+ * Fall back to sequential scan if DNR is set to handle broken
+ * devices which should support Identify NS List (as per the VS
+ * they report) but don't actually support it.
+ */
+ ret = nvme_scan_ns_list(ctrl);
+ if (ret > 0 && ret & NVME_SC_DNR)
+ nvme_scan_ns_sequential(ctrl);
+ }
+ mutex_unlock(&ctrl->scan_lock);
+}
+
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns, *next;
+ LIST_HEAD(ns_list);
+
+ /*
+ * make sure to requeue I/O to all namespaces as these
+ * might result from the scan itself and must complete
+ * for the scan_work to make progress
+ */
+ nvme_mpath_clear_ctrl_paths(ctrl);
+
+ /*
+ * Unquiesce io queues so any pending IO won't hang, especially
+ * those submitted from scan work
+ */
+ nvme_unquiesce_io_queues(ctrl);
+
+ /* prevent racing with ns scanning */
+ flush_work(&ctrl->scan_work);
+
+ /*
+ * The DEAD state indicates the controller was not gracefully
+ * disconnected. In that case, we won't be able to flush any data while
+ * removing the namespaces' disks; fail all the queues now to avoid
+ * potentially having to clean up the failed sync later.
+ */
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
+ nvme_mark_namespaces_dead(ctrl);
+
+ /* this is a no-op when called from the controller reset handler */
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
+
+ down_write(&ctrl->namespaces_rwsem);
+ list_splice_init(&ctrl->namespaces, &ns_list);
+ up_write(&ctrl->namespaces_rwsem);
+
+ list_for_each_entry_safe(ns, next, &ns_list, list)
+ nvme_ns_remove(ns);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
+
+static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct nvme_ctrl *ctrl =
+ container_of(dev, struct nvme_ctrl, ctrl_device);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ret;
+
+ ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
+ if (ret)
+ return ret;
+
+ if (opts) {
+ ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_TRSVCID=%s",
+ opts->trsvcid ?: "none");
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
+ opts->host_traddr ?: "none");
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
+ opts->host_iface ?: "none");
+ }
+ return ret;
+}
+
+static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
+{
+ char *envp[2] = { envdata, NULL };
+
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
+{
+ char *envp[2] = { NULL, NULL };
+ u32 aen_result = ctrl->aen_result;
+
+ ctrl->aen_result = 0;
+ if (!aen_result)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
+ if (!envp[0])
+ return;
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+ kfree(envp[0]);
+}
+
+static void nvme_async_event_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, async_event_work);
+
+ nvme_aen_uevent(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
+}
+
+static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
+{
+ u32 csts;
+
+ if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
+ return false;
+
+ if (csts == ~0)
+ return false;
+
+ return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
+}
+
+static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
+{
+ struct nvme_fw_slot_info_log *log;
+
+ log = kmalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ return;
+
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
+ log, sizeof(*log), 0))
+ dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+ kfree(log);
+}
+
+static void nvme_fw_act_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(work,
+ struct nvme_ctrl, fw_act_work);
+ unsigned long fw_act_timeout;
+
+ nvme_auth_stop(ctrl);
+
+ if (ctrl->mtfa)
+ fw_act_timeout = jiffies +
+ msecs_to_jiffies(ctrl->mtfa * 100);
+ else
+ fw_act_timeout = jiffies +
+ msecs_to_jiffies(admin_timeout * 1000);
+
+ nvme_quiesce_io_queues(ctrl);
+ while (nvme_ctrl_pp_status(ctrl)) {
+ if (time_after(jiffies, fw_act_timeout)) {
+ dev_warn(ctrl->device,
+ "Fw activation timeout, reset controller\n");
+ nvme_try_sched_reset(ctrl);
+ return;
+ }
+ msleep(100);
+ }
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+ return;
+
+ nvme_unquiesce_io_queues(ctrl);
+ /* read FW slot information to clear the AER */
+ nvme_get_fw_slot_info(ctrl);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
+}
+
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+{
+ u32 aer_notice_type = nvme_aer_subtype(result);
+ bool requeue = true;
+
+ switch (aer_notice_type) {
+ case NVME_AER_NOTICE_NS_CHANGED:
+ set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
+ nvme_queue_scan(ctrl);
+ break;
+ case NVME_AER_NOTICE_FW_ACT_STARTING:
+ /*
+ * We are (ab)using the RESETTING state to prevent subsequent
+ * recovery actions from interfering with the controller's
+ * firmware activation.
+ */
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ requeue = false;
+ queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
+ break;
+#ifdef CONFIG_NVME_MULTIPATH
+ case NVME_AER_NOTICE_ANA:
+ if (!ctrl->ana_log_buf)
+ break;
+ queue_work(nvme_wq, &ctrl->ana_work);
+ break;
+#endif
+ case NVME_AER_NOTICE_DISC_CHANGED:
+ ctrl->aen_result = result;
+ break;
+ default:
+ dev_warn(ctrl->device, "async event result %08x\n", result);
+ }
+ return requeue;
+}
+
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ volatile union nvme_result *res)
+{
+ u32 result = le32_to_cpu(res->u32);
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
+ bool requeue = true;
+
+ if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
+ return;
+
+ trace_nvme_async_event(ctrl, result);
+ switch (aer_type) {
+ case NVME_AER_NOTICE:
+ requeue = nvme_handle_aen_notice(ctrl, result);
+ break;
+ case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
+ case NVME_AER_SMART:
+ case NVME_AER_CSS:
+ case NVME_AER_VS:
+ ctrl->aen_result = result;
+ break;
+ default:
+ break;
+ }
+
+ if (requeue)
+ queue_work(nvme_wq, &ctrl->async_event_work);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = BLK_MQ_F_NO_SCHED;
+ if (ctrl->ops->flags & NVME_F_BLOCKING)
+ set->flags |= BLK_MQ_F_BLOCKING;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ ctrl->admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+ goto out_free_tagset;
+ }
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->fabrics_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->fabrics_q)) {
+ ret = PTR_ERR(ctrl->fabrics_q);
+ goto out_cleanup_admin_q;
+ }
+ }
+
+ ctrl->admin_tagset = set;
+ return 0;
+
+out_cleanup_admin_q:
+ blk_mq_destroy_queue(ctrl->admin_q);
+ blk_put_queue(ctrl->admin_q);
+out_free_tagset:
+ blk_mq_free_tag_set(set);
+ ctrl->admin_q = NULL;
+ ctrl->fabrics_q = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+ blk_mq_destroy_queue(ctrl->admin_q);
+ blk_put_queue(ctrl->admin_q);
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ blk_mq_destroy_queue(ctrl->fabrics_q);
+ blk_put_queue(ctrl->fabrics_q);
+ }
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int nr_maps,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
+ /*
+ * Some Apple controllers require tags to be unique across admin and
+ * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
+ */
+ if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
+ else if (ctrl->ops->flags & NVME_F_FABRICS)
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (ctrl->ops->flags & NVME_F_BLOCKING)
+ set->flags |= BLK_MQ_F_BLOCKING;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = ctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nr_maps;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->connect_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
+ }
+ blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
+ ctrl->connect_q);
+ }
+
+ ctrl->tagset = set;
+ return 0;
+
+out_free_tag_set:
+ blk_mq_free_tag_set(set);
+ ctrl->connect_q = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ blk_mq_destroy_queue(ctrl->connect_q);
+ blk_put_queue(ctrl->connect_q);
+ }
+ blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
+void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+ nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_stop_keep_alive(ctrl);
+ nvme_stop_failfast_work(ctrl);
+ flush_work(&ctrl->async_event_work);
+ cancel_work_sync(&ctrl->fw_act_work);
+ if (ctrl->ops->stop_ctrl)
+ ctrl->ops->stop_ctrl(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
+
+void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+{
+ nvme_start_keep_alive(ctrl);
+
+ nvme_enable_aen(ctrl);
+
+ /*
+ * Persistent discovery controllers need to send an indication to
+ * userspace to re-read the discovery log page and learn about possible
+ * changes that were missed. We identify persistent discovery controllers
+ * by checking that they have started once before, and hence are
+ * reconnecting.
+ */
+ if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+ nvme_discovery_ctrl(ctrl))
+ nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
+ if (ctrl->queue_count > 1) {
+ nvme_queue_scan(ctrl);
+ nvme_unquiesce_io_queues(ctrl);
+ nvme_mpath_update(ctrl);
+ }
+
+ nvme_change_uevent(ctrl, "NVME_EVENT=connected");
+ set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
+}
+EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
+{
+ nvme_hwmon_exit(ctrl);
+ nvme_fault_inject_fini(&ctrl->fault_inject);
+ dev_pm_qos_hide_latency_tolerance(ctrl->device);
+ cdev_device_del(&ctrl->cdev, ctrl->device);
+ nvme_put_ctrl(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
+
+static void nvme_free_cels(struct nvme_ctrl *ctrl)
+{
+ struct nvme_effects_log *cel;
+ unsigned long i;
+
+ xa_for_each(&ctrl->cels, i, cel) {
+ xa_erase(&ctrl->cels, i);
+ kfree(cel);
+ }
+
+ xa_destroy(&ctrl->cels);
+}
+
+static void nvme_free_ctrl(struct device *dev)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(dev, struct nvme_ctrl, ctrl_device);
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ if (!subsys || ctrl->instance != subsys->instance)
+ ida_free(&nvme_instance_ida, ctrl->instance);
+
+ nvme_free_cels(ctrl);
+ nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
+ __free_page(ctrl->discard_page);
+ free_opal_dev(ctrl->opal_dev);
+
+ if (subsys) {
+ mutex_lock(&nvme_subsystems_lock);
+ list_del(&ctrl->subsys_entry);
+ sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
+ mutex_unlock(&nvme_subsystems_lock);
+ }
+
+ ctrl->ops->free_ctrl(ctrl);
+
+ if (subsys)
+ nvme_put_subsystem(subsys);
+}
+
+/*
+ * Initialize an NVMe controller structure. This needs to be called during
+ * the earliest initialization so that we have the initialized structure
+ * around during probing.
+ */
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ const struct nvme_ctrl_ops *ops, unsigned long quirks)
+{
+ int ret;
+
+ WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+ spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->scan_lock);
+ INIT_LIST_HEAD(&ctrl->namespaces);
+ xa_init(&ctrl->cels);
+ init_rwsem(&ctrl->namespaces_rwsem);
+ ctrl->dev = dev;
+ ctrl->ops = ops;
+ ctrl->quirks = quirks;
+ ctrl->numa_node = NUMA_NO_NODE;
+ INIT_WORK(&ctrl->scan_work, nvme_scan_work);
+ INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
+ INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
+ INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+ init_waitqueue_head(&ctrl->state_wq);
+
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
+ memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+ ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+
+ BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
+ PAGE_SIZE);
+ ctrl->discard_page = alloc_page(GFP_KERNEL);
+ if (!ctrl->discard_page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+ ctrl->instance = ret;
+
+ device_initialize(&ctrl->ctrl_device);
+ ctrl->device = &ctrl->ctrl_device;
+ ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
+ ctrl->instance);
+ ctrl->device->class = nvme_class;
+ ctrl->device->parent = ctrl->dev;
+ if (ops->dev_attr_groups)
+ ctrl->device->groups = ops->dev_attr_groups;
+ else
+ ctrl->device->groups = nvme_dev_attr_groups;
+ ctrl->device->release = nvme_free_ctrl;
+ dev_set_drvdata(ctrl->device, ctrl);
+ ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
+ if (ret)
+ goto out_release_instance;
+
+ nvme_get_ctrl(ctrl);
+ cdev_init(&ctrl->cdev, &nvme_dev_fops);
+ ctrl->cdev.owner = ops->module;
+ ret = cdev_device_add(&ctrl->cdev, ctrl->device);
+ if (ret)
+ goto out_free_name;
+
+ /*
+ * Initialize latency tolerance controls. The sysfs files won't
+ * be visible to userspace unless the device actually supports APST.
+ */
+ ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
+ dev_pm_qos_update_user_latency_tolerance(ctrl->device,
+ min(default_ps_max_latency_us, (unsigned long)S32_MAX));
+
+ nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+ nvme_mpath_init_ctrl(ctrl);
+ ret = nvme_auth_init_ctrl(ctrl);
+ if (ret)
+ goto out_free_cdev;
+
+ return 0;
+out_free_cdev:
+ nvme_fault_inject_fini(&ctrl->fault_inject);
+ dev_pm_qos_hide_latency_tolerance(ctrl->device);
+ cdev_device_del(&ctrl->cdev, ctrl->device);
+out_free_name:
+ nvme_put_ctrl(ctrl);
+ kfree_const(ctrl->device->kobj.name);
+out_release_instance:
+ ida_free(&nvme_instance_ida, ctrl->instance);
+out:
+ if (ctrl->discard_page)
+ __free_page(ctrl->discard_page);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
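+
+/*
+ * Illustrative sketch (not part of this file): a transport driver embeds
+ * struct nvme_ctrl in its own controller structure and calls nvme_init_ctrl()
+ * early in its probe path, before issuing any admin commands, roughly the way
+ * the PCIe driver does (error unwinding elided):
+ *
+ *	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks);
+ *	if (ret)
+ *		return ret;
+ *
+ * On teardown the driver calls nvme_uninit_ctrl(&dev->ctrl) before dropping
+ * its final reference.
+ */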
+
+/* let I/O to all namespaces fail in preparation for surprise removal */
+void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_mark_disk_dead(ns->disk);
+ up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
+
+void nvme_unfreeze(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_mq_unfreeze_queue(ns->queue);
+ up_read(&ctrl->namespaces_rwsem);
+ clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
+}
+EXPORT_SYMBOL_GPL(nvme_unfreeze);
+
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
+ if (timeout <= 0)
+ break;
+ }
+ up_read(&ctrl->namespaces_rwsem);
+ return timeout;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
+
+void nvme_wait_freeze(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_mq_freeze_queue_wait(ns->queue);
+ up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_wait_freeze);
+
+void nvme_start_freeze(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_freeze_queue_start(ns->queue);
+ up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_start_freeze);
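+
+/*
+ * Minimal usage sketch (not part of this file): transports typically pair the
+ * freeze helpers above around a controller reset; names and error handling
+ * are elided, see the PCIe, RDMA and TCP drivers for complete examples:
+ *
+ *	nvme_start_freeze(ctrl);
+ *	nvme_quiesce_io_queues(ctrl);
+ *	... tear down and re-establish the I/O queues ...
+ *	nvme_unquiesce_io_queues(ctrl);
+ *	if (nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT) <= 0)
+ *		... not all queues froze in time, handle the failure ...
+ *	nvme_unfreeze(ctrl);
+ */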
+
+void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->tagset)
+ return;
+ if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
+ blk_mq_quiesce_tagset(ctrl->tagset);
+ else
+ blk_mq_wait_quiesce_done(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues);
+
+void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->tagset)
+ return;
+ if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
+ blk_mq_unquiesce_tagset(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);
+
+void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ else
+ blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
+}
+EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);
+
+void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
+
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_sync_queue(ns->queue);
+ up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
+
+void nvme_sync_queues(struct nvme_ctrl *ctrl)
+{
+ nvme_sync_io_queues(ctrl);
+ if (ctrl->admin_q)
+ blk_sync_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_sync_queues);
+
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
+{
+ if (file->f_op != &nvme_dev_fops)
+ return NULL;
+ return file->private_data;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
+
+/*
+ * Check we didn't inadvertently grow the command structure sizes:
+ */
+static inline void _nvme_check_size(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
+ NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
+ BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
+}
+
+static int __init nvme_core_init(void)
+{
+ int result = -ENOMEM;
+
+ _nvme_check_size();
+
+ nvme_wq = alloc_workqueue("nvme-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_wq)
+ goto out;
+
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_reset_wq)
+ goto destroy_wq;
+
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_delete_wq)
+ goto destroy_reset_wq;
+
+ result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
+ NVME_MINORS, "nvme");
+ if (result < 0)
+ goto destroy_delete_wq;
+
+ nvme_class = class_create("nvme");
+ if (IS_ERR(nvme_class)) {
+ result = PTR_ERR(nvme_class);
+ goto unregister_chrdev;
+ }
+ nvme_class->dev_uevent = nvme_class_uevent;
+
+ nvme_subsys_class = class_create("nvme-subsystem");
+ if (IS_ERR(nvme_subsys_class)) {
+ result = PTR_ERR(nvme_subsys_class);
+ goto destroy_class;
+ }
+
+ result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
+ "nvme-generic");
+ if (result < 0)
+ goto destroy_subsys_class;
+
+ nvme_ns_chr_class = class_create("nvme-generic");
+ if (IS_ERR(nvme_ns_chr_class)) {
+ result = PTR_ERR(nvme_ns_chr_class);
+ goto unregister_generic_ns;
+ }
+
+ result = nvme_init_auth();
+ if (result)
+ goto destroy_ns_chr;
+ return 0;
+
+destroy_ns_chr:
+ class_destroy(nvme_ns_chr_class);
+unregister_generic_ns:
+ unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
+destroy_subsys_class:
+ class_destroy(nvme_subsys_class);
+destroy_class:
+ class_destroy(nvme_class);
+unregister_chrdev:
+ unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+ destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+ destroy_workqueue(nvme_reset_wq);
+destroy_wq:
+ destroy_workqueue(nvme_wq);
+out:
+ return result;
+}
+
+static void __exit nvme_core_exit(void)
+{
+ nvme_exit_auth();
+ class_destroy(nvme_ns_chr_class);
+ class_destroy(nvme_subsys_class);
+ class_destroy(nvme_class);
+ unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
+ unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
+ destroy_workqueue(nvme_delete_wq);
+ destroy_workqueue(nvme_reset_wq);
+ destroy_workqueue(nvme_wq);
+ ida_destroy(&nvme_ns_chr_minor_ida);
+ ida_destroy(&nvme_instance_ida);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+module_init(nvme_core_init);
+module_exit(nvme_core_exit);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
new file mode 100644
index 0000000000..92ba315cfe
--- /dev/null
+++ b/drivers/nvme/host/fabrics.c
@@ -0,0 +1,1428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "nvme.h"
+#include "fabrics.h"
+
+static LIST_HEAD(nvmf_transports);
+static DECLARE_RWSEM(nvmf_transports_rwsem);
+
+static LIST_HEAD(nvmf_hosts);
+static DEFINE_MUTEX(nvmf_hosts_mutex);
+
+static struct nvmf_host *nvmf_default_host;
+
+static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
+{
+ struct nvmf_host *host;
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ kref_init(&host->ref);
+ uuid_copy(&host->id, id);
+ strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+
+ return host;
+}
+
+static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
+{
+ struct nvmf_host *host;
+
+ mutex_lock(&nvmf_hosts_mutex);
+
+ /*
+ * We have defined a host as how it is perceived by the target.
+ * Therefore, we don't allow different Host NQNs with the same Host ID.
+ * Similarly, we do not allow the usage of the same Host NQN with
+ * different Host IDs. This keeps host identification unambiguous.
+ */
+ list_for_each_entry(host, &nvmf_hosts, list) {
+ bool same_hostnqn = !strcmp(host->nqn, hostnqn);
+ bool same_hostid = uuid_equal(&host->id, id);
+
+ if (same_hostnqn && same_hostid) {
+ kref_get(&host->ref);
+ goto out_unlock;
+ }
+ if (same_hostnqn) {
+ pr_err("found same hostnqn %s but different hostid %pUb\n",
+ hostnqn, id);
+ host = ERR_PTR(-EINVAL);
+ goto out_unlock;
+ }
+ if (same_hostid) {
+ pr_err("found same hostid %pUb but different hostnqn %s\n",
+ id, hostnqn);
+ host = ERR_PTR(-EINVAL);
+ goto out_unlock;
+ }
+ }
+
+ host = nvmf_host_alloc(hostnqn, id);
+ if (!host) {
+ host = ERR_PTR(-ENOMEM);
+ goto out_unlock;
+ }
+
+ list_add_tail(&host->list, &nvmf_hosts);
+out_unlock:
+ mutex_unlock(&nvmf_hosts_mutex);
+ return host;
+}
+
+static struct nvmf_host *nvmf_host_default(void)
+{
+ struct nvmf_host *host;
+ char nqn[NVMF_NQN_SIZE];
+ uuid_t id;
+
+ uuid_gen(&id);
+ snprintf(nqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
+
+ host = nvmf_host_alloc(nqn, &id);
+ if (!host)
+ return NULL;
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_add_tail(&host->list, &nvmf_hosts);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ return host;
+}
+
+static void nvmf_host_destroy(struct kref *ref)
+{
+ struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_del(&host->list);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ kfree(host);
+}
+
+static void nvmf_host_put(struct nvmf_host *host)
+{
+ if (host)
+ kref_put(&host->ref, nvmf_host_destroy);
+}
+
+/**
+ * nvmf_get_address() - Get address/port
+ * @ctrl: Host NVMe controller instance from which we got the address
+ * @buf: OUTPUT parameter that will contain the address/port
+ * @size: buffer size
+ */
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ int len = 0;
+
+ if (ctrl->opts->mask & NVMF_OPT_TRADDR)
+ len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
+ if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
+ len += scnprintf(buf + len, size - len, "%strsvcid=%s",
+ (len) ? "," : "", ctrl->opts->trsvcid);
+ if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
+ len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
+ (len) ? "," : "", ctrl->opts->host_traddr);
+ if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
+ len += scnprintf(buf + len, size - len, "%shost_iface=%s",
+ (len) ? "," : "", ctrl->opts->host_iface);
+ len += scnprintf(buf + len, size - len, "\n");
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(nvmf_get_address);
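+
+/*
+ * For a TCP controller created with illustrative values, the string built
+ * above (typically exposed through the controller's "address" sysfs
+ * attribute) would look like:
+ *
+ *	traddr=192.168.1.10,trsvcid=4420,host_traddr=192.168.1.20
+ */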
+
+/**
+ * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 32-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ struct nvme_command cmd = { };
+ union nvme_result res;
+ int ret;
+
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(res.u64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read32);
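+
+/*
+ * Illustrative use (sketch only): fabrics transports plug this helper into
+ * their nvme_ctrl_ops as the ->reg_read32 callback, and the core then uses
+ * it to poll fabrics properties such as the Controller Status:
+ *
+ *	u32 csts;
+ *
+ *	if (!nvmf_reg_read32(ctrl, NVME_REG_CSTS, &csts) &&
+ *	    (csts & NVME_CSTS_RDY))
+ *		... the controller reports ready ...
+ */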
+
+/**
+ * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 64-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ struct nvme_command cmd = { };
+ union nvme_result res;
+ int ret;
+
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.attrib = 1;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(res.u64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read64);
+
+/**
+ * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: Input parameter that contains the value to be
+ * written to the property.
+ *
+ * Used by the NVMe host system to write a 32-bit capsule property value
+ * to an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful write
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ struct nvme_command cmd = { };
+ int ret;
+
+ cmd.prop_set.opcode = nvme_fabrics_command;
+ cmd.prop_set.fctype = nvme_fabrics_type_property_set;
+ cmd.prop_set.attrib = 0;
+ cmd.prop_set.offset = cpu_to_le32(off);
+ cmd.prop_set.value = cpu_to_le64(val);
+
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
+ NVME_QID_ANY, 0, 0);
+ if (unlikely(ret))
+ dev_err(ctrl->device,
+ "Property Set error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_write32);
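+
+/*
+ * Illustrative use (sketch only): through a transport's ->reg_write32
+ * callback the core enables a fabrics controller by writing the Controller
+ * Configuration property, e.g.:
+ *
+ *	ctrl->ctrl_config |= NVME_CC_ENABLE;
+ *	ret = nvmf_reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ */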
+
+/**
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
+ * connect() errors.
+ * @ctrl: The specific /dev/nvmeX device that had the error.
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ * @offset: For use with the NVMe error code
+ * NVME_SC_CONNECT_INVALID_PARAM.
+ * @cmd: This is the SQE portion of a submission capsule.
+ * @data: This is the "Data" portion of a submission capsule.
+ */
+static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
+ int errval, int offset, struct nvme_command *cmd,
+ struct nvmf_connect_data *data)
+{
+ int err_sctype = errval & ~NVME_SC_DNR;
+
+ if (errval < 0) {
+ dev_err(ctrl->device,
+ "Connect command failed, errno: %d\n", errval);
+ return;
+ }
+
+ switch (err_sctype) {
+ case NVME_SC_CONNECT_INVALID_PARAM:
+ if (offset >> 16) {
+ char *inv_data = "Connect Invalid Data Parameter";
+
+ switch (offset & 0xffff) {
+ case (offsetof(struct nvmf_connect_data, cntlid)):
+ dev_err(ctrl->device,
+ "%s, cntlid: %d\n",
+ inv_data, data->cntlid);
+ break;
+ case (offsetof(struct nvmf_connect_data, hostnqn)):
+ dev_err(ctrl->device,
+ "%s, hostnqn \"%s\"\n",
+ inv_data, data->hostnqn);
+ break;
+ case (offsetof(struct nvmf_connect_data, subsysnqn)):
+ dev_err(ctrl->device,
+ "%s, subsysnqn \"%s\"\n",
+ inv_data, data->subsysnqn);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_data, offset & 0xffff);
+ break;
+ }
+ } else {
+ char *inv_sqe = "Connect Invalid SQE Parameter";
+
+ switch (offset) {
+ case (offsetof(struct nvmf_connect_command, qid)):
+ dev_err(ctrl->device,
+ "%s, qid %d\n",
+ inv_sqe, cmd->connect.qid);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_sqe, offset);
+ }
+ }
+ break;
+ case NVME_SC_CONNECT_INVALID_HOST:
+ dev_err(ctrl->device,
+ "Connect for subsystem %s is not allowed, hostnqn: %s\n",
+ data->subsysnqn, data->hostnqn);
+ break;
+ case NVME_SC_CONNECT_CTRL_BUSY:
+ dev_err(ctrl->device,
+ "Connect command failed: controller is busy or not available\n");
+ break;
+ case NVME_SC_CONNECT_FORMAT:
+ dev_err(ctrl->device,
+ "Connect incompatible format: %d",
+ cmd->connect.recfmt);
+ break;
+ case NVME_SC_HOST_PATH_ERROR:
+ dev_err(ctrl->device,
+ "Connect command failed: host path error\n");
+ break;
+ case NVME_SC_AUTH_REQUIRED:
+ dev_err(ctrl->device,
+ "Connect command failed: authentication required\n");
+ break;
+ default:
+ dev_err(ctrl->device,
+ "Connect command failed, error wo/DNR bit: %d\n",
+ err_sctype);
+ break;
+ }
+}
+
+static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
+ u16 cntlid)
+{
+ struct nvmf_connect_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ uuid_copy(&data->hostid, &ctrl->opts->host->id);
+ data->cntlid = cpu_to_le16(cntlid);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ return data;
+}
+
+static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
+ struct nvme_command *cmd)
+{
+ cmd->connect.opcode = nvme_fabrics_command;
+ cmd->connect.fctype = nvme_fabrics_type_connect;
+ cmd->connect.qid = cpu_to_le16(qid);
+
+ if (qid) {
+ cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ } else {
+ cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+
+ /*
+ * The Connect command carries the keep-alive timeout in
+ * milliseconds; ctrl->kato is kept in seconds.
+ */
+ cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
+ }
+
+ if (ctrl->opts->disable_sqflow)
+ cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+}
+
+/**
+ * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to request
+ * a new NVMe controller allocation on the target
+ * system and establish an NVMe Admin connection to
+ * that controller.
+ *
+ * This function enables an NVMe host device to request a new allocation of
+ * an NVMe controller resource on a target system as well as establish a
+ * fabrics-protocol connection of the NVMe Admin queue between the
+ * host system device and the allocated NVMe controller on the
+ * target system via an NVMe Fabrics "Connect" command.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ *
+ */
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command cmd = { };
+ union nvme_result res;
+ struct nvmf_connect_data *data;
+ int ret;
+ u32 result;
+
+ nvmf_connect_cmd_prep(ctrl, 0, &cmd);
+
+ data = nvmf_connect_data_prep(ctrl, 0xffff);
+ if (!data)
+ return -ENOMEM;
+
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
+ data, sizeof(*data), NVME_QID_ANY, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
+ &cmd, data);
+ goto out_free_data;
+ }
+
+ result = le32_to_cpu(res.u32);
+ ctrl->cntlid = result & 0xFFFF;
+ if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
+ /* Secure concatenation is not implemented */
+ if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+ dev_warn(ctrl->device,
+ "qid 0: secure concatenation is not supported\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication setup failed\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ else
+ dev_info(ctrl->device,
+ "qid 0: authenticated\n");
+ }
+out_free_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
+
+/**
+ * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to establish an
+ * NVMe I/O queue connection to the already allocated NVMe
+ * controller on the target system.
+ * @qid: NVMe I/O queue number for the new I/O connection between
+ * host and target (note qid == 0 is illegal as this is
+ * the Admin queue, per NVMe standard).
+ *
+ * This function issues a fabrics-protocol connection
+ * of an NVMe I/O queue (via the NVMe Fabrics "Connect" command)
+ * between the host system device and the allocated NVMe controller
+ * on the target system.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+{
+ struct nvme_command cmd = { };
+ struct nvmf_connect_data *data;
+ union nvme_result res;
+ int ret;
+ u32 result;
+
+ nvmf_connect_cmd_prep(ctrl, qid, &cmd);
+
+ data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
+ if (!data)
+ return -ENOMEM;
+
+ ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
+ data, sizeof(*data), qid, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
+ &cmd, data);
+ }
+ result = le32_to_cpu(res.u32);
+ if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
+ /* Secure concatenation is not implemented */
+ if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+ dev_warn(ctrl->device,
+ "qid 0: secure concatenation is not supported\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: authentication setup failed\n", qid);
+ ret = NVME_SC_AUTH_REQUIRED;
+ } else {
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed\n", qid);
+ }
+ }
+out_free_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
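+
+/*
+ * Minimal dispatch sketch (not part of this file): transports such as RDMA
+ * and TCP pick between the two Connect helpers when (re)starting a queue:
+ *
+ *	if (qid)
+ *		ret = nvmf_connect_io_queue(ctrl, qid);
+ *	else
+ *		ret = nvmf_connect_admin_queue(ctrl);
+ */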
+
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->opts->max_reconnects == -1 ||
+ ctrl->nr_reconnects < ctrl->opts->max_reconnects)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
+
+/**
+ * nvmf_register_transport() - NVMe Fabrics Library registration function.
+ * @ops: Transport ops instance to be registered to the
+ * common fabrics library.
+ *
+ * API function that registers the type of specific transport fabric
+ * being implemented to the common NVMe fabrics library. Part of
+ * the overall init sequence of starting up a fabrics driver.
+ */
+int nvmf_register_transport(struct nvmf_transport_ops *ops)
+{
+ if (!ops->create_ctrl)
+ return -EINVAL;
+
+ down_write(&nvmf_transports_rwsem);
+ list_add_tail(&ops->entry, &nvmf_transports);
+ up_write(&nvmf_transports_rwsem);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmf_register_transport);
+
+/**
+ * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
+ * @ops: Transport ops instance to be unregistered from the
+ * common fabrics library.
+ *
+ * Fabrics API function that unregisters the type of specific transport
+ * fabric being implemented from the common NVMe fabrics library.
+ * Part of the overall exit sequence of unloading the implemented driver.
+ */
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
+{
+ down_write(&nvmf_transports_rwsem);
+ list_del(&ops->entry);
+ up_write(&nvmf_transports_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
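+
+/*
+ * Registration sketch for a hypothetical "foo" transport (illustrative only,
+ * not part of this file): the ops structure is statically allocated in the
+ * transport module and registered/unregistered at module init/exit:
+ *
+ *	static struct nvmf_transport_ops nvme_foo_transport = {
+ *		.name		= "foo",
+ *		.module		= THIS_MODULE,
+ *		.required_opts	= NVMF_OPT_TRADDR,
+ *		.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_NR_IO_QUEUES,
+ *		.create_ctrl	= nvme_foo_create_ctrl,
+ *	};
+ *
+ *	static int __init nvme_foo_init_module(void)
+ *	{
+ *		return nvmf_register_transport(&nvme_foo_transport);
+ *	}
+ *
+ *	static void __exit nvme_foo_cleanup_module(void)
+ *	{
+ *		nvmf_unregister_transport(&nvme_foo_transport);
+ *	}
+ */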
+
+static struct nvmf_transport_ops *nvmf_lookup_transport(
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvmf_transport_ops *ops;
+
+ lockdep_assert_held(&nvmf_transports_rwsem);
+
+ list_for_each_entry(ops, &nvmf_transports, entry) {
+ if (strcmp(ops->name, opts->transport) == 0)
+ return ops;
+ }
+
+ return NULL;
+}
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_TRANSPORT, "transport=%s" },
+ { NVMF_OPT_TRADDR, "traddr=%s" },
+ { NVMF_OPT_TRSVCID, "trsvcid=%s" },
+ { NVMF_OPT_NQN, "nqn=%s" },
+ { NVMF_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { NVMF_OPT_NR_IO_QUEUES, "nr_io_queues=%d" },
+ { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" },
+ { NVMF_OPT_CTRL_LOSS_TMO, "ctrl_loss_tmo=%d" },
+ { NVMF_OPT_KATO, "keep_alive_tmo=%d" },
+ { NVMF_OPT_HOSTNQN, "hostnqn=%s" },
+ { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
+ { NVMF_OPT_HOST_IFACE, "host_iface=%s" },
+ { NVMF_OPT_HOST_ID, "hostid=%s" },
+ { NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
+ { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
+ { NVMF_OPT_HDR_DIGEST, "hdr_digest" },
+ { NVMF_OPT_DATA_DIGEST, "data_digest" },
+ { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
+ { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
+ { NVMF_OPT_TOS, "tos=%d" },
+ { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
+#ifdef CONFIG_NVME_HOST_AUTH
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
+#endif
+ { NVMF_OPT_ERR, NULL }
+};
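+
+/*
+ * For reference, a connection string built from these tokens (illustrative
+ * values only), as written to /dev/nvme-fabrics by tooling such as nvme-cli:
+ *
+ *	transport=tcp,traddr=192.168.1.10,trsvcid=4420,
+ *	nqn=nqn.2014-08.org.example:subsys1,
+ *	hostnqn=nqn.2014-08.org.example:host1,nr_io_queues=8,keep_alive_tmo=5
+ *
+ * (wrapped here for readability; the actual string is a single
+ * comma-separated line).
+ */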
+
+static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ size_t nqnlen = 0;
+ int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
+ uuid_t hostid;
+ char hostnqn[NVMF_NQN_SIZE];
+
+ /* Set defaults */
+ opts->queue_size = NVMF_DEF_QUEUE_SIZE;
+ opts->nr_io_queues = num_online_cpus();
+ opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
+ opts->kato = 0;
+ opts->duplicate_connect = false;
+ opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
+ opts->hdr_digest = false;
+ opts->data_digest = false;
+ opts->tos = -1; /* < 0 == use transport default */
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ /* use default host if not given by user space */
+ uuid_copy(&hostid, &nvmf_default_host->id);
+ strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_TRANSPORT:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->transport);
+ opts->transport = p;
+ break;
+ case NVMF_OPT_NQN:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->subsysnqn);
+ opts->subsysnqn = p;
+ nqnlen = strlen(opts->subsysnqn);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ opts->subsysnqn, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->discovery_nqn =
+ !(strcmp(opts->subsysnqn,
+ NVME_DISC_SUBSYS_NAME));
+ break;
+ case NVMF_OPT_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->traddr);
+ opts->traddr = p;
+ break;
+ case NVMF_OPT_TRSVCID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->trsvcid);
+ opts->trsvcid = p;
+ break;
+ case NVMF_OPT_QUEUE_SIZE:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < NVMF_MIN_QUEUE_SIZE ||
+ token > NVMF_MAX_QUEUE_SIZE) {
+ pr_err("Invalid queue_size %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_size = token;
+ break;
+ case NVMF_OPT_NR_IO_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid number of IOQs %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (opts->discovery_nqn) {
+ pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+ break;
+ }
+
+ opts->nr_io_queues = min_t(unsigned int,
+ num_online_cpus(), token);
+ break;
+ case NVMF_OPT_KATO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token < 0) {
+ pr_err("Invalid keep_alive_tmo %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ } else if (token == 0 && !opts->discovery_nqn) {
+ /* Allowed for debug */
+ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
+ }
+ opts->kato = token;
+ break;
+ case NVMF_OPT_CTRL_LOSS_TMO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token < 0)
+ pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
+ ctrl_loss_tmo = token;
+ break;
+ case NVMF_OPT_FAIL_FAST_TMO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token >= 0)
+ pr_warn("I/O fail on reconnect controller after %d sec\n",
+ token);
+ else
+ token = -1;
+
+ opts->fast_io_fail_tmo = token;
+ break;
+ case NVMF_OPT_HOSTNQN:
+ if (opts->host) {
+ pr_err("hostnqn already user-assigned: %s\n",
+ opts->host->nqn);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ nqnlen = strlen(p);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ p, NVMF_NQN_SIZE);
+ kfree(p);
+ ret = -EINVAL;
+ goto out;
+ }
+ strscpy(hostnqn, p, NVMF_NQN_SIZE);
+ kfree(p);
+ break;
+ case NVMF_OPT_RECONNECT_DELAY:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid reconnect_delay %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->reconnect_delay = token;
+ break;
+ case NVMF_OPT_HOST_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->host_traddr);
+ opts->host_traddr = p;
+ break;
+ case NVMF_OPT_HOST_IFACE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->host_iface);
+ opts->host_iface = p;
+ break;
+ case NVMF_OPT_HOST_ID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = uuid_parse(p, &hostid);
+ if (ret) {
+ pr_err("Invalid hostid %s\n", p);
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ kfree(p);
+ break;
+ case NVMF_OPT_DUP_CONNECT:
+ opts->duplicate_connect = true;
+ break;
+ case NVMF_OPT_DISABLE_SQFLOW:
+ opts->disable_sqflow = true;
+ break;
+ case NVMF_OPT_HDR_DIGEST:
+ opts->hdr_digest = true;
+ break;
+ case NVMF_OPT_DATA_DIGEST:
+ opts->data_digest = true;
+ break;
+ case NVMF_OPT_NR_WRITE_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid nr_write_queues %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_write_queues = token;
+ break;
+ case NVMF_OPT_NR_POLL_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid nr_poll_queues %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_poll_queues = token;
+ break;
+ case NVMF_OPT_TOS:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < 0) {
+ pr_err("Invalid type of service %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token > 255) {
+ pr_warn("Clamping type of service to 255\n");
+ token = 255;
+ }
+ opts->tos = token;
+ break;
+ case NVMF_OPT_DISCOVERY:
+ opts->discovery_nqn = true;
+ break;
+ case NVMF_OPT_DHCHAP_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = p;
+ break;
+ case NVMF_OPT_DHCHAP_CTRL_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = p;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (opts->discovery_nqn) {
+ opts->nr_io_queues = 0;
+ opts->nr_write_queues = 0;
+ opts->nr_poll_queues = 0;
+ opts->duplicate_connect = true;
+ } else {
+ if (!opts->kato)
+ opts->kato = NVME_DEFAULT_KATO;
+ }
+ if (ctrl_loss_tmo < 0) {
+ opts->max_reconnects = -1;
+ } else {
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
+ pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
+ opts->fast_io_fail_tmo, ctrl_loss_tmo);
+ }
+
+ opts->host = nvmf_host_add(hostnqn, &hostid);
+ if (IS_ERR(opts->host)) {
+ ret = PTR_ERR(opts->host);
+ opts->host = NULL;
+ goto out;
+ }
+
+out:
+ kfree(options);
+ return ret;
+}
+
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+ u32 io_queues[HCTX_MAX_TYPES])
+{
+ if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+ /*
+ * separate read/write queues
+ * hand out dedicated default queues only after we have
+ * sufficient read queues.
+ */
+ io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+ nr_io_queues -= io_queues[HCTX_TYPE_READ];
+ io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_write_queues, nr_io_queues);
+ nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /*
+ * shared read/write queues
+ * either no write queues were requested, or we don't have
+ * sufficient queue count to have dedicated default queues.
+ */
+ io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_io_queues, nr_io_queues);
+ nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+ }
+
+ if (opts->nr_poll_queues && nr_io_queues) {
+ /* map dedicated poll queues only if we have queues left */
+ io_queues[HCTX_TYPE_POLL] =
+ min(opts->nr_poll_queues, nr_io_queues);
+ }
+}
+EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
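+
+/*
+ * Worked example with illustrative numbers: given nr_io_queues = 8 available
+ * queues and opts->nr_io_queues = 2, opts->nr_write_queues = 4,
+ * opts->nr_poll_queues = 2, the split above yields
+ * HCTX_TYPE_READ = 2, HCTX_TYPE_DEFAULT = 4 and HCTX_TYPE_POLL = 2.
+ */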
+
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+ u32 io_queues[HCTX_MAX_TYPES])
+{
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
+ /* separate read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ io_queues[HCTX_TYPE_READ];
+ set->map[HCTX_TYPE_READ].queue_offset =
+ io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /* shared read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_READ].queue_offset = 0;
+ }
+
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+ if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
+ /* map dedicated poll queues only if we have queues left */
+ set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
+ set->map[HCTX_TYPE_POLL].queue_offset =
+ io_queues[HCTX_TYPE_DEFAULT] +
+ io_queues[HCTX_TYPE_READ];
+ blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+ }
+
+ dev_info(ctrl->device,
+ "mapped %d/%d/%d default/read/poll queues.\n",
+ io_queues[HCTX_TYPE_DEFAULT],
+ io_queues[HCTX_TYPE_READ],
+ io_queues[HCTX_TYPE_POLL]);
+}
+EXPORT_SYMBOL_GPL(nvmf_map_queues);
+
+static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
+ unsigned int required_opts)
+{
+ if ((opts->mask & required_opts) != required_opts) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if ((opt_tokens[i].token & required_opts) &&
+ !(opt_tokens[i].token & opts->mask)) {
+ pr_warn("missing parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
+ strcmp(opts->traddr, ctrl->opts->traddr) ||
+ strcmp(opts->trsvcid, ctrl->opts->trsvcid))
+ return false;
+
+ /*
+ * Checking the local address or host interfaces is rough.
+ *
+ * In most cases, none is specified and the host port or
+ * host interface is selected by the stack.
+ *
+ * Assume no match if:
+ * - local address or host interface is specified and address
+ * or host interface is not the same
+ * - local address or host interface is not specified but
+ * remote is, or vice versa (admin using specific
+ * host_traddr/host_iface when it matters).
+ */
+ if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+ if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+ return false;
+ }
+
+ if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ if (strcmp(opts->host_iface, ctrl->opts->host_iface))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
+
+static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
+ unsigned int allowed_opts)
+{
+ if (opts->mask & ~allowed_opts) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if ((opt_tokens[i].token & opts->mask) &&
+ (opt_tokens[i].token & ~allowed_opts)) {
+ pr_warn("invalid parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void nvmf_free_options(struct nvmf_ctrl_options *opts)
+{
+ nvmf_host_put(opts->host);
+ kfree(opts->transport);
+ kfree(opts->traddr);
+ kfree(opts->trsvcid);
+ kfree(opts->subsysnqn);
+ kfree(opts->host_traddr);
+ kfree(opts->host_iface);
+ kfree(opts->dhchap_secret);
+ kfree(opts->dhchap_ctrl_secret);
+ kfree(opts);
+}
+EXPORT_SYMBOL_GPL(nvmf_free_options);
+
+#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
+#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
+ NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
+ NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
+ NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
+ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+ NVMF_OPT_DHCHAP_CTRL_SECRET)
+
+static struct nvme_ctrl *
+nvmf_create_ctrl(struct device *dev, const char *buf)
+{
+ struct nvmf_ctrl_options *opts;
+ struct nvmf_transport_ops *ops;
+ struct nvme_ctrl *ctrl;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ ret = nvmf_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ request_module("nvme-%s", opts->transport);
+
+ /*
+ * Check the generic options first as we need a valid transport for
+ * the lookup below. Then clear the generic flags so that transport
+ * drivers don't have to care about them.
+ */
+ ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
+ if (ret)
+ goto out_free_opts;
+ opts->mask &= ~NVMF_REQUIRED_OPTS;
+
+ down_read(&nvmf_transports_rwsem);
+ ops = nvmf_lookup_transport(opts);
+ if (!ops) {
+ pr_info("no handler found for transport %s.\n",
+ opts->transport);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!try_module_get(ops->module)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ up_read(&nvmf_transports_rwsem);
+
+ ret = nvmf_check_required_opts(opts, ops->required_opts);
+ if (ret)
+ goto out_module_put;
+ ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
+ ops->allowed_opts | ops->required_opts);
+ if (ret)
+ goto out_module_put;
+
+ ctrl = ops->create_ctrl(dev, opts);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_module_put;
+ }
+
+ module_put(ops->module);
+ return ctrl;
+
+out_module_put:
+ module_put(ops->module);
+ goto out_free_opts;
+out_unlock:
+ up_read(&nvmf_transports_rwsem);
+out_free_opts:
+ nvmf_free_options(opts);
+ return ERR_PTR(ret);
+}
+
+static struct class *nvmf_class;
+static struct device *nvmf_device;
+static DEFINE_MUTEX(nvmf_dev_mutex);
+
+static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl;
+ const char *buf;
+ int ret = 0;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ mutex_lock(&nvmf_dev_mutex);
+ if (seq_file->private) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ctrl = nvmf_create_ctrl(nvmf_device, buf);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ seq_file->private = ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
+{
+ const struct match_token *tok;
+ int idx;
+
+ /*
+ * Add dummy entries for instance and cntlid to
+ * signal an invalid/non-existing controller
+ */
+ seq_puts(seq_file, "instance=-1,cntlid=-1");
+ for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
+ tok = &opt_tokens[idx];
+ if (tok->token == NVMF_OPT_ERR)
+ continue;
+ seq_puts(seq_file, ",");
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_puts(seq_file, "\n");
+}
+
+static int nvmf_dev_show(struct seq_file *seq_file, void *private)
+{
+ struct nvme_ctrl *ctrl;
+
+ mutex_lock(&nvmf_dev_mutex);
+ ctrl = seq_file->private;
+ if (!ctrl) {
+ __nvmf_concat_opt_tokens(seq_file);
+ goto out_unlock;
+ }
+
+ seq_printf(seq_file, "instance=%d,cntlid=%d\n",
+ ctrl->instance, ctrl->cntlid);
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ return 0;
+}
+
+static int nvmf_dev_open(struct inode *inode, struct file *file)
+{
+ /*
+ * The miscdevice code initializes file->private_data, but doesn't
+ * make use of it later.
+ */
+ file->private_data = NULL;
+ return single_open(file, nvmf_dev_show, NULL);
+}
+
+static int nvmf_dev_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl = seq_file->private;
+
+ if (ctrl)
+ nvme_put_ctrl(ctrl);
+ return single_release(inode, file);
+}
+
+static const struct file_operations nvmf_dev_fops = {
+ .owner = THIS_MODULE,
+ .write = nvmf_dev_write,
+ .read = seq_read,
+ .open = nvmf_dev_open,
+ .release = nvmf_dev_release,
+};
+
+static struct miscdevice nvmf_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nvme-fabrics",
+ .fops = &nvmf_dev_fops,
+};
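+
+/*
+ * Usage sketch from the userspace side (illustrative, roughly what nvme-cli
+ * does under the hood): a controller is created by writing an option string
+ * to the misc device and reading back the resulting instance/cntlid:
+ *
+ *	fd = open("/dev/nvme-fabrics", O_RDWR);
+ *	write(fd, "transport=tcp,traddr=192.168.1.10,trsvcid=4420,nqn=...", len);
+ *	read(fd, buf, sizeof(buf));
+ *	close(fd);
+ *
+ * where the read returns e.g. "instance=0,cntlid=1".
+ */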
+
+static int __init nvmf_init(void)
+{
+ int ret;
+
+ nvmf_default_host = nvmf_host_default();
+ if (!nvmf_default_host)
+ return -ENOMEM;
+
+ nvmf_class = class_create("nvme-fabrics");
+ if (IS_ERR(nvmf_class)) {
+ pr_err("couldn't register class nvme-fabrics\n");
+ ret = PTR_ERR(nvmf_class);
+ goto out_free_host;
+ }
+
+ nvmf_device =
+ device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(nvmf_device)) {
+ pr_err("couldn't create nvme-fabrics device!\n");
+ ret = PTR_ERR(nvmf_device);
+ goto out_destroy_class;
+ }
+
+ ret = misc_register(&nvmf_misc);
+ if (ret) {
+ pr_err("couldn't register misc device: %d\n", ret);
+ goto out_destroy_device;
+ }
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(nvmf_class, MKDEV(0, 0));
+out_destroy_class:
+ class_destroy(nvmf_class);
+out_free_host:
+ nvmf_host_put(nvmf_default_host);
+ return ret;
+}
+
+static void __exit nvmf_exit(void)
+{
+ misc_deregister(&nvmf_misc);
+ device_destroy(nvmf_class, MKDEV(0, 0));
+ class_destroy(nvmf_class);
+ nvmf_host_put(nvmf_default_host);
+
+ BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
+}
+
+MODULE_LICENSE("GPL v2");
+
+module_init(nvmf_init);
+module_exit(nvmf_exit);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
new file mode 100644
index 0000000000..82e7a27ffb
--- /dev/null
+++ b/drivers/nvme/host/fabrics.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#ifndef _NVME_FABRICS_H
+#define _NVME_FABRICS_H 1
+
+#include <linux/in.h>
+#include <linux/inet.h>
+
+#define NVMF_MIN_QUEUE_SIZE 16
+#define NVMF_MAX_QUEUE_SIZE 1024
+#define NVMF_DEF_QUEUE_SIZE 128
+#define NVMF_DEF_RECONNECT_DELAY 10
+/* default to 600 seconds of reconnect attempts before giving up */
+#define NVMF_DEF_CTRL_LOSS_TMO 600
+/* default is -1: the fail fast mechanism is disabled */
+#define NVMF_DEF_FAIL_FAST_TMO -1
+
+/*
+ * Reserved one command for internal usage. This command is used for sending
+ * the connect command, as well as for the keep alive command on the admin
+ * queue once live.
+ */
+#define NVMF_RESERVED_TAGS 1
+
+/*
+ * Define a host as seen by the target. We allocate one at boot, but also
+ * allow overriding it when creating controllers. This is both to provide
+ * persistence of the Host NQN over multiple boots, and to allow using
+ * multiple ones, for example in a container scenario. Because we must not
+ * use different Host NQNs with the same Host ID we generate a Host ID and
+ * use this structure to keep track of the relation between the two.
+ */
+struct nvmf_host {
+ struct kref ref;
+ struct list_head list;
+ char nqn[NVMF_NQN_SIZE];
+ uuid_t id;
+};
+
+/**
+ * enum nvmf_parsing_opts - used to define the sysfs parsing options.
+ */
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_TRANSPORT = 1 << 0,
+ NVMF_OPT_NQN = 1 << 1,
+ NVMF_OPT_TRADDR = 1 << 2,
+ NVMF_OPT_TRSVCID = 1 << 3,
+ NVMF_OPT_QUEUE_SIZE = 1 << 4,
+ NVMF_OPT_NR_IO_QUEUES = 1 << 5,
+ NVMF_OPT_TL_RETRY_COUNT = 1 << 6,
+ NVMF_OPT_KATO = 1 << 7,
+ NVMF_OPT_HOSTNQN = 1 << 8,
+ NVMF_OPT_RECONNECT_DELAY = 1 << 9,
+ NVMF_OPT_HOST_TRADDR = 1 << 10,
+ NVMF_OPT_CTRL_LOSS_TMO = 1 << 11,
+ NVMF_OPT_HOST_ID = 1 << 12,
+ NVMF_OPT_DUP_CONNECT = 1 << 13,
+ NVMF_OPT_DISABLE_SQFLOW = 1 << 14,
+ NVMF_OPT_HDR_DIGEST = 1 << 15,
+ NVMF_OPT_DATA_DIGEST = 1 << 16,
+ NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
+ NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
+ NVMF_OPT_TOS = 1 << 19,
+ NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
+ NVMF_OPT_HOST_IFACE = 1 << 21,
+ NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_DHCHAP_SECRET = 1 << 23,
+ NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
+};
+
+/**
+ * struct nvmf_ctrl_options - Used to hold the options specified
+ * with the parsing opts enum.
+ * @mask: Used by the fabrics library to parse through sysfs options
+ * on adding a NVMe controller.
+ * @max_reconnects: maximum number of allowed reconnect attempts before removing
+ * the controller, (-1) means reconnect forever, zero means remove
+ * immediately;
+ * @transport: Holds the fabric transport "technology name" (for a lack of
+ * better description) that will be used by an NVMe controller
+ * being added.
+ * @subsysnqn: Holds the fully qualified NQN subsystem name (format defined
+ * in the NVMe specification, "NVMe Qualified Names").
+ * @traddr: The transport-specific TRADDR field for a port on the
+ * subsystem which is adding a controller.
+ * @trsvcid: The transport-specific TRSVCID field for a port on the
+ * subsystem which is adding a controller.
+ * @host_traddr: A transport-specific field identifying the NVME host port
+ * to use for the connection to the controller.
+ * @host_iface: A transport-specific field identifying the NVME host
+ * interface to use for the connection to the controller.
+ * @queue_size: Number of IO queue elements.
+ * @nr_io_queues: Number of controller IO queues that will be established.
+ * @reconnect_delay: Time between two consecutive reconnect attempts.
+ * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
+ * @kato: Keep-alive timeout.
+ * @host: Virtual NVMe host, contains the NQN and Host ID.
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
+ * @disable_sqflow: disable controller sq flow control
+ * @hdr_digest: generate/verify header digest (TCP)
+ * @data_digest: generate/verify data digest (TCP)
+ * @nr_write_queues: number of queues for write I/O
+ * @nr_poll_queues: number of queues for polling I/O
+ * @tos: type of service
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds
+ */
+struct nvmf_ctrl_options {
+ unsigned mask;
+ int max_reconnects;
+ char *transport;
+ char *subsysnqn;
+ char *traddr;
+ char *trsvcid;
+ char *host_traddr;
+ char *host_iface;
+ size_t queue_size;
+ unsigned int nr_io_queues;
+ unsigned int reconnect_delay;
+ bool discovery_nqn;
+ bool duplicate_connect;
+ unsigned int kato;
+ struct nvmf_host *host;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
+ bool disable_sqflow;
+ bool hdr_digest;
+ bool data_digest;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
+ int tos;
+ int fast_io_fail_tmo;
+};
+
+/*
+ * struct nvmf_transport_ops - used to register a specific
+ * fabric implementation of NVMe fabrics.
+ * @entry: Used by the fabrics library to add the new
+ * registration entry to its linked-list internal tree.
+ * @module: Transport module reference
+ * @name: Name of the NVMe fabric driver implementation.
+ * @required_opts: sysfs command-line options that must be specified
+ * when adding a new NVMe controller.
+ * @allowed_opts: sysfs command-line options that can be specified
+ * when adding a new NVMe controller.
+ * @create_ctrl(): function pointer to the implementation-specific
+ * routine that starts up the fabric and creates a
+ * controller, for the purpose of connection to an
+ * NVMe controller using that fabric technology.
+ *
+ * Notes:
+ * 1. At minimum, 'required_opts' and 'allowed_opts' should
+ * be set to the same enum parsing options defined earlier.
+ * 2. create_ctrl() must be defined (even if it does nothing)
+ * 3. struct nvmf_transport_ops must be statically allocated in the
+ * module's .bss section so that a pure module_get on @module
+ * prevents the memory from being freed.
+ */
+struct nvmf_transport_ops {
+ struct list_head entry;
+ struct module *module;
+ const char *name;
+ int required_opts;
+ int allowed_opts;
+ struct nvme_ctrl *(*create_ctrl)(struct device *dev,
+ struct nvmf_ctrl_options *opts);
+};
+
+static inline bool
+nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
+ ctrl->state == NVME_CTRL_DEAD ||
+ strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+ strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
+ !uuid_equal(&opts->host->id, &ctrl->opts->host->id))
+ return false;
+
+ return true;
+}
+
+static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->subsys ||
+ !strcmp(ctrl->opts->subsysnqn, NVME_DISC_SUBSYS_NAME))
+ return ctrl->opts->subsysnqn;
+ return ctrl->subsys->subnqn;
+}
+
+static inline void nvmf_complete_timed_out_request(struct request *rq)
+{
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+}
+
+static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
+{
+ return min(opts->nr_io_queues, num_online_cpus()) +
+ min(opts->nr_write_queues, num_online_cpus()) +
+ min(opts->nr_poll_queues, num_online_cpus());
+}
+
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+int nvmf_register_transport(struct nvmf_transport_ops *ops);
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
+void nvmf_free_options(struct nvmf_ctrl_options *opts);
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts);
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+ u32 io_queues[HCTX_MAX_TYPES]);
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+ u32 io_queues[HCTX_MAX_TYPES]);
+
+#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
new file mode 100644
index 0000000000..1ba10a5c65
--- /dev/null
+++ b/drivers/nvme/host/fault_inject.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fault injection support for nvme.
+ *
+ * Copyright (c) 2018, Oracle and/or its affiliates
+ */
+
+#include <linux/moduleparam.h>
+#include "nvme.h"
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+/* optional fault injection attributes boot time option:
+ * nvme_core.fail_request=<interval>,<probability>,<space>,<times>
+ */
+static char *fail_request;
+module_param(fail_request, charp, 0000);
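+
+/*
+ * Example with illustrative values: booting with
+ * nvme_core.fail_request=0,100,0,10 injects a failure into every eligible
+ * request (probability 100, no interval or space constraint) until ten
+ * failures have been injected.
+ */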
+
+void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
+ const char *dev_name)
+{
+ struct dentry *dir, *parent;
+ struct fault_attr *attr = &fault_inj->attr;
+
+ /* set default fault injection attribute */
+ if (fail_request)
+ setup_fault_attr(&fail_default_attr, fail_request);
+
+ /* create debugfs directory and attribute */
+ parent = debugfs_create_dir(dev_name, NULL);
+ if (IS_ERR(parent)) {
+ pr_warn("%s: failed to create debugfs directory\n", dev_name);
+ return;
+ }
+
+ *attr = fail_default_attr;
+ dir = fault_create_debugfs_attr("fault_inject", parent, attr);
+ if (IS_ERR(dir)) {
+ pr_warn("%s: failed to create debugfs attr\n", dev_name);
+ debugfs_remove_recursive(parent);
+ return;
+ }
+ fault_inj->parent = parent;
+
+ /* create debugfs for status code and dont_retry */
+ fault_inj->status = NVME_SC_INVALID_OPCODE;
+ fault_inj->dont_retry = true;
+ debugfs_create_x16("status", 0600, dir, &fault_inj->status);
+ debugfs_create_bool("dont_retry", 0600, dir, &fault_inj->dont_retry);
+}
+
+void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
+{
+ /* remove debugfs directories */
+ debugfs_remove_recursive(fault_inject->parent);
+}
+
+void nvme_should_fail(struct request *req)
+{
+ struct gendisk *disk = req->q->disk;
+ struct nvme_fault_inject *fault_inject = NULL;
+ u16 status;
+
+ if (disk) {
+ struct nvme_ns *ns = disk->private_data;
+
+ if (ns)
+ fault_inject = &ns->fault_inject;
+ else
+ WARN_ONCE(1, "No namespace found for request\n");
+ } else {
+ fault_inject = &nvme_req(req)->ctrl->fault_inject;
+ }
+
+ if (fault_inject && should_fail(&fault_inject->attr, 1)) {
+ /* inject status code and DNR bit */
+ status = fault_inject->status;
+ if (fault_inject->dont_retry)
+ status |= NVME_SC_DNR;
+ nvme_req(req)->status = status;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_should_fail);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
new file mode 100644
index 0000000000..46cce0ec35
--- /dev/null
+++ b/drivers/nvme/host/fc.c
@@ -0,0 +1,4006 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+#include <linux/delay.h>
+#include <linux/overflow.h>
+#include <linux/blk-cgroup.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+#include "fc.h"
+#include <scsi/scsi_transport_fc.h>
+#include <linux/blk-mq-pci.h>
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+enum nvme_fc_queue_flags {
+ NVME_FC_Q_CONNECTED = 0,
+ NVME_FC_Q_LIVE,
+};
+
+#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
+#define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnects
+ * when connected and a
+ * connection failure.
+ */
+
+struct nvme_fc_queue {
+ struct nvme_fc_ctrl *ctrl;
+ struct device *dev;
+ struct blk_mq_hw_ctx *hctx;
+ void *lldd_handle;
+ size_t cmnd_capsule_len;
+ u32 qnum;
+ u32 rqcnt;
+ u32 seqno;
+
+ u64 connection_id;
+ atomic_t csn;
+
+ unsigned long flags;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+enum nvme_fcop_flags {
+ FCOP_FLAGS_TERMIO = (1 << 0),
+ FCOP_FLAGS_AEN = (1 << 1),
+};
+
+struct nvmefc_ls_req_op {
+ struct nvmefc_ls_req ls_req;
+
+ struct nvme_fc_rport *rport;
+ struct nvme_fc_queue *queue;
+ struct request *rq;
+ u32 flags;
+
+ int ls_error;
+ struct completion ls_done;
+ struct list_head lsreq_list; /* rport->ls_req_list */
+ bool req_queued;
+};
+
+struct nvmefc_ls_rcv_op {
+ struct nvme_fc_rport *rport;
+ struct nvmefc_ls_rsp *lsrsp;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
+ u16 rqstdatalen;
+ bool handled;
+ dma_addr_t rspdma;
+ struct list_head lsrcv_list; /* rport->ls_rcv_list */
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+enum nvme_fcpop_state {
+ FCPOP_STATE_UNINIT = 0,
+ FCPOP_STATE_IDLE = 1,
+ FCPOP_STATE_ACTIVE = 2,
+ FCPOP_STATE_ABORTED = 3,
+ FCPOP_STATE_COMPLETE = 4,
+};
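+
+/*
+ * Rough FCP op lifecycle as used in this file: ops start UNINIT, become
+ * IDLE once their IUs are DMA-mapped, are ACTIVE while outstanding at
+ * the LLDD, and move to ABORTED or COMPLETE before returning to IDLE
+ * for reuse.
+ */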
+
+struct nvme_fc_fcp_op {
+ struct nvme_request nreq; /*
+ * nvme/host/core.c
+ * requires this to be
+ * the 1st element in the
+ * private structure
+ * associated with the
+ * request.
+ */
+ struct nvmefc_fcp_req fcp_req;
+
+ struct nvme_fc_ctrl *ctrl;
+ struct nvme_fc_queue *queue;
+ struct request *rq;
+
+ atomic_t state;
+ u32 flags;
+ u32 rqno;
+ u32 nents;
+
+ struct nvme_fc_cmd_iu cmd_iu;
+ struct nvme_fc_ersp_iu rsp_iu;
+};
+
+struct nvme_fcp_op_w_sgl {
+ struct nvme_fc_fcp_op op;
+ struct scatterlist sgl[NVME_INLINE_SG_CNT];
+ uint8_t priv[];
+};
+
+struct nvme_fc_lport {
+ struct nvme_fc_local_port localport;
+
+ struct ida endp_cnt;
+ struct list_head port_list; /* nvme_fc_port_list */
+ struct list_head endp_list;
+ struct device *dev; /* physical device for dma */
+ struct nvme_fc_port_template *ops;
+ struct kref ref;
+ atomic_t act_rport_cnt;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+struct nvme_fc_rport {
+ struct nvme_fc_remote_port remoteport;
+
+ struct list_head endp_list; /* for lport->endp_list */
+ struct list_head ctrl_list;
+ struct list_head ls_req_list;
+ struct list_head ls_rcv_list;
+ struct list_head disc_list;
+ struct device *dev; /* physical device for dma */
+ struct nvme_fc_lport *lport;
+ spinlock_t lock;
+ struct kref ref;
+ atomic_t act_ctrl_cnt;
+ unsigned long dev_loss_end;
+ struct work_struct lsrcv_work;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+/* fc_ctrl flags values - specified as bit positions */
+#define ASSOC_ACTIVE 0
+#define ASSOC_FAILED 1
+#define FCCTRL_TERMIO 2
+
+struct nvme_fc_ctrl {
+ spinlock_t lock;
+ struct nvme_fc_queue *queues;
+ struct device *dev;
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ u32 cnum;
+
+ bool ioq_live;
+ u64 association_id;
+ struct nvmefc_ls_rcv_op *rcv_disconn;
+
+ struct list_head ctrl_list; /* rport->ctrl_list */
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct blk_mq_tag_set tag_set;
+
+ struct work_struct ioerr_work;
+ struct delayed_work connect_work;
+
+ struct kref ref;
+ unsigned long flags;
+ u32 iocnt;
+ wait_queue_head_t ioabort_wait;
+
+ struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
+
+ struct nvme_ctrl ctrl;
+};
+
+static inline struct nvme_fc_ctrl *
+to_fc_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
+}
+
+static inline struct nvme_fc_lport *
+localport_to_lport(struct nvme_fc_local_port *portptr)
+{
+ return container_of(portptr, struct nvme_fc_lport, localport);
+}
+
+static inline struct nvme_fc_rport *
+remoteport_to_rport(struct nvme_fc_remote_port *portptr)
+{
+ return container_of(portptr, struct nvme_fc_rport, remoteport);
+}
+
+static inline struct nvmefc_ls_req_op *
+ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
+{
+ return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
+}
+
+static inline struct nvme_fc_fcp_op *
+fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
+{
+ return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
+}
+
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvme_fc_lock);
+
+static LIST_HEAD(nvme_fc_lport_list);
+static DEFINE_IDA(nvme_fc_local_port_cnt);
+static DEFINE_IDA(nvme_fc_ctrl_cnt);
+
+static struct workqueue_struct *nvme_fc_wq;
+
+static bool nvme_fc_waiting_to_unload;
+static DECLARE_COMPLETION(nvme_fc_unload_proceed);
+
+/*
+ * These items are short-term. They will eventually be moved into
+ * a generic FC class. See comments in module init.
+ */
+static struct device *fc_udev_device;
+
+static void nvme_fc_complete_rq(struct request *rq);
+
+/* *********************** FC-NVME Port Management ************************ */
+
+static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
+ struct nvme_fc_queue *, unsigned int);
+
+static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
+
+
+static void
+nvme_fc_free_lport(struct kref *ref)
+{
+ struct nvme_fc_lport *lport =
+ container_of(ref, struct nvme_fc_lport, ref);
+ unsigned long flags;
+
+ WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&lport->endp_list));
+
+ /* remove from transport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&lport->port_list);
+ if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
+ complete(&nvme_fc_unload_proceed);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
+ ida_destroy(&lport->endp_cnt);
+
+ put_device(lport->dev);
+
+ kfree(lport);
+}
+
+static void
+nvme_fc_lport_put(struct nvme_fc_lport *lport)
+{
+ kref_put(&lport->ref, nvme_fc_free_lport);
+}
+
+static int
+nvme_fc_lport_get(struct nvme_fc_lport *lport)
+{
+ return kref_get_unless_zero(&lport->ref);
+}
+
+
+static struct nvme_fc_lport *
+nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *ops,
+ struct device *dev)
+{
+ struct nvme_fc_lport *lport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ if (lport->localport.node_name != pinfo->node_name ||
+ lport->localport.port_name != pinfo->port_name)
+ continue;
+
+ if (lport->dev != dev) {
+ lport = ERR_PTR(-EXDEV);
+ goto out_done;
+ }
+
+ if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
+ lport = ERR_PTR(-EEXIST);
+ goto out_done;
+ }
+
+ if (!nvme_fc_lport_get(lport)) {
+ /*
+ * fails if ref cnt already 0. If so,
+ * act as if lport already deleted
+ */
+ lport = NULL;
+ goto out_done;
+ }
+
+ /* resume the lport */
+
+ lport->ops = ops;
+ lport->localport.port_role = pinfo->port_role;
+ lport->localport.port_id = pinfo->port_id;
+ lport->localport.port_state = FC_OBJSTATE_ONLINE;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return lport;
+ }
+
+ lport = NULL;
+
+out_done:
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return lport;
+}
+
+/**
+ * nvme_fc_register_localport - transport entry point called by an
+ * LLDD to register the existence of a NVME
+ * host FC port.
+ * @pinfo: pointer to information about the port to be registered
+ * @template: LLDD entrypoints and operational parameters for the port
+ * @dev: physical hardware device node port corresponds to. Will be
+ * used for DMA mappings
+ * @portptr: pointer to a local port pointer. Upon success, the routine
+ * will allocate a nvme_fc_local_port structure and place its
+ * address in the local port pointer. Upon failure, local port
+ * pointer will be set to 0.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *template,
+ struct device *dev,
+ struct nvme_fc_local_port **portptr)
+{
+ struct nvme_fc_lport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!template->localport_delete || !template->remoteport_delete ||
+ !template->ls_req || !template->fcp_io ||
+ !template->ls_abort || !template->fcp_abort ||
+ !template->max_hw_queues || !template->max_sgl_segments ||
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
+ ret = -EINVAL;
+ goto out_reghost_failed;
+ }
+
+ /*
+ * look to see if there is already a localport that had been
+ * deregistered and in the process of waiting for all the
+ * references to fully be removed. If the references haven't
+ * expired, we can simply re-enable the localport. Remoteports
+ * and controller reconnections should resume naturally.
+ */
+ newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
+
+ /* found an lport, but something about its state is bad */
+ if (IS_ERR(newrec)) {
+ ret = PTR_ERR(newrec);
+ goto out_reghost_failed;
+
+ /* found existing lport, which was resumed */
+ } else if (newrec) {
+ *portptr = &newrec->localport;
+ return 0;
+ }
+
+ /* nothing found - allocate a new localport struct */
+
+ newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_reghost_failed;
+ }
+
+ idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_fail_kfree;
+ }
+
+ if (!get_device(dev) && dev) {
+ ret = -ENODEV;
+ goto out_ida_put;
+ }
+
+ INIT_LIST_HEAD(&newrec->port_list);
+ INIT_LIST_HEAD(&newrec->endp_list);
+ kref_init(&newrec->ref);
+ atomic_set(&newrec->act_rport_cnt, 0);
+ newrec->ops = template;
+ newrec->dev = dev;
+ ida_init(&newrec->endp_cnt);
+ if (template->local_priv_sz)
+ newrec->localport.private = &newrec[1];
+ else
+ newrec->localport.private = NULL;
+ newrec->localport.node_name = pinfo->node_name;
+ newrec->localport.port_name = pinfo->port_name;
+ newrec->localport.port_role = pinfo->port_role;
+ newrec->localport.port_id = pinfo->port_id;
+ newrec->localport.port_state = FC_OBJSTATE_ONLINE;
+ newrec->localport.port_num = idx;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ if (dev)
+ dma_set_seg_boundary(dev, template->dma_boundary);
+
+ *portptr = &newrec->localport;
+ return 0;
+
+out_ida_put:
+ ida_free(&nvme_fc_local_port_cnt, idx);
+out_fail_kfree:
+ kfree(newrec);
+out_reghost_failed:
+ *portptr = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
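+
+/*
+ * Minimal usage sketch (hypothetical LLDD code; lldd_wwnn, lldd_wwpn,
+ * lldd_fc_template and pdev are placeholders, not part of this file):
+ *
+ *   struct nvme_fc_port_info pinfo = {
+ *           .node_name = lldd_wwnn,
+ *           .port_name = lldd_wwpn,
+ *           .port_role = FC_PORT_ROLE_NVME_INITIATOR,
+ *   };
+ *   struct nvme_fc_local_port *localport;
+ *   int ret;
+ *
+ *   ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
+ *                                    &pdev->dev, &localport);
+ *   if (ret)
+ *           return ret;
+ */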
+
+/**
+ * nvme_fc_unregister_localport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered NVME host FC port.
+ * @portptr: pointer to the (registered) local port that is to be deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
+{
+ struct nvme_fc_lport *lport = localport_to_lport(portptr);
+ unsigned long flags;
+
+ if (!portptr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ return -EINVAL;
+ }
+ portptr->port_state = FC_OBJSTATE_DELETED;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ if (atomic_read(&lport->act_rport_cnt) == 0)
+ lport->ops->localport_delete(&lport->localport);
+
+ nvme_fc_lport_put(lport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
+
+/*
+ * TRADDR strings, per FC-NVME are fixed format:
+ * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
+ * udev event will only differ by prefix of what field is
+ * being specified:
+ * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
+ * 19 + 43 + null_fudge = 64 characters
+ */
+#define FCNVME_TRADDR_LENGTH 64
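+
+/*
+ * Example (hypothetical WWNs) of a resulting uevent string:
+ *   NVMEFC_HOST_TRADDR=nn-0x20000090fa942779:pn-0x10000090fa942779
+ * i.e. a 19 character prefix plus the 43 character traddr and a NUL
+ * terminator fit within FCNVME_TRADDR_LENGTH.
+ */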
+
+static void
+nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
+ struct nvme_fc_rport *rport)
+{
+ char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/
+ char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/
+ char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
+
+ if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
+ return;
+
+ snprintf(hostaddr, sizeof(hostaddr),
+ "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
+ lport->localport.node_name, lport->localport.port_name);
+ snprintf(tgtaddr, sizeof(tgtaddr),
+ "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
+ rport->remoteport.node_name, rport->remoteport.port_name);
+ kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void
+nvme_fc_free_rport(struct kref *ref)
+{
+ struct nvme_fc_rport *rport =
+ container_of(ref, struct nvme_fc_rport, ref);
+ struct nvme_fc_lport *lport =
+ localport_to_lport(rport->remoteport.localport);
+ unsigned long flags;
+
+ WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&rport->ctrl_list));
+
+ /* remove from lport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&rport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ WARN_ON(!list_empty(&rport->disc_list));
+ ida_free(&lport->endp_cnt, rport->remoteport.port_num);
+
+ kfree(rport);
+
+ nvme_fc_lport_put(lport);
+}
+
+static void
+nvme_fc_rport_put(struct nvme_fc_rport *rport)
+{
+ kref_put(&rport->ref, nvme_fc_free_rport);
+}
+
+static int
+nvme_fc_rport_get(struct nvme_fc_rport *rport)
+{
+ return kref_get_unless_zero(&rport->ref);
+}
+
+static void
+nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
+{
+ switch (nvme_ctrl_state(&ctrl->ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_CONNECTING:
+ /*
+ * As all reconnects were suppressed, schedule a
+ * connect.
+ */
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: connectivity re-established. "
+ "Attempting reconnect\n", ctrl->cnum);
+
+ queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
+ break;
+
+ case NVME_CTRL_RESETTING:
+ /*
+ * Controller is already in the process of terminating the
+ * association. No need to do anything further. The reconnect
+ * step will naturally occur after the reset completes.
+ */
+ break;
+
+ default:
+ /* no action to take - let it delete */
+ break;
+ }
+}
+
+static struct nvme_fc_rport *
+nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
+ struct nvme_fc_port_info *pinfo)
+{
+ struct nvme_fc_rport *rport;
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != pinfo->node_name ||
+ rport->remoteport.port_name != pinfo->port_name)
+ continue;
+
+ if (!nvme_fc_rport_get(rport)) {
+ rport = ERR_PTR(-ENOLCK);
+ goto out_done;
+ }
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ /* has it been unregistered */
+ if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
+ /* means lldd called us twice */
+ spin_unlock_irqrestore(&rport->lock, flags);
+ nvme_fc_rport_put(rport);
+ return ERR_PTR(-ESTALE);
+ }
+
+ rport->remoteport.port_role = pinfo->port_role;
+ rport->remoteport.port_id = pinfo->port_id;
+ rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
+ rport->dev_loss_end = 0;
+
+ /*
+ * kick off a reconnect attempt on all associations to the
+ * remote port. A successful reconnect will resume i/o.
+ */
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
+ nvme_fc_resume_controller(ctrl);
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return rport;
+ }
+
+ rport = NULL;
+
+out_done:
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return rport;
+}
+
+static inline void
+__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
+ struct nvme_fc_port_info *pinfo)
+{
+ if (pinfo->dev_loss_tmo)
+ rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
+ else
+ rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
+}
+
+/**
+ * nvme_fc_register_remoteport - transport entry point called by an
+ * LLDD to register the existence of a NVME
+ * subsystem FC port on its fabric.
+ * @localport: pointer to the (registered) local port that the remote
+ * subsystem port is connected to.
+ * @pinfo: pointer to information about the port to be registered
+ * @portptr: pointer to a remote port pointer. Upon success, the routine
+ * will allocate a nvme_fc_remote_port structure and place its
+ * address in the remote port pointer. Upon failure, remote port
+ * pointer will be set to 0.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+ struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_remote_port **portptr)
+{
+ struct nvme_fc_lport *lport = localport_to_lport(localport);
+ struct nvme_fc_rport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!nvme_fc_lport_get(lport)) {
+ ret = -ESHUTDOWN;
+ goto out_reghost_failed;
+ }
+
+ /*
+ * look to see if there is already a remoteport that is waiting
+ * for a reconnect (within dev_loss_tmo) with the same WWN's.
+ * If so, transition to it and reconnect.
+ */
+ newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
+
+ /* found an rport, but something about its state is bad */
+ if (IS_ERR(newrec)) {
+ ret = PTR_ERR(newrec);
+ goto out_lport_put;
+
+ /* found existing rport, which was resumed */
+ } else if (newrec) {
+ nvme_fc_lport_put(lport);
+ __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ nvme_fc_signal_discovery_scan(lport, newrec);
+ *portptr = &newrec->remoteport;
+ return 0;
+ }
+
+ /* nothing found - allocate a new remoteport struct */
+
+ newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_lport_put;
+ }
+
+ idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_kfree_rport;
+ }
+
+ INIT_LIST_HEAD(&newrec->endp_list);
+ INIT_LIST_HEAD(&newrec->ctrl_list);
+ INIT_LIST_HEAD(&newrec->ls_req_list);
+ INIT_LIST_HEAD(&newrec->disc_list);
+ kref_init(&newrec->ref);
+ atomic_set(&newrec->act_ctrl_cnt, 0);
+ spin_lock_init(&newrec->lock);
+ newrec->remoteport.localport = &lport->localport;
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
+ newrec->dev = lport->dev;
+ newrec->lport = lport;
+ if (lport->ops->remote_priv_sz)
+ newrec->remoteport.private = &newrec[1];
+ else
+ newrec->remoteport.private = NULL;
+ newrec->remoteport.port_role = pinfo->port_role;
+ newrec->remoteport.node_name = pinfo->node_name;
+ newrec->remoteport.port_name = pinfo->port_name;
+ newrec->remoteport.port_id = pinfo->port_id;
+ newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
+ newrec->remoteport.port_num = idx;
+ __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_add_tail(&newrec->endp_list, &lport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ nvme_fc_signal_discovery_scan(lport, newrec);
+
+ *portptr = &newrec->remoteport;
+ return 0;
+
+out_kfree_rport:
+ kfree(newrec);
+out_lport_put:
+ nvme_fc_lport_put(lport);
+out_reghost_failed:
+ *portptr = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
+
+static int
+nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
+{
+ struct nvmefc_ls_req_op *lsop;
+ unsigned long flags;
+
+restart:
+ spin_lock_irqsave(&rport->lock, flags);
+
+ list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
+ if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
+ lsop->flags |= FCOP_FLAGS_TERMIO;
+ spin_unlock_irqrestore(&rport->lock, flags);
+ rport->lport->ops->ls_abort(&rport->lport->localport,
+ &rport->remoteport,
+ &lsop->ls_req);
+ goto restart;
+ }
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return 0;
+}
+
+static void
+nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
+{
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connectivity lost. Awaiting "
+ "Reconnect", ctrl->cnum);
+
+ switch (nvme_ctrl_state(&ctrl->ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ /*
+ * Schedule a controller reset. The reset will terminate the
+ * association and schedule the reconnect timer. Reconnects
+ * will be attempted until either the ctlr_loss_tmo
+ * (max_retries * connect_delay) expires or the remoteport's
+ * dev_loss_tmo expires.
+ */
+ if (nvme_reset_ctrl(&ctrl->ctrl)) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Couldn't schedule reset.\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+ break;
+
+ case NVME_CTRL_CONNECTING:
+ /*
+ * The association has already been terminated and the
+ * controller is attempting reconnects. No need to do anything
+ * further. Reconnects will be attempted until either the
+ * ctlr_loss_tmo (max_retries * connect_delay) expires or the
+ * remoteport's dev_loss_tmo expires.
+ */
+ break;
+
+ case NVME_CTRL_RESETTING:
+ /*
+ * Controller is already in the process of terminating the
+ * association. No need to do anything further. The reconnect
+ * step will kick in naturally after the association is
+ * terminated.
+ */
+ break;
+
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
+ default:
+ /* no action to take - let it delete */
+ break;
+ }
+}
+
+/**
+ * nvme_fc_unregister_remoteport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered NVME subsystem FC port.
+ * @portptr: pointer to the (registered) remote port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+
+ if (!portptr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return -EINVAL;
+ }
+ portptr->port_state = FC_OBJSTATE_DELETED;
+
+ rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
+
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ /* if dev_loss_tmo==0, dev loss is immediate */
+ if (!portptr->dev_loss_tmo) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connectivity lost.\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ } else
+ nvme_fc_ctrl_connectivity_loss(ctrl);
+ }
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ nvme_fc_abort_lsops(rport);
+
+ if (atomic_read(&rport->act_ctrl_cnt) == 0)
+ rport->lport->ops->remoteport_delete(portptr);
+
+ /*
+ * release the reference, which will allow the rport to be torn
+ * down once all controllers go away, which should only occur
+ * after dev_loss_tmo expires.
+ */
+ nvme_fc_rport_put(rport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
+
+/**
+ * nvme_fc_rescan_remoteport - transport entry point called by an
+ * LLDD to request a nvme device rescan.
+ * @remoteport: pointer to the (registered) remote port that is to be
+ * rescanned.
+ *
+ * Returns: N/A
+ */
+void
+nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
+
+ nvme_fc_signal_discovery_scan(rport->lport, rport);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
+
+int
+nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
+ u32 dev_loss_tmo)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return -EINVAL;
+ }
+
+ /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
+ rport->remoteport.dev_loss_tmo = dev_loss_tmo;
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLD's will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrapper all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (those that return just a dma address),
+ * we'll noop them, returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_sg(dev, sg, nents, dir);
+}
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
+static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
+
+static void
+__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvme_fc_rport_put(rport);
+}
+
+static int
+__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
+ struct nvmefc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret = 0;
+
+ if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ return -ECONNREFUSED;
+
+ if (!nvme_fc_rport_get(rport))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->rport = rport;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+ init_completion(&lsop->ls_done);
+
+ lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
+ ret = -EFAULT;
+ goto out_putrport;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ ret = rport->lport->ops->ls_req(&rport->lport->localport,
+ &rport->remoteport, lsreq);
+ if (ret)
+ goto out_unlink;
+
+ return 0;
+
+out_unlink:
+ lsop->ls_error = ret;
+ spin_lock_irqsave(&rport->lock, flags);
+ lsop->req_queued = false;
+ list_del(&lsop->lsreq_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+ fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+out_putrport:
+ nvme_fc_rport_put(rport);
+
+ return ret;
+}
+
+static void
+nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+
+ lsop->ls_error = status;
+ complete(&lsop->ls_done);
+}
+
+static int
+nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
+ int ret;
+
+ ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
+
+ if (!ret) {
+ /*
+ * No timeout/not interruptible as we need the struct
+ * to exist until the lldd calls us back. Thus mandate
+ * waiting until the lldd calls back; the lldd is
+ * responsible for the timeout action.
+ */
+ wait_for_completion(&lsop->ls_done);
+
+ __nvme_fc_finish_ls_req(lsop);
+
+ ret = lsop->ls_error;
+ }
+
+ if (ret)
+ return ret;
+
+ /* ACC or RJT payload ? */
+ if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
+ return -ENXIO;
+
+ return 0;
+}
+
+static int
+nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
+ struct nvmefc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ /* don't wait for completion */
+
+ return __nvme_fc_send_ls_req(rport, lsop, done);
+}
+
+static int
+nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
+{
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
+ struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+ unsigned long flags;
+ int ret, fcret = 0;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Association failed: ENOMEM\n",
+ ctrl->cnum);
+ ret = -ENOMEM;
+ goto out_no_memory;
+ }
+
+ assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
+ assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = &assoc_acc[1];
+ else
+ lsreq->private = NULL;
+
+ assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+ assoc_rqst->desc_list_len =
+ cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+ assoc_rqst->assoc_cmd.desc_tag =
+ cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
+ assoc_rqst->assoc_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+ assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+ assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
+ /* Linux supports only Dynamic controllers */
+ assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
+ uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
+ strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+ min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
+ strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+ min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+
+ lsop->queue = queue;
+ lsreq->rqstaddr = assoc_rqst;
+ lsreq->rqstlen = sizeof(*assoc_rqst);
+ lsreq->rspaddr = assoc_acc;
+ lsreq->rsplen = sizeof(*assoc_acc);
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+
+ ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
+ if (ret)
+ goto out_free_buffer;
+
+ /* process connect LS completion */
+
+ /* validate the ACC response */
+ if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+ fcret = VERR_LSACC;
+ else if (assoc_acc->hdr.desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_acc)))
+ fcret = VERR_CR_ASSOC_ACC_LEN;
+ else if (assoc_acc->hdr.rqst.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_RQST))
+ fcret = VERR_LSDESC_RQST;
+ else if (assoc_acc->hdr.rqst.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+ fcret = VERR_LSDESC_RQST_LEN;
+ else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
+ fcret = VERR_CR_ASSOC;
+ else if (assoc_acc->associd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ fcret = VERR_ASSOC_ID;
+ else if (assoc_acc->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ fcret = VERR_ASSOC_ID_LEN;
+ else if (assoc_acc->connectid.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+ fcret = VERR_CONN_ID;
+ else if (assoc_acc->connectid.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+ fcret = VERR_CONN_ID_LEN;
+
+ if (fcret) {
+ ret = -EBADF;
+ dev_err(ctrl->dev,
+ "q %d Create Association LS failed: %s\n",
+ queue->qnum, validation_errors[fcret]);
+ } else {
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->association_id =
+ be64_to_cpu(assoc_acc->associd.association_id);
+ queue->connection_id =
+ be64_to_cpu(assoc_acc->connectid.connection_id);
+ set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ }
+
+out_free_buffer:
+ kfree(lsop);
+out_no_memory:
+ if (ret)
+ dev_err(ctrl->dev,
+ "queue %d connect admin queue failed (%d).\n",
+ queue->qnum, ret);
+ return ret;
+}
+
+static int
+nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ u16 qsize, u16 ersp_ratio)
+{
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct fcnvme_ls_cr_conn_rqst *conn_rqst;
+ struct fcnvme_ls_cr_conn_acc *conn_acc;
+ int ret, fcret = 0;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*conn_rqst) + sizeof(*conn_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
+ ctrl->cnum);
+ ret = -ENOMEM;
+ goto out_no_memory;
+ }
+
+ conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
+ conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&conn_acc[1];
+ else
+ lsreq->private = NULL;
+
+ conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+ conn_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+
+ conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ conn_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+ conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+ conn_rqst->connect_cmd.desc_tag =
+ cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
+ conn_rqst->connect_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+ conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+ conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
+ conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
+
+ lsop->queue = queue;
+ lsreq->rqstaddr = conn_rqst;
+ lsreq->rqstlen = sizeof(*conn_rqst);
+ lsreq->rspaddr = conn_acc;
+ lsreq->rsplen = sizeof(*conn_acc);
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+
+ ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
+ if (ret)
+ goto out_free_buffer;
+
+ /* process connect LS completion */
+
+ /* validate the ACC response */
+ if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+ fcret = VERR_LSACC;
+ else if (conn_acc->hdr.desc_list_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
+ fcret = VERR_CR_CONN_ACC_LEN;
+ else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+ fcret = VERR_LSDESC_RQST;
+ else if (conn_acc->hdr.rqst.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+ fcret = VERR_LSDESC_RQST_LEN;
+ else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
+ fcret = VERR_CR_CONN;
+ else if (conn_acc->connectid.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+ fcret = VERR_CONN_ID;
+ else if (conn_acc->connectid.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+ fcret = VERR_CONN_ID_LEN;
+
+ if (fcret) {
+ ret = -EBADF;
+ dev_err(ctrl->dev,
+ "q %d Create I/O Connection LS failed: %s\n",
+ queue->qnum, validation_errors[fcret]);
+ } else {
+ queue->connection_id =
+ be64_to_cpu(conn_acc->connectid.connection_id);
+ set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ }
+
+out_free_buffer:
+ kfree(lsop);
+out_no_memory:
+ if (ret)
+ dev_err(ctrl->dev,
+ "queue %d connect I/O queue failed (%d).\n",
+ queue->qnum, ret);
+ return ret;
+}
+
+static void
+nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+
+ __nvme_fc_finish_ls_req(lsop);
+
+ /* fc-nvme initiator doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme initiator is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme target, so you may never get a
+ * response even if you tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme target
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
+{
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ int ret;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Disconnect Association "
+ "failed: ENOMEM\n",
+ ctrl->cnum);
+ return;
+ }
+
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
+
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ ctrl->association_id);
+
+ ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
+ nvme_fc_disconnect_assoc_done);
+ if (ret)
+ kfree(lsop);
+}
+
+static void
+nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_del(&lsop->lsrcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ kfree(lsop->rspbuf);
+ kfree(lsop->rqstbuf);
+ kfree(lsop);
+
+ nvme_fc_rport_put(rport);
+}
+
+static void
+nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ int ret;
+
+ fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
+ lsop->lsrsp);
+ if (ret) {
+ dev_warn(lport->dev,
+ "LLDD rejected LS RSP xmt: LS %d status %d\n",
+ w0->ls_cmd, ret);
+ nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
+ return;
+ }
+}
+
+static struct nvme_fc_ctrl *
+nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
+ struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct nvme_fc_ctrl *ctrl, *ret = NULL;
+ struct nvmefc_ls_rcv_op *oldls = NULL;
+ u64 association_id = be64_to_cpu(rqst->associd.association_id);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ if (!nvme_fc_ctrl_get(ctrl))
+ continue;
+ spin_lock(&ctrl->lock);
+ if (association_id == ctrl->association_id) {
+ oldls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = lsop;
+ ret = ctrl;
+ }
+ spin_unlock(&ctrl->lock);
+ if (ret)
+ /* leave the ctrl get reference */
+ break;
+ nvme_fc_ctrl_put(ctrl);
+ }
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ /* transmit a response for anything that was pending */
+ if (oldls) {
+ dev_info(rport->lport->dev,
+ "NVME-FC{%d}: Multiple Disconnect Association "
+ "LS's received\n", ctrl->cnum);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*oldls->rspbuf),
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvme_fc_xmt_ls_rsp(oldls);
+ }
+
+ return ret;
+}
+
+/*
+ * returns true to mean LS handled and ls_rsp can be sent
+ * returns false to defer ls_rsp xmt (will be done as part of
+ * association termination)
+ */
+static bool
+nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ &lsop->rspbuf->rsp_dis_assoc;
+ struct nvme_fc_ctrl *ctrl = NULL;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association */
+ ctrl = nvme_fc_match_disconn_ls(rport, lsop);
+ if (!ctrl)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret) {
+ dev_info(rport->lport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return true;
+ }
+
+ /* format an ACCept response */
+
+ lsop->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
+
+ /*
+ * the transmit of the response will occur after the exchanges
+ * for the association have been ABTS'd by
+ * nvme_fc_delete_association().
+ */
+
+ /* fail the association */
+ nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
+
+ /* release the reference taken by nvme_fc_match_disconn_ls() */
+ nvme_fc_ctrl_put(ctrl);
+
+ return false;
+}
+
+/*
+ * Actual Processing routine for received FC-NVME LS Requests from the LLD
+ * returns true if a response should be sent afterward, false if rsp will
+ * be sent asynchronously.
+ */
+static bool
+nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ bool ret = true;
+
+ lsop->lsrsp->nvme_fc_private = lsop;
+ lsop->lsrsp->rspbuf = lsop->rspbuf;
+ lsop->lsrsp->rspdma = lsop->rspdma;
+ lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
+ /* Be preventative: handlers will later set a valid length */
+ lsop->lsrsp->rsplen = 0;
+
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_DISCONNECT_ASSOC:
+ ret = nvme_fc_ls_disconnect_assoc(lsop);
+ break;
+ case FCNVME_LS_DISCONNECT_CONN:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ case FCNVME_LS_CREATE_CONNECTION:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ default:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ }
+
+ return ret;
+}
+
+static void
+nvme_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvme_fc_rport *rport =
+ container_of(work, struct nvme_fc_rport, lsrcv_work);
+ struct fcnvme_ls_rqst_w0 *w0;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ bool sendrsp;
+
+restart:
+ sendrsp = true;
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
+ if (lsop->handled)
+ continue;
+
+ lsop->handled = true;
+ if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ sendrsp = nvme_fc_handle_ls_rqst(lsop);
+ } else {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ w0 = &lsop->rqstbuf->w0;
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(
+ lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ w0->ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ }
+ if (sendrsp)
+ nvme_fc_xmt_ls_rsp(lsop);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+}
+
+static
+void nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport,
+ struct fcnvme_ls_rqst_w0 *w0)
+{
+ dev_info(lport->dev, "RCV %s LS failed: No memory\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+}
+
+/**
+ * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
+ * upon the reception of a NVME LS request.
+ *
+ * The nvme-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @portptr: pointer to the (registered) remote port that the LS
+ * was received from. The remoteport is associated with
+ * a specific localport.
+ * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
+ * used to reference the exchange corresponding to the LS
+ * when issuing an ls response.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ int ret;
+
+ nvme_fc_rport_get(rport);
+
+ /* validate there's a routine to transmit a response */
+ if (!lport->ops->xmt_ls_rsp) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: payload too large\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -E2BIG;
+ goto out_put;
+ }
+
+ lsop = kzalloc(sizeof(*lsop), GFP_KERNEL);
+ if (!lsop) {
+ nvme_fc_rcv_ls_req_err_msg(lport, w0);
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL);
+ lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL);
+ if (!lsop->rqstbuf || !lsop->rspbuf) {
+ nvme_fc_rcv_ls_req_err_msg(lport, w0);
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: DMA mapping failure\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ lsop->rport = rport;
+ lsop->lsrsp = lsrsp;
+
+ memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
+ lsop->rqstdatalen = lsreqbuf_len;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ ret = -ENOTCONN;
+ goto out_unmap;
+ }
+ list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ schedule_work(&rport->lsrcv_work);
+
+ return 0;
+
+out_unmap:
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+out_free:
+ kfree(lsop->rspbuf);
+ kfree(lsop->rqstbuf);
+ kfree(lsop);
+out_put:
+ nvme_fc_rport_put(rport);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
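+
+/*
+ * LLDD-side sketch (hypothetical; lldd_rport, lldd_ls, frame_buf,
+ * frame_len and lldd_abort_exchange are placeholders): the LS payload is
+ * copied internally, so the buffer may be reused as soon as the call
+ * returns; a non-zero return means the LLDD should abort the exchange.
+ *
+ *   if (nvme_fc_rcv_ls_req(lldd_rport->remoteport, &lldd_ls->lsrsp,
+ *                          frame_buf, frame_len))
+ *           lldd_abort_exchange(lldd_rport, frame_buf);
+ */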
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+static void
+__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_fcp_op *op)
+{
+ fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
+ sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+ fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
+ sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+ atomic_set(&op->state, FCPOP_STATE_UNINIT);
+}
+
+static void
+nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+
+ return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
+}
+
+static int
+__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+{
+ unsigned long flags;
+ int opstate;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+ if (opstate != FCPOP_STATE_ACTIVE)
+ atomic_set(&op->state, opstate);
+ else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
+ op->flags |= FCOP_FLAGS_TERMIO;
+ ctrl->iocnt++;
+ }
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ if (opstate != FCPOP_STATE_ACTIVE)
+ return -ECANCELED;
+
+ ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ op->queue->lldd_handle,
+ &op->fcp_req);
+
+ return 0;
+}
+
+static void
+nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
+ int i;
+
+ /* ensure we've initialized the ops once */
+ if (!(aen_op->flags & FCOP_FLAGS_AEN))
+ return;
+
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+ __nvme_fc_abort_op(ctrl, aen_op);
+}
+
+static inline void
+__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_fcp_op *op, int opstate)
+{
+ unsigned long flags;
+
+ if (opstate == FCPOP_STATE_ABORTED) {
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
+ op->flags & FCOP_FLAGS_TERMIO) {
+ if (!--ctrl->iocnt)
+ wake_up(&ctrl->ioabort_wait);
+ }
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ }
+}
+
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+/*
+ * nvme_fc_io_getuuid - Routine called by the lldd to get the appid
+ * field associated with an IO request
+ * @req: IO request from nvme fc to the driver
+ * Returns: UUID if there is an appid associated with the VM, or
+ * NULL if the user/libvirt has not set an appid for the VM
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
+ return NULL;
+ return blkcg_get_fc_appid(rq->bio);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
+
+static void
+nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_queue *queue = op->queue;
+ struct nvme_completion *cqe = &op->rsp_iu.cqe;
+ struct nvme_command *sqe = &op->cmd_iu.sqe;
+ __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
+ union nvme_result result;
+ bool terminate_assoc = true;
+ int opstate;
+
+ /*
+ * WARNING:
+ * The current linux implementation of a nvme controller
+ * allocates a single tag set for all io queues and sizes
+ * the io queues to fully hold all possible tags. Thus, the
+ * implementation does not reference or care about the sqhd
+ * value as it never needs to use the sqhd/sqtail pointers
+ * for submission pacing.
+ *
+ * This affects the FC-NVME implementation in two ways:
+ * 1) As the value doesn't matter, we don't need to waste
+ * cycles extracting it from ERSPs and stamping it in the
+ * cases where the transport fabricates CQEs on successful
+ * completions.
+ * 2) The FC-NVME implementation requires that delivery of
+ * ERSP completions go back to the nvme layer in order
+ * relative to the rsn, such that the sqhd value will always
+ * be "in order" for the nvme layer. As the nvme layer in
+ * linux doesn't care about sqhd, there's no need to return
+ * them in order.
+ *
+ * Additionally:
+ * As the core nvme layer in linux currently does not look at
+ * every field in the cqe - in cases where the FC transport must
+ * fabricate a CQE, the following fields will not be set as they
+ * are not referenced:
+ * cqe.sqid, cqe.sqhd, cqe.command_id
+ *
+ * Failure or error of an individual i/o, in a transport
+ * detected fashion unrelated to the nvme completion status,
+ * can potentially cause the initiator and target sides to get out
+ * of sync on SQ head/tail (aka outstanding io count allowed).
+ * Per FC-NVME spec, failure of an individual command requires
+ * the connection to be terminated, which in turn requires the
+ * association to be terminated.
+ */
+
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
+ fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
+ sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+
+ if (opstate == FCPOP_STATE_ABORTED)
+ status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
+ else if (freq->status) {
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to lldd error %d\n",
+ ctrl->cnum, freq->status);
+ }
+
+ /*
+ * For the linux implementation, if we have an unsuccessful
+ * status, the blk-mq layer can typically be called with the
+ * non-zero status and the content of the cqe isn't important.
+ */
+ if (status)
+ goto done;
+
+ /*
+ * command completed successfully relative to the wire
+ * protocol. However, validate anything received and
+ * extract the status and result from the cqe (create it
+ * where necessary).
+ */
+
+ switch (freq->rcv_rsplen) {
+
+ case 0:
+ case NVME_FC_SIZEOF_ZEROS_RSP:
+ /*
+ * No response payload or 12 bytes of payload (which
+ * should all be zeros) are considered successful and
+ * no payload in the CQE by the transport.
+ */
+ if (freq->transferred_length !=
+ be32_to_cpu(op->cmd_iu.data_len)) {
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to bad transfer "
+ "length: %d vs expected %d\n",
+ ctrl->cnum, freq->transferred_length,
+ be32_to_cpu(op->cmd_iu.data_len));
+ goto done;
+ }
+ result.u64 = 0;
+ break;
+
+ case sizeof(struct nvme_fc_ersp_iu):
+ /*
+ * The ERSP IU contains a full completion with CQE.
+ * Validate ERSP IU and look at cqe.
+ */
+ if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
+ (freq->rcv_rsplen / 4) ||
+ be32_to_cpu(op->rsp_iu.xfrd_len) !=
+ freq->transferred_length ||
+ op->rsp_iu.ersp_result ||
+ sqe->common.command_id != cqe->command_id)) {
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
+ "iu len %d, xfr len %d vs %d, status code "
+ "%d, cmdid %d vs %d\n",
+ ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
+ be32_to_cpu(op->rsp_iu.xfrd_len),
+ freq->transferred_length,
+ op->rsp_iu.ersp_result,
+ sqe->common.command_id,
+ cqe->command_id);
+ goto done;
+ }
+ result = cqe->result;
+ status = cqe->status;
+ break;
+
+ default:
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
+ "len %d\n",
+ ctrl->cnum, freq->rcv_rsplen);
+ goto done;
+ }
+
+ terminate_assoc = false;
+
+done:
+ if (op->flags & FCOP_FLAGS_AEN) {
+ nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags = FCOP_FLAGS_AEN; /* clear other flags */
+ nvme_fc_ctrl_put(ctrl);
+ goto check_error;
+ }
+
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+ if (!nvme_try_complete_req(rq, status, result))
+ nvme_fc_complete_rq(rq);
+
+check_error:
+ if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
+}
+
+static int
+__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
+ struct request *rq, u32 rqno)
+{
+ struct nvme_fcp_op_w_sgl *op_w_sgl =
+ container_of(op, typeof(*op_w_sgl), op);
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ int ret = 0;
+
+ memset(op, 0, sizeof(*op));
+ op->fcp_req.cmdaddr = &op->cmd_iu;
+ op->fcp_req.cmdlen = sizeof(op->cmd_iu);
+ op->fcp_req.rspaddr = &op->rsp_iu;
+ op->fcp_req.rsplen = sizeof(op->rsp_iu);
+ op->fcp_req.done = nvme_fc_fcpio_done;
+ op->ctrl = ctrl;
+ op->queue = queue;
+ op->rq = rq;
+ op->rqno = rqno;
+
+ cmdiu->format_id = NVME_CMD_FORMAT_ID;
+ cmdiu->fc_id = NVME_CMD_FC_ID;
+ cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ if (queue->qnum)
+ cmdiu->rsv_cat = fccmnd_set_cat_css(0,
+ (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
+ else
+ cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
+
+ op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
+ &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - cmdiu dma mapping failed.\n");
+ ret = -EFAULT;
+ goto out_on_error;
+ }
+
+ op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
+ &op->rsp_iu, sizeof(op->rsp_iu),
+ DMA_FROM_DEVICE);
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - rspiu dma mapping failed.\n");
+ ret = -EFAULT;
+ }
+
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+out_on_error:
+ return ret;
+}
+
+static int
+nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
+ struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
+ int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+ struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
+ int res;
+
+ res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
+ if (res)
+ return res;
+ op->op.fcp_req.first_sgl = op->sgl;
+ op->op.fcp_req.private = &op->priv[0];
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
+ nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
+ return res;
+}
+
+static int
+nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op;
+ struct nvme_fc_cmd_iu *cmdiu;
+ struct nvme_command *sqe;
+ void *private = NULL;
+ int i, ret;
+
+ aen_op = ctrl->aen_ops;
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
+ if (ctrl->lport->ops->fcprqst_priv_sz) {
+ private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
+ GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+ }
+
+ cmdiu = &aen_op->cmd_iu;
+ sqe = &cmdiu->sqe;
+ ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
+ aen_op, (struct request *)NULL,
+ (NVME_AQ_BLK_MQ_DEPTH + i));
+ if (ret) {
+ kfree(private);
+ return ret;
+ }
+
+ aen_op->flags = FCOP_FLAGS_AEN;
+ aen_op->fcp_req.private = private;
+
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->common.opcode = nvme_admin_async_event;
+ /* Note: core layer may overwrite the sqe.command_id value */
+ sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
+ }
+ return 0;
+}
+
+static void
+nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op;
+ int i;
+
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
+ aen_op = ctrl->aen_ops;
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
+ __nvme_fc_exit_request(ctrl, aen_op);
+
+ kfree(aen_op->fcp_req.private);
+ aen_op->fcp_req.private = NULL;
+ }
+}
+
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
+ struct nvme_fc_queue *queue = &ctrl->queues[qidx];
+
+ hctx->driver_data = queue;
+ queue->hctx = hctx;
+ return 0;
+}
+
+static int
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
+{
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
+}
+
+static int
+nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx);
+}
+
+static void
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
+{
+ struct nvme_fc_queue *queue;
+
+ queue = &ctrl->queues[idx];
+ memset(queue, 0, sizeof(*queue));
+ queue->ctrl = ctrl;
+ queue->qnum = idx;
+ atomic_set(&queue->csn, 0);
+ queue->dev = ctrl->dev;
+
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
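+ /*
+ * E.g. (illustrative numbers): ioccsz is reported by the
+ * controller in 16-byte units, so an ioccsz of 4 corresponds
+ * to a 64-byte capsule - just the bare SQE.
+ */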
+
+ /*
+ * We considered allocating buffers for all SQEs and CQEs and
+ * dma mapping them - recording their respective entries in the
+ * request structures (kernel vm addr and dma address) so the
+ * driver could use the buffers/mappings directly. It only makes
+ * sense if the LLDD would use them for its messaging api. It's
+ * very unlikely most adapter APIs would use a native NVMe
+ * sqe/cqe. It would be more reasonable if FC-NVME IU payload
+ * structures were used instead.
+ */
+}
+
+/*
+ * This routine terminates a queue at the transport level.
+ * The transport has already ensured that all outstanding ios on
+ * the queue have been terminated.
+ * The transport will send a Disconnect LS request to terminate
+ * the queue's connection. Termination of the admin queue will also
+ * terminate the association at the target.
+ */
+static void
+nvme_fc_free_queue(struct nvme_fc_queue *queue)
+{
+ if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
+ return;
+
+ clear_bit(NVME_FC_Q_LIVE, &queue->flags);
+ /*
+ * Current implementation never disconnects a single queue.
+ * It always terminates a whole association. So there is never
+ * a disconnect(queue) LS sent to the target.
+ */
+
+ queue->connection_id = 0;
+ atomic_set(&queue->csn, 0);
+}
+
+static void
+__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, unsigned int qidx)
+{
+ if (ctrl->lport->ops->delete_queue)
+ ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
+ queue->lldd_handle);
+ queue->lldd_handle = NULL;
+}
+
+static void
+nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++)
+ nvme_fc_free_queue(&ctrl->queues[i]);
+}
+
+static int
+__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
+{
+ int ret = 0;
+
+ queue->lldd_handle = NULL;
+ if (ctrl->lport->ops->create_queue)
+ ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
+ qidx, qsize, &queue->lldd_handle);
+
+ return ret;
+}
+
+static void
+nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
+ int i;
+
+ for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
+ __nvme_fc_delete_hw_queue(ctrl, queue, i);
+}
+
+static int
+nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[1];
+ int i, ret;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
+ ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
+ if (ret)
+ goto delete_queues;
+ }
+
+ return 0;
+
+delete_queues:
+ for (; i > 0; i--)
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+ return ret;
+}
+
+static int
+nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
+ (qsize / 5));
+ if (ret)
+ break;
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
+
+ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
+ }
+
+ return ret;
+}
+
+static void
+nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++)
+ nvme_fc_init_queue(ctrl, i);
+}
+
+static void
+nvme_fc_ctrl_free(struct kref *ref)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(ref, struct nvme_fc_ctrl, ref);
+ unsigned long flags;
+
+ if (ctrl->ctrl.tagset)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+
+ /* remove from rport list */
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ list_del(&ctrl->ctrl_list);
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+
+ kfree(ctrl->queues);
+
+ put_device(ctrl->dev);
+ nvme_fc_rport_put(ctrl->rport);
+
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ if (ctrl->ctrl.opts)
+ nvmf_free_options(ctrl->ctrl.opts);
+ kfree(ctrl);
+}
+
+static void
+nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
+{
+ kref_put(&ctrl->ref, nvme_fc_ctrl_free);
+}
+
+static int
+nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
+{
+ return kref_get_unless_zero(&ctrl->ref);
+}
+
+/*
+ * All accesses from nvme core layer done - can now free the
+ * controller. Called after last nvme_put_ctrl() call
+ */
+static void
+nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+
+ WARN_ON(nctrl != &ctrl->ctrl);
+
+ nvme_fc_ctrl_put(ctrl);
+}
+
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then
+ * invokes this routine to kill them one by one.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool nvme_fc_terminate_exchange(struct request *req, void *data)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+ op->nreq.flags |= NVME_REQ_CANCELLED;
+ __nvme_fc_abort_op(ctrl, op);
+ return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them. This routine is typically called by the
+ * delete_association routine. It is also called due to an error during
+ * reconnect. In that scenario, it is most likely a command that initializes
+ * the controller, including fabric Connect commands on io queues, that
+ * has timed out or failed; the io must be killed for the connect
+ * thread to see the error.
+ */
+static void
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
+{
+ int q;
+
+ /*
+ * if aborting io, the queues are no longer good, mark them
+ * all as not live.
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ for (q = 1; q < ctrl->ctrl.queue_count; q++)
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+ }
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+ * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us which ios are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_quiesce_io_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ if (start_queues)
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
+ }
+
+ /*
+ * Other transports, which don't have link-level contexts bound
+ * to sqe's, would try to gracefully shut down the controller by
+ * writing the registers for shutdown and polling (call
+ * nvme_disable_ctrl()). Given a bunch of i/o was potentially
+ * just aborted and we will wait on those contexts, and given
+ * there was no indication of how live the controller is on the
+ * link, don't send more io to create more contexts for the
+ * shutdown. Let the controller fail via keepalive failure if
+ * it's still present.
+ */
+
+ /*
+ * clean up the admin queue. Same thing as above.
+ */
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ if (start_queues)
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+}
+
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+{
+ /*
+ * if an error (io timeout, etc) while (re)connecting, the remote
+ * port requested terminating of the association (disconnect_ls)
+ * or an error (timeout or abort) occurred on an io while creating
+ * the controller. Abort any ios on the association and let the
+ * create_association error path resolve things.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport error during (re)connect\n",
+ ctrl->cnum);
+ return;
+ }
+
+ /* Otherwise, only proceed if in LIVE state - e.g. on first error */
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ return;
+
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport association event: %s\n",
+ ctrl->cnum, errmsg);
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
+
+ nvme_reset_ctrl(&ctrl->ctrl);
+}
+
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+
+ /*
+ * Attempt to abort the offending command. Command completion
+ * will detect the aborted io and will fail the connection.
+ */
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "x%08x/x%08x\n",
+ ctrl->cnum, op->queue->qnum, sqe->common.opcode,
+ sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ if (__nvme_fc_abort_op(ctrl, op))
+ nvme_fc_error_recovery(ctrl, "io timeout abort failed");
+
+ /*
+ * the io abort has been initiated. Have the reset timer
+ * restarted and the abort completion will complete the io
+ * shortly. Avoids a synchronous wait while the abort finishes.
+ */
+ return BLK_EH_RESET_TIMER;
+}
+
+static int
+nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ int ret;
+
+ freq->sg_cnt = 0;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return 0;
+
+ freq->sg_table.sgl = freq->first_sgl;
+ ret = sg_alloc_table_chained(&freq->sg_table,
+ blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+ NVME_INLINE_SG_CNT);
+ if (ret)
+ return -ENOMEM;
+
+ op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+ WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
+ freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
+ op->nents, rq_dma_dir(rq));
+ if (unlikely(freq->sg_cnt <= 0)) {
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
+ freq->sg_cnt = 0;
+ return -EFAULT;
+ }
+
+ /*
+ * TODO: blk_integrity_rq(rq) for DIF
+ */
+ return 0;
+}
+
+static void
+nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+
+ if (!freq->sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
+ rq_dma_dir(rq));
+
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
+
+ freq->sg_cnt = 0;
+}
+
+/*
+ * In FC, the queue is a logical thing. At transport connect, the target
+ * creates its "queue" and returns a handle that is to be given to the
+ * target whenever it posts something to the corresponding SQ. When an
+ * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
+ * command contained within the SQE, an io, and assigns a FC exchange
+ * to it. The SQE and the associated SQ handle are sent in the initial
+ * CMD IU sent on the exchange. All transfers relative to the io occur
+ * as part of the exchange. The CQE is the last thing for the io,
+ * which is transferred (explicitly or implicitly) with the RSP IU
+ * sent on the exchange. After the CQE is received, the FC exchange is
+ * terminated and the exchange may be used for a different io.
+ *
+ * The transport to LLDD api has the transport making a request for a
+ * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
+ * resource and transfers the command. The LLDD will then process all
+ * steps to complete the io. Upon completion, the transport done routine
+ * is called.
+ *
+ * So - while the operation is outstanding to the LLDD, there is a link
+ * level FC exchange resource that is also outstanding. This must be
+ * considered in all cleanup operations.
+ */
+static blk_status_t
+nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ struct nvme_fc_fcp_op *op, u32 data_len,
+ enum nvmefc_fcp_datadir io_dir)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+ int ret, opstate;
+
+ /*
+ * before attempting to send the io, check to see if we believe
+ * the target device is present
+ */
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ return BLK_STS_RESOURCE;
+
+ if (!nvme_fc_ctrl_get(ctrl))
+ return BLK_STS_IOERR;
+
+ /* format the FC-NVME CMD IU and fcp_req */
+ cmdiu->connection_id = cpu_to_be64(queue->connection_id);
+ cmdiu->data_len = cpu_to_be32(data_len);
+ switch (io_dir) {
+ case NVMEFC_FCP_WRITE:
+ cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
+ break;
+ case NVMEFC_FCP_READ:
+ cmdiu->flags = FCNVME_CMD_FLAGS_READ;
+ break;
+ case NVMEFC_FCP_NODATA:
+ cmdiu->flags = 0;
+ break;
+ }
+ op->fcp_req.payload_length = data_len;
+ op->fcp_req.io_dir = io_dir;
+ op->fcp_req.transferred_length = 0;
+ op->fcp_req.rcv_rsplen = 0;
+ op->fcp_req.status = NVME_SC_SUCCESS;
+ op->fcp_req.sqid = cpu_to_le16(queue->qnum);
+
+ /*
+ * validate per fabric rules, set fields mandated by fabric spec
+ * as well as those by FC-NVME spec.
+ */
+ WARN_ON_ONCE(sqe->common.metadata);
+ sqe->common.flags |= NVME_CMD_SGL_METABUF;
+
+ /*
+ * format SQE DPTR field per FC-NVME rules:
+ * type=0x5 Transport SGL Data Block Descriptor
+ * subtype=0xA Transport-specific value
+ * address=0
+ * length=length of the data series
+ */
+ sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+ NVME_SGL_FMT_TRANSPORT_A;
+ sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
+ sqe->rw.dptr.sgl.addr = 0;
+
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
+ ret = nvme_fc_map_data(ctrl, op->rq, op);
+ if (ret < 0) {
+ nvme_cleanup_cmd(op->rq);
+ nvme_fc_ctrl_put(ctrl);
+ if (ret == -ENOMEM || ret == -EAGAIN)
+ return BLK_STS_RESOURCE;
+ return BLK_STS_IOERR;
+ }
+ }
+
+ fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
+ sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+ atomic_set(&op->state, FCPOP_STATE_ACTIVE);
+
+ if (!(op->flags & FCOP_FLAGS_AEN))
+ nvme_start_request(op->rq);
+
+ cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
+ ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ queue->lldd_handle, &op->fcp_req);
+
+ if (ret) {
+ /*
+ * If the lld fails to send the command, is there an issue with
+ * the csn value? If the command that fails is the Connect,
+ * no - as the connection won't be live. If it is a command
+ * post-connect, it's possible a gap in csn may be created.
+ * Does this matter? As Linux initiators don't send fused
+ * commands, no. The gap would exist, but as there's nothing
+ * that depends on csn order to be delivered on the target
+ * side, it shouldn't hurt. It would be difficult for a
+ * target to even detect the csn gap as it has no idea when the
+ * cmd with the csn was supposed to arrive.
+ */
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
+ nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
+
+ nvme_fc_ctrl_put(ctrl);
+
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
+ ret != -EBUSY)
+ return BLK_STS_IOERR;
+
+ return BLK_STS_RESOURCE;
+ }
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t
+nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_fc_queue *queue = hctx->driver_data;
+ struct nvme_fc_ctrl *ctrl = queue->ctrl;
+ struct request *rq = bd->rq;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ enum nvmefc_fcp_datadir io_dir;
+ bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
+ u32 data_len;
+ blk_status_t ret;
+
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+ !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
+ ret = nvme_setup_cmd(ns, rq);
+ if (ret)
+ return ret;
+
+ /*
+ * nvme core doesn't quite treat the rq opaquely. Commands such
+ * as WRITE ZEROES will return a non-zero rq payload_bytes yet
+ * there is no actual payload to be transferred.
+ * To get it right, key data transmission on there being 1 or
+ * more physical segments in the sg list. If there are no
+ * physical segments, there is no payload.
+ */
+ if (blk_rq_nr_phys_segments(rq)) {
+ data_len = blk_rq_payload_bytes(rq);
+ io_dir = ((rq_data_dir(rq) == WRITE) ?
+ NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
+ } else {
+ data_len = 0;
+ io_dir = NVMEFC_FCP_NODATA;
+ }
+
+
+ return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
+}
+
+static void
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
+ struct nvme_fc_fcp_op *aen_op;
+ blk_status_t ret;
+
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
+ return;
+
+ aen_op = &ctrl->aen_ops[0];
+
+ ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
+ NVMEFC_FCP_NODATA);
+ if (ret)
+ dev_err(ctrl->ctrl.device,
+ "failed async event work\n");
+}
+
+static void
+nvme_fc_complete_rq(struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags &= ~FCOP_FLAGS_TERMIO;
+
+ nvme_fc_unmap_data(ctrl, rq, op);
+ nvme_complete_rq(rq);
+ nvme_fc_ctrl_put(ctrl);
+}
+
+static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
+ int i;
+
+ for (i = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ if (!map->nr_queues) {
+ WARN_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /* Call LLDD map queue functionality if defined */
+ if (ctrl->lport->ops->map_queues)
+ ctrl->lport->ops->map_queues(&ctrl->lport->localport,
+ map);
+ else
+ blk_mq_map_queues(map);
+ }
+}
+
+static const struct blk_mq_ops nvme_fc_mq_ops = {
+ .queue_rq = nvme_fc_queue_rq,
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_request,
+ .exit_request = nvme_fc_exit_request,
+ .init_hctx = nvme_fc_init_hctx,
+ .timeout = nvme_fc_timeout,
+ .map_queues = nvme_fc_map_queues,
+};
+
+static int
+nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
+ int ret;
+
+ nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ ctrl->lport->ops->max_hw_queues);
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "set_queue_count failed: %d\n", ret);
+ return ret;
+ }
+
+ ctrl->ctrl.queue_count = nr_io_queues + 1;
+ if (!nr_io_queues)
+ return 0;
+
+ nvme_fc_init_io_queues(ctrl);
+
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_fc_mq_ops, 1,
+ struct_size_t(struct nvme_fcp_op_w_sgl, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
+ if (ret)
+ return ret;
+
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_cleanup_tagset;
+
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_delete_hw_queues;
+
+ ctrl->ioq_live = true;
+
+ return 0;
+
+out_delete_hw_queues:
+ nvme_fc_delete_hw_io_queues(ctrl);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ nvme_fc_free_io_queues(ctrl);
+
+ /* force put free routine to ignore io queues */
+ ctrl->ctrl.tagset = NULL;
+
+ return ret;
+}
+
+static int
+nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
+ unsigned int nr_io_queues;
+ int ret;
+
+ nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ ctrl->lport->ops->max_hw_queues);
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "set_queue_count failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!nr_io_queues && prior_ioq_cnt) {
+ dev_info(ctrl->ctrl.device,
+ "Fail Reconnect: At least 1 io queue "
+ "required (was %d)\n", prior_ioq_cnt);
+ return -ENOSPC;
+ }
+
+ ctrl->ctrl.queue_count = nr_io_queues + 1;
+ /* check for io queues existing */
+ if (ctrl->ctrl.queue_count == 1)
+ return 0;
+
+ if (prior_ioq_cnt != nr_io_queues) {
+ dev_info(ctrl->ctrl.device,
+ "reconnect: revising io queue count from %d to %d\n",
+ prior_ioq_cnt, nr_io_queues);
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ }
+
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_free_io_queues;
+
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_delete_hw_queues;
+
+ return 0;
+
+out_delete_hw_queues:
+ nvme_fc_delete_hw_io_queues(ctrl);
+out_free_io_queues:
+ nvme_fc_free_io_queues(ctrl);
+ return ret;
+}
+
+static void
+nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_lport *lport = rport->lport;
+
+ atomic_inc(&lport->act_rport_cnt);
+}
+
+static void
+nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_lport *lport = rport->lport;
+ u32 cnt;
+
+ cnt = atomic_dec_return(&lport->act_rport_cnt);
+ if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
+ lport->ops->localport_delete(&lport->localport);
+}
+
+static int
+nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ u32 cnt;
+
+ if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
+ return 1;
+
+ cnt = atomic_inc_return(&rport->act_ctrl_cnt);
+ if (cnt == 1)
+ nvme_fc_rport_active_on_lport(rport);
+
+ return 0;
+}
+
+static int
+nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ u32 cnt;
+
+ /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
+
+ cnt = atomic_dec_return(&rport->act_ctrl_cnt);
+ if (cnt == 0) {
+ if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
+ lport->ops->remoteport_delete(&rport->remoteport);
+ nvme_fc_rport_inactive_on_lport(rport);
+ }
+
+ return 0;
+}
+
+/*
+ * This routine restarts the controller on the host side, and
+ * on the link side, recreates the controller association.
+ */
+static int
+nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ struct nvmefc_ls_rcv_op *disls = NULL;
+ unsigned long flags;
+ int ret;
+ bool changed;
+
+ ++ctrl->ctrl.nr_reconnects;
+
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ return -ENODEV;
+
+ if (nvme_fc_ctlr_active_on_rport(ctrl))
+ return -ENOTUNIQ;
+
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: create association : host wwpn 0x%016llx "
+ " rport wwpn 0x%016llx: NQN \"%s\"\n",
+ ctrl->cnum, ctrl->lport->localport.port_name,
+ ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
+
+ clear_bit(ASSOC_FAILED, &ctrl->flags);
+
+ /*
+ * Create the admin queue
+ */
+
+ ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+ NVME_AQ_DEPTH);
+ if (ret)
+ goto out_free_queue;
+
+ ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
+ NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
+ if (ret)
+ goto out_delete_hw_queue;
+
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (ret)
+ goto out_disconnect_admin_queue;
+
+ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
+ /*
+ * Check controller capabilities
+ *
+ * todo:- add code to check if ctrl attributes changed from
+ * prior connection values
+ */
+
+ ret = nvme_enable_ctrl(&ctrl->ctrl);
+ if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+ ret = -EIO;
+ if (ret)
+ goto out_disconnect_admin_queue;
+
+ ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
+ ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
+ (ilog2(SZ_4K) - 9);
+
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+
+ ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
+ if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+ ret = -EIO;
+ if (ret)
+ goto out_disconnect_admin_queue;
+
+ /* sanity checks */
+
+ /* FC-NVME does not have other data in the capsule */
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
+ ctrl->ctrl.icdoff);
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out_disconnect_admin_queue;
+ }
+
+ /* FC-NVME supports normal SGL Data Block Descriptors */
+ if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
+ dev_err(ctrl->ctrl.device,
+ "Mandatory sgls are not supported!\n");
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out_disconnect_admin_queue;
+ }
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, reducing "
+ "to maxcmd\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ }
+
+ ret = nvme_fc_init_aen_ops(ctrl);
+ if (ret)
+ goto out_term_aen_ops;
+
+ /*
+ * Create the io queues
+ */
+
+ if (ctrl->ctrl.queue_count > 1) {
+ if (!ctrl->ioq_live)
+ ret = nvme_fc_create_io_queues(ctrl);
+ else
+ ret = nvme_fc_recreate_io_queues(ctrl);
+ }
+ if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+ ret = -EIO;
+ if (ret)
+ goto out_term_aen_ops;
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+
+ ctrl->ctrl.nr_reconnects = 0;
+
+ if (changed)
+ nvme_start_ctrl(&ctrl->ctrl);
+
+ return 0; /* Success */
+
+out_term_aen_ops:
+ nvme_fc_term_aen_ops(ctrl);
+out_disconnect_admin_queue:
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
+ ctrl->cnum, ctrl->association_id, ret);
+ /* send a Disconnect(association) LS to fc-nvme target */
+ nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ nvme_fc_xmt_ls_rsp(disls);
+out_delete_hw_queue:
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+out_free_queue:
+ nvme_fc_free_queue(&ctrl->queues[0]);
+ clear_bit(ASSOC_ACTIVE, &ctrl->flags);
+ nvme_fc_ctlr_inactive_on_rport(ctrl);
+
+ return ret;
+}
+
+
+/*
+ * This routine stops operation of the controller on the host side.
+ * On the host os stack side: Admin and IO queues are stopped,
+ * outstanding ios on them terminated via FC ABTS.
+ * On the link side: the association is terminated.
+ */
+static void
+nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmefc_ls_rcv_op *disls = NULL;
+ unsigned long flags;
+
+ if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
+ return;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ set_bit(FCCTRL_TERMIO, &ctrl->flags);
+ ctrl->iocnt = 0;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ __nvme_fc_abort_outstanding_ios(ctrl, false);
+
+ /* kill the aens as they are a separate path */
+ nvme_fc_abort_aen_ops(ctrl);
+
+ /* wait for all io that had to be aborted */
+ spin_lock_irq(&ctrl->lock);
+ wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
+ clear_bit(FCCTRL_TERMIO, &ctrl->flags);
+ spin_unlock_irq(&ctrl->lock);
+
+ nvme_fc_term_aen_ops(ctrl);
+
+ /*
+ * send a Disconnect(association) LS to fc-nvme target
+ * Note: could have been sent at top of process, but
+ * cleaner on link traffic if after the aborts complete.
+ * Note: if association doesn't exist, association_id will be 0
+ */
+ if (ctrl->association_id)
+ nvme_fc_xmt_disconnect_assoc(ctrl);
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ /*
+ * if a Disconnect Request was waiting for a response, send
+ * now that all ABTS's have been issued (and are complete).
+ */
+ nvme_fc_xmt_ls_rsp(disls);
+
+ if (ctrl->ctrl.tagset) {
+ nvme_fc_delete_hw_io_queues(ctrl);
+ nvme_fc_free_io_queues(ctrl);
+ }
+
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+ nvme_fc_free_queue(&ctrl->queues[0]);
+
+ /* re-enable the admin_q so anything new can fast fail */
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+
+ /* resume the io queues so that things will fast fail */
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
+
+ nvme_fc_ctlr_inactive_on_rport(ctrl);
+}
+
+static void
+nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+
+ cancel_work_sync(&ctrl->ioerr_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
+ /*
+ * kill the association on the link side. this will block
+ * waiting for io to terminate
+ */
+ nvme_fc_delete_association(ctrl);
+}
+
+static void
+nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ struct nvme_fc_remote_port *portptr = &rport->remoteport;
+ unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
+ bool recon = true;
+
+ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
+ return;
+
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+ ctrl->cnum, status);
+ if (status > 0 && (status & NVME_SC_DNR))
+ recon = false;
+ } else if (time_after_eq(jiffies, rport->dev_loss_end))
+ recon = false;
+
+ if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: Reconnect attempt in %ld "
+ "seconds\n",
+ ctrl->cnum, recon_delay / HZ);
+ else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
+ recon_delay = rport->dev_loss_end - jiffies;
+
+ queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
+ } else {
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+ if (status > 0 && (status & NVME_SC_DNR))
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: reconnect failure\n",
+ ctrl->cnum);
+ else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Max reconnect attempts "
+ "(%d) reached.\n",
+ ctrl->cnum, ctrl->ctrl.nr_reconnects);
+ } else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: dev_loss_tmo (%d) expired "
+ "while waiting for remoteport connectivity.\n",
+ ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
+ (ctrl->ctrl.opts->max_reconnects *
+ ctrl->ctrl.opts->reconnect_delay)));
+ WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
+ }
+}
+
+static void
+nvme_fc_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
+
+ nvme_stop_ctrl(&ctrl->ctrl);
+
+ /* will block while waiting for io to terminate */
+ nvme_fc_delete_association(ctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to CONNECTING\n", ctrl->cnum);
+
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule connect "
+ "after reset\n", ctrl->cnum);
+ } else {
+ flush_delayed_work(&ctrl->connect_work);
+ }
+ } else {
+ nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
+ }
+}
+
+
+static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
+ .name = "fc",
+ .module = THIS_MODULE,
+ .flags = NVME_F_FABRICS,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .free_ctrl = nvme_fc_nvme_ctrl_freed,
+ .submit_async_event = nvme_fc_submit_async_event,
+ .delete_ctrl = nvme_fc_delete_ctrl,
+ .get_address = nvmf_get_address,
+};
+
+static void
+nvme_fc_connect_ctrl_work(struct work_struct *work)
+{
+ int ret;
+
+ struct nvme_fc_ctrl *ctrl =
+ container_of(to_delayed_work(work),
+ struct nvme_fc_ctrl, connect_work);
+
+ ret = nvme_fc_create_association(ctrl);
+ if (ret)
+ nvme_fc_reconnect_or_delete(ctrl, ret);
+ else
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connect complete\n",
+ ctrl->cnum);
+}
+
+
+static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
+ .queue_rq = nvme_fc_queue_rq,
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_request,
+ .exit_request = nvme_fc_exit_request,
+ .init_hctx = nvme_fc_init_admin_hctx,
+ .timeout = nvme_fc_timeout,
+};
+
+
+/*
+ * Fails a controller request if it matches an existing controller
+ * (association) with the same tuple:
+ * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
+ *
+ * The ports don't need to be compared as they are intrinsically
+ * already matched by the port pointers supplied.
+ */
+static bool
+nvme_fc_existing_controller(struct nvme_fc_rport *rport,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
+ if (found)
+ break;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return found;
+}
+
+static struct nvme_ctrl *
+nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ int ret, idx, ctrl_loss_tmo;
+
+ if (!(rport->remoteport.port_role &
+ (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
+ ret = -EBADR;
+ goto out_fail;
+ }
+
+ if (!opts->duplicate_connect &&
+ nvme_fc_existing_controller(rport, opts)) {
+ ret = -EALREADY;
+ goto out_fail;
+ }
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_free_ctrl;
+ }
+
+ /*
+ * if ctrl_loss_tmo is being enforced and the default reconnect delay
+ * is being used, change to a shorter reconnect delay for FC.
+ */
+ if (opts->max_reconnects != -1 &&
+ opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
+ opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
+ ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
+ opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ }
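+ /*
+ * E.g. (assuming the usual fabrics defaults of a 10s reconnect
+ * delay and a 600s ctrl_loss_tmo, and the 2s FC default): the
+ * parsed max_reconnects of 600/10 = 60 is re-derived here as
+ * DIV_ROUND_UP(600, 2) = 300, keeping the same overall loss
+ * window while retrying sooner.
+ */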
+
+ ctrl->ctrl.opts = opts;
+ ctrl->ctrl.nr_reconnects = 0;
+ if (lport->dev)
+ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
+ else
+ ctrl->ctrl.numa_node = NUMA_NO_NODE;
+ INIT_LIST_HEAD(&ctrl->ctrl_list);
+ ctrl->lport = lport;
+ ctrl->rport = rport;
+ ctrl->dev = lport->dev;
+ ctrl->cnum = idx;
+ ctrl->ioq_live = false;
+ init_waitqueue_head(&ctrl->ioabort_wait);
+
+ get_device(ctrl->dev);
+ kref_init(&ctrl->ref);
+
+ INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
+ INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
+ spin_lock_init(&ctrl->lock);
+
+ /* io queue count */
+ ctrl->ctrl.queue_count = min_t(unsigned int,
+ opts->nr_io_queues,
+ lport->ops->max_hw_queues);
+ ctrl->ctrl.queue_count++; /* +1 for admin queue */
+
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+ ctrl->ctrl.cntlid = 0xffff;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
+ sizeof(struct nvme_fc_queue), GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_free_ida;
+
+ nvme_fc_init_queue(ctrl, 0);
+
+ /*
+ * Would have been nice to init io queues tag set as well.
+ * However, we require interaction from the controller
+ * for max io queue count before we can do so.
+ * Defer this to the connect path.
+ */
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
+ if (ret)
+ goto out_free_queues;
+
+ /* at this point, teardown path changes to ref counting on nvme ctrl */
+
+ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_fc_admin_mq_ops,
+ struct_size_t(struct nvme_fcp_op_w_sgl, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
+ if (ret)
+ goto fail_ctrl;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
+ !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
+ goto fail_ctrl;
+ }
+
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule initial connect\n",
+ ctrl->cnum);
+ goto fail_ctrl;
+ }
+
+ flush_delayed_work(&ctrl->connect_work);
+
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+
+ return &ctrl->ctrl;
+
+fail_ctrl:
+ nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
+ cancel_work_sync(&ctrl->ctrl.reset_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
+
+ ctrl->ctrl.opts = NULL;
+
+ /* initiate nvme ctrl ref counting teardown */
+ nvme_uninit_ctrl(&ctrl->ctrl);
+
+ /* Remove core ctrl ref. */
+ nvme_put_ctrl(&ctrl->ctrl);
+
+ /* as we're past the point where we transition to the ref
+ * counting teardown path, if we return a bad pointer here,
+ * the calling routine, thinking it's prior to the
+ * transition, will do an rport put. Since the teardown
+ * path also does an rport put, we do an extra get here so
+ * that the proper order/teardown happens.
+ */
+ nvme_fc_rport_get(rport);
+
+ return ERR_PTR(-EIO);
+
+out_free_queues:
+ kfree(ctrl->queues);
+out_free_ida:
+ put_device(ctrl->dev);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+ kfree(ctrl);
+out_fail:
+ /* exit via here doesn't follow ctlr ref points */
+ return ERR_PTR(ret);
+}
+
+
+struct nvmet_fc_traddr {
+ u64 nn;
+ u64 pn;
+};
+
+static int
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
+{
+ u64 token64;
+
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
+
+ return 0;
+}
+
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * As the kernel parsers need the 0x prefix to determine the number
+ * base, universally build the string to parse with a 0x prefix before
+ * parsing the name strings.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+ /* validate if string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
+
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
+
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
+}
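+
+/*
+ * For illustration (made-up WWNs), the two traddr forms accepted above
+ * look like:
+ *   nn-0x20000090fa942779:pn-0x10000090fa942779     (0x-prefixed)
+ *   nn-20000090fa942779:pn-10000090fa942779         (bare hex)
+ * i.e. a 16-hex-digit node name and port name; only the offsets and
+ * the "nn-"/"pn-" prefixes are checked, the separator byte itself is
+ * not validated.
+ */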
+
+static struct nvme_ctrl *
+nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
+{
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ struct nvme_ctrl *ctrl;
+ struct nvmet_fc_traddr laddr = { 0L, 0L };
+ struct nvmet_fc_traddr raddr = { 0L, 0L };
+ unsigned long flags;
+ int ret;
+
+ ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
+ if (ret || !raddr.nn || !raddr.pn)
+ return ERR_PTR(-EINVAL);
+
+ ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
+ if (ret || !laddr.nn || !laddr.pn)
+ return ERR_PTR(-EINVAL);
+
+ /* find the host and remote ports to connect together */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ if (lport->localport.node_name != laddr.nn ||
+ lport->localport.port_name != laddr.pn ||
+ lport->localport.port_state != FC_OBJSTATE_ONLINE)
+ continue;
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != raddr.nn ||
+ rport->remoteport.port_name != raddr.pn ||
+ rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ continue;
+
+ /* if fail to get reference fall through. Will error */
+ if (!nvme_fc_rport_get(rport))
+ break;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
+ if (IS_ERR(ctrl))
+ nvme_fc_rport_put(rport);
+ return ctrl;
+ }
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ pr_warn("%s: %s - %s combination not found\n",
+ __func__, opts->traddr, opts->host_traddr);
+ return ERR_PTR(-ENOENT);
+}
+
+
+static struct nvmf_transport_ops nvme_fc_transport = {
+ .name = "fc",
+ .module = THIS_MODULE,
+ .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
+ .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
+ .create_ctrl = nvme_fc_create_ctrl,
+};
+
+/* Arbitrary cap on successive reference failures. With lots of subsystems it could be high */
+#define DISCOVERY_MAX_FAIL 20
+
+static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ LIST_HEAD(local_disc_list);
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ int failcnt = 0;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+restart:
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (!nvme_fc_lport_get(lport))
+ continue;
+ if (!nvme_fc_rport_get(rport)) {
+ /*
+ * This is a temporary condition. Upon restart
+ * this rport will be gone from the list.
+ *
+ * Revert the lport get (done above) and retry. Anything
+ * added to the list already will be skipped (as
+ * they are no longer list_empty). Loops should
+ * resume at rports that were not yet seen.
+ */
+ nvme_fc_lport_put(lport);
+
+ if (failcnt++ < DISCOVERY_MAX_FAIL)
+ goto restart;
+
+ pr_err("nvme_discovery: too many reference "
+ "failures\n");
+ goto process_local_list;
+ }
+ if (list_empty(&rport->disc_list))
+ list_add_tail(&rport->disc_list,
+ &local_disc_list);
+ }
+ }
+
+process_local_list:
+ while (!list_empty(&local_disc_list)) {
+ rport = list_first_entry(&local_disc_list,
+ struct nvme_fc_rport, disc_list);
+ list_del_init(&rport->disc_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ lport = rport->lport;
+ /* signal discovery. Won't hurt if it repeats */
+ nvme_fc_signal_discovery_scan(lport, rport);
+ nvme_fc_rport_put(rport);
+ nvme_fc_lport_put(lport);
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
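+
+/*
+ * Illustrative usage (sysfs path follows from the "fc" class and the
+ * "fc_udev_device" device created in nvme_fc_init_module() below);
+ * writing any value rescans all known lport/rport pairs, e.g.:
+ *   echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
+ */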
+
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
+/* Parse the cgroup id from a buf and return the length of cgrpid */
+static int fc_parse_cgrpid(const char *buf, u64 *id)
+{
+ char cgrp_id[16+1];
+ int cgrpid_len, j;
+
+ memset(cgrp_id, 0x0, sizeof(cgrp_id));
+ for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
+ if (buf[cgrpid_len] != ':')
+ cgrp_id[cgrpid_len] = buf[cgrpid_len];
+ else {
+ j = 1;
+ break;
+ }
+ }
+ if (!j)
+ return -EINVAL;
+ if (kstrtou64(cgrp_id, 16, id) < 0)
+ return -EINVAL;
+ return cgrpid_len;
+}
+
+/*
+ * Parse and update the appid in the blkcg associated with the cgroupid.
+ */
+static ssize_t fc_appid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ size_t orig_count = count;
+ u64 cgrp_id;
+ int appid_len = 0;
+ int cgrpid_len = 0;
+ char app_id[FC_APPID_LEN];
+ int ret = 0;
+
+ if (buf[count-1] == '\n')
+ count--;
+
+ if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
+ return -EINVAL;
+
+ cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
+ if (cgrpid_len < 0)
+ return -EINVAL;
+ appid_len = count - cgrpid_len - 1;
+ if (appid_len > FC_APPID_LEN)
+ return -EINVAL;
+
+ memset(app_id, 0x0, sizeof(app_id));
+ memcpy(app_id, &buf[cgrpid_len+1], appid_len);
+ ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
+ if (ret < 0)
+ return ret;
+ return orig_count;
+}
+static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
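+
+/*
+ * Illustrative usage (made-up ids; path follows from the class/device
+ * names used in this file). The buffer format is "<cgroupid>:<appid>",
+ * with the cgroup id in hex, e.g.:
+ *   echo "0x1234A:testapp" > /sys/class/fc/fc_udev_device/appid_store
+ */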
+#endif /* CONFIG_BLK_CGROUP_FC_APPID */
+
+static struct attribute *nvme_fc_attrs[] = {
+ &dev_attr_nvme_discovery.attr,
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
+ &dev_attr_appid_store.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group nvme_fc_attr_group = {
+ .attrs = nvme_fc_attrs,
+};
+
+static const struct attribute_group *nvme_fc_attr_groups[] = {
+ &nvme_fc_attr_group,
+ NULL
+};
+
+static struct class fc_class = {
+ .name = "fc",
+ .dev_groups = nvme_fc_attr_groups,
+};
+
+static int __init nvme_fc_init_module(void)
+{
+ int ret;
+
+ nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+ if (!nvme_fc_wq)
+ return -ENOMEM;
+
+ /*
+ * NOTE:
+ * It is expected that the kernel will eventually combine the
+ * FC-isms that currently live under SCSI, and that are now being
+ * added to by NVMe, into a new standalone FC class. The SCSI
+ * and NVMe protocols and their devices would be under this
+ * new FC class.
+ *
+ * As we need something to post FC-specific udev events to,
+ * specifically for nvme probe events, start by creating the
+ * new device class. When the new standalone FC class is
+ * put in place, this code will move to a more generic
+ * location for the class.
+ */
+ ret = class_register(&fc_class);
+ if (ret) {
+ pr_err("couldn't register class fc\n");
+ goto out_destroy_wq;
+ }
+
+ /*
+ * Create a device for the FC-centric udev events
+ */
+ fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
+ "fc_udev_device");
+ if (IS_ERR(fc_udev_device)) {
+ pr_err("couldn't create fc_udev device!\n");
+ ret = PTR_ERR(fc_udev_device);
+ goto out_destroy_class;
+ }
+
+ ret = nvmf_register_transport(&nvme_fc_transport);
+ if (ret)
+ goto out_destroy_device;
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(&fc_class, MKDEV(0, 0));
+out_destroy_class:
+ class_unregister(&fc_class);
+out_destroy_wq:
+ destroy_workqueue(nvme_fc_wq);
+
+ return ret;
+}
+
+static void
+nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_ctrl *ctrl;
+
+ spin_lock(&rport->lock);
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport unloading: deleting ctrl\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+ spin_unlock(&rport->lock);
+}
+
+static void
+nvme_fc_cleanup_for_unload(void)
+{
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ nvme_fc_delete_controllers(rport);
+ }
+ }
+}
+
+static void __exit nvme_fc_exit_module(void)
+{
+ unsigned long flags;
+ bool need_cleanup = false;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ nvme_fc_waiting_to_unload = true;
+ if (!list_empty(&nvme_fc_lport_list)) {
+ need_cleanup = true;
+ nvme_fc_cleanup_for_unload();
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ if (need_cleanup) {
+ pr_info("%s: waiting for ctlr deletes\n", __func__);
+ wait_for_completion(&nvme_fc_unload_proceed);
+ pr_info("%s: ctrl deletes complete\n", __func__);
+ }
+
+ nvmf_unregister_transport(&nvme_fc_transport);
+
+ ida_destroy(&nvme_fc_local_port_cnt);
+ ida_destroy(&nvme_fc_ctrl_cnt);
+
+ device_destroy(&fc_class, MKDEV(0, 0));
+ class_unregister(&fc_class);
+ destroy_workqueue(nvme_fc_wq);
+}
+
+module_init(nvme_fc_init_module);
+module_exit(nvme_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/fc.h b/drivers/nvme/host/fc.h
new file mode 100644
index 0000000000..05ce566f2c
--- /dev/null
+++ b/drivers/nvme/host/fc.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016, Avago Technologies
+ */
+
+#ifndef _NVME_FC_TRANSPORT_H
+#define _NVME_FC_TRANSPORT_H 1
+
+
+/*
+ * Common definitions between the nvme_fc (host) transport and
+ * nvmet_fc (target) transport implementation.
+ */
+
+/*
+ * ****************** FC-NVME LS HANDLING ******************
+ */
+
+union nvmefc_ls_requests {
+ struct fcnvme_ls_rqst_w0 w0;
+ struct fcnvme_ls_cr_assoc_rqst rq_cr_assoc;
+ struct fcnvme_ls_cr_conn_rqst rq_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_rqst rq_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_rqst rq_dis_conn;
+} __aligned(128); /* alignment for other structures allocated along with this */
+
+union nvmefc_ls_responses {
+ struct fcnvme_ls_rjt rsp_rjt;
+ struct fcnvme_ls_cr_assoc_acc rsp_cr_assoc;
+ struct fcnvme_ls_cr_conn_acc rsp_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_acc rsp_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_acc rsp_dis_conn;
+} __aligned(128); /* alignment for other structures allocated along with this */
+
+static inline void
+nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
+{
+ struct fcnvme_ls_acc_hdr *acc = buf;
+
+ acc->w0.ls_cmd = ls_cmd;
+ acc->desc_list_len = desc_len;
+ acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ acc->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static inline int
+nvme_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+ u8 reason, u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ nvme_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+ ls_cmd);
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+
+ return sizeof(struct fcnvme_ls_rjt);
+}
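+
+/*
+ * A minimal usage sketch (placeholder names "acc" and "w0" for an LS
+ * response buffer and the received word 0): a receiver rejecting an
+ * unsupported LS might do
+ *
+ *     rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), w0->ls_cmd,
+ *                                 FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
+ *
+ * filling in the common response header plus the RJT descriptor and
+ * returning the response length to hand back to the LLDD.
+ */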
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_CR_ASSOC_LEN = 1,
+ VERR_CR_ASSOC_RQST_LEN = 2,
+ VERR_CR_ASSOC_CMD = 3,
+ VERR_CR_ASSOC_CMD_LEN = 4,
+ VERR_ERSP_RATIO = 5,
+ VERR_ASSOC_ALLOC_FAIL = 6,
+ VERR_QUEUE_ALLOC_FAIL = 7,
+ VERR_CR_CONN_LEN = 8,
+ VERR_CR_CONN_RQST_LEN = 9,
+ VERR_ASSOC_ID = 10,
+ VERR_ASSOC_ID_LEN = 11,
+ VERR_NO_ASSOC = 12,
+ VERR_CONN_ID = 13,
+ VERR_CONN_ID_LEN = 14,
+ VERR_INVAL_CONN = 15,
+ VERR_CR_CONN_CMD = 16,
+ VERR_CR_CONN_CMD_LEN = 17,
+ VERR_DISCONN_LEN = 18,
+ VERR_DISCONN_RQST_LEN = 19,
+ VERR_DISCONN_CMD = 20,
+ VERR_DISCONN_CMD_LEN = 21,
+ VERR_DISCONN_SCOPE = 22,
+ VERR_RS_LEN = 23,
+ VERR_RS_RQST_LEN = 24,
+ VERR_RS_CMD = 25,
+ VERR_RS_CMD_LEN = 26,
+ VERR_RS_RCTL = 27,
+ VERR_RS_RO = 28,
+ VERR_LSACC = 29,
+ VERR_LSDESC_RQST = 30,
+ VERR_LSDESC_RQST_LEN = 31,
+ VERR_CR_ASSOC = 32,
+ VERR_CR_ASSOC_ACC_LEN = 33,
+ VERR_CR_CONN = 34,
+ VERR_CR_CONN_ACC_LEN = 35,
+ VERR_DISCONN = 36,
+ VERR_DISCONN_ACC_LEN = 37,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Bad CR_ASSOC Length",
+ "Bad CR_ASSOC Rqst Length",
+ "Not CR_ASSOC Cmd",
+ "Bad CR_ASSOC Cmd Length",
+ "Bad Ersp Ratio",
+ "Association Allocation Failed",
+ "Queue Allocation Failed",
+ "Bad CR_CONN Length",
+ "Bad CR_CONN Rqst Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "No Association",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "Invalid Connection ID",
+ "Not CR_CONN Cmd",
+ "Bad CR_CONN Cmd Length",
+ "Bad DISCONN Length",
+ "Bad DISCONN Rqst Length",
+ "Not DISCONN Cmd",
+ "Bad DISCONN Cmd Length",
+ "Bad Disconnect Scope",
+ "Bad RS Length",
+ "Bad RS Rqst Length",
+ "Not RS Cmd",
+ "Bad RS Cmd Length",
+ "Bad RS R_CTL",
+ "Bad RS Relative Offset",
+ "Not LS_ACC",
+ "Not LSDESC_RQST",
+ "Bad LSDESC_RQST Length",
+ "Not CR_ASSOC Rqst",
+ "Bad CR_ASSOC ACC Length",
+ "Not CR_CONN Rqst",
+ "Bad CR_CONN ACC Length",
+ "Not Disconnect Rqst",
+ "Bad Disconnect ACC Length",
+};
+
+#define NVME_FC_LAST_LS_CMD_VALUE FCNVME_LS_DISCONNECT_CONN
+
+static char *nvmefc_ls_names[] = {
+ "Reserved (0)",
+ "RJT (1)",
+ "ACC (2)",
+ "Create Association",
+ "Create Connection",
+ "Disconnect Association",
+ "Disconnect Connection",
+};
+
+static inline void
+nvmefc_fmt_lsreq_discon_assoc(struct nvmefc_ls_req *lsreq,
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst,
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc,
+ u64 association_id)
+{
+ lsreq->rqstaddr = discon_rqst;
+ lsreq->rqstlen = sizeof(*discon_rqst);
+ lsreq->rspaddr = discon_acc;
+ lsreq->rsplen = sizeof(*discon_acc);
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
+ discon_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+ discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ discon_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+
+ discon_rqst->associd.association_id = cpu_to_be64(association_id);
+
+ discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+ FCNVME_LSDESC_DISCONN_CMD);
+ discon_rqst->discon_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+}
+
+static inline int
+nvmefc_vldt_lsreq_discon_assoc(u32 rqstlen,
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst)
+{
+ int ret = 0;
+
+ if (rqstlen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
+ ret = VERR_DISCONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
+ ret = VERR_DISCONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->discon_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+ ret = VERR_DISCONN_CMD;
+ else if (rqst->discon_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+ ret = VERR_DISCONN_CMD_LEN;
+ /*
+ * As the standard changed on the LS, check if the old format was used
+ * and the scope is something other than Association (e.g. 0).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
+ ret = VERR_DISCONN_SCOPE;
+
+ return ret;
+}
+
+#endif /* _NVME_FC_TRANSPORT_H */
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
new file mode 100644
index 0000000000..8df73a0b39
--- /dev/null
+++ b/drivers/nvme/host/hwmon.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express hardware monitoring support
+ * Copyright (c) 2019, Guenter Roeck
+ */
+
+#include <linux/hwmon.h>
+#include <linux/units.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+struct nvme_hwmon_data {
+ struct nvme_ctrl *ctrl;
+ struct nvme_smart_log *log;
+ struct mutex read_lock;
+};
+
+static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long *temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ u32 status;
+ int ret;
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ &status);
+ if (ret > 0)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ *temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK);
+
+ return 0;
+}
+
+static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ int ret;
+
+ temp = millicelsius_to_kelvin(temp);
+ threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ NULL);
+ if (ret > 0)
+ return -EIO;
+
+ return ret;
+}
+
+static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
+{
+ return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+ NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
+}
+
+static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+ struct nvme_smart_log *log = data->log;
+ int temp;
+ int err;
+
+ /*
+ * First handle attributes which don't require us to read
+ * the smart log.
+ */
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_get_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_get_temp_thresh(data->ctrl, channel, true, val);
+ case hwmon_temp_crit:
+ *val = kelvin_to_millicelsius(data->ctrl->cctemp);
+ return 0;
+ default:
+ break;
+ }
+
+ mutex_lock(&data->read_lock);
+ err = nvme_hwmon_get_smart_log(data);
+ if (err)
+ goto unlock;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!channel)
+ temp = get_unaligned_le16(log->temperature);
+ else
+ temp = le16_to_cpu(log->temp_sensor[channel - 1]);
+ *val = kelvin_to_millicelsius(temp);
+ break;
+ case hwmon_temp_alarm:
+ *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&data->read_lock);
+ return err;
+}
+
+static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_set_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_set_temp_thresh(data->ctrl, channel, true, val);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const char * const nvme_hwmon_sensor_names[] = {
+ "Composite",
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4",
+ "Sensor 5",
+ "Sensor 6",
+ "Sensor 7",
+ "Sensor 8",
+};
+
+static int nvme_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ *str = nvme_hwmon_sensor_names[channel];
+ return 0;
+}
+
+static umode_t nvme_hwmon_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct nvme_hwmon_data *data = _data;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ if (!channel && data->ctrl->cctemp)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ if ((!channel && data->ctrl->wctemp) ||
+ (channel && data->log->temp_sensor[channel - 1] &&
+ !(data->ctrl->quirks &
+ NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) {
+ if (data->ctrl->quirks &
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ return 0444;
+ return 0644;
+ }
+ break;
+ case hwmon_temp_alarm:
+ if (!channel)
+ return 0444;
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ if (!channel || data->log->temp_sensor[channel - 1])
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *const nvme_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops nvme_hwmon_ops = {
+ .is_visible = nvme_hwmon_is_visible,
+ .read = nvme_hwmon_read,
+ .read_string = nvme_hwmon_read_string,
+ .write = nvme_hwmon_write,
+};
+
+static const struct hwmon_chip_info nvme_hwmon_chip_info = {
+ .ops = &nvme_hwmon_ops,
+ .info = nvme_hwmon_info,
+};
+
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ struct device *dev = ctrl->device;
+ struct nvme_hwmon_data *data;
+ struct device *hwmon;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+ if (!data->log) {
+ err = -ENOMEM;
+ goto err_free_data;
+ }
+
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ goto err_free_log;
+ }
+
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+ data, &nvme_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ err = PTR_ERR(hwmon);
+ goto err_free_log;
+ }
+ ctrl->hwmon_device = hwmon;
+ return 0;
+
+err_free_log:
+ kfree(data->log);
+err_free_data:
+ kfree(data);
+ return err;
+}
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->hwmon_device) {
+ struct nvme_hwmon_data *data =
+ dev_get_drvdata(ctrl->hwmon_device);
+
+ hwmon_device_unregister(ctrl->hwmon_device);
+ ctrl->hwmon_device = NULL;
+ kfree(data->log);
+ kfree(data);
+ }
+}
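+
+/*
+ * A minimal userspace sketch of consuming the interface registered above
+ * through the standard hwmon sysfs attributes. The hwmon instance number is
+ * an assumption; a real program would first match the "name" attribute
+ * against "nvme". temp1_input reports the composite temperature in
+ * millidegrees Celsius.
+ *
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           long millic;
+ *           FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
+ *
+ *           if (!f)
+ *                   return 1;
+ *           if (fscanf(f, "%ld", &millic) != 1) {
+ *                   fclose(f);
+ *                   return 1;
+ *           }
+ *           fclose(f);
+ *           printf("composite: %ld.%03ld C\n", millic / 1000, millic % 1000);
+ *           return 0;
+ *   }
+ */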
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
new file mode 100644
index 0000000000..4939ed3563
--- /dev/null
+++ b/drivers/nvme/host/ioctl.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * Copyright (c) 2017-2021 Christoph Hellwig.
+ */
+#include <linux/ptrace.h> /* for force_successful_syscall_return */
+#include <linux/nvme_ioctl.h>
+#include <linux/io_uring.h>
+#include "nvme.h"
+
+enum {
+ NVME_IOCTL_VEC = (1 << 0),
+ NVME_IOCTL_PARTITION = (1 << 1),
+};
+
+static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+ unsigned int flags, bool open_for_write)
+{
+ u32 effects;
+
+ /*
+ * Do not allow unprivileged passthrough on partitions, as that allows an
+ * escape from the containment of the partition.
+ */
+ if (flags & NVME_IOCTL_PARTITION)
+ goto admin;
+
+ /*
+ * Do not allow unprivileged processes to send vendor specific or fabrics
+ * commands as we can't be sure about their effects.
+ */
+ if (c->common.opcode >= nvme_cmd_vendor_start ||
+ c->common.opcode == nvme_fabrics_command)
+ goto admin;
+
+ /*
+ * Do not allow unprivileged passthrough of admin commands except
+ * for a subset of identify commands that contain information required
+ * to form proper I/O commands in userspace and do not expose any
+ * potentially sensitive information.
+ */
+ if (!ns) {
+ if (c->common.opcode == nvme_admin_identify) {
+ switch (c->identify.cns) {
+ case NVME_ID_CNS_NS:
+ case NVME_ID_CNS_CS_NS:
+ case NVME_ID_CNS_NS_CS_INDEP:
+ case NVME_ID_CNS_CS_CTRL:
+ case NVME_ID_CNS_CTRL:
+ return true;
+ }
+ }
+ goto admin;
+ }
+
+ /*
+ * Check if the controller provides a Commands Supported and Effects log
+ * and marks this command as supported. If not reject unprivileged
+ * passthrough.
+ */
+ effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+ if (!(effects & NVME_CMD_EFFECTS_CSUPP))
+ goto admin;
+
+ /*
+ * Don't allow passthrough for commands that have intrusive (or unknown)
+ * effects.
+ */
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_UUID_SEL |
+ NVME_CMD_EFFECTS_SCOPE_MASK))
+ goto admin;
+
+ /*
+ * Only allow I/O commands that transfer data to the controller or that
+ * change the logical block contents if the file descriptor is open for
+ * writing.
+ */
+ if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
+ !open_for_write)
+ goto admin;
+
+ return true;
+admin:
+ return capable(CAP_SYS_ADMIN);
+}
+
+/*
+ * Convert integer values from ioctl structures to user pointers, silently
+ * ignoring the upper bits in the compat case to match behaviour of 32-bit
+ * kernels.
+ */
+static void __user *nvme_to_user_ptr(uintptr_t ptrval)
+{
+ if (in_compat_syscall())
+ ptrval = (compat_uptr_t)ptrval;
+ return (void __user *)ptrval;
+}
+
+static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ unsigned len, u32 seed)
+{
+ struct bio_integrity_payload *bip;
+ int ret = -ENOMEM;
+ void *buf;
+ struct bio *bio = req->bio;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ if (req_op(req) == REQ_OP_DRV_OUT) {
+ ret = -EFAULT;
+ if (copy_from_user(buf, ubuf, len))
+ goto out_free_meta;
+ } else {
+ memset(buf, 0, len);
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+ if (IS_ERR(bip)) {
+ ret = PTR_ERR(bip);
+ goto out_free_meta;
+ }
+
+ bip->bip_iter.bi_sector = seed;
+ ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
+ offset_in_page(buf));
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto out_free_meta;
+ }
+
+ req->cmd_flags |= REQ_INTEGRITY;
+ return buf;
+out_free_meta:
+ kfree(buf);
+out:
+ return ERR_PTR(ret);
+}
+
+static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
+ void *meta, unsigned len, int ret)
+{
+ if (!ret && req_op(req) == REQ_OP_DRV_IN &&
+ copy_to_user(ubuf, meta, len))
+ ret = -EFAULT;
+ kfree(meta);
+ return ret;
+}
+
+static struct request *nvme_alloc_user_request(struct request_queue *q,
+ struct nvme_command *cmd, blk_opf_t rq_flags,
+ blk_mq_req_flags_t blk_flags)
+{
+ struct request *req;
+
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
+ if (IS_ERR(req))
+ return req;
+ nvme_init_request(req, cmd);
+ nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
+
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+ unsigned int flags)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ struct iov_iter iter;
+
+ /* fixedbufs is only for non-vectored io */
+ if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
+ return -EINVAL;
+ ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
+ goto out;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ } else {
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+ bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+ 0, rq_data_dir(req));
+ }
+
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
+ meta_seed);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
+ }
+ *metap = meta;
+ }
+
+ return ret;
+
+out_unmap:
+ if (bio)
+ blk_rq_unmap_user(bio);
+out:
+ blk_mq_free_request(req);
+ return ret;
+}
+
+static int nvme_submit_user_cmd(struct request_queue *q,
+ struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+ void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+ u64 *result, unsigned timeout, unsigned int flags)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_ctrl *ctrl;
+ struct request *req;
+ void *meta = NULL;
+ struct bio *bio;
+ u32 effects;
+ int ret;
+
+ req = nvme_alloc_user_request(q, cmd, 0, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ req->timeout = timeout;
+ if (ubuffer && bufflen) {
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, NULL, flags);
+ if (ret)
+ return ret;
+ }
+
+ bio = req->bio;
+ ctrl = nvme_req(req)->ctrl;
+
+ effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ ret = nvme_execute_rq(req, false);
+ if (result)
+ *result = le64_to_cpu(nvme_req(req)->result.u64);
+ if (meta)
+ ret = nvme_finish_user_metadata(req, meta_buffer, meta,
+ meta_len, ret);
+ if (bio)
+ blk_rq_unmap_user(bio);
+ blk_mq_free_request(req);
+
+ if (effects)
+ nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+
+ return ret;
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length, meta_len;
+ void __user *metadata;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ if (io.flags)
+ return -EINVAL;
+
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ length = (io.nblocks + 1) << ns->lba_shift;
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
+
+ if (ns->features & NVME_NS_EXT_LBAS) {
+ length += meta_len;
+ meta_len = 0;
+ } else if (meta_len) {
+ if ((io.metadata & 3) || !io.metadata)
+ return -EINVAL;
+ }
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(ns->head->ns_id);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag);
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+
+ return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+ meta_len, lower_32_bits(io.slba), NULL, 0, 0);
+}
+
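+/*
+ * Illustrative userspace sketch for the legacy NVME_IOCTL_SUBMIT_IO path
+ * handled above: read the first logical block of a namespace. The device
+ * path and the 4096-byte LBA size are assumptions; nblocks is zero-based.
+ *
+ *   #include <fcntl.h>
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *   #include <sys/ioctl.h>
+ *   #include <unistd.h>
+ *   #include <linux/nvme_ioctl.h>
+ *
+ *   int main(void)
+ *   {
+ *           unsigned char buf[4096];
+ *           struct nvme_user_io io;
+ *           int err, fd = open("/dev/nvme0n1", O_RDONLY);
+ *
+ *           if (fd < 0)
+ *                   return 1;
+ *           memset(&io, 0, sizeof(io));
+ *           io.opcode = 0x02;             // nvme_cmd_read
+ *           io.nblocks = 0;               // zero-based: one block
+ *           io.slba = 0;
+ *           io.addr = (unsigned long)buf;
+ *           err = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
+ *           printf("status: %d\n", err);
+ *           close(fd);
+ *           return err ? 1 : 0;
+ *   }
+ */
+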
+static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns *ns, __u32 nsid)
+{
+ if (ns && nsid != ns->head->ns_id) {
+ dev_err(ctrl->device,
+ "%s: nsid (%u) in cmd does not match nsid (%u)"
+ "of namespace\n",
+ current->comm, nsid, ns->head->ns_id);
+ return false;
+ }
+
+ return true;
+}
+
+static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
+ bool open_for_write)
+{
+ struct nvme_passthru_cmd cmd;
+ struct nvme_command c;
+ unsigned timeout = 0;
+ u64 result;
+ int status;
+
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+ if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
+ return -EACCES;
+
+ if (cmd.timeout_ms)
+ timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+ cmd.metadata_len, 0, &result, timeout, 0);
+
+ if (status >= 0) {
+ if (put_user(result, &ucmd->result))
+ return -EFAULT;
+ }
+
+ return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
+ bool open_for_write)
+{
+ struct nvme_passthru_cmd64 cmd;
+ struct nvme_command c;
+ unsigned timeout = 0;
+ int status;
+
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+ if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
+ return -EACCES;
+
+ if (cmd.timeout_ms)
+ timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+ cmd.metadata_len, 0, &cmd.result, timeout, flags);
+
+ if (status >= 0) {
+ if (put_user(cmd.result, &ucmd->result))
+ return -EFAULT;
+ }
+
+ return status;
+}
+
+struct nvme_uring_data {
+ __u64 metadata;
+ __u64 addr;
+ __u32 data_len;
+ __u32 metadata_len;
+ __u32 timeout_ms;
+};
+
+/*
+ * This overlays struct io_uring_cmd pdu.
+ * Expect build errors if this grows larger than that.
+ */
+struct nvme_uring_cmd_pdu {
+ union {
+ struct bio *bio;
+ struct request *req;
+ };
+ u32 meta_len;
+ u32 nvme_status;
+ union {
+ struct {
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ };
+ u64 result;
+ } u;
+};
+
+static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+}
+
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
+ unsigned issue_flags)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ struct request *req = pdu->req;
+ int status;
+ u64 result;
+
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ status = -EINTR;
+ else
+ status = nvme_req(req)->status;
+
+ result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ if (pdu->meta_len)
+ status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+ pdu->u.meta, pdu->meta_len, status);
+ if (req->bio)
+ blk_rq_unmap_user(req->bio);
+ blk_mq_free_request(req);
+
+ io_uring_cmd_done(ioucmd, status, result, issue_flags);
+}
+
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
+ unsigned issue_flags)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ req->bio = pdu->bio;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
+ pdu->nvme_status = -EINTR;
+ } else {
+ pdu->nvme_status = nvme_req(req)->status;
+ if (!pdu->nvme_status)
+ pdu->nvme_status = blk_status_to_errno(err);
+ }
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (blk_rq_is_poll(req)) {
+ WRITE_ONCE(ioucmd->cookie, NULL);
+ nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
+ } else {
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+ }
+
+ return RQ_END_IO_FREE;
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ req->bio = pdu->bio;
+ pdu->req = req;
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (blk_rq_is_poll(req)) {
+ WRITE_ONCE(ioucmd->cookie, NULL);
+ nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
+ } else {
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
+ }
+
+ return RQ_END_IO_NONE;
+}
+
+static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
+ struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
+ struct nvme_uring_data d;
+ struct nvme_command c;
+ struct request *req;
+ blk_opf_t rq_flags = REQ_ALLOC_CACHE;
+ blk_mq_req_flags_t blk_flags = 0;
+ void *meta = NULL;
+ int ret;
+
+ c.common.opcode = READ_ONCE(cmd->opcode);
+ c.common.flags = READ_ONCE(cmd->flags);
+ if (c.common.flags)
+ return -EINVAL;
+
+ c.common.command_id = 0;
+ c.common.nsid = cpu_to_le32(cmd->nsid);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
+ return -EINVAL;
+
+ c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
+ c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
+ c.common.metadata = 0;
+ c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
+ c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
+ c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
+ c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
+ c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
+ c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
+ c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
+
+ if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
+ return -EACCES;
+
+ d.metadata = READ_ONCE(cmd->metadata);
+ d.addr = READ_ONCE(cmd->addr);
+ d.data_len = READ_ONCE(cmd->data_len);
+ d.metadata_len = READ_ONCE(cmd->metadata_len);
+ d.timeout_ms = READ_ONCE(cmd->timeout_ms);
+
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ rq_flags |= REQ_NOWAIT;
+ blk_flags = BLK_MQ_REQ_NOWAIT;
+ }
+ if (issue_flags & IO_URING_F_IOPOLL)
+ rq_flags |= REQ_POLLED;
+
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
+
+ if (d.addr && d.data_len) {
+ ret = nvme_map_user_request(req, d.addr,
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, ioucmd, vec);
+ if (ret)
+ return ret;
+ }
+
+ if (blk_rq_is_poll(req)) {
+ ioucmd->flags |= IORING_URING_CMD_POLLED;
+ WRITE_ONCE(ioucmd->cookie, req);
+ }
+
+ /* to free bio on completion, as req->bio will be null at that time */
+ pdu->bio = req->bio;
+ pdu->meta_len = d.metadata_len;
+ req->end_io_data = ioucmd;
+ if (pdu->meta_len) {
+ pdu->u.meta = meta;
+ pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
+ req->end_io = nvme_uring_cmd_end_io_meta;
+ } else {
+ req->end_io = nvme_uring_cmd_end_io;
+ }
+ blk_execute_rq_nowait(req, false);
+ return -EIOCBQUEUED;
+}
+
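+/*
+ * Illustrative userspace sketch of the io_uring passthrough path implemented
+ * above, reading one block through NVME_URING_CMD_IO on the namespace
+ * character device. The device path, namespace ID and buffer size are
+ * assumptions; it also requires liburing and a kernel with big SQE/CQE
+ * support.
+ *
+ *   #include <fcntl.h>
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *   #include <unistd.h>
+ *   #include <liburing.h>
+ *   #include <linux/nvme_ioctl.h>
+ *
+ *   int main(void)
+ *   {
+ *           unsigned char buf[4096];
+ *           struct io_uring ring;
+ *           struct io_uring_sqe *sqe;
+ *           struct io_uring_cqe *cqe;
+ *           struct nvme_uring_cmd *cmd;
+ *           int fd = open("/dev/ng0n1", O_RDONLY);
+ *
+ *           if (fd < 0)
+ *                   return 1;
+ *           // big SQEs/CQEs, as enforced by nvme_uring_cmd_checks()
+ *           if (io_uring_queue_init(4, &ring,
+ *                           IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
+ *                   return 1;
+ *
+ *           sqe = io_uring_get_sqe(&ring);
+ *           memset(sqe, 0, sizeof(*sqe));
+ *           sqe->opcode = IORING_OP_URING_CMD;
+ *           sqe->fd = fd;
+ *           sqe->cmd_op = NVME_URING_CMD_IO;
+ *
+ *           cmd = (struct nvme_uring_cmd *)sqe->cmd;
+ *           memset(cmd, 0, sizeof(*cmd));
+ *           cmd->opcode = 0x02;           // nvme_cmd_read
+ *           cmd->nsid = 1;                // must match the open namespace
+ *           cmd->addr = (unsigned long)buf;
+ *           cmd->data_len = sizeof(buf);
+ *
+ *           io_uring_submit(&ring);
+ *           io_uring_wait_cqe(&ring, &cqe);
+ *           printf("status: %d\n", cqe->res);
+ *           io_uring_cqe_seen(&ring, cqe);
+ *           io_uring_queue_exit(&ring);
+ *           close(fd);
+ *           return 0;
+ *   }
+ */
+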
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+ if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+ return true;
+ if (is_sed_ioctl(cmd))
+ return true;
+ return false;
+}
+
+static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
+ void __user *argp, bool open_for_write)
+{
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
+ case NVME_IOCTL_ADMIN64_CMD:
+ return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
+ default:
+ return sed_ioctl(ctrl->opal_dev, cmd, argp);
+ }
+}
+
+#ifdef COMPAT_FOR_U64_ALIGNMENT
+struct nvme_user_io32 {
+ __u8 opcode;
+ __u8 flags;
+ __u16 control;
+ __u16 nblocks;
+ __u16 rsvd;
+ __u64 metadata;
+ __u64 addr;
+ __u64 slba;
+ __u32 dsmgmt;
+ __u32 reftag;
+ __u16 apptag;
+ __u16 appmask;
+} __attribute__((__packed__));
+#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
+#endif /* COMPAT_FOR_U64_ALIGNMENT */
+
+static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ void __user *argp, unsigned int flags, bool open_for_write)
+{
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+ force_successful_syscall_return();
+ return ns->head->ns_id;
+ case NVME_IOCTL_IO_CMD:
+ return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
+ /*
+ * struct nvme_user_io can have different padding on some 32-bit ABIs.
+ * Just accept the compat version as all fields that are used are the
+ * same size and at the same offset.
+ */
+#ifdef COMPAT_FOR_U64_ALIGNMENT
+ case NVME_IOCTL_SUBMIT_IO32:
+#endif
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, argp);
+ case NVME_IOCTL_IO64_CMD_VEC:
+ flags |= NVME_IOCTL_VEC;
+ fallthrough;
+ case NVME_IOCTL_IO64_CMD:
+ return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
+ open_for_write);
+ default:
+ return -ENOTTY;
+ }
+}
+
+int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+ bool open_for_write = mode & BLK_OPEN_WRITE;
+ void __user *argp = (void __user *)arg;
+ unsigned int flags = 0;
+
+ if (bdev_is_partition(bdev))
+ flags |= NVME_IOCTL_PARTITION;
+
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
+ return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
+}
+
+long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns =
+ container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+ bool open_for_write = file->f_mode & FMODE_WRITE;
+ void __user *argp = (void __user *)arg;
+
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
+ return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
+}
+
+static int nvme_uring_cmd_checks(unsigned int issue_flags)
+{
+ /* NVMe passthrough requires big SQE/CQE support */
+ if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
+ (IO_URING_F_SQE128|IO_URING_F_CQE32))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
+
+ ret = nvme_uring_cmd_checks(issue_flags);
+ if (ret)
+ return ret;
+
+ switch (ioucmd->cmd_op) {
+ case NVME_URING_CMD_IO:
+ ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
+ break;
+ case NVME_URING_CMD_IO_VEC:
+ ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
+ struct nvme_ns, cdev);
+
+ return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
+}
+
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct request *req;
+ int ret = 0;
+
+ if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
+ return 0;
+
+ req = READ_ONCE(ioucmd->cookie);
+ if (req && blk_rq_is_poll(req))
+ ret = blk_rq_poll(req, iob, poll_flags);
+ return ret;
+}
+#ifdef CONFIG_NVME_MULTIPATH
+static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ void __user *argp, struct nvme_ns_head *head, int srcu_idx,
+ bool open_for_write)
+ __releases(&head->srcu)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ nvme_get_ctrl(ns->ctrl);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
+
+ nvme_put_ctrl(ctrl);
+ return ret;
+}
+
+int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ bool open_for_write = mode & BLK_OPEN_WRITE;
+ void __user *argp = (void __user *)arg;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+ unsigned int flags = 0;
+
+ if (bdev_is_partition(bdev))
+ flags |= NVME_IOCTL_PARTITION;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
+
+ /*
+ * Handle ioctls that apply to the controller instead of the namespace
+ * separately and drop the ns SRCU reference early. This avoids a
+ * deadlock when deleting namespaces using the passthrough interface.
+ */
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
+ open_for_write);
+
+ ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ bool open_for_write = file->f_mode & FMODE_WRITE;
+ struct cdev *cdev = file_inode(file)->i_cdev;
+ struct nvme_ns_head *head =
+ container_of(cdev, struct nvme_ns_head, cdev);
+ void __user *argp = (void __user *)arg;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
+
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
+ open_for_write);
+
+ ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EINVAL;
+
+ if (ns)
+ ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+#endif /* CONFIG_NVME_MULTIPATH */
+
+int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ struct nvme_ctrl *ctrl = ioucmd->file->private_data;
+ int ret;
+
+ /* IOPOLL not supported yet */
+ if (issue_flags & IO_URING_F_IOPOLL)
+ return -EOPNOTSUPP;
+
+ ret = nvme_uring_cmd_checks(issue_flags);
+ if (ret)
+ return ret;
+
+ switch (ioucmd->cmd_op) {
+ case NVME_URING_CMD_ADMIN:
+ ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
+ break;
+ case NVME_URING_CMD_ADMIN_VEC:
+ ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
+ bool open_for_write)
+{
+ struct nvme_ns *ns;
+ int ret;
+
+ down_read(&ctrl->namespaces_rwsem);
+ if (list_empty(&ctrl->namespaces)) {
+ ret = -ENOTTY;
+ goto out_unlock;
+ }
+
+ ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
+ dev_warn(ctrl->device,
+ "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ dev_warn(ctrl->device,
+ "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
+ kref_get(&ns->kref);
+ up_read(&ctrl->namespaces_rwsem);
+
+ ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
+ nvme_put_ns(ns);
+ return ret;
+
+out_unlock:
+ up_read(&ctrl->namespaces_rwsem);
+ return ret;
+}
+
+long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ bool open_for_write = file->f_mode & FMODE_WRITE;
+ struct nvme_ctrl *ctrl = file->private_data;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
+ case NVME_IOCTL_ADMIN64_CMD:
+ return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
+ case NVME_IOCTL_IO_CMD:
+ return nvme_dev_user_cmd(ctrl, argp, open_for_write);
+ case NVME_IOCTL_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ dev_warn(ctrl->device, "resetting controller\n");
+ return nvme_reset_ctrl_sync(ctrl);
+ case NVME_IOCTL_SUBSYS_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return nvme_reset_subsystem(ctrl);
+ case NVME_IOCTL_RESCAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ nvme_queue_scan(ctrl);
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
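+
+/*
+ * A minimal userspace sketch of driving the NVME_IOCTL_ADMIN_CMD path above:
+ * an Identify Controller (CNS 01h) passthrough on the controller character
+ * device. The device path is an assumption. This particular command is
+ * allowed without CAP_SYS_ADMIN because nvme_cmd_allowed() permits a subset
+ * of identify CNS values.
+ *
+ *   #include <fcntl.h>
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *   #include <sys/ioctl.h>
+ *   #include <unistd.h>
+ *   #include <linux/nvme_ioctl.h>
+ *
+ *   int main(void)
+ *   {
+ *           unsigned char id[4096];
+ *           struct nvme_admin_cmd cmd;
+ *           int fd = open("/dev/nvme0", O_RDONLY);
+ *
+ *           if (fd < 0)
+ *                   return 1;
+ *           memset(&cmd, 0, sizeof(cmd));
+ *           cmd.opcode = 0x06;            // Identify
+ *           cmd.cdw10 = 1;                // CNS 01h: Identify Controller
+ *           cmd.addr = (unsigned long)id;
+ *           cmd.data_len = sizeof(id);
+ *           if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) == 0)
+ *                   printf("model: %.40s\n", (char *)&id[24]);
+ *           close(fd);
+ *           return 0;
+ *   }
+ */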
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
new file mode 100644
index 0000000000..0a88d7bdc5
--- /dev/null
+++ b/drivers/nvme/host/multipath.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018 Christoph Hellwig.
+ */
+
+#include <linux/backing-dev.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <trace/events/block.h>
+#include "nvme.h"
+
+bool multipath = true;
+module_param(multipath, bool, 0444);
+MODULE_PARM_DESC(multipath,
+ "turn on native support for multiple controllers per subsystem");
+
+static const char *nvme_iopolicy_names[] = {
+ [NVME_IOPOLICY_NUMA] = "numa",
+ [NVME_IOPOLICY_RR] = "round-robin",
+};
+
+static int iopolicy = NVME_IOPOLICY_NUMA;
+
+static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
+{
+ if (!val)
+ return -EINVAL;
+ if (!strncmp(val, "numa", 4))
+ iopolicy = NVME_IOPOLICY_NUMA;
+ else if (!strncmp(val, "round-robin", 11))
+ iopolicy = NVME_IOPOLICY_RR;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
+{
+ return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
+}
+
+module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
+ &iopolicy, 0644);
+MODULE_PARM_DESC(iopolicy,
+ "Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+ subsys->iopolicy = iopolicy;
+}
+
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_freeze_queue_start(h->disk->queue);
+}
+
+void nvme_failover_req(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ u16 status = nvme_req(req)->status & 0x7ff;
+ unsigned long flags;
+ struct bio *bio;
+
+ nvme_mpath_clear_current_path(ns);
+
+ /*
+ * If we got back an ANA error, we know the controller is alive but not
+ * ready to serve this namespace. Kick off a re-read of the ANA
+ * information page, and just try any other available path for now.
+ */
+ if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
+
+ spin_lock_irqsave(&ns->head->requeue_lock, flags);
+ for (bio = req->bio; bio; bio = bio->bi_next) {
+ bio_set_dev(bio, ns->head->disk->part0);
+ if (bio->bi_opf & REQ_POLLED) {
+ bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_cookie = BLK_QC_T_NONE;
+ }
+ /*
+ * The alternate request queue that we may end up submitting
+ * the bio to may be frozen temporarily, in this case REQ_NOWAIT
+ * will fail the I/O immediately with EAGAIN to the issuer.
+ * Unlike the issuer, this context is allowed to block, so clear
+ * the flag to avoid spurious EAGAIN I/O failures.
+ */
+ bio->bi_opf &= ~REQ_NOWAIT;
+ }
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+
+ blk_mq_end_request(req, 0);
+ kblockd_schedule_work(&ns->head->requeue_work);
+}
+
+void nvme_mpath_start_request(struct request *rq)
+{
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct gendisk *disk = ns->head->disk;
+
+ if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+ return;
+
+ nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
+ nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
+ jiffies);
+}
+EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
+
+void nvme_mpath_end_request(struct request *rq)
+{
+ struct nvme_ns *ns = rq->q->queuedata;
+
+ if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
+ return;
+ bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
+ blk_rq_bytes(rq) >> SECTOR_SHIFT,
+ nvme_req(rq)->start_time);
+}
+
+void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (!ns->head->disk)
+ continue;
+ kblockd_schedule_work(&ns->head->requeue_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ disk_uevent(ns->head->disk, KOBJ_CHANGE);
+ }
+ up_read(&ctrl->namespaces_rwsem);
+}
+
+static const char *nvme_ana_state_names[] = {
+ [0] = "invalid state",
+ [NVME_ANA_OPTIMIZED] = "optimized",
+ [NVME_ANA_NONOPTIMIZED] = "non-optimized",
+ [NVME_ANA_INACCESSIBLE] = "inaccessible",
+ [NVME_ANA_PERSISTENT_LOSS] = "persistent-loss",
+ [NVME_ANA_CHANGE] = "change",
+};
+
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ bool changed = false;
+ int node;
+
+ if (!head)
+ goto out;
+
+ for_each_node(node) {
+ if (ns == rcu_access_pointer(head->current_path[node])) {
+ rcu_assign_pointer(head->current_path[node], NULL);
+ changed = true;
+ }
+ }
+out:
+ return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+ up_read(&ctrl->namespaces_rwsem);
+}
+
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ sector_t capacity = get_capacity(head->disk);
+ int node;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (capacity != get_capacity(ns->disk))
+ clear_bit(NVME_NS_READY, &ns->flags);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+
+ for_each_node(node)
+ rcu_assign_pointer(head->current_path[node], NULL);
+ kblockd_schedule_work(&head->requeue_work);
+}
+
+static bool nvme_path_is_disabled(struct nvme_ns *ns)
+{
+ /*
+ * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
+ * still be able to complete assuming that the controller is connected.
+ * Otherwise it will fail immediately and return to the requeue list.
+ */
+ if (ns->ctrl->state != NVME_CTRL_LIVE &&
+ ns->ctrl->state != NVME_CTRL_DELETING)
+ return true;
+ if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
+ !test_bit(NVME_NS_READY, &ns->flags))
+ return true;
+ return false;
+}
+
+static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
+{
+ int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
+ struct nvme_ns *found = NULL, *fallback = NULL, *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (nvme_path_is_disabled(ns))
+ continue;
+
+ if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ distance = node_distance(node, ns->ctrl->numa_node);
+ else
+ distance = LOCAL_DISTANCE;
+
+ switch (ns->ana_state) {
+ case NVME_ANA_OPTIMIZED:
+ if (distance < found_distance) {
+ found_distance = distance;
+ found = ns;
+ }
+ break;
+ case NVME_ANA_NONOPTIMIZED:
+ if (distance < fallback_distance) {
+ fallback_distance = distance;
+ fallback = ns;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!found)
+ found = fallback;
+ if (found)
+ rcu_assign_pointer(head->current_path[node], found);
+ return found;
+}
+
+static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
+ struct nvme_ns *ns)
+{
+ ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
+ siblings);
+ if (ns)
+ return ns;
+ return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
+}
+
+static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
+ int node, struct nvme_ns *old)
+{
+ struct nvme_ns *ns, *found = NULL;
+
+ if (list_is_singular(&head->list)) {
+ if (nvme_path_is_disabled(old))
+ return NULL;
+ return old;
+ }
+
+ for (ns = nvme_next_ns(head, old);
+ ns && ns != old;
+ ns = nvme_next_ns(head, ns)) {
+ if (nvme_path_is_disabled(ns))
+ continue;
+
+ if (ns->ana_state == NVME_ANA_OPTIMIZED) {
+ found = ns;
+ goto out;
+ }
+ if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
+ found = ns;
+ }
+
+ /*
+ * The loop above skips the current path for round-robin semantics.
+ * Fall back to the current path if either:
+ * - no other optimized path found and current is optimized,
+ * - no other usable path found and current is usable.
+ */
+ if (!nvme_path_is_disabled(old) &&
+ (old->ana_state == NVME_ANA_OPTIMIZED ||
+ (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
+ return old;
+
+ if (!found)
+ return NULL;
+out:
+ rcu_assign_pointer(head->current_path[node], found);
+ return found;
+}
+
+static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
+{
+ return ns->ctrl->state == NVME_CTRL_LIVE &&
+ ns->ana_state == NVME_ANA_OPTIMIZED;
+}
+
+inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+{
+ int node = numa_node_id();
+ struct nvme_ns *ns;
+
+ ns = srcu_dereference(head->current_path[node], &head->srcu);
+ if (unlikely(!ns))
+ return __nvme_find_path(head, node);
+
+ if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
+ return nvme_round_robin_path(head, node, ns);
+ if (unlikely(!nvme_path_is_optimized(ns)))
+ return __nvme_find_path(head, node);
+ return ns;
+}
+
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
+ continue;
+ switch (ns->ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ /* fallthru */
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+static void nvme_ns_head_submit_bio(struct bio *bio)
+{
+ struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
+ struct device *dev = disk_to_dev(head->disk);
+ struct nvme_ns *ns;
+ int srcu_idx;
+
+ /*
+ * The namespace might be going away and the bio might be moved to a
+ * different queue via blk_steal_bios(), so we need to use the bio_split
+ * pool from the original queue to allocate the bvecs from.
+ */
+ bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (likely(ns)) {
+ bio_set_dev(bio, ns->disk->part0);
+ bio->bi_opf |= REQ_NVME_MPATH;
+ trace_block_bio_remap(bio, disk_devt(ns->head->disk),
+ bio->bi_iter.bi_sector);
+ submit_bio_noacct(bio);
+ } else if (nvme_available_path(head)) {
+ dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
+
+ spin_lock_irq(&head->requeue_lock);
+ bio_list_add(&head->requeue_list, bio);
+ spin_unlock_irq(&head->requeue_lock);
+ } else {
+ dev_warn_ratelimited(dev, "no available path - failing I/O\n");
+
+ bio_io_error(bio);
+ }
+
+ srcu_read_unlock(&head->srcu, srcu_idx);
+}
+
+static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode)
+{
+ if (!nvme_tryget_ns_head(disk->private_data))
+ return -ENXIO;
+ return 0;
+}
+
+static void nvme_ns_head_release(struct gendisk *disk)
+{
+ nvme_put_ns_head(disk->private_data);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_ns_head *head = disk->private_data;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (ns)
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+#else
+#define nvme_ns_head_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+const struct block_device_operations nvme_ns_head_ops = {
+ .owner = THIS_MODULE,
+ .submit_bio = nvme_ns_head_submit_bio,
+ .open = nvme_ns_head_open,
+ .release = nvme_ns_head_release,
+ .ioctl = nvme_ns_head_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+ .getgeo = nvme_getgeo,
+ .report_zones = nvme_ns_head_report_zones,
+ .pr_ops = &nvme_pr_ops,
+};
+
+static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
+{
+ return container_of(cdev, struct nvme_ns_head, cdev);
+}
+
+static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
+{
+ if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
+ return -ENXIO;
+ return 0;
+}
+
+static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
+{
+ nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
+ return 0;
+}
+
+static const struct file_operations nvme_ns_head_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_head_chr_open,
+ .release = nvme_ns_head_chr_release,
+ .unlocked_ioctl = nvme_ns_head_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_ns_head_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
+};
+
+static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
+{
+ int ret;
+
+ head->cdev_device.parent = &head->subsys->dev;
+ ret = dev_set_name(&head->cdev_device, "ng%dn%d",
+ head->subsys->instance, head->instance);
+ if (ret)
+ return ret;
+ ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
+ &nvme_ns_head_chr_fops, THIS_MODULE);
+ return ret;
+}
+
+static void nvme_requeue_work(struct work_struct *work)
+{
+ struct nvme_ns_head *head =
+ container_of(work, struct nvme_ns_head, requeue_work);
+ struct bio *bio, *next;
+
+ spin_lock_irq(&head->requeue_lock);
+ next = bio_list_get(&head->requeue_list);
+ spin_unlock_irq(&head->requeue_lock);
+
+ while ((bio = next) != NULL) {
+ next = bio->bi_next;
+ bio->bi_next = NULL;
+
+ submit_bio_noacct(bio);
+ }
+}
+
+int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
+{
+ bool vwc = false;
+
+ mutex_init(&head->lock);
+ bio_list_init(&head->requeue_list);
+ spin_lock_init(&head->requeue_lock);
+ INIT_WORK(&head->requeue_work, nvme_requeue_work);
+
+ /*
+ * Add a multipath node if the subsystem supports multiple controllers.
+ * We also do this for private namespaces as the namespace sharing flag
+ * could change after a rescan.
+ */
+ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ !nvme_is_unique_nsid(ctrl, head) || !multipath)
+ return 0;
+
+ head->disk = blk_alloc_disk(ctrl->numa_node);
+ if (!head->disk)
+ return -ENOMEM;
+ head->disk->fops = &nvme_ns_head_ops;
+ head->disk->private_data = head;
+ sprintf(head->disk->disk_name, "nvme%dn%d",
+ ctrl->subsys->instance, head->instance);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
+ /*
+ * This assumes that either all controllers referring to a namespace
+ * support poll queues or none of them do. That is not a strict
+ * guarantee, but if the assumption is wrong the effect is only
+ * suboptimal performance, not a correctness problem.
+ */
+ if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
+ ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
+ blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
+
+ /* set to a default value of 512 until the disk is validated */
+ blk_queue_logical_block_size(head->disk->queue, 512);
+ blk_set_stacking_limits(&head->disk->queue->limits);
+ blk_queue_dma_alignment(head->disk->queue, 3);
+
+ /* we need to propagate up the VWC settings */
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(head->disk->queue, vwc, vwc);
+ return 0;
+}
+
+static void nvme_mpath_set_live(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ int rc;
+
+ if (!head->disk)
+ return;
+
+ /*
+ * test_and_set_bit() is used because it is protecting against two nvme
+ * paths simultaneously calling device_add_disk() on the same namespace
+ * head.
+ */
+ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ rc = device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+ if (rc) {
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+ return;
+ }
+ nvme_add_ns_head_cdev(head);
+ }
+
+ mutex_lock(&head->lock);
+ if (nvme_path_is_optimized(ns)) {
+ int node, srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ for_each_node(node)
+ __nvme_find_path(head, node);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ }
+ mutex_unlock(&head->lock);
+
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
+}
+
+static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
+ int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
+ void *))
+{
+ void *base = ctrl->ana_log_buf;
+ size_t offset = sizeof(struct nvme_ana_rsp_hdr);
+ int error, i;
+
+ lockdep_assert_held(&ctrl->ana_lock);
+
+ for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
+ struct nvme_ana_group_desc *desc = base + offset;
+ u32 nr_nsids;
+ size_t nsid_buf_size;
+
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+
+ nr_nsids = le32_to_cpu(desc->nnsids);
+ nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
+
+ if (WARN_ON_ONCE(desc->grpid == 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
+ return -EINVAL;
+ if (WARN_ON_ONCE(desc->state == 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
+ return -EINVAL;
+
+ offset += sizeof(*desc);
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
+ return -EINVAL;
+
+ error = cb(ctrl, desc, data);
+ if (error)
+ return error;
+
+ offset += nsid_buf_size;
+ }
+
+ return 0;
+}
+
+static inline bool nvme_state_is_live(enum nvme_ana_state state)
+{
+ return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
+}
+
+static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
+ struct nvme_ns *ns)
+{
+ ns->ana_grpid = le32_to_cpu(desc->grpid);
+ ns->ana_state = desc->state;
+ clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ /*
+ * nvme_mpath_set_live() will trigger I/O to the multipath path device
+ * and in turn to this path device. However we cannot accept this I/O
+ * if the controller is not live. This may deadlock if called from
+ * nvme_mpath_init_identify() and the ctrl will never complete
+ * initialization, preventing I/O from completing. For this case we
+ * will reprocess the ANA log page in nvme_mpath_update() once the
+ * controller is ready.
+ */
+ if (nvme_state_is_live(ns->ana_state) &&
+ ns->ctrl->state == NVME_CTRL_LIVE)
+ nvme_mpath_set_live(ns);
+}
+
+static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+{
+ u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
+ unsigned *nr_change_groups = data;
+ struct nvme_ns *ns;
+
+ dev_dbg(ctrl->device, "ANA group %d: %s.\n",
+ le32_to_cpu(desc->grpid),
+ nvme_ana_state_names[desc->state]);
+
+ if (desc->state == NVME_ANA_CHANGE)
+ (*nr_change_groups)++;
+
+ if (!nr_nsids)
+ return 0;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ unsigned nsid;
+again:
+ nsid = le32_to_cpu(desc->nsids[n]);
+ if (ns->head->ns_id < nsid)
+ continue;
+ if (ns->head->ns_id == nsid)
+ nvme_update_ns_ana_state(desc, ns);
+ if (++n == nr_nsids)
+ break;
+ if (ns->head->ns_id > nsid)
+ goto again;
+ }
+ up_read(&ctrl->namespaces_rwsem);
+ return 0;
+}
+
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
+{
+ u32 nr_change_groups = 0;
+ int error;
+
+ mutex_lock(&ctrl->ana_lock);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
+ ctrl->ana_log_buf, ctrl->ana_log_size, 0);
+ if (error) {
+ dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
+ goto out_unlock;
+ }
+
+ error = nvme_parse_ana_log(ctrl, &nr_change_groups,
+ nvme_update_ana_state);
+ if (error)
+ goto out_unlock;
+
+ /*
+ * In theory we should have an ANATT timer per group as they might enter
+ * the change state at different times. But that is a lot of overhead
+ * just to protect against a target that keeps entering new change
+ * states while never finishing previous ones. We'll still
+ * eventually time out once all groups are in change state, so this
+ * isn't a big deal.
+ *
+ * We also double the ANATT value to provide some slack for transports
+ * or AEN processing overhead.
+ */
+ if (nr_change_groups)
+ mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
+ else
+ del_timer_sync(&ctrl->anatt_timer);
+out_unlock:
+ mutex_unlock(&ctrl->ana_lock);
+ return error;
+}
+
+static void nvme_ana_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
+
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
+ nvme_read_ana_log(ctrl);
+}
+
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+ u32 nr_change_groups = 0;
+
+ if (!ctrl->ana_log_buf)
+ return;
+
+ mutex_lock(&ctrl->ana_lock);
+ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+ mutex_unlock(&ctrl->ana_lock);
+}
+
+static void nvme_anatt_timeout(struct timer_list *t)
+{
+ struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
+
+ dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
+ nvme_reset_ctrl(ctrl);
+}
+
+void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_ctrl_use_ana(ctrl))
+ return;
+ del_timer_sync(&ctrl->anatt_timer);
+ cancel_work_sync(&ctrl->ana_work);
+}
+
+#define SUBSYS_ATTR_RW(_name, _mode, _show, _store) \
+ struct device_attribute subsys_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ return sysfs_emit(buf, "%s\n",
+ nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
+}
+
+static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
+ if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
+ WRITE_ONCE(subsys->iopolicy, i);
+ return count;
+ }
+ }
+
+ return -EINVAL;
+}
+SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
+ nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
+
+static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+}
+DEVICE_ATTR_RO(ana_grpid);
+
+static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+ return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+}
+DEVICE_ATTR_RO(ana_state);
+
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+{
+ struct nvme_ana_group_desc *dst = data;
+
+ if (desc->grpid != dst->grpid)
+ return 0;
+
+ *dst = *desc;
+ return -ENXIO; /* just break out of the loop */
+}
+
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
+{
+ if (nvme_ctrl_use_ana(ns->ctrl)) {
+ struct nvme_ana_group_desc desc = {
+ .grpid = anagrpid,
+ .state = 0,
+ };
+
+ mutex_lock(&ns->ctrl->ana_lock);
+ ns->ana_grpid = le32_to_cpu(anagrpid);
+ nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
+ mutex_unlock(&ns->ctrl->ana_lock);
+ if (desc.state) {
+ /* found the group desc: update */
+ nvme_update_ns_ana_state(&desc, ns);
+ } else {
+ /* group desc not found: trigger a re-read */
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
+ } else {
+ ns->ana_state = NVME_ANA_OPTIMIZED;
+ nvme_mpath_set_live(ns);
+ }
+
+ if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+ ns->head->disk->nr_zones = ns->disk->nr_zones;
+#endif
+}
+
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ kblockd_schedule_work(&head->requeue_work);
+ if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ nvme_cdev_del(&head->cdev, &head->cdev_device);
+ del_gendisk(head->disk);
+ }
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ /* make sure all pending bios are cleaned up */
+ kblockd_schedule_work(&head->requeue_work);
+ flush_work(&head->requeue_work);
+ put_disk(head->disk);
+}
+
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ mutex_init(&ctrl->ana_lock);
+ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+}
+
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
+ size_t ana_log_size;
+ int error = 0;
+
+ /* check if multipath is enabled and we have the capability */
+ if (!multipath || !ctrl->subsys ||
+ !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
+ return 0;
+
+ if (!ctrl->max_namespaces ||
+ ctrl->max_namespaces > le32_to_cpu(id->nn)) {
+ dev_err(ctrl->device,
+ "Invalid MNAN value %u\n", ctrl->max_namespaces);
+ return -EINVAL;
+ }
+
+ ctrl->anacap = id->anacap;
+ ctrl->anatt = id->anatt;
+ ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
+ ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
+
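+ /*
+ * Worst-case ANA log size: the fixed header, one descriptor per possible
+ * ANA group, and one NSID entry per supported namespace (each namespace
+ * belongs to at most one ANA group).
+ */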
+ ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
+ ctrl->max_namespaces * sizeof(__le32);
+ if (ana_log_size > max_transfer_size) {
+ dev_err(ctrl->device,
+ "ANA log page size (%zd) larger than MDTS (%zd).\n",
+ ana_log_size, max_transfer_size);
+ dev_err(ctrl->device, "disabling ANA support.\n");
+ goto out_uninit;
+ }
+ if (ana_log_size > ctrl->ana_log_size) {
+ nvme_mpath_stop(ctrl);
+ nvme_mpath_uninit(ctrl);
+ ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf)
+ return -ENOMEM;
+ }
+ ctrl->ana_log_size = ana_log_size;
+ error = nvme_read_ana_log(ctrl);
+ if (error)
+ goto out_uninit;
+ return 0;
+
+out_uninit:
+ nvme_mpath_uninit(ctrl);
+ return error;
+}
+
+void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+{
+ kvfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
+ ctrl->ana_log_size = 0;
+}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
new file mode 100644
index 0000000000..ba62d42d2a
--- /dev/null
+++ b/drivers/nvme/host/nvme.h
@@ -0,0 +1,1141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2011-2014, Intel Corporation.
+ */
+
+#ifndef _NVME_H
+#define _NVME_H
+
+#include <linux/nvme.h>
+#include <linux/cdev.h>
+#include <linux/pci.h>
+#include <linux/kref.h>
+#include <linux/blk-mq.h>
+#include <linux/sed-opal.h>
+#include <linux/fault-inject.h>
+#include <linux/rcupdate.h>
+#include <linux/wait.h>
+#include <linux/t10-pi.h>
+
+#include <trace/events/block.h>
+
+extern const struct pr_ops nvme_pr_ops;
+
+extern unsigned int nvme_io_timeout;
+#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
+
+extern unsigned int admin_timeout;
+#define NVME_ADMIN_TIMEOUT (admin_timeout * HZ)
+
+#define NVME_DEFAULT_KATO 5
+
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define NVME_INLINE_SG_CNT 0
+#define NVME_INLINE_METADATA_SG_CNT 0
+#else
+#define NVME_INLINE_SG_CNT 2
+#define NVME_INLINE_METADATA_SG_CNT 1
+#endif
+
+/*
+ * Default to a 4K page size, with the intention to update this
+ * path in the future to accommodate architectures with differing
+ * kernel and IO page sizes.
+ */
+#define NVME_CTRL_PAGE_SHIFT 12
+#define NVME_CTRL_PAGE_SIZE (1 << NVME_CTRL_PAGE_SHIFT)
+
+extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
+
+/*
+ * List of workarounds for devices that require behavior not specified in
+ * the standard.
+ */
+enum nvme_quirks {
+ /*
+ * Prefers I/O aligned to a stripe size specified in a vendor
+ * specific Identify field.
+ */
+ NVME_QUIRK_STRIPE_SIZE = (1 << 0),
+
+ /*
+ * The controller doesn't handle Identify value others than 0 or 1
+ * correctly.
+ */
+ NVME_QUIRK_IDENTIFY_CNS = (1 << 1),
+
+ /*
+ * The controller deterministically returns 0's on reads to
+ * logical blocks that deallocate was called on.
+ */
+ NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),
+
+ /*
+ * The controller needs a delay before it starts checking the device
+ * readiness, which is done by reading the NVME_CSTS_RDY bit.
+ */
+ NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
+
+ /*
+ * APST should not be used.
+ */
+ NVME_QUIRK_NO_APST = (1 << 4),
+
+ /*
+ * The deepest sleep state should not be used.
+ */
+ NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),
+
+ /*
+ * Set MEDIUM priority on SQ creation
+ */
+ NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
+
+ /*
+ * Ignore device provided subnqn.
+ */
+ NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
+
+ /*
+ * Broken Write Zeroes.
+ */
+ NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
+
+ /*
+ * Force simple suspend/resume path.
+ */
+ NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
+
+ /*
+ * Use only one interrupt vector for all queues
+ */
+ NVME_QUIRK_SINGLE_VECTOR = (1 << 11),
+
+ /*
+ * Use non-standard 128-byte SQEs.
+ */
+ NVME_QUIRK_128_BYTES_SQES = (1 << 12),
+
+ /*
+ * Prevent tag overlap between queues
+ */
+ NVME_QUIRK_SHARED_TAGS = (1 << 13),
+
+ /*
+ * Don't change the value of the temperature threshold feature
+ */
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
+
+ /*
+ * The controller doesn't handle the Identify Namespace
+ * Identification Descriptor list subcommand despite claiming
+ * NVMe 1.3 compliance.
+ */
+ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
+
+ /*
+ * The controller does not properly handle DMA addresses over
+ * 48 bits.
+ */
+ NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
+
+ /*
+ * The controller requires the command_id value be limited, so skip
+ * encoding the generation sequence number.
+ */
+ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+ /*
+ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ */
+ NVME_QUIRK_BOGUS_NID = (1 << 18),
+
+ /*
+ * No temperature thresholds for channels other than 0 (Composite).
+ */
+ NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),
+
+ /*
+ * Disables simple suspend/resume path.
+ */
+ NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
+};
+
+/*
+ * Common request structure for NVMe passthrough. All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+ struct nvme_command *cmd;
+ union nvme_result result;
+ u8 genctr;
+ u8 retries;
+ u8 flags;
+ u16 status;
+#ifdef CONFIG_NVME_MULTIPATH
+ unsigned long start_time;
+#endif
+ struct nvme_ctrl *ctrl;
+};
+
+/*
+ * Mark a bio as coming in through the mpath node.
+ */
+#define REQ_NVME_MPATH REQ_DRV
+
+enum {
+ NVME_REQ_CANCELLED = (1 << 0),
+ NVME_REQ_USERCMD = (1 << 1),
+ NVME_MPATH_IO_STATS = (1 << 2),
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+ return blk_mq_rq_to_pdu(req);
+}
+
+static inline u16 nvme_req_qid(struct request *req)
+{
+ if (!req->q->queuedata)
+ return 0;
+
+ return req->mq_hctx->queue_num + 1;
+}
+
+/*
+ * The below value is the specific amount of delay needed before checking
+ * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
+ * found empirically.
+ */
+#define NVME_QUIRK_DELAY_AMOUNT 2300
+
+/*
+ * enum nvme_ctrl_state: Controller state
+ *
+ * @NVME_CTRL_NEW: New controller just allocated, initial state
+ * @NVME_CTRL_LIVE: Controller is connected and I/O capable
+ * @NVME_CTRL_RESETTING: Controller is resetting (or scheduled reset)
+ * @NVME_CTRL_CONNECTING: Controller is disconnected, now connecting the
+ * transport
+ * @NVME_CTRL_DELETING: Controller is deleting (or scheduled deletion)
+ * @NVME_CTRL_DELETING_NOIO: Controller is deleting and I/O is not
+ * disabled/failed immediately. This state comes
+ * after all async event processing took place and
+ * before ns removal and the controller deletion
+ * progresses
+ * @NVME_CTRL_DEAD: Controller is non-present/unresponsive during
+ * shutdown or removal. In this case we forcibly
+ * kill all inflight I/O as they have no chance to
+ * complete
+ */
+enum nvme_ctrl_state {
+ NVME_CTRL_NEW,
+ NVME_CTRL_LIVE,
+ NVME_CTRL_RESETTING,
+ NVME_CTRL_CONNECTING,
+ NVME_CTRL_DELETING,
+ NVME_CTRL_DELETING_NOIO,
+ NVME_CTRL_DEAD,
+};
+
+struct nvme_fault_inject {
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+ struct fault_attr attr;
+ struct dentry *parent;
+ bool dont_retry; /* DNR, do not retry */
+ u16 status; /* status code */
+#endif
+};
+
+enum nvme_ctrl_flags {
+ NVME_CTRL_FAILFAST_EXPIRED = 0,
+ NVME_CTRL_ADMIN_Q_STOPPED = 1,
+ NVME_CTRL_STARTED_ONCE = 2,
+ NVME_CTRL_STOPPED = 3,
+ NVME_CTRL_SKIP_ID_CNS_CS = 4,
+ NVME_CTRL_DIRTY_CAPABILITY = 5,
+ NVME_CTRL_FROZEN = 6,
+};
+
+struct nvme_ctrl {
+ bool comp_seen;
+ bool identified;
+ enum nvme_ctrl_state state;
+ spinlock_t lock;
+ struct mutex scan_lock;
+ const struct nvme_ctrl_ops *ops;
+ struct request_queue *admin_q;
+ struct request_queue *connect_q;
+ struct request_queue *fabrics_q;
+ struct device *dev;
+ int instance;
+ int numa_node;
+ struct blk_mq_tag_set *tagset;
+ struct blk_mq_tag_set *admin_tagset;
+ struct list_head namespaces;
+ struct rw_semaphore namespaces_rwsem;
+ struct device ctrl_device;
+ struct device *device; /* char device */
+#ifdef CONFIG_NVME_HWMON
+ struct device *hwmon_device;
+#endif
+ struct cdev cdev;
+ struct work_struct reset_work;
+ struct work_struct delete_work;
+ wait_queue_head_t state_wq;
+
+ struct nvme_subsystem *subsys;
+ struct list_head subsys_entry;
+
+ struct opal_dev *opal_dev;
+
+ char name[12];
+ u16 cntlid;
+
+ u16 mtfa;
+ u32 ctrl_config;
+ u32 queue_count;
+
+ u64 cap;
+ u32 max_hw_sectors;
+ u32 max_segments;
+ u32 max_integrity_segments;
+ u32 max_discard_sectors;
+ u32 max_discard_segments;
+ u32 max_zeroes_sectors;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u32 max_zone_append;
+#endif
+ u16 crdt[3];
+ u16 oncs;
+ u32 dmrsl;
+ u16 oacs;
+ u16 sqsize;
+ u32 max_namespaces;
+ atomic_t abort_limit;
+ u8 vwc;
+ u32 vs;
+ u32 sgls;
+ u16 kas;
+ u8 npss;
+ u8 apsta;
+ u16 wctemp;
+ u16 cctemp;
+ u32 oaes;
+ u32 aen_result;
+ u32 ctratt;
+ unsigned int shutdown_timeout;
+ unsigned int kato;
+ bool subsystem;
+ unsigned long quirks;
+ struct nvme_id_power_state psd[32];
+ struct nvme_effects_log *effects;
+ struct xarray cels;
+ struct work_struct scan_work;
+ struct work_struct async_event_work;
+ struct delayed_work ka_work;
+ struct delayed_work failfast_work;
+ struct nvme_command ka_cmd;
+ unsigned long ka_last_check_time;
+ struct work_struct fw_act_work;
+ unsigned long events;
+
+#ifdef CONFIG_NVME_MULTIPATH
+ /* asymmetric namespace access: */
+ u8 anacap;
+ u8 anatt;
+ u32 anagrpmax;
+ u32 nanagrpid;
+ struct mutex ana_lock;
+ struct nvme_ana_rsp_hdr *ana_log_buf;
+ size_t ana_log_size;
+ struct timer_list anatt_timer;
+ struct work_struct ana_work;
+#endif
+
+#ifdef CONFIG_NVME_AUTH
+ struct work_struct dhchap_auth_work;
+ struct mutex dhchap_auth_mutex;
+ struct nvme_dhchap_queue_context *dhchap_ctxs;
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u16 transaction;
+#endif
+
+ /* Power saving configuration */
+ u64 ps_max_latency_us;
+ bool apst_enabled;
+
+ /* PCIe only: */
+ u16 hmmaxd;
+ u32 hmpre;
+ u32 hmmin;
+ u32 hmminds;
+
+ /* Fabrics only */
+ u32 ioccsz;
+ u32 iorcsz;
+ u16 icdoff;
+ u16 maxcmd;
+ int nr_reconnects;
+ unsigned long flags;
+ struct nvmf_ctrl_options *opts;
+
+ struct page *discard_page;
+ unsigned long discard_page_busy;
+
+ struct nvme_fault_inject fault_inject;
+
+ enum nvme_ctrl_type cntrltype;
+ enum nvme_dctype dctype;
+};
+
+static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
+{
+ return READ_ONCE(ctrl->state);
+}
+
+enum nvme_iopolicy {
+ NVME_IOPOLICY_NUMA,
+ NVME_IOPOLICY_RR,
+};
+
+struct nvme_subsystem {
+ int instance;
+ struct device dev;
+ /*
+ * Because we unregister the device on the last put we need
+ * a separate refcount.
+ */
+ struct kref ref;
+ struct list_head entry;
+ struct mutex lock;
+ struct list_head ctrls;
+ struct list_head nsheads;
+ char subnqn[NVMF_NQN_SIZE];
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+ u8 cmic;
+ enum nvme_subsys_type subtype;
+ u16 vendor_id;
+ u16 awupf; /* 0's based awupf value. */
+ struct ida ns_ida;
+#ifdef CONFIG_NVME_MULTIPATH
+ enum nvme_iopolicy iopolicy;
+#endif
+};
+
+/*
+ * Container structure for unique namespace identifiers.
+ */
+struct nvme_ns_ids {
+ u8 eui64[8];
+ u8 nguid[16];
+ uuid_t uuid;
+ u8 csi;
+};
+
+/*
+ * Anchor structure for namespaces. There is one for each namespace in an
+ * NVMe subsystem that any of our controllers can see, and the namespace
+ * structure for each controller is chained off of it. For private namespaces
+ * there is a 1:1 relation to our namespace structures, that is, ->list
+ * only ever has a single entry for private namespaces.
+ */
+struct nvme_ns_head {
+ struct list_head list;
+ struct srcu_struct srcu;
+ struct nvme_subsystem *subsys;
+ unsigned ns_id;
+ struct nvme_ns_ids ids;
+ struct list_head entry;
+ struct kref ref;
+ bool shared;
+ int instance;
+ struct nvme_effects_log *effects;
+
+ struct cdev cdev;
+ struct device cdev_device;
+
+ struct gendisk *disk;
+#ifdef CONFIG_NVME_MULTIPATH
+ struct bio_list requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+ struct mutex lock;
+ unsigned long flags;
+#define NVME_NSHEAD_DISK_LIVE 0
+ struct nvme_ns __rcu *current_path[];
+#endif
+};
+
+static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
+{
+ return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
+}
+
+enum nvme_ns_features {
+ NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
+ NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+ NVME_NS_DEAC, /* DEAC bit in Write Zeroes supported */
+};
+
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_ctrl *ctrl;
+ struct request_queue *queue;
+ struct gendisk *disk;
+#ifdef CONFIG_NVME_MULTIPATH
+ enum nvme_ana_state ana_state;
+ u32 ana_grpid;
+#endif
+ struct list_head siblings;
+ struct kref kref;
+ struct nvme_ns_head *head;
+
+ int lba_shift;
+ u16 ms;
+ u16 pi_size;
+ u16 sgs;
+ u32 sws;
+ u8 pi_type;
+ u8 guard_type;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u64 zsze;
+#endif
+ unsigned long features;
+ unsigned long flags;
+#define NVME_NS_REMOVING 0
+#define NVME_NS_ANA_PENDING 2
+#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
+
+ struct cdev cdev;
+ struct device cdev_device;
+
+ struct nvme_fault_inject fault_inject;
+
+};
+
+/* NVMe ns supports metadata actions by the controller (generate/strip) */
+static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+{
+ return ns->pi_type && ns->ms == ns->pi_size;
+}
+
+struct nvme_ctrl_ops {
+ const char *name;
+ struct module *module;
+ unsigned int flags;
+#define NVME_F_FABRICS (1 << 0)
+#define NVME_F_METADATA_SUPPORTED (1 << 1)
+#define NVME_F_BLOCKING (1 << 2)
+
+ const struct attribute_group **dev_attr_groups;
+ int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+ int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+ int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+ void (*free_ctrl)(struct nvme_ctrl *ctrl);
+ void (*submit_async_event)(struct nvme_ctrl *ctrl);
+ void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ void (*stop_ctrl)(struct nvme_ctrl *ctrl);
+ int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+ void (*print_device_info)(struct nvme_ctrl *ctrl);
+ bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
+};
+
+/*
+ * nvme command_id is constructed as such:
+ * | xxxx | xxxxxxxxxxxx |
+ * gen request tag
+ */
+#define nvme_genctr_mask(gen) (gen & 0xf)
+#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12)
+#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12)
+#define nvme_tag_from_cid(cid) (cid & 0xfff)
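+/*
+ * e.g. a request with blk-mq tag 0x2a whose genctr has advanced to 3 gets
+ * command_id 0x302a; a stale completion carrying an older generation for
+ * the same tag is then rejected by nvme_find_rq() below.
+ */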
+
+static inline u16 nvme_cid(struct request *rq)
+{
+ return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
+}
+
+static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
+ u16 command_id)
+{
+ u8 genctr = nvme_genctr_from_cid(command_id);
+ u16 tag = nvme_tag_from_cid(command_id);
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(tags, tag);
+ if (unlikely(!rq)) {
+ pr_err("could not locate request for tag %#x\n",
+ tag);
+ return NULL;
+ }
+ if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
+ dev_err(nvme_req(rq)->ctrl->device,
+ "request %#x genctr mismatch (got %#x expected %#x)\n",
+ tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
+ return NULL;
+ }
+ return rq;
+}
+
+static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
+ u16 command_id)
+{
+ return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
+}
+
+/*
+ * Return the length of the string without the space padding
+ */
+static inline int nvme_strlen(char *s, int len)
+{
+ while (s[len - 1] == ' ')
+ len--;
+ return len;
+}
+
+static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
+{
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ if (ctrl->ops->print_device_info) {
+ ctrl->ops->print_device_info(ctrl);
+ return;
+ }
+
+ dev_err(ctrl->device,
+ "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
+ nvme_strlen(subsys->model, sizeof(subsys->model)),
+ subsys->model, nvme_strlen(subsys->firmware_rev,
+ sizeof(subsys->firmware_rev)),
+ subsys->firmware_rev);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
+ const char *dev_name);
+void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
+void nvme_should_fail(struct request *req);
+#else
+static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
+ const char *dev_name)
+{
+}
+static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
+{
+}
+static inline void nvme_should_fail(struct request *req) {}
+#endif
+
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
+static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ if (!ctrl->subsystem)
+ return -ENOTTY;
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
+}
+
+/*
+ * Convert a 512B sector number to a device logical block number.
+ */
+static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+{
+ return sector >> (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert a device logical block number to a 512B sector number.
+ */
+static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
+{
+ return lba << (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert byte length to nvme's 0-based num dwords
+ */
+static inline u32 nvme_bytes_to_numd(size_t len)
+{
+ return (len >> 2) - 1;
+}
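+/*
+ * e.g. with 4KiB logical blocks (lba_shift == 12), 512B sector 8 converts
+ * to LBA 1 and back again, and an 8KiB transfer length converts to
+ * nvme_bytes_to_numd(8192) == 2047 (0's based dword count).
+ */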
+
+static inline bool nvme_is_ana_error(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool nvme_is_path_error(u16 status)
+{
+ /* check for a status code type of 'path related status' */
+ return (status & 0x700) == 0x300;
+}
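+/*
+ * e.g. NVME_SC_HOST_PATH_ERROR (0x370) has status code type 0x3, so
+ * (0x370 & 0x700) == 0x300 and the command is eligible for retry on
+ * another path rather than being failed upwards.
+ */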
+
+/*
+ * Fill in the status and result information from the CQE, and then figure out
+ * if blk-mq will need to use IPI magic to complete the request, and if yes do
+ * so. If not let the caller complete the request without an indirect function
+ * call.
+ */
+static inline bool nvme_try_complete_req(struct request *req, __le16 status,
+ union nvme_result result)
+{
+ struct nvme_request *rq = nvme_req(req);
+ struct nvme_ctrl *ctrl = rq->ctrl;
+
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ rq->genctr++;
+
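+ /*
+ * Bit 0 of the 16-bit CQE status field is the phase tag; shift it out
+ * so rq->status holds only the status code and status code type.
+ */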
+ rq->status = le16_to_cpu(status) >> 1;
+ rq->result = result;
+ /* inject error when permitted by fault injection framework */
+ nvme_should_fail(req);
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return true;
+ return blk_mq_complete_request_remote(req);
+}
+
+static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
+{
+ get_device(ctrl->device);
+}
+
+static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
+{
+ put_device(ctrl->device);
+}
+
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+ return !qid &&
+ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
+void nvme_complete_rq(struct request *req);
+void nvme_complete_batch_req(struct request *req);
+
+static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
+ void (*fn)(struct request *rq))
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ fn(req);
+ nvme_complete_batch_req(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
+blk_status_t nvme_host_path_error(struct request *req);
+bool nvme_cancel_request(struct request *req, void *data);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ enum nvme_ctrl_state new_state);
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ const struct nvme_ctrl_ops *ops, unsigned long quirks);
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
+void nvme_start_ctrl(struct nvme_ctrl *ctrl);
+void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int nr_maps,
+ unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
+
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ volatile union nvme_result *res);
+
+void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
+void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
+void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
+void nvme_sync_queues(struct nvme_ctrl *ctrl);
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
+void nvme_unfreeze(struct nvme_ctrl *ctrl);
+void nvme_wait_freeze(struct nvme_ctrl *ctrl);
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
+void nvme_start_freeze(struct nvme_ctrl *ctrl);
+
+static inline enum req_op nvme_req_op(struct nvme_command *cmd)
+{
+ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
+#define NVME_QID_ANY -1
+void nvme_init_request(struct request *req, struct nvme_command *cmd);
+void nvme_cleanup_cmd(struct request *req);
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
+blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *req);
+bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live);
+
+static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
+{
+ if (likely(ctrl->state == NVME_CTRL_LIVE))
+ return true;
+ if (ctrl->ops->flags & NVME_F_FABRICS &&
+ ctrl->state == NVME_CTRL_DELETING)
+ return queue_live;
+ return __nvme_check_ready(ctrl, rq, queue_live);
+}
+
+/*
+ * NSID shall be unique for all shared namespaces, or if at least one of the
+ * following conditions is met:
+ * 1. Namespace Management is supported by the controller
+ * 2. ANA is supported by the controller
+ * 3. NVM Sets are supported by the controller
+ *
+ * Otherwise, private namespaces are not required to report a unique NSID.
+ */
+static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head)
+{
+ return head->shared ||
+ (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
+ (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
+ (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
+}
+
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ void *buf, unsigned bufflen);
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ union nvme_result *result, void *buffer, unsigned bufflen,
+ int qid, int at_head,
+ blk_mq_req_flags_t flags);
+int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result);
+int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result);
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
+int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
+ void *log, size_t size, u64 offset);
+bool nvme_tryget_ns_head(struct nvme_ns_head *head);
+void nvme_put_ns_head(struct nvme_ns_head *head);
+int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
+ const struct file_operations *fops, struct module *owner);
+void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
+int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg);
+long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg);
+long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+
+extern const struct attribute_group *nvme_ns_id_attr_groups[];
+extern const struct pr_ops nvme_pr_ops;
+extern const struct block_device_operations nvme_ns_head_ops;
+extern const struct attribute_group nvme_dev_attrs_group;
+extern const struct attribute_group *nvme_subsys_attrs_groups[];
+extern const struct attribute_group *nvme_dev_attr_groups[];
+extern const struct block_device_operations nvme_bdev_ops;
+
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+#ifdef CONFIG_NVME_MULTIPATH
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return ctrl->ana_log_buf != NULL;
+}
+
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
+void nvme_failover_req(struct request *req);
+void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
+void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
+void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
+void nvme_mpath_stop(struct nvme_ctrl *ctrl);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
+void nvme_mpath_start_request(struct request *rq);
+void nvme_mpath_end_request(struct request *rq);
+
+static inline void nvme_trace_bio_complete(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+
+ if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
+ trace_block_bio_complete(ns->head->disk->queue, req->bio);
+}
+
+extern bool multipath;
+extern struct device_attribute dev_attr_ana_grpid;
+extern struct device_attribute dev_attr_ana_state;
+extern struct device_attribute subsys_attr_iopolicy;
+
+#else
+#define multipath false
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return false;
+}
+static inline void nvme_failover_req(struct request *req)
+{
+}
+static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head)
+{
+ return 0;
+}
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
+{
+}
+static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+}
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ return false;
+}
+static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+}
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_trace_bio_complete(struct request *req)
+{
+}
+static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
+ struct nvme_id_ctrl *id)
+{
+ if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
+ dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
+ return 0;
+}
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
+static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+{
+}
+static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+{
+}
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_request(struct request *rq)
+{
+}
+static inline void nvme_mpath_end_request(struct request *rq)
+{
+}
+#endif /* CONFIG_NVME_MULTIPATH */
+
+int nvme_revalidate_zones(struct nvme_ns *ns);
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+#ifdef CONFIG_BLK_DEV_ZONED
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action);
+#else
+static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action)
+{
+ return BLK_STS_NOTSUPP;
+}
+
+static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+{
+ dev_warn(ns->ctrl->device,
+ "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
+ return -EPROTONOSUPPORT;
+}
+#endif
+
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+ return dev_to_disk(dev)->private_data;
+}
+
+#ifdef CONFIG_NVME_HWMON
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
+#else
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ return 0;
+}
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
+#endif
+
+static inline void nvme_start_request(struct request *rq)
+{
+ if (rq->cmd_flags & REQ_NVME_MPATH)
+ nvme_mpath_start_request(rq);
+ blk_mq_start_request(rq);
+}
+
+static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
+{
+ return ctrl->sgls & ((1 << 0) | (1 << 1));
+}
+
+#ifdef CONFIG_NVME_AUTH
+int __init nvme_init_auth(void);
+void __exit nvme_exit_auth(void);
+int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+#else
+static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ return 0;
+}
+static inline int __init nvme_init_auth(void)
+{
+ return 0;
+}
+static inline void __exit nvme_exit_auth(void)
+{
+}
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+#endif
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode);
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
+int nvme_execute_rq(struct request *rq, bool at_head);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+ struct nvme_command *cmd, int status);
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+void nvme_put_ns(struct nvme_ns *ns);
+
+static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+const unsigned char *nvme_get_error_status_str(u16 status);
+const unsigned char *nvme_get_opcode_str(u8 opcode);
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
+const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+#else /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ return "I/O Error";
+}
+static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ return "I/O Cmd";
+}
+static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ return "Admin Cmd";
+}
+
+static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
+ return "Fabrics Cmd";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+
+static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+{
+ if (opcode == nvme_fabrics_command)
+ return nvme_get_fabrics_opcode_str(fctype);
+ return qid ? nvme_get_opcode_str(opcode) :
+ nvme_get_admin_opcode_str(opcode);
+}
+#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
new file mode 100644
index 0000000000..f8e92404a6
--- /dev/null
+++ b/drivers/nvme/host/pci.c
@@ -0,0 +1,3543 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/blk-integrity.h>
+#include <linux/dmi.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kstrtox.h>
+#include <linux/memremap.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/once.h>
+#include <linux/pci.h>
+#include <linux/suspend.h>
+#include <linux/t10-pi.h>
+#include <linux/types.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/sed-opal.h>
+#include <linux/pci-p2pdma.h>
+
+#include "trace.h"
+#include "nvme.h"
+
+#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
+#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
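+/*
+ * e.g. an I/O queue with q_depth 1024 and standard 64-byte SQEs (sqes == 6)
+ * needs SQ_SIZE == 64KiB for the submission queue and CQ_SIZE == 16KiB for
+ * the completion queue (each CQE is 16 bytes).
+ */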
+
+#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 8192
+#define NVME_MAX_SEGS 128
+#define NVME_MAX_NR_ALLOCATIONS 5
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0444);
+
+static bool use_cmb_sqes = true;
+module_param(use_cmb_sqes, bool, 0444);
+MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
+
+static unsigned int max_host_mem_size_mb = 128;
+module_param(max_host_mem_size_mb, uint, 0444);
+MODULE_PARM_DESC(max_host_mem_size_mb,
+ "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
+
+static unsigned int sgl_threshold = SZ_32K;
+module_param(sgl_threshold, uint, 0644);
+MODULE_PARM_DESC(sgl_threshold,
+ "Use SGLs when average request segment size is larger or equal to "
+ "this size. Use 0 to disable SGLs.");
+
+#define NVME_PCI_MIN_QUEUE_SIZE 2
+#define NVME_PCI_MAX_QUEUE_SIZE 4095
+static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops io_queue_depth_ops = {
+ .set = io_queue_depth_set,
+ .get = param_get_uint,
+};
+
+static unsigned int io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096");
+
+static int io_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned int n;
+ int ret;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n > num_possible_cpus())
+ return -EINVAL;
+ return param_set_uint(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_count_ops = {
+ .set = io_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int write_queues;
+module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
+MODULE_PARM_DESC(write_queues,
+ "Number of queues to use for writes. If not set, reads and writes "
+ "will share a queue set.");
+
+static unsigned int poll_queues;
+module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
+
+static bool noacpi;
+module_param(noacpi, bool, 0444);
+MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
+
+struct nvme_dev;
+struct nvme_queue;
+
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static void nvme_delete_io_queues(struct nvme_dev *dev);
+static void nvme_update_attrs(struct nvme_dev *dev);
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct nvme_queue *queues;
+ struct blk_mq_tag_set tagset;
+ struct blk_mq_tag_set admin_tagset;
+ u32 __iomem *dbs;
+ struct device *dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ unsigned online_queues;
+ unsigned max_qid;
+ unsigned io_queues[HCTX_MAX_TYPES];
+ unsigned int num_vecs;
+ u32 q_depth;
+ int io_sqes;
+ u32 db_stride;
+ void __iomem *bar;
+ unsigned long bar_mapped_size;
+ struct mutex shutdown_lock;
+ bool subsystem;
+ u64 cmb_size;
+ bool cmb_use_sqes;
+ u32 cmbsz;
+ u32 cmbloc;
+ struct nvme_ctrl ctrl;
+ u32 last_ps;
+ bool hmb;
+
+ mempool_t *iod_mempool;
+
+ /* shadow doorbell buffer support: */
+ __le32 *dbbuf_dbs;
+ dma_addr_t dbbuf_dbs_dma_addr;
+ __le32 *dbbuf_eis;
+ dma_addr_t dbbuf_eis_dma_addr;
+
+ /* host memory buffer support: */
+ u64 host_mem_size;
+ u32 nr_host_mem_descs;
+ dma_addr_t host_mem_descs_dma;
+ struct nvme_host_mem_buf_desc *host_mem_descs;
+ void **host_mem_desc_bufs;
+ unsigned int nr_allocated_queues;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
+};
+
+static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
+ NVME_PCI_MAX_QUEUE_SIZE);
+}
+
+static inline unsigned int sq_idx(unsigned int qid, u32 stride)
+{
+ return qid * 2 * stride;
+}
+
+static inline unsigned int cq_idx(unsigned int qid, u32 stride)
+{
+ return (qid * 2 + 1) * stride;
+}
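+/*
+ * The shadow doorbell buffers mirror the layout of the real doorbell
+ * registers: 2 * db_stride 32-bit slots per queue pair, SQ tail first and
+ * CQ head second. e.g. with db_stride == 1, qid 1 uses slots 2 and 3.
+ */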
+
+static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_dev, ctrl);
+}
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+ struct nvme_dev *dev;
+ spinlock_t sq_lock;
+ void *sq_cmds;
+ /* only used for poll queues: */
+ spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
+ struct nvme_completion *cqes;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ u32 __iomem *q_db;
+ u32 q_depth;
+ u16 cq_vector;
+ u16 sq_tail;
+ u16 last_sq_tail;
+ u16 cq_head;
+ u16 qid;
+ u8 cq_phase;
+ u8 sqes;
+ unsigned long flags;
+#define NVMEQ_ENABLED 0
+#define NVMEQ_SQ_CMB 1
+#define NVMEQ_DELETE_ERROR 2
+#define NVMEQ_POLLED 3
+ __le32 *dbbuf_sq_db;
+ __le32 *dbbuf_cq_db;
+ __le32 *dbbuf_sq_ei;
+ __le32 *dbbuf_cq_ei;
+ struct completion delete_done;
+};
+
+union nvme_descriptor {
+ struct nvme_sgl_desc *sg_list;
+ __le64 *prp_list;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP/SGL chunk allocations in addition
+ * to the actual struct scatterlist.
+ */
+struct nvme_iod {
+ struct nvme_request req;
+ struct nvme_command cmd;
+ bool aborted;
+ s8 nr_allocations; /* PRP list pool allocations. 0 means small
+ pool in use */
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t first_dma;
+ dma_addr_t meta_dma;
+ struct sg_table sgt;
+ union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
+};
+
+static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
+{
+ return dev->nr_allocated_queues * 8 * dev->db_stride;
+}
+
+static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
+{
+ unsigned int mem_size = nvme_dbbuf_size(dev);
+
+ if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
+ return;
+
+ if (dev->dbbuf_dbs) {
+ /*
+ * Clear the dbbuf memory so the driver doesn't observe stale
+ * values from the previous instantiation.
+ */
+ memset(dev->dbbuf_dbs, 0, mem_size);
+ memset(dev->dbbuf_eis, 0, mem_size);
+ return;
+ }
+
+ dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
+ &dev->dbbuf_dbs_dma_addr,
+ GFP_KERNEL);
+ if (!dev->dbbuf_dbs)
+ goto fail;
+ dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
+ &dev->dbbuf_eis_dma_addr,
+ GFP_KERNEL);
+ if (!dev->dbbuf_eis)
+ goto fail_free_dbbuf_dbs;
+ return;
+
+fail_free_dbbuf_dbs:
+ dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
+ dev->dbbuf_dbs_dma_addr);
+ dev->dbbuf_dbs = NULL;
+fail:
+ dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
+}
+
+static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
+{
+ unsigned int mem_size = nvme_dbbuf_size(dev);
+
+ if (dev->dbbuf_dbs) {
+ dma_free_coherent(dev->dev, mem_size,
+ dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
+ dev->dbbuf_dbs = NULL;
+ }
+ if (dev->dbbuf_eis) {
+ dma_free_coherent(dev->dev, mem_size,
+ dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
+ dev->dbbuf_eis = NULL;
+ }
+}
+
+static void nvme_dbbuf_init(struct nvme_dev *dev,
+ struct nvme_queue *nvmeq, int qid)
+{
+ if (!dev->dbbuf_dbs || !qid)
+ return;
+
+ nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
+ nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
+ nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
+ nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
+}
+
+static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
+{
+ if (!nvmeq->qid)
+ return;
+
+ nvmeq->dbbuf_sq_db = NULL;
+ nvmeq->dbbuf_cq_db = NULL;
+ nvmeq->dbbuf_sq_ei = NULL;
+ nvmeq->dbbuf_cq_ei = NULL;
+}
+
+static void nvme_dbbuf_set(struct nvme_dev *dev)
+{
+ struct nvme_command c = { };
+ unsigned int i;
+
+ if (!dev->dbbuf_dbs)
+ return;
+
+ c.dbbuf.opcode = nvme_admin_dbbuf;
+ c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
+ c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
+
+ if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
+ dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
+ /* Free memory and continue on */
+ nvme_dbbuf_dma_free(dev);
+
+ for (i = 1; i <= dev->online_queues; i++)
+ nvme_dbbuf_free(&dev->queues[i]);
+ }
+}
+
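+/*
+ * An MMIO doorbell write is only needed when the event index written by the
+ * controller falls in the half-open interval [old, new), evaluated with
+ * wrapping 16-bit arithmetic. e.g. old == 10, new == 14: event_idx 12
+ * requires the write, event_idx 20 does not.
+ */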
+static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
+{
+ return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
+}
+
+/* Update dbbuf and return true if an MMIO is required */
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+ volatile __le32 *dbbuf_ei)
+{
+ if (dbbuf_db) {
+ u16 old_value, event_idx;
+
+ /*
+ * Ensure that the queue is written before updating
+ * the doorbell in memory
+ */
+ wmb();
+
+ old_value = le32_to_cpu(*dbbuf_db);
+ *dbbuf_db = cpu_to_le32(value);
+
+ /*
+ * Ensure that the doorbell is updated before reading the event
+ * index from memory. The controller needs to provide similar
+ * ordering to ensure the event index is updated before reading
+ * the doorbell.
+ */
+ mb();
+
+ event_idx = le32_to_cpu(*dbbuf_ei);
+ if (!nvme_dbbuf_need_event(event_idx, value, old_value))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_pci_npages_prp(void)
+{
+ unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+ unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
+}
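+/*
+ * With the limits above (NVME_MAX_KB_SZ == 8192, 4KiB controller pages)
+ * nprps is 2049 and this evaluates to DIV_ROUND_UP(8 * 2049, 4088) == 5,
+ * which is where NVME_MAX_NR_ALLOCATIONS comes from.
+ */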
+
+static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_dev *dev = to_nvme_dev(data);
+ struct nvme_queue *nvmeq = &dev->queues[0];
+
+ WARN_ON(hctx_idx != 0);
+ WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
+
+ hctx->driver_data = nvmeq;
+ return 0;
+}
+
+static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_dev *dev = to_nvme_dev(data);
+ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
+
+ WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
+ hctx->driver_data = nvmeq;
+ return 0;
+}
+
+static int nvme_pci_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_req(req)->ctrl = set->driver_data;
+ nvme_req(req)->cmd = &iod->cmd;
+ return 0;
+}
+
+static int queue_irq_offset(struct nvme_dev *dev)
+{
+ /* if we have more than 1 vec, admin queue offsets us by 1 */
+ if (dev->num_vecs > 1)
+ return 1;
+
+ return 0;
+}
+
+static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_dev *dev = to_nvme_dev(set->driver_data);
+ int i, qoff, offset;
+
+ offset = queue_irq_offset(dev);
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ map->nr_queues = dev->io_queues[i];
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /*
+ * The poll queue(s) don't have an IRQ (and hence IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL && offset)
+ blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ else
+ blk_mq_map_queues(map);
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
+}
+
+/*
+ * Write sq tail if we are asked to, or if the next command would wrap.
+ */
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
+{
+ if (!write_sq) {
+ u16 next_tail = nvmeq->sq_tail + 1;
+
+ if (next_tail == nvmeq->q_depth)
+ next_tail = 0;
+ if (next_tail != nvmeq->last_sq_tail)
+ return;
+ }
+
+ if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
+ nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+ nvmeq->last_sq_tail = nvmeq->sq_tail;
+}
+
+static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd)
+{
+ memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
+ absolute_pointer(cmd), sizeof(*cmd));
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+}
+
+static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+
+ spin_lock(&nvmeq->sq_lock);
+ if (nvmeq->sq_tail != nvmeq->last_sq_tail)
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+ int nseg)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ unsigned int avg_seg_size;
+
+ avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+
+ if (!nvme_ctrl_sgl_supported(&dev->ctrl))
+ return false;
+ if (!nvmeq->qid)
+ return false;
+ if (!sgl_threshold || avg_seg_size < sgl_threshold)
+ return false;
+ return true;
+}
+
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
+{
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+
+ for (i = 0; i < iod->nr_allocations; i++) {
+ __le64 *prp_list = iod->list[i].prp_list;
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+}
+
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
+ }
+
+ WARN_ON_ONCE(!iod->sgt.nents);
+
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
+
+ if (iod->nr_allocations == 0)
+ dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
+ iod->first_dma);
+ else if (iod->nr_allocations == 1)
+ dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
+ iod->first_dma);
+ else
+ nvme_free_prps(dev, req);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
+}
+
+static void nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+ "dma_address:%pad dma_length:%d\n",
+ i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+}
+
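+/*
+ * Build the PRP entries for a request: prp1 points at the first (possibly
+ * unaligned) controller page of the payload, and prp2 is either the second
+ * page (for transfers spanning at most two pages) or the DMA address of a
+ * PRP list. When a PRP list page fills up, its last slot is turned into a
+ * chain pointer to the next list page.
+ */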
+static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct dma_pool *pool;
+ int length = blk_rq_payload_bytes(req);
+ struct scatterlist *sg = iod->sgt.sgl;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+ __le64 *prp_list;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ goto done;
+ }
+
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= NVME_CTRL_PAGE_SIZE) {
+ iod->first_dma = dma_addr;
+ goto done;
+ }
+
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ iod->nr_allocations = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->nr_allocations = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ iod->nr_allocations = -1;
+ return BLK_STS_RESOURCE;
+ }
+ iod->list[0].prp_list = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+ goto free_prps;
+ iod->list[iod->nr_allocations++].prp_list = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+ return BLK_STS_OK;
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
+ "Invalid SGL for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), iod->sgt.nents);
+ return BLK_STS_IOERR;
+}
+
+static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = NVME_SGL_FMT_DATA_DESC << 4;
+}
+
+static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
+}
+
+static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct dma_pool *pool;
+ struct nvme_sgl_desc *sg_list;
+ struct scatterlist *sg = iod->sgt.sgl;
+ unsigned int entries = iod->sgt.nents;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ /* setting the transfer type as SGL */
+ cmd->flags = NVME_CMD_SGL_METABUF;
+
+ if (entries == 1) {
+ nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
+ return BLK_STS_OK;
+ }
+
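+ /*
+ * A 256-byte block from the small pool holds 16 SGL descriptors
+ * (16 bytes each); longer lists need a page-sized allocation.
+ */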
+ if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
+ pool = dev->prp_small_pool;
+ iod->nr_allocations = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->nr_allocations = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ iod->nr_allocations = -1;
+ return BLK_STS_RESOURCE;
+ }
+
+ iod->list[0].sg_list = sg_list;
+ iod->first_dma = sgl_dma;
+
+ nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
+ do {
+ nvme_pci_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--entries > 0);
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ else
+ cmnd->dptr.prp2 = 0;
+ return BLK_STS_OK;
+}
+
+static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->flags = NVME_CMD_SGL_METABUF;
+ cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
+ cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
+ cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
+ return BLK_STS_OK;
+}
+
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int rc;
+
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct bio_vec bv = req_bvec(req);
+
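+ /*
+ * Single-segment fast path: map the bio_vec directly, avoiding the
+ * scatterlist mempool. Use at most two PRP entries when offset plus
+ * length fits in two controller pages, or a single SGL data
+ * descriptor on I/O queues when the controller supports SGLs.
+ */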
+ if (!is_pci_p2pdma_page(bv.bv_page)) {
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ return nvme_setup_prp_simple(dev, req,
+ &cmnd->rw, &bv);
+
+ if (nvmeq->qid && sgl_threshold &&
+ nvme_ctrl_sgl_supported(&dev->ctrl))
+ return nvme_setup_sgl_simple(dev, req,
+ &cmnd->rw, &bv);
+ }
+ }
+
+ iod->dma_len = 0;
+ iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sgt.sgl)
+ return BLK_STS_RESOURCE;
+ sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+ iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ if (!iod->sgt.orig_nents)
+ goto out_free_sg;
+
+ rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+ DMA_ATTR_NO_WARN);
+ if (rc) {
+ if (rc == -EREMOTEIO)
+ ret = BLK_STS_TARGET;
+ goto out_free_sg;
+ }
+
+ if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ else
+ ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
+ if (ret != BLK_STS_OK)
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
+out_free_sg:
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
+ return ret;
+}
+
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+ rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->meta_dma))
+ return BLK_STS_IOERR;
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ return BLK_STS_OK;
+}
+
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
+
+ iod->aborted = false;
+ iod->nr_allocations = -1;
+ iod->sgt.nents = 0;
+
+ ret = nvme_setup_cmd(req->q->queuedata, req);
+ if (ret)
+ return ret;
+
+ if (blk_rq_nr_phys_segments(req)) {
+ ret = nvme_map_data(dev, req, &iod->cmd);
+ if (ret)
+ goto out_free_cmd;
+ }
+
+ if (blk_integrity_rq(req)) {
+ ret = nvme_map_metadata(dev, req, &iod->cmd);
+ if (ret)
+ goto out_unmap_data;
+ }
+
+ nvme_start_request(req);
+ return BLK_STS_OK;
+out_unmap_data:
+ nvme_unmap_data(dev, req);
+out_free_cmd:
+ nvme_cleanup_cmd(req);
+ return ret;
+}
+
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return BLK_STS_IOERR;
+
+ if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+ return nvme_fail_nonready_command(&dev->ctrl, req);
+
+ ret = nvme_prep_rq(dev, req);
+ if (unlikely(ret))
+ return ret;
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ nvme_write_sq_db(nvmeq, bd->last);
+ spin_unlock(&nvmeq->sq_lock);
+ return BLK_STS_OK;
+}
+
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+ spin_lock(&nvmeq->sq_lock);
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ }
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+{
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return false;
+ if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+ return false;
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+ return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
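+ /*
+ * Walk the plugged request list, preparing each request and grouping
+ * runs that share a hardware queue so they can be copied to the SQ
+ * and doorbelled as one batch; requests that fail preparation are
+ * collected on requeue_list and handed back to the block layer.
+ */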
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ if (!nvme_prep_rq_batch(nvmeq, req)) {
+ /* detach 'req' and add to remainder list */
+ rq_list_move(rqlist, &requeue_list, req, prev);
+
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ /* detach rest of list, and submit */
+ req->rq_next = NULL;
+ nvme_submit_cmds(nvmeq, rqlist);
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
+static __always_inline void nvme_pci_unmap_rq(struct request *req)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+
+ if (blk_integrity_rq(req)) {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ dma_unmap_page(dev->dev, iod->meta_dma,
+ rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ }
+
+ if (blk_rq_nr_phys_segments(req))
+ nvme_unmap_data(dev, req);
+}
+
+static void nvme_pci_complete_rq(struct request *req)
+{
+ nvme_pci_unmap_rq(req);
+ nvme_complete_rq(req);
+}
+
+static void nvme_pci_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, nvme_pci_unmap_rq);
+}
+
+/* We read the CQE phase first to check if the rest of the entry is valid */
+static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
+{
+ struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
+}
+
+static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
+{
+ u16 head = nvmeq->cq_head;
+
+ if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
+ nvmeq->dbbuf_cq_ei))
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+}
+
+static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
+{
+ if (!nvmeq->qid)
+ return nvmeq->dev->admin_tagset.tags[0];
+ return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
+}
+
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob, u16 idx)
+{
+ struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
+ struct request *req;
+
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
+ nvme_complete_async_event(&nvmeq->dev->ctrl,
+ cqe->status, &cqe->result);
+ return;
+ }
+
+ req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
+ if (unlikely(!req)) {
+ dev_warn(nvmeq->dev->ctrl.device,
+ "invalid id %d completed on queue %d\n",
+ command_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
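+ /*
+ * If the completion was not already punted to another CPU, try to
+ * add the request to the caller's completion batch, and only fall
+ * back to completing it individually when batching is not possible.
+ */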
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ nvme_pci_complete_batch))
+ nvme_pci_complete_rq(req);
+}
+
+static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
+{
+ u32 tmp = nvmeq->cq_head + 1;
+
+ if (tmp == nvmeq->q_depth) {
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase ^= 1;
+ } else {
+ nvmeq->cq_head = tmp;
+ }
+}
+
+static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
+{
+ int found = 0;
+
+ while (nvme_cqe_pending(nvmeq)) {
+ found++;
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
+ nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
+ nvme_update_cq_head(nvmeq);
+ }
+
+ if (found)
+ nvme_ring_cq_doorbell(nvmeq);
+ return found;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ DEFINE_IO_COMP_BATCH(iob);
+
+ if (nvme_poll_cq(nvmeq, &iob)) {
+ if (!rq_list_empty(iob.req_list))
+ nvme_pci_complete_batch(&iob);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+
+ if (nvme_cqe_pending(nvmeq))
+ return IRQ_WAKE_THREAD;
+ return IRQ_NONE;
+}
+
+/*
+ * Poll for completions for any interrupt driven queue
+ * Can be called from any context.
+ */
+static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
+{
+ struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+
+ WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
+
+ disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ nvme_poll_cq(nvmeq, NULL);
+ enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+}
+
+static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ bool found;
+
+ if (!nvme_cqe_pending(nvmeq))
+ return 0;
+
+ spin_lock(&nvmeq->cq_poll_lock);
+ found = nvme_poll_cq(nvmeq, iob);
+ spin_unlock(&nvmeq->cq_poll_lock);
+
+ return found;
+}
+
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ struct nvme_queue *nvmeq = &dev->queues[0];
+ struct nvme_command c = { };
+
+ c.common.opcode = nvme_admin_async_event;
+ c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &c);
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+ struct nvme_command c = { };
+
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(id);
+
+ return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq, s16 vector)
+{
+ struct nvme_command c = { };
+ int flags = NVME_QUEUE_PHYS_CONTIG;
+
+ if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
+ flags |= NVME_CQ_IRQ_ENABLED;
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(qid);
+ c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_cq.cq_flags = cpu_to_le16(flags);
+ c.create_cq.irq_vector = cpu_to_le16(vector);
+
+ return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ struct nvme_ctrl *ctrl = &dev->ctrl;
+ struct nvme_command c = { };
+ int flags = NVME_QUEUE_PHYS_CONTIG;
+
+ /*
+ * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
+ * set. Since the URGENT priority value is zero, that makes all queues
+ * URGENT.
+ */
+ if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
+ flags |= NVME_SQ_PRIO_MEDIUM;
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(qid);
+ c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_sq.sq_flags = cpu_to_le16(flags);
+ c.create_sq.cqid = cpu_to_le16(qid);
+
+ return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ dev_warn(nvmeq->dev->ctrl.device,
+ "Abort status: 0x%x", nvme_req(req)->status);
+ atomic_inc(&nvmeq->dev->ctrl.abort_limit);
+ blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
+}
+
+static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+{
+ /* If true, indicates loss of adapter communication, possibly by an
+ * NVMe Subsystem reset.
+ */
+ bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+ switch (nvme_ctrl_state(&dev->ctrl)) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ default:
+ break;
+ }
+
+ /* We shouldn't reset unless the controller is on fatal error state
+ * _or_ if we lost the communication with it.
+ */
+ if (!(csts & NVME_CSTS_CFS) && !nssro)
+ return false;
+
+ return true;
+}
+
+static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+{
+ /* Read a config register to help see what died. */
+ u16 pci_status;
+ int result;
+
+ result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
+ &pci_status);
+ if (result == PCIBIOS_SUCCESSFUL)
+ dev_warn(dev->ctrl.device,
+ "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
+ csts, pci_status);
+ else
+ dev_warn(dev->ctrl.device,
+ "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
+ csts, result);
+
+ if (csts != ~0)
+ return;
+
+ dev_warn(dev->ctrl.device,
+ "Does your device have a faulty power saving mode enabled?\n");
+ dev_warn(dev->ctrl.device,
+ "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
+}
+
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct request *abort_req;
+ struct nvme_command cmd = { };
+ u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+ /* If PCI error recovery process is happening, we cannot reset or
+ * the recovery mechanism will surely fail.
+ */
+ mb();
+ if (pci_channel_offline(to_pci_dev(dev->dev)))
+ return BLK_EH_RESET_TIMER;
+
+ /*
+ * Reset immediately if the controller is failed
+ */
+ if (nvme_should_reset(dev, csts)) {
+ nvme_warn_reset(dev, csts);
+ goto disable;
+ }
+
+ /*
+ * Did we miss an interrupt?
+ */
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
+ nvme_poll(req->mq_hctx, NULL);
+ else
+ nvme_poll_irqdisable(nvmeq);
+
+ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
+ dev_warn(dev->ctrl.device,
+ "I/O %d QID %d timeout, completion polled\n",
+ req->tag, nvmeq->qid);
+ return BLK_EH_DONE;
+ }
+
+ /*
+ * Shutdown immediately if controller times out while starting. The
+ * reset work will see the pci device disabled when it gets the forced
+ * cancellation error. All outstanding requests are completed on
+ * shutdown, so we return BLK_EH_DONE.
+ */
+ switch (nvme_ctrl_state(&dev->ctrl)) {
+ case NVME_CTRL_CONNECTING:
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ fallthrough;
+ case NVME_CTRL_DELETING:
+ dev_warn_ratelimited(dev->ctrl.device,
+ "I/O %d QID %d timeout, disable controller\n",
+ req->tag, nvmeq->qid);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ nvme_dev_disable(dev, true);
+ return BLK_EH_DONE;
+ case NVME_CTRL_RESETTING:
+ return BLK_EH_RESET_TIMER;
+ default:
+ break;
+ }
+
+ /*
+ * Shutdown the controller immediately and schedule a reset if the
+ * command was already aborted once before and still hasn't been
+ * returned to the driver, or if this is the admin queue.
+ */
+ if (!nvmeq->qid || iod->aborted) {
+ dev_warn(dev->ctrl.device,
+ "I/O %d QID %d timeout, reset controller\n",
+ req->tag, nvmeq->qid);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ goto disable;
+ }
+
+ if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
+ atomic_inc(&dev->ctrl.abort_limit);
+ return BLK_EH_RESET_TIMER;
+ }
+ iod->aborted = true;
+
+ cmd.abort.opcode = nvme_admin_abort_cmd;
+ cmd.abort.cid = nvme_cid(req);
+ cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+
+ dev_warn(nvmeq->dev->ctrl.device,
+ "I/O %d (%s) QID %d timeout, aborting\n",
+ req->tag,
+ nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+ nvmeq->qid);
+
+ abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
+ BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(abort_req)) {
+ atomic_inc(&dev->ctrl.abort_limit);
+ return BLK_EH_RESET_TIMER;
+ }
+ nvme_init_request(abort_req, &cmd);
+
+ abort_req->end_io = abort_endio;
+ abort_req->end_io_data = NULL;
+ blk_execute_rq_nowait(abort_req, false);
+
+ /*
+ * The aborted req will be completed on receiving the abort req.
+ * We enable the timer again. If hit twice, it'll cause a device reset,
+ * as the device then is in a faulty state.
+ */
+ return BLK_EH_RESET_TIMER;
+
+disable:
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+ return BLK_EH_DONE;
+
+ nvme_dev_disable(dev, false);
+ if (nvme_try_sched_reset(&dev->ctrl))
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ return BLK_EH_DONE;
+}
+
+static void nvme_free_queue(struct nvme_queue *nvmeq)
+{
+ dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ if (!nvmeq->sq_cmds)
+ return;
+
+ if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
+ pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
+ nvmeq->sq_cmds, SQ_SIZE(nvmeq));
+ } else {
+ dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ }
+}
+
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+{
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
+ dev->ctrl.queue_count--;
+ nvme_free_queue(&dev->queues[i]);
+ }
+}
+
+static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
+{
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
+ return;
+
+ /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
+ mb();
+
+ nvmeq->dev->online_queues--;
+ if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
+ nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
+ if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
+ pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
+}
+
+static void nvme_suspend_io_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(dev, i);
+}
+
+/*
+ * Called only on a device that has been disabled and after all other threads
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
+ */
+static void nvme_reap_pending_cqes(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+ spin_lock(&dev->queues[i].cq_poll_lock);
+ nvme_poll_cq(&dev->queues[i], NULL);
+ spin_unlock(&dev->queues[i].cq_poll_lock);
+ }
+}
+
+static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+ int entry_size)
+{
+ int q_depth = dev->q_depth;
+ unsigned q_size_aligned = roundup(q_depth * entry_size,
+ NVME_CTRL_PAGE_SIZE);
+
+ if (q_size_aligned * nr_io_queues > dev->cmb_size) {
+ u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
+
+ mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
+ q_depth = div_u64(mem_per_q, entry_size);
+
+ /*
+ * Ensure the reduced q_depth is above some threshold where it
+ * would be better to map queues in system memory with the
+ * original depth
+ */
+ if (q_depth < 64)
+ return -ENOMEM;
+ }
+
+ return q_depth;
+}
+
+static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ int qid)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
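+ /*
+ * Prefer placing I/O SQs in the controller memory buffer when the
+ * controller advertises SQ support in CMBSZ; fall back to coherent
+ * host memory if the p2p allocation or bus-address lookup fails.
+ */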
+ if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
+ if (nvmeq->sq_cmds) {
+ nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+ nvmeq->sq_cmds);
+ if (nvmeq->sq_dma_addr) {
+ set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
+ return 0;
+ }
+
+ pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
+ }
+ }
+
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ return -ENOMEM;
+ return 0;
+}
+
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
+{
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
+
+ nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
+ nvmeq->q_depth = depth;
+ nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->cqes)
+ goto free_nvmeq;
+
+ if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
+ goto free_cqdma;
+
+ nvmeq->dev = dev;
+ spin_lock_init(&nvmeq->sq_lock);
+ spin_lock_init(&nvmeq->cq_poll_lock);
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
+ nvmeq->qid = qid;
+ dev->ctrl.queue_count++;
+
+ return 0;
+
+ free_cqdma:
+ dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
+ free_nvmeq:
+ return -ENOMEM;
+}
+
+static int queue_request_irq(struct nvme_queue *nvmeq)
+{
+ struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+ int nr = nvmeq->dev->ctrl.instance;
+
+ if (use_threaded_interrupts) {
+ return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
+ nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+ } else {
+ return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
+ NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+ }
+}
+
+static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+
+ nvmeq->sq_tail = 0;
+ nvmeq->last_sq_tail = 0;
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
+ nvme_dbbuf_init(dev, nvmeq, qid);
+ dev->online_queues++;
+ wmb(); /* ensure the first interrupt sees the initialization */
+}
+
+/*
+ * Try getting shutdown_lock while setting up IO queues.
+ */
+static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+{
+ /*
+ * Give up if the lock is being held by nvme_dev_disable.
+ */
+ if (!mutex_trylock(&dev->shutdown_lock))
+ return -ENODEV;
+
+ /*
+ * Controller is in wrong state, fail early.
+ */
+ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
+ mutex_unlock(&dev->shutdown_lock);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+ int result;
+ u16 vector = 0;
+
+ clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
+
+ /*
+ * A queue's vector matches the queue identifier unless the controller
+ * has only one vector available.
+ */
+ if (!polled)
+ vector = dev->num_vecs == 1 ? 0 : qid;
+ else
+ set_bit(NVMEQ_POLLED, &nvmeq->flags);
+
+ result = adapter_alloc_cq(dev, qid, nvmeq, vector);
+ if (result)
+ return result;
+
+ result = adapter_alloc_sq(dev, qid, nvmeq);
+ if (result < 0)
+ return result;
+ if (result)
+ goto release_cq;
+
+ nvmeq->cq_vector = vector;
+
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ nvme_init_queue(nvmeq, qid);
+ if (!polled) {
+ result = queue_request_irq(nvmeq);
+ if (result < 0)
+ goto release_sq;
+ }
+
+ set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+ mutex_unlock(&dev->shutdown_lock);
+ return result;
+
+release_sq:
+ dev->online_queues--;
+ mutex_unlock(&dev->shutdown_lock);
+ adapter_delete_sq(dev, qid);
+release_cq:
+ adapter_delete_cq(dev, qid);
+ return result;
+}
+
+static const struct blk_mq_ops nvme_mq_admin_ops = {
+ .queue_rq = nvme_queue_rq,
+ .complete = nvme_pci_complete_rq,
+ .init_hctx = nvme_admin_init_hctx,
+ .init_request = nvme_pci_init_request,
+ .timeout = nvme_timeout,
+};
+
+static const struct blk_mq_ops nvme_mq_ops = {
+ .queue_rq = nvme_queue_rq,
+ .queue_rqs = nvme_queue_rqs,
+ .complete = nvme_pci_complete_rq,
+ .commit_rqs = nvme_commit_rqs,
+ .init_hctx = nvme_init_hctx,
+ .init_request = nvme_pci_init_request,
+ .map_queues = nvme_pci_map_queues,
+ .timeout = nvme_timeout,
+ .poll = nvme_poll,
+};
+
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
+ /*
+ * If the controller was reset during removal, it's possible
+ * user requests may be waiting on a stopped queue. Start the
+ * queue to flush these to completion.
+ */
+ nvme_unquiesce_admin_queue(&dev->ctrl);
+ nvme_remove_admin_tag_set(&dev->ctrl);
+ }
+}
+
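+/*
+ * Each queue exposes a submission and a completion doorbell, each 4 bytes
+ * scaled by the controller's doorbell stride; the +1 accounts for the
+ * admin queue's doorbell pair.
+ */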
+static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+ return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
+}
+
+static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (size <= dev->bar_mapped_size)
+ return 0;
+ if (size > pci_resource_len(pdev, 0))
+ return -ENOMEM;
+ if (dev->bar)
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!dev->bar) {
+ dev->bar_mapped_size = 0;
+ return -ENOMEM;
+ }
+ dev->bar_mapped_size = size;
+ dev->dbs = dev->bar + NVME_REG_DBS;
+
+ return 0;
+}
+
+static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
+{
+ int result;
+ u32 aqa;
+ struct nvme_queue *nvmeq;
+
+ result = nvme_remap_bar(dev, db_bar_size(dev, 0));
+ if (result < 0)
+ return result;
+
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
+ NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
+
+ if (dev->subsystem &&
+ (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
+ writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
+
+ /*
+ * If the device has been passed off to us in an enabled state, just
+ * clear the enabled bit. The spec says we should set the 'shutdown
+ * notification bits', but doing so may cause the device to complete
+ * commands to the admin queue ... and we don't know what memory that
+ * might be pointing at!
+ */
+ result = nvme_disable_ctrl(&dev->ctrl, false);
+ if (result < 0)
+ return result;
+
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
+ if (result)
+ return result;
+
+ dev->ctrl.numa_node = dev_to_node(dev->dev);
+
+ nvmeq = &dev->queues[0];
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+ writel(aqa, dev->bar + NVME_REG_AQA);
+ lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
+ lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
+
+ result = nvme_enable_ctrl(&dev->ctrl);
+ if (result)
+ return result;
+
+ nvmeq->cq_vector = 0;
+ nvme_init_queue(nvmeq, 0);
+ result = queue_request_irq(nvmeq);
+ if (result) {
+ dev->online_queues--;
+ return result;
+ }
+
+ set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+ return result;
+}
+
+static int nvme_create_io_queues(struct nvme_dev *dev)
+{
+ unsigned i, max, rw_queues;
+ int ret = 0;
+
+ for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
+ if (nvme_alloc_queue(dev, i, dev->q_depth)) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ max = min(dev->max_qid, dev->ctrl.queue_count - 1);
+ if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
+ rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
+ dev->io_queues[HCTX_TYPE_READ];
+ } else {
+ rw_queues = max;
+ }
+
+ for (i = dev->online_queues; i <= max; i++) {
+ bool polled = i > rw_queues;
+
+ ret = nvme_create_queue(&dev->queues[i], i, polled);
+ if (ret)
+ break;
+ }
+
+ /*
+ * Ignore failing Create SQ/CQ commands; we can continue with fewer
+ * than the desired number of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
+ * be useful to upgrade buggy firmware, for example.
+ */
+ return ret >= 0 ? 0 : ret;
+}
+
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
+{
+ u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+ return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
+{
+ return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
+
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 size, offset;
+ resource_size_t bar_size;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int bar;
+
+ if (dev->cmb_size)
+ return;
+
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
+ dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
+ if (!dev->cmbsz)
+ return;
+ dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+
+ size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+ offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
+ bar = NVME_CMB_BIR(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, bar);
+
+ if (offset > bar_size)
+ return;
+
+ /*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
+ * Controllers may support a CMB size larger than their BAR,
+ * for example, due to being behind a bridge. Reduce the CMB to
+ * the reported size of the BAR
+ */
+ if (size > bar_size - offset)
+ size = bar_size - offset;
+
+ if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
+ dev_warn(dev->ctrl.device,
+ "failed to register the CMB\n");
+ return;
+ }
+
+ dev->cmb_size = size;
+ dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
+
+ if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
+ (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
+ pci_p2pmem_publish(pdev, true);
+
+ nvme_update_attrs(dev);
+}
+
+static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
+{
+ u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
+ u64 dma_addr = dev->host_mem_descs_dma;
+ struct nvme_command c = { };
+ int ret;
+
+ c.features.opcode = nvme_admin_set_features;
+ c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
+ c.features.dword11 = cpu_to_le32(bits);
+ c.features.dword12 = cpu_to_le32(host_mem_size);
+ c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
+ c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
+ c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
+
+ ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "failed to set host mem (err %d, flags %#x).\n",
+ ret, bits);
+ } else
+ dev->hmb = bits & NVME_HOST_MEM_ENABLE;
+
+ return ret;
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nr_host_mem_descs; i++) {
+ struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
+ size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
+
+ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+ le64_to_cpu(desc->addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+ }
+
+ kfree(dev->host_mem_desc_bufs);
+ dev->host_mem_desc_bufs = NULL;
+ dma_free_coherent(dev->dev,
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+ dev->host_mem_descs, dev->host_mem_descs_dma);
+ dev->host_mem_descs = NULL;
+ dev->nr_host_mem_descs = 0;
+}
+
+static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ u32 chunk_size)
+{
+ struct nvme_host_mem_buf_desc *descs;
+ u32 max_entries, len;
+ dma_addr_t descs_dma;
+ int i = 0;
+ void **bufs;
+ u64 size, tmp;
+
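+ /*
+ * Split the preferred HMB size into chunk_size pieces, capped by the
+ * controller's HMMAXD descriptor limit, and describe each successfully
+ * allocated chunk in a descriptor the controller can address.
+ */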
+ tmp = (preferred + chunk_size - 1);
+ do_div(tmp, chunk_size);
+ max_entries = tmp;
+
+ if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
+ max_entries = dev->ctrl.hmmaxd;
+
+ descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
+ if (!descs)
+ goto out;
+
+ bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
+ if (!bufs)
+ goto out_free_descs;
+
+ for (size = 0; size < preferred && i < max_entries; size += len) {
+ dma_addr_t dma_addr;
+
+ len = min_t(u64, chunk_size, preferred - size);
+ bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+ if (!bufs[i])
+ break;
+
+ descs[i].addr = cpu_to_le64(dma_addr);
+ descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
+ i++;
+ }
+
+ if (!size)
+ goto out_free_bufs;
+
+ dev->nr_host_mem_descs = i;
+ dev->host_mem_size = size;
+ dev->host_mem_descs = descs;
+ dev->host_mem_descs_dma = descs_dma;
+ dev->host_mem_desc_bufs = bufs;
+ return 0;
+
+out_free_bufs:
+ while (--i >= 0) {
+ size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
+
+ dma_free_attrs(dev->dev, size, bufs[i],
+ le64_to_cpu(descs[i].addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+ }
+
+ kfree(bufs);
+out_free_descs:
+ dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ descs_dma);
+out:
+ dev->host_mem_descs = NULL;
+ return -ENOMEM;
+}
+
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+ u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+ u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
+ u64 chunk_size;
+
+ /* start big and work our way down */
+ for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
+ if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+ if (!min || dev->host_mem_size >= min)
+ return 0;
+ nvme_free_host_mem(dev);
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int nvme_setup_host_mem(struct nvme_dev *dev)
+{
+ u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+ u64 preferred = (u64)dev->ctrl.hmpre * 4096;
+ u64 min = (u64)dev->ctrl.hmmin * 4096;
+ u32 enable_bits = NVME_HOST_MEM_ENABLE;
+ int ret;
+
+ if (!dev->ctrl.hmpre)
+ return 0;
+
+ preferred = min(preferred, max);
+ if (min > max) {
+ dev_warn(dev->ctrl.device,
+ "min host memory (%lld MiB) above limit (%d MiB).\n",
+ min >> ilog2(SZ_1M), max_host_mem_size_mb);
+ nvme_free_host_mem(dev);
+ return 0;
+ }
+
+ /*
+ * If we already have a buffer allocated check if we can reuse it.
+ */
+ if (dev->host_mem_descs) {
+ if (dev->host_mem_size >= min)
+ enable_bits |= NVME_HOST_MEM_RETURN;
+ else
+ nvme_free_host_mem(dev);
+ }
+
+ if (!dev->host_mem_descs) {
+ if (nvme_alloc_host_mem(dev, min, preferred)) {
+ dev_warn(dev->ctrl.device,
+ "failed to allocate host memory buffer.\n");
+ return 0; /* controller must work without HMB */
+ }
+
+ dev_info(dev->ctrl.device,
+ "allocated %lld MiB host memory buffer.\n",
+ dev->host_mem_size >> ilog2(SZ_1M));
+ }
+
+ ret = nvme_set_host_mem(dev, enable_bits);
+ if (ret)
+ nvme_free_host_mem(dev);
+ return ret;
+}
+
+static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmb);
+
+static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%u\n", ndev->cmbloc);
+}
+static DEVICE_ATTR_RO(cmbloc);
+
+static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%u\n", ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmbsz);
+
+static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%d\n", ndev->hmb);
+}
+
+static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+ bool new;
+ int ret;
+
+ if (kstrtobool(buf, &new) < 0)
+ return -EINVAL;
+
+ if (new == ndev->hmb)
+ return count;
+
+ if (new) {
+ ret = nvme_setup_host_mem(ndev);
+ } else {
+ ret = nvme_set_host_mem(ndev, 0);
+ if (!ret)
+ nvme_free_host_mem(ndev);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(hmb);
+
+static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct nvme_ctrl *ctrl =
+ dev_get_drvdata(container_of(kobj, struct device, kobj));
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ if (a == &dev_attr_cmb.attr ||
+ a == &dev_attr_cmbloc.attr ||
+ a == &dev_attr_cmbsz.attr) {
+ if (!dev->cmbsz)
+ return 0;
+ }
+ if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute *nvme_pci_attrs[] = {
+ &dev_attr_cmb.attr,
+ &dev_attr_cmbloc.attr,
+ &dev_attr_cmbsz.attr,
+ &dev_attr_hmb.attr,
+ NULL,
+};
+
+static const struct attribute_group nvme_pci_dev_attrs_group = {
+ .attrs = nvme_pci_attrs,
+ .is_visible = nvme_pci_attrs_are_visible,
+};
+
+static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
+ &nvme_dev_attrs_group,
+ &nvme_pci_dev_attrs_group,
+ NULL,
+};
+
+static void nvme_update_attrs(struct nvme_dev *dev)
+{
+ sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
+}
+
+/*
+ * nirqs is the number of interrupts available for write and read
+ * queues. The core already reserved an interrupt for the admin queue.
+ */
+static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
+{
+ struct nvme_dev *dev = affd->priv;
+ unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
+
+ /*
+ * If there is no interrupt available for queues, ensure that
+ * the default queue is set to 1. The affinity set size is
+ * also set to one, but the irq core ignores it for this case.
+ *
+ * If only one interrupt is available or 'write_queues' == 0, combine
+ * write and read queues.
+ *
+ * If 'write_queues' > 0, ensure it leaves room for at least one read
+ * queue.
+ */
+ if (!nrirqs) {
+ nrirqs = 1;
+ nr_read_queues = 0;
+ } else if (nrirqs == 1 || !nr_write_queues) {
+ nr_read_queues = 0;
+ } else if (nr_write_queues >= nrirqs) {
+ nr_read_queues = 1;
+ } else {
+ nr_read_queues = nrirqs - nr_write_queues;
+ }
+
+ dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+ affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+ dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+ affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
+ affd->nr_sets = nr_read_queues ? 2 : 1;
+}
+
+static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct irq_affinity affd = {
+ .pre_vectors = 1,
+ .calc_sets = nvme_calc_irq_sets,
+ .priv = dev,
+ };
+ unsigned int irq_queues, poll_queues;
+
+ /*
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+ * left over for non-polled I/O.
+ */
+ poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
+ dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
+
+ /*
+ * Initialize for the single interrupt case; this will be updated in
+ * nvme_calc_irq_sets().
+ */
+ dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
+ dev->io_queues[HCTX_TYPE_READ] = 0;
+
+ /*
+ * We need interrupts for the admin queue and each non-polled I/O queue,
+ * but some Apple controllers require all queues to use the first
+ * vector.
+ */
+ irq_queues = 1;
+ if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
+ irq_queues += (nr_io_queues - poll_queues);
+ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+}
+
+static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
+{
+ /*
+ * If tags are shared with admin queue (Apple bug), then
+ * make sure we only use one IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ return 1;
+ return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev)
+{
+ struct nvme_queue *adminq = &dev->queues[0];
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned int nr_io_queues;
+ unsigned long size;
+ int result;
+
+ /*
+ * Sample the module parameters once at reset time so that we have
+ * stable values to work with.
+ */
+ dev->nr_write_queues = write_queues;
+ dev->nr_poll_queues = poll_queues;
+
+ nr_io_queues = dev->nr_allocated_queues - 1;
+ result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
+ if (result < 0)
+ return result;
+
+ if (nr_io_queues == 0)
+ return 0;
+
+ /*
+ * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
+ * from set to unset. If there is a window before it is truly freed,
+ * pci_free_irq_vectors() jumping into this window will crash.
+ * Also take the lock to avoid racing with pci_free_irq_vectors() in
+ * the nvme_dev_disable() path.
+ */
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
+
+ if (dev->cmb_use_sqes) {
+ result = nvme_cmb_qdepth(dev, nr_io_queues,
+ sizeof(struct nvme_command));
+ if (result > 0) {
+ dev->q_depth = result;
+ dev->ctrl.sqsize = result - 1;
+ } else {
+ dev->cmb_use_sqes = false;
+ }
+ }
+
+ do {
+ size = db_bar_size(dev, nr_io_queues);
+ result = nvme_remap_bar(dev, size);
+ if (!result)
+ break;
+ if (!--nr_io_queues) {
+ result = -ENOMEM;
+ goto out_unlock;
+ }
+ } while (1);
+ adminq->q_db = dev->dbs;
+
+ retry:
+ /* Deregister the admin queue's interrupt */
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
+
+ /*
+ * If MSI-X was enabled early because INTx is unavailable, disable it
+ * again before setting up the full range we need.
+ */
+ pci_free_irq_vectors(pdev);
+
+ result = nvme_setup_irqs(dev, nr_io_queues);
+ if (result <= 0) {
+ result = -EIO;
+ goto out_unlock;
+ }
+
+ dev->num_vecs = result;
+ result = max(result - 1, 1);
+ dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
+
+ /*
+ * Should investigate if there's a performance win from allocating
+ * more queues than interrupt vectors; it might allow the submission
+ * path to scale better, even if the receive path is limited by the
+ * number of interrupts.
+ */
+ result = queue_request_irq(adminq);
+ if (result)
+ goto out_unlock;
+ set_bit(NVMEQ_ENABLED, &adminq->flags);
+ mutex_unlock(&dev->shutdown_lock);
+
+ result = nvme_create_io_queues(dev);
+ if (result || dev->online_queues < 2)
+ return result;
+
+ if (dev->online_queues - 1 < dev->max_qid) {
+ nr_io_queues = dev->online_queues - 1;
+ nvme_delete_io_queues(dev);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ nvme_suspend_io_queues(dev);
+ goto retry;
+ }
+ dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
+ dev->io_queues[HCTX_TYPE_DEFAULT],
+ dev->io_queues[HCTX_TYPE_READ],
+ dev->io_queues[HCTX_TYPE_POLL]);
+ return 0;
+out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ return result;
+}
+
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+ blk_status_t error)
+{
+ struct nvme_queue *nvmeq = req->end_io_data;
+
+ blk_mq_free_request(req);
+ complete(&nvmeq->delete_done);
+ return RQ_END_IO_NONE;
+}
+
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+ blk_status_t error)
+{
+ struct nvme_queue *nvmeq = req->end_io_data;
+
+ if (error)
+ set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
+
+ return nvme_del_queue_end(req, error);
+}
+
+static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
+{
+ struct request_queue *q = nvmeq->dev->ctrl.admin_q;
+ struct request *req;
+ struct nvme_command cmd = { };
+
+ cmd.delete_queue.opcode = opcode;
+ cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+ req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ nvme_init_request(req, &cmd);
+
+ if (opcode == nvme_admin_delete_cq)
+ req->end_io = nvme_del_cq_end;
+ else
+ req->end_io = nvme_del_queue_end;
+ req->end_io_data = nvmeq;
+
+ init_completion(&nvmeq->delete_done);
+ blk_execute_rq_nowait(req, false);
+ return 0;
+}
+
+static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode)
+{
+ int nr_queues = dev->online_queues - 1, sent = 0;
+ unsigned long timeout;
+
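+ /*
+ * Fire off asynchronous delete commands for as many queues as the
+ * admin queue accepts without blocking, then wait for each completion
+ * (bounded by the admin timeout) before retrying any remaining queues.
+ */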
+ retry:
+ timeout = NVME_ADMIN_TIMEOUT;
+ while (nr_queues > 0) {
+ if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
+ break;
+ nr_queues--;
+ sent++;
+ }
+ while (sent) {
+ struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
+
+ timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
+ timeout);
+ if (timeout == 0)
+ return false;
+
+ sent--;
+ if (nr_queues)
+ goto retry;
+ }
+ return true;
+}
+
+static void nvme_delete_io_queues(struct nvme_dev *dev)
+{
+ if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq))
+ __nvme_delete_io_queues(dev, nvme_admin_delete_cq);
+}
+
+static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
+{
+ if (dev->io_queues[HCTX_TYPE_POLL])
+ return 3;
+ if (dev->io_queues[HCTX_TYPE_READ])
+ return 2;
+ return 1;
+}
+
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
+}
+
+static int nvme_pci_enable(struct nvme_dev *dev)
+{
+ int result = -ENOMEM;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (pci_enable_device_mem(pdev))
+ return result;
+
+ pci_set_master(pdev);
+
+ if (readl(dev->bar + NVME_REG_CSTS) == -1) {
+ result = -ENODEV;
+ goto disable;
+ }
+
+ /*
+ * Some devices and/or platforms don't advertise or work with INTx
+ * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+ * adjust this later.
+ */
+ result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (result < 0)
+ goto disable;
+
+ dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
+
+ dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ io_queue_depth);
+ dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
+ dev->dbs = dev->bar + 4096;
+
+ /*
+ * Some Apple controllers require a non-standard SQE size.
+ * Interestingly they also seem to ignore the CC:IOSQES register
+ * so we don't bother updating it here.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
+ dev->io_sqes = 7;
+ else
+ dev->io_sqes = NVME_NVM_IOSQES;
+
+ /*
+ * Temporary fix for the Apple controller found in the MacBook8,1 and
+ * some MacBook7,1 to avoid controller resets and data loss.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+ dev->q_depth = 2;
+ dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
+ "set queue depth=%u to work around controller resets\n",
+ dev->q_depth);
+ } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
+ (pdev->device == 0xa821 || pdev->device == 0xa822) &&
+ NVME_CAP_MQES(dev->ctrl.cap) == 0) {
+ dev->q_depth = 64;
+ dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
+ "set queue depth=%u\n", dev->q_depth);
+ }
+
+ /*
+ * Controllers with the shared tags quirk need the IO queue to be
+ * big enough so that we get 32 tags for the admin queue
+ */
+ if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
+ (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
+ dev->q_depth = NVME_AQ_DEPTH + 2;
+ dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
+ dev->q_depth);
+ }
+ dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
+
+ nvme_map_cmb(dev);
+
+ pci_save_state(pdev);
+
+ result = nvme_pci_configure_admin_queue(dev);
+ if (result)
+ goto free_irq;
+ return result;
+
+ free_irq:
+ pci_free_irq_vectors(pdev);
+ disable:
+ pci_disable_device(pdev);
+ return result;
+}
+
+static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+ if (dev->bar)
+ iounmap(dev->bar);
+ pci_release_mem_regions(to_pci_dev(dev->dev));
+}
+
+static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ u32 csts;
+
+ if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev))
+ return true;
+ if (pdev->error_state != pci_channel_io_normal)
+ return true;
+
+ csts = readl(dev->bar + NVME_REG_CSTS);
+ return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
+}
+
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ bool dead;
+
+ mutex_lock(&dev->shutdown_lock);
+ dead = nvme_pci_ctrl_is_dead(dev);
+ if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
+ if (pci_is_enabled(pdev))
+ nvme_start_freeze(&dev->ctrl);
+ /*
+ * Give the controller a chance to complete all entered requests
+ * if doing a safe shutdown.
+ */
+ if (!dead && shutdown)
+ nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
+ }
+
+ nvme_quiesce_io_queues(&dev->ctrl);
+
+ if (!dead && dev->ctrl.queue_count > 0) {
+ nvme_delete_io_queues(dev);
+ nvme_disable_ctrl(&dev->ctrl, shutdown);
+ nvme_poll_irqdisable(&dev->queues[0]);
+ }
+ nvme_suspend_io_queues(dev);
+ nvme_suspend_queue(dev, 0);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ nvme_reap_pending_cqes(dev);
+
+ nvme_cancel_tagset(&dev->ctrl);
+ nvme_cancel_admin_tagset(&dev->ctrl);
+
+ /*
+ * The driver will not be starting up queues again if shutting down,
+ * so it must flush all entered requests to their failed completion to
+ * avoid deadlocking the blk-mq hot-cpu notifier.
+ */
+ if (shutdown) {
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
+ nvme_unquiesce_admin_queue(&dev->ctrl);
+ }
+ mutex_unlock(&dev->shutdown_lock);
+}
+
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+{
+ if (!nvme_wait_reset(&dev->ctrl))
+ return -EBUSY;
+ nvme_dev_disable(dev, shutdown);
+ return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
+static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
+{
+ size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;
+
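+ /*
+ * Back per-request scatterlist arrays (NVME_MAX_SEGS entries each)
+ * with a mempool so that at least one request can always make
+ * forward progress under memory pressure.
+ */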
+ dev->iod_mempool = mempool_create_node(1,
+ mempool_kmalloc, mempool_kfree,
+ (void *)alloc_size, GFP_KERNEL,
+ dev_to_node(dev->dev));
+ if (!dev->iod_mempool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void nvme_free_tagset(struct nvme_dev *dev)
+{
+ if (dev->tagset.tags)
+ nvme_remove_io_tag_set(&dev->ctrl);
+ dev->ctrl.tagset = NULL;
+}
+
+/* pairs with nvme_pci_alloc_dev */
+static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ nvme_free_tagset(dev);
+ put_device(dev->dev);
+ kfree(dev->queues);
+ kfree(dev);
+}
+
+static void nvme_reset_work(struct work_struct *work)
+{
+ struct nvme_dev *dev =
+ container_of(work, struct nvme_dev, ctrl.reset_work);
+ bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
+ int result;
+
+ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
+ dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+ dev->ctrl.state);
+ result = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * If we're called to reset a live controller first shut it down before
+ * moving on.
+ */
+ if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
+ nvme_dev_disable(dev, false);
+ nvme_sync_queues(&dev->ctrl);
+
+ mutex_lock(&dev->shutdown_lock);
+ result = nvme_pci_enable(dev);
+ if (result)
+ goto out_unlock;
+ nvme_unquiesce_admin_queue(&dev->ctrl);
+ mutex_unlock(&dev->shutdown_lock);
+
+ /*
+ * Use the CONNECTING state, introduced by the nvme-fc/rdma transports,
+ * to mark the initialization procedure here.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller CONNECTING\n");
+ result = -EBUSY;
+ goto out;
+ }
+
+ result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
+ if (result)
+ goto out;
+
+ nvme_dbbuf_dma_alloc(dev);
+
+ result = nvme_setup_host_mem(dev);
+ if (result < 0)
+ goto out;
+
+ result = nvme_setup_io_queues(dev);
+ if (result)
+ goto out;
+
+ /*
+ * Freeze and update the number of I/O queues as those might have
+ * changed. If there are no I/O queues left after this reset, keep the
+ * controller around but remove all namespaces.
+ */
+ if (dev->online_queues > 1) {
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ nvme_wait_freeze(&dev->ctrl);
+ nvme_pci_update_nr_queues(dev);
+ nvme_dbbuf_set(dev);
+ nvme_unfreeze(&dev->ctrl);
+ } else {
+ dev_warn(dev->ctrl.device, "IO queues lost\n");
+ nvme_mark_namespaces_dead(&dev->ctrl);
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ nvme_remove_namespaces(&dev->ctrl);
+ nvme_free_tagset(dev);
+ }
+
+ /*
+ * If only the admin queue is live, keep it for further investigation
+ * or recovery.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller live state\n");
+ result = -ENODEV;
+ goto out;
+ }
+
+ nvme_start_ctrl(&dev->ctrl);
+ return;
+
+ out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ out:
+ /*
+ * Set state to deleting now to avoid blocking nvme_wait_reset(), which
+ * may be holding this pci_dev's device lock.
+ */
+ dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
+ result);
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ nvme_dev_disable(dev, true);
+ nvme_sync_queues(&dev->ctrl);
+ nvme_mark_namespaces_dead(&dev->ctrl);
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+}
+
+static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ *val = readl(to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}
+
+static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ writel(val, to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}
+
+static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}
+
+static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+
+ return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
+}
+
+static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
+{
+ struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ dev_err(ctrl->device,
+ "VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
+ pdev->vendor, pdev->device,
+ nvme_strlen(subsys->model, sizeof(subsys->model)),
+ subsys->model, nvme_strlen(subsys->firmware_rev,
+ sizeof(subsys->firmware_rev)),
+ subsys->firmware_rev);
+}
+
+static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ return dma_pci_p2pdma_supported(dev->dev);
+}
+
+static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .name = "pcie",
+ .module = THIS_MODULE,
+ .flags = NVME_F_METADATA_SUPPORTED,
+ .dev_attr_groups = nvme_pci_dev_attr_groups,
+ .reg_read32 = nvme_pci_reg_read32,
+ .reg_write32 = nvme_pci_reg_write32,
+ .reg_read64 = nvme_pci_reg_read64,
+ .free_ctrl = nvme_pci_free_ctrl,
+ .submit_async_event = nvme_pci_submit_async_event,
+ .get_address = nvme_pci_get_address,
+ .print_device_info = nvme_pci_print_device_info,
+ .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
+};
+
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (pci_request_mem_regions(pdev, "nvme"))
+ return -ENODEV;
+
+ if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
+ goto release;
+
+ return 0;
+ release:
+ pci_release_mem_regions(pdev);
+ return -ENODEV;
+}
+
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+{
+ if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+ /*
+ * Several Samsung devices seem to drop off the PCIe bus
+ * randomly when APST is on and the deepest sleep state is used.
+ * This has been observed on a Samsung "SM951 NVMe SAMSUNG
+ * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
+ * 950 PRO 256GB", but it seems to be restricted to two Dell
+ * laptops.
+ */
+ if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
+ (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+ dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+ return NVME_QUIRK_NO_DEEPEST_PS;
+ } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+ /*
+ * Samsung SSD 960 EVO drops off the PCIe bus after system
+ * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
+ * within a few minutes after bootup on a Coffee Lake board -
+ * ASUS PRIME Z370-A.
+ */
+ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+ (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
+ dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
+ return NVME_QUIRK_NO_APST;
+ } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
+ pdev->device == 0xa808 || pdev->device == 0xa809)) ||
+ (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
+ /*
+ * Force the use of host managed nvme power settings for
+ * lowest idle power with quick resume latency on Samsung
+ * and Toshiba SSDs, based on suspend behavior observed on
+ * a Coffee Lake board (LENOVO C640).
+ */
+ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
+ dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
+ return NVME_QUIRK_SIMPLE_SUSPEND;
+ } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
+ pdev->device == 0x500f)) {
+ /*
+ * Exclude some Kingston NV1 and A2000 devices from
+ * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
+ * lot of energy with s2idle sleep on some TUXEDO platforms.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+ dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
+ dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
+ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ }
+
+ return 0;
+}
+
+static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ unsigned long quirks = id->driver_data;
+ int node = dev_to_node(&pdev->dev);
+ struct nvme_dev *dev;
+ int ret = -ENOMEM;
+
+ dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
+ mutex_init(&dev->shutdown_lock);
+
+ dev->nr_write_queues = write_queues;
+ dev->nr_poll_queues = poll_queues;
+ dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
+ dev->queues = kcalloc_node(dev->nr_allocated_queues,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
+ if (!dev->queues)
+ goto out_free_dev;
+
+ dev->dev = get_device(&pdev->dev);
+
+ quirks |= check_vendor_combination_bug(pdev);
+ if (!noacpi &&
+ !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
+ acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a BIOS workaround to ask for D3 on
+ * platforms that support kernel managed suspend.
+ */
+ dev_info(&pdev->dev,
+ "platform quirk: setting simple suspend\n");
+ quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
+ }
+ ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+ quirks);
+ if (ret)
+ goto out_put_device;
+
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ else
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
+ dma_set_max_seg_size(&pdev->dev, 0xffffffff);
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
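+ /*
+ * max_hw_sectors is in 512-byte sectors: NVME_MAX_KB_SZ << 1 converts
+ * KiB to sectors, and the byte value from dma_opt_mapping_size() is
+ * converted with >> 9.
+ */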
+ dev->ctrl.max_hw_sectors = min_t(u32,
+ NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
+
+ /*
+ * There is no support for SGLs for metadata (yet), so we are limited to
+ * a single integrity segment for the separate metadata pointer.
+ */
+ dev->ctrl.max_integrity_segments = 1;
+ return dev;
+
+out_put_device:
+ put_device(dev->dev);
+ kfree(dev->queues);
+out_free_dev:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
+static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct nvme_dev *dev;
+ int result = -ENOMEM;
+
+ dev = nvme_pci_alloc_dev(pdev, id);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ result = nvme_dev_map(dev);
+ if (result)
+ goto out_uninit_ctrl;
+
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto out_dev_unmap;
+
+ result = nvme_pci_alloc_iod_mempool(dev);
+ if (result)
+ goto out_release_prp_pools;
+
+ dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
+ result = nvme_pci_enable(dev);
+ if (result)
+ goto out_release_iod_mempool;
+
+ result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
+ &nvme_mq_admin_ops, sizeof(struct nvme_iod));
+ if (result)
+ goto out_disable;
+
+ /*
+ * Mark the controller as connecting before sending admin commands to
+ * allow the timeout handler to do the right thing.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller CONNECTING\n");
+ result = -EBUSY;
+ goto out_disable;
+ }
+
+ result = nvme_init_ctrl_finish(&dev->ctrl, false);
+ if (result)
+ goto out_disable;
+
+ nvme_dbbuf_dma_alloc(dev);
+
+ result = nvme_setup_host_mem(dev);
+ if (result < 0)
+ goto out_disable;
+
+ result = nvme_setup_io_queues(dev);
+ if (result)
+ goto out_disable;
+
+ if (dev->online_queues > 1) {
+ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+ nvme_dbbuf_set(dev);
+ }
+
+ if (!dev->ctrl.tagset)
+ dev_warn(dev->ctrl.device, "IO queues not created\n");
+
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller live state\n");
+ result = -ENODEV;
+ goto out_disable;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ nvme_start_ctrl(&dev->ctrl);
+ nvme_put_ctrl(&dev->ctrl);
+ flush_work(&dev->ctrl.scan_work);
+ return 0;
+
+out_disable:
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ nvme_dev_disable(dev, true);
+ nvme_free_host_mem(dev);
+ nvme_dev_remove_admin(dev);
+ nvme_dbbuf_dma_free(dev);
+ nvme_free_queues(dev, 0);
+out_release_iod_mempool:
+ mempool_destroy(dev->iod_mempool);
+out_release_prp_pools:
+ nvme_release_prp_pools(dev);
+out_dev_unmap:
+ nvme_dev_unmap(dev);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&dev->ctrl);
+ nvme_put_ctrl(&dev->ctrl);
+ return result;
+}
+
+static void nvme_reset_prepare(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ /*
+ * We don't need to check the return value from waiting for the reset
+ * state as pci_dev device lock is held, making it impossible to race
+ * with ->remove().
+ */
+ nvme_disable_prepare_reset(dev, false);
+ nvme_sync_queues(&dev->ctrl);
+}
+
+static void nvme_reset_done(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ if (!nvme_try_sched_reset(&dev->ctrl))
+ flush_work(&dev->ctrl.reset_work);
+}
+
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ nvme_disable_prepare_reset(dev, true);
+}
+
+/*
+ * The driver's remove may be called on a device in a partially initialized
+ * state. This function must not have any dependencies on the device state in
+ * order to proceed.
+ */
+static void nvme_remove(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ pci_set_drvdata(pdev, NULL);
+
+ if (!pci_device_is_present(pdev)) {
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+ nvme_dev_disable(dev, true);
+ }
+
+ flush_work(&dev->ctrl.reset_work);
+ nvme_stop_ctrl(&dev->ctrl);
+ nvme_remove_namespaces(&dev->ctrl);
+ nvme_dev_disable(dev, true);
+ nvme_free_host_mem(dev);
+ nvme_dev_remove_admin(dev);
+ nvme_dbbuf_dma_free(dev);
+ nvme_free_queues(dev, 0);
+ mempool_destroy(dev->iod_mempool);
+ nvme_release_prp_pools(dev);
+ nvme_dev_unmap(dev);
+ nvme_uninit_ctrl(&dev->ctrl);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
+{
+ return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
+}
+
+static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
+{
+ return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
+}
+
+static int nvme_resume(struct device *dev)
+{
+ struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
+ struct nvme_ctrl *ctrl = &ndev->ctrl;
+
+ if (ndev->last_ps == U32_MAX ||
+ nvme_set_power_state(ctrl, ndev->last_ps) != 0)
+ goto reset;
+ if (ctrl->hmpre && nvme_setup_host_mem(ndev))
+ goto reset;
+
+ return 0;
+reset:
+ return nvme_try_sched_reset(ctrl);
+}
+
+static int nvme_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct nvme_dev *ndev = pci_get_drvdata(pdev);
+ struct nvme_ctrl *ctrl = &ndev->ctrl;
+ int ret = -EBUSY;
+
+ ndev->last_ps = U32_MAX;
+
+ /*
+ * The platform does not remove power for a kernel managed suspend so
+ * use host managed nvme power settings for lowest idle power if
+ * possible. This should have quicker resume latency than a full device
+ * shutdown. But if the firmware is involved after the suspend or the
+ * device does not support any non-default power states, shut down the
+ * device fully.
+ *
+ * If ASPM is not enabled for the device, shut down the device and allow
+ * the PCI bus layer to put it into D3 in order to take the PCIe link
+ * down, so as to allow the platform to achieve its minimum low-power
+ * state (which may not be possible if the link is up).
+ */
+ if (pm_suspend_via_firmware() || !ctrl->npss ||
+ !pcie_aspm_enabled(pdev) ||
+ (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
+ return nvme_disable_prepare_reset(ndev, true);
+
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ nvme_sync_queues(ctrl);
+
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
+ goto unfreeze;
+
+ /*
+ * Host memory access may not be successful in a system suspend state,
+ * but the specification allows the controller to access memory in a
+ * non-operational power state.
+ */
+ if (ndev->hmb) {
+ ret = nvme_set_host_mem(ndev, 0);
+ if (ret < 0)
+ goto unfreeze;
+ }
+
+ ret = nvme_get_power_state(ctrl, &ndev->last_ps);
+ if (ret < 0)
+ goto unfreeze;
+
+ /*
+ * A saved state prevents pci pm from generically controlling the
+ * device's power. If we're using protocol specific settings, we don't
+ * want pci interfering.
+ */
+ pci_save_state(pdev);
+
+ ret = nvme_set_power_state(ctrl, ctrl->npss);
+ if (ret < 0)
+ goto unfreeze;
+
+ if (ret) {
+ /* discard the saved state */
+ pci_load_saved_state(pdev, NULL);
+
+ /*
+ * Clearing npss forces a controller reset on resume. The
+ * correct value will be rediscovered then.
+ */
+ ret = nvme_disable_prepare_reset(ndev, true);
+ ctrl->npss = 0;
+ }
+unfreeze:
+ nvme_unfreeze(ctrl);
+ return ret;
+}
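+
+/*
+ * The power state saved in ndev->last_ps above is consumed by
+ * nvme_resume(), which restores it (and the host memory buffer, if one
+ * was in use) or falls back to scheduling a controller reset if either
+ * step fails.
+ */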
+
+static int nvme_simple_suspend(struct device *dev)
+{
+ struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
+
+ return nvme_disable_prepare_reset(ndev, true);
+}
+
+static int nvme_simple_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct nvme_dev *ndev = pci_get_drvdata(pdev);
+
+ return nvme_try_sched_reset(&ndev->ctrl);
+}
+
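+/*
+ * suspend/resume cover system sleep and try to preserve the controller's
+ * power state where possible; the hibernation entry points (freeze/thaw
+ * and poweroff/restore) always take the simple disable-and-reset path.
+ */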
+static const struct dev_pm_ops nvme_dev_pm_ops = {
+ .suspend = nvme_suspend,
+ .resume = nvme_resume,
+ .freeze = nvme_simple_suspend,
+ .thaw = nvme_simple_resume,
+ .poweroff = nvme_simple_suspend,
+ .restore = nvme_simple_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ /*
+ * A frozen channel requires a reset. When detected, this method will
+ * shut down the controller to quiesce. The controller will be restarted
+ * after the slot reset through driver's slot_reset callback.
+ */
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(dev->ctrl.device,
+ "frozen state error detected, reset controller\n");
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+ nvme_dev_disable(dev, true);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ nvme_dev_disable(dev, false);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(dev->ctrl.device,
+ "failure state error detected, request disconnect\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ dev_info(dev->ctrl.device, "restart after slot reset\n");
+ pci_restore_state(pdev);
+ if (!nvme_try_sched_reset(&dev->ctrl))
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void nvme_error_resume(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ flush_work(&dev->ctrl.reset_work);
+}
+
+static const struct pci_error_handlers nvme_err_handler = {
+ .error_detected = nvme_error_detected,
+ .slot_reset = nvme_slot_reset,
+ .resume = nvme_error_resume,
+ .reset_prepare = nvme_reset_prepare,
+ .reset_done = nvme_reset_done,
+};
+
+static const struct pci_device_id nvme_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DEALLOCATE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DEALLOCATE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DEALLOCATE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DEALLOCATE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_MEDIUM_PRIO_SQ |
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_IDENTIFY_CNS |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_NS_DESC_LIST, },
+ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+ { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */
+ .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
+ { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ .driver_data = NVME_QUIRK_BOGUS_NID |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+ NVME_QUIRK_128_BYTES_SQES |
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN |
+ NVME_QUIRK_IDENTIFY_CNS },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+ .name = "nvme",
+ .id_table = nvme_id_table,
+ .probe = nvme_probe,
+ .remove = nvme_remove,
+ .shutdown = nvme_shutdown,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &nvme_dev_pm_ops,
+#endif
+ },
+ .sriov_configure = pci_sriov_configure_simple,
+ .err_handler = &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+ BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+ BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
+ BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
+ BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);
+
+ return pci_register_driver(&nvme_driver);
+}
+
+static void __exit nvme_exit(void)
+{
+ pci_unregister_driver(&nvme_driver);
+ flush_workqueue(nvme_wq);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+module_init(nvme_init);
+module_exit(nvme_exit);
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
new file mode 100644
index 0000000000..391b1465eb
--- /dev/null
+++ b/drivers/nvme/host/pr.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015 Intel Corporation
+ * Keith Busch <kbusch@kernel.org>
+ */
+#include <linux/blkdev.h>
+#include <linux/pr.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
+{
+ switch (type) {
+ case PR_WRITE_EXCLUSIVE:
+ return NVME_PR_WRITE_EXCLUSIVE;
+ case PR_EXCLUSIVE_ACCESS:
+ return NVME_PR_EXCLUSIVE_ACCESS;
+ case PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
+ case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
+ case PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
+ case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
+ }
+
+ return 0;
+}
+
+static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
+{
+ switch (type) {
+ case NVME_PR_WRITE_EXCLUSIVE:
+ return PR_WRITE_EXCLUSIVE;
+ case NVME_PR_EXCLUSIVE_ACCESS:
+ return PR_EXCLUSIVE_ACCESS;
+ case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return PR_WRITE_EXCLUSIVE_REG_ONLY;
+ case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return PR_EXCLUSIVE_ACCESS_REG_ONLY;
+ case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return PR_WRITE_EXCLUSIVE_ALL_REGS;
+ case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return PR_EXCLUSIVE_ACCESS_ALL_REGS;
+ }
+
+ return 0;
+}
+
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+ struct nvme_command *c, void *data, unsigned int data_len)
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EWOULDBLOCK;
+
+ if (ns) {
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+ void *data, unsigned int data_len)
+{
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
+}
+
+static int nvme_sc_to_pr_err(int nvme_sc)
+{
+ if (nvme_is_path_error(nvme_sc))
+ return PR_STS_PATH_FAILED;
+
+ switch (nvme_sc) {
+ case NVME_SC_SUCCESS:
+ return PR_STS_SUCCESS;
+ case NVME_SC_RESERVATION_CONFLICT:
+ return PR_STS_RESERVATION_CONFLICT;
+ case NVME_SC_ONCS_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ return -EINVAL;
+ default:
+ return PR_STS_IOERR;
+ }
+}
+
+static int nvme_send_pr_command(struct block_device *bdev,
+ struct nvme_command *c, void *data, unsigned int data_len)
+{
+ if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+ bdev->bd_disk->fops == &nvme_ns_head_ops)
+ return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
+
+ return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
+ data_len);
+}
+
+static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
+ u64 key, u64 sa_key, u8 op)
+{
+ struct nvme_command c = { };
+ u8 data[16] = { 0, };
+ int ret;
+
+ put_unaligned_le64(key, &data[0]);
+ put_unaligned_le64(sa_key, &data[8]);
+
+ c.common.opcode = op;
+ c.common.cdw10 = cpu_to_le32(cdw10);
+
+ ret = nvme_send_pr_command(bdev, &c, data, sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ return nvme_sc_to_pr_err(ret);
+}
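+
+/*
+ * The cdw10 values built by the helpers below pack the reservation
+ * fields from the NVMe base specification: bits 2:0 select the action
+ * (register/replace, acquire/preempt, release/clear), bit 3 is IEKEY
+ * (ignore existing key), bits 15:8 carry the reservation type for
+ * acquire and release, and bits 31:30 request persist through power
+ * loss for register.
+ */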
+
+static int nvme_pr_register(struct block_device *bdev, u64 old,
+ u64 new, unsigned flags)
+{
+ u32 cdw10;
+
+ if (flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+
+ cdw10 = old ? 2 : 0;
+ cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
+ cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
+ return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
+}
+
+static int nvme_pr_reserve(struct block_device *bdev, u64 key,
+ enum pr_type type, unsigned flags)
+{
+ u32 cdw10;
+
+ if (flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+
+ cdw10 = nvme_pr_type_from_blk(type) << 8;
+ cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
+ enum pr_type type, bool abort)
+{
+ u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1);
+
+ return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_clear(struct block_device *bdev, u64 key)
+{
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+ u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static int nvme_pr_resv_report(struct block_device *bdev, void *data,
+ u32 data_len, bool *eds)
+{
+ struct nvme_command c = { };
+ int ret;
+
+ c.common.opcode = nvme_cmd_resv_report;
+ c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
+ c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
+ *eds = true;
+
+retry:
+ ret = nvme_send_pr_command(bdev, &c, data, data_len);
+ if (ret == NVME_SC_HOST_ID_INCONSIST &&
+ c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
+ c.common.cdw11 = 0;
+ *eds = false;
+ goto retry;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return nvme_sc_to_pr_err(ret);
+}
+
+static int nvme_pr_read_keys(struct block_device *bdev,
+ struct pr_keys *keys_info)
+{
+ u32 rse_len, num_keys = keys_info->num_keys;
+ struct nvme_reservation_status_ext *rse;
+ int ret, i;
+ bool eds;
+
+ /*
+ * Assume we are using 128-bit host IDs and allocate a buffer large
+ * enough to get enough keys to fill the return keys buffer.
+ */
+ rse_len = struct_size(rse, regctl_eds, num_keys);
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+ ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
+ if (ret)
+ goto free_rse;
+
+ keys_info->generation = le32_to_cpu(rse->gen);
+ keys_info->num_keys = get_unaligned_le16(&rse->regctl);
+
+ num_keys = min(num_keys, keys_info->num_keys);
+ for (i = 0; i < num_keys; i++) {
+ if (eds) {
+ keys_info->keys[i] =
+ le64_to_cpu(rse->regctl_eds[i].rkey);
+ } else {
+ struct nvme_reservation_status *rs;
+
+ rs = (struct nvme_reservation_status *)rse;
+ keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
+ }
+ }
+
+free_rse:
+ kfree(rse);
+ return ret;
+}
+
+static int nvme_pr_read_reservation(struct block_device *bdev,
+ struct pr_held_reservation *resv)
+{
+ struct nvme_reservation_status_ext tmp_rse, *rse;
+ int ret, i, num_regs;
+ u32 rse_len;
+ bool eds;
+
+get_num_regs:
+ /*
+ * Get the number of registrations so we know how big to allocate
+ * the response buffer.
+ */
+ ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
+ if (ret)
+ return ret;
+
+ num_regs = get_unaligned_le16(&tmp_rse.regctl);
+ if (!num_regs) {
+ resv->generation = le32_to_cpu(tmp_rse.gen);
+ return 0;
+ }
+
+ rse_len = struct_size(rse, regctl_eds, num_regs);
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+ ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
+ if (ret)
+ goto free_rse;
+
+ if (num_regs != get_unaligned_le16(&rse->regctl)) {
+ kfree(rse);
+ goto get_num_regs;
+ }
+
+ resv->generation = le32_to_cpu(rse->gen);
+ resv->type = block_pr_type_from_nvme(rse->rtype);
+
+ for (i = 0; i < num_regs; i++) {
+ if (eds) {
+ if (rse->regctl_eds[i].rcsts) {
+ resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
+ break;
+ }
+ } else {
+ struct nvme_reservation_status *rs;
+
+ rs = (struct nvme_reservation_status *)rse;
+ if (rs->regctl_ds[i].rcsts) {
+ resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
+ break;
+ }
+ }
+ }
+
+free_rse:
+ kfree(rse);
+ return ret;
+}
+
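+/*
+ * The pr_ops below are reached through the block layer's persistent
+ * reservation ioctls; IOC_PR_REGISTER, for example, ends up in
+ * nvme_pr_register().  A minimal userspace sketch (device path and key
+ * value are hypothetical):
+ *
+ *	struct pr_registration reg = { .old_key = 0, .new_key = 0x1234 };
+ *	int fd = open("/dev/nvme0n1", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, IOC_PR_REGISTER, &reg) < 0)
+ *		perror("IOC_PR_REGISTER");
+ *
+ * requiring <fcntl.h>, <sys/ioctl.h> and <linux/pr.h>.
+ */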
+const struct pr_ops nvme_pr_ops = {
+ .pr_register = nvme_pr_register,
+ .pr_reserve = nvme_pr_reserve,
+ .pr_release = nvme_pr_release,
+ .pr_preempt = nvme_pr_preempt,
+ .pr_clear = nvme_pr_clear,
+ .pr_read_keys = nvme_pr_read_keys,
+ .pr_read_reservation = nvme_pr_read_reservation,
+};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
new file mode 100644
index 0000000000..c04317a966
--- /dev/null
+++ b/drivers/nvme/host/rdma.c
@@ -0,0 +1,2397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <rdma/mr_pool.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/nvme.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <linux/nvme-rdma.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+
+#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 seconds */
+
+#define NVME_RDMA_MAX_SEGMENTS 256
+
+#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
+
+#define NVME_RDMA_DATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
+#define NVME_RDMA_METADATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
+
+struct nvme_rdma_device {
+ struct ib_device *dev;
+ struct ib_pd *pd;
+ struct kref ref;
+ struct list_head entry;
+ unsigned int num_inline_segments;
+};
+
+struct nvme_rdma_qe {
+ struct ib_cqe cqe;
+ void *data;
+ u64 dma;
+};
+
+struct nvme_rdma_sgl {
+ int nents;
+ struct sg_table sg_table;
+};
+
+struct nvme_rdma_queue;
+struct nvme_rdma_request {
+ struct nvme_request req;
+ struct ib_mr *mr;
+ struct nvme_rdma_qe sqe;
+ union nvme_result result;
+ __le16 status;
+ refcount_t ref;
+ struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
+ u32 num_sge;
+ struct ib_reg_wr reg_wr;
+ struct ib_cqe reg_cqe;
+ struct nvme_rdma_queue *queue;
+ struct nvme_rdma_sgl data_sgl;
+ struct nvme_rdma_sgl *metadata_sgl;
+ bool use_sig_mr;
+};
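+
+/*
+ * Each request PDU is followed in the same allocation by
+ * NVME_RDMA_DATA_SGL_SIZE bytes of inline data scatterlist and, on
+ * PI-capable queues, by a struct nvme_rdma_sgl plus
+ * NVME_RDMA_METADATA_SGL_SIZE bytes for metadata; see
+ * nvme_rdma_init_request() and nvme_rdma_alloc_tag_set() for the
+ * matching offsets.
+ */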
+
+enum nvme_rdma_queue_flags {
+ NVME_RDMA_Q_ALLOCATED = 0,
+ NVME_RDMA_Q_LIVE = 1,
+ NVME_RDMA_Q_TR_READY = 2,
+};
+
+struct nvme_rdma_queue {
+ struct nvme_rdma_qe *rsp_ring;
+ int queue_size;
+ size_t cmnd_capsule_len;
+ struct nvme_rdma_ctrl *ctrl;
+ struct nvme_rdma_device *device;
+ struct ib_cq *ib_cq;
+ struct ib_qp *qp;
+
+ unsigned long flags;
+ struct rdma_cm_id *cm_id;
+ int cm_error;
+ struct completion cm_done;
+ bool pi_support;
+ int cq_size;
+ struct mutex queue_lock;
+};
+
+struct nvme_rdma_ctrl {
+ /* read only in the hot path */
+ struct nvme_rdma_queue *queues;
+
+ /* other member variables */
+ struct blk_mq_tag_set tag_set;
+ struct work_struct err_work;
+
+ struct nvme_rdma_qe async_event_sqe;
+
+ struct delayed_work reconnect_work;
+
+ struct list_head list;
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct nvme_rdma_device *device;
+
+ u32 max_fr_pages;
+
+ struct sockaddr_storage addr;
+ struct sockaddr_storage src_addr;
+
+ struct nvme_ctrl ctrl;
+ bool use_inline_data;
+ u32 io_queues[HCTX_MAX_TYPES];
+};
+
+static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
+}
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static LIST_HEAD(nvme_rdma_ctrl_list);
+static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
+
+/*
+ * Disabling this option makes small I/O go faster, but is fundamentally
+ * unsafe. With it turned off we will have to register a global rkey that
+ * allows read and write access to all physical memory.
+ */
+static bool register_always = true;
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+ "Use memory registration even for contiguous memory regions");
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvme_rdma_complete_rq(struct request *rq);
+
+static const struct blk_mq_ops nvme_rdma_mq_ops;
+static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
+
+static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
+{
+ return nvme_rdma_queue_idx(queue) >
+ queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ queue->ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
+{
+ return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
+ kfree(qe->data);
+}
+
+static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ qe->data = kzalloc(capsule_size, GFP_KERNEL);
+ if (!qe->data)
+ return -ENOMEM;
+
+ qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
+ if (ib_dma_mapping_error(ibdev, qe->dma)) {
+ kfree(qe->data);
+ qe->data = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nvme_rdma_free_ring(struct ib_device *ibdev,
+ struct nvme_rdma_qe *ring, size_t ib_queue_size,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ib_queue_size; i++)
+ nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
+ kfree(ring);
+}
+
+static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
+ size_t ib_queue_size, size_t capsule_size,
+ enum dma_data_direction dir)
+{
+ struct nvme_rdma_qe *ring;
+ int i;
+
+ ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ /*
+ * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
+ * lifetime. It's safe, since any change in the underlying RDMA device
+ * will issue error recovery and queue re-creation.
+ */
+ for (i = 0; i < ib_queue_size; i++) {
+ if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
+ goto out_free_ring;
+ }
+
+ return ring;
+
+out_free_ring:
+ nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
+ return NULL;
+}
+
+static void nvme_rdma_qp_event(struct ib_event *event, void *context)
+{
+ pr_debug("QP event %s (%d)\n",
+ ib_event_msg(event->event), event->event);
+
+}
+
+static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible(&queue->cm_done);
+ if (ret)
+ return ret;
+ WARN_ON_ONCE(queue->cm_error > 0);
+ return queue->cm_error;
+}
+
+static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
+{
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_qp_init_attr init_attr;
+ int ret;
+
+ memset(&init_attr, 0, sizeof(init_attr));
+ init_attr.event_handler = nvme_rdma_qp_event;
+ /* +1 for drain */
+ init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = queue->queue_size + 1;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = queue->ib_cq;
+ init_attr.recv_cq = queue->ib_cq;
+ if (queue->pi_support)
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+ init_attr.qp_context = queue;
+
+ ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
+
+ queue->qp = queue->cm_id->qp;
+ return ret;
+}
+
+static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ kfree(req->sqe.data);
+}
+
+static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
+ req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
+ if (!req->sqe.data)
+ return -ENOMEM;
+
+ /* metadata nvme_rdma_sgl struct is located after command's data SGL */
+ if (queue->pi_support)
+ req->metadata_sgl = (void *)nvme_req(rq) +
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+
+ req->queue = queue;
+ nvme_req(rq)->cmd = req->sqe.data;
+
+ return 0;
+}
+
+static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
+ struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static void nvme_rdma_free_dev(struct kref *ref)
+{
+ struct nvme_rdma_device *ndev =
+ container_of(ref, struct nvme_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ ib_dealloc_pd(ndev->pd);
+ kfree(ndev);
+}
+
+static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
+{
+ kref_put(&dev->ref, nvme_rdma_free_dev);
+}
+
+static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+static struct nvme_rdma_device *
+nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvme_rdma_device *ndev;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->dev->node_guid == cm_id->device->node_guid &&
+ nvme_rdma_dev_get(ndev))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ ndev->dev = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->dev,
+ register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (!(ndev->dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ dev_err(&ndev->dev->dev,
+ "Memory registrations not supported.\n");
+ goto out_free_pd;
+ }
+
+ ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
+ ndev->dev->attrs.max_send_sge - 1);
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ return ndev;
+
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
+{
+ if (nvme_rdma_poll_queue(queue))
+ ib_free_cq(queue->ib_cq);
+ else
+ ib_cq_pool_put(queue->ib_cq, queue->cq_size);
+}
+
+static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
+
+ if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+ return;
+
+ dev = queue->device;
+ ibdev = dev->dev;
+
+ if (queue->pi_support)
+ ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+ /*
+ * The cm_id object might have been destroyed during the RDMA connection
+ * establishment error flow to avoid getting other cma events; thus the
+ * destruction of the QP shouldn't use the rdma_cm API.
+ */
+ ib_destroy_qp(queue->qp);
+ nvme_rdma_free_cq(queue);
+
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+
+ nvme_rdma_dev_put(dev);
+}
+
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
+{
+ u32 max_page_list_len;
+
+ if (pi_support)
+ max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+ else
+ max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
+}
+
+static int nvme_rdma_create_cq(struct ib_device *ibdev,
+ struct nvme_rdma_queue *queue)
+{
+ int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
+
+ /*
+ * Spread I/O queue completion vectors according to their queue index.
+ * Admin queues can always go on completion vector 0.
+ */
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
+
+ /* Polling queues need direct cq polling context */
+ if (nvme_rdma_poll_queue(queue))
+ queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
+ comp_vector, IB_POLL_DIRECT);
+ else
+ queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
+ comp_vector, IB_POLL_SOFTIRQ);
+
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
+{
+ struct ib_device *ibdev;
+ const int send_wr_factor = 3; /* MR, SEND, INV */
+ const int cq_factor = send_wr_factor + 1; /* + RECV */
+ int ret, pages_per_mr;
+
+ queue->device = nvme_rdma_find_get_device(queue->cm_id);
+ if (!queue->device) {
+ dev_err(queue->cm_id->device->dev.parent,
+ "no client data found!\n");
+ return -ECONNREFUSED;
+ }
+ ibdev = queue->device->dev;
+
+ /* +1 for ib_drain_qp */
+ queue->cq_size = cq_factor * queue->queue_size + 1;
+
+ ret = nvme_rdma_create_cq(ibdev, queue);
+ if (ret)
+ goto out_put_dev;
+
+ ret = nvme_rdma_create_qp(queue, send_wr_factor);
+ if (ret)
+ goto out_destroy_ib_cq;
+
+ queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+ if (!queue->rsp_ring) {
+ ret = -ENOMEM;
+ goto out_destroy_qp;
+ }
+
+ /*
+ * Currently we don't use SG_GAPS MRs, so if the first entry is
+ * misaligned we'll end up using two entries for a single data page,
+ * so one additional entry is required.
+ */
+ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+ queue->queue_size,
+ IB_MR_TYPE_MEM_REG,
+ pages_per_mr, 0);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize MR pool sized %d for QID %d\n",
+ queue->queue_size, nvme_rdma_queue_idx(queue));
+ goto out_destroy_ring;
+ }
+
+ if (queue->pi_support) {
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
+ queue->queue_size, IB_MR_TYPE_INTEGRITY,
+ pages_per_mr, pages_per_mr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize PI MR pool sized %d for QID %d\n",
+ queue->queue_size, nvme_rdma_queue_idx(queue));
+ goto out_destroy_mr_pool;
+ }
+ }
+
+ set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
+ return 0;
+
+out_destroy_mr_pool:
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+out_destroy_ring:
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+out_destroy_qp:
+ rdma_destroy_qp(queue->cm_id);
+out_destroy_ib_cq:
+ nvme_rdma_free_cq(queue);
+out_put_dev:
+ nvme_rdma_dev_put(queue->device);
+ return ret;
+}
+
+static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
+ int idx, size_t queue_size)
+{
+ struct nvme_rdma_queue *queue;
+ struct sockaddr *src_addr = NULL;
+ int ret;
+
+ queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
+ queue->ctrl = ctrl;
+ if (idx && ctrl->ctrl.max_integrity_segments)
+ queue->pi_support = true;
+ else
+ queue->pi_support = false;
+ init_completion(&queue->cm_done);
+
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+ queue->queue_size = queue_size;
+
+ queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(queue->cm_id)) {
+ dev_info(ctrl->ctrl.device,
+ "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
+ }
+
+ if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
+ src_addr = (struct sockaddr *)&ctrl->src_addr;
+
+ queue->cm_error = -ETIMEDOUT;
+ ret = rdma_resolve_addr(queue->cm_id, src_addr,
+ (struct sockaddr *)&ctrl->addr,
+ NVME_RDMA_CM_TIMEOUT_MS);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ ret = nvme_rdma_wait_for_cm(queue);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma connection establishment failed (%d)\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
+
+ return 0;
+
+out_destroy_cm_id:
+ rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
+ return ret;
+}
+
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
+static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
+}
+
+static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+{
+ if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
+ rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
+ mutex_destroy(&queue->queue_lock);
+}
+
+static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++)
+ nvme_rdma_free_queue(&ctrl->queues[i]);
+}
+
+static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++)
+ nvme_rdma_stop_queue(&ctrl->queues[i]);
+}
+
+static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
+{
+ struct nvme_rdma_queue *queue = &ctrl->queues[idx];
+ int ret;
+
+ if (idx)
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
+ else
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+
+ if (!ret) {
+ set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
+ } else {
+ if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ dev_info(ctrl->ctrl.device,
+ "failed to connect queue: %d ret=%d\n", idx, ret);
+ }
+ return ret;
+}
+
+static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
+ int first, int last)
+{
+ int i, ret = 0;
+
+ for (i = first; i < last; i++) {
+ ret = nvme_rdma_start_queue(ctrl, i);
+ if (ret)
+ goto out_stop_queues;
+ }
+
+ return 0;
+
+out_stop_queues:
+ for (i--; i >= first; i--)
+ nvme_rdma_stop_queue(&ctrl->queues[i]);
+ return ret;
+}
+
+static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
+ int i, ret;
+
+ nr_io_queues = nvmf_nr_io_queues(opts);
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret)
+ return ret;
+
+ if (nr_io_queues == 0) {
+ dev_err(ctrl->ctrl.device,
+ "unable to set any I/O queues\n");
+ return -ENOMEM;
+ }
+
+ ctrl->ctrl.queue_count = nr_io_queues + 1;
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", nr_io_queues);
+
+ nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ ret = nvme_rdma_alloc_queue(ctrl, i,
+ ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_free_queues;
+ }
+
+ return 0;
+
+out_free_queues:
+ for (i--; i >= 1; i--)
+ nvme_rdma_free_queue(&ctrl->queues[i]);
+
+ return ret;
+}
+
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
+{
+ unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+
+ if (ctrl->max_integrity_segments)
+ cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
+
+ return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+ &nvme_rdma_mq_ops,
+ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ cmd_size);
+}
+
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ if (ctrl->async_event_sqe.data) {
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
+ }
+ nvme_rdma_free_queue(&ctrl->queues[0]);
+}
+
+static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ bool new)
+{
+ bool pi_capable = false;
+ int error;
+
+ error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+ if (error)
+ return error;
+
+ ctrl->device = ctrl->queues[0].device;
+ ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
+
+ /* T10-PI support */
+ if (ctrl->device->dev->attrs.kernel_cap_flags &
+ IBK_INTEGRITY_HANDOVER)
+ pi_capable = true;
+
+ ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
+ pi_capable);
+
+ /*
+ * Bind the async event SQE DMA mapping to the admin queue lifetime.
+ * It's safe, since any change in the underlying RDMA device will issue
+ * error recovery and queue re-creation.
+ */
+ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ if (error)
+ goto out_free_queue;
+
+ if (new) {
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+ &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE);
+ if (error)
+ goto out_free_async_qe;
+
+ }
+
+ error = nvme_rdma_start_queue(ctrl, 0);
+ if (error)
+ goto out_remove_admin_tag_set;
+
+ error = nvme_enable_ctrl(&ctrl->ctrl);
+ if (error)
+ goto out_stop_queue;
+
+ ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+ ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+ if (pi_capable)
+ ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
+ else
+ ctrl->ctrl.max_integrity_segments = 0;
+
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+
+ error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
+ if (error)
+ goto out_quiesce_queue;
+
+ return 0;
+
+out_quiesce_queue:
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+out_stop_queue:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+out_remove_admin_tag_set:
+ if (new)
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+out_free_async_qe:
+ if (ctrl->async_event_sqe.data) {
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
+ }
+out_free_queue:
+ nvme_rdma_free_queue(&ctrl->queues[0]);
+ return error;
+}
+
+static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+{
+ int ret, nr_queues;
+
+ ret = nvme_rdma_alloc_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ if (new) {
+ ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
+ if (ret)
+ goto out_free_io_queues;
+ }
+
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * queue number might have changed.
+ */
+ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
+ ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
+ if (ret)
+ goto out_cleanup_tagset;
+
+ if (!new) {
+ nvme_start_freeze(&ctrl->ctrl);
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
+ if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ nvme_unfreeze(&ctrl->ctrl);
+ goto out_wait_freeze_timed_out;
+ }
+ blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
+ ctrl->ctrl.queue_count - 1);
+ nvme_unfreeze(&ctrl->ctrl);
+ }
+
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
+ ctrl->tag_set.nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
+ return 0;
+
+out_wait_freeze_timed_out:
+ nvme_quiesce_io_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+out_cleanup_tagset:
+ nvme_cancel_tagset(&ctrl->ctrl);
+ if (new)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+out_free_io_queues:
+ nvme_rdma_free_io_queues(ctrl);
+ return ret;
+}
+
+static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+{
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+ if (remove) {
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_destroy_admin_queue(ctrl);
+}
+
+static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+{
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_quiesce_io_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ nvme_cancel_tagset(&ctrl->ctrl);
+ if (remove) {
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_free_io_queues(ctrl);
+ }
+}
+
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ flush_work(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
+static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl->queues);
+ kfree(ctrl);
+}
+
+static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
+{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ /* If we are resetting/deleting then do nothing */
+ if (state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
+ return;
+ }
+
+ if (nvmf_should_reconnect(&ctrl->ctrl)) {
+ dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
+ ctrl->ctrl.opts->reconnect_delay);
+ queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
+ ctrl->ctrl.opts->reconnect_delay * HZ);
+ } else {
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+}
+
+static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
+{
+ int ret;
+ bool changed;
+
+ ret = nvme_rdma_configure_admin_queue(ctrl, new);
+ if (ret)
+ return ret;
+
+ if (ctrl->ctrl.icdoff) {
+ ret = -EOPNOTSUPP;
+ dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ ret = -EOPNOTSUPP;
+ dev_err(ctrl->ctrl.device,
+ "Mandatory keyed sgls are not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
+ }
+
+ if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ dev_warn(ctrl->ctrl.device,
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+ ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ }
+
+ if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+ dev_warn(ctrl->ctrl.device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+ ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
+ }
+
+ if (ctrl->ctrl.sgls & (1 << 20))
+ ctrl->use_inline_data = true;
+
+ if (ctrl->ctrl.queue_count > 1) {
+ ret = nvme_rdma_configure_io_queues(ctrl, new);
+ if (ret)
+ goto destroy_admin;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ if (!changed) {
+		/*
+		 * A state change failure is OK if we started controller
+		 * deletion, unless we are still creating a new controller,
+		 * in which case we must avoid races with the teardown flow.
+		 */
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
+ ret = -EINVAL;
+ goto destroy_io;
+ }
+
+ nvme_start_ctrl(&ctrl->ctrl);
+ return 0;
+
+destroy_io:
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_quiesce_io_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ nvme_cancel_tagset(&ctrl->ctrl);
+ if (new)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
+ }
+destroy_admin:
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+ if (new)
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+ return ret;
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_rdma_ctrl, reconnect_work);
+
+ ++ctrl->ctrl.nr_reconnects;
+
+ if (nvme_rdma_setup_ctrl(ctrl, false))
+ goto requeue;
+
+ dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
+ ctrl->ctrl.nr_reconnects);
+
+ ctrl->ctrl.nr_reconnects = 0;
+
+ return;
+
+requeue:
+ dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
+ ctrl->ctrl.nr_reconnects);
+ nvme_rdma_reconnect_or_remove(ctrl);
+}
+
+static void nvme_rdma_error_recovery_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, err_work);
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
+ nvme_rdma_teardown_io_queues(ctrl, false);
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
+ nvme_rdma_teardown_admin_queue(ctrl, false);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_auth_stop(&ctrl->ctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we started ctrl delete */
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
+ return;
+ }
+
+ nvme_rdma_reconnect_or_remove(ctrl);
+}
+
+static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return;
+
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+ queue_work(nvme_reset_wq, &ctrl->err_work);
+}
+
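+/*
+ * Drop one of the two references on the request (one for the send completion
+ * and one for the receive or remote-invalidate completion); the last drop
+ * completes the block layer request.
+ */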
+static void nvme_rdma_end_request(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ if (!nvme_try_complete_req(rq, req->status, req->result))
+ nvme_rdma_complete_rq(rq);
+}
+
+static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
+ const char *op)
+{
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
+ dev_info(ctrl->ctrl.device,
+ "%s for CQE 0x%p failed with status %s (%d)\n",
+ op, wc->wr_cqe,
+ ib_wc_status_msg(wc->status), wc->status);
+ nvme_rdma_error_recovery(ctrl);
+}
+
+static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "MEMREG");
+}
+
+static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvme_rdma_request *req =
+ container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+ else
+ nvme_rdma_end_request(req);
+}
+
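+/*
+ * Post a LOCAL_INV work request to invalidate the rkey of the request's MR
+ * when the target did not invalidate it remotely (no Send with Invalidate).
+ */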
+static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .next = NULL,
+ .num_sge = 0,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+
+ req->reg_cqe.done = nvme_rdma_inv_rkey_done;
+ wr.wr_cqe = &req->reg_cqe;
+
+ return ib_post_send(queue->qp, &wr, NULL);
+}
+
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
+static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
+ struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ struct list_head *pool = &queue->qp->rdma_mrs;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return;
+
+ if (req->use_sig_mr)
+ pool = &queue->qp->sig_mrs;
+
+ if (req->mr) {
+ ib_mr_pool_put(queue->qp, pool, req->mr);
+ req->mr = NULL;
+ }
+
+ nvme_rdma_dma_unmap_req(ibdev, rq);
+}
+
+static int nvme_rdma_set_sg_null(struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = 0;
+ put_unaligned_le24(0, sg->length);
+ put_unaligned_le32(0, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
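+/*
+ * Describe the data as in-capsule (inline) data: the payload is carried in
+ * additional SGEs of the send WR and advertised with an offset SGL descriptor.
+ */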
+static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+ struct ib_sge *sge = &req->sge[1];
+ struct scatterlist *sgl;
+ u32 len = 0;
+ int i;
+
+ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
+ sge->addr = sg_dma_address(sgl);
+ sge->length = sg_dma_len(sgl);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+ len += sge->length;
+ sge++;
+ }
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
+ sg->length = cpu_to_le32(len);
+ sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
+
+ req->num_sge += count;
+ return 0;
+}
+
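+/*
+ * Describe a single DMA segment with a keyed SGL that uses the PD's global
+ * rkey, avoiding a per-I/O memory registration.
+ */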
+static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
+ put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
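+/*
+ * Register the data with a fast-registration MR from the queue pool and
+ * build a keyed SGL that asks the target to remotely invalidate the rkey.
+ */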
+static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
+ /*
+ * Align the MR to a 4K page size to match the ctrl page size and
+ * the block virtual boundary.
+ */
+ nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
+ SZ_4K);
+ if (unlikely(nr < count)) {
+ ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ req->mr = NULL;
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+ }
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_memreg_done;
+ memset(&req->reg_wr, 0, sizeof(req->reg_wr));
+ req->reg_wr.wr.opcode = IB_WR_REG_MR;
+ req->reg_wr.wr.wr_cqe = &req->reg_cqe;
+ req->reg_wr.wr.num_sge = 0;
+ req->reg_wr.mr = req->mr;
+ req->reg_wr.key = req->mr->rkey;
+ req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
+ NVME_SGL_FMT_INVALIDATE;
+
+ return 0;
+}
+
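+/*
+ * Fill one T10-DIF signature domain (wire or memory side) from the block
+ * integrity profile and the NVMe read/write command protection fields.
+ */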
+static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
+ u8 pi_type)
+{
+ u16 control = le16_to_cpu(cmd->rw.control);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no memory domain */
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+		/* Clear the PRACT bit since the HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+}
+
+static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
+{
+ *mask = 0;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
+ *mask |= IB_SIG_CHECK_REFTAG;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
+ *mask |= IB_SIG_CHECK_GUARD;
+}
+
+static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SIG");
+}
+
+static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count, int pi_count)
+{
+ struct nvme_rdma_sgl *sgl = &req->data_sgl;
+ struct ib_reg_wr *wr = &req->reg_wr;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct bio *bio = rq->bio;
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
+ nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
+ req->metadata_sgl->sg_table.sgl, pi_count, NULL,
+ SZ_4K);
+ if (unlikely(nr))
+ goto mr_put;
+
+ nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
+ req->mr->sig_attrs, ns->pi_type);
+ nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_sig_done;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ wr->wr.wr_cqe = &req->reg_cqe;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = req->mr;
+ wr->key = req->mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+
+ return 0;
+
+mr_put:
+ ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
+ req->mr = NULL;
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+}
+
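+/*
+ * Allocate and DMA-map the data scatterlist, and the metadata (integrity)
+ * scatterlist if the request carries one.
+ */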
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+ int *count, int *pi_count)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ int ret;
+
+ req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
+ ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
+ blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
+ NVME_INLINE_SG_CNT);
+ if (ret)
+ return -ENOMEM;
+
+ req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
+ req->data_sgl.sg_table.sgl);
+
+ *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
+ if (unlikely(*count <= 0)) {
+ ret = -EIO;
+ goto out_free_table;
+ }
+
+ if (blk_integrity_rq(rq)) {
+ req->metadata_sgl->sg_table.sgl =
+ (struct scatterlist *)(req->metadata_sgl + 1);
+ ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
+ blk_rq_count_integrity_sg(rq->q, rq->bio),
+ req->metadata_sgl->sg_table.sgl,
+ NVME_INLINE_METADATA_SG_CNT);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto out_unmap_sg;
+ }
+
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
+ rq->bio, req->metadata_sgl->sg_table.sgl);
+ *pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(*pi_count <= 0)) {
+ ret = -EIO;
+ goto out_free_pi_table;
+ }
+ }
+
+ return 0;
+
+out_free_pi_table:
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+out_free_table:
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ return ret;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
+ int count, ret;
+
+ req->num_sge = 1;
+ refcount_set(&req->ref, 2); /* send and recv completions */
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+ if (unlikely(ret))
+ return ret;
+
+ if (req->use_sig_mr) {
+ ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
+ goto out;
+ }
+
+ if (count <= dev->num_inline_segments) {
+ if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+ queue->ctrl->use_inline_data &&
+ blk_rq_payload_bytes(rq) <=
+ nvme_rdma_inline_data_size(queue)) {
+ ret = nvme_rdma_map_sg_inline(queue, req, c, count);
+ goto out;
+ }
+
+ if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ ret = nvme_rdma_map_sg_single(queue, req, c);
+ goto out;
+ }
+ }
+
+ ret = nvme_rdma_map_sg_fr(queue, req, c, count);
+out:
+ if (unlikely(ret))
+ goto out_dma_unmap_req;
+
+ return 0;
+
+out_dma_unmap_req:
+ nvme_rdma_dma_unmap_req(ibdev, rq);
+ return ret;
+}
+
+static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_request *req =
+ container_of(qe, struct nvme_rdma_request, sqe);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SEND");
+ else
+ nvme_rdma_end_request(req);
+}
+
+static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
+ struct ib_send_wr *first)
+{
+ struct ib_send_wr wr;
+ int ret;
+
+ sge->addr = qe->dma;
+ sge->length = sizeof(struct nvme_command);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = sge;
+ wr.num_sge = num_sge;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ if (first)
+ first->next = &wr;
+ else
+ first = &wr;
+
+ ret = ib_post_send(queue->qp, first, NULL);
+ if (unlikely(ret)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe)
+{
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+ int ret;
+
+ list.addr = qe->dma;
+ list.length = sizeof(struct nvme_completion);
+ list.lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_recv_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ ret = ib_post_recv(queue->qp, &wr, NULL);
+ if (unlikely(ret)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
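+/* Queue index 0 is the admin queue; I/O queues map to the shared I/O tag set. */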
+static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
+{
+ u32 queue_idx = nvme_rdma_queue_idx(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+ struct ib_device *dev = queue->device->dev;
+ struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
+ struct nvme_command *cmd = sqe->data;
+ struct ib_sge sge;
+ int ret;
+
+ ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->common.opcode = nvme_admin_async_event;
+ cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+ cmd->common.flags |= NVME_CMD_SGL_METABUF;
+ nvme_rdma_set_sg_null(cmd);
+
+ sqe->cqe.done = nvme_rdma_async_done;
+
+ ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
+ DMA_TO_DEVICE);
+
+ ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
+ WARN_ON_ONCE(ret);
+}
+
+static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
+ struct nvme_completion *cqe, struct ib_wc *wc)
+{
+ struct request *rq;
+ struct nvme_rdma_request *req;
+
+ rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "got bad command_id %#x on QP %#x\n",
+ cqe->command_id, queue->qp->qp_num);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ req->status = cqe->status;
+ req->result = cqe->result;
+
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+ if (unlikely(!req->mr ||
+ wc->ex.invalidate_rkey != req->mr->rkey)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Bogus remote invalidation for rkey %#x\n",
+ req->mr ? req->mr->rkey : 0);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ } else if (req->mr) {
+ int ret;
+
+ ret = nvme_rdma_inv_rkey(queue, req);
+ if (unlikely(ret < 0)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ req->mr->rkey, ret);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ /* the local invalidation completion will end the request */
+ return;
+ }
+
+ nvme_rdma_end_request(req);
+}
+
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
+ struct ib_device *ibdev = queue->device->dev;
+ struct nvme_completion *cqe = qe->data;
+ const size_t len = sizeof(struct nvme_completion);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "RECV");
+ return;
+ }
+
+ /* sanity checking for received data length */
+ if (unlikely(wc->byte_len < len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected nvme completion length(%d)\n", wc->byte_len);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+ cqe->command_id)))
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
+ else
+ nvme_rdma_process_nvme_rsp(queue, cqe, wc);
+ ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+
+ nvme_rdma_post_recv(queue, qe);
+}
+
+static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
+{
+ int ret, i;
+
+ for (i = 0; i < queue->queue_size; i++) {
+ ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
+ struct rdma_cm_event *ev)
+{
+ struct rdma_cm_id *cm_id = queue->cm_id;
+ int status = ev->status;
+ const char *rej_msg;
+ const struct nvme_rdma_cm_rej *rej_data;
+ u8 rej_data_len;
+
+ rej_msg = rdma_reject_msg(cm_id, status);
+ rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
+
+ if (rej_data && rej_data_len >= sizeof(u16)) {
+ u16 sts = le16_to_cpu(rej_data->sts);
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected: status %d (%s) nvme status %d (%s).\n",
+ status, rej_msg, sts, nvme_rdma_cm_msg(sts));
+ } else {
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected: status %d (%s).\n", status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
+ int ret;
+
+ ret = nvme_rdma_create_queue_ib(queue);
+ if (ret)
+ return ret;
+
+ if (ctrl->opts->tos >= 0)
+ rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
+ if (ret) {
+ dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
+ queue->cm_error);
+ goto out_destroy_queue;
+ }
+
+ return 0;
+
+out_destroy_queue:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_req priv = { };
+ int ret;
+
+ param.qp_num = queue->qp->qp_num;
+ param.flow_control = 1;
+
+ param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
+ /* maximum retry count */
+ param.retry_count = 7;
+ param.rnr_retry_count = 7;
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
+ /*
+ * set the admin queue depth to the minimum size
+ * specified by the Fabrics standard.
+ */
+ if (priv.qid == 0) {
+ priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
+ priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+ } else {
+		/*
+		 * The current interpretation of the Fabrics spec is that,
+		 * at minimum, hrqsize is sqsize + 1, i.e. the 1's-based
+		 * representation of sqsize.
+		 */
+ priv.hrqsize = cpu_to_le16(queue->queue_size);
+ priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ }
+
+ ret = rdma_connect_locked(queue->cm_id, &param);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "rdma_connect_locked failed (%d).\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct nvme_rdma_queue *queue = cm_id->context;
+ int cm_error = 0;
+
+ dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
+ rdma_event_msg(ev->event), ev->event,
+ ev->status, cm_id);
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_error = nvme_rdma_addr_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_error = nvme_rdma_route_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ queue->cm_error = nvme_rdma_conn_established(queue);
+ /* complete cm_done regardless of success/failure */
+ complete(&queue->cm_done);
+ return 0;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_error = nvme_rdma_conn_rejected(queue, ev);
+ break;
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "CM error event %d\n", ev->event);
+ cm_error = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "disconnect received - connection closed\n");
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /* device removal is handled via the ib_client API */
+ break;
+ default:
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected RDMA CM event (%d)\n", ev->event);
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ }
+
+ if (cm_error) {
+ queue->cm_error = cm_error;
+ complete(&queue->cm_done);
+ }
+
+ return 0;
+}
+
+static void nvme_rdma_complete_timed_out(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+
+ nvme_rdma_stop_queue(queue);
+ nvmf_complete_timed_out_request(rq);
+}
+
+static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+ rq->tag, nvme_rdma_queue_idx(queue));
+
+ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
+		/*
+		 * If we are resetting, connecting or deleting we should
+		 * complete immediately because we may block the controller
+		 * teardown or setup sequence:
+		 * - ctrl disable/shutdown fabrics requests
+		 * - connect requests
+		 * - initialization admin requests
+		 * - I/O requests that entered after unquiescing and
+		 *   the controller stopped responding
+		 *
+		 * All other requests should be cancelled by the error
+		 * recovery work, so it's fine that we fail this one here.
+		 */
+ nvme_rdma_complete_timed_out(rq);
+ return BLK_EH_DONE;
+ }
+
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
+ nvme_rdma_error_recovery(ctrl);
+ return BLK_EH_RESET_TIMER;
+}
+
+static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct request *rq = bd->rq;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_qe *sqe = &req->sqe;
+ struct nvme_command *c = nvme_req(rq)->cmd;
+ struct ib_device *dev;
+ bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
+ blk_status_t ret;
+ int err;
+
+ WARN_ON_ONCE(rq->tag < 0);
+
+ if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
+ dev = queue->device->dev;
+
+ req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
+ sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ err = ib_dma_mapping_error(dev, req->sqe.dma);
+ if (unlikely(err))
+ return BLK_STS_RESOURCE;
+
+ ib_dma_sync_single_for_cpu(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ ret = nvme_setup_cmd(ns, rq);
+ if (ret)
+ goto unmap_qe;
+
+ nvme_start_request(rq);
+
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ queue->pi_support &&
+ (c->common.opcode == nvme_cmd_write ||
+ c->common.opcode == nvme_cmd_read) &&
+ nvme_ns_has_pi(ns))
+ req->use_sig_mr = true;
+ else
+ req->use_sig_mr = false;
+
+ err = nvme_rdma_map_data(queue, rq, c);
+ if (unlikely(err < 0)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", err);
+ goto err;
+ }
+
+ sqe->cqe.done = nvme_rdma_send_done;
+
+ ib_dma_sync_single_for_device(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+ req->mr ? &req->reg_wr.wr : NULL);
+ if (unlikely(err))
+ goto err_unmap;
+
+ return BLK_STS_OK;
+
+err_unmap:
+ nvme_rdma_unmap_data(queue, rq);
+err:
+ if (err == -EIO)
+ ret = nvme_host_path_error(rq);
+ else if (err == -ENOMEM || err == -EAGAIN)
+ ret = BLK_STS_RESOURCE;
+ else
+ ret = BLK_STS_IOERR;
+ nvme_cleanup_cmd(rq);
+unmap_qe:
+ ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
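+/* Completion-queue polling entry point for queues mapped as poll queues. */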
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+
+ return ib_process_cq_direct(queue->ib_cq, -1);
+}
+
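+/*
+ * Translate T10-PI signature errors reported by the HCA for this request's
+ * MR into the corresponding NVMe protection-information status codes.
+ */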
+static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct ib_mr_status mr_status;
+ int ret;
+
+ ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ nvme_req(rq)->status = NVME_SC_INVALID_PI;
+ return;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type, mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+}
+
+static void nvme_rdma_complete_rq(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct ib_device *ibdev = queue->device->dev;
+
+ if (req->use_sig_mr)
+ nvme_rdma_check_pi_status(req);
+
+ nvme_rdma_unmap_data(queue, rq);
+ ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ nvme_complete_rq(rq);
+}
+
+static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
+
+ nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
+}
+
+static const struct blk_mq_ops nvme_rdma_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .init_request = nvme_rdma_init_request,
+ .exit_request = nvme_rdma_exit_request,
+ .init_hctx = nvme_rdma_init_hctx,
+ .timeout = nvme_rdma_timeout,
+ .map_queues = nvme_rdma_map_queues,
+ .poll = nvme_rdma_poll,
+};
+
+static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .init_request = nvme_rdma_init_request,
+ .exit_request = nvme_rdma_exit_request,
+ .init_hctx = nvme_rdma_init_admin_hctx,
+ .timeout = nvme_rdma_timeout,
+};
+
+static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+ nvme_rdma_teardown_io_queues(ctrl, shutdown);
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ nvme_disable_ctrl(&ctrl->ctrl, shutdown);
+ nvme_rdma_teardown_admin_queue(ctrl, shutdown);
+}
+
+static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
+}
+
+static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl =
+ container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
+
+ nvme_stop_ctrl(&ctrl->ctrl);
+ nvme_rdma_shutdown_ctrl(ctrl, false);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (nvme_rdma_setup_ctrl(ctrl, false))
+ goto out_fail;
+
+ return;
+
+out_fail:
+ ++ctrl->ctrl.nr_reconnects;
+ nvme_rdma_reconnect_or_remove(ctrl);
+}
+
+static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
+ .name = "rdma",
+ .module = THIS_MODULE,
+ .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .free_ctrl = nvme_rdma_free_ctrl,
+ .submit_async_event = nvme_rdma_submit_async_event,
+ .delete_ctrl = nvme_rdma_delete_ctrl,
+ .get_address = nvmf_get_address,
+ .stop_ctrl = nvme_rdma_stop_ctrl,
+};
+
+/*
+ * Fails a connection request if it matches an existing controller
+ * (association) with the same tuple:
+ * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
+ *
+ * If the local address is not specified in the request, it will match an
+ * existing controller with all the other parameters the same and no
+ * local port address specified as well.
+ *
+ * The ports don't need to be compared as they are intrinsically
+ * already matched by the port pointers supplied.
+ */
+static bool
+nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ bool found = false;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ found = nvmf_ip_options_match(&ctrl->ctrl, opts);
+ if (found)
+ break;
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ return found;
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ int ret;
+ bool changed;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ opts->trsvcid =
+ kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
+ if (!opts->trsvcid) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+ opts->mask |= NVMF_OPT_TRSVCID;
+ }
+
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
+ opts->traddr, opts->trsvcid, &ctrl->addr);
+ if (ret) {
+ pr_err("malformed address passed: %s:%s\n",
+ opts->traddr, opts->trsvcid);
+ goto out_free_ctrl;
+ }
+
+ if (opts->mask & NVMF_OPT_HOST_TRADDR) {
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
+ opts->host_traddr, NULL, &ctrl->src_addr);
+ if (ret) {
+ pr_err("malformed src address passed: %s\n",
+ opts->host_traddr);
+ goto out_free_ctrl;
+ }
+ }
+
+ if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
+ ret = -EALREADY;
+ goto out_free_ctrl;
+ }
+
+ INIT_DELAYED_WORK(&ctrl->reconnect_work,
+ nvme_rdma_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
+ INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
+
+ ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
+ opts->nr_poll_queues + 1;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_free_ctrl;
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_kfree_queues;
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+ WARN_ON_ONCE(!changed);
+
+ ret = nvme_rdma_setup_ctrl(ctrl, true);
+ if (ret)
+ goto out_uninit_ctrl;
+
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ return &ctrl->ctrl;
+
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvmf_transport_ops nvme_rdma_transport = {
+ .name = "rdma",
+ .module = THIS_MODULE,
+ .required_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+ NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
+ NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
+ NVMF_OPT_TOS,
+ .create_ctrl = nvme_rdma_create_ctrl,
+};
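+
+/*
+ * Example of creating a controller through this transport with nvme-cli
+ * (hypothetical addresses and subsystem NQN):
+ *
+ *   nvme connect -t rdma -a 192.168.1.10 -s 4420 -n nqn.2016-06.io.spdk:cnode1
+ *
+ * When no trsvcid is given, nvme_rdma_create_ctrl() defaults it to
+ * NVME_RDMA_IP_PORT.
+ */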
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ struct nvme_rdma_device *ndev;
+ bool found = false;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->dev == ib_device) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&device_list_mutex);
+
+ if (!found)
+ return;
+
+ /* Delete all controllers using this device */
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ if (ctrl->device->dev != ib_device)
+ continue;
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ flush_workqueue(nvme_delete_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+ .name = "nvme_rdma",
+ .remove = nvme_rdma_remove_one
+};
+
+static int __init nvme_rdma_init_module(void)
+{
+ int ret;
+
+ ret = ib_register_client(&nvme_rdma_ib_client);
+ if (ret)
+ return ret;
+
+ ret = nvmf_register_transport(&nvme_rdma_transport);
+ if (ret)
+ goto err_unreg_client;
+
+ return 0;
+
+err_unreg_client:
+ ib_unregister_client(&nvme_rdma_ib_client);
+ return ret;
+}
+
+static void __exit nvme_rdma_cleanup_module(void)
+{
+ struct nvme_rdma_ctrl *ctrl;
+
+ nvmf_unregister_transport(&nvme_rdma_transport);
+ ib_unregister_client(&nvme_rdma_ib_client);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
+ nvme_delete_ctrl(&ctrl->ctrl);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+ flush_workqueue(nvme_delete_wq);
+}
+
+module_init(nvme_rdma_init_module);
+module_exit(nvme_rdma_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
new file mode 100644
index 0000000000..212e1b05d2
--- /dev/null
+++ b/drivers/nvme/host/sysfs.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sysfs interface for the NVMe core driver.
+ *
+ * Copyright (c) 2011-2014, Intel Corporation.
+ */
+
+#include <linux/nvme-auth.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+static ssize_t nvme_sysfs_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = nvme_reset_ctrl_sync(ctrl);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
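+
+/*
+ * Any write to this attribute triggers a synchronous controller reset, e.g.
+ * (hypothetical controller instance):
+ *
+ *   echo 1 > /sys/class/nvme/nvme0/reset_controller
+ */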
+
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ nvme_queue_scan(ctrl);
+ return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
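+/*
+ * Namespace ID attributes may hang off either a per-path namespace gendisk or
+ * the multipath head disk; resolve both cases to the shared namespace head.
+ */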
+static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (disk->fops == &nvme_bdev_ops)
+ return nvme_get_ns_from_dev(dev)->head;
+ else
+ return disk->private_data;
+}
+
+static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ struct nvme_ns_ids *ids = &head->ids;
+ struct nvme_subsystem *subsys = head->subsys;
+ int serial_len = sizeof(subsys->serial);
+ int model_len = sizeof(subsys->model);
+
+ if (!uuid_is_null(&ids->uuid))
+ return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
+
+ if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
+
+ if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
+
+ while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
+ subsys->serial[serial_len - 1] == '\0'))
+ serial_len--;
+ while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
+ subsys->model[model_len - 1] == '\0'))
+ model_len--;
+
+ return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+ serial_len, subsys->serial, model_len, subsys->model,
+ head->ns_id);
+}
+static DEVICE_ATTR_RO(wwid);
+
+static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
+}
+static DEVICE_ATTR_RO(nguid);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
+
+	/* For backward compatibility, expose the NGUID to userspace if
+	 * we have no UUID set.
+	 */
+ if (uuid_is_null(&ids->uuid)) {
+ dev_warn_once(dev,
+ "No UUID available providing old NGUID\n");
+ return sysfs_emit(buf, "%pU\n", ids->nguid);
+ }
+ return sysfs_emit(buf, "%pU\n", &ids->uuid);
+}
+static DEVICE_ATTR_RO(uuid);
+
+static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
+}
+static DEVICE_ATTR_RO(eui);
+
+static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
+}
+static DEVICE_ATTR_RO(nsid);
+
+static struct attribute *nvme_ns_id_attrs[] = {
+ &dev_attr_wwid.attr,
+ &dev_attr_uuid.attr,
+ &dev_attr_nguid.attr,
+ &dev_attr_eui.attr,
+ &dev_attr_nsid.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+ &dev_attr_ana_grpid.attr,
+ &dev_attr_ana_state.attr,
+#endif
+ NULL,
+};
+
+static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
+
+ if (a == &dev_attr_uuid.attr) {
+ if (uuid_is_null(&ids->uuid) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return 0;
+ }
+ if (a == &dev_attr_nguid.attr) {
+ if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return 0;
+ }
+ if (a == &dev_attr_eui.attr) {
+ if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ return 0;
+ }
+#ifdef CONFIG_NVME_MULTIPATH
+ if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
+ if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
+ return 0;
+ if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
+ return 0;
+ }
+#endif
+ return a->mode;
+}
+
+static const struct attribute_group nvme_ns_id_attr_group = {
+ .attrs = nvme_ns_id_attrs,
+ .is_visible = nvme_ns_id_attrs_are_visible,
+};
+
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+ &nvme_ns_id_attr_group,
+ NULL,
+};
+
+#define nvme_show_str_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sysfs_emit(buf, "%.*s\n", \
+ (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+
+#define nvme_show_int_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sysfs_emit(buf, "%d\n", ctrl->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_int_function(cntlid);
+nvme_show_int_function(numa_node);
+nvme_show_int_function(queue_count);
+nvme_show_int_function(sqsize);
+nvme_show_int_function(kato);
+
+static ssize_t nvme_sysfs_delete(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
+ return -EBUSY;
+
+ if (device_remove_file_self(dev, attr))
+ nvme_delete_ctrl_sync(ctrl);
+ return count;
+}
+static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
+
+static ssize_t nvme_sysfs_show_transport(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->ops->name);
+}
+static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
+
+static ssize_t nvme_sysfs_show_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ static const char *const state_name[] = {
+ [NVME_CTRL_NEW] = "new",
+ [NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_RESETTING] = "resetting",
+ [NVME_CTRL_CONNECTING] = "connecting",
+ [NVME_CTRL_DELETING] = "deleting",
+		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
+ [NVME_CTRL_DEAD] = "dead",
+ };
+
+ if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
+ state_name[ctrl->state])
+ return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+
+ return sysfs_emit(buf, "unknown state\n");
+}
+
+static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
+
+static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
+}
+static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
+}
+static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
+}
+static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
+
+static ssize_t nvme_sysfs_show_address(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+
+static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (ctrl->opts->max_reconnects == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n",
+ opts->max_reconnects * opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ctrl_loss_tmo, err;
+
+ err = kstrtoint(buf, 10, &ctrl_loss_tmo);
+ if (err)
+ return -EINVAL;
+
+ if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+ else
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ return count;
+}
+static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
+
+static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->reconnect_delay == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int v;
+ int err;
+
+ err = kstrtou32(buf, 10, &v);
+ if (err)
+ return err;
+
+ ctrl->opts->reconnect_delay = v;
+ return count;
+}
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
+ nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
+
+static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->fast_io_fail_tmo == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
+}
+
+static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int fast_io_fail_tmo, err;
+
+ err = kstrtoint(buf, 10, &fast_io_fail_tmo);
+ if (err)
+ return -EINVAL;
+
+ if (fast_io_fail_tmo < 0)
+ opts->fast_io_fail_tmo = -1;
+ else
+ opts->fast_io_fail_tmo = fast_io_fail_tmo;
+ return count;
+}
+static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
+
+static ssize_t cntrltype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_CTRL_IO] = "io\n",
+ [NVME_CTRL_DISC] = "discovery\n",
+ [NVME_CTRL_ADMIN] = "admin\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->cntrltype]);
+}
+static DEVICE_ATTR_RO(cntrltype);
+
+static ssize_t dctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_DCTYPE_NOT_REPORTED] = "none\n",
+ [NVME_DCTYPE_DDC] = "ddc\n",
+ [NVME_DCTYPE_CDC] = "cdc\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->dctype]);
+}
+static DEVICE_ATTR_RO(dctype);
+
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ struct nvme_dhchap_key *key, *host_key;
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &key);
+ if (ret) {
+ kfree(dhchap_secret);
+ return ret;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ host_key = ctrl->host_key;
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ ctrl->host_key = key;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ nvme_auth_free_key(host_key);
+ } else
+ kfree(dhchap_secret);
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ struct nvme_dhchap_key *key, *ctrl_key;
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &key);
+ if (ret) {
+ kfree(dhchap_secret);
+ return ret;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ ctrl_key = ctrl->ctrl_key;
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ ctrl->ctrl_key = key;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ nvme_auth_free_key(ctrl_key);
+ } else
+ kfree(dhchap_secret);
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
+static struct attribute *nvme_dev_attrs[] = {
+ &dev_attr_reset_controller.attr,
+ &dev_attr_rescan_controller.attr,
+ &dev_attr_model.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_firmware_rev.attr,
+ &dev_attr_cntlid.attr,
+ &dev_attr_delete_controller.attr,
+ &dev_attr_transport.attr,
+ &dev_attr_subsysnqn.attr,
+ &dev_attr_address.attr,
+ &dev_attr_state.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_queue_count.attr,
+ &dev_attr_sqsize.attr,
+ &dev_attr_hostnqn.attr,
+ &dev_attr_hostid.attr,
+ &dev_attr_ctrl_loss_tmo.attr,
+ &dev_attr_reconnect_delay.attr,
+ &dev_attr_fast_io_fail_tmo.attr,
+ &dev_attr_kato.attr,
+ &dev_attr_cntrltype.attr,
+ &dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
+ NULL
+};
+
+static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
+ return 0;
+ if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
+ return 0;
+ if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_hostid.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
+ return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
+
+ return a->mode;
+}
+
+const struct attribute_group nvme_dev_attrs_group = {
+ .attrs = nvme_dev_attrs,
+ .is_visible = nvme_dev_attrs_are_visible,
+};
+EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
+
+const struct attribute_group *nvme_dev_attr_groups[] = {
+ &nvme_dev_attrs_group,
+ NULL,
+};
+
+#define SUBSYS_ATTR_RO(_name, _mode, _show) \
+ struct device_attribute subsys_attr_##_name = \
+ __ATTR(_name, _mode, _show, NULL)
+
+static ssize_t nvme_subsys_show_nqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ return sysfs_emit(buf, "%s\n", subsys->subnqn);
+}
+static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+
+static ssize_t nvme_subsys_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ switch (subsys->subtype) {
+ case NVME_NQN_DISC:
+ return sysfs_emit(buf, "discovery\n");
+ case NVME_NQN_NVME:
+ return sysfs_emit(buf, "nvm\n");
+ default:
+ return sysfs_emit(buf, "reserved\n");
+ }
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
+#define nvme_subsys_show_str_function(field) \
+static ssize_t subsys_##field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_subsystem *subsys = \
+ container_of(dev, struct nvme_subsystem, dev); \
+ return sysfs_emit(buf, "%.*s\n", \
+ (int)sizeof(subsys->field), subsys->field); \
+} \
+static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
+
+nvme_subsys_show_str_function(model);
+nvme_subsys_show_str_function(serial);
+nvme_subsys_show_str_function(firmware_rev);
+
+static struct attribute *nvme_subsys_attrs[] = {
+ &subsys_attr_model.attr,
+ &subsys_attr_serial.attr,
+ &subsys_attr_firmware_rev.attr,
+ &subsys_attr_subsysnqn.attr,
+ &subsys_attr_subsystype.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+ &subsys_attr_iopolicy.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group nvme_subsys_attrs_group = {
+ .attrs = nvme_subsys_attrs,
+};
+
+const struct attribute_group *nvme_subsys_attrs_groups[] = {
+ &nvme_subsys_attrs_group,
+ NULL,
+};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
new file mode 100644
index 0000000000..f1d62d7442
--- /dev/null
+++ b/drivers/nvme/host/tcp.c
@@ -0,0 +1,2673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics TCP host.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/nvme-tcp.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/blk-mq.h>
+#include <crypto/hash.h>
+#include <net/busy_poll.h>
+#include <trace/events/sock.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+struct nvme_tcp_queue;
+
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * Any non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique to some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
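+/*
+ * Illustrative usage (value chosen arbitrarily): "modprobe nvme-tcp
+ * so_priority=6", or writing to /sys/module/nvme_tcp/parameters/so_priority,
+ * applies that SO_PRIORITY value to every newly created nvme-tcp socket.
+ */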
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+ &nvme_tcp_slock_key[0],
+ "sk_lock-AF_INET-NVME",
+ &nvme_tcp_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+ &nvme_tcp_slock_key[1],
+ "sk_lock-AF_INET6-NVME",
+ &nvme_tcp_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
+enum nvme_tcp_send_state {
+ NVME_TCP_SEND_CMD_PDU = 0,
+ NVME_TCP_SEND_H2C_PDU,
+ NVME_TCP_SEND_DATA,
+ NVME_TCP_SEND_DDGST,
+};
+
+struct nvme_tcp_request {
+ struct nvme_request req;
+ void *pdu;
+ struct nvme_tcp_queue *queue;
+ u32 data_len;
+ u32 pdu_len;
+ u32 pdu_sent;
+ u32 h2cdata_left;
+ u32 h2cdata_offset;
+ u16 ttag;
+ __le16 status;
+ struct list_head entry;
+ struct llist_node lentry;
+ __le32 ddgst;
+
+ struct bio *curr_bio;
+ struct iov_iter iter;
+
+ /* send state */
+ size_t offset;
+ size_t data_sent;
+ enum nvme_tcp_send_state state;
+};
+
+enum nvme_tcp_queue_flags {
+ NVME_TCP_Q_ALLOCATED = 0,
+ NVME_TCP_Q_LIVE = 1,
+ NVME_TCP_Q_POLLING = 2,
+};
+
+enum nvme_tcp_recv_state {
+ NVME_TCP_RECV_PDU = 0,
+ NVME_TCP_RECV_DATA,
+ NVME_TCP_RECV_DDGST,
+};
+
+struct nvme_tcp_ctrl;
+struct nvme_tcp_queue {
+ struct socket *sock;
+ struct work_struct io_work;
+ int io_cpu;
+
+ struct mutex queue_lock;
+ struct mutex send_mutex;
+ struct llist_head req_list;
+ struct list_head send_list;
+
+ /* recv state */
+ void *pdu;
+ int pdu_remaining;
+ int pdu_offset;
+ size_t data_remaining;
+ size_t ddgst_remaining;
+ unsigned int nr_cqe;
+
+ /* send state */
+ struct nvme_tcp_request *request;
+
+ u32 maxh2cdata;
+ size_t cmnd_capsule_len;
+ struct nvme_tcp_ctrl *ctrl;
+ unsigned long flags;
+ bool rd_enabled;
+
+ bool hdr_digest;
+ bool data_digest;
+ struct ahash_request *rcv_hash;
+ struct ahash_request *snd_hash;
+ __le32 exp_ddgst;
+ __le32 recv_ddgst;
+
+ struct page_frag_cache pf_cache;
+
+ void (*state_change)(struct sock *);
+ void (*data_ready)(struct sock *);
+ void (*write_space)(struct sock *);
+};
+
+struct nvme_tcp_ctrl {
+ /* read only in the hot path */
+ struct nvme_tcp_queue *queues;
+ struct blk_mq_tag_set tag_set;
+
+ /* other member variables */
+ struct list_head list;
+ struct blk_mq_tag_set admin_tag_set;
+ struct sockaddr_storage addr;
+ struct sockaddr_storage src_addr;
+ struct nvme_ctrl ctrl;
+
+ struct work_struct err_work;
+ struct delayed_work connect_work;
+ struct nvme_tcp_request async_req;
+ u32 io_queues[HCTX_MAX_TYPES];
+};
+
+static LIST_HEAD(nvme_tcp_ctrl_list);
+static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
+static struct workqueue_struct *nvme_tcp_wq;
+static const struct blk_mq_ops nvme_tcp_mq_ops;
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
+
+static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
+}
+
+static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
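+/*
+ * Queue 0 is the admin queue and resolves its requests from the admin tag
+ * set; I/O queue N maps to tag_set.tags[N - 1].
+ */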
+static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
+{
+ u32 queue_idx = nvme_tcp_queue_id(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
+{
+ return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
+{
+ return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
+{
+ return req->pdu;
+}
+
+static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
+{
+ /* use the pdu space in the back for the data pdu */
+ return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
+ sizeof(struct nvme_tcp_data_pdu);
+}
+
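+/*
+ * Fabrics commands always use the fixed admin in-capsule data size; other
+ * commands may carry up to the I/O command capsule size minus the SQE itself
+ * as inline data.
+ */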
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
+{
+ if (nvme_is_fabrics(req->req.cmd))
+ return NVME_TCP_ADMIN_CCSZ;
+ return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
+{
+ return req == &req->queue->ctrl->async_req;
+}
+
+static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
+{
+ struct request *rq;
+
+ if (unlikely(nvme_tcp_async_req(req)))
+ return false; /* async events don't have a request */
+
+ rq = blk_mq_rq_from_pdu(req);
+
+ return rq_data_dir(rq) == WRITE && req->data_len &&
+ req->data_len <= nvme_tcp_inline_data_size(req);
+}
+
+static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
+{
+ return req->iter.bvec->bv_page;
+}
+
+static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
+{
+ return req->iter.bvec->bv_offset + req->iter.iov_offset;
+}
+
+static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
+{
+ return min_t(size_t, iov_iter_single_seg_count(&req->iter),
+ req->pdu_len - req->pdu_sent);
+}
+
+static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
+{
+ return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
+ req->pdu_len - req->pdu_sent : 0;
+}
+
+static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
+ int len)
+{
+ return nvme_tcp_pdu_data_left(req) <= len;
+}
+
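+/*
+ * Set up the request's bvec iterator, either over the special payload (e.g.
+ * a discard) or over the current bio starting at its current position.
+ */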
+static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
+ unsigned int dir)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct bio_vec *vec;
+ unsigned int size;
+ int nr_bvec;
+ size_t offset;
+
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ vec = &rq->special_vec;
+ nr_bvec = 1;
+ size = blk_rq_payload_bytes(rq);
+ offset = 0;
+ } else {
+ struct bio *bio = req->curr_bio;
+ struct bvec_iter bi;
+ struct bio_vec bv;
+
+ vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ nr_bvec = 0;
+ bio_for_each_bvec(bv, bio, bi) {
+ nr_bvec++;
+ }
+ size = bio->bi_iter.bi_size;
+ offset = bio->bi_iter.bi_bvec_done;
+ }
+
+ iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
+ req->iter.iov_offset = offset;
+}
+
+static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
+ int len)
+{
+ req->data_sent += len;
+ req->pdu_sent += len;
+ iov_iter_advance(&req->iter, len);
+ if (!iov_iter_count(&req->iter) &&
+ req->data_sent < req->data_len) {
+ req->curr_bio = req->curr_bio->bi_next;
+ nvme_tcp_init_iter(req, ITER_SOURCE);
+ }
+}
+
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list);
+}
+
+static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ bool sync, bool last)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ bool empty;
+
+ empty = llist_add(&req->lentry, &queue->req_list) &&
+ list_empty(&queue->send_list) && !queue->request;
+
+ /*
+ * If we're the first on the send_list, try to send directly from
+ * this context; otherwise let io_work (queued below) pick the
+ * request up. Only do that if we are on the same cpu as the
+ * queue, so we don't introduce contention.
+ */
+ if (queue->io_cpu == raw_smp_processor_id() &&
+ sync && empty && mutex_trylock(&queue->send_mutex)) {
+ nvme_tcp_send_all(queue);
+ mutex_unlock(&queue->send_mutex);
+ }
+
+ if (last && nvme_tcp_queue_more(queue))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+ struct llist_node *node;
+
+ for (node = llist_del_all(&queue->req_list); node; node = node->next) {
+ req = llist_entry(node, struct nvme_tcp_request, lentry);
+ list_add(&req->entry, &queue->send_list);
+ }
+}
+
+static inline struct nvme_tcp_request *
+nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+
+ req = list_first_entry_or_null(&queue->send_list,
+ struct nvme_tcp_request, entry);
+ if (!req) {
+ nvme_tcp_process_req_list(queue);
+ req = list_first_entry_or_null(&queue->send_list,
+ struct nvme_tcp_request, entry);
+ if (unlikely(!req))
+ return NULL;
+ }
+
+ list_del(&req->entry);
+ return req;
+}
+
+static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
+ __le32 *dgst)
+{
+ ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
+ crypto_ahash_final(hash);
+}
+
+static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
+ struct page *page, off_t off, size_t len)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, len, off);
+ ahash_request_set_crypt(hash, &sg, NULL, len);
+ crypto_ahash_update(hash);
+}
+
+static inline void nvme_tcp_hdgst(struct ahash_request *hash,
+ void *pdu, size_t len)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, pdu, len);
+ ahash_request_set_crypt(hash, &sg, pdu + len, len);
+ crypto_ahash_digest(hash);
+}
+
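+/*
+ * Recompute the header digest over the received PDU header and compare it
+ * against the digest the controller sent. nvme_tcp_hdgst() writes the
+ * recomputed digest back into the buffer right behind the header, so the
+ * received value is saved off first.
+ */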
+static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
+ void *pdu, size_t pdu_len)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ __le32 recv_digest;
+ __le32 exp_digest;
+
+ if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d: header digest flag is cleared\n",
+ nvme_tcp_queue_id(queue));
+ return -EPROTO;
+ }
+
+ recv_digest = *(__le32 *)(pdu + hdr->hlen);
+ nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
+ exp_digest = *(__le32 *)(pdu + hdr->hlen);
+ if (recv_digest != exp_digest) {
+ dev_err(queue->ctrl->ctrl.device,
+ "header digest error: recv %#x expected %#x\n",
+ le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ u8 digest_len = nvme_tcp_hdgst_len(queue);
+ u32 len;
+
+ len = le32_to_cpu(hdr->plen) - hdr->hlen -
+ ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
+
+ if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d: data digest flag is cleared\n",
+ nvme_tcp_queue_id(queue));
+ return -EPROTO;
+ }
+ crypto_ahash_init(queue->rcv_hash);
+
+ return 0;
+}
+
+static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ page_frag_free(req->pdu);
+}
+
+static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_cmd_pdu *pdu;
+ int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+ struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+
+ req->pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!req->pdu)
+ return -ENOMEM;
+
+ pdu = req->pdu;
+ req->queue = queue;
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
+ nvme_req(rq)->cmd = &pdu->cmd;
+
+ return 0;
+}
+
+static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
+ struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
+ struct nvme_tcp_queue *queue = &ctrl->queues[0];
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
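+/*
+ * Receive state machine: a PDU header is always consumed first, then any
+ * C2HData payload, then the trailing data digest (when negotiated), before
+ * nvme_tcp_init_recv_ctx() re-arms the queue for the next PDU header.
+ */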
+static enum nvme_tcp_recv_state
+nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
+{
+ return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
+ (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
+ NVME_TCP_RECV_DATA;
+}
+
+static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
+{
+ queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
+ nvme_tcp_hdgst_len(queue);
+ queue->pdu_offset = 0;
+ queue->data_remaining = -1;
+ queue->ddgst_remaining = 0;
+}
+
+static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ return;
+
+ dev_warn(ctrl->device, "starting error recovery\n");
+ queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
+}
+
+static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
+ struct nvme_completion *cqe)
+{
+ struct nvme_tcp_request *req;
+ struct request *rq;
+
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "got bad cqe.command_id %#x on queue %d\n",
+ cqe->command_id, nvme_tcp_queue_id(queue));
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return -EINVAL;
+ }
+
+ req = blk_mq_rq_to_pdu(rq);
+ if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+ req->status = cqe->status;
+
+ if (!nvme_try_complete_req(rq, req->status, cqe->result))
+ nvme_complete_rq(rq);
+ queue->nr_cqe++;
+
+ return 0;
+}
+
+static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_data_pdu *pdu)
+{
+ struct request *rq;
+
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "got bad c2hdata.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
+ return -ENOENT;
+ }
+
+ if (!blk_rq_payload_bytes(rq)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x unexpected data\n",
+ nvme_tcp_queue_id(queue), rq->tag);
+ return -EIO;
+ }
+
+ queue->data_remaining = le32_to_cpu(pdu->data_length);
+
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
+ unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x SUCCESS set but not last PDU\n",
+ nvme_tcp_queue_id(queue), rq->tag);
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_rsp_pdu *pdu)
+{
+ struct nvme_completion *cqe = &pdu->cqe;
+ int ret = 0;
+
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+ cqe->command_id)))
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
+ else
+ ret = nvme_tcp_process_nvme_cqe(queue, cqe);
+
+ return ret;
+}
+
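+/*
+ * Build an H2CData PDU for the current R2T window. The payload is capped at
+ * the MAXH2CDATA value negotiated during connection setup, and the LAST flag
+ * is only set once the remaining R2T length fits into this PDU.
+ */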
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
+ struct nvme_tcp_queue *queue = req->queue;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ u32 h2cdata_sent = req->pdu_len;
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+ u8 ddgst = nvme_tcp_ddgst_len(queue);
+
+ req->state = NVME_TCP_SEND_H2C_PDU;
+ req->offset = 0;
+ req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
+ req->pdu_sent = 0;
+ req->h2cdata_left -= req->pdu_len;
+ req->h2cdata_offset += h2cdata_sent;
+
+ memset(data, 0, sizeof(*data));
+ data->hdr.type = nvme_tcp_h2c_data;
+ if (!req->h2cdata_left)
+ data->hdr.flags = NVME_TCP_F_DATA_LAST;
+ if (queue->hdr_digest)
+ data->hdr.flags |= NVME_TCP_F_HDGST;
+ if (queue->data_digest)
+ data->hdr.flags |= NVME_TCP_F_DDGST;
+ data->hdr.hlen = sizeof(*data);
+ data->hdr.pdo = data->hdr.hlen + hdgst;
+ data->hdr.plen =
+ cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
+ data->ttag = req->ttag;
+ data->command_id = nvme_cid(rq);
+ data->data_offset = cpu_to_le32(req->h2cdata_offset);
+ data->data_length = cpu_to_le32(req->pdu_len);
+}
+
+static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_r2t_pdu *pdu)
+{
+ struct nvme_tcp_request *req;
+ struct request *rq;
+ u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+ u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
+
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "got bad r2t.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
+ return -ENOENT;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ if (unlikely(!r2t_length)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len is %u, probably a bug...\n",
+ rq->tag, r2t_length);
+ return -EPROTO;
+ }
+
+ if (unlikely(req->data_sent + r2t_length > req->data_len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len %u exceeded data len %u (%zu sent)\n",
+ rq->tag, r2t_length, req->data_len, req->data_sent);
+ return -EPROTO;
+ }
+
+ if (unlikely(r2t_offset < req->data_sent)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d unexpected r2t offset %u (expected %zu)\n",
+ rq->tag, r2t_offset, req->data_sent);
+ return -EPROTO;
+ }
+
+ req->pdu_len = 0;
+ req->h2cdata_left = r2t_length;
+ req->h2cdata_offset = r2t_offset;
+ req->ttag = pdu->ttag;
+
+ nvme_tcp_setup_h2c_data_pdu(req);
+ nvme_tcp_queue_request(req, false, true);
+
+ return 0;
+}
+
+static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ unsigned int *offset, size_t *len)
+{
+ struct nvme_tcp_hdr *hdr;
+ char *pdu = queue->pdu;
+ size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
+ int ret;
+
+ ret = skb_copy_bits(skb, *offset,
+ &pdu[queue->pdu_offset], rcv_len);
+ if (unlikely(ret))
+ return ret;
+
+ queue->pdu_remaining -= rcv_len;
+ queue->pdu_offset += rcv_len;
+ *offset += rcv_len;
+ *len -= rcv_len;
+ if (queue->pdu_remaining)
+ return 0;
+
+ hdr = queue->pdu;
+ if (queue->hdr_digest) {
+ ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (queue->data_digest) {
+ ret = nvme_tcp_check_ddgst(queue, queue->pdu);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ switch (hdr->type) {
+ case nvme_tcp_c2h_data:
+ return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
+ case nvme_tcp_rsp:
+ nvme_tcp_init_recv_ctx(queue);
+ return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
+ case nvme_tcp_r2t:
+ nvme_tcp_init_recv_ctx(queue);
+ return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
+ default:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
+ }
+}
+
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
+{
+ union nvme_result res = {};
+
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
+ nvme_complete_rq(rq);
+}
+
+static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ unsigned int *offset, size_t *len)
+{
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
+ struct request *rq =
+ nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ while (true) {
+ int recv_len, ret;
+
+ recv_len = min_t(size_t, *len, queue->data_remaining);
+ if (!recv_len)
+ break;
+
+ if (!iov_iter_count(&req->iter)) {
+ req->curr_bio = req->curr_bio->bi_next;
+
+ /*
+ * If we don't have any bios left, the controller
+ * sent more data than we requested, which is an error.
+ */
+ if (!req->curr_bio) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d no space in request %#x",
+ nvme_tcp_queue_id(queue), rq->tag);
+ nvme_tcp_init_recv_ctx(queue);
+ return -EIO;
+ }
+ nvme_tcp_init_iter(req, ITER_DEST);
+ }
+
+ /* we can read only from what is left in this bio */
+ recv_len = min_t(size_t, recv_len,
+ iov_iter_count(&req->iter));
+
+ if (queue->data_digest)
+ ret = skb_copy_and_hash_datagram_iter(skb, *offset,
+ &req->iter, recv_len, queue->rcv_hash);
+ else
+ ret = skb_copy_datagram_iter(skb, *offset,
+ &req->iter, recv_len);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d failed to copy request %#x data",
+ nvme_tcp_queue_id(queue), rq->tag);
+ return ret;
+ }
+
+ *len -= recv_len;
+ *offset += recv_len;
+ queue->data_remaining -= recv_len;
+ }
+
+ if (!queue->data_remaining) {
+ if (queue->data_digest) {
+ nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
+ queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
+ } else {
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
+ nvme_tcp_end_request(rq,
+ le16_to_cpu(req->status));
+ queue->nr_cqe++;
+ }
+ nvme_tcp_init_recv_ctx(queue);
+ }
+ }
+
+ return 0;
+}
+
+static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb, unsigned int *offset, size_t *len)
+{
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
+ char *ddgst = (char *)&queue->recv_ddgst;
+ size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
+ off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
+ int ret;
+
+ ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
+ if (unlikely(ret))
+ return ret;
+
+ queue->ddgst_remaining -= recv_len;
+ *offset += recv_len;
+ *len -= recv_len;
+ if (queue->ddgst_remaining)
+ return 0;
+
+ if (queue->recv_ddgst != queue->exp_ddgst) {
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
+ dev_err(queue->ctrl->ctrl.device,
+ "data digest error: recv %#x expected %#x\n",
+ le32_to_cpu(queue->recv_ddgst),
+ le32_to_cpu(queue->exp_ddgst));
+ }
+
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ nvme_tcp_end_request(rq, le16_to_cpu(req->status));
+ queue->nr_cqe++;
+ }
+
+ nvme_tcp_init_recv_ctx(queue);
+ return 0;
+}
+
+static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
+{
+ struct nvme_tcp_queue *queue = desc->arg.data;
+ size_t consumed = len;
+ int result;
+
+ if (unlikely(!queue->rd_enabled))
+ return -EFAULT;
+
+ while (len) {
+ switch (nvme_tcp_recv_state(queue)) {
+ case NVME_TCP_RECV_PDU:
+ result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
+ break;
+ case NVME_TCP_RECV_DATA:
+ result = nvme_tcp_recv_data(queue, skb, &offset, &len);
+ break;
+ case NVME_TCP_RECV_DDGST:
+ result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
+ break;
+ default:
+ result = -EFAULT;
+ }
+ if (result) {
+ dev_err(queue->ctrl->ctrl.device,
+ "receive failed: %d\n", result);
+ queue->rd_enabled = false;
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return result;
+ }
+ }
+
+ return consumed;
+}
+
+static void nvme_tcp_data_ready(struct sock *sk)
+{
+ struct nvme_tcp_queue *queue;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (likely(queue && queue->rd_enabled) &&
+ !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvme_tcp_write_space(struct sock *sk)
+{
+ struct nvme_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (likely(queue && sk_stream_is_writeable(sk))) {
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvme_tcp_state_change(struct sock *sk)
+{
+ struct nvme_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (!queue)
+ goto done;
+
+ switch (sk->sk_state) {
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ case TCP_LAST_ACK:
+ case TCP_FIN_WAIT1:
+ case TCP_FIN_WAIT2:
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ break;
+ default:
+ dev_info(queue->ctrl->ctrl.device,
+ "queue %d socket state %d\n",
+ nvme_tcp_queue_id(queue), sk->sk_state);
+ }
+
+ queue->state_change(sk);
+done:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
+{
+ queue->request = NULL;
+}
+
+static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
+{
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
+}
+
+static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ int req_data_len = req->data_len;
+ u32 h2cdata_left = req->h2cdata_left;
+
+ while (true) {
+ struct bio_vec bvec;
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
+ };
+ struct page *page = nvme_tcp_req_cur_page(req);
+ size_t offset = nvme_tcp_req_cur_offset(req);
+ size_t len = nvme_tcp_req_cur_length(req);
+ bool last = nvme_tcp_pdu_last_send(req, len);
+ int req_data_sent = req->data_sent;
+ int ret;
+
+ if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
+ msg.msg_flags |= MSG_EOR;
+ else
+ msg.msg_flags |= MSG_MORE;
+
+ if (!sendpage_ok(page))
+ msg.msg_flags &= ~MSG_SPLICE_PAGES;
+
+ bvec_set_page(&bvec, page, len, offset);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+ ret = sock_sendmsg(queue->sock, &msg);
+ if (ret <= 0)
+ return ret;
+
+ if (queue->data_digest)
+ nvme_tcp_ddgst_update(queue->snd_hash, page,
+ offset, ret);
+
+ /*
+ * Update the request iterator, except for the last payload
+ * send of the request: there we must not touch it, as we may
+ * be racing with the RX path completing the request.
+ */
+ if (req_data_sent + ret < req_data_len)
+ nvme_tcp_advance_req(req, ret);
+
+ /* fully successful last send in current PDU */
+ if (last && ret == len) {
+ if (queue->data_digest) {
+ nvme_tcp_ddgst_final(queue->snd_hash,
+ &req->ddgst);
+ req->state = NVME_TCP_SEND_DDGST;
+ req->offset = 0;
+ } else {
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
+ }
+ return 1;
+ }
+ }
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
+ struct bio_vec bvec;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+ bool inline_data = nvme_tcp_has_inline_data(req);
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+ int len = sizeof(*pdu) + hdgst - req->offset;
+ int ret;
+
+ if (inline_data || nvme_tcp_queue_more(queue))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
+ if (queue->hdr_digest && !req->offset)
+ nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+
+ bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+ ret = sock_sendmsg(queue->sock, &msg);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ len -= ret;
+ if (!len) {
+ if (inline_data) {
+ req->state = NVME_TCP_SEND_DATA;
+ if (queue->data_digest)
+ crypto_ahash_init(queue->snd_hash);
+ } else {
+ nvme_tcp_done_send_req(queue);
+ }
+ return 1;
+ }
+ req->offset += ret;
+
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
+ struct bio_vec bvec;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+ int len = sizeof(*pdu) - req->offset + hdgst;
+ int ret;
+
+ if (queue->hdr_digest && !req->offset)
+ nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+
+ if (!req->h2cdata_left)
+ msg.msg_flags |= MSG_SPLICE_PAGES;
+
+ bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+ ret = sock_sendmsg(queue->sock, &msg);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ len -= ret;
+ if (!len) {
+ req->state = NVME_TCP_SEND_DATA;
+ if (queue->data_digest)
+ crypto_ahash_init(queue->snd_hash);
+ return 1;
+ }
+ req->offset += ret;
+
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ size_t offset = req->offset;
+ u32 h2cdata_left = req->h2cdata_left;
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+ .iov_base = (u8 *)&req->ddgst + req->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
+ };
+
+ if (nvme_tcp_queue_more(queue))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
+ return 1;
+ }
+
+ req->offset += ret;
+ return -EAGAIN;
+}
+
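+/*
+ * Drive one request through its send states (command PDU, then either inline
+ * data or H2CData PDU + data, then the data digest when enabled). Returns 1
+ * when progress was made, 0 when there is nothing to send or the socket would
+ * block, and a negative error after the request has been failed.
+ */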
+static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+ unsigned int noreclaim_flag;
+ int ret = 1;
+
+ if (!queue->request) {
+ queue->request = nvme_tcp_fetch_request(queue);
+ if (!queue->request)
+ return 0;
+ }
+ req = queue->request;
+
+ noreclaim_flag = memalloc_noreclaim_save();
+ if (req->state == NVME_TCP_SEND_CMD_PDU) {
+ ret = nvme_tcp_try_send_cmd_pdu(req);
+ if (ret <= 0)
+ goto done;
+ if (!nvme_tcp_has_inline_data(req))
+ goto out;
+ }
+
+ if (req->state == NVME_TCP_SEND_H2C_PDU) {
+ ret = nvme_tcp_try_send_data_pdu(req);
+ if (ret <= 0)
+ goto done;
+ }
+
+ if (req->state == NVME_TCP_SEND_DATA) {
+ ret = nvme_tcp_try_send_data(req);
+ if (ret <= 0)
+ goto done;
+ }
+
+ if (req->state == NVME_TCP_SEND_DDGST)
+ ret = nvme_tcp_try_send_ddgst(req);
+done:
+ if (ret == -EAGAIN) {
+ ret = 0;
+ } else if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to send request %d\n", ret);
+ nvme_tcp_fail_request(queue->request);
+ nvme_tcp_done_send_req(queue);
+ }
+out:
+ memalloc_noreclaim_restore(noreclaim_flag);
+ return ret;
+}
+
+static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+ struct sock *sk = sock->sk;
+ read_descriptor_t rd_desc;
+ int consumed;
+
+ rd_desc.arg.data = queue;
+ rd_desc.count = 1;
+ lock_sock(sk);
+ queue->nr_cqe = 0;
+ consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
+ release_sock(sk);
+ return consumed;
+}
+
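+/*
+ * Per-queue I/O worker: alternate between sending and receiving for roughly
+ * one millisecond, then requeue if the budget ran out while work was still
+ * pending.
+ */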
+static void nvme_tcp_io_work(struct work_struct *w)
+{
+ struct nvme_tcp_queue *queue =
+ container_of(w, struct nvme_tcp_queue, io_work);
+ unsigned long deadline = jiffies + msecs_to_jiffies(1);
+
+ do {
+ bool pending = false;
+ int result;
+
+ if (mutex_trylock(&queue->send_mutex)) {
+ result = nvme_tcp_try_send(queue);
+ mutex_unlock(&queue->send_mutex);
+ if (result > 0)
+ pending = true;
+ else if (unlikely(result < 0))
+ break;
+ }
+
+ result = nvme_tcp_try_recv(queue);
+ if (result > 0)
+ pending = true;
+ else if (unlikely(result < 0))
+ return;
+
+ if (!pending || !queue->rd_enabled)
+ return;
+
+ } while (!time_after(jiffies, deadline)); /* quota is exhausted */
+
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
+
+ ahash_request_free(queue->rcv_hash);
+ ahash_request_free(queue->snd_hash);
+ crypto_free_ahash(tfm);
+}
+
+static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->snd_hash)
+ goto free_tfm;
+ ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
+
+ queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->rcv_hash)
+ goto free_snd_hash;
+ ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
+
+ return 0;
+free_snd_hash:
+ ahash_request_free(queue->snd_hash);
+free_tfm:
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+}
+
+static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
+{
+ struct nvme_tcp_request *async = &ctrl->async_req;
+
+ page_frag_free(async->pdu);
+}
+
+static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
+{
+ struct nvme_tcp_queue *queue = &ctrl->queues[0];
+ struct nvme_tcp_request *async = &ctrl->async_req;
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+
+ async->pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!async->pdu)
+ return -ENOMEM;
+
+ async->queue = &ctrl->queues[0];
+ return 0;
+}
+
+static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct page *page;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ unsigned int noreclaim_flag;
+
+ if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ return;
+
+ if (queue->hdr_digest || queue->data_digest)
+ nvme_tcp_free_crypto(queue);
+
+ if (queue->pf_cache.va) {
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ queue->pf_cache.va = NULL;
+ }
+
+ noreclaim_flag = memalloc_noreclaim_save();
+ sock_release(queue->sock);
+ memalloc_noreclaim_restore(noreclaim_flag);
+
+ kfree(queue->pdu);
+ mutex_destroy(&queue->send_mutex);
+ mutex_destroy(&queue->queue_lock);
+}
+
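+/*
+ * Exchange the ICReq/ICResp PDUs that set up the NVMe/TCP connection and
+ * validate the controller's reply: PFV 1.0, digest settings that match the
+ * host's, a CPDA of 0, and a MAXH2CDATA that is a multiple of 4 and at least
+ * NVME_TCP_MIN_MAXH2CDATA.
+ */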
+static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_icreq_pdu *icreq;
+ struct nvme_tcp_icresp_pdu *icresp;
+ struct msghdr msg = {};
+ struct kvec iov;
+ bool ctrl_hdgst, ctrl_ddgst;
+ u32 maxh2cdata;
+ int ret;
+
+ icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
+ if (!icreq)
+ return -ENOMEM;
+
+ icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
+ if (!icresp) {
+ ret = -ENOMEM;
+ goto free_icreq;
+ }
+
+ icreq->hdr.type = nvme_tcp_icreq;
+ icreq->hdr.hlen = sizeof(*icreq);
+ icreq->hdr.pdo = 0;
+ icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
+ icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+ icreq->maxr2t = 0; /* single inflight r2t supported */
+ icreq->hpda = 0; /* no alignment constraint */
+ if (queue->hdr_digest)
+ icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+ if (queue->data_digest)
+ icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
+
+ iov.iov_base = icreq;
+ iov.iov_len = sizeof(*icreq);
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (ret < 0)
+ goto free_icresp;
+
+ memset(&msg, 0, sizeof(msg));
+ iov.iov_base = icresp;
+ iov.iov_len = sizeof(*icresp);
+ ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (ret < 0)
+ goto free_icresp;
+
+ ret = -EINVAL;
+ if (icresp->hdr.type != nvme_tcp_icresp) {
+ pr_err("queue %d: bad type returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->hdr.type);
+ goto free_icresp;
+ }
+
+ if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
+ pr_err("queue %d: bad pdu length returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->hdr.plen);
+ goto free_icresp;
+ }
+
+ if (icresp->pfv != NVME_TCP_PFV_1_0) {
+ pr_err("queue %d: bad pfv returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->pfv);
+ goto free_icresp;
+ }
+
+ ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
+ if ((queue->data_digest && !ctrl_ddgst) ||
+ (!queue->data_digest && ctrl_ddgst)) {
+ pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
+ nvme_tcp_queue_id(queue),
+ queue->data_digest ? "enabled" : "disabled",
+ ctrl_ddgst ? "enabled" : "disabled");
+ goto free_icresp;
+ }
+
+ ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
+ if ((queue->hdr_digest && !ctrl_hdgst) ||
+ (!queue->hdr_digest && ctrl_hdgst)) {
+ pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
+ nvme_tcp_queue_id(queue),
+ queue->hdr_digest ? "enabled" : "disabled",
+ ctrl_hdgst ? "enabled" : "disabled");
+ goto free_icresp;
+ }
+
+ if (icresp->cpda != 0) {
+ pr_err("queue %d: unsupported cpda returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->cpda);
+ goto free_icresp;
+ }
+
+ maxh2cdata = le32_to_cpu(icresp->maxdata);
+ if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+ pr_err("queue %d: invalid maxh2cdata returned %u\n",
+ nvme_tcp_queue_id(queue), maxh2cdata);
+ goto free_icresp;
+ }
+ queue->maxh2cdata = maxh2cdata;
+
+ ret = 0;
+free_icresp:
+ kfree(icresp);
+free_icreq:
+ kfree(icreq);
+ return ret;
+}
+
+static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
+{
+ return nvme_tcp_queue_id(queue) == 0;
+}
+
+static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
+}
+
+static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ !nvme_tcp_read_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ] +
+ ctrl->io_queues[HCTX_TYPE_POLL];
+}
+
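+/*
+ * Pin the queue's io_work to an online CPU, spreading default, read and poll
+ * queues across CPUs by their index within their respective class.
+ */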
+static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+ int n = 0;
+
+ if (nvme_tcp_default_queue(queue))
+ n = qid - 1;
+ else if (nvme_tcp_read_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ else if (nvme_tcp_poll_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+ ctrl->io_queues[HCTX_TYPE_READ] - 1;
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+}
+
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ int ret, rcv_pdu_size;
+
+ mutex_init(&queue->queue_lock);
+ queue->ctrl = ctrl;
+ init_llist_head(&queue->req_list);
+ INIT_LIST_HEAD(&queue->send_list);
+ mutex_init(&queue->send_mutex);
+ INIT_WORK(&queue->io_work, nvme_tcp_io_work);
+
+ if (qid > 0)
+ queue->cmnd_capsule_len = nctrl->ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command) +
+ NVME_TCP_ADMIN_CCSZ;
+
+ ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
+ IPPROTO_TCP, &queue->sock);
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to create socket: %d\n", ret);
+ goto err_destroy_mutex;
+ }
+
+ nvme_tcp_reclassify_socket(queue->sock);
+
+ /* Single syn retry */
+ tcp_sock_set_syncnt(queue->sock->sk, 1);
+
+ /* Set TCP no delay */
+ tcp_sock_set_nodelay(queue->sock->sk);
+
+ /*
+ * Cleanup whatever is sitting in the TCP transmit queue on socket
+ * close. This is done to prevent stale data from being sent should
+ * the network connection be restored before TCP times out.
+ */
+ sock_no_linger(queue->sock->sk);
+
+ if (so_priority > 0)
+ sock_set_priority(queue->sock->sk, so_priority);
+
+ /* Set socket type of service */
+ if (nctrl->opts->tos >= 0)
+ ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
+
+ /* Set 10 seconds timeout for icresp recvmsg */
+ queue->sock->sk->sk_rcvtimeo = 10 * HZ;
+
+ queue->sock->sk->sk_allocation = GFP_ATOMIC;
+ queue->sock->sk->sk_use_task_frag = false;
+ nvme_tcp_set_queue_io_cpu(queue);
+ queue->request = NULL;
+ queue->data_remaining = 0;
+ queue->ddgst_remaining = 0;
+ queue->pdu_remaining = 0;
+ queue->pdu_offset = 0;
+ sk_set_memalloc(queue->sock->sk);
+
+ if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
+ ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
+ sizeof(ctrl->src_addr));
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to bind queue %d socket %d\n",
+ qid, ret);
+ goto err_sock;
+ }
+ }
+
+ if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
+ char *iface = nctrl->opts->host_iface;
+ sockptr_t optval = KERNEL_SOCKPTR(iface);
+
+ ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
+ optval, strlen(iface));
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to bind to interface %s queue %d err %d\n",
+ iface, qid, ret);
+ goto err_sock;
+ }
+ }
+
+ queue->hdr_digest = nctrl->opts->hdr_digest;
+ queue->data_digest = nctrl->opts->data_digest;
+ if (queue->hdr_digest || queue->data_digest) {
+ ret = nvme_tcp_alloc_crypto(queue);
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to allocate queue %d crypto\n", qid);
+ goto err_sock;
+ }
+ }
+
+ rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
+ nvme_tcp_hdgst_len(queue);
+ queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
+ if (!queue->pdu) {
+ ret = -ENOMEM;
+ goto err_crypto;
+ }
+
+ dev_dbg(nctrl->device, "connecting queue %d\n",
+ nvme_tcp_queue_id(queue));
+
+ ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
+ sizeof(ctrl->addr), 0);
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to connect socket: %d\n", ret);
+ goto err_rcv_pdu;
+ }
+
+ ret = nvme_tcp_init_connection(queue);
+ if (ret)
+ goto err_init_connect;
+
+ set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
+
+ return 0;
+
+err_init_connect:
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+err_rcv_pdu:
+ kfree(queue->pdu);
+err_crypto:
+ if (queue->hdr_digest || queue->data_digest)
+ nvme_tcp_free_crypto(queue);
+err_sock:
+ sock_release(queue->sock);
+ queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->send_mutex);
+ mutex_destroy(&queue->queue_lock);
+ return ret;
+}
+
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = NULL;
+ sock->sk->sk_data_ready = queue->data_ready;
+ sock->sk->sk_state_change = queue->state_change;
+ sock->sk->sk_write_space = queue->write_space;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+}
+
+static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+{
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ nvme_tcp_restore_sock_ops(queue);
+ cancel_work_sync(&queue->io_work);
+}
+
+static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ return;
+
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
+}
+
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+ queue->sock->sk->sk_user_data = queue;
+ queue->state_change = queue->sock->sk->sk_state_change;
+ queue->data_ready = queue->sock->sk->sk_data_ready;
+ queue->write_space = queue->sock->sk->sk_write_space;
+ queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+ queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+ queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ queue->sock->sk->sk_ll_usec = 1;
+#endif
+ write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
+static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[idx];
+ int ret;
+
+ queue->rd_enabled = true;
+ nvme_tcp_init_recv_ctx(queue);
+ nvme_tcp_setup_sock_ops(queue);
+
+ if (idx)
+ ret = nvmf_connect_io_queue(nctrl, idx);
+ else
+ ret = nvmf_connect_admin_queue(nctrl);
+
+ if (!ret) {
+ set_bit(NVME_TCP_Q_LIVE, &queue->flags);
+ } else {
+ if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ dev_err(nctrl->device,
+ "failed to connect queue: %d ret=%d\n", idx, ret);
+ }
+ return ret;
+}
+
+static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+ cancel_work_sync(&ctrl->async_event_work);
+ nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
+ to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
+ }
+
+ nvme_tcp_free_queue(ctrl, 0);
+}
+
+static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_tcp_free_queue(ctrl, i);
+}
+
+static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_tcp_stop_queue(ctrl, i);
+}
+
+static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+ int first, int last)
+{
+ int i, ret;
+
+ for (i = first; i < last; i++) {
+ ret = nvme_tcp_start_queue(ctrl, i);
+ if (ret)
+ goto out_stop_queues;
+ }
+
+ return 0;
+
+out_stop_queues:
+ for (i--; i >= first; i--)
+ nvme_tcp_stop_queue(ctrl, i);
+ return ret;
+}
+
+static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ ret = nvme_tcp_alloc_queue(ctrl, 0);
+ if (ret)
+ return ret;
+
+ ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
+ if (ret)
+ goto out_free_queue;
+
+ return 0;
+
+out_free_queue:
+ nvme_tcp_free_queue(ctrl, 0);
+ return ret;
+}
+
+static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_tcp_alloc_queue(ctrl, i);
+ if (ret)
+ goto out_free_queues;
+ }
+
+ return 0;
+
+out_free_queues:
+ for (i--; i >= 1; i--)
+ nvme_tcp_free_queue(ctrl, i);
+
+ return ret;
+}
+
+static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+{
+ unsigned int nr_io_queues;
+ int ret;
+
+ nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
+ ret = nvme_set_queue_count(ctrl, &nr_io_queues);
+ if (ret)
+ return ret;
+
+ if (nr_io_queues == 0) {
+ dev_err(ctrl->device,
+ "unable to set any I/O queues\n");
+ return -ENOMEM;
+ }
+
+ ctrl->queue_count = nr_io_queues + 1;
+ dev_info(ctrl->device,
+ "creating %d I/O queues.\n", nr_io_queues);
+
+ nvmf_set_io_queues(ctrl->opts, nr_io_queues,
+ to_tcp_ctrl(ctrl)->io_queues);
+ return __nvme_tcp_alloc_io_queues(ctrl);
+}
+
+static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
+{
+ nvme_tcp_stop_io_queues(ctrl);
+ if (remove)
+ nvme_remove_io_tag_set(ctrl);
+ nvme_tcp_free_io_queues(ctrl);
+}
+
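+/*
+ * (Re)create the I/O queues. On a reconnect the usable queue count may have
+ * changed, so only queues covered by the existing tag set are started before
+ * the tag set is resized under a queue freeze; any additional queues are
+ * started afterwards.
+ */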
+static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+{
+ int ret, nr_queues;
+
+ ret = nvme_tcp_alloc_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ if (new) {
+ ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ &nvme_tcp_mq_ops,
+ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ sizeof(struct nvme_tcp_request));
+ if (ret)
+ goto out_free_io_queues;
+ }
+
+ /*
+ * Only start I/O queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * queue count might have changed.
+ */
+ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
+ ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
+ if (ret)
+ goto out_cleanup_connect_q;
+
+ if (!new) {
+ nvme_start_freeze(ctrl);
+ nvme_unquiesce_io_queues(ctrl);
+ if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ nvme_unfreeze(ctrl);
+ goto out_wait_freeze_timed_out;
+ }
+ blk_mq_update_nr_hw_queues(ctrl->tagset,
+ ctrl->queue_count - 1);
+ nvme_unfreeze(ctrl);
+ }
+
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
+ ctrl->tagset->nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
+ return 0;
+
+out_wait_freeze_timed_out:
+ nvme_quiesce_io_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+out_cleanup_connect_q:
+ nvme_cancel_tagset(ctrl);
+ if (new)
+ nvme_remove_io_tag_set(ctrl);
+out_free_io_queues:
+ nvme_tcp_free_io_queues(ctrl);
+ return ret;
+}
+
+static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
+{
+ nvme_tcp_stop_queue(ctrl, 0);
+ if (remove)
+ nvme_remove_admin_tag_set(ctrl);
+ nvme_tcp_free_admin_queue(ctrl);
+}
+
+static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+{
+ int error;
+
+ error = nvme_tcp_alloc_admin_queue(ctrl);
+ if (error)
+ return error;
+
+ if (new) {
+ error = nvme_alloc_admin_tag_set(ctrl,
+ &to_tcp_ctrl(ctrl)->admin_tag_set,
+ &nvme_tcp_admin_mq_ops,
+ sizeof(struct nvme_tcp_request));
+ if (error)
+ goto out_free_queue;
+ }
+
+ error = nvme_tcp_start_queue(ctrl, 0);
+ if (error)
+ goto out_cleanup_tagset;
+
+ error = nvme_enable_ctrl(ctrl);
+ if (error)
+ goto out_stop_queue;
+
+ nvme_unquiesce_admin_queue(ctrl);
+
+ error = nvme_init_ctrl_finish(ctrl, false);
+ if (error)
+ goto out_quiesce_queue;
+
+ return 0;
+
+out_quiesce_queue:
+ nvme_quiesce_admin_queue(ctrl);
+ blk_sync_queue(ctrl->admin_q);
+out_stop_queue:
+ nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
+out_cleanup_tagset:
+ if (new)
+ nvme_remove_admin_tag_set(ctrl);
+out_free_queue:
+ nvme_tcp_free_admin_queue(ctrl);
+ return error;
+}
+
+static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
+ bool remove)
+{
+ nvme_quiesce_admin_queue(ctrl);
+ blk_sync_queue(ctrl->admin_q);
+ nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
+ if (remove)
+ nvme_unquiesce_admin_queue(ctrl);
+ nvme_tcp_destroy_admin_queue(ctrl, remove);
+}
+
+static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ bool remove)
+{
+ if (ctrl->queue_count <= 1)
+ return;
+ nvme_quiesce_admin_queue(ctrl);
+ nvme_quiesce_io_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ nvme_cancel_tagset(ctrl);
+ if (remove)
+ nvme_unquiesce_io_queues(ctrl);
+ nvme_tcp_destroy_io_queues(ctrl, remove);
+}
+
+static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ /* If we are resetting/deleting then do nothing */
+ if (state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
+ return;
+ }
+
+ if (nvmf_should_reconnect(ctrl)) {
+ dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
+ ctrl->opts->reconnect_delay);
+ queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
+ ctrl->opts->reconnect_delay * HZ);
+ } else {
+ dev_info(ctrl->device, "Removing controller...\n");
+ nvme_delete_ctrl(ctrl);
+ }
+}
+
+static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
+{
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ret;
+
+ ret = nvme_tcp_configure_admin_queue(ctrl, new);
+ if (ret)
+ return ret;
+
+ if (ctrl->icdoff) {
+ ret = -EOPNOTSUPP;
+ dev_err(ctrl->device, "icdoff is not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (!nvme_ctrl_sgl_supported(ctrl)) {
+ ret = -EOPNOTSUPP;
+ dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (opts->queue_size > ctrl->sqsize + 1)
+ dev_warn(ctrl->device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ opts->queue_size, ctrl->sqsize + 1);
+
+ if (ctrl->sqsize + 1 > ctrl->maxcmd) {
+ dev_warn(ctrl->device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->sqsize + 1, ctrl->maxcmd);
+ ctrl->sqsize = ctrl->maxcmd - 1;
+ }
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_tcp_configure_io_queues(ctrl, new);
+ if (ret)
+ goto destroy_admin;
+ }
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
+ /*
+ * state change failure is ok if we started ctrl delete,
+ * unless we're in the middle of creating a new controller,
+ * to avoid races with the teardown flow.
+ */
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
+ ret = -EINVAL;
+ goto destroy_io;
+ }
+
+ nvme_start_ctrl(ctrl);
+ return 0;
+
+destroy_io:
+ if (ctrl->queue_count > 1) {
+ nvme_quiesce_io_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ nvme_cancel_tagset(ctrl);
+ nvme_tcp_destroy_io_queues(ctrl, new);
+ }
+destroy_admin:
+ nvme_quiesce_admin_queue(ctrl);
+ blk_sync_queue(ctrl->admin_q);
+ nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
+ nvme_tcp_destroy_admin_queue(ctrl, new);
+ return ret;
+}
+
+static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
+ struct nvme_tcp_ctrl, connect_work);
+ struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+
+ ++ctrl->nr_reconnects;
+
+ if (nvme_tcp_setup_ctrl(ctrl, false))
+ goto requeue;
+
+ dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
+ ctrl->nr_reconnects);
+
+ ctrl->nr_reconnects = 0;
+
+ return;
+
+requeue:
+ dev_info(ctrl->device, "Failed reconnect attempt %d\n",
+ ctrl->nr_reconnects);
+ nvme_tcp_reconnect_or_remove(ctrl);
+}
+
+static void nvme_tcp_error_recovery_work(struct work_struct *work)
+{
+ struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
+ struct nvme_tcp_ctrl, err_work);
+ struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+
+ nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
+ nvme_tcp_teardown_io_queues(ctrl, false);
+ /* unquiesce to fail fast pending requests */
+ nvme_unquiesce_io_queues(ctrl);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
+ nvme_unquiesce_admin_queue(ctrl);
+ nvme_auth_stop(ctrl);
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we started ctrl delete */
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
+ return;
+ }
+
+ nvme_tcp_reconnect_or_remove(ctrl);
+}
+
+static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
+{
+ nvme_tcp_teardown_io_queues(ctrl, shutdown);
+ nvme_quiesce_admin_queue(ctrl);
+ nvme_disable_ctrl(ctrl, shutdown);
+ nvme_tcp_teardown_admin_queue(ctrl, shutdown);
+}
+
+static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ nvme_tcp_teardown_ctrl(ctrl, true);
+}
+
+static void nvme_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, reset_work);
+
+ nvme_stop_ctrl(ctrl);
+ nvme_tcp_teardown_ctrl(ctrl, false);
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we started ctrl delete */
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
+ return;
+ }
+
+ if (nvme_tcp_setup_ctrl(ctrl, false))
+ goto out_fail;
+
+ return;
+
+out_fail:
+ ++ctrl->nr_reconnects;
+ nvme_tcp_reconnect_or_remove(ctrl);
+}
+
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+ flush_work(&to_tcp_ctrl(ctrl)->err_work);
+ cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
+static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl->queues);
+ kfree(ctrl);
+}
+
+static void nvme_tcp_set_sg_null(struct nvme_command *c)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ sg->addr = 0;
+ sg->length = 0;
+ sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+ NVME_SGL_FMT_TRANSPORT_A;
+}
+
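+/*
+ * Inline write data lives in the command capsule at the controller's
+ * reported ICDOFF; describe it with an offset-based data block SGL.
+ */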
+static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
+ struct nvme_command *c, u32 data_len)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
+ sg->length = cpu_to_le32(data_len);
+ sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
+}
+
+static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
+ u32 data_len)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ sg->addr = 0;
+ sg->length = cpu_to_le32(data_len);
+ sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+ NVME_SGL_FMT_TRANSPORT_A;
+}
+
+static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
+ struct nvme_tcp_queue *queue = &ctrl->queues[0];
+ struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
+ struct nvme_command *cmd = &pdu->cmd;
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+
+ memset(pdu, 0, sizeof(*pdu));
+ pdu->hdr.type = nvme_tcp_cmd;
+ if (queue->hdr_digest)
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
+
+ cmd->common.opcode = nvme_admin_async_event;
+ cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+ cmd->common.flags |= NVME_CMD_SGL_METABUF;
+ nvme_tcp_set_sg_null(cmd);
+
+ ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
+ ctrl->async_req.offset = 0;
+ ctrl->async_req.curr_bio = NULL;
+ ctrl->async_req.data_len = 0;
+
+ nvme_tcp_queue_request(&ctrl->async_req, true, true);
+}
+
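+/*
+ * Stop the socket queue so no further completions race with us, then
+ * force completion of the timed-out request.
+ */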
+static void nvme_tcp_complete_timed_out(struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+
+ nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
+ nvmf_complete_timed_out_request(rq);
+}
+
+static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
+ u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+ int qid = nvme_tcp_queue_id(req->queue);
+
+ dev_warn(ctrl->device,
+ "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
+ nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
+ opc, nvme_opcode_str(qid, opc, fctype));
+
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
+ /*
+ * If we are resetting, connecting or deleting we should
+ * complete immediately because we may block the controller
+ * teardown or setup sequence:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
+ */
+ nvme_tcp_complete_timed_out(rq);
+ return BLK_EH_DONE;
+ }
+
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
+ nvme_tcp_error_recovery(ctrl);
+ return BLK_EH_RESET_TIMER;
+}
+
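+/*
+ * Pick the SGL variant for the command: a NULL descriptor when there is
+ * no data, an inline descriptor for writes that fit in the capsule, and
+ * a transport host-data descriptor otherwise.
+ */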
+static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
+ struct nvme_command *c = &pdu->cmd;
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ nvme_tcp_set_sg_null(c);
+ else if (rq_data_dir(rq) == WRITE &&
+ req->data_len <= nvme_tcp_inline_data_size(req))
+ nvme_tcp_set_sg_inline(queue, c, req->data_len);
+ else
+ nvme_tcp_set_sg_host_data(c, req->data_len);
+
+ return 0;
+}
+
+static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
+ struct nvme_tcp_queue *queue = req->queue;
+ u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
+ blk_status_t ret;
+
+ ret = nvme_setup_cmd(ns, rq);
+ if (ret)
+ return ret;
+
+ req->state = NVME_TCP_SEND_CMD_PDU;
+ req->status = cpu_to_le16(NVME_SC_SUCCESS);
+ req->offset = 0;
+ req->data_sent = 0;
+ req->pdu_len = 0;
+ req->pdu_sent = 0;
+ req->h2cdata_left = 0;
+ req->data_len = blk_rq_nr_phys_segments(rq) ?
+ blk_rq_payload_bytes(rq) : 0;
+ req->curr_bio = rq->bio;
+ if (req->curr_bio && req->data_len)
+ nvme_tcp_init_iter(req, rq_data_dir(rq));
+
+ if (rq_data_dir(rq) == WRITE &&
+ req->data_len <= nvme_tcp_inline_data_size(req))
+ req->pdu_len = req->data_len;
+
+ pdu->hdr.type = nvme_tcp_cmd;
+ pdu->hdr.flags = 0;
+ if (queue->hdr_digest)
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ if (queue->data_digest && req->pdu_len) {
+ pdu->hdr.flags |= NVME_TCP_F_DDGST;
+ ddgst = nvme_tcp_ddgst_len(queue);
+ }
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
+ pdu->hdr.plen =
+ cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
+
+ ret = nvme_tcp_map_data(queue, rq);
+ if (unlikely(ret)) {
+ nvme_cleanup_cmd(rq);
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
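+/* Kick the queue's io_work for any requests batched on the request list. */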
+static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+
+ if (!llist_empty(&queue->req_list))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+ struct request *rq = bd->rq;
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
+ blk_status_t ret;
+
+ if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
+ ret = nvme_tcp_setup_cmd_pdu(ns, rq);
+ if (unlikely(ret))
+ return ret;
+
+ nvme_start_request(rq);
+
+ nvme_tcp_queue_request(req, true, bd->last);
+
+ return BLK_STS_OK;
+}
+
+static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
+
+ nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
+}
+
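+/*
+ * blk-mq polling entry: busy-poll the socket if nothing is queued for
+ * receive yet, then reap received PDUs and report the number of
+ * completions processed.
+ */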
+static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+ struct sock *sk = queue->sock->sk;
+
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return 0;
+
+ set_bit(NVME_TCP_Q_POLLING, &queue->flags);
+ if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
+ sk_busy_loop(sk, true);
+ nvme_tcp_try_recv(queue);
+ clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
+ return queue->nr_cqe;
+}
+
+static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
+ struct sockaddr_storage src_addr;
+ int ret, len;
+
+ len = nvmf_get_address(ctrl, buf, size);
+
+ mutex_lock(&queue->queue_lock);
+
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ goto done;
+ ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+ if (ret > 0) {
+ if (len > 0)
+ len--; /* strip trailing newline */
+ len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+ (len) ? "," : "", &src_addr);
+ }
+done:
+ mutex_unlock(&queue->queue_lock);
+
+ return len;
+}
+
+static const struct blk_mq_ops nvme_tcp_mq_ops = {
+ .queue_rq = nvme_tcp_queue_rq,
+ .commit_rqs = nvme_tcp_commit_rqs,
+ .complete = nvme_complete_rq,
+ .init_request = nvme_tcp_init_request,
+ .exit_request = nvme_tcp_exit_request,
+ .init_hctx = nvme_tcp_init_hctx,
+ .timeout = nvme_tcp_timeout,
+ .map_queues = nvme_tcp_map_queues,
+ .poll = nvme_tcp_poll,
+};
+
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+ .queue_rq = nvme_tcp_queue_rq,
+ .complete = nvme_complete_rq,
+ .init_request = nvme_tcp_init_request,
+ .exit_request = nvme_tcp_exit_request,
+ .init_hctx = nvme_tcp_init_admin_hctx,
+ .timeout = nvme_tcp_timeout,
+};
+
+static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
+ .name = "tcp",
+ .module = THIS_MODULE,
+ .flags = NVME_F_FABRICS | NVME_F_BLOCKING,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .free_ctrl = nvme_tcp_free_ctrl,
+ .submit_async_event = nvme_tcp_submit_async_event,
+ .delete_ctrl = nvme_tcp_delete_ctrl,
+ .get_address = nvme_tcp_get_address,
+ .stop_ctrl = nvme_tcp_stop_ctrl,
+};
+
+static bool
+nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
+{
+ struct nvme_tcp_ctrl *ctrl;
+ bool found = false;
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
+ found = nvmf_ip_options_match(&ctrl->ctrl, opts);
+ if (found)
+ break;
+ }
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+
+ return found;
+}
+
+static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_tcp_ctrl *ctrl;
+ int ret;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ctrl->list);
+ ctrl->ctrl.opts = opts;
+ ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
+ opts->nr_poll_queues + 1;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ INIT_DELAYED_WORK(&ctrl->connect_work,
+ nvme_tcp_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
+ INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
+
+ if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ opts->trsvcid =
+ kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
+ if (!opts->trsvcid) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+ opts->mask |= NVMF_OPT_TRSVCID;
+ }
+
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
+ opts->traddr, opts->trsvcid, &ctrl->addr);
+ if (ret) {
+ pr_err("malformed address passed: %s:%s\n",
+ opts->traddr, opts->trsvcid);
+ goto out_free_ctrl;
+ }
+
+ if (opts->mask & NVMF_OPT_HOST_TRADDR) {
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
+ opts->host_traddr, NULL, &ctrl->src_addr);
+ if (ret) {
+ pr_err("malformed src address passed: %s\n",
+ opts->host_traddr);
+ goto out_free_ctrl;
+ }
+ }
+
+ if (opts->mask & NVMF_OPT_HOST_IFACE) {
+ if (!__dev_get_by_name(&init_net, opts->host_iface)) {
+ pr_err("invalid interface passed: %s\n",
+ opts->host_iface);
+ ret = -ENODEV;
+ goto out_free_ctrl;
+ }
+ }
+
+ if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
+ ret = -EALREADY;
+ goto out_free_ctrl;
+ }
+
+ ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
+ if (ret)
+ goto out_kfree_queues;
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ WARN_ON_ONCE(1);
+ ret = -EINTR;
+ goto out_uninit_ctrl;
+ }
+
+ ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
+ if (ret)
+ goto out_uninit_ctrl;
+
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+
+ return &ctrl->ctrl;
+
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvmf_transport_ops nvme_tcp_transport = {
+ .name = "tcp",
+ .module = THIS_MODULE,
+ .required_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+ NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
+ NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
+ NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
+ NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
+ .create_ctrl = nvme_tcp_create_ctrl,
+};
+
+static int __init nvme_tcp_init_module(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
+
+ nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!nvme_tcp_wq)
+ return -ENOMEM;
+
+ nvmf_register_transport(&nvme_tcp_transport);
+ return 0;
+}
+
+static void __exit nvme_tcp_cleanup_module(void)
+{
+ struct nvme_tcp_ctrl *ctrl;
+
+ nvmf_unregister_transport(&nvme_tcp_transport);
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
+ nvme_delete_ctrl(&ctrl->ctrl);
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+ flush_workqueue(nvme_delete_wq);
+
+ destroy_workqueue(nvme_tcp_wq);
+}
+
+module_init(nvme_tcp_init_module);
+module_exit(nvme_tcp_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 0000000000..1c36fcedea
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+
+ trace_seq_printf(p, "sqid=%u", sqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+ sqid, qsize, sq_flags, cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_delete_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+
+ trace_seq_printf(p, "cqid=%u", cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+ cqid, qsize, cq_flags, irq_vector);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_set_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sv = cdw10[3] & 0x8;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_get_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sel = cdw10[1] & 0x7;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_get_lba_status(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 mndw = get_unaligned_le32(cdw10 + 8);
+ u16 rl = get_unaligned_le16(cdw10 + 12);
+ u8 atype = cdw10[15];
+
+ trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
+ slba, mndw, rl, atype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 lbaf = cdw10[0] & 0xF;
+ u8 mset = (cdw10[0] >> 4) & 0x1;
+ u8 pi = (cdw10[0] >> 5) & 0x7;
+ u8 pil = cdw10[1] & 0x1;
+ u8 ses = (cdw10[1] >> 1) & 0x7;
+
+ trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
+ lbaf, mset, pi, pil, ses);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(cdw10 + 8);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ u8 pr = cdw10[14];
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
+ slba, numd, zra, zrasf, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
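+/* Fallback decoder: dump the raw 24 bytes of cdw10 through cdw15. */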
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_delete_sq:
+ return nvme_trace_delete_sq(p, cdw10);
+ case nvme_admin_create_sq:
+ return nvme_trace_create_sq(p, cdw10);
+ case nvme_admin_delete_cq:
+ return nvme_trace_delete_cq(p, cdw10);
+ case nvme_admin_create_cq:
+ return nvme_trace_create_cq(p, cdw10);
+ case nvme_admin_identify:
+ return nvme_trace_admin_identify(p, cdw10);
+ case nvme_admin_set_features:
+ return nvme_trace_admin_set_features(p, cdw10);
+ case nvme_admin_get_features:
+ return nvme_trace_admin_get_features(p, cdw10);
+ case nvme_admin_get_lba_status:
+ return nvme_trace_get_lba_status(p, cdw10);
+ case nvme_admin_format_nvm:
+ return nvme_trace_admin_format_nvm(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
+ return nvme_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvme_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvme_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvme_trace_zone_mgmt_recv(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+static const char *nvme_trace_fabrics_property_set(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+ u64 value = get_unaligned_le64(spc + 8);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
+ attrib, ofst, value);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_connect(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 recfmt = get_unaligned_le16(spc);
+ u16 qid = get_unaligned_le16(spc + 2);
+ u16 sqsize = get_unaligned_le16(spc + 4);
+ u8 cattr = spc[6];
+ u32 kato = get_unaligned_le32(spc + 8);
+
+ trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
+ recfmt, qid, sqsize, cattr, kato);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "specific=%*ph", 24, spc);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
+ u8 fctype, u8 *spc)
+{
+ switch (fctype) {
+ case nvme_fabrics_type_property_set:
+ return nvme_trace_fabrics_property_set(p, spc);
+ case nvme_fabrics_type_connect:
+ return nvme_trace_fabrics_connect(p, spc);
+ case nvme_fabrics_type_property_get:
+ return nvme_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvme_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvme_trace_fabrics_auth_receive(p, spc);
+ default:
+ return nvme_trace_fabrics_common(p, spc);
+ }
+}
+
+const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (*name)
+ trace_seq_printf(p, "disk=%s, ", name);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(nvme_sq);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 0000000000..4fb5922ffd
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ u8 *spc);
+
+#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \
+ ((opcode) == nvme_fabrics_command ? \
+ nvme_trace_parse_fabrics_cmd(p, fctype, cdw10) : \
+ ((qid) ? \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)))
+
+const char *nvme_trace_disk_name(struct trace_seq *p, char *name);
+#define __print_disk_name(name) \
+ nvme_trace_disk_name(p, name)
+
+#ifndef TRACE_HEADER_MULTI_READ
+static inline void __assign_disk_name(char *name, struct gendisk *disk)
+{
+ if (disk)
+ memcpy(name, disk->disk_name, DISK_NAME_LEN);
+ else
+ memset(name, 0, DISK_NAME_LEN);
+}
+#endif
+
+TRACE_EVENT(nvme_setup_cmd,
+ TP_PROTO(struct request *req, struct nvme_command *cmd),
+ TP_ARGS(req, cmd),
+ TP_STRUCT__entry(
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, ctrl_id)
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u8, fctype)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(bool, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __entry->qid = nvme_req_qid(req);
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = !!blk_integrity_rq(req);
+ __entry->fctype = cmd->fabrics.fctype;
+ __assign_disk_name(__entry->disk, req->q->disk);
+ memcpy(__entry->cdw10, &cmd->common.cdws,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->nsid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->qid, __entry->opcode,
+ __entry->fctype),
+ parse_nvme_cmd(__entry->qid, __entry->opcode,
+ __entry->fctype, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+ TP_PROTO(struct request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, ctrl_id)
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __entry->qid = nvme_req_qid(req);
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ __assign_disk_name(__entry->disk, req->q->disk);
+ ),
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->result,
+ __entry->retries, __entry->flags, __entry->status)
+
+);
+
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvme_async_event,
+ TP_PROTO(struct nvme_ctrl *ctrl, u32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->instance;
+ __entry->result = result;
+ ),
+ TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result & 0x7,
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_NOTICE),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+
+#undef aer_name
+
+TRACE_EVENT(nvme_sq,
+ TP_PROTO(struct request *req, __le16 sq_head, int sq_tail),
+ TP_ARGS(req, sq_head, sq_tail),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(u16, sq_head)
+ __field(u16, sq_tail)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __assign_disk_name(__entry->disk, req->q->disk);
+ __entry->qid = nvme_req_qid(req);
+ __entry->sq_head = le16_to_cpu(sq_head);
+ __entry->sq_tail = sq_tail;
+ ),
+ TP_printk("nvme%d: %sqid=%d, head=%u, tail=%u",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
+ __entry->qid, __entry->sq_head, __entry->sq_tail
+ )
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
new file mode 100644
index 0000000000..ec8557810c
--- /dev/null
+++ b/drivers/nvme/host/zns.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+#include "nvme.h"
+
+int nvme_revalidate_zones(struct nvme_ns *ns)
+{
+ struct request_queue *q = ns->queue;
+
+ blk_queue_chunk_sectors(q, ns->zsze);
+ blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+
+ return blk_revalidate_disk_zones(ns->disk, NULL);
+}
+
+static int nvme_set_max_append(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c = { };
+ struct nvme_id_ctrl_zns *id;
+ int status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (status) {
+ kfree(id);
+ return status;
+ }
+
+ if (id->zasl)
+ ctrl->max_zone_append = 1 << (id->zasl + 3);
+ else
+ ctrl->max_zone_append = ctrl->max_hw_sectors;
+ kfree(id);
+ return 0;
+}
+
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+{
+ struct nvme_effects_log *log = ns->head->effects;
+ struct request_queue *q = ns->queue;
+ struct nvme_command c = { };
+ struct nvme_id_ns_zns *id;
+ int status;
+
+ /* Driver requires zone append support */
+ if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
+ NVME_CMD_EFFECTS_CSUPP)) {
+ if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags))
+ dev_warn(ns->ctrl->device,
+ "Zone Append supported for zoned namespace:%d. Remove read-only mode\n",
+ ns->head->ns_id);
+ } else {
+ set_bit(NVME_NS_FORCE_RO, &ns->flags);
+ dev_warn(ns->ctrl->device,
+ "Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n",
+ ns->head->ns_id);
+ }
+
+ /* Lazily query controller append limit for the first zoned namespace */
+ if (!ns->ctrl->max_zone_append) {
+ status = nvme_set_max_append(ns->ctrl);
+ if (status)
+ return status;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.cns = NVME_ID_CNS_CS_NS;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
+ if (status)
+ goto free_data;
+
+ /*
+ * We currently do not handle devices requiring any of the zoned
+ * operation characteristics.
+ */
+ if (id->zoc) {
+ dev_warn(ns->ctrl->device,
+ "zone operations:%x not supported for namespace:%u\n",
+ le16_to_cpu(id->zoc), ns->head->ns_id);
+ status = -ENODEV;
+ goto free_data;
+ }
+
+ ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+ if (!is_power_of_2(ns->zsze)) {
+ dev_warn(ns->ctrl->device,
+ "invalid zone size:%llu for namespace:%u\n",
+ ns->zsze, ns->head->ns_id);
+ status = -ENODEV;
+ goto free_data;
+ }
+
+ disk_set_zoned(ns->disk, BLK_ZONED_HM);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
+ disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
+free_data:
+ kfree(id);
+ return status;
+}
+
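+/*
+ * Size the zone report buffer from the number of zones requested, clamp
+ * it to the queue's transfer limits, and halve it on allocation failure
+ * until it drops below the single-descriptor minimum.
+ */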
+static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
+ unsigned int nr_zones, size_t *buflen)
+{
+ struct request_queue *q = ns->disk->queue;
+ size_t bufsize;
+ void *buf;
+
+ const size_t min_bufsize = sizeof(struct nvme_zone_report) +
+ sizeof(struct nvme_zone_descriptor);
+
+ nr_zones = min_t(unsigned int, nr_zones,
+ get_capacity(ns->disk) >> ilog2(ns->zsze));
+
+ bufsize = sizeof(struct nvme_zone_report) +
+ nr_zones * sizeof(struct nvme_zone_descriptor);
+ bufsize = min_t(size_t, bufsize,
+ queue_max_hw_sectors(q) << SECTOR_SHIFT);
+ bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+
+ while (bufsize >= min_bufsize) {
+ buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
+ return NULL;
+}
+
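+/*
+ * Translate one ZNS zone descriptor into a struct blk_zone and hand it
+ * to the report_zones callback.
+ */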
+static int nvme_zone_parse_entry(struct nvme_ns *ns,
+ struct nvme_zone_descriptor *entry,
+ unsigned int idx, report_zones_cb cb,
+ void *data)
+{
+ struct blk_zone zone = { };
+
+ if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
+ dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+ entry->zt);
+ return -EINVAL;
+ }
+
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone.cond = entry->zs >> 4;
+ zone.len = ns->zsze;
+ zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
+ zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+ if (zone.cond == BLK_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+ else
+ zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+
+ return cb(&zone, idx, data);
+}
+
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_zone_report *report;
+ struct nvme_command c = { };
+ int ret, zone_idx = 0;
+ unsigned int nz, i;
+ size_t buflen;
+
+ if (ns->head->ids.csi != NVME_CSI_ZNS)
+ return -EINVAL;
+
+ report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
+ if (!report)
+ return -ENOMEM;
+
+ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+
+ sector &= ~(ns->zsze - 1);
+ while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
+ memset(report, 0, buflen);
+
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out_free;
+ }
+
+ nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ if (!nz)
+ break;
+
+ for (i = 0; i < nz && zone_idx < nr_zones; i++) {
+ ret = nvme_zone_parse_entry(ns, &report->entries[i],
+ zone_idx, cb, data);
+ if (ret)
+ goto out_free;
+ zone_idx++;
+ }
+
+ sector += ns->zsze * nz;
+ }
+
+ if (zone_idx > 0)
+ ret = zone_idx;
+ else
+ ret = -EINVAL;
+out_free:
+ kvfree(report);
+ return ret;
+}
+
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *c, enum nvme_zone_mgmt_action action)
+{
+ memset(c, 0, sizeof(*c));
+
+ c->zms.opcode = nvme_cmd_zone_mgmt_send;
+ c->zms.nsid = cpu_to_le32(ns->head->ns_id);
+ c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ c->zms.zsa = action;
+
+ if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
+ c->zms.select_all = 1;
+
+ return BLK_STS_OK;
+}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
new file mode 100644
index 0000000000..79fc64035e
--- /dev/null
+++ b/drivers/nvme/target/Kconfig
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_TARGET
+ tristate "NVMe Target support"
+ depends on BLOCK
+ depends on CONFIGFS_FS
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
+ select SGL_ALLOC
+ help
+ This enables target side support for the NVMe protocol, that is,
+ it allows the Linux kernel to implement NVMe subsystems and
+ controllers and export Linux block devices as NVMe namespaces.
+ You need to select at least one of the transports below to make this
+ functionality useful.
+
+ To configure the NVMe target you probably want to use the nvmetcli
+ tool from http://git.infradead.org/users/hch/nvmetcli.git.
+
+config NVME_TARGET_PASSTHRU
+ bool "NVMe Target Passthrough support"
+ depends on NVME_TARGET
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+ This enables target side NVMe passthru controller support for the
+ NVMe Over Fabrics protocol. It allows for hosts to manage and
+ directly access an actual NVMe controller residing on the target
+ side, including executing Vendor Unique Commands.
+
+ If unsure, say N.
+
+config NVME_TARGET_LOOP
+ tristate "NVMe loopback device support"
+ depends on NVME_TARGET
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This enables the NVMe loopback device support, which can be useful
+ to test NVMe host and target side features.
+
+ If unsure, say N.
+
+config NVME_TARGET_RDMA
+ tristate "NVMe over Fabrics RDMA target support"
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+ depends on NVME_TARGET
+ select SGL_ALLOC
+ help
+ This enables the NVMe RDMA target support, which allows exporting NVMe
+ devices over RDMA.
+
+ If unsure, say N.
+
+config NVME_TARGET_FC
+ tristate "NVMe over Fabrics FC target driver"
+ depends on NVME_TARGET
+ depends on HAS_DMA
+ select SGL_ALLOC
+ help
+ This enables the NVMe FC target support, which allows exporting NVMe
+ devices over FC.
+
+ If unsure, say N.
+
+config NVME_TARGET_FCLOOP
+ tristate "NVMe over Fabrics FC Transport Loopback Test driver"
+ depends on NVME_TARGET
+ select NVME_FABRICS
+ select SG_POOL
+ depends on NVME_FC
+ depends on NVME_TARGET_FC
+ help
+ This enables the NVMe FC loopback test support, which can be useful
+ to test NVMe-FC transport interfaces.
+
+ If unsure, say N.
+
+config NVME_TARGET_TCP
+ tristate "NVMe over Fabrics TCP target support"
+ depends on INET
+ depends on NVME_TARGET
+ help
+ This enables the NVMe TCP target support, which allows exporting NVMe
+ devices over TCP.
+
+ If unsure, say N.
+
+config NVME_TARGET_AUTH
+ bool "NVMe over Fabrics In-band Authentication support"
+ depends on NVME_TARGET
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This enables support for NVMe over Fabrics In-band Authentication.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
new file mode 100644
index 0000000000..c668201024
--- /dev/null
+++ b/drivers/nvme/target/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_TARGET) += nvmet.o
+obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
+obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
+obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
+obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
+obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
+
+nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
+ discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
+nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
+nvme-loop-y += loop.o
+nvmet-rdma-y += rdma.o
+nvmet-fc-y += fc.o
+nvme-fcloop-y += fcloop.o
+nvmet-tcp-y += tcp.o
+nvmet-$(CONFIG_TRACING) += trace.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
new file mode 100644
index 0000000000..39cb570f83
--- /dev/null
+++ b/drivers/nvme/target/admin-cmd.c
@@ -0,0 +1,1045 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe admin command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/part_stat.h>
+
+#include <generated/utsrelease.h>
+#include <asm/unaligned.h>
+#include "nvmet.h"
+
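+/*
+ * NUMD spans the NUMDU/NUMDL fields; reassemble the 0's based dword
+ * count and convert it to a byte length.
+ */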
+u32 nvmet_get_log_page_len(struct nvme_command *cmd)
+{
+ u32 len = le16_to_cpu(cmd->get_log_page.numdu);
+
+ len <<= 16;
+ len += le16_to_cpu(cmd->get_log_page.numdl);
+ /* NUMD is a 0's based value */
+ len += 1;
+ len *= sizeof(u32);
+
+ return len;
+}
+
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_HOST_ID:
+ return sizeof(req->sq->ctrl->hostid);
+ default:
+ return 0;
+ }
+}
+
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+ return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
+static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
+{
+ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
+}
+
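+/*
+ * Copy out the error log slots, walking the circular buffer backwards
+ * from the slot indexed by the current error counter.
+ */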
+static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ unsigned long flags;
+ off_t offset = 0;
+ u64 slot;
+ u64 i;
+
+ spin_lock_irqsave(&ctrl->error_lock, flags);
+ slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
+
+ for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
+ if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
+ sizeof(struct nvme_error_slot)))
+ break;
+
+ if (slot == 0)
+ slot = NVMET_ERROR_LOG_SLOTS - 1;
+ else
+ slot--;
+ offset += sizeof(struct nvme_error_slot);
+ }
+ spin_unlock_irqrestore(&ctrl->error_lock, flags);
+ nvmet_req_complete(req, 0);
+}
+
+static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u64 host_reads, host_writes, data_units_read, data_units_written;
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
+
+ /* we don't have the right data for file backed ns */
+ if (!req->ns->bdev)
+ return NVME_SC_SUCCESS;
+
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
+ data_units_read =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
+ data_units_written =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u64 host_reads = 0, host_writes = 0;
+ u64 data_units_read = 0, data_units_written = 0;
+ struct nvmet_ns *ns;
+ struct nvmet_ctrl *ctrl;
+ unsigned long idx;
+
+ ctrl = req->sq->ctrl;
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ /* we don't have the right data for file backed ns */
+ if (!ns->bdev)
+ continue;
+ host_reads += part_stat_read(ns->bdev, ios[READ]);
+ data_units_read += DIV_ROUND_UP(
+ part_stat_read(ns->bdev, sectors[READ]), 1000);
+ host_writes += part_stat_read(ns->bdev, ios[WRITE]);
+ data_units_written += DIV_ROUND_UP(
+ part_stat_read(ns->bdev, sectors[WRITE]), 1000);
+ }
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
+{
+ struct nvme_smart_log *log;
+ u16 status = NVME_SC_INTERNAL;
+ unsigned long flags;
+
+ if (req->transfer_len != sizeof(*log))
+ goto out;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto out;
+
+ if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
+ status = nvmet_get_smart_log_all(req, log);
+ else
+ status = nvmet_get_smart_log_nsid(req, log);
+ if (status)
+ goto out_free_log;
+
+ spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
+ put_unaligned_le64(req->sq->ctrl->err_counter,
+ &log->num_err_log_entries);
+ spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+out_free_log:
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+{
+ log->acs[nvme_admin_get_log_page] =
+ log->acs[nvme_admin_identify] =
+ log->acs[nvme_admin_abort_cmd] =
+ log->acs[nvme_admin_set_features] =
+ log->acs[nvme_admin_get_features] =
+ log->acs[nvme_admin_async_event] =
+ log->acs[nvme_admin_keep_alive] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+
+ log->iocs[nvme_cmd_read] =
+ log->iocs[nvme_cmd_flush] =
+ log->iocs[nvme_cmd_dsm] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ log->iocs[nvme_cmd_write] =
+ log->iocs[nvme_cmd_write_zeroes] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
+}
+
+static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
+{
+ log->iocs[nvme_cmd_zone_append] =
+ log->iocs[nvme_cmd_zone_mgmt_send] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_zone_mgmt_recv] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+}
+
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+ struct nvme_effects_log *log;
+ u16 status = NVME_SC_SUCCESS;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ switch (req->cmd->get_log_page.csi) {
+ case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_nvm(log);
+ break;
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ status = NVME_SC_INVALID_IO_CMD_SET;
+ goto free;
+ }
+ nvmet_get_cmd_effects_nvm(log);
+ nvmet_get_cmd_effects_zns(log);
+ break;
+ default:
+ status = NVME_SC_INVALID_LOG_PAGE;
+ goto free;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+free:
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_INTERNAL;
+ size_t len;
+
+ if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+ goto out;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_changed_ns == U32_MAX)
+ len = sizeof(__le32);
+ else
+ len = ctrl->nr_changed_ns * sizeof(__le32);
+ status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
+ if (!status)
+ status = nvmet_zero_sgl(req, len, req->transfer_len - len);
+ ctrl->nr_changed_ns = 0;
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
+ mutex_unlock(&ctrl->lock);
+out:
+ nvmet_req_complete(req, status);
+}
+
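+/*
+ * Fill in one ANA group descriptor; the per-namespace NSID list is
+ * omitted when the host set the RGO (return groups only) flag.
+ */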
+static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
+ struct nvme_ana_group_desc *desc)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+ u32 count = 0;
+
+ if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ if (ns->anagrpid == grpid)
+ desc->nsids[count++] = cpu_to_le32(ns->nsid);
+ }
+
+ desc->grpid = cpu_to_le32(grpid);
+ desc->nnsids = cpu_to_le32(count);
+ desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+ desc->state = req->port->ana_state[grpid];
+ memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
+ return struct_size(desc, nsids, count);
+}
+
+static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
+{
+ struct nvme_ana_rsp_hdr hdr = { 0, };
+ struct nvme_ana_group_desc *desc;
+ size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
+ size_t len;
+ u32 grpid;
+ u16 ngrps = 0;
+ u16 status;
+
+ status = NVME_SC_INTERNAL;
+ desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
+ GFP_KERNEL);
+ if (!desc)
+ goto out;
+
+ down_read(&nvmet_ana_sem);
+ for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (!nvmet_ana_group_enabled[grpid])
+ continue;
+ len = nvmet_format_ana_group(req, grpid, desc);
+ status = nvmet_copy_to_sgl(req, offset, desc, len);
+ if (status)
+ break;
+ offset += len;
+ ngrps++;
+ }
+ for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (nvmet_ana_group_enabled[grpid])
+ ngrps++;
+ }
+
+ hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+ hdr.ngrps = cpu_to_le16(ngrps);
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
+ up_read(&nvmet_ana_sem);
+
+ kfree(desc);
+
+ /* copy the header last once we know the number of groups */
+ status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
+ return;
+
+ switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_ERROR:
+ return nvmet_execute_get_log_page_error(req);
+ case NVME_LOG_SMART:
+ return nvmet_execute_get_log_page_smart(req);
+ case NVME_LOG_FW_SLOT:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ return nvmet_execute_get_log_page_noop(req);
+ case NVME_LOG_CHANGED_NS:
+ return nvmet_execute_get_log_changed_ns(req);
+ case NVME_LOG_CMD_EFFECTS:
+ return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ANA:
+ return nvmet_execute_get_log_page_ana(req);
+ }
+ pr_debug("unhandled lid %d on qid %d\n",
+ req->cmd->get_log_page.lid, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_subsys *subsys = ctrl->subsys;
+ struct nvme_id_ctrl *id;
+ u32 cmd_capsule_size;
+ u16 status = 0;
+
+ if (!subsys->subsys_discovered) {
+ mutex_lock(&subsys->lock);
+ subsys->subsys_discovered = true;
+ mutex_unlock(&subsys->lock);
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /* XXX: figure out how to assign real vendor IDs. */
+ id->vid = 0;
+ id->ssvid = 0;
+
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
+ memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
+ strlen(subsys->model_number), ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');
+
+ put_unaligned_le24(subsys->ieee_oui, id->ieee);
+
+ id->rab = 6;
+
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ id->cntrltype = NVME_CTRL_DISC;
+ else
+ id->cntrltype = NVME_CTRL_IO;
+
+ /* we support multiple ports, multiple hosts and ANA: */
+ id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
+ NVME_CTRL_CMIC_ANA;
+
+ /* Limit MDTS according to transport capability */
+ if (ctrl->ops->get_mdts)
+ id->mdts = ctrl->ops->get_mdts(ctrl);
+ else
+ id->mdts = 0;
+
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+ /* XXX: figure out what to do about RTD3R/RTD3 */
+ id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
+ id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
+ NVME_CTRL_ATTR_TBKAS);
+
+ id->oacs = 0;
+
+ /*
+ * We don't really have a practical limit on the number of abort
+ * commands. But we don't do anything useful for abort either, so
+ * no point in allowing more abort commands than the spec requires.
+ */
+ id->acl = 3;
+
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+ /* first slot is read-only, only one slot supported */
+ id->frmw = (1 << 0) | (1 << 1);
+ id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
+ id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
+ id->npss = 0;
+
+ /* We support keep-alive timeout in granularity of seconds */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
+ id->sqes = (0x6 << 4) | 0x6;
+ id->cqes = (0x4 << 4) | 0x4;
+
+ /* no enforced soft limit for maxcmd - pick an arbitrary high value */
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
+ id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
+ id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
+ NVME_CTRL_ONCS_WRITE_ZEROES);
+
+ /* XXX: don't report vwc if the underlying device is write through */
+ id->vwc = NVME_CTRL_VWC_PRESENT;
+
+ /*
+ * We can't support atomic writes bigger than an LBA without support
+ * from the backend device.
+ */
+ id->awun = 0;
+ id->awupf = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (req->port->inline_data_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+
+ /*
+ * Max command capsule size is sqe + in-capsule data size.
+ * Disable in-capsule data for Metadata capable controllers.
+ */
+ cmd_capsule_size = sizeof(struct nvme_command);
+ if (!ctrl->pi_support)
+ cmd_capsule_size += req->port->inline_data_size;
+ id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
+
+ /* Max response capsule size is cqe */
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
+ id->anatt = 10; /* random value */
+ id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
+ id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
+
+ /*
+ * Meh, we don't really support any power state. Fake up the same
+ * values that qemu does.
+ */
+ id->psd[0].max_power = cpu_to_le16(0x9c4);
+ id->psd[0].entry_lat = cpu_to_le32(0x10);
+ id->psd[0].exit_lat = cpu_to_le32(0x4);
+
+ id->nwpc = 1 << 0; /* write protect and no write protect */
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ns(struct nvmet_req *req)
+{
+ struct nvme_id_ns *id;
+ u16 status;
+
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /* return an all zeroed buffer if we can't find an active namespace */
+ status = nvmet_req_find_ns(req);
+ if (status) {
+ status = 0;
+ goto done;
+ }
+
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
+
+ /*
+ * nuse = ncap = nsze isn't always true, but we have no way to find
+ * that out from the underlying device.
+ */
+ id->ncap = id->nsze =
+ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+ switch (req->port->ana_state[req->ns->anagrpid]) {
+ case NVME_ANA_INACCESSIBLE:
+ case NVME_ANA_PERSISTENT_LOSS:
+ break;
+ default:
+ id->nuse = id->nsze;
+ break;
+ }
+
+ if (req->ns->bdev)
+ nvmet_bdev_set_limits(req->ns->bdev, id);
+
+ /*
+ * We just provide a single LBA format that matches what the
+ * underlying device reports.
+ */
+ id->nlbaf = 0;
+ id->flbas = 0;
+
+ /*
+ * Our namespace might always be shared. Not just with other
+ * controllers, but also with any other user of the block device.
+ */
+ id->nmic = NVME_NS_NMIC_SHARED;
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+
+ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
+
+ id->lbaf[0].ds = req->ns->blksize_shift;
+
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+ id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+ NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+ NVME_NS_DPC_PI_TYPE3;
+ id->mc = NVME_MC_EXTENDED_LBA;
+ id->dps = req->ns->pi_type;
+ id->flbas = NVME_NS_FLBAS_META_EXT;
+ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
+ }
+
+ if (req->ns->readonly)
+ id->nsattr |= NVME_NS_ATTR_RO;
+done:
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_nslist(struct nvmet_req *req)
+{
+ static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+ u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
+ __le32 *list;
+ u16 status = 0;
+ int i = 0;
+
+ list = kzalloc(buf_size, GFP_KERNEL);
+ if (!list) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
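+ /*
+ * Return the active NSIDs greater than the one in the command, in
+ * ascending order, up to the 4k identify buffer (1024 entries).
+ */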
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->nsid <= min_nsid)
+ continue;
+ list[i++] = cpu_to_le32(ns->nsid);
+ if (i == buf_size / sizeof(__le32))
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+
+ kfree(list);
+out:
+ nvmet_req_complete(req, status);
+}
+
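+ /*
+ * Emit a single Namespace Identification Descriptor: a header carrying
+ * the identifier type and length, followed by the identifier itself.
+ */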
+static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
+ void *id, off_t *off)
+{
+ struct nvme_ns_id_desc desc = {
+ .nidt = type,
+ .nidl = len,
+ };
+ u16 status;
+
+ status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
+ if (status)
+ return status;
+ *off += sizeof(desc);
+
+ status = nvmet_copy_to_sgl(req, *off, id, len);
+ if (status)
+ return status;
+ *off += len;
+
+ return 0;
+}
+
+static void nvmet_execute_identify_desclist(struct nvmet_req *req)
+{
+ off_t off = 0;
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
+ status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
+ NVME_NIDT_UUID_LEN,
+ &req->ns->uuid, &off);
+ if (status)
+ goto out;
+ }
+ if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
+ status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
+ NVME_NIDT_NGUID_LEN,
+ &req->ns->nguid, &off);
+ if (status)
+ goto out;
+ }
+
+ status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
+ NVME_NIDT_CSI_LEN,
+ &req->ns->csi, &off);
+ if (status)
+ goto out;
+
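+ /* Zero-fill the rest of the buffer to terminate the descriptor list. */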
+ if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
+ off) != NVME_IDENTIFY_DATA_SIZE - off)
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
+{
+ /* Not supported: return zeroes */
+ nvmet_req_complete(req,
+ nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
+}
+
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_NS:
+ nvmet_execute_identify_ns(req);
+ return;
+ case NVME_ID_CNS_CTRL:
+ nvmet_execute_identify_ctrl(req);
+ return;
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
+ nvmet_execute_identify_nslist(req);
+ return;
+ case NVME_ID_CNS_NS_DESC_LIST:
+ nvmet_execute_identify_desclist(req);
+ return;
+ case NVME_ID_CNS_CS_NS:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ /* Not supported */
+ break;
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ nvmet_execute_identify_ns_zns(req);
+ return;
+ }
+ break;
+ }
+ break;
+ case NVME_ID_CNS_CS_CTRL:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ nvmet_execute_identify_ctrl_nvm(req);
+ return;
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ nvmet_execute_identify_ctrl_zns(req);
+ return;
+ }
+ break;
+ }
+ break;
+ }
+
+ pr_debug("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+/*
+ * A "minimum viable" abort implementation: the command is mandatory in the
+ * spec, but we are not required to do any useful work. We couldn't really
+ * do a useful abort, so don't bother waiting for the command to be
+ * executed and return immediately, reporting that the command to abort
+ * was not found.
+ */
+static void nvmet_execute_abort(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+ nvmet_set_result(req, 1);
+ nvmet_req_complete(req, 0);
+}
+
+static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
+{
+ u16 status;
+
+ if (req->ns->file)
+ status = nvmet_file_flush(req);
+ else
+ status = nvmet_bdev_flush(req);
+
+ if (status)
+ pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
+ return status;
+}
+
+static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
+{
+ u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
+
+ mutex_lock(&subsys->lock);
+ switch (write_protect) {
+ case NVME_NS_WRITE_PROTECT:
+ req->ns->readonly = true;
+ status = nvmet_write_protect_flush_sync(req);
+ if (status)
+ req->ns->readonly = false;
+ break;
+ case NVME_NS_NO_WRITE_PROTECT:
+ req->ns->readonly = false;
+ status = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (!status)
+ nvmet_ns_changed(subsys, req->ns->nsid);
+ mutex_unlock(&subsys->lock);
+ return status;
+}
+
+u16 nvmet_set_feat_kato(struct nvmet_req *req)
+{
+ u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+ nvmet_stop_keep_alive_timer(req->sq->ctrl);
+ req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+ nvmet_start_keep_alive_timer(req->sq->ctrl);
+
+ nvmet_set_result(req, req->sq->ctrl->kato);
+
+ return 0;
+}
+
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
+{
+ u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+ if (val32 & ~mask) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+ nvmet_set_result(req, val32);
+
+ return 0;
+}
+
+void nvmet_execute_set_features(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ u16 status = 0;
+ u16 nsqr;
+ u16 ncqr;
+
+ if (!nvmet_check_data_len_lte(req, 0))
+ return;
+
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_NUM_QUEUES:
+ ncqr = (cdw11 >> 16) & 0xffff;
+ nsqr = cdw11 & 0xffff;
+ if (ncqr == 0xffff || nsqr == 0xffff) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
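+ /*
+ * Queue counts are 0's based; always report our fixed number of
+ * I/O queues regardless of the value requested by the host.
+ */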
+ nvmet_set_result(req,
+ (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
+ break;
+ case NVME_FEAT_KATO:
+ status = nvmet_set_feat_kato(req);
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
+ break;
+ case NVME_FEAT_HOST_ID:
+ status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ break;
+ case NVME_FEAT_WRITE_PROTECT:
+ status = nvmet_set_feat_write_protect(req);
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u32 result;
+
+ result = nvmet_req_find_ns(req);
+ if (result)
+ return result;
+
+ mutex_lock(&subsys->lock);
+ if (req->ns->readonly)
+ result = NVME_NS_WRITE_PROTECT;
+ else
+ result = NVME_NS_NO_WRITE_PROTECT;
+ nvmet_set_result(req, result);
+ mutex_unlock(&subsys->lock);
+
+ return 0;
+}
+
+void nvmet_get_feat_kato(struct nvmet_req *req)
+{
+ nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+}
+
+void nvmet_get_feat_async_event(struct nvmet_req *req)
+{
+ nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+}
+
+void nvmet_execute_get_features(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u16 status = 0;
+
+ if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
+ return;
+
+ switch (cdw10 & 0xff) {
+ /*
+ * These features are mandatory in the spec, but we don't
+ * have a useful way to implement them. We'll eventually
+ * need to come up with some fake values for these.
+ */
+#if 0
+ case NVME_FEAT_ARBITRATION:
+ break;
+ case NVME_FEAT_POWER_MGMT:
+ break;
+ case NVME_FEAT_TEMP_THRESH:
+ break;
+ case NVME_FEAT_ERR_RECOVERY:
+ break;
+ case NVME_FEAT_IRQ_COALESCE:
+ break;
+ case NVME_FEAT_IRQ_CONFIG:
+ break;
+ case NVME_FEAT_WRITE_ATOMIC:
+ break;
+#endif
+ case NVME_FEAT_ASYNC_EVENT:
+ nvmet_get_feat_async_event(req);
+ break;
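+ /*
+ * Always report the volatile write cache as enabled; flushes are
+ * passed through to the backing block device or file.
+ */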
+ case NVME_FEAT_VOLATILE_WC:
+ nvmet_set_result(req, 1);
+ break;
+ case NVME_FEAT_NUM_QUEUES:
+ nvmet_set_result(req,
+ (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
+ break;
+ case NVME_FEAT_KATO:
+ nvmet_get_feat_kato(req);
+ break;
+ case NVME_FEAT_HOST_ID:
+ /* need 128-bit host identifier flag */
+ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw11);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
+ sizeof(req->sq->ctrl->hostid));
+ break;
+ case NVME_FEAT_WRITE_PROTECT:
+ status = nvmet_get_feat_write_protect(req);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_execute_async_event(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+ return;
+ }
+ ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
+ mutex_unlock(&ctrl->lock);
+
+ queue_work(nvmet_wq, &ctrl->async_event_work);
+}
+
+void nvmet_execute_keep_alive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = 0;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ if (!ctrl->kato) {
+ status = NVME_SC_KA_TIMEOUT_INVALID;
+ goto out;
+ }
+
+ pr_debug("ctrl %d update keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+out:
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 ret;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_admin_cmd(req);
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
+ return nvmet_parse_discovery_cmd(req);
+
+ ret = nvmet_check_ctrl_status(req);
+ if (unlikely(ret))
+ return ret;
+
+ if (nvmet_is_passthru_req(req))
+ return nvmet_parse_passthru_admin_cmd(req);
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
+ case nvme_admin_identify:
+ req->execute = nvmet_execute_identify;
+ return 0;
+ case nvme_admin_abort_cmd:
+ req->execute = nvmet_execute_abort;
+ return 0;
+ case nvme_admin_set_features:
+ req->execute = nvmet_execute_set_features;
+ return 0;
+ case nvme_admin_get_features:
+ req->execute = nvmet_execute_get_features;
+ return 0;
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ return 0;
+ case nvme_admin_keep_alive:
+ req->execute = nvmet_execute_keep_alive;
+ return 0;
+ default:
+ return nvmet_report_invalid_opcode(req);
+ }
+}
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 0000000000..4dcddcf952
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl)
+{
+ unsigned char key_hash;
+ char *dhchap_secret;
+
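+ /*
+ * Secrets have the form "DHHC-1:<t>:<base64 key>:", where <t> selects
+ * the hash used to transform the key: 0 (none), 1 (SHA-256),
+ * 2 (SHA-384) or 3 (SHA-512).
+ */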
+ if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
+ return -EINVAL;
+ if (key_hash > 3) {
+ pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+ key_hash);
+ return -EINVAL;
+ }
+ if (key_hash > 0) {
+ /* Validate selected hash algorithm */
+ const char *hmac = nvme_auth_hmac_name(key_hash);
+
+ if (!crypto_has_shash(hmac, 0, 0)) {
+ pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+ return -ENOTSUPP;
+ }
+ }
+ dhchap_secret = kstrdup(secret, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ if (set_ctrl) {
+ kfree(host->dhchap_ctrl_secret);
+ host->dhchap_ctrl_secret = strim(dhchap_secret);
+ host->dhchap_ctrl_key_hash = key_hash;
+ } else {
+ kfree(host->dhchap_secret);
+ host->dhchap_secret = strim(dhchap_secret);
+ host->dhchap_key_hash = key_hash;
+ }
+ return 0;
+}
+
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
+{
+ const char *dhgroup_kpp;
+ int ret = 0;
+
+ pr_debug("%s: ctrl %d selecting dhgroup %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+
+ if (ctrl->dh_tfm) {
+ if (ctrl->dh_gid == dhgroup_id) {
+ pr_debug("%s: ctrl %d reuse existing DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return 0;
+ }
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+
+ if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
+ return 0;
+
+ dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+ if (!dhgroup_kpp) {
+ pr_debug("%s: ctrl %d invalid DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return -EINVAL;
+ }
+ ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
+ if (IS_ERR(ctrl->dh_tfm)) {
+ pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
+ __func__, ctrl->cntlid, dhgroup_id,
+ PTR_ERR(ctrl->dh_tfm));
+ ret = PTR_ERR(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ } else {
+ ctrl->dh_gid = dhgroup_id;
+ pr_debug("%s: ctrl %d setup DH group %d\n",
+ __func__, ctrl->cntlid, ctrl->dh_gid);
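+ /* Generate a fresh ephemeral key pair for the new DH group. */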
+ ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
+ if (ret < 0) {
+ pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
+ __func__, ctrl->cntlid, ret);
+ kfree_sensitive(ctrl->dh_key);
+ return ret;
+ }
+ ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d failed to allocate public key\n",
+ ctrl->cntlid);
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
+ ctrl->dh_keysize);
+ if (ret < 0) {
+ pr_warn("ctrl %d failed to generate public key\n",
+ ctrl->cntlid);
+ kfree(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+ }
+ }
+
+ return ret;
+}
+
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ int ret = 0;
+ struct nvmet_host_link *p;
+ struct nvmet_host *host = NULL;
+ const char *hash_name;
+
+ down_read(&nvmet_config_sem);
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ goto out_unlock;
+
+ if (ctrl->subsys->allow_any_host)
+ goto out_unlock;
+
+ list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+ pr_debug("check %s\n", nvmet_host_name(p->host));
+ if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+ continue;
+ host = p->host;
+ break;
+ }
+ if (!host) {
+ pr_debug("host %s not found\n", ctrl->hostnqn);
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
+ if (ret < 0)
+ pr_warn("Failed to setup DH group");
+
+ if (!host->dhchap_secret) {
+ pr_debug("No authentication provided\n");
+ goto out_unlock;
+ }
+
+ if (host->dhchap_hash_id == ctrl->shash_id) {
+ pr_debug("Re-use existing hash ID %d\n",
+ ctrl->shash_id);
+ } else {
+ hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ctrl->shash_id = host->dhchap_hash_id;
+ }
+
+ /* Skip the 'DHHC-1:XX:' prefix */
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
+ host->dhchap_key_hash);
+ if (IS_ERR(ctrl->host_key)) {
+ ret = PTR_ERR(ctrl->host_key);
+ ctrl->host_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using hash %s key %*ph\n", __func__,
+ ctrl->host_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
+ (int)ctrl->host_key->len, ctrl->host_key->key);
+
+ nvme_auth_free_key(ctrl->ctrl_key);
+ if (!host->dhchap_ctrl_secret) {
+ ctrl->ctrl_key = NULL;
+ goto out_unlock;
+ }
+
+ ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
+ host->dhchap_ctrl_key_hash);
+ if (IS_ERR(ctrl->ctrl_key)) {
+ ret = PTR_ERR(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
+ ctrl->ctrl_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
+ (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
+
+out_free_hash:
+ if (ret) {
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ ctrl->shash_id = 0;
+ }
+out_unlock:
+ up_read(&nvmet_config_sem);
+
+ return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+ cancel_delayed_work(&sq->auth_expired_work);
+ kfree(sq->dhchap_c1);
+ sq->dhchap_c1 = NULL;
+ kfree(sq->dhchap_c2);
+ sq->dhchap_c2 = NULL;
+ kfree(sq->dhchap_skey);
+ sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+ ctrl->shash_id = 0;
+
+ if (ctrl->dh_tfm) {
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ if (req->sq->ctrl->host_key &&
+ !req->sq->authenticated)
+ return false;
+ return true;
+}
+
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
+ if (IS_ERR(host_response)) {
+ ret = PTR_ERR(host_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, host_response,
+ ctrl->host_key->len);
+ if (ret)
+ goto out_free_response;
+
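+ /*
+ * With a non-NULL DH group the challenge is augmented with the
+ * DH session key before calculating the response.
+ */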
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid);
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
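+ /*
+ * Host response: HMAC over C1 (possibly augmented), the 32-bit
+ * sequence number, the 16-bit transaction id, a zero byte,
+ * "HostHost", the host NQN, a zero byte and the subsystem NQN.
+ */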
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(host_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+ ctrl->ctrl_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
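+ /*
+ * Controller response: HMAC over C2 (possibly augmented), the 32-bit
+ * sequence number, the 16-bit transaction id, a zero byte,
+ * "Controller", the subsystem NQN, a zero byte and the host NQN.
+ */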
+ shash->tfm = shash_tfm;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(ctrl_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+ return -ENOKEY;
+ }
+ if (buf_size != ctrl->dh_keysize) {
+ pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+ ctrl->cntlid, ctrl->dh_keysize, buf_size);
+ ret = -EINVAL;
+ } else {
+ memcpy(buf, ctrl->dh_key, buf_size);
+ pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+ ctrl->cntlid, (int)buf_size, buf);
+ }
+
+ return ret;
+}
+
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *pkey, int pkey_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret;
+
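+ /*
+ * Derive the DH shared secret from the host's public key; it is
+ * used to augment the DH-HMAC-CHAP challenges.
+ */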
+ req->sq->dhchap_skey_len = ctrl->dh_keysize;
+ req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+ if (!req->sq->dhchap_skey)
+ return -ENOMEM;
+ ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+ pkey, pkey_size,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len);
+ if (ret)
+ pr_debug("failed to compute shared secret, err %d\n", ret);
+ else
+ pr_debug("%s: shared secret %*ph\n", __func__,
+ (int)req->sq->dhchap_skey_len,
+ req->sq->dhchap_skey);
+
+ return ret;
+}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
new file mode 100644
index 0000000000..01b2a3d1a5
--- /dev/null
+++ b/drivers/nvme/target/configfs.c
@@ -0,0 +1,2059 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Configfs interface for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kstrtox.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include <linux/nvme-auth.h>
+#endif
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <linux/nospec.h>
+
+#include "nvmet.h"
+
+static const struct config_item_type nvmet_host_type;
+static const struct config_item_type nvmet_subsys_type;
+
+static LIST_HEAD(nvmet_ports_list);
+struct list_head *nvmet_ports = &nvmet_ports_list;
+
+struct nvmet_type_name_map {
+ u8 type;
+ const char *name;
+};
+
+static struct nvmet_type_name_map nvmet_transport[] = {
+ { NVMF_TRTYPE_RDMA, "rdma" },
+ { NVMF_TRTYPE_FC, "fc" },
+ { NVMF_TRTYPE_TCP, "tcp" },
+ { NVMF_TRTYPE_LOOP, "loop" },
+};
+
+static const struct nvmet_type_name_map nvmet_addr_family[] = {
+ { NVMF_ADDR_FAMILY_PCI, "pcie" },
+ { NVMF_ADDR_FAMILY_IP4, "ipv4" },
+ { NVMF_ADDR_FAMILY_IP6, "ipv6" },
+ { NVMF_ADDR_FAMILY_IB, "ib" },
+ { NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_LOOP, "loop" },
+};
+
+static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
+{
+ if (p->enabled)
+ pr_err("Disable port '%u' before changing attribute in %s\n",
+ le16_to_cpu(p->disc_addr.portid), caller);
+ return p->enabled;
+}
+
+/*
+ * nvmet_port Generic ConfigFS definitions.
+ * Used in any place in the ConfigFS tree that refers to an address.
+ */
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
+{
+ u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (nvmet_addr_family[i].type == adrfam)
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_family[i].name);
+ }
+
+ return snprintf(page, PAGE_SIZE, "\n");
+}
+
+static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int i;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (sysfs_streq(page, nvmet_addr_family[i].name))
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for adrfam\n", page);
+ return -EINVAL;
+
+found:
+ port->disc_addr.adrfam = nvmet_addr_family[i].type;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_adrfam);
+
+static ssize_t nvmet_addr_portid_show(struct config_item *item,
+ char *page)
+{
+ __le16 portid = to_nvmet_port(item)->disc_addr.portid;
+
+ return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
+}
+
+static ssize_t nvmet_addr_portid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u16 portid = 0;
+
+ if (kstrtou16(page, 0, &portid)) {
+ pr_err("Invalid value '%s' for portid\n", page);
+ return -EINVAL;
+ }
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ port->disc_addr.portid = cpu_to_le16(portid);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_portid);
+
+static ssize_t nvmet_addr_traddr_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
+}
+
+static ssize_t nvmet_addr_traddr_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (count > NVMF_TRADDR_SIZE) {
+ pr_err("Invalid value '%s' for traddr\n", page);
+ return -EINVAL;
+ }
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
+ return -EINVAL;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_traddr);
+
+static const struct nvmet_type_name_map nvmet_addr_treq[] = {
+ { NVMF_TREQ_NOT_SPECIFIED, "not specified" },
+ { NVMF_TREQ_REQUIRED, "required" },
+ { NVMF_TREQ_NOT_REQUIRED, "not required" },
+};
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
+{
+ u8 treq = to_nvmet_port(item)->disc_addr.treq &
+ NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (treq == nvmet_addr_treq[i].type)
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_treq[i].name);
+ }
+
+ return snprintf(page, PAGE_SIZE, "\n");
+}
+
+static ssize_t nvmet_addr_treq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (sysfs_streq(page, nvmet_addr_treq[i].name))
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for treq\n", page);
+ return -EINVAL;
+
+found:
+ treq |= nvmet_addr_treq[i].type;
+ port->disc_addr.treq = treq;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_treq);
+
+static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
+}
+
+static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (count > NVMF_TRSVCID_SIZE) {
+ pr_err("Invalid value '%s' for trsvcid\n", page);
+ return -EINVAL;
+ }
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
+ return -EINVAL;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trsvcid);
+
+static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
+}
+
+static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->inline_data_size);
+ if (ret) {
+ pr_err("Invalid value '%s' for inline_data_size\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
+}
+
+static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ port->pi_enable = val;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_pi_enable);
+#endif
+
+static ssize_t nvmet_addr_trtype_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (port->disc_addr.trtype == nvmet_transport[i].type)
+ return snprintf(page, PAGE_SIZE,
+ "%s\n", nvmet_transport[i].name);
+ }
+
+ return sprintf(page, "\n");
+}
+
+static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
+{
+ port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
+ port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
+ port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
+}
+
+static ssize_t nvmet_addr_trtype_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int i;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (sysfs_streq(page, nvmet_transport[i].name))
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for trtype\n", page);
+ return -EINVAL;
+
+found:
+ memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+ port->disc_addr.trtype = nvmet_transport[i].type;
+ if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
+ nvmet_port_init_tsas_rdma(port);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trtype);
+
+/*
+ * Namespace structures & file operation functions below
+ */
+static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
+}
+
+static ssize_t nvmet_ns_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ size_t len;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+ ret = -EBUSY;
+ if (ns->enabled)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+
+ kfree(ns->device_path);
+ ret = -ENOMEM;
+ ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!ns->device_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+ return count;
+
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_path);
+
+#ifdef CONFIG_PCI_P2PDMA
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct pci_dev *p2p_dev = NULL;
+ bool use_p2pmem;
+ int ret = count;
+ int error;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+ if (error) {
+ ret = error;
+ goto out_unlock;
+ }
+
+ ns->use_p2pmem = use_p2pmem;
+ pci_dev_put(ns->p2p_dev);
+ ns->p2p_dev = p2p_dev;
+
+out_unlock:
+ mutex_unlock(&ns->subsys->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
+static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
+}
+
+static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ if (uuid_parse(page, &ns->uuid))
+ ret = -EINVAL;
+
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_uuid);
+
+static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
+}
+
+static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ u8 nguid[16];
+ const char *p = page;
+ int i;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
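+ /*
+ * Parse 16 bytes as 32 hex digits, allowing an optional '-' or ':'
+ * separator after each byte.
+ */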
+ for (i = 0; i < 16; i++) {
+ if (p + 2 > page + count) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!isxdigit(p[0]) || !isxdigit(p[1])) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
+ p += 2;
+
+ if (*p == '-' || *p == ':')
+ p++;
+ }
+
+ memcpy(&ns->nguid, nguid, sizeof(nguid));
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_nguid);
+
+static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
+}
+
+static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ u32 oldgrpid, newgrpid;
+ int ret;
+
+ ret = kstrtou32(page, 0, &newgrpid);
+ if (ret)
+ return ret;
+
+ if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
+ return -EINVAL;
+
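+ /*
+ * Move the namespace to the new ANA group under nvmet_ana_sem and
+ * bump the change count so hosts are notified of the change.
+ */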
+ down_write(&nvmet_ana_sem);
+ oldgrpid = ns->anagrpid;
+ newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
+ nvmet_ana_group_enabled[newgrpid]++;
+ ns->anagrpid = newgrpid;
+ nvmet_ana_group_enabled[oldgrpid]--;
+ nvmet_ana_chgcnt++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_send_ana_event(ns->subsys, NULL);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
+
+static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
+}
+
+static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool enable;
+ int ret = 0;
+
+ if (kstrtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_ns_enable(ns);
+ else
+ nvmet_ns_disable(ns);
+
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, enable);
+
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ pr_err("disable ns before setting buffered_io value.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+
+ ns->buffered_io = val;
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
+static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (!ns->enabled) {
+ pr_err("enable ns before revalidate.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+ if (nvmet_ns_revalidate(ns))
+ nvmet_ns_changed(ns->subsys, ns->nsid);
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+
+static struct configfs_attribute *nvmet_ns_attrs[] = {
+ &nvmet_ns_attr_device_path,
+ &nvmet_ns_attr_device_nguid,
+ &nvmet_ns_attr_device_uuid,
+ &nvmet_ns_attr_ana_grpid,
+ &nvmet_ns_attr_enable,
+ &nvmet_ns_attr_buffered_io,
+ &nvmet_ns_attr_revalidate_size,
+#ifdef CONFIG_PCI_P2PDMA
+ &nvmet_ns_attr_p2pmem,
+#endif
+ NULL,
+};
+
+static void nvmet_ns_release(struct config_item *item)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ nvmet_ns_free(ns);
+}
+
+static struct configfs_item_operations nvmet_ns_item_ops = {
+ .release = nvmet_ns_release,
+};
+
+static const struct config_item_type nvmet_ns_type = {
+ .ct_item_ops = &nvmet_ns_item_ops,
+ .ct_attrs = nvmet_ns_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ns_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
+ struct nvmet_ns *ns;
+ int ret;
+ u32 nsid;
+
+ ret = kstrtou32(name, 0, &nsid);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (nsid == 0 || nsid == NVME_NSID_ALL) {
+ pr_err("invalid nsid %#x", nsid);
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ ns = nvmet_ns_alloc(subsys, nsid);
+ if (!ns)
+ goto out;
+ config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
+
+ pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
+
+ return &ns->group;
+out:
+ return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_namespaces_group_ops = {
+ .make_group = nvmet_ns_make,
+};
+
+static const struct config_item_type nvmet_namespaces_type = {
+ .ct_group_ops = &nvmet_namespaces_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+
+static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
+}
+
+static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ size_t len;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+
+ ret = -EBUSY;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+
+ kfree(subsys->passthru_ctrl_path);
+ ret = -ENOMEM;
+ subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+
+ return count;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_passthru_, device_path);
+
+static ssize_t nvmet_passthru_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
+}
+
+static ssize_t nvmet_passthru_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ bool enable;
+ int ret = 0;
+
+ if (kstrtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_passthru_ctrl_enable(subsys);
+ else
+ nvmet_passthru_ctrl_disable(subsys);
+
+ return ret ? ret : count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, enable);
+
+static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
+}
+
+static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int timeout;
+
+ if (kstrtouint(page, 0, &timeout))
+ return -EINVAL;
+ subsys->admin_timeout = timeout;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
+
+static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
+}
+
+static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int timeout;
+
+ if (kstrtouint(page, 0, &timeout))
+ return -EINVAL;
+ subsys->io_timeout = timeout;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
+
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int clear_ids;
+
+ if (kstrtouint(page, 0, &clear_ids))
+ return -EINVAL;
+ subsys->clear_ids = clear_ids;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
+static struct configfs_attribute *nvmet_passthru_attrs[] = {
+ &nvmet_passthru_attr_device_path,
+ &nvmet_passthru_attr_enable,
+ &nvmet_passthru_attr_admin_timeout,
+ &nvmet_passthru_attr_io_timeout,
+ &nvmet_passthru_attr_clear_ids,
+ NULL,
+};
+
+static const struct config_item_type nvmet_passthru_type = {
+ .ct_attrs = nvmet_passthru_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+ config_group_init_type_name(&subsys->passthru_group,
+ "passthru", &nvmet_passthru_type);
+ configfs_add_default_group(&subsys->passthru_group,
+ &subsys->group);
+}
+
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+}
+
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static int nvmet_port_subsys_allow_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+ struct nvmet_subsys *subsys;
+ struct nvmet_subsys_link *link, *p;
+ int ret;
+
+ if (target->ci_type != &nvmet_subsys_type) {
+ pr_err("can only link subsystems into the subsystems dir.!\n");
+ return -EINVAL;
+ }
+ subsys = to_subsys(target);
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ link->subsys = subsys;
+
+ down_write(&nvmet_config_sem);
+ ret = -EEXIST;
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (p->subsys == subsys)
+ goto out_free_link;
+ }
+
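+ /* Enable the port when the first subsystem is linked to it. */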
+ if (list_empty(&port->subsystems)) {
+ ret = nvmet_enable_port(port);
+ if (ret)
+ goto out_free_link;
+ }
+
+ list_add_tail(&link->entry, &port->subsystems);
+ nvmet_port_disc_changed(port, subsys);
+
+ up_write(&nvmet_config_sem);
+ return 0;
+
+out_free_link:
+ up_write(&nvmet_config_sem);
+ kfree(link);
+ return ret;
+}
+
+static void nvmet_port_subsys_drop_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+ struct nvmet_subsys *subsys = to_subsys(target);
+ struct nvmet_subsys_link *p;
+
+ down_write(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (p->subsys == subsys)
+ goto found;
+ }
+ up_write(&nvmet_config_sem);
+ return;
+
+found:
+ list_del(&p->entry);
+ nvmet_port_del_ctrls(port, subsys);
+ nvmet_port_disc_changed(port, subsys);
+
+ if (list_empty(&port->subsystems))
+ nvmet_disable_port(port);
+ up_write(&nvmet_config_sem);
+ kfree(p);
+}
+
+static struct configfs_item_operations nvmet_port_subsys_item_ops = {
+ .allow_link = nvmet_port_subsys_allow_link,
+ .drop_link = nvmet_port_subsys_drop_link,
+};
+
+static const struct config_item_type nvmet_port_subsys_type = {
+ .ct_item_ops = &nvmet_port_subsys_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+ struct nvmet_host *host;
+ struct nvmet_host_link *link, *p;
+ int ret;
+
+ if (target->ci_type != &nvmet_host_type) {
+ pr_err("can only link hosts into the allowed_hosts directory!\n");
+ return -EINVAL;
+ }
+
+ host = to_host(target);
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ link->host = host;
+
+ down_write(&nvmet_config_sem);
+ ret = -EINVAL;
+ if (subsys->allow_any_host) {
+ pr_err("can't add hosts when allow_any_host is set!\n");
+ goto out_free_link;
+ }
+
+ ret = -EEXIST;
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+ goto out_free_link;
+ }
+ list_add_tail(&link->entry, &subsys->hosts);
+ nvmet_subsys_disc_changed(subsys, host);
+
+ up_write(&nvmet_config_sem);
+ return 0;
+out_free_link:
+ up_write(&nvmet_config_sem);
+ kfree(link);
+ return ret;
+}
+
+static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+ struct nvmet_host *host = to_host(target);
+ struct nvmet_host_link *p;
+
+ down_write(&nvmet_config_sem);
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+ goto found;
+ }
+ up_write(&nvmet_config_sem);
+ return;
+
+found:
+ list_del(&p->entry);
+ nvmet_subsys_disc_changed(subsys, host);
+
+ up_write(&nvmet_config_sem);
+ kfree(p);
+}
+
+static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
+ .allow_link = nvmet_allowed_hosts_allow_link,
+ .drop_link = nvmet_allowed_hosts_drop_link,
+};
+
+static const struct config_item_type nvmet_allowed_hosts_type = {
+ .ct_item_ops = &nvmet_allowed_hosts_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_subsys(item)->allow_any_host);
+}
+
+static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool allow_any_host;
+ int ret = 0;
+
+ if (kstrtobool(page, &allow_any_host))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (allow_any_host && !list_empty(&subsys->hosts)) {
+ pr_err("Can't set allow_any_host when explicit hosts are set!\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (subsys->allow_any_host != allow_any_host) {
+ subsys->allow_any_host = allow_any_host;
+ nvmet_subsys_disc_changed(subsys, NULL);
+ }
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
+
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ if (NVME_TERTIARY(subsys->ver))
+ return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+
+ return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
+}
+
+static ssize_t
+nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ int major, minor, tertiary = 0;
+ int ret;
+
+ if (subsys->subsys_discovered) {
+ if (NVME_TERTIARY(subsys->ver))
+ pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ else
+ pr_err("Can't set version number. %llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
+ return -EINVAL;
+ }
+
+ /* passthru subsystems use the underlying controller's version */
+ if (nvmet_is_passthru_subsys(subsys))
+ return -EINVAL;
+
+ ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
+ if (ret != 2 && ret != 3)
+ return -EINVAL;
+
+ subsys->ver = NVME_VS(major, minor, tertiary);
+
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+ return c >= 0x20 && c <= 0x7e;
+}
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return snprintf(page, PAGE_SIZE, "%.*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
+}
+
+static ssize_t
+nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ int pos, len = strcspn(page, "\n");
+
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set serial number. %s is already assigned\n",
+ subsys->serial);
+ return -EINVAL;
+ }
+
+ if (!len || len > NVMET_SN_MAX_SIZE) {
+ pr_err("Serial Number can not be empty or exceed %d Bytes\n",
+ NVMET_SN_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos])) {
+ pr_err("Serial Number must contain only ASCII strings\n");
+ return -EINVAL;
+ }
+ }
+
+ memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
+
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
+
+static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_min;
+
+ if (sscanf(page, "%hu\n", &cntlid_min) != 1)
+ return -EINVAL;
+
+ if (cntlid_min == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_min >= to_subsys(item)->cntlid_max)
+ goto out_unlock;
+ to_subsys(item)->cntlid_min = cntlid_min;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
+
+static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_max;
+
+ if (sscanf(page, "%hu\n", &cntlid_max) != 1)
+ return -EINVAL;
+
+ if (cntlid_max == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_max <= to_subsys(item)->cntlid_min)
+ goto out_unlock;
+ to_subsys(item)->cntlid_max = cntlid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+
+static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
+}
+
+static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ int pos = 0, len;
+ char *val;
+
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set model number. %s is already assigned\n",
+ subsys->model_number);
+ return -EINVAL;
+ }
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ if (len > NVMET_MN_MAX_SIZE) {
+ pr_err("Model number size can not exceed %d Bytes\n",
+ NVMET_MN_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos]))
+ return -EINVAL;
+ }
+
+ val = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ kfree(subsys->model_number);
+ subsys->model_number = val;
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+
+static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
+}
+
+static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ uint32_t val = 0;
+ int ret;
+
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
+ subsys->ieee_oui);
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= 0x1000000)
+ return -EINVAL;
+
+ subsys->ieee_oui = val;
+
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
+
+static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return sysfs_emit(page, "%s\n", subsys->firmware_rev);
+}
+
+static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ int pos = 0, len;
+ char *val;
+
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set firmware revision. %s is already assigned\n",
+ subsys->firmware_rev);
+ return -EINVAL;
+ }
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ if (len > NVMET_FR_MAX_SIZE) {
+ pr_err("Firmware revision size can not exceed %d Bytes\n",
+ NVMET_FR_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos]))
+ return -EINVAL;
+ }
+
+ val = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+
+ kfree(subsys->firmware_rev);
+
+ subsys->firmware_rev = val;
+
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
+}
+
+static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool pi_enable;
+
+ if (kstrtobool(page, &pi_enable))
+ return -EINVAL;
+
+ subsys->pi_support = pi_enable;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
+#endif
+
+static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
+}
+
+static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ struct nvmet_ctrl *ctrl;
+ u16 qid_max;
+
+ if (sscanf(page, "%hu\n", &qid_max) != 1)
+ return -EINVAL;
+
+ if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ subsys->max_qid = qid_max;
+
+ /* Force reconnect */
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ ctrl->ops->delete_ctrl(ctrl);
+ up_write(&nvmet_config_sem);
+
+ return cnt;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
+
+static struct configfs_attribute *nvmet_subsys_attrs[] = {
+ &nvmet_subsys_attr_attr_allow_any_host,
+ &nvmet_subsys_attr_attr_version,
+ &nvmet_subsys_attr_attr_serial,
+ &nvmet_subsys_attr_attr_cntlid_min,
+ &nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_qid_max,
+ &nvmet_subsys_attr_attr_ieee_oui,
+ &nvmet_subsys_attr_attr_firmware,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_subsys_attr_attr_pi_enable,
+#endif
+ NULL,
+};
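+
+/*
+ * Illustrative configfs usage for the subsystem attributes above (a
+ * hypothetical shell session, assuming configfs is mounted at
+ * /sys/kernel/config and using an example NQN; not part of this patch):
+ *
+ *   mkdir /sys/kernel/config/nvmet/subsystems/nqn.2014-08.org.example:sub0
+ *   cd /sys/kernel/config/nvmet/subsystems/nqn.2014-08.org.example:sub0
+ *   echo 1 > attr_allow_any_host
+ *   echo 0x123456 > attr_ieee_oui      (must be below 0x1000000)
+ *   echo Linux-target > attr_model     (ASCII, at most NVMET_MN_MAX_SIZE bytes)
+ *   echo 64 > attr_qid_max             (must be within 1..NVMET_NR_QUEUES)
+ */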
+
+/*
+ * Subsystem structures & folder operation functions below
+ */
+static void nvmet_subsys_release(struct config_item *item)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ nvmet_subsys_del_ctrls(subsys);
+ nvmet_subsys_put(subsys);
+}
+
+static struct configfs_item_operations nvmet_subsys_item_ops = {
+ .release = nvmet_subsys_release,
+};
+
+static const struct config_item_type nvmet_subsys_type = {
+ .ct_item_ops = &nvmet_subsys_item_ops,
+ .ct_attrs = nvmet_subsys_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_subsys_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_subsys *subsys;
+
+ if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
+ pr_err("can't create discovery subsystem through configfs\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
+ if (IS_ERR(subsys))
+ return ERR_CAST(subsys);
+
+ config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
+
+ config_group_init_type_name(&subsys->namespaces_group,
+ "namespaces", &nvmet_namespaces_type);
+ configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
+
+ config_group_init_type_name(&subsys->allowed_hosts_group,
+ "allowed_hosts", &nvmet_allowed_hosts_type);
+ configfs_add_default_group(&subsys->allowed_hosts_group,
+ &subsys->group);
+
+ nvmet_add_passthru_group(subsys);
+
+ return &subsys->group;
+}
+
+static struct configfs_group_operations nvmet_subsystems_group_ops = {
+ .make_group = nvmet_subsys_make,
+};
+
+static const struct config_item_type nvmet_subsystems_type = {
+ .ct_group_ops = &nvmet_subsystems_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t nvmet_referral_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
+}
+
+static ssize_t nvmet_referral_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool enable;
+
+ if (kstrtobool(page, &enable))
+ goto inval;
+
+ if (enable)
+ nvmet_referral_enable(parent, port);
+ else
+ nvmet_referral_disable(parent, port);
+
+ return count;
+inval:
+ pr_err("Invalid value '%s' for enable\n", page);
+ return -EINVAL;
+}
+
+CONFIGFS_ATTR(nvmet_referral_, enable);
+
+/*
+ * Discovery Service subsystem definitions
+ */
+static struct configfs_attribute *nvmet_referral_attrs[] = {
+ &nvmet_attr_addr_adrfam,
+ &nvmet_attr_addr_portid,
+ &nvmet_attr_addr_treq,
+ &nvmet_attr_addr_traddr,
+ &nvmet_attr_addr_trsvcid,
+ &nvmet_attr_addr_trtype,
+ &nvmet_referral_attr_enable,
+ NULL,
+};
+
+static void nvmet_referral_notify(struct config_group *group,
+ struct config_item *item)
+{
+ struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ nvmet_referral_disable(parent, port);
+}
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ kfree(port);
+}
+
+static struct configfs_item_operations nvmet_referral_item_ops = {
+ .release = nvmet_referral_release,
+};
+
+static const struct config_item_type nvmet_referral_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_attrs = nvmet_referral_attrs,
+ .ct_item_ops = &nvmet_referral_item_ops,
+};
+
+static struct config_group *nvmet_referral_make(
+ struct config_group *group, const char *name)
+{
+ struct nvmet_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&port->entry);
+ config_group_init_type_name(&port->group, name, &nvmet_referral_type);
+
+ return &port->group;
+}
+
+static struct configfs_group_operations nvmet_referral_group_ops = {
+ .make_group = nvmet_referral_make,
+ .disconnect_notify = nvmet_referral_notify,
+};
+
+static const struct config_item_type nvmet_referrals_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &nvmet_referral_group_ops,
+};
+
+static struct nvmet_type_name_map nvmet_ana_state[] = {
+ { NVME_ANA_OPTIMIZED, "optimized" },
+ { NVME_ANA_NONOPTIMIZED, "non-optimized" },
+ { NVME_ANA_INACCESSIBLE, "inaccessible" },
+ { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
+ { NVME_ANA_CHANGE, "change" },
+};
+
+static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (state == nvmet_ana_state[i].type)
+ return sprintf(page, "%s\n", nvmet_ana_state[i].name);
+ }
+
+ return sprintf(page, "\n");
+}
+
+static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state *ana_state = grp->port->ana_state;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (sysfs_streq(page, nvmet_ana_state[i].name))
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for ana_state\n", page);
+ return -EINVAL;
+
+found:
+ down_write(&nvmet_ana_sem);
+ ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
+ nvmet_ana_chgcnt++;
+ up_write(&nvmet_ana_sem);
+ nvmet_port_send_ana_event(grp->port);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
+
+static struct configfs_attribute *nvmet_ana_group_attrs[] = {
+ &nvmet_ana_group_attr_ana_state,
+ NULL,
+};
+
+static void nvmet_ana_group_release(struct config_item *item)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+
+ if (grp == &grp->port->ana_default_group)
+ return;
+
+ down_write(&nvmet_ana_sem);
+ grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
+ nvmet_ana_group_enabled[grp->grpid]--;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+ kfree(grp);
+}
+
+static struct configfs_item_operations nvmet_ana_group_item_ops = {
+ .release = nvmet_ana_group_release,
+};
+
+static const struct config_item_type nvmet_ana_group_type = {
+ .ct_item_ops = &nvmet_ana_group_item_ops,
+ .ct_attrs = nvmet_ana_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ana_groups_make_group(
+ struct config_group *group, const char *name)
+{
+ struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
+ struct nvmet_ana_group *grp;
+ u32 grpid;
+ int ret;
+
+ ret = kstrtou32(name, 0, &grpid);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
+ goto out;
+
+ ret = -ENOMEM;
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ goto out;
+ grp->port = port;
+ grp->grpid = grpid;
+
+ down_write(&nvmet_ana_sem);
+ grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
+ nvmet_ana_group_enabled[grpid]++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+
+ config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
+ return &grp->group;
+out:
+ return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_ana_groups_group_ops = {
+ .make_group = nvmet_ana_groups_make_group,
+};
+
+static const struct config_item_type nvmet_ana_groups_type = {
+ .ct_group_ops = &nvmet_ana_groups_group_ops,
+ .ct_owner = THIS_MODULE,
+};
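+
+/*
+ * Illustrative usage (hypothetical paths, assuming configfs is mounted at
+ * /sys/kernel/config): ANA group 1 is the default group created with every
+ * port; additional groups are created per port and their state is driven
+ * through the ana_state attribute:
+ *
+ *   mkdir /sys/kernel/config/nvmet/ports/1/ana_groups/2
+ *   echo non-optimized > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
+ */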
+
+/*
+ * Ports definitions.
+ */
+static void nvmet_port_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ /* Let in-flight controller teardown complete */
+ flush_workqueue(nvmet_wq);
+ list_del(&port->global_entry);
+
+ kfree(port->ana_state);
+ kfree(port);
+}
+
+static struct configfs_attribute *nvmet_port_attrs[] = {
+ &nvmet_attr_addr_adrfam,
+ &nvmet_attr_addr_treq,
+ &nvmet_attr_addr_traddr,
+ &nvmet_attr_addr_trsvcid,
+ &nvmet_attr_addr_trtype,
+ &nvmet_attr_param_inline_data_size,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_attr_param_pi_enable,
+#endif
+ NULL,
+};
+
+static struct configfs_item_operations nvmet_port_item_ops = {
+ .release = nvmet_port_release,
+};
+
+static const struct config_item_type nvmet_port_type = {
+ .ct_attrs = nvmet_port_attrs,
+ .ct_item_ops = &nvmet_port_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ports_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_port *port;
+ u16 portid;
+ u32 i;
+
+ if (kstrtou16(name, 0, &portid))
+ return ERR_PTR(-EINVAL);
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
+ sizeof(*port->ana_state), GFP_KERNEL);
+ if (!port->ana_state) {
+ kfree(port);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
+ if (i == NVMET_DEFAULT_ANA_GRPID)
+ port->ana_state[1] = NVME_ANA_OPTIMIZED;
+ else
+ port->ana_state[i] = NVME_ANA_INACCESSIBLE;
+ }
+
+ list_add(&port->global_entry, &nvmet_ports_list);
+
+ INIT_LIST_HEAD(&port->entry);
+ INIT_LIST_HEAD(&port->subsystems);
+ INIT_LIST_HEAD(&port->referrals);
+ port->inline_data_size = -1; /* < 0 == let the transport choose */
+
+ port->disc_addr.portid = cpu_to_le16(portid);
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
+ port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
+ config_group_init_type_name(&port->group, name, &nvmet_port_type);
+
+ config_group_init_type_name(&port->subsys_group,
+ "subsystems", &nvmet_port_subsys_type);
+ configfs_add_default_group(&port->subsys_group, &port->group);
+
+ config_group_init_type_name(&port->referrals_group,
+ "referrals", &nvmet_referrals_type);
+ configfs_add_default_group(&port->referrals_group, &port->group);
+
+ config_group_init_type_name(&port->ana_groups_group,
+ "ana_groups", &nvmet_ana_groups_type);
+ configfs_add_default_group(&port->ana_groups_group, &port->group);
+
+ port->ana_default_group.port = port;
+ port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
+ config_group_init_type_name(&port->ana_default_group.group,
+ __stringify(NVMET_DEFAULT_ANA_GRPID),
+ &nvmet_ana_group_type);
+ configfs_add_default_group(&port->ana_default_group.group,
+ &port->ana_groups_group);
+
+ return &port->group;
+}
+
+static struct configfs_group_operations nvmet_ports_group_ops = {
+ .make_group = nvmet_ports_make,
+};
+
+static const struct config_item_type nvmet_ports_type = {
+ .ct_group_ops = &nvmet_ports_group_ops,
+ .ct_owner = THIS_MODULE,
+};
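+
+/*
+ * Illustrative port setup (hypothetical addresses; assumes configfs is
+ * mounted at /sys/kernel/config and a transport such as nvmet-tcp is
+ * available):
+ *
+ *   mkdir /sys/kernel/config/nvmet/ports/1
+ *   cd /sys/kernel/config/nvmet/ports/1
+ *   echo tcp > addr_trtype
+ *   echo ipv4 > addr_adrfam
+ *   echo 192.168.0.1 > addr_traddr
+ *   echo 4420 > addr_trsvcid
+ *   ln -s ../../subsystems/<subsysnqn> subsystems/<subsysnqn>
+ *
+ * Linking the first subsystem into subsystems/ is what enables the port.
+ */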
+
+static struct config_group nvmet_subsystems_group;
+static struct config_group nvmet_ports_group;
+
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, false);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, true);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+ return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ u8 hmac_id;
+
+ hmac_id = nvme_auth_hmac_id(page);
+ if (hmac_id == NVME_AUTH_HASH_INVALID)
+ return -EINVAL;
+ if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+ return -ENOTSUPP;
+ host->dhchap_hash_id = hmac_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
+
+ return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
+}
+
+static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int dhgroup_id;
+
+ dhgroup_id = nvme_auth_dhgroup_id(page);
+ if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
+ return -EINVAL;
+ if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
+ const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+
+ if (!crypto_has_kpp(kpp, 0, 0))
+ return -EINVAL;
+ }
+ host->dhchap_dhgroup_id = dhgroup_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+ &nvmet_host_attr_dhchap_key,
+ &nvmet_host_attr_dhchap_ctrl_key,
+ &nvmet_host_attr_dhchap_hash,
+ &nvmet_host_attr_dhchap_dhgroup,
+ NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
+
+static void nvmet_host_release(struct config_item *item)
+{
+ struct nvmet_host *host = to_host(item);
+
+#ifdef CONFIG_NVME_TARGET_AUTH
+ kfree(host->dhchap_secret);
+ kfree(host->dhchap_ctrl_secret);
+#endif
+ kfree(host);
+}
+
+static struct configfs_item_operations nvmet_host_item_ops = {
+ .release = nvmet_host_release,
+};
+
+static const struct config_item_type nvmet_host_type = {
+ .ct_item_ops = &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+ .ct_attrs = nvmet_host_attrs,
+#endif
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_hosts_make_group(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_host *host;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return ERR_PTR(-ENOMEM);
+
+#ifdef CONFIG_NVME_TARGET_AUTH
+ /* Default to SHA256 */
+ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
+#endif
+
+ config_group_init_type_name(&host->group, name, &nvmet_host_type);
+
+ return &host->group;
+}
+
+static struct configfs_group_operations nvmet_hosts_group_ops = {
+ .make_group = nvmet_hosts_make_group,
+};
+
+static const struct config_item_type nvmet_hosts_type = {
+ .ct_group_ops = &nvmet_hosts_group_ops,
+ .ct_owner = THIS_MODULE,
+};
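+
+/*
+ * Illustrative host entry carrying DH-HMAC-CHAP material (hypothetical host
+ * NQN and an elided secret; requires CONFIG_NVME_TARGET_AUTH and configfs
+ * mounted at /sys/kernel/config):
+ *
+ *   mkdir /sys/kernel/config/nvmet/hosts/<hostnqn>
+ *   echo "DHHC-1:00:<base64 secret>:" > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_key
+ *   echo ffdhe2048 > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_dhgroup
+ *
+ * Host entries are usually also linked into a subsystem's allowed_hosts/
+ * directory.
+ */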
+
+static struct config_group nvmet_hosts_group;
+
+static const struct config_item_type nvmet_root_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem nvmet_configfs_subsystem = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "nvmet",
+ .ci_type = &nvmet_root_type,
+ },
+ },
+};
+
+int __init nvmet_init_configfs(void)
+{
+ int ret;
+
+ config_group_init(&nvmet_configfs_subsystem.su_group);
+ mutex_init(&nvmet_configfs_subsystem.su_mutex);
+
+ config_group_init_type_name(&nvmet_subsystems_group,
+ "subsystems", &nvmet_subsystems_type);
+ configfs_add_default_group(&nvmet_subsystems_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ config_group_init_type_name(&nvmet_ports_group,
+ "ports", &nvmet_ports_type);
+ configfs_add_default_group(&nvmet_ports_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ config_group_init_type_name(&nvmet_hosts_group,
+ "hosts", &nvmet_hosts_type);
+ configfs_add_default_group(&nvmet_hosts_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
+ if (ret) {
+ pr_err("configfs_register_subsystem: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
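+
+/*
+ * Once registered (and with configfs mounted, typically via
+ * "mount -t configfs none /sys/kernel/config"), the default groups created
+ * above show up as:
+ *
+ *   /sys/kernel/config/nvmet/
+ *     hosts/
+ *     ports/
+ *     subsystems/
+ */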
+
+void __exit nvmet_exit_configfs(void)
+{
+ configfs_unregister_subsystem(&nvmet_configfs_subsystem);
+}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
new file mode 100644
index 0000000000..3935165048
--- /dev/null
+++ b/drivers/nvme/target/core.c
@@ -0,0 +1,1711 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common code for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
+
+#include <generated/utsrelease.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#include "nvmet.h"
+
+struct kmem_cache *nvmet_bvec_cache;
+struct workqueue_struct *buffered_io_wq;
+struct workqueue_struct *zbd_wq;
+static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+static DEFINE_IDA(cntlid_ida);
+
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
+/*
+ * This read/write semaphore is used to synchronize access to configuration
+ * information on a target system that will result in discovery log page
+ * information change for at least one host.
+ * The full list of resources protected by this semaphore is:
+ *
+ * - subsystems list
+ * - per-subsystem allowed hosts list
+ * - allow_any_host subsystem attribute
+ * - nvmet_genctr
+ * - the nvmet_transports array
+ *
+ * When updating any of those lists/structures the write lock should be
+ * obtained, while when reading (populating the discovery log page or checking
+ * a host-subsystem link) the read lock is taken to allow concurrent readers.
+ */
+DECLARE_RWSEM(nvmet_config_sem);
+
+u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+u64 nvmet_ana_chgcnt;
+DECLARE_RWSEM(nvmet_ana_sem);
+
+inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+{
+ switch (errno) {
+ case 0:
+ return NVME_SC_SUCCESS;
+ case -ENOSPC:
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ case -EREMOTEIO:
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ case -EOPNOTSUPP:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_dsm:
+ case nvme_cmd_write_zeroes:
+ return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ default:
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ break;
+ case -ENODATA:
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
+ return NVME_SC_ACCESS_DENIED;
+ case -EIO:
+ fallthrough;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ }
+}
+
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+ pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+ req->sq->qid);
+
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+ const char *subsysnqn);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+ size_t len)
+{
+ if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
+ return 0;
+}
+
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
+{
+ if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
+ return 0;
+}
+
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
+{
+ if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
+ return 0;
+}
+
+static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ns *cur;
+ unsigned long idx;
+ u32 nsid = 0;
+
+ xa_for_each(&subsys->namespaces, idx, cur)
+ nsid = cur->nsid;
+
+ return nsid;
+}
+
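+/*
+ * The value built below is returned in Dword 0 of the Asynchronous Event
+ * Request completion: event type in bits 2:0, event information in bits 15:8
+ * and the associated log page identifier in bits 23:16, per the AER
+ * completion layout in the NVMe base specification.
+ */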
+static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
+{
+ return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
+}
+
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_req *req;
+
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ mutex_lock(&ctrl->lock);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_async_event *aen;
+ struct nvmet_req *req;
+
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+ aen = list_first_entry(&ctrl->async_events,
+ struct nvmet_async_event, entry);
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ nvmet_set_result(req, nvmet_async_event_result(aen));
+
+ list_del(&aen->entry);
+ kfree(aen);
+
+ mutex_unlock(&ctrl->lock);
+ trace_nvmet_async_event(ctrl, req->cqe->result.u32);
+ nvmet_req_complete(req, 0);
+ mutex_lock(&ctrl->lock);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_async_event *aen, *tmp;
+
+ mutex_lock(&ctrl->lock);
+ list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+ list_del(&aen->entry);
+ kfree(aen);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, async_event_work);
+
+ nvmet_async_events_process(ctrl);
+}
+
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ u8 event_info, u8 log_page)
+{
+ struct nvmet_async_event *aen;
+
+ aen = kmalloc(sizeof(*aen), GFP_KERNEL);
+ if (!aen)
+ return;
+
+ aen->event_type = event_type;
+ aen->event_info = event_info;
+ aen->log_page = log_page;
+
+ mutex_lock(&ctrl->lock);
+ list_add_tail(&aen->entry, &ctrl->async_events);
+ mutex_unlock(&ctrl->lock);
+
+ queue_work(nvmet_wq, &ctrl->async_event_work);
+}
+
+static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+ u32 i;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
+ goto out_unlock;
+
+ for (i = 0; i < ctrl->nr_changed_ns; i++) {
+ if (ctrl->changed_ns_list[i] == nsid)
+ goto out_unlock;
+ }
+
+ if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
+ ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
+ ctrl->nr_changed_ns = U32_MAX;
+ goto out_unlock;
+ }
+
+ ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
+out_unlock:
+ mutex_unlock(&ctrl->lock);
+}
+
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct nvmet_ctrl *ctrl;
+
+ lockdep_assert_held(&subsys->lock);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
+ continue;
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_NS_CHANGED,
+ NVME_LOG_CHANGED_NS);
+ }
+}
+
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+ struct nvmet_port *port)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (port && ctrl->port != port)
+ continue;
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
+ continue;
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_port_send_ana_event(struct nvmet_port *port)
+{
+ struct nvmet_subsys_link *p;
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry)
+ nvmet_send_ana_event(p->subsys, port);
+ up_read(&nvmet_config_sem);
+}
+
+int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
+{
+ int ret = 0;
+
+ down_write(&nvmet_config_sem);
+ if (nvmet_transports[ops->type])
+ ret = -EINVAL;
+ else
+ nvmet_transports[ops->type] = ops;
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_register_transport);
+
+void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
+{
+ down_write(&nvmet_config_sem);
+ nvmet_transports[ops->type] = NULL;
+ up_write(&nvmet_config_sem);
+}
+EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->port == port)
+ ctrl->ops->delete_ctrl(ctrl);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+int nvmet_enable_port(struct nvmet_port *port)
+{
+ const struct nvmet_fabrics_ops *ops;
+ int ret;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ ops = nvmet_transports[port->disc_addr.trtype];
+ if (!ops) {
+ up_write(&nvmet_config_sem);
+ request_module("nvmet-transport-%d", port->disc_addr.trtype);
+ down_write(&nvmet_config_sem);
+ ops = nvmet_transports[port->disc_addr.trtype];
+ if (!ops) {
+ pr_err("transport type %d not supported\n",
+ port->disc_addr.trtype);
+ return -EINVAL;
+ }
+ }
+
+ if (!try_module_get(ops->owner))
+ return -EINVAL;
+
+ /*
+ * If the user requested PI support and the transport isn't pi capable,
+ * don't enable the port.
+ */
+ if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
+ pr_err("T10-PI is not supported by transport type %d\n",
+ port->disc_addr.trtype);
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ ret = ops->add_port(port);
+ if (ret)
+ goto out_put;
+
+ /* If the transport didn't set inline_data_size, then disable it. */
+ if (port->inline_data_size < 0)
+ port->inline_data_size = 0;
+
+ port->enabled = true;
+ port->tr_ops = ops;
+ return 0;
+
+out_put:
+ module_put(ops->owner);
+ return ret;
+}
+
+void nvmet_disable_port(struct nvmet_port *port)
+{
+ const struct nvmet_fabrics_ops *ops;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ port->enabled = false;
+ port->tr_ops = NULL;
+
+ ops = nvmet_transports[port->disc_addr.trtype];
+ ops->remove_port(port);
+ module_put(ops->owner);
+}
+
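+/*
+ * Traffic-based keep-alive: reset_tbkas is set whenever a command is
+ * processed on one of the controller's queues (see nvmet_req_init() and
+ * nvmet_sq_destroy() below), so an expiring timer that finds it set simply
+ * rearms itself instead of declaring the host dead.
+ */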
+static void nvmet_keep_alive_timer(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvmet_ctrl, ka_work);
+ bool reset_tbkas = ctrl->reset_tbkas;
+
+ ctrl->reset_tbkas = false;
+ if (reset_tbkas) {
+ pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+ ctrl->cntlid);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ return;
+ }
+
+ pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
+ ctrl->cntlid, ctrl->kato);
+
+ nvmet_ctrl_fatal_error(ctrl);
+}
+
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ pr_debug("ctrl %d start keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
+}
+
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+
+u16 nvmet_req_find_ns(struct nvmet_req *req)
+{
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+
+ req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
+
+ percpu_ref_get(&req->ns->ref);
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_destroy_namespace(struct percpu_ref *ref)
+{
+ struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
+
+ complete(&ns->disable_done);
+}
+
+void nvmet_put_namespace(struct nvmet_ns *ns)
+{
+ percpu_ref_put(&ns->ref);
+}
+
+static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
+{
+ nvmet_bdev_ns_disable(ns);
+ nvmet_file_ns_disable(ns);
+}
+
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct pci_dev *p2p_dev;
+
+ if (!ns->use_p2pmem)
+ return 0;
+
+ if (!ns->bdev) {
+ pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+ return -EINVAL;
+ }
+
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
+ pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+ if (ret < 0)
+ return -EINVAL;
+ } else {
+ /*
+ * Right now we just check that there is p2pmem available so
+ * we can report an error to the user right away if there
+ * is not. We'll find the actual device to use once we set up
+ * the controller, when the port's device is available.
+ */
+
+ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available for %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ pci_dev_put(p2p_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+ struct nvmet_ns *ns)
+{
+ struct device *clients[2];
+ struct pci_dev *p2p_dev;
+ int ret;
+
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
+ return;
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+ if (ret < 0)
+ return;
+
+ p2p_dev = pci_dev_get(ns->p2p_dev);
+ } else {
+ clients[0] = ctrl->p2p_client;
+ clients[1] = nvmet_ns_dev(ns);
+
+ p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+ dev_name(ctrl->p2p_client), ns->device_path);
+ return;
+ }
+ }
+
+ ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+ if (ret < 0)
+ pci_dev_put(p2p_dev);
+
+ pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+ ns->nsid);
+}
+
+bool nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+ loff_t oldsize = ns->size;
+
+ if (ns->bdev)
+ nvmet_bdev_ns_revalidate(ns);
+ else
+ nvmet_file_ns_revalidate(ns);
+
+ return oldsize != ns->size;
+}
+
+int nvmet_ns_enable(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+ ret = 0;
+
+ if (nvmet_is_passthru_subsys(subsys)) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
+ if (ns->enabled)
+ goto out_unlock;
+
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
+ ret = nvmet_bdev_ns_enable(ns);
+ if (ret == -ENOTBLK)
+ ret = nvmet_file_ns_enable(ns);
+ if (ret)
+ goto out_unlock;
+
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+ goto out_dev_disable;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+
+ ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
+ 0, GFP_KERNEL);
+ if (ret)
+ goto out_dev_put;
+
+ if (ns->nsid > subsys->max_nsid)
+ subsys->max_nsid = ns->nsid;
+
+ ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+ if (ret)
+ goto out_restore_subsys_maxnsid;
+
+ subsys->nr_namespaces++;
+
+ nvmet_ns_changed(subsys, ns->nsid);
+ ns->enabled = true;
+ ret = 0;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+
+out_restore_subsys_maxnsid:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
+out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+out_dev_disable:
+ nvmet_ns_dev_disable(ns);
+ goto out_unlock;
+}
+
+void nvmet_ns_disable(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ if (!ns->enabled)
+ goto out_unlock;
+
+ ns->enabled = false;
+ xa_erase(&ns->subsys->namespaces, ns->nsid);
+ if (ns->nsid == subsys->max_nsid)
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
+ mutex_unlock(&subsys->lock);
+
+ /*
+ * Now that we removed the namespace from the lookup list, we
+ * can kill the per_cpu ref and wait for any remaining references
+ * to be dropped, as well as an RCU grace period for anyone only
+ * using the namespace under rcu_read_lock(). Note that we can't
+ * use call_rcu here as we need to ensure the namespaces have
+ * been fully destroyed before unloading the module.
+ */
+ percpu_ref_kill(&ns->ref);
+ synchronize_rcu();
+ wait_for_completion(&ns->disable_done);
+ percpu_ref_exit(&ns->ref);
+
+ mutex_lock(&subsys->lock);
+
+ subsys->nr_namespaces--;
+ nvmet_ns_changed(subsys, ns->nsid);
+ nvmet_ns_dev_disable(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+ nvmet_ns_disable(ns);
+
+ down_write(&nvmet_ana_sem);
+ nvmet_ana_group_enabled[ns->anagrpid]--;
+ up_write(&nvmet_ana_sem);
+
+ kfree(ns->device_path);
+ kfree(ns);
+}
+
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+
+ init_completion(&ns->disable_done);
+
+ ns->nsid = nsid;
+ ns->subsys = subsys;
+
+ down_write(&nvmet_ana_sem);
+ ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
+ nvmet_ana_group_enabled[ns->anagrpid]++;
+ up_write(&nvmet_ana_sem);
+
+ uuid_gen(&ns->uuid);
+ ns->buffered_io = false;
+ ns->csi = NVME_CSI_NVM;
+
+ return ns;
+}
+
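+/*
+ * Advance the submission queue head with a lockless cmpxchg loop so that
+ * completions racing on the same SQ do not lose an update; the head value
+ * simply wraps modulo the queue size.
+ */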
+static void nvmet_update_sq_head(struct nvmet_req *req)
+{
+ if (req->sq->size) {
+ u32 old_sqhd, new_sqhd;
+
+ old_sqhd = READ_ONCE(req->sq->sqhd);
+ do {
+ new_sqhd = (old_sqhd + 1) % req->sq->size;
+ } while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
+ }
+ req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+}
+
+static void nvmet_set_error(struct nvmet_req *req, u16 status)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_error_slot *new_error_slot;
+ unsigned long flags;
+
+ req->cqe->status = cpu_to_le16(status << 1);
+
+ if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
+ return;
+
+ spin_lock_irqsave(&ctrl->error_lock, flags);
+ ctrl->err_counter++;
+ new_error_slot =
+ &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
+
+ new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
+ new_error_slot->sqid = cpu_to_le16(req->sq->qid);
+ new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
+ new_error_slot->status_field = cpu_to_le16(status << 1);
+ new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
+ new_error_slot->lba = cpu_to_le64(req->error_slba);
+ new_error_slot->nsid = req->cmd->common.nsid;
+ spin_unlock_irqrestore(&ctrl->error_lock, flags);
+
+ /* set the more bit for this request */
+ req->cqe->status |= cpu_to_le16(1 << 14);
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ struct nvmet_ns *ns = req->ns;
+
+ if (!req->sq->sqhd_disabled)
+ nvmet_update_sq_head(req);
+ req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+ req->cqe->command_id = req->cmd->common.command_id;
+
+ if (unlikely(status))
+ nvmet_set_error(req, status);
+
+ trace_nvmet_req_complete(req);
+
+ req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
+}
+
+void nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ struct nvmet_sq *sq = req->sq;
+
+ __nvmet_req_complete(req, status);
+ percpu_ref_put(&sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_complete);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+ u16 qid, u16 size)
+{
+ cq->qid = qid;
+ cq->size = size;
+}
+
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ u16 qid, u16 size)
+{
+ sq->sqhd = 0;
+ sq->qid = qid;
+ sq->size = size;
+
+ ctrl->sqs[qid] = sq;
+}
+
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+ struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+ complete(&sq->confirm_done);
+}
+
+void nvmet_sq_destroy(struct nvmet_sq *sq)
+{
+ struct nvmet_ctrl *ctrl = sq->ctrl;
+
+ /*
+ * If this is the admin queue, complete all AERs so that our
+ * queue doesn't have outstanding requests on it.
+ */
+ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
+ nvmet_async_events_failall(ctrl);
+ percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+ wait_for_completion(&sq->confirm_done);
+ wait_for_completion(&sq->free_done);
+ percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
+
+ if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+ * send us keep-alive during this period, hence reset the
+ * traffic based keep-alive timer so we don't trigger a
+ * controller teardown as a result of a keep-alive expiration.
+ */
+ ctrl->reset_tbkas = true;
+ sq->ctrl->sqs[sq->qid] = NULL;
+ nvmet_ctrl_put(ctrl);
+ sq->ctrl = NULL; /* allows reusing the queue later */
+ }
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
+
+static void nvmet_sq_free(struct percpu_ref *ref)
+{
+ struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+ complete(&sq->free_done);
+}
+
+int nvmet_sq_init(struct nvmet_sq *sq)
+{
+ int ret;
+
+ ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("percpu_ref init failed!\n");
+ return ret;
+ }
+ init_completion(&sq->free_done);
+ init_completion(&sq->confirm_done);
+ nvmet_auth_sq_init(sq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_init);
+
+static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
+ struct nvmet_ns *ns)
+{
+ enum nvme_ana_state state = port->ana_state[ns->anagrpid];
+
+ if (unlikely(state == NVME_ANA_INACCESSIBLE))
+ return NVME_SC_ANA_INACCESSIBLE;
+ if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
+ return NVME_SC_ANA_PERSISTENT_LOSS;
+ if (unlikely(state == NVME_ANA_CHANGE))
+ return NVME_SC_ANA_TRANSITION;
+ return 0;
+}
+
+static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
+{
+ if (unlikely(req->ns->readonly)) {
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_flush:
+ break;
+ default:
+ return NVME_SC_NS_WRITE_PROTECTED;
+ }
+ }
+
+ return 0;
+}
+
+static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 ret;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_io_cmd(req);
+
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
+ ret = nvmet_check_ctrl_status(req);
+ if (unlikely(ret))
+ return ret;
+
+ if (nvmet_is_passthru_req(req))
+ return nvmet_parse_passthru_io_cmd(req);
+
+ ret = nvmet_req_find_ns(req);
+ if (unlikely(ret))
+ return ret;
+
+ ret = nvmet_check_ana_state(req->port, req->ns);
+ if (unlikely(ret)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return ret;
+ }
+ ret = nvmet_io_cmd_check_access(req);
+ if (unlikely(ret)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return ret;
+ }
+
+ switch (req->ns->csi) {
+ case NVME_CSI_NVM:
+ if (req->ns->file)
+ return nvmet_file_parse_io_cmd(req);
+ return nvmet_bdev_parse_io_cmd(req);
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return nvmet_bdev_zns_parse_io_cmd(req);
+ return NVME_SC_INVALID_IO_CMD_SET;
+ default:
+ return NVME_SC_INVALID_IO_CMD_SET;
+ }
+}
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
+{
+ u8 flags = req->cmd->common.flags;
+ u16 status;
+
+ req->cq = cq;
+ req->sq = sq;
+ req->ops = ops;
+ req->sg = NULL;
+ req->metadata_sg = NULL;
+ req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
+ req->transfer_len = 0;
+ req->metadata_len = 0;
+ req->cqe->status = 0;
+ req->cqe->sq_head = 0;
+ req->ns = NULL;
+ req->error_loc = NVMET_NO_ERROR_LOC;
+ req->error_slba = 0;
+
+ /* no support for fused commands yet */
+ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+ req->error_loc = offsetof(struct nvme_common_command, flags);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ /*
+ * For fabrics, the PSDT field shall describe a metadata pointer (MPTR)
+ * that contains the address of a single contiguous physical buffer that
+ * is byte aligned.
+ */
+ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
+ req->error_loc = offsetof(struct nvme_common_command, flags);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ if (unlikely(!req->sq->ctrl))
+ /* will return an error for any non-connect command: */
+ status = nvmet_parse_connect_cmd(req);
+ else if (likely(req->sq->qid != 0))
+ status = nvmet_parse_io_cmd(req);
+ else
+ status = nvmet_parse_admin_cmd(req);
+
+ if (status)
+ goto fail;
+
+ trace_nvmet_req_init(req, req->cmd);
+
+ if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ if (sq->ctrl)
+ sq->ctrl->reset_tbkas = true;
+
+ return true;
+
+fail:
+ __nvmet_req_complete(req, status);
+ return false;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_init);
+
+void nvmet_req_uninit(struct nvmet_req *req)
+{
+ percpu_ref_put(&req->sq->ref);
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
+{
+ if (unlikely(len != req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
+
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+ if (unlikely(data_len > req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ return false;
+ }
+
+ return true;
+}
+
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+{
+ return req->transfer_len - req->metadata_len;
+}
+
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+ struct nvmet_req *req)
+{
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ nvmet_data_transfer_len(req));
+ if (!req->sg)
+ goto out_err;
+
+ if (req->metadata_len) {
+ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+ &req->metadata_sg_cnt, req->metadata_len);
+ if (!req->metadata_sg)
+ goto out_free_sg;
+ }
+
+ req->p2p_dev = p2p_dev;
+
+ return 0;
+out_free_sg:
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+ return -ENOMEM;
+}
+
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+ !req->sq->ctrl || !req->sq->qid || !req->ns)
+ return NULL;
+ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+ return 0;
+
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+ &req->sg_cnt);
+ if (unlikely(!req->sg))
+ goto out;
+
+ if (req->metadata_len) {
+ req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+ &req->metadata_sg_cnt);
+ if (unlikely(!req->metadata_sg))
+ goto out_free;
+ }
+
+ return 0;
+out_free:
+ sgl_free(req->sg);
+out:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
+
+void nvmet_req_free_sgls(struct nvmet_req *req)
+{
+ if (req->p2p_dev) {
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ if (req->metadata_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ req->p2p_dev = NULL;
+ } else {
+ sgl_free(req->sg);
+ if (req->metadata_sg)
+ sgl_free(req->metadata_sg);
+ }
+
+ req->sg = NULL;
+ req->metadata_sg = NULL;
+ req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
+
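+/*
+ * Helpers to pick apart the Controller Configuration (CC) register fields
+ * as laid out in the NVMe base specification: EN in bit 0, CSS in bits 6:4,
+ * MPS in bits 10:7, AMS in bits 13:11, SHN in bits 15:14, IOSQES in bits
+ * 19:16 and IOCQES in bits 23:20.
+ */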
+static inline bool nvmet_cc_en(u32 cc)
+{
+ return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+ return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+ return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+ return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+ return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+ return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+ return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+}
+
+static inline bool nvmet_css_supported(u8 cc_css)
+{
+ switch (cc_css << NVME_CC_CSS_SHIFT) {
+ case NVME_CC_CSS_NVM:
+ case NVME_CC_CSS_CSI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+{
+ lockdep_assert_held(&ctrl->lock);
+
+ /*
+ * Only I/O controllers should verify iosqes,iocqes.
+ * Strictly speaking, the spec says a discovery controller
+ * should verify iosqes,iocqes are zeroed; however, that
+ * would break backwards compatibility, so don't enforce it.
+ */
+ if (!nvmet_is_disc_subsys(ctrl->subsys) &&
+ (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
+ ctrl->csts = NVME_CSTS_CFS;
+ return;
+ }
+
+ if (nvmet_cc_mps(ctrl->cc) != 0 ||
+ nvmet_cc_ams(ctrl->cc) != 0 ||
+ !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
+ ctrl->csts = NVME_CSTS_CFS;
+ return;
+ }
+
+ ctrl->csts = NVME_CSTS_RDY;
+
+ /*
+ * Controllers that are not yet enabled should not really enforce the
+ * keep-alive timeout, but we still want to track a timeout and clean up
+ * in case a host died before it enabled the controller. Hence, simply
+ * reset the keep alive timer when the controller is enabled.
+ */
+ if (ctrl->kato)
+ mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+{
+ lockdep_assert_held(&ctrl->lock);
+
+ /* XXX: tear down queues? */
+ ctrl->csts &= ~NVME_CSTS_RDY;
+ ctrl->cc = 0;
+}
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
+{
+ u32 old;
+
+ mutex_lock(&ctrl->lock);
+ old = ctrl->cc;
+ ctrl->cc = new;
+
+ if (nvmet_cc_en(new) && !nvmet_cc_en(old))
+ nvmet_start_ctrl(ctrl);
+ if (!nvmet_cc_en(new) && nvmet_cc_en(old))
+ nvmet_clear_ctrl(ctrl);
+ if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
+ nvmet_clear_ctrl(ctrl);
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ }
+ if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
+ ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
+{
+ /* command sets supported: NVMe command set: */
+ ctrl->cap = (1ULL << 37);
+ /* Controller supports one or more I/O Command Sets */
+ ctrl->cap |= (1ULL << 43);
+ /* CC.EN timeout in 500msec units: */
+ ctrl->cap |= (15ULL << 24);
+ /* maximum queue entries supported: */
+ if (ctrl->ops->get_max_queue_size)
+ ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ else
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+
+ if (nvmet_is_passthru_subsys(ctrl->subsys))
+ nvmet_passthrough_override_cap(ctrl);
+}
+
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+ const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = NULL;
+ struct nvmet_subsys *subsys;
+
+ subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ if (!subsys) {
+ pr_warn("connect request for invalid subsystem %s!\n",
+ subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ goto out;
+ }
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->cntlid == cntlid) {
+ if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
+ pr_warn("hostnqn mismatch.\n");
+ continue;
+ }
+ if (!kref_get_unless_zero(&ctrl->ref))
+ continue;
+
+ /* ctrl found */
+ goto found;
+ }
+ }
+
+ ctrl = NULL; /* ctrl not found */
+ pr_warn("could not find controller %d for subsys %s / host %s\n",
+ cntlid, subsysnqn, hostnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+
+found:
+ mutex_unlock(&subsys->lock);
+ nvmet_subsys_put(subsys);
+out:
+ return ctrl;
+}
+
+u16 nvmet_check_ctrl_status(struct nvmet_req *req)
+{
+ if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+ pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
+ req->cmd->common.opcode, req->sq->qid);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
+ req->cmd->common.opcode, req->sq->qid);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (unlikely(!nvmet_check_auth_status(req))) {
+ pr_warn("qid %d not authenticated\n", req->sq->qid);
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ }
+ return 0;
+}
+
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
+{
+ struct nvmet_host_link *p;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ if (subsys->allow_any_host)
+ return true;
+
+ if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
+ return true;
+
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), hostnqn))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+ unsigned long idx;
+
+ if (!req->p2p_client)
+ return;
+
+ ctrl->p2p_client = get_device(req->p2p_client);
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+ pci_dev_put(radix_tree_deref_slot(slot));
+
+ put_device(ctrl->p2p_client);
+}
+
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+ pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
+{
+ struct nvmet_subsys *subsys;
+ struct nvmet_ctrl *ctrl;
+ int ret;
+ u16 status;
+
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ if (!subsys) {
+ pr_warn("connect request for invalid subsystem %s!\n",
+ subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ goto out;
+ }
+
+ down_read(&nvmet_config_sem);
+ if (!nvmet_host_allowed(subsys, hostnqn)) {
+ pr_info("connect by host %s for subsystem %s not allowed\n",
+ hostnqn, subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ up_read(&nvmet_config_sem);
+ status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ goto out_put_subsystem;
+ }
+ up_read(&nvmet_config_sem);
+
+ status = NVME_SC_INTERNAL;
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ goto out_put_subsystem;
+ mutex_init(&ctrl->lock);
+
+ ctrl->port = req->port;
+ ctrl->ops = req->ops;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ /* Loop targets clear namespace IDs by default */
+ if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+ subsys->clear_ids = 1;
+#endif
+
+ INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
+ INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
+
+ memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+
+ kref_init(&ctrl->ref);
+ ctrl->subsys = subsys;
+ nvmet_init_cap(ctrl);
+ WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
+
+ ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
+ sizeof(__le32), GFP_KERNEL);
+ if (!ctrl->changed_ns_list)
+ goto out_free_ctrl;
+
+ ctrl->sqs = kcalloc(subsys->max_qid + 1,
+ sizeof(struct nvmet_sq *),
+ GFP_KERNEL);
+ if (!ctrl->sqs)
+ goto out_free_changed_ns_list;
+
+ if (subsys->cntlid_min > subsys->cntlid_max)
+ goto out_free_sqs;
+
+ ret = ida_alloc_range(&cntlid_ida,
+ subsys->cntlid_min, subsys->cntlid_max,
+ GFP_KERNEL);
+ if (ret < 0) {
+ status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ goto out_free_sqs;
+ }
+ ctrl->cntlid = ret;
+
+ /*
+ * Discovery controllers may use an arbitrarily high value
+ * in order to clean up stale discovery sessions.
+ */
+ if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
+ kato = NVMET_DISC_KATO_MS;
+
+ /* keep-alive timeout in seconds */
+ ctrl->kato = DIV_ROUND_UP(kato, 1000);
+
+ ctrl->err_counter = 0;
+ spin_lock_init(&ctrl->error_lock);
+
+ nvmet_start_keep_alive_timer(ctrl);
+
+ mutex_lock(&subsys->lock);
+ list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ nvmet_setup_p2p_ns_map(ctrl, req);
+ mutex_unlock(&subsys->lock);
+
+ *ctrlp = ctrl;
+ return 0;
+
+out_free_sqs:
+ kfree(ctrl->sqs);
+out_free_changed_ns_list:
+ kfree(ctrl->changed_ns_list);
+out_free_ctrl:
+ kfree(ctrl);
+out_put_subsystem:
+ nvmet_subsys_put(subsys);
+out:
+ return status;
+}
+
+static void nvmet_ctrl_free(struct kref *ref)
+{
+ struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
+ struct nvmet_subsys *subsys = ctrl->subsys;
+
+ mutex_lock(&subsys->lock);
+ nvmet_release_p2p_ns_map(ctrl);
+ list_del(&ctrl->subsys_entry);
+ mutex_unlock(&subsys->lock);
+
+ nvmet_stop_keep_alive_timer(ctrl);
+
+ flush_work(&ctrl->async_event_work);
+ cancel_work_sync(&ctrl->fatal_err_work);
+
+ nvmet_destroy_auth(ctrl);
+
+ ida_free(&cntlid_ida, ctrl->cntlid);
+
+ nvmet_async_events_free(ctrl);
+ kfree(ctrl->sqs);
+ kfree(ctrl->changed_ns_list);
+ kfree(ctrl);
+
+ nvmet_subsys_put(subsys);
+}
+
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
+{
+ kref_put(&ctrl->ref, nvmet_ctrl_free);
+}
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
+{
+ mutex_lock(&ctrl->lock);
+ if (!(ctrl->csts & NVME_CSTS_CFS)) {
+ ctrl->csts |= NVME_CSTS_CFS;
+ queue_work(nvmet_wq, &ctrl->fatal_err_work);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+ const char *subsysnqn)
+{
+ struct nvmet_subsys_link *p;
+
+ if (!port)
+ return NULL;
+
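+	/* The well-known discovery NQN always maps to the discovery subsystem */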
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
+ if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
+ return NULL;
+ return nvmet_disc_subsys;
+ }
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (!strncmp(p->subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (!kref_get_unless_zero(&p->subsys->ref))
+ break;
+ up_read(&nvmet_config_sem);
+ return p->subsys;
+ }
+ }
+ up_read(&nvmet_config_sem);
+ return NULL;
+}
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+ enum nvme_subsys_type type)
+{
+ struct nvmet_subsys *subsys;
+ char serial[NVMET_SN_MAX_SIZE / 2];
+ int ret;
+
+ subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+ if (!subsys)
+ return ERR_PTR(-ENOMEM);
+
+ subsys->ver = NVMET_DEFAULT_VS;
+ /* generate a random serial number as our controllers are ephemeral: */
+ get_random_bytes(&serial, sizeof(serial));
+ bin2hex(subsys->serial, &serial, sizeof(serial));
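+	/* bin2hex doubles the length, so all NVMET_SN_MAX_SIZE characters are filled */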
+
+ subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+ if (!subsys->model_number) {
+ ret = -ENOMEM;
+ goto free_subsys;
+ }
+
+ subsys->ieee_oui = 0;
+
+ subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
+ if (!subsys->firmware_rev) {
+ ret = -ENOMEM;
+ goto free_mn;
+ }
+
+ switch (type) {
+ case NVME_NQN_NVME:
+ subsys->max_qid = NVMET_NR_QUEUES;
+ break;
+ case NVME_NQN_DISC:
+ case NVME_NQN_CURR:
+ subsys->max_qid = 0;
+ break;
+ default:
+ pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
+ ret = -EINVAL;
+ goto free_fr;
+ }
+ subsys->type = type;
+ subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
+ GFP_KERNEL);
+ if (!subsys->subsysnqn) {
+ ret = -ENOMEM;
+ goto free_fr;
+ }
+ subsys->cntlid_min = NVME_CNTLID_MIN;
+ subsys->cntlid_max = NVME_CNTLID_MAX;
+ kref_init(&subsys->ref);
+
+ mutex_init(&subsys->lock);
+ xa_init(&subsys->namespaces);
+ INIT_LIST_HEAD(&subsys->ctrls);
+ INIT_LIST_HEAD(&subsys->hosts);
+
+ return subsys;
+
+free_fr:
+ kfree(subsys->firmware_rev);
+free_mn:
+ kfree(subsys->model_number);
+free_subsys:
+ kfree(subsys);
+ return ERR_PTR(ret);
+}
+
+static void nvmet_subsys_free(struct kref *ref)
+{
+ struct nvmet_subsys *subsys =
+ container_of(ref, struct nvmet_subsys, ref);
+
+ WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+ xa_destroy(&subsys->namespaces);
+ nvmet_passthru_subsys_free(subsys);
+
+ kfree(subsys->subsysnqn);
+ kfree(subsys->model_number);
+ kfree(subsys->firmware_rev);
+ kfree(subsys);
+}
+
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ ctrl->ops->delete_ctrl(ctrl);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_subsys_put(struct nvmet_subsys *subsys)
+{
+ kref_put(&subsys->ref, nvmet_subsys_free);
+}
+
+static int __init nvmet_init(void)
+{
+ int error = -ENOMEM;
+
+ nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+
+ nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
+ NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!nvmet_bvec_cache)
+ return -ENOMEM;
+
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ if (!zbd_wq)
+ goto out_destroy_bvec_cache;
+
+ buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!buffered_io_wq)
+ goto out_free_zbd_work_queue;
+
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ if (!nvmet_wq)
+ goto out_free_buffered_work_queue;
+
+ error = nvmet_init_discovery();
+ if (error)
+ goto out_free_nvmet_work_queue;
+
+ error = nvmet_init_configfs();
+ if (error)
+ goto out_exit_discovery;
+ return 0;
+
+out_exit_discovery:
+ nvmet_exit_discovery();
+out_free_nvmet_work_queue:
+ destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
+ destroy_workqueue(buffered_io_wq);
+out_free_zbd_work_queue:
+ destroy_workqueue(zbd_wq);
+out_destroy_bvec_cache:
+ kmem_cache_destroy(nvmet_bvec_cache);
+ return error;
+}
+
+static void __exit nvmet_exit(void)
+{
+ nvmet_exit_configfs();
+ nvmet_exit_discovery();
+ ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_wq);
+ destroy_workqueue(buffered_io_wq);
+ destroy_workqueue(zbd_wq);
+ kmem_cache_destroy(nvmet_bvec_cache);
+
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
+}
+
+module_init(nvmet_init);
+module_exit(nvmet_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
new file mode 100644
index 0000000000..668d257fa9
--- /dev/null
+++ b/drivers/nvme/target/discovery.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Discovery service for the NVMe over Fabrics target.
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/slab.h>
+#include <generated/utsrelease.h>
+#include "nvmet.h"
+
+struct nvmet_subsys *nvmet_disc_subsys;
+
+static u64 nvmet_genctr;
+
+static void __nvmet_disc_changed(struct nvmet_port *port,
+ struct nvmet_ctrl *ctrl)
+{
+ if (ctrl->port != port)
+ return;
+
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
+ return;
+
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
+}
+
+void nvmet_port_disc_changed(struct nvmet_port *port,
+ struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ lockdep_assert_held(&nvmet_config_sem);
+ nvmet_genctr++;
+
+ mutex_lock(&nvmet_disc_subsys->lock);
+ list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
+ if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
+ continue;
+
+ __nvmet_disc_changed(port, ctrl);
+ }
+ mutex_unlock(&nvmet_disc_subsys->lock);
+
+ /* If transport can signal change, notify transport */
+ if (port->tr_ops && port->tr_ops->discovery_chg)
+ port->tr_ops->discovery_chg(port);
+}
+
+static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
+ struct nvmet_subsys *subsys,
+ struct nvmet_host *host)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&nvmet_disc_subsys->lock);
+ list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
+ if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
+ continue;
+
+ __nvmet_disc_changed(port, ctrl);
+ }
+ mutex_unlock(&nvmet_disc_subsys->lock);
+}
+
+void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+ struct nvmet_host *host)
+{
+ struct nvmet_port *port;
+ struct nvmet_subsys_link *s;
+
+ lockdep_assert_held(&nvmet_config_sem);
+ nvmet_genctr++;
+
+ list_for_each_entry(port, nvmet_ports, global_entry)
+ list_for_each_entry(s, &port->subsystems, entry) {
+ if (s->subsys != subsys)
+ continue;
+ __nvmet_subsys_disc_changed(port, subsys, host);
+ }
+}
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
+{
+ down_write(&nvmet_config_sem);
+ if (list_empty(&port->entry)) {
+ list_add_tail(&port->entry, &parent->referrals);
+ port->enabled = true;
+ nvmet_port_disc_changed(parent, NULL);
+ }
+ up_write(&nvmet_config_sem);
+}
+
+void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
+{
+ down_write(&nvmet_config_sem);
+ if (!list_empty(&port->entry)) {
+ port->enabled = false;
+ list_del_init(&port->entry);
+ nvmet_port_disc_changed(parent, NULL);
+ }
+ up_write(&nvmet_config_sem);
+}
+
+static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
+ struct nvmet_port *port, char *subsys_nqn, char *traddr,
+ u8 type, u32 numrec)
+{
+ struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];
+
+ e->trtype = port->disc_addr.trtype;
+ e->adrfam = port->disc_addr.adrfam;
+ e->treq = port->disc_addr.treq;
+ e->portid = port->disc_addr.portid;
+ /* we support only dynamic controllers */
+ e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
+ e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
+ e->subtype = type;
+ memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
+ memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
+ memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
+ strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+}
+
+/*
+ * nvmet_set_disc_traddr - set a correct discovery log entry traddr
+ *
+ * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 addresses
+ * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
+ * must not contain that "any" IP address. If the transport implements
+ * .disc_traddr, use it: this callback sets the discovery traddr from
+ * the req->port address in case the port in question listens on an
+ * "any" IP address.
+ */
+static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
+ char *traddr)
+{
+ if (req->ops->disc_traddr)
+ req->ops->disc_traddr(req, port, traddr);
+ else
+ memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+}
+
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ size_t entries = 1;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+ continue;
+ entries++;
+ }
+ list_for_each_entry(r, &req->port->referrals, entry)
+ entries++;
+ return entries;
+}
+
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
+{
+ const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_disc_rsp_page_hdr *hdr;
+ u64 offset = nvmet_get_log_page_offset(req->cmd);
+ size_t data_len = nvmet_get_log_page_len(req->cmd);
+ size_t alloc_len;
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ u32 numrec = 0;
+ u16 status = 0;
+ void *buffer;
+ char traddr[NVMF_TRADDR_SIZE];
+
+ if (!nvmet_check_transfer_len(req, data_len))
+ return;
+
+ if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lid);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ /* Spec requires dword aligned offsets */
+ if (offset & 0x3) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lpo);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ /*
+	 * Make sure we allocate at least a buffer of response header size.
+	 * If the host-provided data length is less than the header size,
+	 * only the number of bytes requested by the host will be sent.
+ */
+ down_read(&nvmet_config_sem);
+ alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+ buffer = kzalloc(alloc_len, GFP_KERNEL);
+ if (!buffer) {
+ up_read(&nvmet_config_sem);
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+ hdr = buffer;
+
+ nvmet_set_disc_traddr(req, req->port, traddr);
+
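+	/* The first entry describes this discovery subsystem itself */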
+ nvmet_format_discovery_entry(hdr, req->port,
+ nvmet_disc_subsys->subsysnqn,
+ traddr, NVME_NQN_CURR, numrec);
+ numrec++;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+ continue;
+
+ nvmet_format_discovery_entry(hdr, req->port,
+ p->subsys->subsysnqn, traddr,
+ NVME_NQN_NVME, numrec);
+ numrec++;
+ }
+
+ list_for_each_entry(r, &req->port->referrals, entry) {
+ nvmet_format_discovery_entry(hdr, r,
+ NVME_DISC_SUBSYS_NAME,
+ r->disc_addr.traddr,
+ NVME_NQN_DISC, numrec);
+ numrec++;
+ }
+
+ hdr->genctr = cpu_to_le64(nvmet_genctr);
+ hdr->numrec = cpu_to_le64(numrec);
+ hdr->recfmt = cpu_to_le16(0);
+
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);
+
+ up_read(&nvmet_config_sem);
+
+ status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+ kfree(buffer);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl *id;
+ u16 status = 0;
+
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
+ memset(id->fr, ' ', sizeof(id->fr));
+ memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
+ strlen(ctrl->subsys->model_number), ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+
+ id->cntrltype = NVME_CTRL_DISC;
+
+ /* no limit on data transfer sizes for now */
+ id->mdts = 0;
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+ id->lpa = (1 << 2);
+
+ /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (req->port->inline_data_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
+
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_disc_set_features(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u16 stat;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_KATO:
+ stat = nvmet_set_feat_kato(req);
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ stat = nvmet_set_feat_async_event(req,
+ NVMET_DISC_AEN_CFG_OPTIONAL);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
+ stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, stat);
+}
+
+static void nvmet_execute_disc_get_features(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u16 stat = 0;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_KATO:
+ nvmet_get_feat_kato(req);
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ nvmet_get_feat_async_event(req);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
+ stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, stat);
+}
+
+u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("got cmd %d while not ready\n",
+ cmd->common.opcode);
+ req->error_loc =
+ offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ req->execute = nvmet_execute_disc_set_features;
+ return 0;
+ case nvme_admin_get_features:
+ req->execute = nvmet_execute_disc_get_features;
+ return 0;
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ return 0;
+ case nvme_admin_keep_alive:
+ req->execute = nvmet_execute_keep_alive;
+ return 0;
+ case nvme_admin_get_log_page:
+ req->execute = nvmet_execute_disc_get_log_page;
+ return 0;
+ case nvme_admin_identify:
+ req->execute = nvmet_execute_disc_identify;
+ return 0;
+ default:
+ pr_debug("unhandled cmd %d\n", cmd->common.opcode);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+}
+
+int __init nvmet_init_discovery(void)
+{
+ nvmet_disc_subsys =
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
+ return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
+}
+
+void nvmet_exit_discovery(void)
+{
+ nvmet_subsys_put(nvmet_disc_subsys);
+}
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 0000000000..1d9854484e
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+ struct nvmet_sq *sq = container_of(to_delayed_work(work),
+ struct nvmet_sq, auth_expired_work);
+
+ pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+ __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ sq->dhchap_tid = -1;
+}
+
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+ /* Initialize in-band authentication */
+ INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+ sq->authenticated = false;
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_negotiate_data *data = d;
+ int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+ pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+ data->auth_protocol[0].dhchap.halen,
+ data->auth_protocol[0].dhchap.dhlen);
+ req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ if (data->sc_c)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+ if (data->napd != 1)
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+ if (data->auth_protocol[0].dhchap.authid !=
+ NVME_AUTH_DHCHAP_AUTH_ID)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+ for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+ u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+ if (!fallback_hash_id &&
+ crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+ fallback_hash_id = host_hmac_id;
+ if (ctrl->shash_id != host_hmac_id)
+ continue;
+ hash_id = ctrl->shash_id;
+ break;
+ }
+ if (hash_id == 0) {
+ if (fallback_hash_id == 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_hmac_name(fallback_hash_id));
+ ctrl->shash_id = fallback_hash_id;
+ }
+
+ dhgid = -1;
+ fallback_dhgid = -1;
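+	/*
+	 * The DH group identifiers are expected to follow the hash
+	 * identifiers in idlist, starting at offset 30 (hence i + 30 below).
+	 */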
+ for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+ int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+		if (tmp_dhgid == ctrl->dh_gid) {
+ dhgid = tmp_dhgid;
+ break;
+ }
+ if (fallback_dhgid < 0) {
+ const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+ if (crypto_has_kpp(kpp, 0, 0))
+ fallback_dhgid = tmp_dhgid;
+ }
+ }
+ if (dhgid < 0) {
+ if (fallback_dhgid < 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(fallback_dhgid));
+ ctrl->dh_gid = fallback_dhgid;
+ }
+ pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+ return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_reply_data *data = d;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ u8 *response;
+
+ pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->hl, data->cvalid, dhvlen);
+
+ if (dhvlen) {
+ if (!ctrl->dh_tfm)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+ dhvlen) < 0)
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+
+ response = kmalloc(data->hl, GFP_KERNEL);
+ if (!response)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ if (!ctrl->host_key) {
+ pr_warn("ctrl %d qid %d no host key\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+ pr_debug("ctrl %d qid %d host hash failed\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+
+ if (memcmp(data->rval, response, data->hl)) {
+ pr_info("ctrl %d qid %d host response mismatch\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ kfree(response);
+ pr_debug("%s: ctrl %d qid %d host authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ if (data->cvalid) {
+ req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
+ GFP_KERNEL);
+ if (!req->sq->dhchap_c2)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+ __func__, ctrl->cntlid, req->sq->qid, data->hl,
+ req->sq->dhchap_c2);
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else {
+ req->sq->authenticated = true;
+ req->sq->dhchap_c2 = NULL;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_auth_failure2(void *d)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_success2_data *data;
+ void *d;
+ u32 tl;
+ u16 status = 0;
+
+ if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp1);
+ goto done;
+ }
+ tl = le32_to_cpu(req->cmd->auth_send.tl);
+ if (!tl) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, tl);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, tl)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+ return;
+ }
+
+ d = kmalloc(tl, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, tl);
+ if (status)
+ goto done_kfree;
+
+ data = d;
+ pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+ req->sq->dhchap_step);
+ if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+ goto done_failure1;
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+ if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+ /* Restart negotiation */
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ if (!req->sq->qid) {
+ if (nvmet_setup_auth(ctrl) < 0) {
+ status = NVME_SC_INTERNAL;
+ pr_err("ctrl %d qid 0 failed to setup"
+ "re-authentication",
+ ctrl->cntlid);
+ goto done_failure1;
+ }
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ } else if (data->auth_id != req->sq->dhchap_step)
+ goto done_failure1;
+ /* Validate negotiation parameters */
+ status = nvmet_auth_negotiate(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ }
+ if (data->auth_id != req->sq->dhchap_step) {
+ pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->auth_id, req->sq->dhchap_step);
+ goto done_failure1;
+ }
+ if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+ pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ le16_to_cpu(data->t_id),
+ req->sq->dhchap_tid);
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ goto done_kfree;
+ }
+
+ switch (data->auth_id) {
+ case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+ status = nvmet_auth_reply(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ req->sq->authenticated = true;
+ pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ goto done_kfree;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+ status = nvmet_auth_failure2(d);
+ if (status) {
+ pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ status = 0;
+ }
+ goto done_kfree;
+ default:
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ req->sq->authenticated = false;
+ goto done_kfree;
+ }
+done_failure1:
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+ kfree(d);
+done:
+ pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status, req->sq->dhchap_step);
+ if (status)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+ req->cqe->result.u64 = 0;
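+	/*
+	 * While the transaction is still in progress, (re)arm the expiry
+	 * timer: the controller KATO (in seconds), or 120 seconds if unset.
+	 */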
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+
+ mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ auth_expire_secs * HZ);
+ goto complete;
+ }
+ /* Final states, clear up variables */
+ nvmet_auth_sq_free(req->sq);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+	int data_size = sizeof(*data) + hash_len;
+
+ if (ctrl->dh_tfm)
+ data_size += ctrl->dh_keysize;
+ if (al < data_size) {
+ pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+ al, data_size);
+ return -EINVAL;
+ }
+ memset(data, 0, data_size);
+ req->sq->dhchap_s1 = nvme_auth_get_seqnum();
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hashid = ctrl->shash_id;
+ data->hl = hash_len;
+ data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+ req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c1)
+ return -ENOMEM;
+ get_random_bytes(req->sq->dhchap_c1, data->hl);
+ memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+ if (ctrl->dh_tfm) {
+ data->dhgid = ctrl->dh_gid;
+ data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
+ ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
+ ctrl->dh_keysize);
+ }
+ pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
+ __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
+ return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_success1_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+
+ WARN_ON(al < sizeof(*data));
+ memset(data, 0, sizeof(*data));
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hl = hash_len;
+ if (req->sq->dhchap_c2) {
+ if (!ctrl->ctrl_key) {
+ pr_warn("ctrl %d qid %d no ctrl key\n",
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ data->rvalid = 1;
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ }
+ return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ WARN_ON(al < sizeof(*data));
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = req->sq->dhchap_status;
+}
+
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *d;
+ u32 al;
+ u16 status = 0;
+
+ if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp1);
+ goto done;
+ }
+ al = le32_to_cpu(req->cmd->auth_receive.al);
+ if (!al) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, al);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, al)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+ return;
+ }
+
+ d = kmalloc(al, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+ pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ switch (req->sq->dhchap_step) {
+ case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+ if (nvmet_auth_challenge(req, d, al) < 0) {
+ pr_warn("ctrl %d qid %d: challenge error (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ status = NVME_SC_INTERNAL;
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+ status = nvmet_auth_success1(req, d, al);
+ if (status) {
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d failure1 (%x)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+ break;
+ default:
+ pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ nvmet_auth_failure1(req, d, al);
+ status = 0;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+done:
+ req->cqe->result.u64 = 0;
+
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ nvmet_auth_sq_free(req->sq);
+ nvmet_ctrl_fatal_error(ctrl);
+ }
+ nvmet_req_complete(req, status);
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
new file mode 100644
index 0000000000..d8da840a1c
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Fabrics command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+static void nvmet_execute_prop_set(struct nvmet_req *req)
+{
+ u64 val = le64_to_cpu(req->cmd->prop_set.value);
+ u16 status = 0;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ if (req->cmd->prop_set.attrib & 1) {
+ req->error_loc =
+ offsetof(struct nvmf_property_set_command, attrib);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ switch (le32_to_cpu(req->cmd->prop_set.offset)) {
+ case NVME_REG_CC:
+ nvmet_update_cc(req->sq->ctrl, val);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvmf_property_set_command, offset);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_prop_get(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = 0;
+ u64 val = 0;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ if (req->cmd->prop_get.attrib & 1) {
+ switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+ case NVME_REG_CAP:
+ val = ctrl->cap;
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ } else {
+ switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+ case NVME_REG_VS:
+ val = ctrl->subsys->ver;
+ break;
+ case NVME_REG_CC:
+ val = ctrl->cc;
+ break;
+ case NVME_REG_CSTS:
+ val = ctrl->csts;
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ }
+
+ if (status && req->cmd->prop_get.attrib & 1) {
+ req->error_loc =
+ offsetof(struct nvmf_property_get_command, offset);
+ } else {
+ req->error_loc =
+ offsetof(struct nvmf_property_get_command, attrib);
+ }
+
+ req->cqe->result.u64 = cpu_to_le64(val);
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+ case nvme_fabrics_type_property_set:
+ req->execute = nvmet_execute_prop_set;
+ break;
+ case nvme_fabrics_type_property_get:
+ req->execute = nvmet_execute_prop_get;
+ break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ u16 qid = le16_to_cpu(c->qid);
+ u16 sqsize = le16_to_cpu(c->sqsize);
+ struct nvmet_ctrl *old;
+ u16 mqes = NVME_CAP_MQES(ctrl->cap);
+ u16 ret;
+
+ if (!sqsize) {
+ pr_warn("queue size zero!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
+ ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ goto err;
+ }
+
+ if (ctrl->sqs[qid] != NULL) {
+ pr_warn("qid %u has already been created\n", qid);
+ req->error_loc = offsetof(struct nvmf_connect_command, qid);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (sqsize > mqes) {
+ pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
+ sqsize, mqes, ctrl->cntlid);
+ req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
+
+ old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+ if (old) {
+ pr_warn("queue already connected!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ }
+
+ /* note: convert queue size from 0's-based value to 1's-based value */
+ nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
+ nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
+
+ if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
+ req->sq->sqhd_disabled = true;
+ req->cqe->sq_head = cpu_to_le16(0xffff);
+ }
+
+ if (ctrl->ops->install_queue) {
+ ret = ctrl->ops->install_queue(req->sq);
+ if (ret) {
+ pr_err("failed to install queue %d cntlid %d ret %x\n",
+ qid, ctrl->cntlid, ret);
+ ctrl->sqs[qid] = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ req->sq->ctrl = NULL;
+ return ret;
+}
+
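+/*
+ * The connect response result carries the allocated controller ID in its
+ * low 16 bits; when in-band authentication is required, the
+ * NVME_CONNECT_AUTHREQ_ATR flag is OR'd in above the cntlid so the host
+ * knows to start DH-HMAC-CHAP.
+ */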
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+{
+ return (u32)ctrl->cntlid |
+ (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+}
+
+static void nvmet_execute_admin_connect(struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ struct nvmf_connect_data *d;
+ struct nvmet_ctrl *ctrl = NULL;
+ u16 status = 0;
+ int ret;
+
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto complete;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto out;
+
+ /* zero out initial completion result, assign values as needed */
+ req->cqe->result.u32 = 0;
+
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+ req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
+ status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
+ pr_warn("connect attempt for invalid controller ID %#x\n",
+ d->cntlid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ goto out;
+ }
+
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ le32_to_cpu(c->kato), &ctrl);
+ if (status)
+ goto out;
+
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
+
+ uuid_copy(&ctrl->hostid, &d->hostid);
+
+ ret = nvmet_setup_auth(ctrl);
+ if (ret < 0) {
+ pr_err("Failed to setup authentication, error %d\n", ret);
+ nvmet_ctrl_put(ctrl);
+ if (ret == -EPERM)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ else
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_install_queue(ctrl, req);
+ if (status) {
+ nvmet_ctrl_put(ctrl);
+ goto out;
+ }
+
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+out:
+ kfree(d);
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_io_connect(struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ struct nvmf_connect_data *d;
+ struct nvmet_ctrl *ctrl;
+ u16 qid = le16_to_cpu(c->qid);
+ u16 status = 0;
+
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto complete;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto out;
+
+ /* zero out initial completion result, assign values as needed */
+ req->cqe->result.u32 = 0;
+
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+ status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ goto out;
+ }
+
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid), req);
+ if (!ctrl) {
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (unlikely(qid > ctrl->subsys->max_qid)) {
+ pr_warn("invalid queue id (%d)\n", qid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
+ goto out_ctrl_put;
+ }
+
+ status = nvmet_install_queue(ctrl, req);
+ if (status)
+ goto out_ctrl_put;
+
+ pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+out:
+ kfree(d);
+complete:
+ nvmet_req_complete(req, status);
+ return;
+
+out_ctrl_put:
+ nvmet_ctrl_put(ctrl);
+ goto out;
+}
+
+u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (!nvme_is_fabrics(cmd)) {
+ pr_debug("invalid command 0x%x on unconnected queue.\n",
+ cmd->fabrics.opcode);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+ pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ if (cmd->connect.qid == 0)
+ req->execute = nvmet_execute_admin_connect;
+ else
+ req->execute = nvmet_execute_io_connect;
+ return 0;
+}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
new file mode 100644
index 0000000000..1ab6601fdd
--- /dev/null
+++ b/drivers/nvme/target/fc.c
@@ -0,0 +1,2947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+#include "../host/fc.h"
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+#define NVMET_LS_CTX_COUNT 256
+
+struct nvmet_fc_tgtport;
+struct nvmet_fc_tgt_assoc;
+
+struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
+ struct nvmefc_ls_rsp *lsrsp;
+ struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
+
+ struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
+
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_tgt_assoc *assoc;
+ void *hosthandle;
+
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
+ u16 rqstdatalen;
+ dma_addr_t rspdma;
+
+ struct scatterlist sg[2];
+
+ struct work_struct work;
+} __aligned(sizeof(unsigned long long));
+
+struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
+ struct nvmefc_ls_req ls_req;
+
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+
+ int ls_error;
+ struct list_head lsreq_list; /* tgtport->ls_req_list */
+ bool req_queued;
+};
+
+
+/* desired maximum for a single sequence - if sg list allows it */
+#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
+
+enum nvmet_fcp_datadir {
+ NVMET_FCP_NODATA,
+ NVMET_FCP_WRITE,
+ NVMET_FCP_READ,
+ NVMET_FCP_ABORTED,
+};
+
+struct nvmet_fc_fcp_iod {
+ struct nvmefc_tgt_fcp_req *fcpreq;
+
+ struct nvme_fc_cmd_iu cmdiubuf;
+ struct nvme_fc_ersp_iu rspiubuf;
+ dma_addr_t rspdma;
+ struct scatterlist *next_sg;
+ struct scatterlist *data_sg;
+ int data_sg_cnt;
+ u32 offset;
+ enum nvmet_fcp_datadir io_dir;
+ bool active;
+ bool abort;
+ bool aborted;
+ bool writedataactive;
+ spinlock_t flock;
+
+ struct nvmet_req req;
+ struct work_struct defer_work;
+
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_tgt_queue *queue;
+
+ struct list_head fcp_list; /* tgtport->fcp_list */
+};
+
+struct nvmet_fc_tgtport {
+ struct nvmet_fc_target_port fc_target_port;
+
+ struct list_head tgt_list; /* nvmet_fc_target_list */
+ struct device *dev; /* dev for dma mapping */
+ struct nvmet_fc_target_template *ops;
+
+ struct nvmet_fc_ls_iod *iod;
+ spinlock_t lock;
+ struct list_head ls_rcv_list;
+ struct list_head ls_req_list;
+ struct list_head ls_busylist;
+ struct list_head assoc_list;
+ struct list_head host_list;
+ struct ida assoc_cnt;
+ struct nvmet_fc_port_entry *pe;
+ struct kref ref;
+ u32 max_sg_cnt;
+};
+
+struct nvmet_fc_port_entry {
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_port *port;
+ u64 node_name;
+ u64 port_name;
+ struct list_head pe_list;
+};
+
+struct nvmet_fc_defer_fcp_req {
+ struct list_head req_list;
+ struct nvmefc_tgt_fcp_req *fcp_req;
+};
+
+struct nvmet_fc_tgt_queue {
+ bool ninetypercent;
+ u16 qid;
+ u16 sqsize;
+ u16 ersp_ratio;
+ __le16 sqhd;
+ atomic_t connected;
+ atomic_t sqtail;
+ atomic_t zrspcnt;
+ atomic_t rsn;
+ spinlock_t qlock;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct list_head fod_list;
+ struct list_head pending_cmd_list;
+ struct list_head avail_defer_list;
+ struct workqueue_struct *work_q;
+ struct kref ref;
+ struct rcu_head rcu;
+ struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
+} __aligned(sizeof(unsigned long long));
+
+struct nvmet_fc_hostport {
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+ struct list_head host_list;
+ struct kref ref;
+ u8 invalid;
+};
+
+struct nvmet_fc_tgt_assoc {
+ u64 association_id;
+ u32 a_id;
+ atomic_t terminating;
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_hostport *hostport;
+ struct nvmet_fc_ls_iod *rcv_disconn;
+ struct list_head a_list;
+ struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+ struct kref ref;
+ struct work_struct del_work;
+ struct rcu_head rcu;
+};
+
+
+static inline int
+nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
+{
+ return (iodptr - iodptr->tgtport->iod);
+}
+
+static inline int
+nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
+{
+ return (fodptr - fodptr->queue->fod);
+}
+
+
+/*
+ * Association and Connection IDs:
+ *
+ * An Association ID has a random number in its upper 6 bytes and zero
+ * in its lower 2 bytes.
+ *
+ * A Connection ID is the Association ID with the QID OR'd into the
+ * lower 2 bytes.
+ *
+ * note: Association ID = Connection ID for queue 0
+ */
+#define BYTES_FOR_QID sizeof(u16)
+#define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
+#define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
+
+static inline u64
+nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
+{
+ return (assoc->association_id | qid);
+}
+
+static inline u64
+nvmet_fc_getassociationid(u64 connectionid)
+{
+ return connectionid & ~NVMET_FC_QUEUEID_MASK;
+}
+
+static inline u16
+nvmet_fc_getqueueid(u64 connectionid)
+{
+ return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
+}
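+
+/*
+ * Illustrative round trip (example values only): with an association_id of
+ * 0x1122334455660000, nvmet_fc_makeconnid(assoc, 3) yields
+ * 0x1122334455660003; nvmet_fc_getassociationid() then recovers
+ * 0x1122334455660000 and nvmet_fc_getqueueid() recovers 3.
+ */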
+
+static inline struct nvmet_fc_tgtport *
+targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
+{
+ return container_of(targetport, struct nvmet_fc_tgtport,
+ fc_target_port);
+}
+
+static inline struct nvmet_fc_fcp_iod *
+nvmet_req_to_fod(struct nvmet_req *nvme_req)
+{
+ return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
+}
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
+
+static LIST_HEAD(nvmet_fc_target_list);
+static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+static LIST_HEAD(nvmet_fc_portentry_list);
+
+
+static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
+static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
+static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod);
+static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDDs will
+ * pass in a valid device pointer. If NULL is passed to the DMA mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the DMA routines and check the dev pointer.
+ *
+ * For simple mappings (ones that return just a DMA address), we no-op
+ * them, returning a DMA address of 0.
+ *
+ * For more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all DMA addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_sg(dev, sg, nents, dir);
+}
+
+
+/* ********************** FC-NVME LS XMT Handling ************************* */
+
+
+static void
+__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+{
+ struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static int
+__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!tgtport->ops->ls_req)
+ return -EOPNOTSUPP;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+
+ lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
+ ret = -EFAULT;
+ goto out_puttgtport;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
+ lsreq);
+ if (ret)
+ goto out_unlink;
+
+ return 0;
+
+out_unlink:
+ lsop->ls_error = ret;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ lsop->req_queued = false;
+ list_del(&lsop->lsreq_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+out_puttgtport:
+ nvmet_fc_tgtport_put(tgtport);
+
+ return ret;
+}
+
+static int
+nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ /* don't wait for completion */
+
+ return __nvmet_fc_send_ls_req(tgtport, lsop, done);
+}
+
+static void
+nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
+
+ __nvmet_fc_finish_ls_req(lsop);
+
+ /* fc-nvme target doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The fc-nvme target's understanding of the association and its
+ * connections will implicitly be torn down. The action is implicit
+ * as it may be due to a loss of
+ * connectivity with the fc-nvme host, so the target may never get a
+ * response even if it tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme host
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
+ struct nvmet_fc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ int ret;
+
+ /*
+	 * If ls_req is NULL or there is no hosthandle, this is an older LLDD
+	 * and not sending the LS is normal. Otherwise, send unless the
+	 * hostport has already been invalidated by the LLDD.
+ */
+ if (!tgtport->ops->ls_req || !assoc->hostport ||
+ assoc->hostport->invalid)
+ return;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(tgtport->dev,
+ "{%d:%d} send Disconnect Association failed: ENOMEM\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ return;
+ }
+
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (tgtport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
+
+ lsop->tgtport = tgtport;
+ lsop->hosthandle = assoc->hostport->hosthandle;
+
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ assoc->association_id);
+
+ ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
+ nvmet_fc_disconnect_assoc_done);
+ if (ret) {
+ dev_info(tgtport->dev,
+ "{%d:%d} XMT Disconnect Association failed: %d\n",
+ tgtport->fc_target_port.port_num, assoc->a_id, ret);
+ kfree(lsop);
+ }
+}
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+
+static int
+nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod;
+ int i;
+
+ iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
+ GFP_KERNEL);
+ if (!iod)
+ return -ENOMEM;
+
+ tgtport->iod = iod;
+
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+ INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
+ iod->tgtport = tgtport;
+ list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
+
+ iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
+ if (!iod->rqstbuf)
+ goto out_fail;
+
+ iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
+
+ iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
+ sizeof(*iod->rspbuf),
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_rcv_list);
+ for (iod--, i--; i >= 0; iod--, i--) {
+ fc_dma_unmap_single(tgtport->dev, iod->rspdma,
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_rcv_list);
+ }
+
+ kfree(iod);
+
+ return -EFAULT;
+}
+
+static void
+nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod = tgtport->iod;
+ int i;
+
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+ fc_dma_unmap_single(tgtport->dev,
+ iod->rspdma, sizeof(*iod->rspbuf),
+ DMA_TO_DEVICE);
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_rcv_list);
+ }
+ kfree(tgtport->iod);
+}
+
+static struct nvmet_fc_ls_iod *
+nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
+ struct nvmet_fc_ls_iod, ls_rcv_list);
+ if (iod)
+ list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return iod;
+}
+
+
+static void
+nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+static void
+nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ int i;
+
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
+ fod->tgtport = tgtport;
+ fod->queue = queue;
+ fod->active = false;
+ fod->abort = false;
+ fod->aborted = false;
+ fod->fcpreq = NULL;
+ list_add_tail(&fod->fcp_list, &queue->fod_list);
+ spin_lock_init(&fod->flock);
+
+ fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
+ list_del(&fod->fcp_list);
+ for (fod--, i--; i >= 0; fod--, i--) {
+ fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf),
+ DMA_TO_DEVICE);
+ fod->rspdma = 0L;
+ list_del(&fod->fcp_list);
+ }
+
+ return;
+ }
+ }
+}
+
+static void
+nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ int i;
+
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ if (fod->rspdma)
+ fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ }
+}
+
+static struct nvmet_fc_fcp_iod *
+nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod;
+
+ lockdep_assert_held(&queue->qlock);
+
+ fod = list_first_entry_or_null(&queue->fod_list,
+ struct nvmet_fc_fcp_iod, fcp_list);
+ if (fod) {
+ list_del(&fod->fcp_list);
+ fod->active = true;
+ /*
+ * no queue reference is taken, as it was taken by the
+ * queue lookup just prior to the allocation. The iod
+ * will "inherit" that reference.
+ */
+ }
+ return fod;
+}
+
+
+static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+ /*
+ * put all admin cmds on hw queue id 0. All io commands are spread
+ * across the hw queues on a modulo basis
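+ *
+ * e.g. (illustrative) with ops->max_hw_queues == 4, io queue qids 1..4
+ * land on hwqid 0..3 and qid 5 wraps back to hwqid 0; the admin queue
+ * (qid 0) always uses hwqid 0.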
+ */
+ fcpreq->hwqid = queue->qid ?
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+static void
+nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
+{
+ struct nvmet_fc_fcp_iod *fod =
+ container_of(work, struct nvmet_fc_fcp_iod, defer_work);
+
+ /* Submit deferred IO for processing */
+ nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
+
+}
+
+static void
+nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
+ unsigned long flags;
+
+ fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+
+ fcpreq->nvmet_fc_private = NULL;
+
+ fod->active = false;
+ fod->abort = false;
+ fod->aborted = false;
+ fod->writedataactive = false;
+ fod->fcpreq = NULL;
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+ /* release the queue lookup reference on the completed IO */
+ nvmet_fc_tgt_q_put(queue);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp) {
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+ return;
+ }
+
+ /* Re-use the fod for the next pending cmd that was deferred */
+ list_del(&deferfcp->req_list);
+
+ fcpreq = deferfcp->fcp_req;
+
+ /* deferfcp can be reused for another IO at a later date */
+ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /* Save NVME CMD IO in fod */
+ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+ /* Setup new fcpreq to be processed */
+ fcpreq->rspaddr = NULL;
+ fcpreq->rsplen = 0;
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+ fod->active = true;
+
+ /* inform LLDD IO is now being processed */
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+ /*
+ * Leave the queue lookup get reference taken when
+ * fod was originally allocated.
+ */
+
+ queue_work(queue->work_q, &fod->defer_work);
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ u16 qid, u16 sqsize)
+{
+ struct nvmet_fc_tgt_queue *queue;
+ int ret;
+
+ if (qid > NVMET_NR_QUEUES)
+ return NULL;
+
+ queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
+ if (!queue)
+ return NULL;
+
+ if (!nvmet_fc_tgt_a_get(assoc))
+ goto out_free_queue;
+
+ queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+ assoc->tgtport->fc_target_port.port_num,
+ assoc->a_id, qid);
+ if (!queue->work_q)
+ goto out_a_put;
+
+ queue->qid = qid;
+ queue->sqsize = sqsize;
+ queue->assoc = assoc;
+ INIT_LIST_HEAD(&queue->fod_list);
+ INIT_LIST_HEAD(&queue->avail_defer_list);
+ INIT_LIST_HEAD(&queue->pending_cmd_list);
+ atomic_set(&queue->connected, 0);
+ atomic_set(&queue->sqtail, 0);
+ atomic_set(&queue->rsn, 1);
+ atomic_set(&queue->zrspcnt, 0);
+ spin_lock_init(&queue->qlock);
+ kref_init(&queue->ref);
+
+ nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret)
+ goto out_fail_iodlist;
+
+ WARN_ON(assoc->queues[qid]);
+ rcu_assign_pointer(assoc->queues[qid], queue);
+
+ return queue;
+
+out_fail_iodlist:
+ nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+ destroy_workqueue(queue->work_q);
+out_a_put:
+ nvmet_fc_tgt_a_put(assoc);
+out_free_queue:
+ kfree(queue);
+ return NULL;
+}
+
+
+static void
+nvmet_fc_tgt_queue_free(struct kref *ref)
+{
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(ref, struct nvmet_fc_tgt_queue, ref);
+
+ rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
+
+ nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+ nvmet_fc_tgt_a_put(queue->assoc);
+
+ destroy_workqueue(queue->work_q);
+
+ kfree_rcu(queue, rcu);
+}
+
+static void
+nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
+{
+ kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
+}
+
+static int
+nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
+{
+ return kref_get_unless_zero(&queue->ref);
+}
+
+
+static void
+nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
+ unsigned long flags;
+ int i;
+ bool disconnect;
+
+ disconnect = atomic_xchg(&queue->connected, 0);
+
+ /* if not connected, nothing to do */
+ if (!disconnect)
+ return;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ /* abort outstanding io's */
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ if (fod->active) {
+ spin_lock(&fod->flock);
+ fod->abort = true;
+ /*
+ * only call lldd abort routine if waiting for
+ * writedata. other outstanding ops should finish
+ * on their own.
+ */
+ if (fod->writedataactive) {
+ fod->aborted = true;
+ spin_unlock(&fod->flock);
+ tgtport->ops->fcp_abort(
+ &tgtport->fc_target_port, fod->fcpreq);
+ } else
+ spin_unlock(&fod->flock);
+ }
+ }
+
+ /* Clean up deferred IOs in the queue */
+ list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+ req_list) {
+ list_del(&deferfcp->req_list);
+ kfree(deferfcp);
+ }
+
+ for (;;) {
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp)
+ break;
+
+ list_del(&deferfcp->req_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+
+ kfree(deferfcp);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ flush_workqueue(queue->work_q);
+
+ nvmet_sq_destroy(&queue->nvme_sq);
+
+ nvmet_fc_tgt_q_put(queue);
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+ u64 connection_id)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ u64 association_id = nvmet_fc_getassociationid(connection_id);
+ u16 qid = nvmet_fc_getqueueid(connection_id);
+
+ if (qid > NVMET_NR_QUEUES)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+ queue = rcu_dereference(assoc->queues[qid]);
+ if (queue &&
+ (!atomic_read(&queue->connected) ||
+ !nvmet_fc_tgt_q_get(queue)))
+ queue = NULL;
+ rcu_read_unlock();
+ return queue;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+static void
+nvmet_fc_hostport_free(struct kref *ref)
+{
+ struct nvmet_fc_hostport *hostport =
+ container_of(ref, struct nvmet_fc_hostport, ref);
+ struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del(&hostport->host_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ if (tgtport->ops->host_release && hostport->invalid)
+ tgtport->ops->host_release(hostport->hosthandle);
+ kfree(hostport);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
+{
+ kref_put(&hostport->ref, nvmet_fc_hostport_free);
+}
+
+static int
+nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
+{
+ return kref_get_unless_zero(&hostport->ref);
+}
+
+static void
+nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
+{
+ /* if LLDD not implemented, leave as NULL */
+ if (!hostport || !hostport->hosthandle)
+ return;
+
+ nvmet_fc_hostport_put(hostport);
+}
+
+static struct nvmet_fc_hostport *
+nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_hostport *host;
+
+ lockdep_assert_held(&tgtport->lock);
+
+ list_for_each_entry(host, &tgtport->host_list, host_list) {
+ if (host->hosthandle == hosthandle && !host->invalid) {
+ if (nvmet_fc_hostport_get(host))
+ return (host);
+ }
+ }
+
+ return NULL;
+}
+
+static struct nvmet_fc_hostport *
+nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_hostport *newhost, *match = NULL;
+ unsigned long flags;
+
+ /* if LLDD not implemented, leave as NULL */
+ if (!hosthandle)
+ return NULL;
+
+ /*
+ * take a reference on the tgtport for what will be the newly
+ * allocated hostport, if we end up using a new allocation
+ */
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ match = nvmet_fc_match_hostport(tgtport, hosthandle);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ if (match) {
+ /* no new allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ return match;
+ }
+
+ newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
+ if (!newhost) {
+ /* no new allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ match = nvmet_fc_match_hostport(tgtport, hosthandle);
+ if (match) {
+ /* new allocation not needed */
+ kfree(newhost);
+ newhost = match;
+ /* no new allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ } else {
+ newhost->tgtport = tgtport;
+ newhost->hosthandle = hosthandle;
+ INIT_LIST_HEAD(&newhost->host_list);
+ kref_init(&newhost->ref);
+
+ list_add_tail(&newhost->host_list, &tgtport->host_list);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ return newhost;
+}
+
+static void
+nvmet_fc_delete_assoc(struct work_struct *work)
+{
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ unsigned long flags;
+ u64 ran;
+ int idx;
+ bool needrandom = true;
+
+ assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+ if (!assoc)
+ return NULL;
+
+ idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
+ if (idx < 0)
+ goto out_free_assoc;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ goto out_ida;
+
+ assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
+ if (IS_ERR(assoc->hostport))
+ goto out_put;
+
+ assoc->tgtport = tgtport;
+ assoc->a_id = idx;
+ INIT_LIST_HEAD(&assoc->a_list);
+ kref_init(&assoc->ref);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ atomic_set(&assoc->terminating, 0);
+
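+ /*
+ * Pick a random association id. The low-order BYTES_FOR_QID bytes are
+ * left zero so that a queue id can later be encoded into the
+ * connection id (see nvmet_fc_makeconnid()/nvmet_fc_getqueueid()).
+ */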
+ while (needrandom) {
+ get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
+ ran = ran << BYTES_FOR_QID_SHIFT;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ needrandom = false;
+ list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
+ if (ran == tmpassoc->association_id) {
+ needrandom = true;
+ break;
+ }
+ }
+ if (!needrandom) {
+ assoc->association_id = ran;
+ list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ }
+
+ return assoc;
+
+out_put:
+ nvmet_fc_tgtport_put(tgtport);
+out_ida:
+ ida_free(&tgtport->assoc_cnt, idx);
+out_free_assoc:
+ kfree(assoc);
+ return NULL;
+}
+
+static void
+nvmet_fc_target_assoc_free(struct kref *ref)
+{
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(ref, struct nvmet_fc_tgt_assoc, ref);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_ls_iod *oldls;
+ unsigned long flags;
+
+ /* Send Disconnect now that all i/o has completed */
+ nvmet_fc_xmt_disconnect_assoc(assoc);
+
+ nvmet_fc_free_hostport(assoc->hostport);
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del_rcu(&assoc->a_list);
+ oldls = assoc->rcv_disconn;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* if pending Rcv Disconnect Association LS, send rsp now */
+ if (oldls)
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ ida_free(&tgtport->assoc_cnt, assoc->a_id);
+ dev_info(tgtport->dev,
+ "{%d:%d} Association freed\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ kfree_rcu(assoc, rcu);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
+{
+ kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
+}
+
+static int
+nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
+{
+ return kref_get_unless_zero(&assoc->ref);
+}
+
+static void
+nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_tgt_queue *queue;
+ int i, terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
+
+
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ rcu_read_lock();
+ queue = rcu_dereference(assoc->queues[i]);
+ if (!queue) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ if (!nvmet_fc_tgt_q_get(queue)) {
+ rcu_read_unlock();
+ continue;
+ }
+ rcu_read_unlock();
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
+ }
+
+ dev_info(tgtport->dev,
+ "{%d:%d} Association deleted\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
+ u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_assoc *ret = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+ ret = assoc;
+ if (!nvmet_fc_tgt_a_get(assoc))
+ ret = NULL;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void
+nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_port_entry *pe,
+ struct nvmet_port *port)
+{
+ lockdep_assert_held(&nvmet_fc_tgtlock);
+
+ pe->tgtport = tgtport;
+ tgtport->pe = pe;
+
+ pe->port = port;
+ port->priv = pe;
+
+ pe->node_name = tgtport->fc_target_port.node_name;
+ pe->port_name = tgtport->fc_target_port.port_name;
+ INIT_LIST_HEAD(&pe->pe_list);
+
+ list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
+}
+
+static void
+nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport)
+ pe->tgtport->pe = NULL;
+ list_del(&pe->pe_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a targetport deregisters. Breaks the relationship
+ * with the nvmet port, but leaves the port_entry in place so that
+ * re-registration can resume operation.
+ */
+static void
+nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_port_entry *pe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ pe = tgtport->pe;
+ if (pe)
+ pe->tgtport = NULL;
+ tgtport->pe = NULL;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a new targetport is registered. Looks in the
+ * existing nvmet port_entries to see if the nvmet layer is
+ * configured for the targetport's wwn's. (the targetport existed,
+ * nvmet configured, the lldd unregistered the tgtport, and is now
+ * reregistering the same targetport). If so, set the nvmet port
+ * entry on the targetport.
+ */
+static void
+nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_port_entry *pe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
+ if (tgtport->fc_target_port.node_name == pe->node_name &&
+ tgtport->fc_target_port.port_name == pe->port_name) {
+ WARN_ON(pe->tgtport);
+ tgtport->pe = pe;
+ pe->tgtport = tgtport;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/**
+ * nvmet_fc_register_targetport - transport entry point called by an
+ * LLDD to register the existence of a local
+ * NVME subsystem FC port.
+ * @pinfo: pointer to information about the port to be registered
+ * @template: LLDD entrypoints and operational parameters for the port
+ * @dev: physical hardware device node the port corresponds to. Will be
+ * used for DMA mappings
+ * @portptr: pointer to a target port pointer. Upon success, the routine
+ * will allocate an nvmet_fc_target_port structure and place its
+ * address in the target port pointer. Upon failure, the target port
+ * pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ struct nvmet_fc_target_template *template,
+ struct device *dev,
+ struct nvmet_fc_target_port **portptr)
+{
+ struct nvmet_fc_tgtport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!template->xmt_ls_rsp || !template->fcp_op ||
+ !template->fcp_abort ||
+ !template->fcp_req_release || !template->targetport_delete ||
+ !template->max_hw_queues || !template->max_sgl_segments ||
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
+ ret = -EINVAL;
+ goto out_regtgt_failed;
+ }
+
+ newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_regtgt_failed;
+ }
+
+ idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_fail_kfree;
+ }
+
+ if (!get_device(dev) && dev) {
+ ret = -ENODEV;
+ goto out_ida_put;
+ }
+
+ newrec->fc_target_port.node_name = pinfo->node_name;
+ newrec->fc_target_port.port_name = pinfo->port_name;
+ if (template->target_priv_sz)
+ newrec->fc_target_port.private = &newrec[1];
+ else
+ newrec->fc_target_port.private = NULL;
+ newrec->fc_target_port.port_id = pinfo->port_id;
+ newrec->fc_target_port.port_num = idx;
+ INIT_LIST_HEAD(&newrec->tgt_list);
+ newrec->dev = dev;
+ newrec->ops = template;
+ spin_lock_init(&newrec->lock);
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
+ INIT_LIST_HEAD(&newrec->ls_req_list);
+ INIT_LIST_HEAD(&newrec->ls_busylist);
+ INIT_LIST_HEAD(&newrec->assoc_list);
+ INIT_LIST_HEAD(&newrec->host_list);
+ kref_init(&newrec->ref);
+ ida_init(&newrec->assoc_cnt);
+ newrec->max_sg_cnt = template->max_sgl_segments;
+
+ ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_free_newrec;
+ }
+
+ nvmet_fc_portentry_rebind_tgt(newrec);
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ *portptr = &newrec->fc_target_port;
+ return 0;
+
+out_free_newrec:
+ put_device(dev);
+out_ida_put:
+ ida_free(&nvmet_fc_tgtport_cnt, idx);
+out_fail_kfree:
+ kfree(newrec);
+out_regtgt_failed:
+ *portptr = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
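+
+/*
+ * Illustrative sketch only (hypothetical LLDD; all "example_lldd_*" names
+ * are placeholders and the callbacks are assumed to be defined elsewhere):
+ * a minimal registration call providing the template fields validated
+ * above.
+ *
+ *	static struct nvmet_fc_target_template example_lldd_tgt_template = {
+ *		.targetport_delete	= example_lldd_targetport_delete,
+ *		.xmt_ls_rsp		= example_lldd_xmt_ls_rsp,
+ *		.fcp_op			= example_lldd_fcp_op,
+ *		.fcp_abort		= example_lldd_fcp_abort,
+ *		.fcp_req_release	= example_lldd_fcp_req_release,
+ *		.max_hw_queues		= 4,
+ *		.max_sgl_segments	= 256,
+ *		.max_dif_sgl_segments	= 256,
+ *		.dma_boundary		= 0xFFFFFFFF,
+ *		.target_priv_sz		= sizeof(struct example_lldd_port),
+ *	};
+ *
+ *	static int example_lldd_register(struct example_lldd *lldd)
+ *	{
+ *		struct nvmet_fc_port_info pinfo = {
+ *			.node_name	= lldd->wwnn,
+ *			.port_name	= lldd->wwpn,
+ *			.port_id	= lldd->d_id,
+ *		};
+ *
+ *		return nvmet_fc_register_targetport(&pinfo,
+ *				&example_lldd_tgt_template, lldd->dev,
+ *				&lldd->targetport);
+ *	}
+ */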
+
+
+static void
+nvmet_fc_free_tgtport(struct kref *ref)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(ref, struct nvmet_fc_tgtport, ref);
+ struct device *dev = tgtport->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_del(&tgtport->tgt_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ nvmet_fc_free_ls_iodlist(tgtport);
+
+ /* let the LLDD know we've finished tearing it down */
+ tgtport->ops->targetport_delete(&tgtport->fc_target_port);
+
+ ida_free(&nvmet_fc_tgtport_cnt,
+ tgtport->fc_target_port.port_num);
+
+ ida_destroy(&tgtport->assoc_cnt);
+
+ kfree(tgtport);
+
+ put_device(dev);
+}
+
+static void
+nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
+{
+ kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
+}
+
+static int
+nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
+{
+ return kref_get_unless_zero(&tgtport->ref);
+}
+
+static void
+__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * nvmet_fc_invalidate_host - transport entry point called by an LLDD
+ * to remove references to a hosthandle for LS's.
+ *
+ * The nvmet-fc layer ensures that any references to the hosthandle
+ * on the targetport are forgotten (set to NULL). The LLDD will
+ * typically call this when a login with a remote host port has been
+ * lost, thus LS's for the remote host port are no longer possible.
+ *
+ * If an LS request is outstanding to the targetport/hosthandle (or
+ * issued concurrently with the call to invalidate the host), the
+ * LLDD is responsible for terminating/aborting the LS and completing
+ * the LS request. It is recommended that these terminations/aborts
+ * occur after calling to invalidate the host handle to avoid additional
+ * retries by the nvmet-fc transport. The nvmet-fc transport may
+ * continue to reference the host handle while it cleans up outstanding
+ * NVME associations. The nvmet-fc transport will call the
+ * ops->host_release() callback to notify the LLDD that all references
+ * are complete and the related host handle can be recovered.
+ * Note: if there are no references, the callback may be called before
+ * the invalidate host call returns.
+ *
+ * @target_port: pointer to the (registered) target port that a prior
+ * LS was received on and which supplied the transport the
+ * hosthandle.
+ * @hosthandle: the handle (pointer) that represents the host port
+ * that no longer has connectivity and that LS's should
+ * no longer be directed to.
+ */
+void
+nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ void *hosthandle)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvmet_fc_tgt_assoc *assoc, *next;
+ unsigned long flags;
+ bool noassoc = true;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry_safe(assoc, next,
+ &tgtport->assoc_list, a_list) {
+ if (!assoc->hostport ||
+ assoc->hostport->hosthandle != hosthandle)
+ continue;
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ /* if there's nothing to wait for - call the callback */
+ if (noassoc && tgtport->ops->host_release)
+ tgtport->ops->host_release(hosthandle);
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
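+
+/*
+ * Illustrative sketch only (hypothetical LLDD, placeholder names): when the
+ * login with a remote host port is lost, invalidate the hosthandle first
+ * and only then terminate/abort any LS exchanges still outstanding to that
+ * host, per the recommendation above.
+ *
+ *	static void example_lldd_rport_gone(struct example_lldd_rport *rport)
+ *	{
+ *		nvmet_fc_invalidate_host(rport->lldd->targetport, rport);
+ *		example_lldd_abort_outstanding_ls(rport);
+ *	}
+ */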
+
+/*
+ * nvmet layer has called to terminate an association
+ */
+static void
+nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_fc_tgtport *tgtport, *next;
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ bool found_ctrl = false;
+
+ /* this is a bit ugly, but don't want to make locks layered */
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
+ tgt_list) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ queue = rcu_dereference(assoc->queues[0]);
+ if (queue && queue->nvme_sq.ctrl == ctrl) {
+ if (nvmet_fc_tgt_a_get(assoc))
+ found_ctrl = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ nvmet_fc_tgtport_put(tgtport);
+
+ if (found_ctrl) {
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ return;
+ }
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/**
+ * nvmet_fc_unregister_targetport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered local NVME subsystem FC port.
+ * @target_port: pointer to the (registered) target port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+
+ nvmet_fc_portentry_unbind_tgt(tgtport);
+
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+
+ /*
+ * should terminate LS's as well. However, LS's will be generated
+ * at the tail end of association termination, so they likely don't
+ * exist yet. And even if they did, it's worthwhile to just let
+ * them finish; targetport ref counting will clean things up.
+ */
+
+ nvmet_fc_tgtport_put(tgtport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
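+
+/*
+ * Illustrative sketch only (hypothetical LLDD, placeholder names): the
+ * matching teardown path. The LLDD's targetport_delete() callback fires
+ * once the final targetport reference is dropped, after which the LLDD
+ * may free its own per-port resources.
+ *
+ *	static void example_lldd_remove_port(struct example_lldd *lldd)
+ *	{
+ *		nvmet_fc_unregister_targetport(lldd->targetport);
+ *	}
+ */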
+
+
+/* ********************** FC-NVME LS RCV Handling ************************* */
+
+
+static void
+nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
+ struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ /*
+ * FC-NVME spec changes: the padding size specified for the Create
+ * Association Cmd descriptor was originally incorrect, so initiators
+ * send different lengths.
+ * Accept anything of "minimum" length. Assume the format per the 1.15
+ * spec (with HOSTID reduced to 16 bytes), and ignore how long the
+ * trailing pad is.
+ */
+ if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
+ ret = VERR_CR_ASSOC_LEN;
+ else if (be32_to_cpu(rqst->desc_list_len) <
+ FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
+ ret = VERR_CR_ASSOC_RQST_LEN;
+ else if (rqst->assoc_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
+ ret = VERR_CR_ASSOC_CMD;
+ else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
+ FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
+ ret = VERR_CR_ASSOC_CMD_LEN;
+ else if (!rqst->assoc_cmd.ersp_ratio ||
+ (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
+ be16_to_cpu(rqst->assoc_cmd.sqsize)))
+ ret = VERR_ERSP_RATIO;
+
+ else {
+ /* new association w/ admin queue */
+ iod->assoc = nvmet_fc_alloc_target_assoc(
+ tgtport, iod->hosthandle);
+ if (!iod->assoc)
+ ret = VERR_ASSOC_ALLOC_FAIL;
+ else {
+ queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+ be16_to_cpu(rqst->assoc_cmd.sqsize));
+ if (!queue) {
+ ret = VERR_QUEUE_ALLOC_FAIL;
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
+ }
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Create Association LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return;
+ }
+
+ queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
+ atomic_set(&queue->connected, 1);
+ queue->sqhd = 0; /* best place to init value */
+
+ dev_info(tgtport->dev,
+ "{%d:%d} Association created\n",
+ tgtport->fc_target_port.port_num, iod->assoc->a_id);
+
+ /* format a response */
+
+ iod->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_acc)),
+ FCNVME_LS_CREATE_ASSOCIATION);
+ acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ acc->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+ acc->associd.association_id =
+ cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
+ acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ acc->connectid.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_conn_id));
+ acc->connectid.connection_id = acc->associd.association_id;
+}
+
+static void
+nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
+ struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
+ struct nvmet_fc_tgt_queue *queue;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
+ ret = VERR_CR_CONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_conn_rqst)))
+ ret = VERR_CR_CONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->connect_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
+ ret = VERR_CR_CONN_CMD;
+ else if (rqst->connect_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
+ ret = VERR_CR_CONN_CMD_LEN;
+ else if (!rqst->connect_cmd.ersp_ratio ||
+ (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
+ be16_to_cpu(rqst->connect_cmd.sqsize)))
+ ret = VERR_ERSP_RATIO;
+
+ else {
+ /* new io queue */
+ iod->assoc = nvmet_fc_find_target_assoc(tgtport,
+ be64_to_cpu(rqst->associd.association_id));
+ if (!iod->assoc)
+ ret = VERR_NO_ASSOC;
+ else {
+ queue = nvmet_fc_alloc_target_queue(iod->assoc,
+ be16_to_cpu(rqst->connect_cmd.qid),
+ be16_to_cpu(rqst->connect_cmd.sqsize));
+ if (!queue)
+ ret = VERR_QUEUE_ALLOC_FAIL;
+
+ /* release get taken in nvmet_fc_find_target_assoc */
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Create Connection LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return;
+ }
+
+ queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
+ atomic_set(&queue->connected, 1);
+ queue->sqhd = 0; /* best place to init value */
+
+ /* format a response */
+
+ iod->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
+ FCNVME_LS_CREATE_CONNECTION);
+ acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ acc->connectid.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_conn_id));
+ acc->connectid.connection_id =
+ cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
+ be16_to_cpu(rqst->connect_cmd.qid)));
+}
+
+/*
+ * Returns true if the LS response is to be transmitted
+ * Returns false if the LS response is to be delayed
+ */
+static int
+nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &iod->rqstbuf->rq_dis_assoc;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ &iod->rspbuf->rsp_dis_assoc;
+ struct nvmet_fc_tgt_assoc *assoc = NULL;
+ struct nvmet_fc_ls_iod *oldls = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association - takes an assoc ref if !NULL */
+ assoc = nvmet_fc_find_target_assoc(tgtport,
+ be64_to_cpu(rqst->associd.association_id));
+ iod->assoc = assoc;
+ if (!assoc)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret || !assoc) {
+ dev_err(tgtport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return true;
+ }
+
+ /* format a response */
+
+ iod->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
+
+ /* release get taken in nvmet_fc_find_target_assoc */
+ nvmet_fc_tgt_a_put(assoc);
+
+ /*
+ * The rules for the LS response say the response cannot
+ * go back until ABTS's have been sent for all outstanding
+ * I/O and a Disconnect Association LS has been sent.
+ * So... save off the Disconnect LS to send the response
+ * later. If there was a prior LS already saved, replace
+ * it with the newer one and send a can't perform reject
+ * on the older one.
+ */
+ spin_lock_irqsave(&tgtport->lock, flags);
+ oldls = assoc->rcv_disconn;
+ assoc->rcv_disconn = iod;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_delete_target_assoc(assoc);
+
+ if (oldls) {
+ dev_info(tgtport->dev,
+ "{%d:%d} Multiple Disconnect Association LS's "
+ "received\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*iod->rspbuf),
+ /* ok to use rqst, LS is same */
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ }
+
+ return false;
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
+
+static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
+
+static void
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
+ struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+ fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
+ nvmet_fc_free_ls_iod(tgtport, iod);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ int ret;
+
+ fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
+
+ ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
+ if (ret)
+ nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
+}
+
+/*
+ * Actual processing routine for received FC-NVME LS Requests from the LLD
+ */
+static void
+nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
+ bool sendrsp = true;
+
+ iod->lsrsp->nvme_fc_private = iod;
+ iod->lsrsp->rspbuf = iod->rspbuf;
+ iod->lsrsp->rspdma = iod->rspdma;
+ iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
+ /* Be defensive: handlers will later set this to a valid length */
+ iod->lsrsp->rsplen = 0;
+
+ iod->assoc = NULL;
+
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ /* Creates Association and initial Admin Queue/Connection */
+ nvmet_fc_ls_create_association(tgtport, iod);
+ break;
+ case FCNVME_LS_CREATE_CONNECTION:
+ /* Creates an IO Queue/Connection */
+ nvmet_fc_ls_create_connection(tgtport, iod);
+ break;
+ case FCNVME_LS_DISCONNECT_ASSOC:
+ /* Terminate a Queue/Connection or the Association */
+ sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
+ break;
+ default:
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
+ sizeof(*iod->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
+ }
+
+ if (sendrsp)
+ nvmet_fc_xmt_ls_rsp(tgtport, iod);
+}
+
+/*
+ * Work-queue wrapper that invokes the LS request processing routine above
+ */
+static void
+nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvmet_fc_ls_iod *iod =
+ container_of(work, struct nvmet_fc_ls_iod, work);
+ struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+ nvmet_fc_handle_ls_rqst(tgtport, iod);
+}
+
+
+/**
+ * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
+ * upon the reception of a NVME LS request.
+ *
+ * The nvmet-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the LS was
+ * received on.
+ * @hosthandle: pointer to the host specific data, gets stored in iod.
+ * @lsrsp: pointer to a lsrsp structure to be used to reference
+ * the exchange corresponding to the LS.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvmet_fc_ls_iod *iod;
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: payload too large (%d)\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "",
+ lsreqbuf_len);
+ return -E2BIG;
+ }
+
+ if (!nvmet_fc_tgtport_get(tgtport)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: target deleting\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ return -ESHUTDOWN;
+ }
+
+ iod = nvmet_fc_alloc_ls_iod(tgtport);
+ if (!iod) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: context allocation failed\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ nvmet_fc_tgtport_put(tgtport);
+ return -ENOENT;
+ }
+
+ iod->lsrsp = lsrsp;
+ iod->fcpreq = NULL;
+ memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
+ iod->rqstdatalen = lsreqbuf_len;
+ iod->hosthandle = hosthandle;
+
+ queue_work(nvmet_wq, &iod->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
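+
+/*
+ * Illustrative sketch only (hypothetical LLDD, placeholder names): handing
+ * a received NVME LS frame to nvmet-fc. The payload is copied by the
+ * transport, so the receive buffer may be reused as soon as the call
+ * returns; on a non-zero return the LLDD aborts the exchange.
+ *
+ *	static void example_lldd_recv_nvme_ls(struct example_lldd_rport *rport,
+ *			struct example_lldd_exch *exch, void *buf, u32 len)
+ *	{
+ *		if (nvmet_fc_rcv_ls_req(rport->lldd->targetport, rport,
+ *					&exch->ls_rsp, buf, len))
+ *			example_lldd_abort_exchange(exch);
+ *	}
+ */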
+
+
+/*
+ * **********************
+ * Start of FCP handling
+ * **********************
+ */
+
+static int
+nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+ struct scatterlist *sg;
+ unsigned int nent;
+
+ sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
+ if (!sg)
+ goto out;
+
+ fod->data_sg = sg;
+ fod->data_sg_cnt = nent;
+ fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ /* note: write from initiator perspective */
+ fod->next_sg = fod->data_sg;
+
+ return 0;
+
+out:
+ return NVME_SC_INTERNAL;
+}
+
+static void
+nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+ if (!fod->data_sg || !fod->data_sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ sgl_free(fod->data_sg);
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
+}
+
+
+static bool
+queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
+{
+ u32 sqtail, used;
+
+ /* egad, this is ugly. And sqtail is just a best guess */
+ sqtail = atomic_read(&q->sqtail) % q->sqsize;
+
+ used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
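+ /*
+ * e.g. (illustrative) sqsize = 32, sqhd = 30, sqtail = 5 (wrapped):
+ * used = 5 + 32 - 30 = 7, and 7 * 10 = 70 < (32 - 1) * 9 = 279, so the
+ * queue is not yet considered 90% full.
+ */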
+ return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
+}
+
+/*
+ * Prep RSP payload.
+ * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
+ */
+static void
+nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
+ struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+ struct nvme_completion *cqe = &ersp->cqe;
+ u32 *cqewd = (u32 *)cqe;
+ bool send_ersp = false;
+ u32 rsn, rspcnt, xfr_length;
+
+ if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
+ xfr_length = fod->req.transfer_len;
+ else
+ xfr_length = fod->offset;
+
+ /*
+ * check to see if we can send a 0's rsp.
+ * Note: to send a 0's response, the NVME-FC host transport will
+ * recreate the CQE. The host transport knows: sq id, SQHD (last
+ * seen in an ersp), and command_id. Thus it will create a
+ * zero-filled CQE with those known fields filled in. Transport
+ * must send an ersp for any condition where the cqe won't match
+ * this.
+ *
+ * Here are the FC-NVME mandated cases where we must send an ersp:
+ * every N responses, where N=ersp_ratio
+ * force fabric commands to send ersp's (not in FC-NVME but good
+ * practice)
+ * normal cmds: any time status is non-zero, or status is zero
+ * but words 0 or 1 are non-zero.
+ * the SQ is 90% or more full
+ * the cmd is a fused command
+ * transferred data length not equal to cmd iu length
+ */
+ rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
+ if (!(rspcnt % fod->queue->ersp_ratio) ||
+ nvme_is_fabrics((struct nvme_command *) sqe) ||
+ xfr_length != fod->req.transfer_len ||
+ (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
+ (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
+ queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
+ send_ersp = true;
+
+ /* re-set the fields */
+ fod->fcpreq->rspaddr = ersp;
+ fod->fcpreq->rspdma = fod->rspdma;
+
+ if (!send_ersp) {
+ memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
+ fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
+ } else {
+ ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
+ rsn = atomic_inc_return(&fod->queue->rsn);
+ ersp->rsn = cpu_to_be32(rsn);
+ ersp->xfrd_len = cpu_to_be32(xfr_length);
+ fod->fcpreq->rsplen = sizeof(*ersp);
+ }
+
+ fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+}
+
+static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
+
+static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ /*
+ * if an ABTS was received or we issued the fcp_abort early
+ * don't call abort routine again.
+ */
+ /* no need to take lock - lock was taken earlier to get here */
+ if (!fod->aborted)
+ tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
+
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+}
+
+static void
+nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ int ret;
+
+ fod->fcpreq->op = NVMET_FCOP_RSP;
+ fod->fcpreq->timeout = 0;
+
+ nvmet_fc_prep_fcp_rsp(tgtport, fod);
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+ if (ret)
+ nvmet_fc_abort_op(tgtport, fod);
+}
+
+static void
+nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, u8 op)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct scatterlist *sg = fod->next_sg;
+ unsigned long flags;
+ u32 remaininglen = fod->req.transfer_len - fod->offset;
+ u32 tlen = 0;
+ int ret;
+
+ fcpreq->op = op;
+ fcpreq->offset = fod->offset;
+ fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+
+ /*
+ * for next sequence:
+ * break at a sg element boundary
+ * attempt to keep sequence length capped at
+ * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+ * be longer if a single sg element is larger
+ * than that amount. This is done to avoid creating
+ * a new sg list to use for the tgtport api.
+ */
+ fcpreq->sg = sg;
+ fcpreq->sg_cnt = 0;
+ while (tlen < remaininglen &&
+ fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+ tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+ fcpreq->sg_cnt++;
+ tlen += sg_dma_len(sg);
+ sg = sg_next(sg);
+ }
+ if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+ fcpreq->sg_cnt++;
+ tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+ sg = sg_next(sg);
+ }
+ if (tlen < remaininglen)
+ fod->next_sg = sg;
+ else
+ fod->next_sg = NULL;
+
+ fcpreq->transfer_length = tlen;
+ fcpreq->transferred_length = 0;
+ fcpreq->fcp_error = 0;
+ fcpreq->rsplen = 0;
+
+ /*
+ * If the last READDATA request: check if LLDD supports
+ * combined xfr with response.
+ */
+ if ((op == NVMET_FCOP_READDATA) &&
+ ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
+ (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
+ fcpreq->op = NVMET_FCOP_READDATA_RSP;
+ nvmet_fc_prep_fcp_rsp(tgtport, fod);
+ }
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+ if (ret) {
+ /*
+ * should be ok to set w/o lock as it's in the thread of
+ * execution (not an async timer routine) and doesn't
+ * contend with any clearing action
+ */
+ fod->abort = true;
+
+ if (op == NVMET_FCOP_WRITEDATA) {
+ spin_lock_irqsave(&fod->flock, flags);
+ fod->writedataactive = false;
+ spin_unlock_irqrestore(&fod->flock, flags);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
+ } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+ fcpreq->fcp_error = ret;
+ fcpreq->transferred_length = 0;
+ nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
+ }
+ }
+}
+
+static inline bool
+__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ /* if in the middle of an io and we need to tear down */
+ if (abort) {
+ if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
+ return true;
+ }
+
+ nvmet_fc_abort_op(tgtport, fod);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * actual done handler for FCP operations when completed by the lldd
+ */
+static void
+nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ unsigned long flags;
+ bool abort;
+
+ spin_lock_irqsave(&fod->flock, flags);
+ abort = fod->abort;
+ fod->writedataactive = false;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ switch (fcpreq->op) {
+
+ case NVMET_FCOP_WRITEDATA:
+ if (__nvmet_fc_fod_op_abort(fod, abort))
+ return;
+ if (fcpreq->fcp_error ||
+ fcpreq->transferred_length != fcpreq->transfer_length) {
+ spin_lock_irqsave(&fod->flock, flags);
+ fod->abort = true;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
+ return;
+ }
+
+ fod->offset += fcpreq->transferred_length;
+ if (fod->offset != fod->req.transfer_len) {
+ spin_lock_irqsave(&fod->flock, flags);
+ fod->writedataactive = true;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ /* transfer the next chunk */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_WRITEDATA);
+ return;
+ }
+
+ /* data transfer complete, resume with nvmet layer */
+ fod->req.execute(&fod->req);
+ break;
+
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ if (__nvmet_fc_fod_op_abort(fod, abort))
+ return;
+ if (fcpreq->fcp_error ||
+ fcpreq->transferred_length != fcpreq->transfer_length) {
+ nvmet_fc_abort_op(tgtport, fod);
+ return;
+ }
+
+ /* success */
+
+ if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+ return;
+ }
+
+ fod->offset += fcpreq->transferred_length;
+ if (fod->offset != fod->req.transfer_len) {
+ /* transfer the next chunk */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_READDATA);
+ return;
+ }
+
+ /* data transfer complete, send response */
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+
+ break;
+
+ case NVMET_FCOP_RSP:
+ if (__nvmet_fc_fod_op_abort(fod, abort))
+ return;
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+ nvmet_fc_fod_op_done(fod);
+}
+
+/*
+ * actual completion handler after execution by the nvmet layer
+ */
+static void
+__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, int status)
+{
+ struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+ struct nvme_completion *cqe = &fod->rspiubuf.cqe;
+ unsigned long flags;
+ bool abort;
+
+ spin_lock_irqsave(&fod->flock, flags);
+ abort = fod->abort;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ /* if we have a CQE, snoop the last sq_head value */
+ if (!status)
+ fod->queue->sqhd = cqe->sq_head;
+
+ if (abort) {
+ nvmet_fc_abort_op(tgtport, fod);
+ return;
+ }
+
+ /* if an error handling the cmd post initial parsing */
+ if (status) {
+ /* fudge up a failed CQE status for our transport error */
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
+ cqe->sq_id = cpu_to_le16(fod->queue->qid);
+ cqe->command_id = sqe->command_id;
+ cqe->status = cpu_to_le16(status);
+ } else {
+
+ /*
+ * try to push the data even if the SQE status is non-zero.
+ * There may be a status where data still was intended to
+ * be moved
+ */
+ if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
+ /* push the data over before sending rsp */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_READDATA);
+ return;
+ }
+
+ /* writes & no data - fall thru */
+ }
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+}
+
+
+static void
+nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
+{
+ struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
+}
+
+
+/*
+ * Actual processing routine for received FC-NVME I/O Requests from the LLD
+ */
+static void
+nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+ u32 xfrlen = be32_to_cpu(cmdiu->data_len);
+ int ret;
+
+ /*
+ * Fused commands are currently not supported in the Linux
+ * implementation.
+ *
+ * As such, the FC transport implementation does not inspect fused
+ * commands, nor does it hold delivery to the upper layer until both
+ * commands of the pair have arrived (ordered by csn).
+ */
+
+ fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
+
+ if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
+ fod->io_dir = NVMET_FCP_WRITE;
+ if (!nvme_is_write(&cmdiu->sqe))
+ goto transport_error;
+ } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
+ fod->io_dir = NVMET_FCP_READ;
+ if (nvme_is_write(&cmdiu->sqe))
+ goto transport_error;
+ } else {
+ fod->io_dir = NVMET_FCP_NODATA;
+ if (xfrlen)
+ goto transport_error;
+ }
+
+ fod->req.cmd = &fod->cmdiubuf.sqe;
+ fod->req.cqe = &fod->rspiubuf.cqe;
+ if (tgtport->pe)
+ fod->req.port = tgtport->pe->port;
+
+ /* clear any response payload */
+ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
+
+ ret = nvmet_req_init(&fod->req,
+ &fod->queue->nvme_cq,
+ &fod->queue->nvme_sq,
+ &nvmet_fc_tgt_fcp_ops);
+ if (!ret) {
+ /* bad SQE content or invalid ctrl state */
+ /* nvmet layer has already called op done to send rsp. */
+ return;
+ }
+
+ fod->req.transfer_len = xfrlen;
+
+ /* keep a running counter of tail position */
+ atomic_inc(&fod->queue->sqtail);
+
+ if (fod->req.transfer_len) {
+ ret = nvmet_fc_alloc_tgt_pgs(fod);
+ if (ret) {
+ nvmet_req_complete(&fod->req, ret);
+ return;
+ }
+ }
+ fod->req.sg = fod->data_sg;
+ fod->req.sg_cnt = fod->data_sg_cnt;
+ fod->offset = 0;
+
+ if (fod->io_dir == NVMET_FCP_WRITE) {
+ /* pull the data over before invoking nvmet layer */
+ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
+ return;
+ }
+
+ /*
+ * Reads or no data:
+ *
+ * can invoke the nvmet_layer now. If read data, cmd completion will
+ * push the data
+ */
+ fod->req.execute(&fod->req);
+ return;
+
+transport_error:
+ nvmet_fc_abort_op(tgtport, fod);
+}
+
+/**
+ * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
+ * upon the reception of a NVME FCP CMD IU.
+ *
+ * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
+ * layer for processing.
+ *
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
+ *
+ * However, due to the packetized nature of FC and the api of the FC
+ * LLDD (which may issue a hw command to send the response, yet not
+ * receive the hw completion for that command, and thus not upcall the
+ * nvmet_fc layer, before a new command is asynchronously received),
+ * it's possible for a command to be received before the LLDD and
+ * nvmet_fc have recycled the job structure. This gives the appearance
+ * of more commands received than fit in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated,
+ * the LLDD request and CMD IU buffer information are remembered, and
+ * the routine returns a -EOVERFLOW status. Subsequently, when a queue
+ * job structure is freed, it is immediately reallocated for anything on
+ * the pending request list. The LLDD's defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
+ *
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ * was received on.
+ * @fcpreq: pointer to a fcpreq request structure to be used to reference
+ * the exchange corresponding to the FCP Exchange.
+ * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
+ * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
+ */
+int
+nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_fcp_req *fcpreq,
+ void *cmdiubuf, u32 cmdiubuf_len)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
+ struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_fcp_iod *fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
+ unsigned long flags;
+
+ /* validate iu, so the connection id can be used to find the queue */
+ if ((cmdiubuf_len != sizeof(*cmdiu)) ||
+ (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
+ (cmdiu->fc_id != NVME_CMD_FC_ID) ||
+ (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
+ return -EIO;
+
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(cmdiu->connection_id));
+ if (!queue)
+ return -ENOTCONN;
+
+ /*
+ * note: reference taken by find_target_queue
+ * After successful fod allocation, the fod will inherit the
+ * ownership of that reference and will remove the reference
+ * when the fod is freed.
+ */
+
+ spin_lock_irqsave(&queue->qlock, flags);
+
+ fod = nvmet_fc_alloc_fcp_iod(queue);
+ if (fod) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+
+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+ return 0;
+ }
+
+ if (!tgtport->ops->defer_rcv) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOENT;
+ }
+
+ deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (deferfcp) {
+ /* Just re-use one that was previously allocated */
+ list_del(&deferfcp->req_list);
+ } else {
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /* Now we need to dynamically allocate one */
+ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+ if (!deferfcp) {
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
+
+ /* For now, use rspaddr / rsplen to save payload information */
+ fcpreq->rspaddr = cmdiubuf;
+ fcpreq->rsplen = cmdiubuf_len;
+ deferfcp->fcp_req = fcpreq;
+
+ /* defer processing till a fod becomes available */
+ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+ /* NOTE: the queue lookup reference is still valid */
+
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ return -EOVERFLOW;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
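+
+/*
+ * Illustrative sketch (not part of the transport): one way an LLDD's
+ * receive path might apply the contract described above. Anything
+ * prefixed "lldd_example_" is a hypothetical placeholder, not a real
+ * API; only nvmet_fc_rcv_fcp_req() itself is real.
+ */
+#if 0 /* usage sketch only - not built */
+static void lldd_example_recv_fcp_cmd(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq,
+ void *cmdiubuf, u32 cmdiubuf_len)
+{
+ int ret;
+
+ ret = nvmet_fc_rcv_fcp_req(tgtport, fcpreq, cmdiubuf, cmdiubuf_len);
+ if (!ret) {
+ /* the CMD IU was copied; the receive buffer may be reused now */
+ return;
+ }
+ if (ret == -EOVERFLOW) {
+ /*
+ * io accepted but deferred: leave cmdiubuf untouched until
+ * the LLDD's defer_rcv() callback is made for this fcpreq.
+ */
+ return;
+ }
+ /* any other error: io not accepted - abort the exchange */
+ lldd_example_abort_exchange(fcpreq); /* hypothetical helper */
+}
+#endif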
+
+/**
+ * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
+ * upon the reception of an ABTS for an FCP command
+ *
+ * Notify the transport that an ABTS has been received for an FCP command
+ * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
+ * LLDD believes the command is still being worked on
+ * (template_ops->fcp_req_release() has not been called).
+ *
+ * The transport will wait for any outstanding work (an op to the LLDD,
+ * which the lldd should complete with error due to the ABTS; or the
+ * completion from the nvmet layer of the nvme command), then will
+ * stop processing and call the LLDD's fcp_req_release() callback to
+ * return the i/o context to the LLDD. The LLDD may send the BA_ACC
+ * for the ABTS either after return from this function (assuming any
+ * outstanding op work has been terminated) or upon the callback being
+ * called.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ * was received on.
+ * @fcpreq: pointer to the fcpreq request structure that corresponds
+ * to the exchange that received the ABTS.
+ */
+void
+nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+
+ if (!fod || fod->fcpreq != fcpreq)
+ /* job appears to have already completed, ignore abort */
+ return;
+
+ queue = fod->queue;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ if (fod->active) {
+ /*
+ * mark as abort. The abort handler, invoked upon completion
+ * of any work, will detect the aborted status and do the
+ * callback.
+ */
+ spin_lock(&fod->flock);
+ fod->abort = true;
+ fod->aborted = true;
+ spin_unlock(&fod->flock);
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
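+
+/*
+ * Illustrative sketch (not part of the transport): an LLDD ABTS handler
+ * only needs to notify the transport; the BA_ACC may then be sent here
+ * or once the io context is returned. "lldd_example_send_ba_acc" is a
+ * hypothetical placeholder.
+ */
+#if 0 /* usage sketch only - not built */
+static void lldd_example_recv_abts(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ nvmet_fc_rcv_fcp_abort(tgtport, fcpreq);
+ lldd_example_send_ba_acc(fcpreq); /* hypothetical helper */
+}
+#endif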
+
+
+struct nvmet_fc_traddr {
+ u64 nn;
+ u64 pn;
+};
+
+static int
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
+{
+ u64 token64;
+
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
+
+ return 0;
+}
+
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * As the kernel parsers need a 0x prefix to determine the number base,
+ * the name strings are always copied into a 0x-prefixed scratch buffer
+ * before being parsed.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+ /* validate if string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
+
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
+
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
+}
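+
+/*
+ * Illustrative sketch: parsing a long-form traddr string. The sample
+ * WWNs are made up; real callers pass the configfs traddr buffer and
+ * its size.
+ */
+#if 0 /* usage sketch only - not built */
+static void example_parse_traddr(void)
+{
+ struct nvmet_fc_traddr traddr = { 0L, 0L };
+ char buf[] = "nn-0x200000109b579d05:pn-0x100000109b579d05";
+
+ if (!nvme_fc_parse_traddr(&traddr, buf, sizeof(buf)))
+ pr_info("parsed nn=0x%llx pn=0x%llx\n", traddr.nn, traddr.pn);
+}
+#endif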
+
+static int
+nvmet_fc_add_port(struct nvmet_port *port)
+{
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_port_entry *pe;
+ struct nvmet_fc_traddr traddr = { 0L, 0L };
+ unsigned long flags;
+ int ret;
+
+ /* validate the address info */
+ if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
+ (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
+ return -EINVAL;
+
+ /* map the traddr address info to a target port */
+
+ ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
+ sizeof(port->disc_addr.traddr));
+ if (ret)
+ return ret;
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ ret = -ENXIO;
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
+ if ((tgtport->fc_target_port.node_name == traddr.nn) &&
+ (tgtport->fc_target_port.port_name == traddr.pn)) {
+ /* an FC port can map to only one nvmet port id */
+ if (!tgtport->pe) {
+ nvmet_fc_portentry_bind(tgtport, pe, port);
+ ret = 0;
+ } else
+ ret = -EALREADY;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (ret)
+ kfree(pe);
+
+ return ret;
+}
+
+static void
+nvmet_fc_remove_port(struct nvmet_port *port)
+{
+ struct nvmet_fc_port_entry *pe = port->priv;
+
+ nvmet_fc_portentry_unbind(pe);
+
+ kfree(pe);
+}
+
+static void
+nvmet_fc_discovery_chg(struct nvmet_port *port)
+{
+ struct nvmet_fc_port_entry *pe = port->priv;
+ struct nvmet_fc_tgtport *tgtport = pe->tgtport;
+
+ if (tgtport && tgtport->ops->discovery_event)
+ tgtport->ops->discovery_event(&tgtport->fc_target_port);
+}
+
+static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_FC,
+ .msdbd = 1,
+ .add_port = nvmet_fc_add_port,
+ .remove_port = nvmet_fc_remove_port,
+ .queue_response = nvmet_fc_fcp_nvme_cmd_done,
+ .delete_ctrl = nvmet_fc_delete_ctrl,
+ .discovery_chg = nvmet_fc_discovery_chg,
+};
+
+static int __init nvmet_fc_init_module(void)
+{
+ return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+}
+
+static void __exit nvmet_fc_exit_module(void)
+{
+ /* sanity check - all lports should be removed */
+ if (!list_empty(&nvmet_fc_target_list))
+ pr_warn("%s: targetport list not empty\n", __func__);
+
+ nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
+
+ ida_destroy(&nvmet_fc_tgtport_cnt);
+}
+
+module_init(nvmet_fc_init_module);
+module_exit(nvmet_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
new file mode 100644
index 0000000000..c65a73433c
--- /dev/null
+++ b/drivers/nvme/target/fcloop.c
@@ -0,0 +1,1656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+
+#include "../host/nvme.h"
+#include "../target/nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_WWNN = 1 << 0,
+ NVMF_OPT_WWPN = 1 << 1,
+ NVMF_OPT_ROLES = 1 << 2,
+ NVMF_OPT_FCADDR = 1 << 3,
+ NVMF_OPT_LPWWNN = 1 << 4,
+ NVMF_OPT_LPWWPN = 1 << 5,
+};
+
+struct fcloop_ctrl_options {
+ int mask;
+ u64 wwnn;
+ u64 wwpn;
+ u32 roles;
+ u32 fcaddr;
+ u64 lpwwnn;
+ u64 lpwwpn;
+};
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_WWNN, "wwnn=%s" },
+ { NVMF_OPT_WWPN, "wwpn=%s" },
+ { NVMF_OPT_ROLES, "roles=%d" },
+ { NVMF_OPT_FCADDR, "fcaddr=%x" },
+ { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
+ { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
+ { NVMF_OPT_ERR, NULL }
+};
+
+static int fcloop_verify_addr(substring_t *s)
+{
+ size_t blen = s->to - s->from + 1;
+
+ if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
+ strncmp(s->from, "0x", 2))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+fcloop_parse_options(struct fcloop_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_WWNN:
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->wwnn = token64;
+ break;
+ case NVMF_OPT_WWPN:
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->wwpn = token64;
+ break;
+ case NVMF_OPT_ROLES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->roles = token;
+ break;
+ case NVMF_OPT_FCADDR:
+ if (match_hex(args, &token)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->fcaddr = token;
+ break;
+ case NVMF_OPT_LPWWNN:
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->lpwwnn = token64;
+ break;
+ case NVMF_OPT_LPWWPN:
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->lpwwpn = token64;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ }
+
+out_free_options:
+ kfree(options);
+ return ret;
+}
+
+
+static int
+fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ *nname = -1;
+ *pname = -1;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ switch (token) {
+ case NVMF_OPT_WWNN:
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ *nname = token64;
+ break;
+ case NVMF_OPT_WWPN:
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ *pname = token64;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ }
+
+out_free_options:
+ kfree(options);
+
+ if (!ret) {
+ if (*nname == -1)
+ return -EINVAL;
+ if (*pname == -1)
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+
+#define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
+ NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+
+static DEFINE_SPINLOCK(fcloop_lock);
+static LIST_HEAD(fcloop_lports);
+static LIST_HEAD(fcloop_nports);
+
+struct fcloop_lport {
+ struct nvme_fc_local_port *localport;
+ struct list_head lport_list;
+ struct completion unreg_done;
+};
+
+struct fcloop_lport_priv {
+ struct fcloop_lport *lport;
+};
+
+struct fcloop_rport {
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
+};
+
+struct fcloop_tport {
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
+};
+
+struct fcloop_nport {
+ struct fcloop_rport *rport;
+ struct fcloop_tport *tport;
+ struct fcloop_lport *lport;
+ struct list_head nport_list;
+ struct kref ref;
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+};
+
+struct fcloop_lsreq {
+ struct nvmefc_ls_req *lsreq;
+ struct nvmefc_ls_rsp ls_rsp;
+ int lsdir; /* H2T or T2H */
+ int status;
+ struct list_head ls_list; /* fcloop_rport->ls_list */
+};
+
+struct fcloop_rscn {
+ struct fcloop_tport *tport;
+ struct work_struct work;
+};
+
+enum {
+ INI_IO_START = 0,
+ INI_IO_ACTIVE = 1,
+ INI_IO_ABORTED = 2,
+ INI_IO_COMPLETED = 3,
+};
+
+struct fcloop_fcpreq {
+ struct fcloop_tport *tport;
+ struct nvmefc_fcp_req *fcpreq;
+ spinlock_t reqlock;
+ u16 status;
+ u32 inistate;
+ bool active;
+ bool aborted;
+ struct kref ref;
+ struct work_struct fcp_rcv_work;
+ struct work_struct abort_rcv_work;
+ struct work_struct tio_done_work;
+ struct nvmefc_tgt_fcp_req tgt_fcp_req;
+};
+
+struct fcloop_ini_fcpreq {
+ struct nvmefc_fcp_req *fcpreq;
+ struct fcloop_fcpreq *tfcp_req;
+ spinlock_t inilock;
+};
+
+static inline struct fcloop_lsreq *
+ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
+{
+ return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
+}
+
+static inline struct fcloop_fcpreq *
+tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
+}
+
+
+static int
+fcloop_create_queue(struct nvme_fc_local_port *localport,
+ unsigned int qidx, u16 qsize,
+ void **handle)
+{
+ *handle = localport;
+ return 0;
+}
+
+static void
+fcloop_delete_queue(struct nvme_fc_local_port *localport,
+ unsigned int idx, void *handle)
+{
+}
+
+static void
+fcloop_rport_lsrqst_work(struct work_struct *work)
+{
+ struct fcloop_rport *rport =
+ container_of(work, struct fcloop_rport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&rport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&rport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&rport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&rport->lock);
+ }
+ spin_unlock(&rport->lock);
+}
+
+static int
+fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_rport *rport = remoteport->private;
+ int ret = 0;
+
+ tls_req->lsreq = lsreq;
+ INIT_LIST_HEAD(&tls_req->ls_list);
+
+ if (!rport->targetport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
+ &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ struct nvmefc_ls_rsp *lsrsp)
+{
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_tport *tport = targetport->private;
+ struct nvme_fc_remote_port *remoteport = tport->remoteport;
+ struct fcloop_rport *rport;
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+
+ lsrsp->done(lsrsp);
+
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ }
+
+ return 0;
+}
+
+static void
+fcloop_tport_lsrqst_work(struct work_struct *work)
+{
+ struct fcloop_tport *tport =
+ container_of(work, struct fcloop_tport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&tport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&tport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&tport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&tport->lock);
+ }
+ spin_unlock(&tport->lock);
+}
+
+static int
+fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_tport *tport = targetport->private;
+ int ret = 0;
+
+ /*
+ * hosthandle should be the dst.rport value.
+ * hosthandle ignored as fcloop currently is
+ * 1:1 tgtport vs remoteport
+ */
+ tls_req->lsreq = lsreq;
+ INIT_LIST_HEAD(&tls_req->ls_list);
+
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp)
+{
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+ lsrsp->done(lsrsp);
+
+ if (targetport) {
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
+ }
+
+ return 0;
+}
+
+static void
+fcloop_t2h_host_release(void *hosthandle)
+{
+ /* host handle ignored for now */
+}
+
+/*
+ * Simulate reception of an RSCN and convert it into an initiator
+ * transport call to rescan a remote port.
+ */
+static void
+fcloop_tgt_rscn_work(struct work_struct *work)
+{
+ struct fcloop_rscn *tgt_rscn =
+ container_of(work, struct fcloop_rscn, work);
+ struct fcloop_tport *tport = tgt_rscn->tport;
+
+ if (tport->remoteport)
+ nvme_fc_rescan_remoteport(tport->remoteport);
+ kfree(tgt_rscn);
+}
+
+static void
+fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
+{
+ struct fcloop_rscn *tgt_rscn;
+
+ tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
+ if (!tgt_rscn)
+ return;
+
+ tgt_rscn->tport = tgtport->private;
+ INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
+
+ queue_work(nvmet_wq, &tgt_rscn->work);
+}
+
+static void
+fcloop_tfcp_req_free(struct kref *ref)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(ref, struct fcloop_fcpreq, ref);
+
+ kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+ kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+ return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+ struct fcloop_fcpreq *tfcp_req, int status)
+{
+ struct fcloop_ini_fcpreq *inireq = NULL;
+
+ if (fcpreq) {
+ inireq = fcpreq->private;
+ spin_lock(&inireq->inilock);
+ inireq->tfcp_req = NULL;
+ spin_unlock(&inireq->inilock);
+
+ fcpreq->status = status;
+ fcpreq->done(fcpreq);
+ }
+
+ /* release original io reference on tgt struct */
+ fcloop_tfcp_req_put(tfcp_req);
+}
+
+static bool drop_fabric_opcode;
+#define DROP_OPCODE_MASK 0x00FF
+/* fabrics opcode will have a bit set above 1st byte */
+static int drop_opcode = -1;
+static int drop_instance;
+static int drop_amount;
+static int drop_current_cnt;
+
+/*
+ * Routine to inspect an io and determine whether it should be dropped.
+ * Returns:
+ * 0 if the io is not to be dropped
+ * 1 if the io is to be dropped
+ */
+static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
+{
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
+ struct nvme_command *sqe = &cmdiu->sqe;
+
+ if (drop_opcode == -1)
+ return 0;
+
+ pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
+ "inst %d start %d amt %d\n",
+ __func__, sqe->common.opcode, sqe->fabrics.fctype,
+ drop_fabric_opcode ? "y" : "n",
+ drop_opcode, drop_current_cnt, drop_instance, drop_amount);
+
+ if ((drop_fabric_opcode &&
+ (sqe->common.opcode != nvme_fabrics_command ||
+ sqe->fabrics.fctype != drop_opcode)) ||
+ (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
+ return 0;
+
+ if (++drop_current_cnt >= drop_instance) {
+ if (drop_current_cnt >= drop_instance + drop_amount)
+ drop_opcode = -1;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ unsigned long flags;
+ int ret = 0;
+ bool aborted = false;
+
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ tfcp_req->inistate = INI_IO_ACTIVE;
+ break;
+ case INI_IO_ABORTED:
+ aborted = true;
+ break;
+ default:
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ if (unlikely(aborted))
+ ret = -ECANCELED;
+ else {
+ if (likely(!check_for_drop(tfcp_req)))
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+ else
+ pr_info("%s: dropped command ********\n", __func__);
+ }
+ if (ret)
+ fcloop_call_host_done(fcpreq, tfcp_req, ret);
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ struct nvmefc_fcp_req *fcpreq;
+ bool completed = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ fcpreq = tfcp_req->fcpreq;
+ switch (tfcp_req->inistate) {
+ case INI_IO_ABORTED:
+ break;
+ case INI_IO_COMPLETED:
+ completed = true;
+ break;
+ default:
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ if (unlikely(completed)) {
+ /* remove reference taken in original abort downcall */
+ fcloop_tfcp_req_put(tfcp_req);
+ return;
+ }
+
+ if (tfcp_req->tport->targetport)
+ nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req);
+
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ tfcp_req->fcpreq = NULL;
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ /* call_host_done releases reference for abort downcall */
+}
+
+/*
+ * FCP IO operation done by target completion.
+ * call back up initiator "done" flows.
+ */
+static void
+fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, tio_done_work);
+ struct nvmefc_fcp_req *fcpreq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ fcpreq = tfcp_req->fcpreq;
+ tfcp_req->inistate = INI_IO_COMPLETED;
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
+}
+
+
+static int
+fcloop_fcp_req(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *fcpreq)
+{
+ struct fcloop_rport *rport = remoteport->private;
+ struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+ struct fcloop_fcpreq *tfcp_req;
+
+ if (!rport->targetport)
+ return -ECONNREFUSED;
+
+ tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
+ if (!tfcp_req)
+ return -ENOMEM;
+
+ inireq->fcpreq = fcpreq;
+ inireq->tfcp_req = tfcp_req;
+ spin_lock_init(&inireq->inilock);
+
+ tfcp_req->fcpreq = fcpreq;
+ tfcp_req->tport = rport->targetport->private;
+ tfcp_req->inistate = INI_IO_START;
+ spin_lock_init(&tfcp_req->reqlock);
+ INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+ INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
+
+ queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
+
+ return 0;
+}
+
+static void
+fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
+ struct scatterlist *io_sg, u32 offset, u32 length)
+{
+ void *data_p, *io_p;
+ u32 data_len, io_len, tlen;
+
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+
+ for ( ; offset; ) {
+ tlen = min_t(u32, offset, io_len);
+ offset -= tlen;
+ io_len -= tlen;
+ if (!io_len) {
+ io_sg = sg_next(io_sg);
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+ } else
+ io_p += tlen;
+ }
+
+ data_p = sg_virt(data_sg);
+ data_len = data_sg->length;
+
+ for ( ; length; ) {
+ tlen = min_t(u32, io_len, data_len);
+ tlen = min_t(u32, tlen, length);
+
+ if (op == NVMET_FCOP_WRITEDATA)
+ memcpy(data_p, io_p, tlen);
+ else
+ memcpy(io_p, data_p, tlen);
+
+ length -= tlen;
+
+ io_len -= tlen;
+ if ((!io_len) && (length)) {
+ io_sg = sg_next(io_sg);
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+ } else
+ io_p += tlen;
+
+ data_len -= tlen;
+ if ((!data_len) && (length)) {
+ data_sg = sg_next(data_sg);
+ data_p = sg_virt(data_sg);
+ data_len = data_sg->length;
+ } else
+ data_p += tlen;
+ }
+}
+
+static int
+fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ struct nvmefc_fcp_req *fcpreq;
+ u32 rsplen = 0, xfrlen = 0;
+ int fcp_err = 0, active, aborted;
+ u8 op = tgt_fcpreq->op;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ fcpreq = tfcp_req->fcpreq;
+ active = tfcp_req->active;
+ aborted = tfcp_req->aborted;
+ tfcp_req->active = true;
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ if (unlikely(active))
+ /* illegal - call while i/o active */
+ return -EALREADY;
+
+ if (unlikely(aborted)) {
+ /* target transport has aborted i/o prior */
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ tfcp_req->active = false;
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ tgt_fcpreq->transferred_length = 0;
+ tgt_fcpreq->fcp_error = -ECANCELED;
+ tgt_fcpreq->done(tgt_fcpreq);
+ return 0;
+ }
+
+ /*
+ * if fcpreq is NULL, the I/O has been aborted (from
+ * initiator side). For the target side, act as if all is well
+ * but don't actually move data.
+ */
+
+ switch (op) {
+ case NVMET_FCOP_WRITEDATA:
+ xfrlen = tgt_fcpreq->transfer_length;
+ if (fcpreq) {
+ fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
+ fcpreq->first_sgl, tgt_fcpreq->offset,
+ xfrlen);
+ fcpreq->transferred_length += xfrlen;
+ }
+ break;
+
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ xfrlen = tgt_fcpreq->transfer_length;
+ if (fcpreq) {
+ fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
+ fcpreq->first_sgl, tgt_fcpreq->offset,
+ xfrlen);
+ fcpreq->transferred_length += xfrlen;
+ }
+ if (op == NVMET_FCOP_READDATA)
+ break;
+
+ /* Fall-Thru to RSP handling */
+ fallthrough;
+
+ case NVMET_FCOP_RSP:
+ if (fcpreq) {
+ rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
+ fcpreq->rsplen : tgt_fcpreq->rsplen);
+ memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
+ if (rsplen < tgt_fcpreq->rsplen)
+ fcp_err = -E2BIG;
+ fcpreq->rcv_rsplen = rsplen;
+ fcpreq->status = 0;
+ }
+ tfcp_req->status = 0;
+ break;
+
+ default:
+ fcp_err = -EINVAL;
+ break;
+ }
+
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ tfcp_req->active = false;
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ tgt_fcpreq->transferred_length = xfrlen;
+ tgt_fcpreq->fcp_error = fcp_err;
+ tgt_fcpreq->done(tgt_fcpreq);
+
+ return 0;
+}
+
+static void
+fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ unsigned long flags;
+
+ /*
+ * Marking aborted only matters if there were 2 threads in the
+ * transport (one doing io, the other doing the abort); it only
+ * kills ops posted after the abort request.
+ */
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ tfcp_req->aborted = true;
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ tfcp_req->status = NVME_SC_INTERNAL;
+
+ /*
+ * nothing more to do. If io wasn't active, the transport should
+ * immediately call the req_release. If it was active, the op
+ * will complete, and the lldd should call req_release.
+ */
+}
+
+static void
+fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+
+ queue_work(nvmet_wq, &tfcp_req->tio_done_work);
+}
+
+static void
+fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
+fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
+fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *fcpreq)
+{
+ struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+ struct fcloop_fcpreq *tfcp_req;
+ bool abortio = true;
+ unsigned long flags;
+
+ spin_lock(&inireq->inilock);
+ tfcp_req = inireq->tfcp_req;
+ if (tfcp_req)
+ fcloop_tfcp_req_get(tfcp_req);
+ spin_unlock(&inireq->inilock);
+
+ if (!tfcp_req)
+ /* abort has already been called */
+ return;
+
+ /* break initiator/target relationship for io */
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ case INI_IO_ACTIVE:
+ tfcp_req->inistate = INI_IO_ABORTED;
+ break;
+ case INI_IO_COMPLETED:
+ abortio = false;
+ break;
+ default:
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
+ else {
+ /*
+ * as the io has already had the done callback made,
+ * nothing more to do. So release the reference taken above
+ */
+ fcloop_tfcp_req_put(tfcp_req);
+ }
+}
+
+static void
+fcloop_nport_free(struct kref *ref)
+{
+ struct fcloop_nport *nport =
+ container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+ kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+ return kref_get_unless_zero(&nport->ref);
+}
+
+static void
+fcloop_localport_delete(struct nvme_fc_local_port *localport)
+{
+ struct fcloop_lport_priv *lport_priv = localport->private;
+ struct fcloop_lport *lport = lport_priv->lport;
+
+ /* release any threads waiting for the unreg to complete */
+ complete(&lport->unreg_done);
+}
+
+static void
+fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+ struct fcloop_rport *rport = remoteport->private;
+
+ flush_work(&rport->ls_work);
+ fcloop_nport_put(rport->nport);
+}
+
+static void
+fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+ struct fcloop_tport *tport = targetport->private;
+
+ flush_work(&tport->ls_work);
+ fcloop_nport_put(tport->nport);
+}
+
+#define FCLOOP_HW_QUEUES 4
+#define FCLOOP_SGL_SEGS 256
+#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
+
+static struct nvme_fc_port_template fctemplate = {
+ .localport_delete = fcloop_localport_delete,
+ .remoteport_delete = fcloop_remoteport_delete,
+ .create_queue = fcloop_create_queue,
+ .delete_queue = fcloop_delete_queue,
+ .ls_req = fcloop_h2t_ls_req,
+ .fcp_io = fcloop_fcp_req,
+ .ls_abort = fcloop_h2t_ls_abort,
+ .fcp_abort = fcloop_fcp_abort,
+ .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
+ .max_hw_queues = FCLOOP_HW_QUEUES,
+ .max_sgl_segments = FCLOOP_SGL_SEGS,
+ .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
+ .dma_boundary = FCLOOP_DMABOUND_4G,
+ /* sizes of additional private data for data structures */
+ .local_priv_sz = sizeof(struct fcloop_lport_priv),
+ .remote_priv_sz = sizeof(struct fcloop_rport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
+ .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
+};
+
+static struct nvmet_fc_target_template tgttemplate = {
+ .targetport_delete = fcloop_targetport_delete,
+ .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
+ .fcp_op = fcloop_fcp_op,
+ .fcp_abort = fcloop_tgt_fcp_abort,
+ .fcp_req_release = fcloop_fcp_req_release,
+ .discovery_event = fcloop_tgt_discovery_evt,
+ .ls_req = fcloop_t2h_ls_req,
+ .ls_abort = fcloop_t2h_ls_abort,
+ .host_release = fcloop_t2h_host_release,
+ .max_hw_queues = FCLOOP_HW_QUEUES,
+ .max_sgl_segments = FCLOOP_SGL_SEGS,
+ .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
+ .dma_boundary = FCLOOP_DMABOUND_4G,
+ /* optional features */
+ .target_features = 0,
+ /* sizes of additional private data for data structures */
+ .target_priv_sz = sizeof(struct fcloop_tport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
+};
+
+static ssize_t
+fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_fc_port_info pinfo;
+ struct fcloop_ctrl_options *opts;
+ struct nvme_fc_local_port *localport;
+ struct fcloop_lport *lport;
+ struct fcloop_lport_priv *lport_priv;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+ if (!lport)
+ return -ENOMEM;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ goto out_free_lport;
+
+ ret = fcloop_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /* everything there ? */
+ if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
+ ret = -EINVAL;
+ goto out_free_opts;
+ }
+
+ memset(&pinfo, 0, sizeof(pinfo));
+ pinfo.node_name = opts->wwnn;
+ pinfo.port_name = opts->wwpn;
+ pinfo.port_role = opts->roles;
+ pinfo.port_id = opts->fcaddr;
+
+ ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
+ if (!ret) {
+ /* success */
+ lport_priv = localport->private;
+ lport_priv->lport = lport;
+
+ lport->localport = localport;
+ INIT_LIST_HEAD(&lport->lport_list);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_add_tail(&lport->lport_list, &fcloop_lports);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+ }
+
+out_free_opts:
+ kfree(opts);
+out_free_lport:
+ /* free only if we're going to fail */
+ if (ret)
+ kfree(lport);
+
+ return ret ? ret : count;
+}
+
+
+static void
+__unlink_local_port(struct fcloop_lport *lport)
+{
+ list_del(&lport->lport_list);
+}
+
+static int
+__wait_localport_unreg(struct fcloop_lport *lport)
+{
+ int ret;
+
+ init_completion(&lport->unreg_done);
+
+ ret = nvme_fc_unregister_localport(lport->localport);
+
+ if (!ret)
+ wait_for_completion(&lport->unreg_done);
+
+ kfree(lport);
+
+ return ret;
+}
+
+
+static ssize_t
+fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_lport *tlport, *lport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tlport, &fcloop_lports, lport_list) {
+ if (tlport->localport->node_name == nodename &&
+ tlport->localport->port_name == portname) {
+ lport = tlport;
+ __unlink_local_port(lport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!lport)
+ return -ENOENT;
+
+ ret = __wait_localport_unreg(lport);
+
+ return ret ? ret : count;
+}
+
+static struct fcloop_nport *
+fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
+{
+ struct fcloop_nport *newnport, *nport = NULL;
+ struct fcloop_lport *tmplport, *lport = NULL;
+ struct fcloop_ctrl_options *opts;
+ unsigned long flags;
+ u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return NULL;
+
+ ret = fcloop_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /* everything there ? */
+ if ((opts->mask & opts_mask) != opts_mask) {
+ ret = -EINVAL;
+ goto out_free_opts;
+ }
+
+ newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
+ if (!newnport)
+ goto out_free_opts;
+
+ INIT_LIST_HEAD(&newnport->nport_list);
+ newnport->node_name = opts->wwnn;
+ newnport->port_name = opts->wwpn;
+ if (opts->mask & NVMF_OPT_ROLES)
+ newnport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ newnport->port_id = opts->fcaddr;
+ kref_init(&newnport->ref);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
+ if (tmplport->localport->node_name == opts->wwnn &&
+ tmplport->localport->port_name == opts->wwpn)
+ goto out_invalid_opts;
+
+ if (tmplport->localport->node_name == opts->lpwwnn &&
+ tmplport->localport->port_name == opts->lpwwpn)
+ lport = tmplport;
+ }
+
+ if (remoteport) {
+ if (!lport)
+ goto out_invalid_opts;
+ newnport->lport = lport;
+ }
+
+ list_for_each_entry(nport, &fcloop_nports, nport_list) {
+ if (nport->node_name == opts->wwnn &&
+ nport->port_name == opts->wwpn) {
+ if ((remoteport && nport->rport) ||
+ (!remoteport && nport->tport)) {
+ nport = NULL;
+ goto out_invalid_opts;
+ }
+
+ fcloop_nport_get(nport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (remoteport)
+ nport->lport = lport;
+ if (opts->mask & NVMF_OPT_ROLES)
+ nport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ nport->port_id = opts->fcaddr;
+ goto out_free_newnport;
+ }
+ }
+
+ list_add_tail(&newnport->nport_list, &fcloop_nports);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(opts);
+ return newnport;
+
+out_invalid_opts:
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+out_free_newnport:
+ kfree(newnport);
+out_free_opts:
+ kfree(opts);
+ return nport;
+}
+
+static ssize_t
+fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_rport *rport;
+ struct nvme_fc_port_info pinfo;
+ int ret;
+
+ nport = fcloop_alloc_nport(buf, count, true);
+ if (!nport)
+ return -EIO;
+
+ memset(&pinfo, 0, sizeof(pinfo));
+ pinfo.node_name = nport->node_name;
+ pinfo.port_name = nport->port_name;
+ pinfo.port_role = nport->port_role;
+ pinfo.port_id = nport->port_id;
+
+ ret = nvme_fc_register_remoteport(nport->lport->localport,
+ &pinfo, &remoteport);
+ if (ret || !remoteport) {
+ fcloop_nport_put(nport);
+ return ret;
+ }
+
+ /* success */
+ rport = remoteport->private;
+ rport->remoteport = remoteport;
+ rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
+ if (nport->tport) {
+ nport->tport->remoteport = remoteport;
+ nport->tport->lport = nport->lport;
+ }
+ rport->nport = nport;
+ rport->lport = nport->lport;
+ nport->rport = rport;
+ spin_lock_init(&rport->lock);
+ INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
+ INIT_LIST_HEAD(&rport->ls_list);
+
+ return count;
+}
+
+
+static struct fcloop_rport *
+__unlink_remote_port(struct fcloop_nport *nport)
+{
+ struct fcloop_rport *rport = nport->rport;
+
+ if (rport && nport->tport)
+ nport->tport->remoteport = NULL;
+ nport->rport = NULL;
+
+ return rport;
+}
+
+static int
+__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+{
+ if (!rport)
+ return -EALREADY;
+
+ return nvme_fc_unregister_remoteport(rport->remoteport);
+}
+
+static ssize_t
+fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_nport *nport = NULL, *tmpport;
+ struct fcloop_rport *rport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+ if (tmpport->node_name == nodename &&
+ tmpport->port_name == portname && tmpport->rport) {
+ nport = tmpport;
+ rport = __unlink_remote_port(nport);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!nport)
+ return -ENOENT;
+
+ ret = __remoteport_unreg(nport, rport);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
+ struct nvmet_fc_port_info tinfo;
+ int ret;
+
+ nport = fcloop_alloc_nport(buf, count, false);
+ if (!nport)
+ return -EIO;
+
+ tinfo.node_name = nport->node_name;
+ tinfo.port_name = nport->port_name;
+ tinfo.port_id = nport->port_id;
+
+ ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
+ &targetport);
+ if (ret) {
+ fcloop_nport_put(nport);
+ return ret;
+ }
+
+ /* success */
+ tport = targetport->private;
+ tport->targetport = targetport;
+ tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
+ if (nport->rport)
+ nport->rport->targetport = targetport;
+ tport->nport = nport;
+ tport->lport = nport->lport;
+ nport->tport = tport;
+ spin_lock_init(&tport->lock);
+ INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
+ INIT_LIST_HEAD(&tport->ls_list);
+
+ return count;
+}
+
+
+static struct fcloop_tport *
+__unlink_target_port(struct fcloop_nport *nport)
+{
+ struct fcloop_tport *tport = nport->tport;
+
+ if (tport && nport->rport)
+ nport->rport->targetport = NULL;
+ nport->tport = NULL;
+
+ return tport;
+}
+
+static int
+__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+{
+ if (!tport)
+ return -EALREADY;
+
+ return nvmet_fc_unregister_targetport(tport->targetport);
+}
+
+static ssize_t
+fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_nport *nport = NULL, *tmpport;
+ struct fcloop_tport *tport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+ if (tmpport->node_name == nodename &&
+ tmpport->port_name == portname && tmpport->tport) {
+ nport = tmpport;
+ tport = __unlink_target_port(nport);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!nport)
+ return -ENOENT;
+
+ ret = __targetport_unreg(nport, tport);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int opcode;
+ int starting, amount;
+
+ if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
+ return -EBADRQC;
+
+ drop_current_cnt = 0;
+ drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
+ drop_opcode = (opcode & DROP_OPCODE_MASK);
+ drop_instance = starting;
+ /* The drop-check routine uses instance + count to know when to
+ * stop. Thus, if dropping 1 instance, the count should be 0, so
+ * subtract 1 from the count.
+ */
+ drop_amount = amount - 1;
+
+ pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
+ "instances\n",
+ __func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
+ drop_opcode, drop_amount);
+
+ return count;
+}
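+
+/*
+ * Usage sketch (assuming the "ctl" device created in fcloop_init(), i.e.
+ * /sys/class/fcloop/ctl): writing "<opcode>:<start>:<count>" arms the
+ * drop logic above. For example, "2:1:1" drops the next Read command
+ * (opcode 0x2) seen, while "101:1:1" drops the next fabrics Connect
+ * (any bit set above the low byte selects a fabrics fctype, here 0x01).
+ */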
+
+
+static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
+static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
+static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
+static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
+static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
+static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
+static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
+
+static struct attribute *fcloop_dev_attrs[] = {
+ &dev_attr_add_local_port.attr,
+ &dev_attr_del_local_port.attr,
+ &dev_attr_add_remote_port.attr,
+ &dev_attr_del_remote_port.attr,
+ &dev_attr_add_target_port.attr,
+ &dev_attr_del_target_port.attr,
+ &dev_attr_set_cmd_drop.attr,
+ NULL
+};
+
+static const struct attribute_group fcloop_dev_attrs_group = {
+ .attrs = fcloop_dev_attrs,
+};
+
+static const struct attribute_group *fcloop_dev_attr_groups[] = {
+ &fcloop_dev_attrs_group,
+ NULL,
+};
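+
+/*
+ * Usage sketch for the sysfs files above (paths assume the "ctl" device
+ * created in fcloop_init(); the WWNs are made-up values and must be
+ * given as "0x" plus 16 hex digits):
+ *
+ * echo "wwnn=0x200000109b579d05,wwpn=0x100000109b579d05" \
+ * > /sys/class/fcloop/ctl/add_local_port
+ * echo "wwnn=0x200000109b579d06,wwpn=0x100000109b579d06,roles=<mask>,lpwwnn=0x200000109b579d05,lpwwpn=0x100000109b579d05" \
+ * > /sys/class/fcloop/ctl/add_remote_port
+ * echo "wwnn=0x200000109b579d06,wwpn=0x100000109b579d06" \
+ * > /sys/class/fcloop/ctl/add_target_port
+ *
+ * The remote and target ports share the same WWNs so they bind to the
+ * same fcloop_nport, and lpwwnn/lpwwpn tie the remote port back to the
+ * local port. <mask> stands for the decimal FC_PORT_ROLE_* role mask
+ * the host transport expects; it is shown only as a placeholder here.
+ */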
+
+static struct class *fcloop_class;
+static struct device *fcloop_device;
+
+
+static int __init fcloop_init(void)
+{
+ int ret;
+
+ fcloop_class = class_create("fcloop");
+ if (IS_ERR(fcloop_class)) {
+ pr_err("couldn't register class fcloop\n");
+ ret = PTR_ERR(fcloop_class);
+ return ret;
+ }
+
+ fcloop_device = device_create_with_groups(
+ fcloop_class, NULL, MKDEV(0, 0), NULL,
+ fcloop_dev_attr_groups, "ctl");
+ if (IS_ERR(fcloop_device)) {
+ pr_err("couldn't create ctl device!\n");
+ ret = PTR_ERR(fcloop_device);
+ goto out_destroy_class;
+ }
+
+ get_device(fcloop_device);
+
+ return 0;
+
+out_destroy_class:
+ class_destroy(fcloop_class);
+ return ret;
+}
+
+static void __exit fcloop_exit(void)
+{
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
+ struct fcloop_tport *tport;
+ struct fcloop_rport *rport;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ for (;;) {
+ nport = list_first_entry_or_null(&fcloop_nports,
+ typeof(*nport), nport_list);
+ if (!nport)
+ break;
+
+ tport = __unlink_target_port(nport);
+ rport = __unlink_remote_port(nport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ ret = __targetport_unreg(nport, tport);
+ if (ret)
+ pr_warn("%s: Failed deleting target port\n", __func__);
+
+ ret = __remoteport_unreg(nport, rport);
+ if (ret)
+ pr_warn("%s: Failed deleting remote port\n", __func__);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ }
+
+ for (;;) {
+ lport = list_first_entry_or_null(&fcloop_lports,
+ typeof(*lport), lport_list);
+ if (!lport)
+ break;
+
+ __unlink_local_port(lport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ ret = __wait_localport_unreg(lport);
+ if (ret)
+ pr_warn("%s: Failed deleting local port\n", __func__);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ put_device(fcloop_device);
+
+ device_destroy(fcloop_class, MKDEV(0, 0));
+ class_destroy(fcloop_class);
+}
+
+module_init(fcloop_init);
+module_exit(fcloop_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
new file mode 100644
index 0000000000..468833675c
--- /dev/null
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe I/O command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
+#include <linux/memremap.h>
+#include <linux/module.h>
+#include "nvmet.h"
+
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
+{
+ /* Logical blocks per physical block, 0's based. */
+ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+ bdev_logical_block_size(bdev));
+
+ /*
+ * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
+ * NAWUPF, and NACWU are defined for this namespace and should be
+ * used by the host for this namespace instead of the AWUN, AWUPF,
+ * and ACWU fields in the Identify Controller data structure. If
+ * any of these fields are zero that means that the corresponding
+ * field from the identify controller data structure should be used.
+ */
+ id->nsfeat |= 1 << 1;
+ id->nawun = lpp0b;
+ id->nawupf = lpp0b;
+ id->nacwu = lpp0b;
+
+ /*
+ * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
+ * NOWS are defined for this namespace and should be used by
+ * the host for I/O optimization.
+ */
+ id->nsfeat |= 1 << 4;
+ /* NPWG = Namespace Preferred Write Granularity. 0's based */
+ id->npwg = lpp0b;
+ /* NPWA = Namespace Preferred Write Alignment. 0's based */
+ id->npwa = id->npwg;
+ /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
+ id->npdg = to0based(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
+ /* NPDA = Namespace Preferred Deallocate Alignment */
+ id->npda = id->npdg;
+ /* NOWS = Namespace Optimal Write Size */
+ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+}
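+
+/*
+ * Worked example for the 0's based fields above: a bdev with 4096-byte
+ * physical and 512-byte logical blocks has 8 logical blocks per physical
+ * block, so lpp0b = to0based(8) = 7, and NAWUN/NAWUPF/NACWU/NPWG/NPWA
+ * all report 7 (i.e. 8 logical blocks).
+ */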
+
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->bdev) {
+ blkdev_put(ns->bdev, NULL);
+ ns->bdev = NULL;
+ }
+}
+
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+ struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+ if (bi) {
+ ns->metadata_size = bi->tuple_size;
+ if (bi->profile == &t10_pi_type1_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+ else if (bi->profile == &t10_pi_type3_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ else
+ /* Unsupported metadata type */
+ ns->metadata_size = 0;
+ }
+}
+
+int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+
+ /*
+ * When the buffered_io namespace attribute is enabled, the user wants
+ * this block device to be accessed as a file so that it can take
+ * advantage of the page cache.
+ */
+ if (ns->buffered_io)
+ return -ENOTBLK;
+
+ ns->bdev = blkdev_get_by_path(ns->device_path,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
+ if (IS_ERR(ns->bdev)) {
+ ret = PTR_ERR(ns->bdev);
+ if (ret != -ENOTBLK) {
+ pr_err("failed to open block device %s: (%ld)\n",
+ ns->device_path, PTR_ERR(ns->bdev));
+ }
+ ns->bdev = NULL;
+ return ret;
+ }
+ ns->size = bdev_nr_bytes(ns->bdev);
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ ns->pi_type = 0;
+ ns->metadata_size = 0;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ nvmet_bdev_ns_enable_integrity(ns);
+
+ if (bdev_is_zoned(ns->bdev)) {
+ if (!nvmet_bdev_zns_enable(ns)) {
+ nvmet_bdev_ns_disable(ns);
+ return -EINVAL;
+ }
+ ns->csi = NVME_CSI_ZNS;
+ }
+
+ return 0;
+}
+
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
+{
+ ns->size = bdev_nr_bytes(ns->bdev);
+}
+
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+{
+ u16 status = NVME_SC_SUCCESS;
+
+ if (likely(blk_sts == BLK_STS_OK))
+ return status;
+ /*
+ * Right now there exists an M : 1 mapping from block layer errors
+ * to NVMe status codes (see nvme_error_status()). For consistency,
+ * when we reverse-map, we use the most appropriate NVMe status code
+ * from the group of NVMe status codes used in nvme_error_status().
+ */
+ switch (blk_sts) {
+ case BLK_STS_NOSPC:
+ status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ break;
+ case BLK_STS_TARGET:
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ break;
+ case BLK_STS_NOTSUPP:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_dsm:
+ case nvme_cmd_write_zeroes:
+ status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ break;
+ default:
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ break;
+ case BLK_STS_MEDIUM:
+ status = NVME_SC_ACCESS_DENIED;
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
+ break;
+ case BLK_STS_IOERR:
+ default:
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ }
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->error_slba = le64_to_cpu(req->cmd->rw.slba);
+ break;
+ case nvme_cmd_write_zeroes:
+ req->error_slba =
+ le64_to_cpu(req->cmd->write_zeroes.slba);
+ break;
+ default:
+ req->error_slba = 0;
+ }
+ return status;
+}
+
+static void nvmet_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
+ nvmet_req_bio_put(req, bio);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ int rc;
+ size_t resid, len;
+
+ bi = bdev_get_integrity(req->ns->bdev);
+ if (unlikely(!bi)) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ bio_max_segs(req->metadata_sg_cnt));
+ if (IS_ERR(bip)) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return PTR_ERR(bip);
+ }
+
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
+
+ resid = bio_integrity_bytes(bi, bio_sectors(bio));
+ while (resid > 0 && sg_miter_next(miter)) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (unlikely(rc != len)) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
+ return -ENOMEM;
+ }
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
+ }
+ sg_miter_stop(miter);
+
+ return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+static void nvmet_bdev_execute_rw(struct nvmet_req *req)
+{
+ unsigned int sg_cnt = req->sg_cnt;
+ struct bio *bio;
+ struct scatterlist *sg;
+ struct blk_plug plug;
+ sector_t sector;
+ blk_opf_t opf;
+ int i, rc;
+ struct sg_mapping_iter prot_miter;
+ unsigned int iter_flags;
+ unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
+
+ if (!nvmet_check_transfer_len(req, total_len))
+ return;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (req->cmd->rw.opcode == nvme_cmd_write) {
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ opf |= REQ_FUA;
+ iter_flags = SG_MITER_TO_SG;
+ } else {
+ opf = REQ_OP_READ;
+ iter_flags = SG_MITER_FROM_SG;
+ }
+
+ if (is_pci_p2pdma_page(sg_page(req->sg)))
+ opf |= REQ_NOMERGE;
+
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->b.inline_bio;
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), opf);
+ } else {
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
+ GFP_KERNEL);
+ }
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+
+ blk_start_plug(&plug);
+ if (req->metadata_len)
+ sg_miter_start(&prot_miter, req->metadata_sg,
+ req->metadata_sg_cnt, iter_flags);
+
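+ /*
+ * Map the data scatterlist into one or more bios. When a bio fills
+ * up, allocate a new one, chain it to the previous bio and submit
+ * the previous one; nvmet_bio_done() on the first bio only runs once
+ * the whole chain has completed.
+ */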
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+ struct bio *prev = bio;
+
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio,
+ &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+ opf, GFP_KERNEL);
+ bio->bi_iter.bi_sector = sector;
+
+ bio_chain(bio, prev);
+ submit_bio(prev);
+ }
+
+ sector += sg->length >> 9;
+ sg_cnt--;
+ }
+
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
+ submit_bio(bio);
+ blk_finish_plug(&plug);
+}
+
+static void nvmet_bdev_execute_flush(struct nvmet_req *req)
+{
+ struct bio *bio = &req->b.inline_bio;
+
+ if (!bdev_write_cache(req->ns->bdev)) {
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+ return;
+ }
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+
+ submit_bio(bio);
+}
+
+u16 nvmet_bdev_flush(struct nvmet_req *req)
+{
+ if (!bdev_write_cache(req->ns->bdev))
+ return 0;
+
+ if (blkdev_issue_flush(req->ns->bdev))
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
+static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
+ struct nvme_dsm_range *range, struct bio **bio)
+{
+ struct nvmet_ns *ns = req->ns;
+ int ret;
+
+ ret = __blkdev_issue_discard(ns->bdev,
+ nvmet_lba_to_sect(ns, range->slba),
+ le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
+ GFP_KERNEL, bio);
+ if (ret && ret != -EOPNOTSUPP) {
+ req->error_slba = le64_to_cpu(range->slba);
+ return errno_to_nvme_status(req, ret);
+ }
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_bdev_execute_discard(struct nvmet_req *req)
+{
+ struct nvme_dsm_range range;
+ struct bio *bio = NULL;
+ int i;
+ u16 status;
+
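+ /*
+ * Copy each DSM range from the command data and hand it to
+ * __blkdev_issue_discard(), which accumulates the discards into a
+ * single bio chain that is submitted (or failed) below.
+ */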
+ for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+ status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range));
+ if (status)
+ break;
+
+ status = nvmet_bdev_discard_range(req, &range, &bio);
+ if (status)
+ break;
+ }
+
+ if (bio) {
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ if (status)
+ bio_io_error(bio);
+ else
+ submit_bio(bio);
+ } else {
+ nvmet_req_complete(req, status);
+ }
+}
+
+static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ return;
+
+ switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+ case NVME_DSMGMT_AD:
+ nvmet_bdev_execute_discard(req);
+ return;
+ case NVME_DSMGMT_IDR:
+ case NVME_DSMGMT_IDW:
+ default:
+ /* Not supported yet */
+ nvmet_req_complete(req, 0);
+ return;
+ }
+}
+
+static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
+{
+ struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+ struct bio *bio = NULL;
+ sector_t sector;
+ sector_t nr_sector;
+ int ret;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
+ nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+ (req->ns->blksize_shift - 9));
+
+ ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+ GFP_KERNEL, &bio, 0);
+ if (bio) {
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ submit_bio(bio);
+ } else {
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+ }
+}
+
+u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
+{
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->execute = nvmet_bdev_execute_rw;
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ req->metadata_len = nvmet_rw_metadata_len(req);
+ return 0;
+ case nvme_cmd_flush:
+ req->execute = nvmet_bdev_execute_flush;
+ return 0;
+ case nvme_cmd_dsm:
+ req->execute = nvmet_bdev_execute_dsm;
+ return 0;
+ case nvme_cmd_write_zeroes:
+ req->execute = nvmet_bdev_execute_write_zeroes;
+ return 0;
+ default:
+ return nvmet_report_invalid_opcode(req);
+ }
+}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
new file mode 100644
index 0000000000..2d068439b1
--- /dev/null
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target File I/O commands implementation.
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/uio.h>
+#include <linux/falloc.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include "nvmet.h"
+
+#define NVMET_MIN_MPOOL_OBJ 16
+
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+{
+ ns->size = i_size_read(ns->file->f_mapping->host);
+}
+
+void nvmet_file_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->file) {
+ if (ns->buffered_io)
+ flush_workqueue(buffered_io_wq);
+ mempool_destroy(ns->bvec_pool);
+ ns->bvec_pool = NULL;
+ fput(ns->file);
+ ns->file = NULL;
+ }
+}
+
+int nvmet_file_ns_enable(struct nvmet_ns *ns)
+{
+ int flags = O_RDWR | O_LARGEFILE;
+ int ret = 0;
+
+ if (!ns->buffered_io)
+ flags |= O_DIRECT;
+
+ ns->file = filp_open(ns->device_path, flags, 0);
+ if (IS_ERR(ns->file)) {
+ ret = PTR_ERR(ns->file);
+ pr_err("failed to open file %s: (%d)\n",
+ ns->device_path, ret);
+ ns->file = NULL;
+ return ret;
+ }
+
+ nvmet_file_ns_revalidate(ns);
+
+ /*
+ * i_blkbits can be greater than the universally accepted upper bound,
+ * so make sure we export a sane namespace lba_shift.
+ */
+ ns->blksize_shift = min_t(u8,
+ file_inode(ns->file)->i_blkbits, 12);
+
+ ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
+ mempool_free_slab, nvmet_bvec_cache);
+
+ if (!ns->bvec_pool) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return ret;
+err:
+ fput(ns->file);
+ ns->file = NULL;
+ ns->size = 0;
+ ns->blksize_shift = 0;
+ return ret;
+}
+
+static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
+ unsigned long nr_segs, size_t count, int ki_flags)
+{
+ struct kiocb *iocb = &req->f.iocb;
+ ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
+ struct iov_iter iter;
+ int rw;
+
+ if (req->cmd->rw.opcode == nvme_cmd_write) {
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ ki_flags |= IOCB_DSYNC;
+ call_iter = req->ns->file->f_op->write_iter;
+ rw = ITER_SOURCE;
+ } else {
+ call_iter = req->ns->file->f_op->read_iter;
+ rw = ITER_DEST;
+ }
+
+ iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
+
+ iocb->ki_pos = pos;
+ iocb->ki_filp = req->ns->file;
+ iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags;
+
+ return call_iter(iocb, &iter);
+}
+
+static void nvmet_file_io_done(struct kiocb *iocb, long ret)
+{
+ struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+ u16 status = NVME_SC_SUCCESS;
+
+ if (req->f.bvec != req->inline_bvec) {
+ if (likely(req->f.mpool_alloc == false))
+ kfree(req->f.bvec);
+ else
+ mempool_free(req->f.bvec, req->ns->bvec_pool);
+ }
+
+ if (unlikely(ret != req->transfer_len))
+ status = errno_to_nvme_status(req, ret);
+ nvmet_req_complete(req, status);
+}
+
+static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
+{
+ ssize_t nr_bvec = req->sg_cnt;
+ unsigned long bv_cnt = 0;
+ bool is_sync = false;
+ size_t len = 0, total_len = 0;
+ ssize_t ret = 0;
+ loff_t pos;
+ int i;
+ struct scatterlist *sg;
+
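+ /*
+ * A bvec array allocated from the mempool only holds
+ * NVMET_MAX_MPOOL_BVEC entries, so larger requests are split into
+ * chunks and submitted synchronously below.
+ */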
+ if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
+ is_sync = true;
+
+ pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+ if (unlikely(pos + req->transfer_len > req->ns->size)) {
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+ return true;
+ }
+
+ memset(&req->f.iocb, 0, sizeof(struct kiocb));
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length,
+ sg->offset);
+ len += req->f.bvec[bv_cnt].bv_len;
+ total_len += req->f.bvec[bv_cnt].bv_len;
+ bv_cnt++;
+
+ WARN_ON_ONCE((nr_bvec - 1) < 0);
+
+ if (unlikely(is_sync) &&
+ (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
+ if (ret < 0)
+ goto complete;
+
+ pos += len;
+ bv_cnt = 0;
+ len = 0;
+ }
+ nr_bvec--;
+ }
+
+ if (WARN_ON_ONCE(total_len != req->transfer_len)) {
+ ret = -EIO;
+ goto complete;
+ }
+
+ if (unlikely(is_sync)) {
+ ret = total_len;
+ goto complete;
+ }
+
+ /*
+ * A NULL ki_complete asks for synchronous execution, which we want
+ * for the IOCB_NOWAIT case.
+ */
+ if (!(ki_flags & IOCB_NOWAIT))
+ req->f.iocb.ki_complete = nvmet_file_io_done;
+
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
+
+ switch (ret) {
+ case -EIOCBQUEUED:
+ return true;
+ case -EAGAIN:
+ if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
+ goto complete;
+ return false;
+ case -EOPNOTSUPP:
+ /*
+ * For file systems that return -EOPNOTSUPP, handle the IOCB_NOWAIT
+ * case separately and retry the I/O without IOCB_NOWAIT.
+ */
+ if ((ki_flags & IOCB_NOWAIT))
+ return false;
+ break;
+ }
+
+complete:
+ nvmet_file_io_done(&req->f.iocb, ret);
+ return true;
+}
+
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ nvmet_file_execute_io(req, 0);
+}
+
+static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+ queue_work(buffered_io_wq, &req->f.work);
+}
+
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+ ssize_t nr_bvec = req->sg_cnt;
+
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+ return;
+
+ if (!req->sg_cnt || !nr_bvec) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
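+ /*
+ * Use the inline bvec array when the request fits, otherwise kmalloc
+ * one; if that fails, fall back to the (smaller) mempool and let
+ * nvmet_file_execute_io() split the request as needed.
+ */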
+ if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
+ req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
+ GFP_KERNEL);
+ else
+ req->f.bvec = req->inline_bvec;
+
+ if (unlikely(!req->f.bvec)) {
+ /* fallback under memory pressure */
+ req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
+ req->f.mpool_alloc = true;
+ } else
+ req->f.mpool_alloc = false;
+
+ if (req->ns->buffered_io) {
+ if (likely(!req->f.mpool_alloc) &&
+ (req->ns->file->f_mode & FMODE_NOWAIT) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
+ return;
+ nvmet_file_submit_buffered_io(req);
+ } else
+ nvmet_file_execute_io(req, 0);
+}
+
+u16 nvmet_file_flush(struct nvmet_req *req)
+{
+ return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
+}
+
+static void nvmet_file_flush_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ nvmet_req_complete(req, nvmet_file_flush(req));
+}
+
+static void nvmet_file_execute_flush(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+ INIT_WORK(&req->f.work, nvmet_file_flush_work);
+ queue_work(nvmet_wq, &req->f.work);
+}
+
+static void nvmet_file_execute_discard(struct nvmet_req *req)
+{
+ int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+ struct nvme_dsm_range range;
+ loff_t offset, len;
+ u16 status = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+ status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range));
+ if (status)
+ break;
+
+ offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
+ len = le32_to_cpu(range.nlb);
+ len <<= req->ns->blksize_shift;
+ if (offset + len > req->ns->size) {
+ req->error_slba = le64_to_cpu(range.slba);
+ status = errno_to_nvme_status(req, -ENOSPC);
+ break;
+ }
+
+ ret = vfs_fallocate(req->ns->file, mode, offset, len);
+ if (ret && ret != -EOPNOTSUPP) {
+ req->error_slba = le64_to_cpu(range.slba);
+ status = errno_to_nvme_status(req, ret);
+ break;
+ }
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_file_dsm_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+ case NVME_DSMGMT_AD:
+ nvmet_file_execute_discard(req);
+ return;
+ case NVME_DSMGMT_IDR:
+ case NVME_DSMGMT_IDW:
+ default:
+ /* Not supported yet */
+ nvmet_req_complete(req, 0);
+ return;
+ }
+}
+
+static void nvmet_file_execute_dsm(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ return;
+ INIT_WORK(&req->f.work, nvmet_file_dsm_work);
+ queue_work(nvmet_wq, &req->f.work);
+}
+
+static void nvmet_file_write_zeroes_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+ struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+ int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
+ loff_t offset;
+ loff_t len;
+ int ret;
+
+ offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
+ len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+ req->ns->blksize_shift);
+
+ if (unlikely(offset + len > req->ns->size)) {
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+ return;
+ }
+
+ ret = vfs_fallocate(req->ns->file, mode, offset, len);
+ nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
+}
+
+static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+ INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
+ queue_work(nvmet_wq, &req->f.work);
+}
+
+u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
+{
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->execute = nvmet_file_execute_rw;
+ return 0;
+ case nvme_cmd_flush:
+ req->execute = nvmet_file_execute_flush;
+ return 0;
+ case nvme_cmd_dsm:
+ req->execute = nvmet_file_execute_dsm;
+ return 0;
+ case nvme_cmd_write_zeroes:
+ req->execute = nvmet_file_execute_write_zeroes;
+ return 0;
+ default:
+ return nvmet_report_invalid_opcode(req);
+ }
+}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
new file mode 100644
index 0000000000..48d5df054c
--- /dev/null
+++ b/drivers/nvme/target/loop.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics loopback device.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
+#include <linux/nvme.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include "nvmet.h"
+#include "../host/nvme.h"
+#include "../host/fabrics.h"
+
+#define NVME_LOOP_MAX_SEGMENTS 256
+
+struct nvme_loop_iod {
+ struct nvme_request nvme_req;
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ struct nvmet_req req;
+ struct nvme_loop_queue *queue;
+ struct work_struct work;
+ struct sg_table sg_table;
+ struct scatterlist first_sgl[];
+};
+
+struct nvme_loop_ctrl {
+ struct nvme_loop_queue *queues;
+
+ struct blk_mq_tag_set admin_tag_set;
+
+ struct list_head list;
+ struct blk_mq_tag_set tag_set;
+ struct nvme_loop_iod async_event_iod;
+ struct nvme_ctrl ctrl;
+
+ struct nvmet_port *port;
+};
+
+static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
+}
+
+enum nvme_loop_queue_flags {
+ NVME_LOOP_Q_LIVE = 0,
+};
+
+struct nvme_loop_queue {
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvme_loop_ctrl *ctrl;
+ unsigned long flags;
+};
+
+static LIST_HEAD(nvme_loop_ports);
+static DEFINE_MUTEX(nvme_loop_ports_mutex);
+
+static LIST_HEAD(nvme_loop_ctrl_list);
+static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
+
+static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
+
+static const struct nvmet_fabrics_ops nvme_loop_ops;
+
+static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static void nvme_loop_complete_rq(struct request *req)
+{
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+
+ sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
+ nvme_complete_rq(req);
+}
+
+static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
+{
+ u32 queue_idx = nvme_loop_queue_idx(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static void nvme_loop_queue_response(struct nvmet_req *req)
+{
+ struct nvme_loop_queue *queue =
+ container_of(req->sq, struct nvme_loop_queue, nvme_sq);
+ struct nvme_completion *cqe = req->cqe;
+
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+ cqe->command_id))) {
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
+ } else {
+ struct request *rq;
+
+ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "got bad command_id %#x on queue %d\n",
+ cqe->command_id, nvme_loop_queue_idx(queue));
+ return;
+ }
+
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ nvme_loop_complete_rq(rq);
+ }
+}
+
+static void nvme_loop_execute_work(struct work_struct *work)
+{
+ struct nvme_loop_iod *iod =
+ container_of(work, struct nvme_loop_iod, work);
+
+ iod->req.execute(&iod->req);
+}
+
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_loop_queue *queue = hctx->driver_data;
+ struct request *req = bd->rq;
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
+ blk_status_t ret;
+
+ if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
+
+ ret = nvme_setup_cmd(ns, req);
+ if (ret)
+ return ret;
+
+ nvme_start_request(req);
+ iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+ iod->req.port = queue->ctrl->port;
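+ /*
+ * nvmet_req_init() completes the request itself when it fails, so
+ * BLK_STS_OK is returned to the block layer in either case.
+ */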
+ if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvme_loop_ops))
+ return BLK_STS_OK;
+
+ if (blk_rq_nr_phys_segments(req)) {
+ iod->sg_table.sgl = iod->first_sgl;
+ if (sg_alloc_table_chained(&iod->sg_table,
+ blk_rq_nr_phys_segments(req),
+ iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
+ nvme_cleanup_cmd(req);
+ return BLK_STS_RESOURCE;
+ }
+
+ iod->req.sg = iod->sg_table.sgl;
+ iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ iod->req.transfer_len = blk_rq_payload_bytes(req);
+ }
+
+ queue_work(nvmet_wq, &iod->work);
+ return BLK_STS_OK;
+}
+
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
+ struct nvme_loop_queue *queue = &ctrl->queues[0];
+ struct nvme_loop_iod *iod = &ctrl->async_event_iod;
+
+ memset(&iod->cmd, 0, sizeof(iod->cmd));
+ iod->cmd.common.opcode = nvme_admin_async_event;
+ iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+ iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
+ &nvme_loop_ops)) {
+ dev_err(ctrl->ctrl.device, "failed async event work\n");
+ return;
+ }
+
+ queue_work(nvmet_wq, &iod->work);
+}
+
+static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
+ struct nvme_loop_iod *iod, unsigned int queue_idx)
+{
+ iod->req.cmd = &iod->cmd;
+ iod->req.cqe = &iod->cqe;
+ iod->queue = &ctrl->queues[queue_idx];
+ INIT_WORK(&iod->work, nvme_loop_execute_work);
+ return 0;
+}
+
+static int nvme_loop_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_req(req)->ctrl = &ctrl->ctrl;
+ nvme_req(req)->cmd = &iod->cmd;
+ return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
+ (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
+}
+
+static struct lock_class_key loop_hctx_fq_lock_key;
+
+static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
+ struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
+
+ /*
+ * flush_end_io() can be called recursively for us, so use our own
+ * lock class key to avoid a possible lockdep report of recursive
+ * locking. This also lets us drop the dynamically allocated lock
+ * class for each flush queue, which could otherwise cause a horrible
+ * boot delay.
+ */
+ blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
+ struct nvme_loop_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static const struct blk_mq_ops nvme_loop_mq_ops = {
+ .queue_rq = nvme_loop_queue_rq,
+ .complete = nvme_loop_complete_rq,
+ .init_request = nvme_loop_init_request,
+ .init_hctx = nvme_loop_init_hctx,
+};
+
+static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
+ .queue_rq = nvme_loop_queue_rq,
+ .complete = nvme_loop_complete_rq,
+ .init_request = nvme_loop_init_request,
+ .init_hctx = nvme_loop_init_admin_hctx,
+};
+
+static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+ if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+ return;
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+}
+
+static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ if (nctrl->tagset)
+ nvme_remove_io_tag_set(nctrl);
+ kfree(ctrl->queues);
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl);
+}
+
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ }
+ ctrl->ctrl.queue_count = 1;
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
+ int ret, i;
+
+ nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret || !nr_io_queues)
+ return ret;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+ for (i = 1; i <= nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->ctrl.queue_count++;
+ }
+
+ return 0;
+
+out_destroy_queues:
+ nvme_loop_destroy_io_queues(ctrl);
+ return ret;
+}
+
+static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ return ret;
+ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+ }
+
+ return 0;
+}
+
+static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+ int error;
+
+ ctrl->queues[0].ctrl = ctrl;
+ error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
+ if (error)
+ return error;
+ ctrl->ctrl.queue_count = 1;
+
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_loop_admin_mq_ops,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ if (error)
+ goto out_free_sq;
+
+ /* reset stopped state for the fresh admin queue */
+ clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_tagset;
+
+ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_tagset;
+
+ ctrl->ctrl.max_hw_sectors =
+ (NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;
+
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+
+ error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
+ if (error)
+ goto out_cleanup_tagset;
+
+ return 0;
+
+out_cleanup_tagset:
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+out_free_sq:
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ return error;
+}
+
+static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
+{
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_quiesce_io_queues(&ctrl->ctrl);
+ nvme_cancel_tagset(&ctrl->ctrl);
+ nvme_loop_destroy_io_queues(ctrl);
+ }
+
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ nvme_disable_ctrl(&ctrl->ctrl, true);
+
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+ nvme_loop_destroy_admin_queue(ctrl);
+}
+
+static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
+{
+ nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
+}
+
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl;
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
+ if (ctrl->ctrl.cntlid == nctrl->cntlid)
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+}
+
+static void nvme_loop_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_loop_ctrl *ctrl =
+ container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
+ int ret;
+
+ nvme_stop_ctrl(&ctrl->ctrl);
+ nvme_loop_shutdown_ctrl(ctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ /* state change failure for non-deleted ctrl? */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_disable;
+
+ ret = nvme_loop_init_io_queues(ctrl);
+ if (ret)
+ goto out_destroy_admin;
+
+ ret = nvme_loop_connect_io_queues(ctrl);
+ if (ret)
+ goto out_destroy_io;
+
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+ ctrl->ctrl.queue_count - 1);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
+
+ nvme_start_ctrl(&ctrl->ctrl);
+
+ return;
+
+out_destroy_io:
+ nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
+ nvme_loop_destroy_admin_queue(ctrl);
+out_disable:
+ dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+ nvme_uninit_ctrl(&ctrl->ctrl);
+}
+
+static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
+ .name = "loop",
+ .module = THIS_MODULE,
+ .flags = NVME_F_FABRICS,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .free_ctrl = nvme_loop_free_ctrl,
+ .submit_async_event = nvme_loop_submit_async_event,
+ .delete_ctrl = nvme_loop_delete_ctrl_host,
+ .get_address = nvmf_get_address,
+};
+
+static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ int ret;
+
+ ret = nvme_loop_init_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_loop_mq_ops, 1,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ if (ret)
+ goto out_destroy_queues;
+
+ ret = nvme_loop_connect_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_tagset;
+
+ return 0;
+
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+out_destroy_queues:
+ nvme_loop_destroy_io_queues(ctrl);
+ return ret;
+}
+
+static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
+{
+ struct nvmet_port *p, *found = NULL;
+
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_for_each_entry(p, &nvme_loop_ports, entry) {
+ /* if no transport address is specified use the first port */
+ if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
+ strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
+ continue;
+ found = p;
+ break;
+ }
+ mutex_unlock(&nvme_loop_ports_mutex);
+ return found;
+}
+
+static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_loop_ctrl *ctrl;
+ int ret;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret) {
+ kfree(ctrl);
+ goto out;
+ }
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ WARN_ON_ONCE(1);
+
+ ret = -ENOMEM;
+
+ ctrl->ctrl.kato = opts->kato;
+ ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
+
+ ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_free_queues;
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+
+ if (opts->nr_io_queues) {
+ ret = nvme_loop_create_io_queues(ctrl);
+ if (ret)
+ goto out_remove_admin_queue;
+ }
+
+ nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);
+
+ dev_info(ctrl->ctrl.device,
+ "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ nvme_start_ctrl(&ctrl->ctrl);
+
+ return &ctrl->ctrl;
+
+out_remove_admin_queue:
+ nvme_loop_destroy_admin_queue(ctrl);
+out_free_queues:
+ kfree(ctrl->queues);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+out:
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+}
+
+static int nvme_loop_add_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_add_tail(&port->entry, &nvme_loop_ports);
+ mutex_unlock(&nvme_loop_ports_mutex);
+ return 0;
+}
+
+static void nvme_loop_remove_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_del_init(&port->entry);
+ mutex_unlock(&nvme_loop_ports_mutex);
+
+ /*
+ * Ensure any ctrls that are in the process of being
+ * deleted are in fact deleted before we return
+ * and free the port. This is to prevent active
+ * ctrls from using a port after it's freed.
+ */
+ flush_workqueue(nvme_delete_wq);
+}
+
+static const struct nvmet_fabrics_ops nvme_loop_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_LOOP,
+ .add_port = nvme_loop_add_port,
+ .remove_port = nvme_loop_remove_port,
+ .queue_response = nvme_loop_queue_response,
+ .delete_ctrl = nvme_loop_delete_ctrl,
+};
+
+static struct nvmf_transport_ops nvme_loop_transport = {
+ .name = "loop",
+ .module = THIS_MODULE,
+ .create_ctrl = nvme_loop_create_ctrl,
+ .allowed_opts = NVMF_OPT_TRADDR,
+};
+
+static int __init nvme_loop_init_module(void)
+{
+ int ret;
+
+ ret = nvmet_register_transport(&nvme_loop_ops);
+ if (ret)
+ return ret;
+
+ ret = nvmf_register_transport(&nvme_loop_transport);
+ if (ret)
+ nvmet_unregister_transport(&nvme_loop_ops);
+
+ return ret;
+}
+
+static void __exit nvme_loop_cleanup_module(void)
+{
+ struct nvme_loop_ctrl *ctrl, *next;
+
+ nvmf_unregister_transport(&nvme_loop_transport);
+ nvmet_unregister_transport(&nvme_loop_ops);
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
+ nvme_delete_ctrl(&ctrl->ctrl);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ flush_workqueue(nvme_delete_wq);
+}
+
+module_init(nvme_loop_init_module);
+module_exit(nvme_loop_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
new file mode 100644
index 0000000000..8cfd60f3b5
--- /dev/null
+++ b/drivers/nvme/target/nvmet.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+
+#ifndef _NVMET_H
+#define _NVMET_H
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/percpu-refcount.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/uuid.h>
+#include <linux/nvme.h>
+#include <linux/configfs.h>
+#include <linux/rcupdate.h>
+#include <linux/blkdev.h>
+#include <linux/radix-tree.h>
+#include <linux/t10-pi.h>
+
+#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
+
+#define NVMET_ASYNC_EVENTS 4
+#define NVMET_ERROR_LOG_SLOTS 128
+#define NVMET_NO_ERROR_LOC ((u16)-1)
+#define NVMET_DEFAULT_CTRL_MODEL "Linux"
+#define NVMET_MN_MAX_SIZE 40
+#define NVMET_SN_MAX_SIZE 20
+#define NVMET_FR_MAX_SIZE 8
+
+/*
+ * Supported optional AENs:
+ */
+#define NVMET_AEN_CFG_OPTIONAL \
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
+#define NVMET_DISC_AEN_CFG_OPTIONAL \
+ (NVME_AEN_CFG_DISC_CHANGE)
+
+/*
+ * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
+ */
+#define NVMET_AEN_CFG_ALL \
+ (NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
+ NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
+ NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
+
+ /* Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
+ * The 16-bit shift sets the IATTR bit to 1, which means the offending
+ * offset starts in the data section of connect()
+ */
+#define IPO_IATTR_CONNECT_DATA(x) \
+ (cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
+#define IPO_IATTR_CONNECT_SQE(x) \
+ (cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
+
+struct nvmet_ns {
+ struct percpu_ref ref;
+ struct block_device *bdev;
+ struct file *file;
+ bool readonly;
+ u32 nsid;
+ u32 blksize_shift;
+ loff_t size;
+ u8 nguid[16];
+ uuid_t uuid;
+ u32 anagrpid;
+
+ bool buffered_io;
+ bool enabled;
+ struct nvmet_subsys *subsys;
+ const char *device_path;
+
+ struct config_group device_group;
+ struct config_group group;
+
+ struct completion disable_done;
+ mempool_t *bvec_pool;
+
+ struct pci_dev *p2p_dev;
+ int use_p2pmem;
+ int pi_type;
+ int metadata_size;
+ u8 csi;
+};
+
+static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_ns, group);
+}
+
+static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
+{
+ return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
+}
+
+struct nvmet_cq {
+ u16 qid;
+ u16 size;
+};
+
+struct nvmet_sq {
+ struct nvmet_ctrl *ctrl;
+ struct percpu_ref ref;
+ u16 qid;
+ u16 size;
+ u32 sqhd;
+ bool sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ bool authenticated;
+ struct delayed_work auth_expired_work;
+ u16 dhchap_tid;
+ u16 dhchap_status;
+ int dhchap_step;
+ u8 *dhchap_c1;
+ u8 *dhchap_c2;
+ u32 dhchap_s1;
+ u32 dhchap_s2;
+ u8 *dhchap_skey;
+ int dhchap_skey_len;
+#endif
+ struct completion free_done;
+ struct completion confirm_done;
+};
+
+struct nvmet_ana_group {
+ struct config_group group;
+ struct nvmet_port *port;
+ u32 grpid;
+};
+
+static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_ana_group,
+ group);
+}
+
+/**
+ * struct nvmet_port - Common structure to keep port
+ * information for the target.
+ * @entry: Entry into referrals or transport list.
+ * @disc_addr: Address information is stored in a format defined
+ * for a discovery log page entry.
+ * @group: ConfigFS group for this element's folder.
+ * @priv: Private data for the transport.
+ */
+struct nvmet_port {
+ struct list_head entry;
+ struct nvmf_disc_rsp_page_entry disc_addr;
+ struct config_group group;
+ struct config_group subsys_group;
+ struct list_head subsystems;
+ struct config_group referrals_group;
+ struct list_head referrals;
+ struct list_head global_entry;
+ struct config_group ana_groups_group;
+ struct nvmet_ana_group ana_default_group;
+ enum nvme_ana_state *ana_state;
+ void *priv;
+ bool enabled;
+ int inline_data_size;
+ const struct nvmet_fabrics_ops *tr_ops;
+ bool pi_enable;
+};
+
+static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_port,
+ group);
+}
+
+static inline struct nvmet_port *ana_groups_to_port(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_port,
+ ana_groups_group);
+}
+
+struct nvmet_ctrl {
+ struct nvmet_subsys *subsys;
+ struct nvmet_sq **sqs;
+
+ bool reset_tbkas;
+
+ struct mutex lock;
+ u64 cap;
+ u32 cc;
+ u32 csts;
+
+ uuid_t hostid;
+ u16 cntlid;
+ u32 kato;
+
+ struct nvmet_port *port;
+
+ u32 aen_enabled;
+ unsigned long aen_masked;
+ struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
+ unsigned int nr_async_event_cmds;
+ struct list_head async_events;
+ struct work_struct async_event_work;
+
+ struct list_head subsys_entry;
+ struct kref ref;
+ struct delayed_work ka_work;
+ struct work_struct fatal_err_work;
+
+ const struct nvmet_fabrics_ops *ops;
+
+ __le32 *changed_ns_list;
+ u32 nr_changed_ns;
+
+ char subsysnqn[NVMF_NQN_FIELD_LEN];
+ char hostnqn[NVMF_NQN_FIELD_LEN];
+
+ struct device *p2p_client;
+ struct radix_tree_root p2p_ns_map;
+
+ spinlock_t error_lock;
+ u64 err_counter;
+ struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
+ bool pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u8 shash_id;
+ struct crypto_kpp *dh_tfm;
+ u8 dh_gid;
+ u8 *dh_key;
+ size_t dh_keysize;
+#endif
+};
+
+struct nvmet_subsys {
+ enum nvme_subsys_type type;
+
+ struct mutex lock;
+ struct kref ref;
+
+ struct xarray namespaces;
+ unsigned int nr_namespaces;
+ u32 max_nsid;
+ u16 cntlid_min;
+ u16 cntlid_max;
+
+ struct list_head ctrls;
+
+ struct list_head hosts;
+ bool allow_any_host;
+
+ u16 max_qid;
+
+ u64 ver;
+ char serial[NVMET_SN_MAX_SIZE];
+ bool subsys_discovered;
+ char *subsysnqn;
+ bool pi_support;
+
+ struct config_group group;
+
+ struct config_group namespaces_group;
+ struct config_group allowed_hosts_group;
+
+ char *model_number;
+ u32 ieee_oui;
+ char *firmware_rev;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ struct nvme_ctrl *passthru_ctrl;
+ char *passthru_ctrl_path;
+ struct config_group passthru_group;
+ unsigned int admin_timeout;
+ unsigned int io_timeout;
+ unsigned int clear_ids;
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ u8 zasl;
+#endif /* CONFIG_BLK_DEV_ZONED */
+};
+
+static inline struct nvmet_subsys *to_subsys(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_subsys, group);
+}
+
+static inline struct nvmet_subsys *namespaces_to_subsys(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_subsys,
+ namespaces_group);
+}
+
+struct nvmet_host {
+ struct config_group group;
+ u8 *dhchap_secret;
+ u8 *dhchap_ctrl_secret;
+ u8 dhchap_key_hash;
+ u8 dhchap_ctrl_key_hash;
+ u8 dhchap_hash_id;
+ u8 dhchap_dhgroup_id;
+};
+
+static inline struct nvmet_host *to_host(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_host, group);
+}
+
+static inline char *nvmet_host_name(struct nvmet_host *host)
+{
+ return config_item_name(&host->group.cg_item);
+}
+
+struct nvmet_host_link {
+ struct list_head entry;
+ struct nvmet_host *host;
+};
+
+struct nvmet_subsys_link {
+ struct list_head entry;
+ struct nvmet_subsys *subsys;
+};
+
+struct nvmet_req;
+struct nvmet_fabrics_ops {
+ struct module *owner;
+ unsigned int type;
+ unsigned int msdbd;
+ unsigned int flags;
+#define NVMF_KEYED_SGLS (1 << 0)
+#define NVMF_METADATA_SUPPORTED (1 << 1)
+ void (*queue_response)(struct nvmet_req *req);
+ int (*add_port)(struct nvmet_port *port);
+ void (*remove_port)(struct nvmet_port *port);
+ void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
+ void (*disc_traddr)(struct nvmet_req *req,
+ struct nvmet_port *port, char *traddr);
+ u16 (*install_queue)(struct nvmet_sq *nvme_sq);
+ void (*discovery_chg)(struct nvmet_port *port);
+ u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
+ u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
+};
+
+#define NVMET_MAX_INLINE_BIOVEC 8
+#define NVMET_MAX_INLINE_DATA_LEN (NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
+
+struct nvmet_req {
+ struct nvme_command *cmd;
+ struct nvme_completion *cqe;
+ struct nvmet_sq *sq;
+ struct nvmet_cq *cq;
+ struct nvmet_ns *ns;
+ struct scatterlist *sg;
+ struct scatterlist *metadata_sg;
+ struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
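+ /* per-backend state: block device (b), file (f), passthru (p), zoned (z) */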
+ union {
+ struct {
+ struct bio inline_bio;
+ } b;
+ struct {
+ bool mpool_alloc;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+ struct work_struct work;
+ } f;
+ struct {
+ struct bio inline_bio;
+ struct request *rq;
+ struct work_struct work;
+ bool use_workqueue;
+ } p;
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct {
+ struct bio inline_bio;
+ struct work_struct zmgmt_work;
+ } z;
+#endif /* CONFIG_BLK_DEV_ZONED */
+ };
+ int sg_cnt;
+ int metadata_sg_cnt;
+ /* data length as parsed from the SGL descriptor: */
+ size_t transfer_len;
+ size_t metadata_len;
+
+ struct nvmet_port *port;
+
+ void (*execute)(struct nvmet_req *req);
+ const struct nvmet_fabrics_ops *ops;
+
+ struct pci_dev *p2p_dev;
+ struct device *p2p_client;
+ u16 error_loc;
+ u64 error_slba;
+};
+
+#define NVMET_MAX_MPOOL_BVEC 16
+extern struct kmem_cache *nvmet_bvec_cache;
+extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
+
+static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
+{
+ req->cqe->result.u32 = cpu_to_le32(result);
+}
+
+/*
+ * NVMe command writes actually are DMA reads for us on the target side.
+ */
+static inline enum dma_data_direction
+nvmet_data_dir(struct nvmet_req *req)
+{
+ return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+struct nvmet_async_event {
+ struct list_head entry;
+ u8 event_type;
+ u8 event_info;
+ u8 log_page;
+};
+
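+ /*
+ * Clear the AEN mask bit again (so the event can be reported once
+ * more) unless the Retain Asynchronous Event (RAE) bit was set in
+ * the Get Log Page command being processed.
+ */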
+static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
+{
+ int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;
+
+ if (!rae)
+ clear_bit(bn, &req->sq->ctrl->aen_masked);
+}
+
+static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
+{
+ if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
+ return true;
+ return test_and_set_bit(bn, &ctrl->aen_masked);
+}
+
+void nvmet_get_feat_kato(struct nvmet_req *req);
+void nvmet_get_feat_async_event(struct nvmet_req *req);
+u16 nvmet_set_feat_kato(struct nvmet_req *req);
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
+void nvmet_execute_async_event(struct nvmet_req *req);
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
+
+u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
+u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
+void nvmet_req_uninit(struct nvmet_req *req);
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
+void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgls(struct nvmet_req *req);
+void nvmet_req_free_sgls(struct nvmet_req *req);
+
+void nvmet_execute_set_features(struct nvmet_req *req);
+void nvmet_execute_get_features(struct nvmet_req *req);
+void nvmet_execute_keep_alive(struct nvmet_req *req);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+ u16 size);
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+ u16 size);
+void nvmet_sq_destroy(struct nvmet_sq *sq);
+int nvmet_sq_init(struct nvmet_sq *sq);
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+ const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req);
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
+u16 nvmet_check_ctrl_status(struct nvmet_req *req);
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+ enum nvme_subsys_type type);
+void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
+
+u16 nvmet_req_find_ns(struct nvmet_req *req);
+void nvmet_put_namespace(struct nvmet_ns *ns);
+int nvmet_ns_enable(struct nvmet_ns *ns);
+void nvmet_ns_disable(struct nvmet_ns *ns);
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_ns_free(struct nvmet_ns *ns);
+
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+ struct nvmet_port *port);
+void nvmet_port_send_ana_event(struct nvmet_port *port);
+
+int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
+void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+ struct nvmet_subsys *subsys);
+
+int nvmet_enable_port(struct nvmet_port *port);
+void nvmet_disable_port(struct nvmet_port *port);
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
+void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+ size_t len);
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
+ size_t len);
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
+
+extern struct list_head *nvmet_ports;
+void nvmet_port_disc_changed(struct nvmet_port *port,
+ struct nvmet_subsys *subsys);
+void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+ struct nvmet_host *host);
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ u8 event_info, u8 log_page);
+
+#define NVMET_QUEUE_SIZE 1024
+#define NVMET_NR_QUEUES 128
+#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+
+/*
+ * Nice round number that makes a list of nsids fit into a page.
+ * Should become tunable at some point in the future.
+ */
+#define NVMET_MAX_NAMESPACES 1024
+
+/*
+ * 0 is not a valid ANA group ID, so we start numbering at 1.
+ *
+ * ANA Group 1 exists without manual intervention, has namespaces assigned to it
+ * by default, and is available in an optimized state through all ports.
+ */
+#define NVMET_MAX_ANAGRPS 128
+#define NVMET_DEFAULT_ANA_GRPID 1
+
+#define NVMET_KAS 10
+#define NVMET_DISC_KATO_MS 120000
+
+int __init nvmet_init_configfs(void);
+void __exit nvmet_exit_configfs(void);
+
+int __init nvmet_init_discovery(void);
+void nvmet_exit_discovery(void);
+
+extern struct nvmet_subsys *nvmet_disc_subsys;
+extern struct rw_semaphore nvmet_config_sem;
+
+extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+extern u64 nvmet_ana_chgcnt;
+extern struct rw_semaphore nvmet_ana_sem;
+
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);
+
+int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
+int nvmet_file_ns_enable(struct nvmet_ns *ns);
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
+void nvmet_file_ns_disable(struct nvmet_ns *ns);
+u16 nvmet_bdev_flush(struct nvmet_req *req);
+u16 nvmet_file_flush(struct nvmet_req *req);
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+bool nvmet_ns_revalidate(struct nvmet_ns *ns);
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
+void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
+
+static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
+{
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
+ req->ns->blksize_shift;
+}
+
+static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return 0;
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
+ req->ns->metadata_size;
+}
+
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+ return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+ sizeof(struct nvme_dsm_range);
+}
+
+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+ return req->sq->ctrl->subsys;
+}
+
+static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->type != NVME_NQN_NVME;
+}
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->passthru_ctrl;
+}
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+}
+static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+}
+static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
+{
+ return NULL;
+}
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
+{
+ return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
+}
+
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
+
+u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
+
+/* Convert a 32-bit number to a 16-bit 0's based number */
+static inline __le16 to0based(u32 a)
+{
+ return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
+}
+
+static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return false;
+ return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+}
+
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+ return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+ return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+ return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+ req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
+static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
+{
+ if (bio != &req->b.inline_bio)
+ bio_put(bio);
+}
+
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->host_key != NULL;
+}
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+#endif
+
+#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
new file mode 100644
index 0000000000..9fe07d7efa
--- /dev/null
+++ b/drivers/nvme/target/passthru.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target Passthrough command implementation.
+ *
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ * Copyright (c) 2019-2020, Eideticom Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+
+#include "../host/nvme.h"
+#include "nvmet.h"
+
+MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
+
+/*
+ * xarray to maintain one passthru subsystem per nvme controller.
+ */
+static DEFINE_XARRAY(passthru_subsystems);
+
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+{
+ /*
+ * Multiple command set support can only be declared if the underlying
+ * controller actually supports it.
+ */
+ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
+ ctrl->cap &= ~(1ULL << 43);
+}
+
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ int pos, len;
+ bool csi_seen = false;
+ void *data;
+ u8 csi;
+
+ if (!ctrl->subsys->clear_ids)
+ return status;
+
+ data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!data)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+ if (status)
+ goto out_free;
+
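+ /*
+ * Scan the namespace identification descriptor list for a CSI
+ * descriptor; the list rebuilt below keeps only that entry, since
+ * the other identifiers are being cleared.
+ */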
+ for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+ struct nvme_ns_id_desc *cur = data + pos;
+
+ if (cur->nidl == 0)
+ break;
+ if (cur->nidt == NVME_NIDT_CSI) {
+ memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+ csi_seen = true;
+ break;
+ }
+ len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
+ }
+
+ memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+ if (csi_seen) {
+ struct nvme_ns_id_desc *cur = data;
+
+ cur->nidt = NVME_NIDT_CSI;
+ cur->nidl = NVME_NIDT_CSI_LEN;
+ memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+ }
+ status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+ kfree(data);
+ return status;
+}
+
+static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ctrl *id;
+ unsigned int max_hw_sectors;
+ int page_shift;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
+ if (status)
+ goto out_free;
+
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+ /*
+ * The passthru NVMe driver may have a limit on the number of segments
+ * which depends on the host's memory fragmentation. To solve this,
+ * cap mdts so a command never spans more pages than that segment limit.
+ */
+ max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
+ pctrl->max_hw_sectors);
+
+ /*
+ * nvmet_passthru_map_sg() is limited to a single bio, so also cap
+ * the mdts based on BIO_MAX_VECS.
+ */
+ max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
+ max_hw_sectors);
+
+ page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+
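+ /*
+ * For example, assuming a 4KB MPSMIN (page_shift == 12) and
+ * max_hw_sectors == 1024 (512KB): mdts = ilog2(1024) + 9 - 12 = 7,
+ * i.e. 2^7 * 4KB = 512KB, matching the underlying limit.
+ */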
+ id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
+
+ id->acl = 3;
+ /*
+ * We export the fabrics controller's aerl limit; update this once
+ * passthru-based aerl support is added.
+ */
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+ /* emulate kas, as most PCIe ctrls don't support it */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
+ /* don't support host memory buffer */
+ id->hmpre = 0;
+ id->hmmin = 0;
+
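+ /*
+ * SQES/CQES carry the required entry size in the low nibble and the
+ * maximum in the high nibble, each as a power of two; 0x66 and 0x44
+ * pin the submission and completion entry sizes to 64 and 16 bytes,
+ * the fixed sizes used over fabrics.
+ */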
+ id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
+ id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ /* don't support fused commands */
+ id->fuses = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (req->port->inline_data_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ /*
+ * When the passthru controller is set up over the nvme-loop transport
+ * it would export the passthru ctrl's subsysnqn (the PCIe NVMe ctrl's)
+ * and then fail in nvme/host/core.c, in the
+ * nvme_init_subsystem()->nvme_active_ctrl() code path, with a
+ * duplicate ctrl subsysnqn. To prevent that, mask the passthru ctrl's
+ * subsysnqn with the target ctrl's subsysnqn.
+ */
+ memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+
+ /* use fabric id-ctrl values */
+ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+ req->port->inline_data_size) / 16);
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ /* Support multipath connections with fabrics */
+ id->cmic |= 1 << 1;
+
+ /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
+ id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
+
+out_free:
+ kfree(id);
+ return status;
+}
+
+static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
+{
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ns *id;
+ int i;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
+ if (status)
+ goto out_free;
+
+ for (i = 0; i < (id->nlbaf + 1); i++)
+ if (id->lbaf[i].ms)
+ memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));
+
+ id->flbas = id->flbas & ~(1 << 4);
+
+ /*
+ * Presently the NVMe-oF target code does not support sending
+ * metadata, so we must disable it here. This should be updated
+ * once the target starts supporting metadata.
+ */
+ id->mc = 0;
+
+ if (req->sq->ctrl->subsys->clear_ids) {
+ memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+ memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+out_free:
+ kfree(id);
+ return status;
+}
+
+static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
+ struct request *rq = req->p.rq;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ struct nvme_ns *ns = rq->q->queuedata;
+ u32 effects;
+ int status;
+
+ effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);
+ status = nvme_execute_rq(rq, false);
+ if (status == NVME_SC_SUCCESS &&
+ req->cmd->common.opcode == nvme_admin_identify) {
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ nvmet_passthru_override_id_ctrl(req);
+ break;
+ case NVME_ID_CNS_NS:
+ nvmet_passthru_override_id_ns(req);
+ break;
+ case NVME_ID_CNS_NS_DESC_LIST:
+ nvmet_passthru_override_id_descs(req);
+ break;
+ }
+ } else if (status < 0)
+ status = NVME_SC_INTERNAL;
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, status);
+ blk_mq_free_request(rq);
+
+ if (effects)
+ nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
+}
+
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
+{
+ struct nvmet_req *req = rq->end_io_data;
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, nvme_req(rq)->status);
+ blk_mq_free_request(rq);
+ return RQ_END_IO_NONE;
+}
+
+static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+{
+ struct scatterlist *sg;
+ struct bio *bio;
+ int i;
+
+ if (req->sg_cnt > BIO_MAX_VECS)
+ return -EINVAL;
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->p.inline_bio;
+ bio_init(bio, NULL, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), req_op(rq));
+ } else {
+ bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
+ GFP_KERNEL);
+ bio->bi_end_io = bio_put;
+ }
+
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
+ sg->offset) < sg->length) {
+ nvmet_req_bio_put(req, bio);
+ return -EINVAL;
+ }
+ }
+
+ blk_rq_bio_prep(rq, bio, req->sg_cnt);
+
+ return 0;
+}
+
+static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
+ struct request_queue *q = ctrl->admin_q;
+ struct nvme_ns *ns = NULL;
+ struct request *rq = NULL;
+ unsigned int timeout;
+ u32 effects;
+ u16 status;
+ int ret;
+
+ if (likely(req->sq->qid != 0)) {
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+
+ ns = nvme_find_get_ns(ctrl, nsid);
+ if (unlikely(!ns)) {
+ pr_err("failed to get passthru ns nsid:%u\n", nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ q = ns->queue;
+ timeout = nvmet_req_subsys(req)->io_timeout;
+ } else {
+ timeout = nvmet_req_subsys(req)->admin_timeout;
+ }
+
+ rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
+ if (IS_ERR(rq)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_ns;
+ }
+ nvme_init_request(rq, req->cmd);
+
+ if (timeout)
+ rq->timeout = timeout;
+
+ if (req->sg_cnt) {
+ ret = nvmet_passthru_map_sg(req, rq);
+ if (unlikely(ret)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_req;
+ }
+ }
+
+ /*
+ * If a command needs post-execution fixups, or there are any
+ * non-trivial effects, make sure to execute the command synchronously
+ * in a workqueue so that nvme_passthru_end gets called.
+ */
+ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
+ if (req->p.use_workqueue ||
+ (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
+ INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ req->p.rq = rq;
+ queue_work(nvmet_wq, &req->p.work);
+ } else {
+ rq->end_io = nvmet_passthru_req_done;
+ rq->end_io_data = req;
+ blk_execute_rq_nowait(rq, false);
+ }
+
+ if (ns)
+ nvme_put_ns(ns);
+
+ return;
+
+out_put_req:
+ blk_mq_free_request(rq);
+out_put_ns:
+ if (ns)
+ nvme_put_ns(ns);
+out:
+ nvmet_req_complete(req, status);
+}
+
+/*
+ * Emulate the set-features Host Behavior Support command: the
+ * behaviour requested by the fabrics host must match what has already
+ * been requested on the underlying device's host; fail otherwise.
+ */
+static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
+ struct nvme_feat_host_behavior *host;
+ u16 status = NVME_SC_INTERNAL;
+ int ret;
+
+ host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
+ if (!host)
+ goto out_complete_req;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
+ host, sizeof(*host), NULL);
+ if (ret)
+ goto out_free_host;
+
+ status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
+ if (status)
+ goto out_free_host;
+
+ if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
+ pr_warn("target host has requested different behaviour from the local host\n");
+ status = NVME_SC_INTERNAL;
+ }
+
+out_free_host:
+ kfree(host);
+out_complete_req:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
+{
+ req->p.use_workqueue = false;
+ req->execute = nvmet_passthru_execute_cmd;
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ /* Reject any commands with non-sgl flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_resv_register:
+ case nvme_cmd_resv_report:
+ case nvme_cmd_resv_acquire:
+ case nvme_cmd_resv_release:
+ /*
+ * Reservations cannot be supported properly because the
+ * underlying device has no way of differentiating different
+ * hosts that connect via fabrics. This could potentially be
+ * emulated in the future if regular targets grow support for
+ * this feature.
+ */
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return nvmet_setup_passthru_command(req);
+}
+
+/*
+ * Only features that are emulated or specifically allowed in the list are
+ * passed down to the controller. This function implements the allow list for
+ * both get and set features.
+ */
+static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
+{
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ARBITRATION:
+ case NVME_FEAT_POWER_MGMT:
+ case NVME_FEAT_LBA_RANGE:
+ case NVME_FEAT_TEMP_THRESH:
+ case NVME_FEAT_ERR_RECOVERY:
+ case NVME_FEAT_VOLATILE_WC:
+ case NVME_FEAT_WRITE_ATOMIC:
+ case NVME_FEAT_AUTO_PST:
+ case NVME_FEAT_TIMESTAMP:
+ case NVME_FEAT_HCTM:
+ case NVME_FEAT_NOPSC:
+ case NVME_FEAT_RRL:
+ case NVME_FEAT_PLM_CONFIG:
+ case NVME_FEAT_PLM_WINDOW:
+ case NVME_FEAT_HOST_BEHAVIOR:
+ case NVME_FEAT_SANITIZE:
+ case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
+ return nvmet_setup_passthru_command(req);
+
+ case NVME_FEAT_ASYNC_EVENT:
+ /* There is no support for forwarding ASYNC events */
+ case NVME_FEAT_IRQ_COALESCE:
+ case NVME_FEAT_IRQ_CONFIG:
+ /* The IRQ settings will not apply to the target controller */
+ case NVME_FEAT_HOST_MEM_BUF:
+ /*
+ * Any HMB that's set will not be passed through and will
+ * not work as expected
+ */
+ case NVME_FEAT_SW_PROGRESS:
+ /*
+ * The Pre-Boot Software Load Count doesn't make much
+ * sense for a target to export
+ */
+ case NVME_FEAT_RESV_MASK:
+ case NVME_FEAT_RESV_PERSIST:
+ /* No reservations, see nvmet_parse_passthru_io_cmd() */
+ default:
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ /* Reject any commands with non-sgl flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
+ /*
+ * Pass through all vendor-specific commands
+ */
+ if (req->cmd->common.opcode >= nvme_admin_vendor_start)
+ return nvmet_setup_passthru_command(req);
+
+ switch (req->cmd->common.opcode) {
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_keep_alive:
+ /*
+ * Most PCIe ctrls don't support the keep alive cmd, so route keep
+ * alive to the non-passthru path. Revisit this once PCIe ctrls
+ * with keep alive support become available.
+ */
+ req->execute = nvmet_execute_keep_alive;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_set_features;
+ return NVME_SC_SUCCESS;
+ case NVME_FEAT_HOST_BEHAVIOR:
+ req->execute = nvmet_passthru_set_host_behaviour;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_get_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_get_features;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_identify:
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_CTRL:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ case NVME_ID_CNS_NS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_NS:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ default:
+ return nvmet_setup_passthru_command(req);
+ }
+ case nvme_admin_get_log_page:
+ return nvmet_setup_passthru_command(req);
+ default:
+ /* Reject commands not in the allowlist above */
+ return nvmet_report_invalid_opcode(req);
+ }
+}
+
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
+{
+ struct nvme_ctrl *ctrl;
+ struct file *file;
+ int ret = -EINVAL;
+ void *old;
+
+ mutex_lock(&subsys->lock);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ if (subsys->nr_namespaces) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
+ file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_unlock;
+ }
+
+ ctrl = nvme_ctrl_from_file(file);
+ if (!ctrl) {
+ pr_err("failed to open nvme controller %s\n",
+ subsys->passthru_ctrl_path);
+
+ goto out_put_file;
+ }
+
+ old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+ subsys, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ ret = xa_err(old);
+ goto out_put_file;
+ }
+
+ if (old)
+ goto out_put_file;
+
+ subsys->passthru_ctrl = ctrl;
+ subsys->ver = ctrl->vs;
+
+ if (subsys->ver < NVME_VS(1, 2, 1)) {
+ pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
+ NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ subsys->ver = NVME_VS(1, 2, 1);
+ }
+ nvme_get_ctrl(ctrl);
+ __module_get(subsys->passthru_ctrl->ops->module);
+ ret = 0;
+
+out_put_file:
+ filp_close(file, NULL);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
+static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ if (subsys->passthru_ctrl) {
+ xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ module_put(subsys->passthru_ctrl->ops->module);
+ nvme_put_ctrl(subsys->passthru_ctrl);
+ }
+ subsys->passthru_ctrl = NULL;
+ subsys->ver = NVMET_DEFAULT_VS;
+}
+
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+ kfree(subsys->passthru_ctrl_path);
+}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
new file mode 100644
index 0000000000..4597bca43a
--- /dev/null
+++ b/drivers/nvme/target/rdma.c
@@ -0,0 +1,2095 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics RDMA target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
+#include <linux/blk-integrity.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/inet.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+#include <rdma/ib_cm.h>
+
+#include <linux/nvme-rdma.h>
+#include "nvmet.h"
+
+/*
+ * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
+ */
+#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
+#define NVMET_RDMA_MAX_INLINE_SGE 4
+#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
+
+/* Assume mpsmin == device_page_size == 4KB */
+#define NVMET_RDMA_MAX_MDTS 8
+#define NVMET_RDMA_MAX_METADATA_MDTS 5
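+/*
+ * With that assumption the data MDTS allows up to 2^8 * 4KB = 1MB per
+ * command, and the metadata-capable MDTS up to 2^5 * 4KB = 128KB.
+ */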
+
+struct nvmet_rdma_srq;
+
+struct nvmet_rdma_cmd {
+ struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
+ struct ib_cqe cqe;
+ struct ib_recv_wr wr;
+ struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
+ struct nvme_command *nvme_cmd;
+ struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_srq *nsrq;
+};
+
+enum {
+ NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
+ NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
+};
+
+struct nvmet_rdma_rsp {
+ struct ib_sge send_sge;
+ struct ib_cqe send_cqe;
+ struct ib_send_wr send_wr;
+
+ struct nvmet_rdma_cmd *cmd;
+ struct nvmet_rdma_queue *queue;
+
+ struct ib_cqe read_cqe;
+ struct ib_cqe write_cqe;
+ struct rdma_rw_ctx rw;
+
+ struct nvmet_req req;
+
+ bool allocated;
+ u8 n_rdma;
+ u32 flags;
+ u32 invalidate_rkey;
+
+ struct list_head wait_list;
+ struct list_head free_list;
+};
+
+enum nvmet_rdma_queue_state {
+ NVMET_RDMA_Q_CONNECTING,
+ NVMET_RDMA_Q_LIVE,
+ NVMET_RDMA_Q_DISCONNECTING,
+};
+
+struct nvmet_rdma_queue {
+ struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct nvmet_port *port;
+ struct ib_cq *cq;
+ atomic_t sq_wr_avail;
+ struct nvmet_rdma_device *dev;
+ struct nvmet_rdma_srq *nsrq;
+ spinlock_t state_lock;
+ enum nvmet_rdma_queue_state state;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+
+ struct nvmet_rdma_rsp *rsps;
+ struct list_head free_rsps;
+ spinlock_t rsps_lock;
+ struct nvmet_rdma_cmd *cmds;
+
+ struct work_struct release_work;
+ struct list_head rsp_wait_list;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+
+ int idx;
+ int host_qid;
+ int comp_vector;
+ int recv_queue_size;
+ int send_queue_size;
+
+ struct list_head queue_list;
+};
+
+struct nvmet_rdma_port {
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ struct rdma_cm_id *cm_id;
+ struct delayed_work repair_work;
+};
+
+struct nvmet_rdma_srq {
+ struct ib_srq *srq;
+ struct nvmet_rdma_cmd *cmds;
+ struct nvmet_rdma_device *ndev;
+};
+
+struct nvmet_rdma_device {
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct nvmet_rdma_srq **srqs;
+ int srq_count;
+ size_t srq_size;
+ struct kref ref;
+ struct list_head entry;
+ int inline_data_size;
+ int inline_page_count;
+};
+
+static bool nvmet_rdma_use_srq;
+module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
+MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+
+static int srq_size_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops srq_size_ops = {
+ .set = srq_size_set,
+ .get = param_get_int,
+};
+
+static int nvmet_rdma_srq_size = 1024;
+module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
+MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should be >= 256 (default: 1024)");
+
+static DEFINE_IDA(nvmet_rdma_queue_ida);
+static LIST_HEAD(nvmet_rdma_queue_list);
+static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r);
+
+static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+
+static int srq_size_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 256)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static int num_pages(int len)
+{
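+ /*
+ * E.g. with 4KB pages: num_pages(1..4096) == 1 and num_pages(4097)
+ * == 2; for len > 0 this is equivalent to DIV_ROUND_UP(len, PAGE_SIZE).
+ */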
+ return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
+}
+
+static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
+{
+ return nvme_is_write(rsp->req.cmd) &&
+ rsp->req.transfer_len &&
+ !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
+{
+ return !nvme_is_write(rsp->req.cmd) &&
+ rsp->req.transfer_len &&
+ !rsp->req.cqe->status &&
+ !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline struct nvmet_rdma_rsp *
+nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_rsp *rsp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rsps_lock, flags);
+ rsp = list_first_entry_or_null(&queue->free_rsps,
+ struct nvmet_rdma_rsp, free_list);
+ if (likely(rsp))
+ list_del(&rsp->free_list);
+ spin_unlock_irqrestore(&queue->rsps_lock, flags);
+
+ if (unlikely(!rsp)) {
+ int ret;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (unlikely(!rsp))
+ return NULL;
+ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+ if (unlikely(ret)) {
+ kfree(rsp);
+ return NULL;
+ }
+
+ rsp->allocated = true;
+ }
+
+ return rsp;
+}
+
+static inline void
+nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
+{
+ unsigned long flags;
+
+ if (unlikely(rsp->allocated)) {
+ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
+ kfree(rsp);
+ return;
+ }
+
+ spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
+ list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
+ spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+}
+
+static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c)
+{
+ struct scatterlist *sg;
+ struct ib_sge *sge;
+ int i;
+
+ if (!ndev->inline_data_size)
+ return;
+
+ sg = c->inline_sg;
+ sge = &c->sge[1];
+
+ for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+ if (sge->length)
+ ib_dma_unmap_page(ndev->device, sge->addr,
+ sge->length, DMA_FROM_DEVICE);
+ if (sg_page(sg))
+ __free_page(sg_page(sg));
+ }
+}
+
+static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c)
+{
+ struct scatterlist *sg;
+ struct ib_sge *sge;
+ struct page *pg;
+ int len;
+ int i;
+
+ if (!ndev->inline_data_size)
+ return 0;
+
+ sg = c->inline_sg;
+ sg_init_table(sg, ndev->inline_page_count);
+ sge = &c->sge[1];
+ len = ndev->inline_data_size;
+
+ for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg)
+ goto out_err;
+ sg_assign_page(sg, pg);
+ sge->addr = ib_dma_map_page(ndev->device,
+ pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, sge->addr))
+ goto out_err;
+ sge->length = min_t(int, len, PAGE_SIZE);
+ sge->lkey = ndev->pd->local_dma_lkey;
+ len -= sge->length;
+ }
+
+ return 0;
+out_err:
+ for (; i >= 0; i--, sg--, sge--) {
+ if (sge->length)
+ ib_dma_unmap_page(ndev->device, sge->addr,
+ sge->length, DMA_FROM_DEVICE);
+ if (sg_page(sg))
+ __free_page(sg_page(sg));
+ }
+ return -ENOMEM;
+}
+
+static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c, bool admin)
+{
+ /* NVMe command / RDMA RECV */
+ c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
+ if (!c->nvme_cmd)
+ goto out;
+
+ c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
+ goto out_free_cmd;
+
+ c->sge[0].length = sizeof(*c->nvme_cmd);
+ c->sge[0].lkey = ndev->pd->local_dma_lkey;
+
+ if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
+ goto out_unmap_cmd;
+
+ c->cqe.done = nvmet_rdma_recv_done;
+
+ c->wr.wr_cqe = &c->cqe;
+ c->wr.sg_list = c->sge;
+ c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
+
+ return 0;
+
+out_unmap_cmd:
+ ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+out_free_cmd:
+ kfree(c->nvme_cmd);
+
+out:
+ return -ENOMEM;
+}
+
+static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c, bool admin)
+{
+ if (!admin)
+ nvmet_rdma_free_inline_pages(ndev, c);
+ ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+ kfree(c->nvme_cmd);
+}
+
+static struct nvmet_rdma_cmd *
+nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
+ int nr_cmds, bool admin)
+{
+ struct nvmet_rdma_cmd *cmds;
+ int ret = -EINVAL, i;
+
+ cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+ if (!cmds)
+ goto out;
+
+ for (i = 0; i < nr_cmds; i++) {
+ ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
+ if (ret)
+ goto out_free;
+ }
+
+ return cmds;
+
+out_free:
+ while (--i >= 0)
+ nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+ kfree(cmds);
+out:
+ return ERR_PTR(ret);
+}
+
+static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
+{
+ int i;
+
+ for (i = 0; i < nr_cmds; i++)
+ nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+ kfree(cmds);
+}
+
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r)
+{
+ /* NVMe CQE / RDMA SEND */
+ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
+ if (!r->req.cqe)
+ goto out;
+
+ r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
+ sizeof(*r->req.cqe), DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
+ goto out_free_rsp;
+
+ if (ib_dma_pci_p2p_dma_supported(ndev->device))
+ r->req.p2p_client = &ndev->device->dev;
+ r->send_sge.length = sizeof(*r->req.cqe);
+ r->send_sge.lkey = ndev->pd->local_dma_lkey;
+
+ r->send_cqe.done = nvmet_rdma_send_done;
+
+ r->send_wr.wr_cqe = &r->send_cqe;
+ r->send_wr.sg_list = &r->send_sge;
+ r->send_wr.num_sge = 1;
+ r->send_wr.send_flags = IB_SEND_SIGNALED;
+
+ /* Data In / RDMA READ */
+ r->read_cqe.done = nvmet_rdma_read_data_done;
+ /* Data Out / RDMA WRITE */
+ r->write_cqe.done = nvmet_rdma_write_data_done;
+
+ return 0;
+
+out_free_rsp:
+ kfree(r->req.cqe);
+out:
+ return -ENOMEM;
+}
+
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r)
+{
+ ib_dma_unmap_single(ndev->device, r->send_sge.addr,
+ sizeof(*r->req.cqe), DMA_TO_DEVICE);
+ kfree(r->req.cqe);
+}
+
+static int
+nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int nr_rsps = queue->recv_queue_size * 2;
+ int ret = -EINVAL, i;
+
+ queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+ GFP_KERNEL);
+ if (!queue->rsps)
+ goto out;
+
+ for (i = 0; i < nr_rsps; i++) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+ if (ret)
+ goto out_free;
+
+ list_add_tail(&rsp->free_list, &queue->free_rsps);
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ list_del(&rsp->free_list);
+ nvmet_rdma_free_rsp(ndev, rsp);
+ }
+ kfree(queue->rsps);
+out:
+ return ret;
+}
+
+static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int i, nr_rsps = queue->recv_queue_size * 2;
+
+ for (i = 0; i < nr_rsps; i++) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ list_del(&rsp->free_list);
+ nvmet_rdma_free_rsp(ndev, rsp);
+ }
+ kfree(queue->rsps);
+}
+
+static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *cmd)
+{
+ int ret;
+
+ ib_dma_sync_single_for_device(ndev->device,
+ cmd->sge[0].addr, cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+
+ if (cmd->nsrq)
+ ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
+ else
+ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
+
+ if (unlikely(ret))
+ pr_err("post_recv cmd failed\n");
+
+ return ret;
+}
+
+static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
+{
+ spin_lock(&queue->rsp_wr_wait_lock);
+ while (!list_empty(&queue->rsp_wr_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+ bool ret;
+
+ rsp = list_entry(queue->rsp_wr_wait_list.next,
+ struct nvmet_rdma_rsp, wait_list);
+ list_del(&rsp->wait_list);
+
+ spin_unlock(&queue->rsp_wr_wait_lock);
+ ret = nvmet_rdma_execute_command(rsp);
+ spin_lock(&queue->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&queue->rsp_wr_wait_lock);
+}
+
+static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
+{
+ struct ib_mr_status mr_status;
+ int ret;
+ u16 status = 0;
+
+ ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ return NVME_SC_INVALID_PI;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+
+ return status;
+}
+
+static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 control = le16_to_cpu(cmd->rw.control);
+ u8 pi_type = req->ns->pi_type;
+ struct blk_integrity *bi;
+
+ bi = bdev_get_integrity(req->ns->bdev);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no wire domain */
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ /* PI is added by the HW */
+ req->transfer_len += req->metadata_len;
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+ if (control & NVME_RW_PRINFO_PRCHK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (control & NVME_RW_PRINFO_PRCHK_APP)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+}
+
+static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+ int ret;
+
+ if (req->metadata_len)
+ ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
+ addr, key, nvmet_data_dir(req));
+ else
+ ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, 0, addr, key,
+ nvmet_data_dir(req));
+
+ return ret;
+}
+
+static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+
+ if (req->metadata_len)
+ rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt,
+ nvmet_data_dir(req));
+ else
+ rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, nvmet_data_dir(req));
+}
+
+static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvmet_rdma_queue *queue = rsp->queue;
+
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+
+ if (rsp->n_rdma)
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (rsp->req.sg != rsp->cmd->inline_sg)
+ nvmet_req_free_sgls(&rsp->req);
+
+ if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
+ nvmet_rdma_process_wr_wait_list(queue);
+
+ nvmet_rdma_put_rsp(rsp);
+}
+
+static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
+{
+ if (queue->nvme_sq.ctrl) {
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+ } else {
+ /*
+ * The controller isn't set up yet (e.g. an admin
+ * connect error), so just disconnect and clean up
+ * the queue.
+ */
+ nvmet_rdma_queue_disconnect(queue);
+ }
+}
+
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+
+ nvmet_rdma_release_rsp(rsp);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)) {
+ pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+}
+
+static void nvmet_rdma_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(req, struct nvmet_rdma_rsp, req);
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct ib_send_wr *first_wr;
+
+ if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
+ rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
+ } else {
+ rsp->send_wr.opcode = IB_WR_SEND;
+ }
+
+ if (nvmet_rdma_need_data_out(rsp)) {
+ if (rsp->req.metadata_len)
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, &rsp->write_cqe, NULL);
+ else
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, NULL, &rsp->send_wr);
+ } else {
+ first_wr = &rsp->send_wr;
+ }
+
+ nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+ ib_dma_sync_single_for_device(rsp->queue->dev->device,
+ rsp->send_sge.addr, rsp->send_sge.length,
+ DMA_TO_DEVICE);
+
+ if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
+}
+
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ u16 status = 0;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
+ nvmet_req_uninit(&rsp->req);
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ if (rsp->req.metadata_len)
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(status))
+ nvmet_req_complete(&rsp->req, status);
+ else
+ rsp->req.execute(&rsp->req);
+}
+
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ u16 status;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
+ nvmet_req_uninit(&rsp->req);
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
+ ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ /*
+ * Upon RDMA completion check the signature status
+ * - if succeeded send good NVMe response
+ * - if failed send bad NVMe response with appropriate error
+ */
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ if (unlikely(status))
+ rsp->req.cqe->status = cpu_to_le16(status << 1);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
+}
+
+static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
+ u64 off)
+{
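+ /*
+ * For example, len == 6000 with off == 100 and 4KB pages maps to two
+ * entries: {offset 100, length 3996} and {offset 0, length 2004};
+ * only the first inline page carries the initial offset.
+ */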
+ int sg_count = num_pages(len);
+ struct scatterlist *sg;
+ int i;
+
+ sg = rsp->cmd->inline_sg;
+ for (i = 0; i < sg_count; i++, sg++) {
+ if (i < sg_count - 1)
+ sg_unmark_end(sg);
+ else
+ sg_mark_end(sg);
+ sg->offset = off;
+ sg->length = min_t(int, len, PAGE_SIZE - off);
+ len -= sg->length;
+ if (!i)
+ off = 0;
+ }
+
+ rsp->req.sg = rsp->cmd->inline_sg;
+ rsp->req.sg_cnt = sg_count;
+}
+
+static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
+ u64 off = le64_to_cpu(sgl->addr);
+ u32 len = le32_to_cpu(sgl->length);
+
+ if (!nvme_is_write(rsp->req.cmd)) {
+ rsp->req.error_loc =
+ offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ if (off + len > rsp->queue->dev->inline_data_size) {
+ pr_err("invalid inline data offset!\n");
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ }
+
+ /* no data command? */
+ if (!len)
+ return 0;
+
+ nvmet_rdma_use_inline_sg(rsp, len, off);
+ rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
+ rsp->req.transfer_len += len;
+ return 0;
+}
+
+static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
+ struct nvme_keyed_sgl_desc *sgl, bool invalidate)
+{
+ u64 addr = le64_to_cpu(sgl->addr);
+ u32 key = get_unaligned_le32(sgl->key);
+ struct ib_sig_attrs sig_attrs;
+ int ret;
+
+ rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
+ /* no data command? */
+ if (!rsp->req.transfer_len)
+ return 0;
+
+ if (rsp->req.metadata_len)
+ nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
+
+ ret = nvmet_req_alloc_sgls(&rsp->req);
+ if (unlikely(ret < 0))
+ goto error_out;
+
+ ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
+ if (unlikely(ret < 0))
+ goto error_out;
+ rsp->n_rdma += ret;
+
+ if (invalidate) {
+ rsp->invalidate_rkey = key;
+ rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
+ }
+
+ return 0;
+
+error_out:
+ rsp->req.transfer_len = 0;
+ return NVME_SC_INTERNAL;
+}
+
+static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
+
+ switch (sgl->type >> 4) {
+ case NVME_SGL_FMT_DATA_DESC:
+ switch (sgl->type & 0xf) {
+ case NVME_SGL_FMT_OFFSET:
+ return nvmet_rdma_map_sgl_inline(rsp);
+ default:
+ pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ rsp->req.error_loc =
+ offsetof(struct nvme_common_command, dptr);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+ case NVME_KEY_SGL_FMT_DATA_DESC:
+ switch (sgl->type & 0xf) {
+ case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
+ return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
+ case NVME_SGL_FMT_ADDRESS:
+ return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
+ default:
+ pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ rsp->req.error_loc =
+ offsetof(struct nvme_common_command, dptr);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+ default:
+ pr_err("invalid SGL type: %#x\n", sgl->type);
+ rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
+ return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+ }
+}
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvmet_rdma_queue *queue = rsp->queue;
+
+ if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
+ &queue->sq_wr_avail) < 0)) {
+ pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
+ 1 + rsp->n_rdma, queue->idx,
+ queue->nvme_sq.ctrl->cntlid);
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+ return false;
+ }
+
+ if (nvmet_rdma_need_data_in(rsp)) {
+ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
+ queue->cm_id->port_num, &rsp->read_cqe, NULL))
+ nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
+ } else {
+ rsp->req.execute(&rsp->req);
+ }
+
+ return true;
+}
+
+static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
+ struct nvmet_rdma_rsp *cmd)
+{
+ u16 status;
+
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->send_sge.addr, cmd->send_sge.length,
+ DMA_TO_DEVICE);
+
+ if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvmet_rdma_ops))
+ return;
+
+ status = nvmet_rdma_map_sgl(cmd);
+ if (status)
+ goto out_err;
+
+ if (unlikely(!nvmet_rdma_execute_command(cmd))) {
+ spin_lock(&queue->rsp_wr_wait_lock);
+ list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
+ spin_unlock(&queue->rsp_wr_wait_lock);
+ }
+
+ return;
+
+out_err:
+ nvmet_req_complete(&cmd->req, status);
+}
+
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_cmd *cmd =
+ container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ struct nvmet_rdma_rsp *rsp;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status),
+ wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
+ pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
+ nvmet_rdma_error_comp(queue);
+ return;
+ }
+
+ cmd->queue = queue;
+ rsp = nvmet_rdma_get_rsp(queue);
+ if (unlikely(!rsp)) {
+ /*
+ * We only get here under memory pressure:
+ * silently drop the command and let the host
+ * retry, as we can't even fail it.
+ */
+ nvmet_rdma_post_recv(queue->dev, cmd);
+ return;
+ }
+ rsp->queue = queue;
+ rsp->cmd = cmd;
+ rsp->flags = 0;
+ rsp->req.cmd = cmd->nvme_cmd;
+ rsp->req.port = queue->port;
+ rsp->n_rdma = 0;
+
+ if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state == NVMET_RDMA_Q_CONNECTING)
+ list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+ else
+ nvmet_rdma_put_rsp(rsp);
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ return;
+ }
+
+ nvmet_rdma_handle_command(queue, rsp);
+}
+
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
+{
+ nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
+ false);
+ ib_destroy_srq(nsrq->srq);
+
+ kfree(nsrq);
+}
+
+static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i;
+
+ if (!ndev->srqs)
+ return;
+
+ for (i = 0; i < ndev->srq_count; i++)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+
+ kfree(ndev->srqs);
+}
+
+static struct nvmet_rdma_srq *
+nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+{
+ struct ib_srq_init_attr srq_attr = { NULL, };
+ size_t srq_size = ndev->srq_size;
+ struct nvmet_rdma_srq *nsrq;
+ struct ib_srq *srq;
+ int ret, i;
+
+ nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
+ if (!nsrq)
+ return ERR_PTR(-ENOMEM);
+
+ srq_attr.attr.max_wr = srq_size;
+ srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
+ srq_attr.attr.srq_limit = 0;
+ srq_attr.srq_type = IB_SRQT_BASIC;
+ srq = ib_create_srq(ndev->pd, &srq_attr);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto out_free;
+ }
+
+ nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+ if (IS_ERR(nsrq->cmds)) {
+ ret = PTR_ERR(nsrq->cmds);
+ goto out_destroy_srq;
+ }
+
+ nsrq->srq = srq;
+ nsrq->ndev = ndev;
+
+ for (i = 0; i < srq_size; i++) {
+ nsrq->cmds[i].nsrq = nsrq;
+ ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
+ if (ret)
+ goto out_free_cmds;
+ }
+
+ return nsrq;
+
+out_free_cmds:
+ nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
+out_destroy_srq:
+ ib_destroy_srq(srq);
+out_free:
+ kfree(nsrq);
+ return ERR_PTR(ret);
+}
+
+static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i, ret;
+
+ if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
+ /*
+ * If SRQs aren't supported we just go ahead and use normal
+ * non-shared receive queues.
+ */
+ pr_info("SRQ requested but not supported.\n");
+ return 0;
+ }
+
+ ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
+ nvmet_rdma_srq_size);
+ ndev->srq_count = min(ndev->device->num_comp_vectors,
+ ndev->device->attrs.max_srq);
+
+ ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
+ if (!ndev->srqs)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->srq_count; i++) {
+ ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
+ if (IS_ERR(ndev->srqs[i])) {
+ ret = PTR_ERR(ndev->srqs[i]);
+ goto err_srq;
+ }
+ }
+
+ return 0;
+
+err_srq:
+ while (--i >= 0)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+ kfree(ndev->srqs);
+ return ret;
+}
+
+static void nvmet_rdma_free_dev(struct kref *ref)
+{
+ struct nvmet_rdma_device *ndev =
+ container_of(ref, struct nvmet_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ nvmet_rdma_destroy_srqs(ndev);
+ ib_dealloc_pd(ndev->pd);
+
+ kfree(ndev);
+}
+
+static struct nvmet_rdma_device *
+nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvmet_rdma_port *port = cm_id->context;
+ struct nvmet_port *nport = port->nport;
+ struct nvmet_rdma_device *ndev;
+ int inline_page_count;
+ int inline_sge_count;
+ int ret;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->device->node_guid == cm_id->device->node_guid &&
+ kref_get_unless_zero(&ndev->ref))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ inline_page_count = num_pages(nport->inline_data_size);
+ inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
+ cm_id->device->attrs.max_recv_sge) - 1;
+ if (inline_page_count > inline_sge_count) {
+ pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
+ nport->inline_data_size, cm_id->device->name,
+ inline_sge_count * PAGE_SIZE);
+ nport->inline_data_size = inline_sge_count * PAGE_SIZE;
+ inline_page_count = inline_sge_count;
+ }
+ ndev->inline_data_size = nport->inline_data_size;
+ ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
+ IBK_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
+ ndev->device = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->device, 0);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (nvmet_rdma_use_srq) {
+ ret = nvmet_rdma_init_srqs(ndev);
+ if (ret)
+ goto out_free_pd;
+ }
+
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ pr_debug("added %s.\n", ndev->device->name);
+ return ndev;
+
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
+{
+ struct ib_qp_init_attr qp_attr = { };
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int nr_cqe, ret, i, factor;
+
+ /*
+ * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
+ */
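+ /*
+ * E.g. a queue with 128 recv and 128 send entries needs
+ * 128 + 2 * 128 = 384 CQEs; one extra slot is requested from
+ * ib_cq_pool_get() below, presumably to cover the drain WR.
+ */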
+ nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
+
+ queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+ queue->comp_vector, IB_POLL_WORKQUEUE);
+ if (IS_ERR(queue->cq)) {
+ ret = PTR_ERR(queue->cq);
+ pr_err("failed to create CQ cqe= %d ret= %d\n",
+ nr_cqe + 1, ret);
+ goto out;
+ }
+
+ qp_attr.qp_context = queue;
+ qp_attr.event_handler = nvmet_rdma_qp_event;
+ qp_attr.send_cq = queue->cq;
+ qp_attr.recv_cq = queue->cq;
+ qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_attr.qp_type = IB_QPT_RC;
+ /* +1 for drain */
+ qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
+ factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
+ 1 << NVMET_RDMA_MAX_MDTS);
+ qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
+ qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
+ ndev->device->attrs.max_send_sge);
+
+ if (queue->nsrq) {
+ qp_attr.srq = queue->nsrq->srq;
+ } else {
+ /* +1 for drain */
+ qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
+ qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
+ }
+
+ if (queue->port->pi_enable && queue->host_qid)
+ qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+
+ ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
+ if (ret) {
+ pr_err("failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
+ queue->qp = queue->cm_id->qp;
+
+ atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
+ qp_attr.cap.max_send_wr, queue->cm_id);
+
+ if (!queue->nsrq) {
+ for (i = 0; i < queue->recv_queue_size; i++) {
+ queue->cmds[i].queue = queue;
+ ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ if (ret)
+ goto err_destroy_qp;
+ }
+ }
+
+out:
+ return ret;
+
+err_destroy_qp:
+ rdma_destroy_qp(queue->cm_id);
+err_destroy_cq:
+ ib_cq_pool_put(queue->cq, nr_cqe + 1);
+ goto out;
+}
+
+static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
+{
+ ib_drain_qp(queue->qp);
+ if (queue->cm_id)
+ rdma_destroy_id(queue->cm_id);
+ ib_destroy_qp(queue->qp);
+ ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
+ queue->send_queue_size + 1);
+}
+
+static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
+{
+ pr_debug("freeing queue %d\n", queue->idx);
+
+ nvmet_sq_destroy(&queue->nvme_sq);
+
+ nvmet_rdma_destroy_queue_ib(queue);
+ if (!queue->nsrq) {
+ nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ }
+ nvmet_rdma_free_rsps(queue);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
+ kfree(queue);
+}
+
+static void nvmet_rdma_release_queue_work(struct work_struct *w)
+{
+ struct nvmet_rdma_queue *queue =
+ container_of(w, struct nvmet_rdma_queue, release_work);
+ struct nvmet_rdma_device *dev = queue->dev;
+
+ nvmet_rdma_free_queue(queue);
+
+ kref_put(&dev->ref, nvmet_rdma_free_dev);
+}
+
+static int
+nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
+ struct nvmet_rdma_queue *queue)
+{
+ struct nvme_rdma_cm_req *req;
+
+ req = (struct nvme_rdma_cm_req *)conn->private_data;
+ if (!req || conn->private_data_len == 0)
+ return NVME_RDMA_CM_INVALID_LEN;
+
+ if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
+ return NVME_RDMA_CM_INVALID_RECFMT;
+
+ queue->host_qid = le16_to_cpu(req->qid);
+
+ /*
+ * req->hsqsize corresponds to our recv queue size plus 1
+ * req->hrqsize corresponds to our send queue size
+ */
+ queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
+ queue->send_queue_size = le16_to_cpu(req->hrqsize);
+
+ if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
+ return NVME_RDMA_CM_INVALID_HSQSIZE;
+
+ /* XXX: Should we enforce some kind of max for IO queues? */
+
+ return 0;
+}
+
+static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
+ enum nvme_rdma_cm_status status)
+{
+ struct nvme_rdma_cm_rej rej;
+
+ pr_debug("rejecting connect request: status %d (%s)\n",
+ status, nvme_rdma_cm_msg(status));
+
+ rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ rej.sts = cpu_to_le16(status);
+
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
+}
+
+static struct nvmet_rdma_queue *
+nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
+ struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_port *port = cm_id->context;
+ struct nvmet_rdma_queue *queue;
+ int ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_reject;
+ }
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_queue;
+ }
+
+ ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
+ if (ret)
+ goto out_destroy_sq;
+
+ /*
+ * Schedules the actual release because calling rdma_destroy_id from
+ * inside a CM callback would trigger a deadlock. (great API design..)
+ */
+ INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
+ queue->dev = ndev;
+ queue->cm_id = cm_id;
+ queue->port = port->nport;
+
+ spin_lock_init(&queue->state_lock);
+ queue->state = NVMET_RDMA_Q_CONNECTING;
+ INIT_LIST_HEAD(&queue->rsp_wait_list);
+ INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
+ spin_lock_init(&queue->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&queue->free_rsps);
+ spin_lock_init(&queue->rsps_lock);
+ INIT_LIST_HEAD(&queue->queue_list);
+
+ queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
+ if (queue->idx < 0) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_destroy_sq;
+ }
+
+ /*
+ * Spread the io queues across completion vectors,
+ * but still keep all admin queues on vector 0.
+ */
+ queue->comp_vector = !queue->host_qid ? 0 :
+ queue->idx % ndev->device->num_comp_vectors;
+
+
+ ret = nvmet_rdma_alloc_rsps(queue);
+ if (ret) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_ida_remove;
+ }
+
+ if (ndev->srqs) {
+ queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
+ } else {
+ queue->cmds = nvmet_rdma_alloc_cmds(ndev,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ if (IS_ERR(queue->cmds)) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_responses;
+ }
+ }
+
+ ret = nvmet_rdma_create_queue_ib(queue);
+ if (ret) {
+ pr_err("%s: creating RDMA queue failed (%d).\n",
+ __func__, ret);
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_cmds;
+ }
+
+ return queue;
+
+out_free_cmds:
+ if (!queue->nsrq) {
+ nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ }
+out_free_responses:
+ nvmet_rdma_free_rsps(queue);
+out_ida_remove:
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
+out_destroy_sq:
+ nvmet_sq_destroy(&queue->nvme_sq);
+out_free_queue:
+ kfree(queue);
+out_reject:
+ nvmet_rdma_cm_reject(cm_id, ret);
+ return NULL;
+}
+
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
+{
+ struct nvmet_rdma_queue *queue = priv;
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ rdma_notify(queue->cm_id, event->event);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ pr_debug("received last WQE reached event for queue=0x%p\n",
+ queue);
+ break;
+ default:
+ pr_err("received IB QP event: %s (%d)\n",
+ ib_event_msg(event->event), event->event);
+ break;
+ }
+}
+
+static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue,
+ struct rdma_conn_param *p)
+{
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_rep priv = { };
+ int ret = -ENOMEM;
+
+ param.rnr_retry_count = 7;
+ param.flow_control = 1;
+ param.initiator_depth = min_t(u8, p->initiator_depth,
+ queue->dev->device->attrs.max_qp_init_rd_atom);
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.crqsize = cpu_to_le16(queue->recv_queue_size);
+
+ ret = rdma_accept(cm_id, &param);
+ if (ret)
+ pr_err("rdma_accept failed (error code = %d)\n", ret);
+
+ return ret;
+}
+
+static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_device *ndev;
+ struct nvmet_rdma_queue *queue;
+ int ret = -EINVAL;
+
+ ndev = nvmet_rdma_find_get_device(cm_id);
+ if (!ndev) {
+ nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
+ return -ECONNREFUSED;
+ }
+
+ queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
+ if (!queue) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
+
+ if (queue->host_qid == 0) {
+ /* Let inflight controller teardown complete */
+ flush_workqueue(nvmet_wq);
+ }
+
+ ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+ if (ret) {
+ /*
+ * Don't destroy the cm_id in the free path, as returning a
+ * non-zero ret code from here implicitly destroys it.
+ */
+ queue->cm_id = NULL;
+ goto free_queue;
+ }
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ return 0;
+
+free_queue:
+ nvmet_rdma_free_queue(queue);
+put_device:
+ kref_put(&ndev->ref, nvmet_rdma_free_dev);
+
+ return ret;
+}
+
+static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state != NVMET_RDMA_Q_CONNECTING) {
+ pr_warn("trying to establish a connected queue\n");
+ goto out_unlock;
+ }
+ queue->state = NVMET_RDMA_Q_LIVE;
+
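+	/*
+	 * Commands that arrived while the queue was still connecting were
+	 * parked on rsp_wait_list; process them now that the queue is live.
+	 */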
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *cmd;
+
+ cmd = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp, wait_list);
+ list_del(&cmd->wait_list);
+
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ nvmet_rdma_handle_command(queue, cmd);
+ spin_lock_irqsave(&queue->state_lock, flags);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+}
+
+static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+ bool disconnect = false;
+ unsigned long flags;
+
+ pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ switch (queue->state) {
+ case NVMET_RDMA_Q_CONNECTING:
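+		/*
+		 * The queue never went live; release any commands that were
+		 * parked on the wait list while connecting.
+		 */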
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
+ case NVMET_RDMA_Q_LIVE:
+ queue->state = NVMET_RDMA_Q_DISCONNECTING;
+ disconnect = true;
+ break;
+ case NVMET_RDMA_Q_DISCONNECTING:
+ break;
+ }
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+
+ if (disconnect) {
+ rdma_disconnect(queue->cm_id);
+ queue_work(nvmet_wq, &queue->release_work);
+ }
+}
+
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+ bool disconnect = false;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ if (!list_empty(&queue->queue_list)) {
+ list_del_init(&queue->queue_list);
+ disconnect = true;
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ if (disconnect)
+ __nvmet_rdma_queue_disconnect(queue);
+}
+
+static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue)
+{
+ WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ if (!list_empty(&queue->queue_list))
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ pr_err("failed to connect queue %d\n", queue->idx);
+ queue_work(nvmet_wq, &queue->release_work);
+}
+
+/**
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
+ * @cm_id: rdma_cm id, used for nvmet port
+ * @queue: nvmet rdma queue (cm id qp_context)
+ *
+ * A DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to be unplugged. Note that this event can be generated on a normal
+ * queue cm_id or on a device-bound listener cm_id (in which case
+ * queue will be NULL).
+ *
+ * We registered an ib_client to handle device removal for queues,
+ * so we only need to handle the listening port cm_ids. In this case
+ * we nullify the priv to prevent a double cm_id destruction, and destroy
+ * the cm_id implicitly by returning a non-zero rc to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_port *port;
+
+ if (queue) {
+ /*
+		 * This is a queue cm_id. We have registered
+		 * an ib_client to handle queue removal,
+		 * so don't interfere and just return.
+ */
+ return 0;
+ }
+
+ port = cm_id->context;
+
+ /*
+ * This is a listener cm_id. Make sure that
+ * future remove_port won't invoke a double
+	 * cm_id destroy. Use atomic xchg to make sure
+ * we don't compete with remove_port.
+ */
+ if (xchg(&port->cm_id, NULL) != cm_id)
+ return 0;
+
+ /*
+ * We need to return 1 so that the core will destroy
+	 * its own ID. What a great API design..
+ */
+ return 1;
+}
+
+static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_queue *queue = NULL;
+ int ret = 0;
+
+ if (cm_id->qp)
+ queue = cm_id->qp->qp_context;
+
+ pr_debug("%s (%d): status %d id %p\n",
+ rdma_event_msg(event->event), event->event,
+ event->status, cm_id);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ ret = nvmet_rdma_queue_connect(cm_id, event);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ nvmet_rdma_queue_established(queue);
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ if (!queue) {
+ struct nvmet_rdma_port *port = cm_id->context;
+
+ queue_delayed_work(nvmet_wq, &port->repair_work, 0);
+ break;
+ }
+ fallthrough;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ nvmet_rdma_queue_disconnect(queue);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ ret = nvmet_rdma_device_removal(cm_id, queue);
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ pr_debug("Connection rejected: %s\n",
+ rdma_reject_msg(cm_id, event->status));
+ fallthrough;
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ nvmet_rdma_queue_connect_fail(cm_id, queue);
+ break;
+ default:
+ pr_err("received unrecognized RDMA CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_rdma_queue *queue;
+
+restart:
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+ if (queue->nvme_sq.ctrl == ctrl) {
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ __nvmet_rdma_queue_disconnect(queue);
+ goto restart;
+ }
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
+{
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_port *nport = port->nport;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->port != nport)
+ continue;
+
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
+{
+ struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
+
+ if (cm_id)
+ rdma_destroy_id(cm_id);
+
+ /*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet. Doing it here, after the RDMA-CM ID has been
+	 * destroyed, guarantees that no new queue will be created.
+ */
+ nvmet_rdma_destroy_port_queues(port);
+}
+
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+ struct sockaddr *addr = (struct sockaddr *)&port->addr;
+ struct rdma_cm_id *cm_id;
+ int ret;
+
+ cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ pr_err("CM ID creation failed\n");
+ return PTR_ERR(cm_id);
+ }
+
+ /*
+ * Allow both IPv4 and IPv6 sockets to bind a single port
+ * at the same time.
+ */
+ ret = rdma_set_afonly(cm_id, 1);
+ if (ret) {
+ pr_err("rdma_set_afonly failed (%d)\n", ret);
+ goto out_destroy_id;
+ }
+
+ ret = rdma_bind_addr(cm_id, addr);
+ if (ret) {
+ pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
+ goto out_destroy_id;
+ }
+
+ ret = rdma_listen(cm_id, 128);
+ if (ret) {
+ pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
+ goto out_destroy_id;
+ }
+
+ port->cm_id = cm_id;
+ return 0;
+
+out_destroy_id:
+ rdma_destroy_id(cm_id);
+ return ret;
+}
+
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
+{
+ struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+ struct nvmet_rdma_port, repair_work);
+ int ret;
+
+ nvmet_rdma_disable_port(port);
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port;
+ __kernel_sa_family_t af;
+ int ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ nport->priv = port;
+ port->nport = nport;
+ INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto out_free_port;
+ }
+
+ if (nport->inline_data_size < 0) {
+ nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ nport->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto out_free_port;
+ }
+
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ goto out_free_port;
+
+ pr_info("enabling port %d (%pISpcs)\n",
+ le16_to_cpu(nport->disc_addr.portid),
+ (struct sockaddr *)&port->addr);
+
+ return 0;
+
+out_free_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port = nport->priv;
+
+ cancel_delayed_work_sync(&port->repair_work);
+ nvmet_rdma_disable_port(port);
+ kfree(port);
+}
+
+static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
+ struct nvmet_port *nport, char *traddr)
+{
+ struct nvmet_rdma_port *port = nport->priv;
+ struct rdma_cm_id *cm_id = port->cm_id;
+
+ if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
+ struct nvmet_rdma_rsp *rsp =
+ container_of(req, struct nvmet_rdma_rsp, req);
+ struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
+ struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
+
+ sprintf(traddr, "%pISc", addr);
+ } else {
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ }
+}
+
+static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
+{
+ if (ctrl->pi_support)
+ return NVMET_RDMA_MAX_METADATA_MDTS;
+ return NVMET_RDMA_MAX_MDTS;
+}
+
+static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
+{
+ return NVME_RDMA_MAX_QUEUE_SIZE;
+}
+
+static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_RDMA,
+ .msdbd = 1,
+ .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
+ .add_port = nvmet_rdma_add_port,
+ .remove_port = nvmet_rdma_remove_port,
+ .queue_response = nvmet_rdma_queue_response,
+ .delete_ctrl = nvmet_rdma_delete_ctrl,
+ .disc_traddr = nvmet_rdma_disc_port_addr,
+ .get_mdts = nvmet_rdma_get_mdts,
+ .get_max_queue_size = nvmet_rdma_get_max_queue_size,
+};
+
+static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_rdma_device *ndev;
+ bool found = false;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->device == ib_device) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&device_list_mutex);
+
+ if (!found)
+ return;
+
+ /*
+ * IB Device that is used by nvmet controllers is being removed,
+ * delete all queues using this device.
+ */
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->dev->device != ib_device)
+ continue;
+
+ pr_info("Removing queue %d\n", queue->idx);
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ flush_workqueue(nvmet_wq);
+}
+
+static struct ib_client nvmet_rdma_ib_client = {
+ .name = "nvmet_rdma",
+ .remove = nvmet_rdma_remove_one
+};
+
+static int __init nvmet_rdma_init(void)
+{
+ int ret;
+
+ ret = ib_register_client(&nvmet_rdma_ib_client);
+ if (ret)
+ return ret;
+
+ ret = nvmet_register_transport(&nvmet_rdma_ops);
+ if (ret)
+ goto err_ib_client;
+
+ return 0;
+
+err_ib_client:
+ ib_unregister_client(&nvmet_rdma_ib_client);
+ return ret;
+}
+
+static void __exit nvmet_rdma_exit(void)
+{
+ nvmet_unregister_transport(&nvmet_rdma_ops);
+ ib_unregister_client(&nvmet_rdma_ib_client);
+ WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
+ ida_destroy(&nvmet_rdma_queue_ida);
+}
+
+module_init(nvmet_rdma_init);
+module_exit(nvmet_rdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
new file mode 100644
index 0000000000..a4f802790c
--- /dev/null
+++ b/drivers/nvme/target/tcp.c
@@ -0,0 +1,1936 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics TCP target.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/nvme-tcp.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/inet.h>
+#include <linux/llist.h>
+#include <crypto/hash.h>
+#include <trace/events/sock.h>
+
+#include "nvmet.h"
+
+#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+
+static int param_store_val(const char *str, int *val, int min, int max)
+{
+ int ret, new_val;
+
+ ret = kstrtoint(str, 10, &new_val);
+ if (ret)
+ return -EINVAL;
+
+ if (new_val < min || new_val > max)
+ return -EINVAL;
+
+ *val = new_val;
+ return 0;
+}
+
+static int set_params(const char *str, const struct kernel_param *kp)
+{
+ return param_store_val(str, kp->arg, 0, INT_MAX);
+}
+
+static const struct kernel_param_ops set_param_ops = {
+ .set = set_params,
+ .get = param_get_int,
+};
+
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * A non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique to some NIC implementations.
+ */
+static int so_priority;
+device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
+MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
+
+/* Define a time period (in usecs) for which io_work() shall sample an
+ * activated queue before determining it to be idle. This optional module
+ * behavior can enable NIC solutions that support socket-optimized packet
+ * processing using advanced interrupt moderation techniques.
+ */
+static int idle_poll_period_usecs;
+device_param_cb(idle_poll_period_usecs, &set_param_ops,
+ &idle_poll_period_usecs, 0644);
+MODULE_PARM_DESC(idle_poll_period_usecs,
+ "nvmet tcp io_work poll till idle time period in usecs: Default 0");
+
+#define NVMET_TCP_RECV_BUDGET 8
+#define NVMET_TCP_SEND_BUDGET 8
+#define NVMET_TCP_IO_WORK_BUDGET 64
+
+enum nvmet_tcp_send_state {
+ NVMET_TCP_SEND_DATA_PDU,
+ NVMET_TCP_SEND_DATA,
+ NVMET_TCP_SEND_R2T,
+ NVMET_TCP_SEND_DDGST,
+ NVMET_TCP_SEND_RESPONSE
+};
+
+enum nvmet_tcp_recv_state {
+ NVMET_TCP_RECV_PDU,
+ NVMET_TCP_RECV_DATA,
+ NVMET_TCP_RECV_DDGST,
+ NVMET_TCP_RECV_ERR,
+};
+
+enum {
+ NVMET_TCP_F_INIT_FAILED = (1 << 0),
+};
+
+struct nvmet_tcp_cmd {
+ struct nvmet_tcp_queue *queue;
+ struct nvmet_req req;
+
+ struct nvme_tcp_cmd_pdu *cmd_pdu;
+ struct nvme_tcp_rsp_pdu *rsp_pdu;
+ struct nvme_tcp_data_pdu *data_pdu;
+ struct nvme_tcp_r2t_pdu *r2t_pdu;
+
+ u32 rbytes_done;
+ u32 wbytes_done;
+
+ u32 pdu_len;
+ u32 pdu_recv;
+ int sg_idx;
+ struct msghdr recv_msg;
+ struct bio_vec *iov;
+ u32 flags;
+
+ struct list_head entry;
+ struct llist_node lentry;
+
+ /* send state */
+ u32 offset;
+ struct scatterlist *cur_sg;
+ enum nvmet_tcp_send_state state;
+
+ __le32 exp_ddgst;
+ __le32 recv_ddgst;
+};
+
+enum nvmet_tcp_queue_state {
+ NVMET_TCP_Q_CONNECTING,
+ NVMET_TCP_Q_LIVE,
+ NVMET_TCP_Q_DISCONNECTING,
+};
+
+struct nvmet_tcp_queue {
+ struct socket *sock;
+ struct nvmet_tcp_port *port;
+ struct work_struct io_work;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+
+ /* send state */
+ struct nvmet_tcp_cmd *cmds;
+ unsigned int nr_cmds;
+ struct list_head free_list;
+ struct llist_head resp_list;
+ struct list_head resp_send_list;
+ int send_list_len;
+ struct nvmet_tcp_cmd *snd_cmd;
+
+ /* recv state */
+ int offset;
+ int left;
+ enum nvmet_tcp_recv_state rcv_state;
+ struct nvmet_tcp_cmd *cmd;
+ union nvme_tcp_pdu pdu;
+
+ /* digest state */
+ bool hdr_digest;
+ bool data_digest;
+ struct ahash_request *snd_hash;
+ struct ahash_request *rcv_hash;
+
+ unsigned long poll_end;
+
+ spinlock_t state_lock;
+ enum nvmet_tcp_queue_state state;
+
+ struct sockaddr_storage sockaddr;
+ struct sockaddr_storage sockaddr_peer;
+ struct work_struct release_work;
+
+ int idx;
+ struct list_head queue_list;
+
+ struct nvmet_tcp_cmd connect;
+
+ struct page_frag_cache pf_cache;
+
+ void (*data_ready)(struct sock *);
+ void (*state_change)(struct sock *);
+ void (*write_space)(struct sock *);
+};
+
+struct nvmet_tcp_port {
+ struct socket *sock;
+ struct work_struct accept_work;
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ void (*data_ready)(struct sock *);
+};
+
+static DEFINE_IDA(nvmet_tcp_queue_ida);
+static LIST_HEAD(nvmet_tcp_queue_list);
+static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
+
+static struct workqueue_struct *nvmet_tcp_wq;
+static const struct nvmet_fabrics_ops nvmet_tcp_ops;
+static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+
+static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
+ struct nvmet_tcp_cmd *cmd)
+{
+ if (unlikely(!queue->nr_cmds)) {
+ /* We didn't allocate cmds yet, send 0xffff */
+ return USHRT_MAX;
+ }
+
+ return cmd - queue->cmds;
+}
+
+static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
+{
+ return nvme_is_write(cmd->req.cmd) &&
+ cmd->rbytes_done < cmd->req.transfer_len;
+}
+
+static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
+{
+ return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
+}
+
+static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
+{
+ return !nvme_is_write(cmd->req.cmd) &&
+ cmd->req.transfer_len > 0 &&
+ !cmd->req.cqe->status;
+}
+
+static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
+{
+ return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
+ !cmd->rbytes_done;
+}
+
+static inline struct nvmet_tcp_cmd *
+nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd;
+
+ cmd = list_first_entry_or_null(&queue->free_list,
+ struct nvmet_tcp_cmd, entry);
+ if (!cmd)
+ return NULL;
+ list_del_init(&cmd->entry);
+
+ cmd->rbytes_done = cmd->wbytes_done = 0;
+ cmd->pdu_len = 0;
+ cmd->pdu_recv = 0;
+ cmd->iov = NULL;
+ cmd->flags = 0;
+ return cmd;
+}
+
+static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
+{
+ if (unlikely(cmd == &cmd->queue->connect))
+ return;
+
+ list_add_tail(&cmd->entry, &cmd->queue->free_list);
+}
+
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
+ return queue->sock->sk->sk_incoming_cpu;
+}
+
+static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
+{
+ return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
+{
+ return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
+ void *pdu, size_t len)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, pdu, len);
+ ahash_request_set_crypt(hash, &sg, pdu + len, len);
+ crypto_ahash_digest(hash);
+}
+
+static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
+ void *pdu, size_t len)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ __le32 recv_digest;
+ __le32 exp_digest;
+
+ if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
+ pr_err("queue %d: header digest enabled but no header digest\n",
+ queue->idx);
+ return -EPROTO;
+ }
+
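+	/*
+	 * Save the digest sent by the host, then recompute the header
+	 * digest in place and compare the two.
+	 */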
+ recv_digest = *(__le32 *)(pdu + hdr->hlen);
+ nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
+ exp_digest = *(__le32 *)(pdu + hdr->hlen);
+ if (recv_digest != exp_digest) {
+ pr_err("queue %d: header digest error: recv %#x expected %#x\n",
+ queue->idx, le32_to_cpu(recv_digest),
+ le32_to_cpu(exp_digest));
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ u8 digest_len = nvmet_tcp_hdgst_len(queue);
+ u32 len;
+
+ len = le32_to_cpu(hdr->plen) - hdr->hlen -
+ (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
+
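+	/*
+	 * Data digest was negotiated, so any PDU that carries data must
+	 * have the DDGST flag set.
+	 */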
+ if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
+ pr_err("queue %d: data digest flag is cleared\n", queue->idx);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+{
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ cmd->iov = NULL;
+ cmd->req.sg = NULL;
+}
+
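+/*
+ * Build a bio_vec array over the command's scatterlist, starting at the
+ * byte offset already received, and point the receive msghdr iterator at it.
+ */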
+static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+{
+ struct bio_vec *iov = cmd->iov;
+ struct scatterlist *sg;
+ u32 length, offset, sg_offset;
+ int nr_pages;
+
+ length = cmd->pdu_len;
+ nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ offset = cmd->rbytes_done;
+ cmd->sg_idx = offset / PAGE_SIZE;
+ sg_offset = offset % PAGE_SIZE;
+ sg = &cmd->req.sg[cmd->sg_idx];
+
+ while (length) {
+ u32 iov_len = min_t(u32, length, sg->length - sg_offset);
+
+ bvec_set_page(iov, sg_page(sg), iov_len,
+ sg->offset + sg_offset);
+
+ length -= iov_len;
+ sg = sg_next(sg);
+ iov++;
+ sg_offset = 0;
+ }
+
+ iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
+ nr_pages, cmd->pdu_len);
+}
+
+static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
+{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
+ if (queue->nvme_sq.ctrl)
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+ else
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+}
+
+static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
+ if (status == -EPIPE || status == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+}
+
+static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
+ u32 len = le32_to_cpu(sgl->length);
+
+ if (!len)
+ return 0;
+
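+	/*
+	 * An offset data block descriptor means the data is carried
+	 * in-capsule: it must be a write and must fit within the port's
+	 * inline data size.
+	 */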
+ if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
+ NVME_SGL_FMT_OFFSET)) {
+ if (!nvme_is_write(cmd->req.cmd))
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+
+ if (len > cmd->req.port->inline_data_size)
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ cmd->pdu_len = len;
+ }
+ cmd->req.transfer_len += len;
+
+ cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
+ if (!cmd->req.sg)
+ return NVME_SC_INTERNAL;
+ cmd->cur_sg = cmd->req.sg;
+
+ if (nvmet_tcp_has_data_in(cmd)) {
+ cmd->iov = kmalloc_array(cmd->req.sg_cnt,
+ sizeof(*cmd->iov), GFP_KERNEL);
+ if (!cmd->iov)
+ goto err;
+ }
+
+ return 0;
+err:
+ nvmet_tcp_free_cmd_buffers(cmd);
+ return NVME_SC_INTERNAL;
+}
+
+static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ ahash_request_set_crypt(hash, cmd->req.sg,
+ (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
+ crypto_ahash_digest(hash);
+}
+
+static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
+
+ cmd->offset = 0;
+ cmd->state = NVMET_TCP_SEND_DATA_PDU;
+
+ pdu->hdr.type = nvme_tcp_c2h_data;
+ pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
+ NVME_TCP_F_DATA_SUCCESS : 0);
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
+ pdu->hdr.plen =
+ cpu_to_le32(pdu->hdr.hlen + hdgst +
+ cmd->req.transfer_len + ddgst);
+ pdu->command_id = cmd->req.cqe->command_id;
+ pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
+ pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
+
+ if (queue->data_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_DDGST;
+ nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
+ }
+
+ if (cmd->queue->hdr_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ }
+}
+
+static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+
+ cmd->offset = 0;
+ cmd->state = NVMET_TCP_SEND_R2T;
+
+ pdu->hdr.type = nvme_tcp_r2t;
+ pdu->hdr.flags = 0;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = 0;
+ pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
+
+ pdu->command_id = cmd->req.cmd->common.command_id;
+ pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
+ pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
+ pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
+ if (cmd->queue->hdr_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ }
+}
+
+static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+
+ cmd->offset = 0;
+ cmd->state = NVMET_TCP_SEND_RESPONSE;
+
+ pdu->hdr.type = nvme_tcp_rsp;
+ pdu->hdr.flags = 0;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = 0;
+ pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
+ if (cmd->queue->hdr_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ }
+}
+
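+/*
+ * Responses may be queued from any context onto the lockless resp_list;
+ * move them onto the io_work-local resp_send_list here.
+ */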
+static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
+{
+ struct llist_node *node;
+ struct nvmet_tcp_cmd *cmd;
+
+ for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
+ cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
+ list_add(&cmd->entry, &queue->resp_send_list);
+ queue->send_list_len++;
+ }
+}
+
+static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
+{
+ queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
+ struct nvmet_tcp_cmd, entry);
+ if (!queue->snd_cmd) {
+ nvmet_tcp_process_resp_list(queue);
+ queue->snd_cmd =
+ list_first_entry_or_null(&queue->resp_send_list,
+ struct nvmet_tcp_cmd, entry);
+ if (unlikely(!queue->snd_cmd))
+ return NULL;
+ }
+
+ list_del_init(&queue->snd_cmd->entry);
+ queue->send_list_len--;
+
+ if (nvmet_tcp_need_data_out(queue->snd_cmd))
+ nvmet_setup_c2h_data_pdu(queue->snd_cmd);
+ else if (nvmet_tcp_need_data_in(queue->snd_cmd))
+ nvmet_setup_r2t_pdu(queue->snd_cmd);
+ else
+ nvmet_setup_response_pdu(queue->snd_cmd);
+
+ return queue->snd_cmd;
+}
+
+static void nvmet_tcp_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_tcp_cmd *cmd =
+ container_of(req, struct nvmet_tcp_cmd, req);
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ struct nvme_sgl_desc *sgl;
+ u32 len;
+
+ if (unlikely(cmd == queue->cmd)) {
+ sgl = &cmd->req.cmd->common.dptr.sgl;
+ len = le32_to_cpu(sgl->length);
+
+ /*
+ * Wait for inline data before processing the response.
+		 * Avoid using helpers; this might happen before
+		 * nvmet_req_init() has completed.
+ */
+ if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ len && len <= cmd->req.port->inline_data_size &&
+ nvme_is_write(cmd->req.cmd))
+ return;
+ }
+
+ llist_add(&cmd->lentry, &queue->resp_list);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+}
+
+static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
+{
+ if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+ nvmet_tcp_queue_response(&cmd->req);
+ else
+ cmd->req.execute(&cmd->req);
+}
+
+static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
+ };
+ struct bio_vec bvec;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
+ int ret;
+
+ bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+ ret = sock_sendmsg(cmd->queue->sock, &msg);
+ if (ret <= 0)
+ return ret;
+
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ cmd->state = NVMET_TCP_SEND_DATA;
+ cmd->offset = 0;
+ return 1;
+}
+
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
+{
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ int ret;
+
+ while (cmd->cur_sg) {
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
+ };
+ struct page *page = sg_page(cmd->cur_sg);
+ struct bio_vec bvec;
+ u32 left = cmd->cur_sg->length - cmd->offset;
+
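+		/*
+		 * Keep MSG_MORE set unless this is the final fragment of the
+		 * transfer, nothing else is queued to send, and no data digest
+		 * or response PDU will follow.
+		 */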
+ if ((!last_in_batch && cmd->queue->send_list_len) ||
+ cmd->wbytes_done + left < cmd->req.transfer_len ||
+ queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+ msg.msg_flags |= MSG_MORE;
+
+ bvec_set_page(&bvec, page, left, cmd->offset);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+ ret = sock_sendmsg(cmd->queue->sock, &msg);
+ if (ret <= 0)
+ return ret;
+
+ cmd->offset += ret;
+ cmd->wbytes_done += ret;
+
+		/* Done with sg? */
+ if (cmd->offset == cmd->cur_sg->length) {
+ cmd->cur_sg = sg_next(cmd->cur_sg);
+ cmd->offset = 0;
+ }
+ }
+
+ if (queue->data_digest) {
+ cmd->state = NVMET_TCP_SEND_DDGST;
+ cmd->offset = 0;
+ } else {
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
+ }
+
+ if (queue->nvme_sq.sqhd_disabled)
+ nvmet_tcp_free_cmd_buffers(cmd);
+
+ return 1;
+
+}
+
+static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
+ bool last_in_batch)
+{
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+ struct bio_vec bvec;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
+ int ret;
+
+ if (!last_in_batch && cmd->queue->send_list_len)
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
+ bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+ ret = sock_sendmsg(cmd->queue->sock, &msg);
+ if (ret <= 0)
+ return ret;
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ nvmet_tcp_free_cmd_buffers(cmd);
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ return 1;
+}
+
+static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
+{
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+ struct bio_vec bvec;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
+ int ret;
+
+ if (!last_in_batch && cmd->queue->send_list_len)
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
+ bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+ ret = sock_sendmsg(cmd->queue->sock, &msg);
+ if (ret <= 0)
+ return ret;
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ cmd->queue->snd_cmd = NULL;
+ return 1;
+}
+
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
+{
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
+ .iov_len = left
+ };
+ int ret;
+
+ if (!last_in_batch && cmd->queue->send_list_len)
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
+ return 1;
+}
+
+static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
+ bool last_in_batch)
+{
+ struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
+ int ret = 0;
+
+ if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
+ cmd = nvmet_tcp_fetch_cmd(queue);
+ if (unlikely(!cmd))
+ return 0;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
+ ret = nvmet_try_send_data_pdu(cmd);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_DATA) {
+ ret = nvmet_try_send_data(cmd, last_in_batch);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_DDGST) {
+ ret = nvmet_try_send_ddgst(cmd, last_in_batch);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_R2T) {
+ ret = nvmet_try_send_r2t(cmd, last_in_batch);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_RESPONSE)
+ ret = nvmet_try_send_response(cmd, last_in_batch);
+
+done_send:
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ return 0;
+ return ret;
+ }
+
+ return 1;
+}
+
+static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
+ int budget, int *sends)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < budget; i++) {
+ ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
+ break;
+ }
+ (*sends)++;
+ }
+done:
+ return ret;
+}
+
+static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
+{
+ queue->offset = 0;
+ queue->left = sizeof(struct nvme_tcp_hdr);
+ queue->cmd = NULL;
+ queue->rcv_state = NVMET_TCP_RECV_PDU;
+}
+
+static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
+
+ ahash_request_free(queue->rcv_hash);
+ ahash_request_free(queue->snd_hash);
+ crypto_free_ahash(tfm);
+}
+
+static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->snd_hash)
+ goto free_tfm;
+ ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
+
+ queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->rcv_hash)
+ goto free_snd_hash;
+ ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
+
+ return 0;
+free_snd_hash:
+ ahash_request_free(queue->snd_hash);
+free_tfm:
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+}
+
+
+static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
+ struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
+ struct msghdr msg = {};
+ struct kvec iov;
+ int ret;
+
+ if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
+ pr_err("bad nvme-tcp pdu length (%d)\n",
+ le32_to_cpu(icreq->hdr.plen));
+		nvmet_tcp_fatal_error(queue);
+		return -EPROTO;
+	}
+
+ if (icreq->pfv != NVME_TCP_PFV_1_0) {
+ pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
+ return -EPROTO;
+ }
+
+ if (icreq->hpda != 0) {
+ pr_err("queue %d: unsupported hpda %d\n", queue->idx,
+ icreq->hpda);
+ return -EPROTO;
+ }
+
+ queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
+ queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
+ if (queue->hdr_digest || queue->data_digest) {
+ ret = nvmet_tcp_alloc_crypto(queue);
+ if (ret)
+ return ret;
+ }
+
+ memset(icresp, 0, sizeof(*icresp));
+ icresp->hdr.type = nvme_tcp_icresp;
+ icresp->hdr.hlen = sizeof(*icresp);
+ icresp->hdr.pdo = 0;
+ icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
+ icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
+ icresp->cpda = 0;
+ if (queue->hdr_digest)
+ icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+ if (queue->data_digest)
+ icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
+
+ iov.iov_base = icresp;
+ iov.iov_len = sizeof(*icresp);
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (ret < 0)
+ return ret; /* queue removal will cleanup */
+
+ queue->state = NVMET_TCP_Q_LIVE;
+ nvmet_prepare_receive_pdu(queue);
+ return 0;
+}
+
+static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+ struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
+{
+ size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
+ int ret;
+
+ /*
+	 * This command has not been processed yet, so figure out if there
+	 * is still pending data left to receive. If there isn't, we can
+	 * simply prepare for the next pdu and bail out; otherwise we need
+	 * to prepare a buffer and receive the stale data before continuing.
+ */
+ if (!nvme_is_write(cmd->req.cmd) || !data_len ||
+ data_len > cmd->req.port->inline_data_size) {
+ nvmet_prepare_receive_pdu(queue);
+ return;
+ }
+
+ ret = nvmet_tcp_map_data(cmd);
+ if (unlikely(ret)) {
+ pr_err("queue %d: failed to map data\n", queue->idx);
+ nvmet_tcp_fatal_error(queue);
+ return;
+ }
+
+ queue->rcv_state = NVMET_TCP_RECV_DATA;
+ nvmet_tcp_build_pdu_iovec(cmd);
+ cmd->flags |= NVMET_TCP_F_INIT_FAILED;
+}
+
+static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ struct nvmet_tcp_cmd *cmd;
+ unsigned int exp_data_len;
+
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+ cmd = &queue->cmds[data->ttag];
+ } else {
+ cmd = &queue->connect;
+ }
+
+ if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
+ pr_err("ttag %u unexpected data offset %u (expected %u)\n",
+ data->ttag, le32_to_cpu(data->data_offset),
+ cmd->rbytes_done);
+ /* FIXME: use path and transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
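+	/*
+	 * The expected payload is the total PDU length minus the header
+	 * and any header/data digests.
+	 */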
+ exp_data_len = le32_to_cpu(data->hdr.plen) -
+ nvmet_tcp_hdgst_len(queue) -
+ nvmet_tcp_ddgst_len(queue) -
+ sizeof(*data);
+
+ cmd->pdu_len = le32_to_cpu(data->data_length);
+ if (unlikely(cmd->pdu_len != exp_data_len ||
+ cmd->pdu_len == 0 ||
+ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ /* FIXME: use proper transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+ cmd->pdu_recv = 0;
+ nvmet_tcp_build_pdu_iovec(cmd);
+ queue->cmd = cmd;
+ queue->rcv_state = NVMET_TCP_RECV_DATA;
+
+ return 0;
+}
+
+static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+ struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
+ struct nvmet_req *req;
+ int ret;
+
+ if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
+ if (hdr->type != nvme_tcp_icreq) {
+ pr_err("unexpected pdu type (%d) before icreq\n",
+ hdr->type);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+ return nvmet_tcp_handle_icreq(queue);
+ }
+
+ if (unlikely(hdr->type == nvme_tcp_icreq)) {
+ pr_err("queue %d: received icreq pdu in state %d\n",
+ queue->idx, queue->state);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
+ if (hdr->type == nvme_tcp_h2c_data) {
+ ret = nvmet_tcp_handle_h2c_data_pdu(queue);
+ if (unlikely(ret))
+ return ret;
+ return 0;
+ }
+
+ queue->cmd = nvmet_tcp_get_cmd(queue);
+ if (unlikely(!queue->cmd)) {
+ /* This should never happen */
+ pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
+ queue->idx, queue->nr_cmds, queue->send_list_len,
+ nvme_cmd->common.opcode);
+ nvmet_tcp_fatal_error(queue);
+ return -ENOMEM;
+ }
+
+ req = &queue->cmd->req;
+ memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
+
+ if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvmet_tcp_ops))) {
+ pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
+ req->cmd, req->cmd->common.command_id,
+ req->cmd->common.opcode,
+ le32_to_cpu(req->cmd->common.dptr.sgl.length));
+
+ nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
+ return 0;
+ }
+
+ ret = nvmet_tcp_map_data(queue->cmd);
+ if (unlikely(ret)) {
+ pr_err("queue %d: failed to map data\n", queue->idx);
+ if (nvmet_tcp_has_inline_data(queue->cmd))
+ nvmet_tcp_fatal_error(queue);
+ else
+ nvmet_req_complete(req, ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (nvmet_tcp_need_data_in(queue->cmd)) {
+ if (nvmet_tcp_has_inline_data(queue->cmd)) {
+ queue->rcv_state = NVMET_TCP_RECV_DATA;
+ nvmet_tcp_build_pdu_iovec(queue->cmd);
+ return 0;
+ }
+ /* send back R2T */
+ nvmet_tcp_queue_response(&queue->cmd->req);
+ goto out;
+ }
+
+ queue->cmd->req.execute(&queue->cmd->req);
+out:
+ nvmet_prepare_receive_pdu(queue);
+ return ret;
+}
+
+static const u8 nvme_tcp_pdu_sizes[] = {
+ [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
+ [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
+ [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
+};
+
+static inline u8 nvmet_tcp_pdu_size(u8 type)
+{
+ size_t idx = type;
+
+ return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
+ nvme_tcp_pdu_sizes[idx]) ?
+ nvme_tcp_pdu_sizes[idx] : 0;
+}
+
+static inline bool nvmet_tcp_pdu_valid(u8 type)
+{
+ switch (type) {
+ case nvme_tcp_icreq:
+ case nvme_tcp_cmd:
+ case nvme_tcp_h2c_data:
+ /* fallthru */
+ return true;
+ }
+
+ return false;
+}
+
+static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+ int len;
+ struct kvec iov;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+
+recv:
+ iov.iov_base = (void *)&queue->pdu + queue->offset;
+ iov.iov_len = queue->left;
+ len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (unlikely(len < 0))
+ return len;
+
+ queue->offset += len;
+ queue->left -= len;
+ if (queue->left)
+ return -EAGAIN;
+
+ if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
+ u8 hdgst = nvmet_tcp_hdgst_len(queue);
+
+ if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
+ pr_err("unexpected pdu type %d\n", hdr->type);
+ nvmet_tcp_fatal_error(queue);
+ return -EIO;
+ }
+
+ if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
+ pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
+ return -EIO;
+ }
+
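+		/*
+		 * The common header is complete and the PDU type is known;
+		 * receive the remainder of this PDU plus its header digest.
+		 */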
+ queue->left = hdr->hlen - queue->offset + hdgst;
+ goto recv;
+ }
+
+ if (queue->hdr_digest &&
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
+ nvmet_tcp_fatal_error(queue); /* fatal */
+ return -EPROTO;
+ }
+
+ if (queue->data_digest &&
+ nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
+ nvmet_tcp_fatal_error(queue); /* fatal */
+ return -EPROTO;
+ }
+
+ return nvmet_tcp_done_recv_pdu(queue);
+}
+
+static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvmet_tcp_queue *queue = cmd->queue;
+
+ nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
+ queue->offset = 0;
+ queue->left = NVME_TCP_DIGEST_LENGTH;
+ queue->rcv_state = NVMET_TCP_RECV_DDGST;
+}
+
+static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmd;
+ int ret;
+
+ while (msg_data_left(&cmd->recv_msg)) {
+ ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
+ cmd->recv_msg.msg_flags);
+ if (ret <= 0)
+ return ret;
+
+ cmd->pdu_recv += ret;
+ cmd->rbytes_done += ret;
+ }
+
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
+
+ if (cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_tcp_execute_request(cmd);
+
+ nvmet_prepare_receive_pdu(queue);
+ return 0;
+}
+
+static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmd;
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+ .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
+ .iov_len = queue->left
+ };
+
+ ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (unlikely(ret < 0))
+ return ret;
+
+ queue->offset += ret;
+ queue->left -= ret;
+ if (queue->left)
+ return -EAGAIN;
+
+ if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
+ pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
+ queue->idx, cmd->req.cmd->common.command_id,
+ queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
+ le32_to_cpu(cmd->exp_ddgst));
+ nvmet_req_uninit(&cmd->req);
+ nvmet_tcp_free_cmd_buffers(cmd);
+ nvmet_tcp_fatal_error(queue);
+ ret = -EPROTO;
+ goto out;
+ }
+
+ if (cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_tcp_execute_request(cmd);
+
+ ret = 0;
+out:
+ nvmet_prepare_receive_pdu(queue);
+ return ret;
+}
+
+static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
+{
+ int result = 0;
+
+ if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
+ return 0;
+
+ if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
+ result = nvmet_tcp_try_recv_pdu(queue);
+ if (result != 0)
+ goto done_recv;
+ }
+
+ if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
+ result = nvmet_tcp_try_recv_data(queue);
+ if (result != 0)
+ goto done_recv;
+ }
+
+ if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
+ result = nvmet_tcp_try_recv_ddgst(queue);
+ if (result != 0)
+ goto done_recv;
+ }
+
+done_recv:
+ if (result < 0) {
+ if (result == -EAGAIN)
+ return 0;
+ return result;
+ }
+ return 1;
+}
+
+static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
+ int budget, int *recvs)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < budget; i++) {
+ ret = nvmet_tcp_try_recv_one(queue);
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
+ break;
+ }
+ (*recvs)++;
+ }
+done:
+ return ret;
+}
+
+static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
+{
+ spin_lock(&queue->state_lock);
+ if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
+ queue->state = NVMET_TCP_Q_DISCONNECTING;
+ queue_work(nvmet_wq, &queue->release_work);
+ }
+ spin_unlock(&queue->state_lock);
+}
+
+static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
+{
+ queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
+}
+
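+/*
+ * With idle polling enabled, keep io_work scheduled until the queue has
+ * seen no activity for idle_poll_period_usecs.
+ */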
+static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
+ int ops)
+{
+ if (!idle_poll_period_usecs)
+ return false;
+
+ if (ops)
+ nvmet_tcp_arm_queue_deadline(queue);
+
+ return !time_after(jiffies, queue->poll_end);
+}
+
+static void nvmet_tcp_io_work(struct work_struct *w)
+{
+ struct nvmet_tcp_queue *queue =
+ container_of(w, struct nvmet_tcp_queue, io_work);
+ bool pending;
+ int ret, ops = 0;
+
+ do {
+ pending = false;
+
+ ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
+ if (ret > 0)
+ pending = true;
+ else if (ret < 0)
+ return;
+
+ ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
+ if (ret > 0)
+ pending = true;
+ else if (ret < 0)
+ return;
+
+ } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
+
+ /*
+	 * Requeue the worker if the idle deadline period is in progress or any
+ * ops activity was recorded during the do-while loop above.
+ */
+ if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+}
+
+static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
+ struct nvmet_tcp_cmd *c)
+{
+ u8 hdgst = nvmet_tcp_hdgst_len(queue);
+
+ c->queue = queue;
+ c->req.port = queue->port->nport;
+
+ c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->cmd_pdu)
+ return -ENOMEM;
+ c->req.cmd = &c->cmd_pdu->cmd;
+
+ c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->rsp_pdu)
+ goto out_free_cmd;
+ c->req.cqe = &c->rsp_pdu->cqe;
+
+ c->data_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->data_pdu)
+ goto out_free_rsp;
+
+ c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->r2t_pdu)
+ goto out_free_data;
+
+ c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+
+ list_add_tail(&c->entry, &queue->free_list);
+
+ return 0;
+out_free_data:
+ page_frag_free(c->data_pdu);
+out_free_rsp:
+ page_frag_free(c->rsp_pdu);
+out_free_cmd:
+ page_frag_free(c->cmd_pdu);
+ return -ENOMEM;
+}
+
+static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
+{
+ page_frag_free(c->r2t_pdu);
+ page_frag_free(c->data_pdu);
+ page_frag_free(c->rsp_pdu);
+ page_frag_free(c->cmd_pdu);
+}
+
+static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmds;
+ int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
+
+ cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
+ if (!cmds)
+ goto out;
+
+ for (i = 0; i < nr_cmds; i++) {
+ ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
+ if (ret)
+ goto out_free;
+ }
+
+ queue->cmds = cmds;
+
+ return 0;
+out_free:
+ while (--i >= 0)
+ nvmet_tcp_free_cmd(cmds + i);
+ kfree(cmds);
+out:
+ return ret;
+}
+
+static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmds = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++)
+ nvmet_tcp_free_cmd(cmds + i);
+
+ nvmet_tcp_free_cmd(&queue->connect);
+ kfree(cmds);
+}
+
+static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_data_ready = queue->data_ready;
+ sock->sk->sk_state_change = queue->state_change;
+ sock->sk->sk_write_space = queue->write_space;
+ sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+}
+
+static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_req_uninit(&cmd->req);
+ }
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
+ /* failed in connect */
+ nvmet_req_uninit(&queue->connect.req);
+ }
+}
+
+static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_tcp_free_cmd_buffers(cmd);
+ }
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
+}
+
+static void nvmet_tcp_release_queue_work(struct work_struct *w)
+{
+ struct page *page;
+ struct nvmet_tcp_queue *queue =
+ container_of(w, struct nvmet_tcp_queue, release_work);
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+
+ nvmet_tcp_restore_socket_callbacks(queue);
+ cancel_work_sync(&queue->io_work);
+ /* stop accepting incoming data */
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
+
+ nvmet_tcp_uninit_data_in_cmds(queue);
+ nvmet_sq_destroy(&queue->nvme_sq);
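+	/*
+	 * nvmet_sq_destroy() completes any outstanding requests, which may
+	 * queue io_work again; cancel it once more before freeing resources.
+	 */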
+ cancel_work_sync(&queue->io_work);
+ nvmet_tcp_free_cmd_data_in_buffers(queue);
+ sock_release(queue->sock);
+ nvmet_tcp_free_cmds(queue);
+ if (queue->hdr_digest || queue->data_digest)
+ nvmet_tcp_free_crypto(queue);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
+
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ kfree(queue);
+}
+
+static void nvmet_tcp_data_ready(struct sock *sk)
+{
+ struct nvmet_tcp_queue *queue;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (likely(queue))
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvmet_tcp_write_space(struct sock *sk)
+{
+ struct nvmet_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (unlikely(!queue))
+ goto out;
+
+ if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
+ queue->write_space(sk);
+ goto out;
+ }
+
+ if (sk_stream_is_writeable(sk)) {
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvmet_tcp_state_change(struct sock *sk)
+{
+ struct nvmet_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (!queue)
+ goto done;
+
+ switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
+ case TCP_FIN_WAIT1:
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSE:
+ /* FALLTHRU */
+ nvmet_tcp_schedule_release_queue(queue);
+ break;
+ default:
+ pr_warn("queue %d unhandled state %d\n",
+ queue->idx, sk->sk_state);
+ }
+done:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+ struct inet_sock *inet = inet_sk(sock->sk);
+ int ret;
+
+ ret = kernel_getsockname(sock,
+ (struct sockaddr *)&queue->sockaddr);
+ if (ret < 0)
+ return ret;
+
+ ret = kernel_getpeername(sock,
+ (struct sockaddr *)&queue->sockaddr_peer);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * Clean up whatever is sitting in the TCP transmit queue on socket
+ * close. This is done to prevent stale data from being sent should
+ * the network connection be restored before TCP times out.
+ */
+ sock_no_linger(sock->sk);
+
+ if (so_priority > 0)
+ sock_set_priority(sock->sk, so_priority);
+
+ /* Set socket type of service */
+ if (inet->rcv_tos > 0)
+ ip_sock_set_tos(sock->sk, inet->rcv_tos);
+
+ ret = 0;
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (idle_poll_period_usecs)
+ nvmet_tcp_arm_queue_deadline(queue);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ return ret;
+}
+
+static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+ struct socket *newsock)
+{
+ struct nvmet_tcp_queue *queue;
+ int ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
+ INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
+ queue->sock = newsock;
+ queue->port = port;
+ queue->nr_cmds = 0;
+ spin_lock_init(&queue->state_lock);
+ queue->state = NVMET_TCP_Q_CONNECTING;
+ INIT_LIST_HEAD(&queue->free_list);
+ init_llist_head(&queue->resp_list);
+ INIT_LIST_HEAD(&queue->resp_send_list);
+
+ queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
+ if (queue->idx < 0) {
+ ret = queue->idx;
+ goto out_free_queue;
+ }
+
+ ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
+ if (ret)
+ goto out_ida_remove;
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret)
+ goto out_free_connect;
+
+ nvmet_prepare_receive_pdu(queue);
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+
+ ret = nvmet_tcp_set_queue_sock(queue);
+ if (ret)
+ goto out_destroy_sq;
+
+ return 0;
+out_destroy_sq:
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ nvmet_sq_destroy(&queue->nvme_sq);
+out_free_connect:
+ nvmet_tcp_free_cmd(&queue->connect);
+out_ida_remove:
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
+out_free_queue:
+ kfree(queue);
+ return ret;
+}
+
+static void nvmet_tcp_accept_work(struct work_struct *w)
+{
+ struct nvmet_tcp_port *port =
+ container_of(w, struct nvmet_tcp_port, accept_work);
+ struct socket *newsock;
+ int ret;
+
+ while (true) {
+ ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ pr_warn("failed to accept err=%d\n", ret);
+ return;
+ }
+ ret = nvmet_tcp_alloc_queue(port, newsock);
+ if (ret) {
+ pr_err("failed to allocate queue\n");
+ sock_release(newsock);
+ }
+ }
+}
+
+static void nvmet_tcp_listen_data_ready(struct sock *sk)
+{
+ struct nvmet_tcp_port *port;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ port = sk->sk_user_data;
+ if (!port)
+ goto out;
+
+ if (sk->sk_state == TCP_LISTEN)
+ queue_work(nvmet_wq, &port->accept_work);
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int nvmet_tcp_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_tcp_port *port;
+ __kernel_sa_family_t af;
+ int ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto err_port;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto err_port;
+ }
+
+ port->nport = nport;
+ INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
+ if (port->nport->inline_data_size < 0)
+ port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
+
+ ret = sock_create(port->addr.ss_family, SOCK_STREAM,
+ IPPROTO_TCP, &port->sock);
+ if (ret) {
+ pr_err("failed to create a socket\n");
+ goto err_port;
+ }
+
+ port->sock->sk->sk_user_data = port;
+ port->data_ready = port->sock->sk->sk_data_ready;
+ port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
+ sock_set_reuseaddr(port->sock->sk);
+ tcp_sock_set_nodelay(port->sock->sk);
+ if (so_priority > 0)
+ sock_set_priority(port->sock->sk, so_priority);
+
+ ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
+ sizeof(port->addr));
+ if (ret) {
+ pr_err("failed to bind port socket %d\n", ret);
+ goto err_sock;
+ }
+
+ ret = kernel_listen(port->sock, 128);
+ if (ret) {
+ pr_err("failed to listen %d on port sock\n", ret);
+ goto err_sock;
+ }
+
+ nport->priv = port;
+ pr_info("enabling port %d (%pISpc)\n",
+ le16_to_cpu(nport->disc_addr.portid), &port->addr);
+
+ return 0;
+
+err_sock:
+ sock_release(port->sock);
+err_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->port == port)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
+static void nvmet_tcp_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_tcp_port *port = nport->priv;
+
+ write_lock_bh(&port->sock->sk->sk_callback_lock);
+ port->sock->sk->sk_data_ready = port->data_ready;
+ port->sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&port->sock->sk->sk_callback_lock);
+ cancel_work_sync(&port->accept_work);
+ /*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet.
+ */
+ nvmet_tcp_destroy_port_queues(port);
+
+ sock_release(port->sock);
+ kfree(port);
+}
+
+static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->nvme_sq.ctrl == ctrl)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
+static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+{
+ struct nvmet_tcp_queue *queue =
+ container_of(sq, struct nvmet_tcp_queue, nvme_sq);
+
+ if (sq->qid == 0) {
+ /* Let inflight controller teardown complete */
+ flush_workqueue(nvmet_wq);
+ }
+
+ queue->nr_cmds = sq->size * 2;
+ if (nvmet_tcp_alloc_cmds(queue))
+ return NVME_SC_INTERNAL;
+ return 0;
+}
+
+static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
+ struct nvmet_port *nport, char *traddr)
+{
+ struct nvmet_tcp_port *port = nport->priv;
+
+ if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
+ struct nvmet_tcp_cmd *cmd =
+ container_of(req, struct nvmet_tcp_cmd, req);
+ struct nvmet_tcp_queue *queue = cmd->queue;
+
+ sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
+ } else {
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ }
+}
+
+static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_TCP,
+ .msdbd = 1,
+ .add_port = nvmet_tcp_add_port,
+ .remove_port = nvmet_tcp_remove_port,
+ .queue_response = nvmet_tcp_queue_response,
+ .delete_ctrl = nvmet_tcp_delete_ctrl,
+ .install_queue = nvmet_tcp_install_queue,
+ .disc_traddr = nvmet_tcp_disc_port_addr,
+};
+
+static int __init nvmet_tcp_init(void)
+{
+ int ret;
+
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!nvmet_tcp_wq)
+ return -ENOMEM;
+
+ ret = nvmet_register_transport(&nvmet_tcp_ops);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ destroy_workqueue(nvmet_tcp_wq);
+ return ret;
+}
+
+static void __exit nvmet_tcp_exit(void)
+{
+ struct nvmet_tcp_queue *queue;
+
+ nvmet_unregister_transport(&nvmet_tcp_ops);
+
+ flush_workqueue(nvmet_wq);
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ flush_workqueue(nvmet_wq);
+
+ destroy_workqueue(nvmet_tcp_wq);
+}
+
+module_init(nvmet_tcp_init);
+module_exit(nvmet_tcp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
new file mode 100644
index 0000000000..bff454d462
--- /dev/null
+++ b/drivers/nvme/target/trace.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express target device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sel = cdw10[1] & 0x7;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 mndw = get_unaligned_le32(cdw10 + 8);
+ u16 rl = get_unaligned_le16(cdw10 + 12);
+ u8 atype = cdw10[15];
+
+ trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
+ slba, mndw, rl, atype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sv = cdw10[3] & 0x8;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
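+	/*
+	 * cdw10 points at CDW10..CDW15 (24 bytes): SLBA spans CDW10/11, the
+	 * length and control fields live in CDW12, DSM in CDW13 and the
+	 * reference tag in CDW14.
+	 */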
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_identify:
+ return nvmet_trace_admin_identify(p, cdw10);
+ case nvme_admin_set_features:
+ return nvmet_trace_admin_set_features(p, cdw10);
+ case nvme_admin_get_features:
+ return nvmet_trace_admin_get_features(p, cdw10);
+ case nvme_admin_get_lba_status:
+ return nvmet_trace_get_lba_status(p, cdw10);
+ default:
+ return nvmet_trace_common(p, cdw10);
+ }
+}
+
+const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvmet_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvmet_trace_dsm(p, cdw10);
+ default:
+ return nvmet_trace_common(p, cdw10);
+ }
+}
+
+static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
+ u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+ u64 value = get_unaligned_le64(spc + 8);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
+ attrib, ofst, value);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
+ u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 recfmt = get_unaligned_le16(spc);
+ u16 qid = get_unaligned_le16(spc + 2);
+ u16 sqsize = get_unaligned_le16(spc + 4);
+ u8 cattr = spc[6];
+ u32 kato = get_unaligned_le32(spc + 8);
+
+ trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
+ recfmt, qid, sqsize, cattr, kato);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
+ u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "specific=%*ph", 24, spc);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
+ u8 fctype, u8 *spc)
+{
+ switch (fctype) {
+ case nvme_fabrics_type_property_set:
+ return nvmet_trace_fabrics_property_set(p, spc);
+ case nvme_fabrics_type_connect:
+ return nvmet_trace_fabrics_connect(p, spc);
+ case nvme_fabrics_type_property_get:
+ return nvmet_trace_fabrics_property_get(p, spc);
+ default:
+ return nvmet_trace_fabrics_common(p, spc);
+ }
+}
+
+const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (*name)
+ trace_seq_printf(p, "disk=%s, ", name);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ /*
+	 * XXX: We don't know the controller instance until the connect
+	 * command has been executed, since the admin queue connect command
+	 * does not carry a cntlid (it is allocated by that very command).
+	 * For I/O queues the controller instance is resolved from the extra
+	 * data of the connect command.
+	 * If the extra data of the connect command becomes available at this
+	 * stage, this print statement can be updated.
+ */
+ if (ctrl)
+ trace_seq_printf(p, "%d", ctrl->cntlid);
+ else
+ trace_seq_printf(p, "_");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
new file mode 100644
index 0000000000..974d99d47f
--- /dev/null
+++ b/drivers/nvme/target/trace.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NVM Express target device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This is entirely based on drivers/nvme/host/trace.h
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvmet
+
+#if !defined(_TRACE_NVMET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVMET_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvmet.h"
+
+const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ u8 *spc);
+
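+/*
+ * Fabrics commands carry their own command set selected by fctype; for
+ * everything else, qid 0 is the admin queue and any other qid is an I/O
+ * queue.
+ */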
+#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \
+ ((opcode) == nvme_fabrics_command ? \
+ nvmet_trace_parse_fabrics_cmd(p, fctype, cdw10) : \
+ (qid ? \
+ nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
+
+const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
+#define __print_ctrl_name(ctrl) \
+ nvmet_trace_ctrl_name(p, ctrl)
+
+const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
+#define __print_disk_name(name) \
+ nvmet_trace_disk_name(p, name)
+
+#ifndef TRACE_HEADER_MULTI_READ
+static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+{
+ return req->sq->ctrl;
+}
+
+static inline void __assign_req_name(char *name, struct nvmet_req *req)
+{
+ if (!req->ns) {
+ memset(name, 0, DISK_NAME_LEN);
+ return;
+ }
+
+ strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
+}
+#endif
+
+TRACE_EVENT(nvmet_req_init,
+ TP_PROTO(struct nvmet_req *req, struct nvme_command *cmd),
+ TP_ARGS(req, cmd),
+ TP_STRUCT__entry(
+ __field(struct nvme_command *, cmd)
+ __field(struct nvmet_ctrl *, ctrl)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(u16, cid)
+ __field(u8, opcode)
+ __field(u8, fctype)
+ __field(u8, flags)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->ctrl = nvmet_req_to_ctrl(req);
+ __assign_req_name(__entry->disk, req);
+ __entry->qid = req->sq->qid;
+ __entry->cid = cmd->common.command_id;
+ __entry->opcode = cmd->common.opcode;
+ __entry->fctype = cmd->fabrics.fctype;
+ __entry->flags = cmd->common.flags;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, &cmd->common.cdws,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
+ "meta=%#llx, cmd=(%s, %s)",
+ __print_ctrl_name(__entry->ctrl),
+ __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->nsid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->qid, __entry->opcode,
+ __entry->fctype),
+ parse_nvme_cmd(__entry->qid, __entry->opcode,
+ __entry->fctype, __entry->cdw10))
+);
+
+TRACE_EVENT(nvmet_req_complete,
+ TP_PROTO(struct nvmet_req *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(struct nvmet_ctrl *, ctrl)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->qid = req->cq->qid;
+ __entry->cid = req->cqe->command_id;
+ __entry->result = le64_to_cpu(req->cqe->result.u64);
+ __entry->status = le16_to_cpu(req->cqe->status) >> 1;
+ __assign_req_name(__entry->disk, req);
+ ),
+ TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
+ __print_ctrl_name(__entry->ctrl),
+ __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->result, __entry->status)
+
+);
+
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvmet_async_event,
+ TP_PROTO(struct nvmet_ctrl *ctrl, __le32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->cntlid;
+ __entry->result = (le32_to_cpu(result) & 0xff00) >> 8;
+ ),
+ TP_printk("nvmet%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result,
+ aer_name(NVME_AER_NOTICE_NS_CHANGED),
+ aer_name(NVME_AER_NOTICE_ANA),
+ aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+#undef aer_name
+
+#endif /* _TRACE_NVMET_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
new file mode 100644
index 0000000000..5b5c1e4817
--- /dev/null
+++ b/drivers/nvme/target/zns.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe ZNS-ZBD command implementation.
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/nvme.h>
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+/*
+ * We report a Memory Page Size Minimum (MPSMIN) of 0 for the target
+ * controller; nvme_enable_ctrl() adds 12 to it, which results in a
+ * page_shift value of 12 (2^12 = 4k). Use a shift of 12 when calculating
+ * the ZASL.
+ */
+#define NVMET_MPSMIN_SHIFT 12
+
+static inline u8 nvmet_zasl(unsigned int zone_append_sects)
+{
+ /*
+ * Zone Append Size Limit (zasl) is expressed as a power of 2 value
+ * with the minimum memory page size (i.e. 12) as unit.
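+	 * For example, 65536 zone append sectors (32 MiB in 512-byte sectors)
+	 * shifted right by 3 gives 8192 pages, so zasl = ilog2(8192) = 13.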
+ */
+ return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
+}
+
+static int validate_conv_zones_cb(struct blk_zone *z,
+ unsigned int i, void *data)
+{
+ if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
+{
+ u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
+ struct gendisk *bd_disk = ns->bdev->bd_disk;
+ int ret;
+
+ if (ns->subsys->zasl) {
+ if (ns->subsys->zasl > zasl)
+ return false;
+ }
+ ns->subsys->zasl = zasl;
+
+ /*
+ * Generic zoned block devices may have a smaller last zone which is
+	 * not supported by ZNS. Exclude zoned drives that have such a smaller
+	 * last zone.
+ */
+ if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
+ return false;
+ /*
+ * ZNS does not define a conventional zone type. If the underlying
+ * device has a bitmap set indicating the existence of conventional
+ * zones, reject the device. Otherwise, use report zones to detect if
+ * the device has conventional zones.
+ */
+ if (ns->bdev->bd_disk->conv_zones_bitmap)
+ return false;
+
+ ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
+ validate_conv_zones_cb, NULL);
+ if (ret < 0)
+ return false;
+
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ return true;
+}
+
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
+{
+ u8 zasl = req->sq->ctrl->subsys->zasl;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl_zns *id;
+ u16 status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ if (ctrl->ops->get_mdts)
+ id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
+ else
+ id->zasl = zasl;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
+{
+ struct nvme_id_ns_zns *id_zns = NULL;
+ u64 zsze;
+ u16 status;
+ u32 mar, mor;
+
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
+ if (!id_zns) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto done;
+
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
+
+ if (!bdev_is_zoned(req->ns->bdev)) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ goto out;
+ }
+
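+	/* Zone size: convert from 512-byte sectors to namespace logical blocks */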
+ zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ req->ns->blksize_shift;
+ id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+
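+	/*
+	 * MOR/MAR are 0's based values (0 means one open/active zone), while
+	 * the block layer limits are plain counts with 0 meaning "no limit",
+	 * which is reported as 0xffffffff.
+	 */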
+ mor = bdev_max_open_zones(req->ns->bdev);
+ if (!mor)
+ mor = U32_MAX;
+ else
+ mor--;
+ id_zns->mor = cpu_to_le32(mor);
+
+ mar = bdev_max_active_zones(req->ns->bdev);
+ if (!mar)
+ mar = U32_MAX;
+ else
+ mar--;
+ id_zns->mar = cpu_to_le32(mar);
+
+done:
+ status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+out:
+ kfree(id_zns);
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
+{
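+	/*
+	 * SLBA is given in namespace LBAs; NUMD is a 0's based dword count,
+	 * converted to a byte count here.
+	 */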
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ }
+
+ if (out_bufsize < sizeof(struct nvme_zone_report)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.pr) {
+ case 0:
+ case 1:
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.zrasf) {
+ case NVME_ZRASF_ZONE_REPORT_ALL:
+ case NVME_ZRASF_ZONE_STATE_EMPTY:
+ case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_CLOSED:
+ case NVME_ZRASF_ZONE_STATE_FULL:
+ case NVME_ZRASF_ZONE_STATE_READONLY:
+ case NVME_ZRASF_ZONE_STATE_OFFLINE:
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+struct nvmet_report_zone_data {
+ struct nvmet_req *req;
+ u64 out_buf_offset;
+ u64 out_nr_zones;
+ u64 nr_zones;
+ u8 zrasf;
+};
+
+static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ static const unsigned int nvme_zrasf_to_blk_zcond[] = {
+ [NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY,
+ [NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED,
+ [NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
+ [NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL,
+ [NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE,
+ };
+ struct nvmet_report_zone_data *rz = d;
+
+ if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
+ z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
+ return 0;
+
+ if (rz->nr_zones < rz->out_nr_zones) {
+ struct nvme_zone_descriptor zdesc = { };
+ u16 status;
+
+ zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
+ zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
+ zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
+ zdesc.za = z->reset ? 1 << 2 : 0;
+ zdesc.zs = z->cond << 4;
+ zdesc.zt = z->type;
+
+ status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
+ sizeof(zdesc));
+ if (status)
+ return -EINVAL;
+
+ rz->out_buf_offset += sizeof(zdesc);
+ }
+
+ rz->nr_zones++;
+
+ return 0;
+}
+
+static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
+{
+ unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+
+ return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect);
+}
+
+static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
+{
+ if (bufsize <= sizeof(struct nvme_zone_report))
+ return 0;
+
+ return (bufsize - sizeof(struct nvme_zone_report)) /
+ sizeof(struct nvme_zone_descriptor);
+}
+
+static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ __le64 nr_zones;
+ u16 status;
+ int ret;
+ struct nvmet_report_zone_data rz_data = {
+ .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
+		/* leave space for the report zone header */
+ .out_buf_offset = sizeof(struct nvme_zone_report),
+ .zrasf = req->cmd->zmr.zrasf,
+ .nr_zones = 0,
+ .req = req,
+ };
+
+ status = nvmet_bdev_validate_zone_mgmt_recv(req);
+ if (status)
+ goto out;
+
+ if (!req_slba_nr_zones) {
+ status = NVME_SC_SUCCESS;
+ goto out;
+ }
+
+ ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
+ nvmet_bdev_report_zone_cb, &rz_data);
+ if (ret < 0) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /*
+	 * When the partial bit is set, nr_zones must indicate the number of
+	 * zone descriptors actually transferred.
+ */
+ if (req->cmd->zmr.pr)
+ rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);
+
+ nr_zones = cpu_to_le64(rz_data.nr_zones);
+ status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static inline enum req_op zsa_req_op(u8 zsa)
+{
+ switch (zsa) {
+ case NVME_ZONE_OPEN:
+ return REQ_OP_ZONE_OPEN;
+ case NVME_ZONE_CLOSE:
+ return REQ_OP_ZONE_CLOSE;
+ case NVME_ZONE_FINISH:
+ return REQ_OP_ZONE_FINISH;
+ case NVME_ZONE_RESET:
+ return REQ_OP_ZONE_RESET;
+ default:
+ return REQ_OP_LAST;
+ }
+}
+
+static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
+{
+ switch (ret) {
+ case 0:
+ return NVME_SC_SUCCESS;
+ case -EINVAL:
+ case -EIO:
+ return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ default:
+ return NVME_SC_INTERNAL;
+ }
+}
+
+struct nvmet_zone_mgmt_send_all_data {
+ unsigned long *zbitmap;
+ struct nvmet_req *req;
+};
+
+static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ struct nvmet_zone_mgmt_send_all_data *data = d;
+
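+	/*
+	 * Mirror the "select all" semantics: Open All acts on closed zones,
+	 * Close All on implicitly/explicitly open zones, and Finish All on
+	 * open or closed zones; zones in any other condition are skipped.
+	 */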
+ switch (zsa_req_op(data->req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_OPEN:
+ switch (z->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_FINISH:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set_bit(i, data->zbitmap);
+
+ return 0;
+}
+
+static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
+{
+ struct block_device *bdev = req->ns->bdev;
+ unsigned int nr_zones = bdev_nr_zones(bdev);
+ struct bio *bio = NULL;
+ sector_t sector = 0;
+ int ret;
+ struct nvmet_zone_mgmt_send_all_data d = {
+ .req = req,
+ };
+
+ d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
+ GFP_NOIO, bdev->bd_disk->node_id);
+ if (!d.zbitmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Scan and build bitmap of the eligible zones */
+ ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
+ if (ret != nr_zones) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out;
+ } else {
+ /* We scanned all the zones */
+ ret = 0;
+ }
+
+ while (sector < bdev_nr_sectors(bdev)) {
+ if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
+ bio = blk_next_bio(bio, bdev, 0,
+ zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
+ GFP_KERNEL);
+ bio->bi_iter.bi_sector = sector;
+ /* This may take a while, so be nice to others */
+ cond_resched();
+ }
+ sector += bdev_zone_sectors(bdev);
+ }
+
+ if (bio) {
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+
+out:
+ kfree(d.zbitmap);
+
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+}
+
+static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
+{
+ int ret;
+
+ switch (zsa_req_op(req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_RESET:
+ ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
+ get_capacity(req->ns->bdev->bd_disk),
+ GFP_KERNEL);
+ if (ret < 0)
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return nvmet_bdev_zone_mgmt_emulate_all(req);
+ default:
+ /* this is needed to quiet compiler warning */
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
+ enum req_op op = zsa_req_op(req->cmd->zms.zsa);
+ struct block_device *bdev = req->ns->bdev;
+ sector_t zone_sectors = bdev_zone_sectors(bdev);
+ u16 status = NVME_SC_SUCCESS;
+ int ret;
+
+ if (op == REQ_OP_LAST) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ goto out;
+ }
+
+	/* when the select all bit is set, the slba field is ignored */
+ if (req->cmd->zms.select_all) {
+ status = nvmet_bdev_execute_zmgmt_send_all(req);
+ goto out;
+ }
+
+ if (sect >= get_capacity(bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (zone_sectors - 1)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ if (ret < 0)
+ status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ if (bio->bi_status == BLK_STS_OK) {
+ req->cqe->result.u64 =
+ nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
+ }
+
+ nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
+ nvmet_req_bio_put(req, bio);
+}
+
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ u16 status = NVME_SC_SUCCESS;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ struct bio *bio;
+ int sg_cnt;
+
+	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+ return;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->z.inline_bio;
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), opf);
+ } else {
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
+ }
+
+ bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
+ bio->bi_iter.bi_sector = sect;
+ bio->bi_private = req;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ bio->bi_opf |= REQ_FUA;
+
+ for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
+ struct page *p = sg_page(sg);
+ unsigned int l = sg->length;
+ unsigned int o = sg->offset;
+ unsigned int ret;
+
+ ret = bio_add_zone_append_page(bio, p, l, o);
+ if (ret != sg->length) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_bio;
+ }
+ total_len += sg->length;
+ }
+
+ if (total_len != nvmet_rw_data_len(req)) {
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ goto out_put_bio;
+ }
+
+ submit_bio(bio);
+ return;
+
+out_put_bio:
+ nvmet_req_bio_put(req, bio);
+out:
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_zone_append:
+ req->execute = nvmet_bdev_execute_zone_append;
+ return 0;
+ case nvme_cmd_zone_mgmt_recv:
+ req->execute = nvmet_bdev_execute_zone_mgmt_recv;
+ return 0;
+ case nvme_cmd_zone_mgmt_send:
+ req->execute = nvmet_bdev_execute_zone_mgmt_send;
+ return 0;
+ default:
+ return nvmet_bdev_parse_io_cmd(req);
+ }
+}
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
new file mode 100644
index 0000000000..5bc9c4874f
--- /dev/null
+++ b/drivers/nvmem/Kconfig
@@ -0,0 +1,420 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NVMEM
+ bool "NVMEM Support"
+ help
+	  Support for NVMEM (Non-Volatile Memory) devices like EEPROM, eFuses...
+
+ This framework is designed to provide a generic interface to NVMEM
+ from both the Linux Kernel and the userspace.
+
+ If unsure, say no.
+
+if NVMEM
+
+config NVMEM_SYSFS
+ bool "/sys/bus/nvmem/devices/*/nvmem (sysfs interface)"
+ depends on SYSFS
+ default y
+ help
+ Say Y here to add a sysfs interface for NVMEM.
+
+ This interface is mostly used by userspace applications to
+ read/write directly into nvmem.
+
+# Layouts
+
+source "drivers/nvmem/layouts/Kconfig"
+
+# Devices
+
+config NVMEM_APPLE_EFUSES
+ tristate "Apple eFuse support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Say y here to enable support for reading eFuses on Apple SoCs
+	  such as the M1. These are used, e.g., to store factory-programmed
+	  calibration data required for the PCIe or the USB-C PHY.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-apple-efuses.
+
+config NVMEM_BCM_OCOTP
+ tristate "Broadcom On-Chip OTP Controller support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM
+ default ARCH_BCM_IPROC
+ help
+ Say y here to enable read/write access to the Broadcom OTP
+ controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-bcm-ocotp.
+
+config NVMEM_BRCM_NVRAM
+ tristate "Broadcom's NVRAM support"
+ depends on ARCH_BCM_5301X || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_NET_UTILS
+ help
+ This driver provides support for Broadcom's NVRAM that can be accessed
+ using I/O mapping.
+
+config NVMEM_IMX_IIM
+ tristate "i.MX IC Identification Module support"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This is a driver for the IC Identification Module (IIM) available on
+ i.MX SoCs, providing access to 4 Kbits of programmable
+ eFuses.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-imx-iim.
+
+config NVMEM_IMX_OCOTP
+ tristate "i.MX 6/7/8 On-Chip OTP Controller support"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a driver for the On-Chip OTP Controller (OCOTP) available on
+ i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
+ eFuses.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-imx-ocotp.
+
+config NVMEM_IMX_OCOTP_ELE
+ tristate "i.MX On-Chip OTP Controller support"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+	  This is a driver for the On-Chip OTP Controller (OCOTP)
+	  available on i.MX SoCs that have an ELE.
+
+config NVMEM_IMX_OCOTP_SCU
+ tristate "i.MX8 SCU On-Chip OTP Controller support"
+ depends on IMX_SCU
+ depends on HAVE_ARM_SMCCC
+ help
+ This is a driver for the SCU On-Chip OTP Controller (OCOTP)
+ available on i.MX8 SoCs.
+
+config NVMEM_JZ4780_EFUSE
+ tristate "JZ4780 EFUSE Memory Support"
+ depends on MACH_INGENIC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ select REGMAP_MMIO
+ help
+ Say Y here to include support for JZ4780 efuse memory found on
+ all JZ4780 SoC based devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_jz4780_efuse.
+
+config NVMEM_LAN9662_OTPC
+ tristate "Microchip LAN9662 OTP controller support"
+ depends on SOC_LAN966 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver enables the OTP controller available on Microchip LAN9662
+ SoCs. It controls the access to the OTP memory connected to it.
+
+config NVMEM_LAYERSCAPE_SFP
+ tristate "Layerscape SFP (Security Fuse Processor) support"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+	  This driver provides support to read the eFuses on Freescale
+	  Layerscape SoCs. For example, the vendor provides a per-part
+	  unique ID there.
+
+ This driver can also be built as a module. If so, the module
+ will be called layerscape-sfp.
+
+config NVMEM_LPC18XX_EEPROM
+ tristate "NXP LPC18XX EEPROM Memory Support"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say Y here to include support for NXP LPC18xx EEPROM memory found in
+ NXP LPC185x/3x and LPC435x/3x/2x/1x devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_lpc18xx_eeprom.
+
+config NVMEM_LPC18XX_OTP
+ tristate "NXP LPC18XX OTP Memory Support"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say Y here to include support for NXP LPC18xx OTP memory found on
+ all LPC18xx and LPC43xx devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_lpc18xx_otp.
+
+config NVMEM_MESON_EFUSE
+ tristate "Amlogic Meson GX eFuse Support"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson GX SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_efuse.
+
+config NVMEM_MESON_MX_EFUSE
+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_mx_efuse.
+
+config NVMEM_MICROCHIP_OTPC
+ tristate "Microchip OTPC support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+	  This driver enables the OTP controller available on Microchip SAMA7G5
+ SoCs. It controls the access to the OTP memory connected to it.
+
+config NVMEM_MTK_EFUSE
+ tristate "Mediatek SoCs EFUSE support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a driver to access hardware-related data like sensor
+	  calibration, HDMI impedance, etc.
+
+ This driver can also be built as a module. If so, the module
+ will be called efuse-mtk.
+
+config NVMEM_MXS_OCOTP
+ tristate "Freescale MXS On-Chip OTP Memory Support"
+ depends on ARCH_MXS || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ If you say Y here, you will get readonly access to the
+ One Time Programmable memory pages that are stored
+ on the Freescale i.MX23/i.MX28 processor.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-mxs-ocotp.
+
+config NVMEM_NINTENDO_OTP
+ tristate "Nintendo Wii and Wii U OTP Support"
+ depends on WII || COMPILE_TEST
+ help
+ This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
+
+ This memory contains common and per-console keys, signatures and
+ related data required to access peripherals.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-nintendo-otp.
+
+config NVMEM_QCOM_QFPROM
+ tristate "QCOM QFPROM Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  Say y here to enable QFPROM support. The QFPROM provides access
+	  functions for QFPROM data to the rest of the drivers via the nvmem
+	  interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_qfprom.
+
+config NVMEM_QCOM_SEC_QFPROM
+ tristate "QCOM SECURE QFPROM Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ select QCOM_SCM
+ help
+	  Say y here to enable secure QFPROM support. The secure QFPROM provides
+	  access functions for QFPROM data to the rest of the drivers via the
+	  nvmem interface.
+
+ This driver can also be built as a module. If so, the module will be called
+ nvmem_sec_qfprom.
+
+config NVMEM_RAVE_SP_EEPROM
+ tristate "Rave SP EEPROM Support"
+ depends on RAVE_SP_CORE
+ help
+ Say y here to enable Rave SP EEPROM support.
+
+config NVMEM_RMEM
+ tristate "Reserved Memory Based Driver Support"
+ depends on HAS_IOMEM
+ help
+ This driver maps reserved memory into an nvmem device. It might be
+ useful to expose information left by firmware in memory.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-rmem.
+
+config NVMEM_ROCKCHIP_EFUSE
+ tristate "Rockchip eFuse Support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of Rockchip SoCs
+	  from the eFuse, such as cpu-leakage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_rockchip_efuse.
+
+config NVMEM_ROCKCHIP_OTP
+ tristate "Rockchip OTP controller support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of Rockchip SoCs
+	  from the OTP, such as cpu-leakage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_rockchip_otp.
+
+config NVMEM_SC27XX_EFUSE
+ tristate "Spreadtrum SC27XX eFuse Support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SC27XX PMICs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sc27xx-efuse.
+
+config NVMEM_SNVS_LPGPR
+ tristate "Support for Low Power General Purpose Register"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+	  This is a driver for the Low Power General Purpose Register (LPGPR)
+	  available in the Secure Non-Volatile Storage (SNVS) block of i.MX6
+	  and i.MX7 SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-snvs-lpgpr.
+
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+ This driver supports the Shared Direct Access Memory Module on
+ Qualcomm Technologies, Inc. PMICs. It provides the clients
+ an interface to read/write to the SDAM module's shared memory.
+
+config NVMEM_SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SoCs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
+
+config NVMEM_STM32_BSEC_OPTEE_TA
+ def_bool NVMEM_STM32_ROMEM && OPTEE
+ help
+ Say y here to enable the accesses to STM32MP SoC OTPs by the OP-TEE
+ trusted application STM32MP BSEC.
+
+	  This library is used by the stm32-romem driver or included in the
+	  module called nvmem-stm32-romem.
+
+config NVMEM_STM32_ROMEM
+ tristate "STMicroelectronics STM32 factory-programmed memory support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OPTEE || !OPTEE
+ help
+ Say y here to enable read-only access for STMicroelectronics STM32
+ factory-programmed memory area.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-stm32-romem.
+
+config NVMEM_SUNPLUS_OCOTP
+ tristate "Sunplus SoC OTP support"
+ depends on SOC_SP7021 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a driver for the On-chip OTP controller (OCOTP) available
+ on Sunplus SoCs. It provides access to 128 bytes of one-time
+ programmable eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sunplus-ocotp.
+
+config NVMEM_SUNXI_SID
+ tristate "Allwinner SoCs SID support"
+ depends on ARCH_SUNXI
+ help
+ This is a driver for the 'security ID' available on various Allwinner
+ devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_sunxi_sid.
+
+config NVMEM_U_BOOT_ENV
+ tristate "U-Boot environment variables support"
+ depends on OF && MTD
+ select CRC32
+ select GENERIC_NET_UTILS
+ help
+ U-Boot stores its setup as environment variables. This driver adds
+ support for verifying & exporting such data. It also exposes variables
+ as NVMEM cells so they can be referenced by other drivers.
+
+	  Currently this driver works only with env variables on top of MTD.
+
+	  If compiled as a module, it will be called nvmem_u-boot-env.
+
+config NVMEM_UNIPHIER_EFUSE
+ tristate "UniPhier SoCs eFuse support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of UniPhier SoCs
+	  from the eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-uniphier-efuse.
+
+config NVMEM_VF610_OCOTP
+ tristate "VF610 SoC OCOTP support"
+ depends on SOC_VF610 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a driver for the 'OCOTP' peripheral available on Vybrid
+ devices like VF5xx and VF6xx.
+
+	  This driver can also be built as a module. If so, the module will
+ be called nvmem-vf610-ocotp.
+
+config NVMEM_ZYNQMP
+ tristate "Xilinx ZYNQMP SoC nvmem firmware support"
+ depends on ARCH_ZYNQMP
+ help
+	  This is a driver to access hardware-related data like
+	  SoC revision, IDCODE, etc. by using the firmware
+	  interface.
+
+ If sure, say yes. If unsure, say no.
+
+config NVMEM_QORIQ_EFUSE
+ tristate "NXP QorIQ eFuse support"
+ depends on PPC_85xx || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver provides read support for the eFuses (SFP) on NXP QorIQ
+	  series SoCs. This includes secure boot settings, the globally unique
+ NXP ID 'FUIDR' and the OEM unique ID 'OUIDR'.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_qoriq_efuse.
+
+endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
new file mode 100644
index 0000000000..423baf0895
--- /dev/null
+++ b/drivers/nvmem/Makefile
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for nvmem drivers.
+#
+
+obj-$(CONFIG_NVMEM) += nvmem_core.o
+nvmem_core-y := core.o
+obj-y += layouts/
+
+# Devices
+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
+nvmem-apple-efuses-y := apple-efuses.o
+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y := bcm-ocotp.o
+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
+nvmem_brcm_nvram-y := brcm_nvram.o
+obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
+nvmem-imx-iim-y := imx-iim.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
+nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP_ELE) += nvmem-imx-ocotp-ele.o
+nvmem-imx-ocotp-ele-y := imx-ocotp-ele.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
+nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
+obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
+nvmem_jz4780_efuse-y := jz4780-efuse.o
+obj-$(CONFIG_NVMEM_LAN9662_OTPC) += nvmem-lan9662-otpc.o
+nvmem-lan9662-otpc-y := lan9662-otpc.o
+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
+nvmem-layerscape-sfp-y := layerscape-sfp.o
+obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y := lpc18xx_otp.o
+obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
+nvmem-microchip-otpc-y := microchip-otpc.o
+obj-$(CONFIG_NVMEM_MTK_EFUSE) += nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y := mtk-efuse.o
+obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
+nvmem-mxs-ocotp-y := mxs-ocotp.o
+obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o
+nvmem-nintendo-otp-y := nintendo-otp.o
+obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o
+nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_QCOM_SEC_QFPROM) += nvmem_sec_qfprom.o
+nvmem_sec_qfprom-y := sec-qfprom.o
+obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
+nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
+obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
+nvmem-rmem-y := rmem.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
+nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
+obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
+nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
+obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
+obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
+nvmem_stm32_romem-y := stm32-romem.o
+nvmem_stm32_romem-$(CONFIG_NVMEM_STM32_BSEC_OPTEE_TA) += stm32-bsec-optee-ta.o
+obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
+nvmem_sunplus_ocotp-y := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o
+nvmem_u-boot-env-y := u-boot-env.o
+obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
+obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
+nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_NVMEM_QORIQ_EFUSE) += nvmem-qoriq-efuse.o
+nvmem-qoriq-efuse-y := qoriq-efuse.o
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
new file mode 100644
index 0000000000..9b7c871021
--- /dev/null
+++ b/drivers/nvmem/apple-efuses.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC eFuse driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct apple_efuses_priv {
+ void __iomem *fuses;
+};
+
+static int apple_efuses_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct apple_efuses_priv *priv = context;
+ u32 *dst = val;
+
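+	/* Copy whole 32-bit fuse words; any trailing partial word is left untouched */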
+ while (bytes >= sizeof(u32)) {
+ *dst++ = readl_relaxed(priv->fuses + offset);
+ bytes -= sizeof(u32);
+ offset += sizeof(u32);
+ }
+
+ return 0;
+}
+
+static int apple_efuses_probe(struct platform_device *pdev)
+{
+ struct apple_efuses_priv *priv;
+ struct resource *res;
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
+ .read_only = true,
+ .reg_read = apple_efuses_read,
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .name = "apple_efuses_nvmem",
+ .id = NVMEM_DEVID_AUTO,
+ .root_only = true,
+ };
+
+ priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->fuses))
+ return PTR_ERR(priv->fuses);
+
+ config.priv = priv;
+ config.size = resource_size(res);
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
+}
+
+static const struct of_device_id apple_efuses_of_match[] = {
+ { .compatible = "apple,efuses", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, apple_efuses_of_match);
+
+static struct platform_driver apple_efuses_driver = {
+ .driver = {
+ .name = "apple_efuses",
+ .of_match_table = apple_efuses_of_match,
+ },
+ .probe = apple_efuses_probe,
+};
+
+module_platform_driver(apple_efuses_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
new file mode 100644
index 0000000000..2490f44caa
--- /dev/null
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2016 Broadcom
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/*
+ * # of tries for OTP Status. The time to execute a command varies. The slowest
+ * commands are writes which also vary based on the # of bits turned on. Writing
+ * 0xffffffff takes ~3800 us.
+ */
+#define OTPC_RETRIES 5000
+
+/* Sequence to enable OTP program */
+#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd }
+
+/* OTPC Commands */
+#define OTPC_CMD_READ 0x0
+#define OTPC_CMD_OTP_PROG_ENABLE 0x2
+#define OTPC_CMD_OTP_PROG_DISABLE 0x3
+#define OTPC_CMD_PROGRAM 0x8
+
+/* OTPC Status Bits */
+#define OTPC_STAT_CMD_DONE BIT(1)
+#define OTPC_STAT_PROG_OK BIT(2)
+
+/* OTPC register definition */
+#define OTPC_MODE_REG_OFFSET 0x0
+#define OTPC_MODE_REG_OTPC_MODE 0
+#define OTPC_COMMAND_OFFSET 0x4
+#define OTPC_COMMAND_COMMAND_WIDTH 6
+#define OTPC_CMD_START_OFFSET 0x8
+#define OTPC_CMD_START_START 0
+#define OTPC_CPU_STATUS_OFFSET 0xc
+#define OTPC_CPUADDR_REG_OFFSET 0x28
+#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
+#define OTPC_CPU_WRITE_REG_OFFSET 0x2c
+
+#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
+#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
+
+
+struct otpc_map {
+ /* in words. */
+ u32 otpc_row_size;
+ /* 128 bit row / 4 words support. */
+ u16 data_r_offset[4];
+ /* 128 bit row / 4 words support. */
+ u16 data_w_offset[4];
+};
+
+static struct otpc_map otp_map = {
+ .otpc_row_size = 1,
+ .data_r_offset = {0x10},
+ .data_w_offset = {0x2c},
+};
+
+static struct otpc_map otp_map_v2 = {
+ .otpc_row_size = 2,
+ .data_r_offset = {0x10, 0x5c},
+ .data_w_offset = {0x2c, 0x64},
+};
+
+struct otpc_priv {
+ struct device *dev;
+ void __iomem *base;
+ const struct otpc_map *map;
+ struct nvmem_config *config;
+};
+
+static inline void set_command(void __iomem *base, u32 command)
+{
+ writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET);
+}
+
+static inline void set_cpu_address(void __iomem *base, u32 addr)
+{
+ writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET);
+}
+
+static inline void set_start_bit(void __iomem *base)
+{
+ writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void reset_start_bit(void __iomem *base)
+{
+ writel(0, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void write_cpu_data(void __iomem *base, u32 value)
+{
+ writel(value, base + OTPC_CPU_WRITE_REG_OFFSET);
+}
+
+static int poll_cpu_status(void __iomem *base, u32 value)
+{
+ u32 status;
+ u32 retries;
+
+ for (retries = 0; retries < OTPC_RETRIES; retries++) {
+ status = readl(base + OTPC_CPU_STATUS_OFFSET);
+ if (status & value)
+ break;
+ udelay(1);
+ }
+ if (retries == OTPC_RETRIES)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int enable_ocotp_program(void __iomem *base)
+{
+ static const u32 vals[] = OTPC_PROG_EN_SEQ;
+ int i;
+ int ret;
+
+ /* Write the magic sequence to enable programming */
+ set_command(base, OTPC_CMD_OTP_PROG_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(vals); i++) {
+ write_cpu_data(base, vals[i]);
+ set_start_bit(base);
+ ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE);
+ reset_start_bit(base);
+ if (ret)
+ return ret;
+ }
+
+ return poll_cpu_status(base, OTPC_STAT_PROG_OK);
+}
+
+static int disable_ocotp_program(void __iomem *base)
+{
+ int ret;
+
+ set_command(base, OTPC_CMD_OTP_PROG_DISABLE);
+ set_start_bit(base);
+ ret = poll_cpu_status(base, OTPC_STAT_PROG_OK);
+ reset_start_bit(base);
+
+ return ret;
+}
+
+static int bcm_otpc_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct otpc_priv *priv = context;
+ u32 *buf = val;
+ u32 bytes_read;
+ u32 address = offset / priv->config->word_size;
+ int i, ret;
+
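+	/* Each READ command returns one OTP row of otpc_row_size words */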
+ for (bytes_read = 0; bytes_read < bytes;) {
+ set_command(priv->base, OTPC_CMD_READ);
+ set_cpu_address(priv->base, address++);
+ set_start_bit(priv->base);
+ ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+ if (ret) {
+ dev_err(priv->dev, "otp read error: 0x%x", ret);
+ return -EIO;
+ }
+
+ for (i = 0; i < priv->map->otpc_row_size; i++) {
+ *buf++ = readl(priv->base +
+ priv->map->data_r_offset[i]);
+ bytes_read += sizeof(*buf);
+ }
+
+ reset_start_bit(priv->base);
+ }
+
+ return 0;
+}
+
+static int bcm_otpc_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct otpc_priv *priv = context;
+ u32 *buf = val;
+ u32 bytes_written;
+ u32 address = offset / priv->config->word_size;
+ int i, ret;
+
+ if (offset % priv->config->word_size)
+ return -EINVAL;
+
+ ret = enable_ocotp_program(priv->base);
+ if (ret)
+ return -EIO;
+
+ for (bytes_written = 0; bytes_written < bytes;) {
+ set_command(priv->base, OTPC_CMD_PROGRAM);
+ set_cpu_address(priv->base, address++);
+ for (i = 0; i < priv->map->otpc_row_size; i++) {
+ writel(*buf, priv->base + priv->map->data_w_offset[i]);
+ buf++;
+ bytes_written += sizeof(*buf);
+ }
+ set_start_bit(priv->base);
+ ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+ reset_start_bit(priv->base);
+ if (ret) {
+ dev_err(priv->dev, "otp write error: 0x%x", ret);
+ return -EIO;
+ }
+ }
+
+ disable_ocotp_program(priv->base);
+
+ return 0;
+}
+
+static struct nvmem_config bcm_otpc_nvmem_config = {
+ .name = "bcm-ocotp",
+ .read_only = false,
+ .word_size = 4,
+ .stride = 4,
+ .reg_read = bcm_otpc_read,
+ .reg_write = bcm_otpc_write,
+};
+
+static const struct of_device_id bcm_otpc_dt_ids[] = {
+ { .compatible = "brcm,ocotp", .data = &otp_map },
+ { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+
+static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = {
+ { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
+ { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
+
+static int bcm_otpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct otpc_priv *priv;
+ struct nvmem_device *nvmem;
+ int err;
+ u32 num_words;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->map = device_get_match_data(dev);
+ if (!priv->map)
+ return -ENODEV;
+
+ /* Get OTP base address register. */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "unable to map I/O memory\n");
+ return PTR_ERR(priv->base);
+ }
+
+ /* Enable CPU access to OTPC. */
+ writel(readl(priv->base + OTPC_MODE_REG_OFFSET) |
+ BIT(OTPC_MODE_REG_OTPC_MODE),
+ priv->base + OTPC_MODE_REG_OFFSET);
+ reset_start_bit(priv->base);
+
+ /* Read size of memory in words. */
+ err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
+ if (err) {
+ dev_err(dev, "size parameter not specified\n");
+ return -EINVAL;
+ } else if (num_words == 0) {
+ dev_err(dev, "size must be > 0\n");
+ return -EINVAL;
+ }
+
+ bcm_otpc_nvmem_config.size = 4 * num_words;
+ bcm_otpc_nvmem_config.dev = dev;
+ bcm_otpc_nvmem_config.priv = priv;
+
+ if (priv->map == &otp_map_v2) {
+ bcm_otpc_nvmem_config.word_size = 8;
+ bcm_otpc_nvmem_config.stride = 8;
+ }
+
+ priv->config = &bcm_otpc_nvmem_config;
+
+ nvmem = devm_nvmem_register(dev, &bcm_otpc_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "error registering nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static struct platform_driver bcm_otpc_driver = {
+ .probe = bcm_otpc_probe,
+ .driver = {
+ .name = "brcm-otpc",
+ .of_match_table = bcm_otpc_dt_ids,
+ .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
+ },
+};
+module_platform_driver(bcm_otpc_driver);
+
+MODULE_DESCRIPTION("Broadcom OTPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
new file mode 100644
index 0000000000..5cdf339cfb
--- /dev/null
+++ b/drivers/nvmem/brcm_nvram.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define NVRAM_MAGIC "FLSH"
+
+/**
+ * struct brcm_nvram - driver state internal struct
+ *
+ * @dev: NVMEM device pointer
+ * @nvmem_size: Size of the whole space available for NVRAM
+ * @data: NVRAM data copy stored to avoid poking the underlying flash controller
+ * @data_len: NVRAM data size
+ * @padding_byte: Padding value used to fill remaining space
+ * @cells: Array of discovered NVMEM cells
+ * @ncells: Number of elements in cells
+ */
+struct brcm_nvram {
+ struct device *dev;
+ size_t nvmem_size;
+ uint8_t *data;
+ size_t data_len;
+ uint8_t padding_byte;
+ struct nvmem_cell_info *cells;
+ int ncells;
+};
+
+struct brcm_nvram_header {
+ char magic[4];
+ __le32 len;
+ __le32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ __le32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ __le32 config_ncdl; /* ncdl values for memc */
+};
+
+static int brcm_nvram_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct brcm_nvram *priv = context;
+ size_t to_copy;
+
+ if (offset + bytes > priv->data_len)
+ to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0);
+ else
+ to_copy = bytes;
+
+ memcpy(val, priv->data + offset, to_copy);
+
+ memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy);
+
+ return 0;
+}
+
+static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->nvmem_size = resource_size(res);
+
+ priv->padding_byte = readb(base + priv->nvmem_size - 1);
+ for (priv->data_len = priv->nvmem_size;
+ priv->data_len;
+ priv->data_len--) {
+ if (readb(base + priv->data_len - 1) != priv->padding_byte)
+ break;
+ }
+ WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n", priv->data_len);
+
+ priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL);
+ if (!priv->data)
+ return -ENOMEM;
+
+ memcpy_fromio(priv->data, base, priv->data_len);
+
+ bcm47xx_nvram_init_from_iomem(base, priv->data_len);
+
+ return 0;
+}
+
+static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, int index,
+ unsigned int offset, void *buf, size_t bytes)
+{
+ u8 mac[ETH_ALEN];
+
+ if (bytes != 3 * ETH_ALEN - 1)
+ return -EINVAL;
+
+ if (!mac_pton(buf, mac))
+ return -EINVAL;
+
+ if (index)
+ eth_addr_add(mac, index);
+
+ ether_addr_copy(buf, mac);
+
+ return 0;
+}
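
The hook above only accepts the 17-character ASCII form stored in NVRAM (3 * ETH_ALEN - 1 bytes), converts it in place to ETH_ALEN binary bytes, and applies the cell index via eth_addr_add(). A minimal consumer-side sketch, not part of this patch, assuming a hypothetical "mac-address" entry in the consumer's nvmem-cell-names pointing at one of the et*macaddr cells:

#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_mac(struct device *dev, u8 out[ETH_ALEN])
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *mac;

	cell = nvmem_cell_get(dev, "mac-address");	/* hypothetical cell name */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	mac = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(mac))
		return PTR_ERR(mac);

	/* read_post_process already converted the ASCII form to 6 bytes */
	if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
		kfree(mac);
		return -EINVAL;
	}

	ether_addr_copy(out, mac);
	kfree(mac);
	return 0;
}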
+
+static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
+ size_t len)
+{
+ struct device *dev = priv->dev;
+ char *var, *value;
+ uint8_t tmp;
+ int idx;
+ int err = 0;
+
+ tmp = priv->data[len - 1];
+ priv->data[len - 1] = '\0';
+
+ priv->ncells = 0;
+ for (var = data + sizeof(struct brcm_nvram_header);
+ var < (char *)data + len && *var;
+ var += strlen(var) + 1) {
+ priv->ncells++;
+ }
+
+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+ if (!priv->cells) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (var = data + sizeof(struct brcm_nvram_header), idx = 0;
+ var < (char *)data + len && *var;
+ var = value + strlen(value) + 1, idx++) {
+ char *eq, *name;
+
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ name = devm_kstrdup(dev, var, GFP_KERNEL);
+ *eq = '=';
+ if (!name) {
+ err = -ENOMEM;
+ goto out;
+ }
+ value = eq + 1;
+
+ priv->cells[idx].name = name;
+ priv->cells[idx].offset = value - (char *)data;
+ priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+ if (!strcmp(name, "et0macaddr") ||
+ !strcmp(name, "et1macaddr") ||
+ !strcmp(name, "et2macaddr")) {
+ priv->cells[idx].raw_len = strlen(value);
+ priv->cells[idx].bytes = ETH_ALEN;
+ priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
+ }
+ }
+
+out:
+ priv->data[len - 1] = tmp;
+ return err;
+}
+
+static int brcm_nvram_parse(struct brcm_nvram *priv)
+{
+ struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data;
+ struct device *dev = priv->dev;
+ size_t len;
+ int err;
+
+ if (memcmp(header->magic, NVRAM_MAGIC, 4)) {
+ dev_err(dev, "Invalid NVRAM magic\n");
+ return -EINVAL;
+ }
+
+ len = le32_to_cpu(header->len);
+ if (len > priv->nvmem_size) {
+ dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len,
+ priv->nvmem_size);
+ return -EINVAL;
+ }
+
+ err = brcm_nvram_add_cells(priv, priv->data, len);
+ if (err)
+ dev_err(dev, "Failed to add cells: %d\n", err);
+
+ return 0;
+}
+
+static int brcm_nvram_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .name = "brcm-nvram",
+ .reg_read = brcm_nvram_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct brcm_nvram *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ err = brcm_nvram_copy_data(priv, pdev);
+ if (err)
+ return err;
+
+ err = brcm_nvram_parse(priv);
+ if (err)
+ return err;
+
+ config.dev = dev;
+ config.cells = priv->cells;
+ config.ncells = priv->ncells;
+ config.priv = priv;
+ config.size = priv->nvmem_size;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id brcm_nvram_of_match_table[] = {
+ { .compatible = "brcm,nvram", },
+ {},
+};
+
+static struct platform_driver brcm_nvram_driver = {
+ .probe = brcm_nvram_probe,
+ .driver = {
+ .name = "brcm_nvram",
+ .of_match_table = brcm_nvram_of_match_table,
+ },
+};
+
+static int __init brcm_nvram_init(void)
+{
+ return platform_driver_register(&brcm_nvram_driver);
+}
+
+subsys_initcall_sync(brcm_nvram_init);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
new file mode 100644
index 0000000000..5b3955ad40
--- /dev/null
+++ b/drivers/nvmem/core.c
@@ -0,0 +1,2148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * nvmem framework core.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+struct nvmem_device {
+ struct module *owner;
+ struct device dev;
+ int stride;
+ int word_size;
+ int id;
+ struct kref refcnt;
+ size_t size;
+ bool read_only;
+ bool root_only;
+ int flags;
+ enum nvmem_type type;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
+ struct list_head cells;
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ struct gpio_desc *wp_gpio;
+ struct nvmem_layout *layout;
+ void *priv;
+};
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+
+#define FLAG_COMPAT BIT(0)
+struct nvmem_cell_entry {
+ const char *name;
+ int offset;
+ size_t raw_len;
+ int bytes;
+ int bit_offset;
+ int nbits;
+ nvmem_cell_post_process_t read_post_process;
+ void *priv;
+ struct device_node *np;
+ struct nvmem_device *nvmem;
+ struct list_head node;
+};
+
+struct nvmem_cell {
+ struct nvmem_cell_entry *entry;
+ const char *id;
+ int index;
+};
+
+static DEFINE_MUTEX(nvmem_mutex);
+static DEFINE_IDA(nvmem_ida);
+
+static DEFINE_MUTEX(nvmem_cell_mutex);
+static LIST_HEAD(nvmem_cell_tables);
+
+static DEFINE_MUTEX(nvmem_lookup_mutex);
+static LIST_HEAD(nvmem_lookup_list);
+
+static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
+
+static DEFINE_SPINLOCK(nvmem_layout_lock);
+static LIST_HEAD(nvmem_layouts);
+
+static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_read)
+ return nvmem->reg_read(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
+
+static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+
+ if (nvmem->reg_write) {
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
+ ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
+ unsigned int offset, void *val,
+ size_t bytes, int write)
+{
+ unsigned int end = offset + bytes;
+ unsigned int kend, ksize;
+ const struct nvmem_keepout *keepout = nvmem->keepout;
+ const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
+ int rc;
+
+ /*
+ * Skip all keepouts before the range being accessed.
+ * Keepouts are sorted.
+ */
+ while ((keepout < keepoutend) && (keepout->end <= offset))
+ keepout++;
+
+ while ((offset < end) && (keepout < keepoutend)) {
+ /* Access the valid portion before the keepout. */
+ if (offset < keepout->start) {
+ kend = min(end, keepout->start);
+ ksize = kend - offset;
+ if (write)
+ rc = __nvmem_reg_write(nvmem, offset, val, ksize);
+ else
+ rc = __nvmem_reg_read(nvmem, offset, val, ksize);
+
+ if (rc)
+ return rc;
+
+ offset += ksize;
+ val += ksize;
+ }
+
+ /*
+ * Now we're aligned to the start of this keepout zone. Go
+ * through it.
+ */
+ kend = min(end, keepout->end);
+ ksize = kend - offset;
+ if (!write)
+ memset(val, keepout->value, ksize);
+
+ val += ksize;
+ offset += ksize;
+ keepout++;
+ }
+
+ /*
+ * If we ran out of keepouts but there's still stuff to do, send it
+ * down directly
+ */
+ if (offset < end) {
+ ksize = end - offset;
+ if (write)
+ return __nvmem_reg_write(nvmem, offset, val, ksize);
+ else
+ return __nvmem_reg_read(nvmem, offset, val, ksize);
+ }
+
+ return 0;
+}
+
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (!nvmem->nkeepout)
+ return __nvmem_reg_read(nvmem, offset, val, bytes);
+
+ return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (!nvmem->nkeepout)
+ return __nvmem_reg_write(nvmem, offset, val, bytes);
+
+ return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
+}
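
The two wrappers above route every access through nvmem_access_with_keepouts() whenever the provider declared keepout regions: reads of a keepout return the region's fill value, writes silently skip it. A hedged provider-side sketch of such a declaration, with made-up offsets (regions must be sorted, stride-aligned and at least word_size apart, as enforced by nvmem_validate_keepouts() further down):

#include <linux/nvmem-provider.h>

static const struct nvmem_keepout example_keepouts[] = {
	{ .start = 0x20, .end = 0x30, .value = 0xff },	/* hypothetical factory area */
	{ .start = 0x80, .end = 0x88, .value = 0x00 },	/* hypothetical lock words */
};

static struct nvmem_config example_config = {
	.name		= "example-otp",	/* hypothetical */
	.word_size	= 4,
	.stride		= 4,
	.keepout	= example_keepouts,
	.nkeepout	= ARRAY_SIZE(example_keepouts),
	/* .dev, .size, .reg_read, .reg_write filled in at probe time */
};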
+
+#ifdef CONFIG_NVMEM_SYSFS
+static const char * const nvmem_type_str[] = {
+ [NVMEM_TYPE_UNKNOWN] = "Unknown",
+ [NVMEM_TYPE_EEPROM] = "EEPROM",
+ [NVMEM_TYPE_OTP] = "OTP",
+ [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
+ [NVMEM_TYPE_FRAM] = "FRAM",
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *nvmem_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = kobj_to_dev(kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from reading */
+ if (pos >= nvmem->size)
+ return 0;
+
+ if (!IS_ALIGNED(pos, nvmem->stride))
+ return -EINVAL;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ if (!nvmem->reg_read)
+ return -EPERM;
+
+ rc = nvmem_reg_read(nvmem, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = kobj_to_dev(kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from writing */
+ if (pos >= nvmem->size)
+ return -EFBIG;
+
+ if (!IS_ALIGNED(pos, nvmem->stride))
+ return -EINVAL;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ if (!nvmem->reg_write)
+ return -EPERM;
+
+ rc = nvmem_reg_write(nvmem, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
+{
+ umode_t mode = 0400;
+
+ if (!nvmem->root_only)
+ mode |= 0044;
+
+ if (!nvmem->read_only)
+ mode |= 0200;
+
+ if (!nvmem->reg_write)
+ mode &= ~0200;
+
+ if (!nvmem->reg_read)
+ mode &= ~0444;
+
+ return mode;
+}
+
+static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ attr->size = nvmem->size;
+
+ return nvmem_bin_attr_get_umode(nvmem);
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0644,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_attributes[] = {
+ &bin_attr_rw_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_group = {
+ .bin_attrs = nvmem_bin_attributes,
+ .attrs = nvmem_attrs,
+ .is_bin_visible = nvmem_bin_attr_is_visible,
+};
+
+static const struct attribute_group *nvmem_dev_groups[] = {
+ &nvmem_bin_group,
+ NULL,
+};
+
+static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
+ .attr = {
+ .name = "eeprom",
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+/*
+ * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
+ * driver's sysfs directory, to be backwards compatible with the older
+ * drivers/misc/eeprom drivers.
+ */
+static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ int rval;
+
+ if (!config->compat)
+ return 0;
+
+ if (!config->base_dev)
+ return -EINVAL;
+
+ if (config->type == NVMEM_TYPE_FRAM)
+ bin_attr_nvmem_eeprom_compat.attr.name = "fram";
+
+ nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
+ nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
+ nvmem->eeprom.size = nvmem->size;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ nvmem->eeprom.attr.key = &eeprom_lock_key;
+#endif
+ nvmem->eeprom.private = &nvmem->dev;
+ nvmem->base_dev = config->base_dev;
+
+ rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
+ if (rval) {
+ dev_err(&nvmem->dev,
+ "Failed to create eeprom binary file %d\n", rval);
+ return rval;
+ }
+
+ nvmem->flags |= FLAG_COMPAT;
+
+ return 0;
+}
+
+static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ if (config->compat)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+}
+
+#else /* CONFIG_NVMEM_SYSFS */
+
+static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ return -ENOSYS;
+}
+static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+}
+
+#endif /* CONFIG_NVMEM_SYSFS */
+
+static void nvmem_release(struct device *dev)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ ida_free(&nvmem_ida, nvmem->id);
+ gpiod_put(nvmem->wp_gpio);
+ kfree(nvmem);
+}
+
+static const struct device_type nvmem_provider_type = {
+ .release = nvmem_release,
+};
+
+static struct bus_type nvmem_bus_type = {
+ .name = "nvmem",
+};
+
+static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
+{
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
+ mutex_lock(&nvmem_mutex);
+ list_del(&cell->node);
+ mutex_unlock(&nvmem_mutex);
+ of_node_put(cell->np);
+ kfree_const(cell->name);
+ kfree(cell);
+}
+
+static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
+{
+ struct nvmem_cell_entry *cell, *p;
+
+ list_for_each_entry_safe(cell, p, &nvmem->cells, node)
+ nvmem_cell_entry_drop(cell);
+}
+
+static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
+{
+ mutex_lock(&nvmem_mutex);
+ list_add_tail(&cell->node, &cell->nvmem->cells);
+ mutex_unlock(&nvmem_mutex);
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
+}
+
+static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ struct nvmem_cell_entry *cell)
+{
+ cell->nvmem = nvmem;
+ cell->offset = info->offset;
+ cell->raw_len = info->raw_len ?: info->bytes;
+ cell->bytes = info->bytes;
+ cell->name = info->name;
+ cell->read_post_process = info->read_post_process;
+ cell->priv = info->priv;
+
+ cell->bit_offset = info->bit_offset;
+ cell->nbits = info->nbits;
+ cell->np = info->np;
+
+ if (cell->nbits)
+ cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+ BITS_PER_BYTE);
+
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(&nvmem->dev,
+ "cell %s unaligned to nvmem stride %d\n",
+ cell->name ?: "<unknown>", nvmem->stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ struct nvmem_cell_entry *cell)
+{
+ int err;
+
+ err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
+ if (err)
+ return err;
+
+ cell->name = kstrdup_const(info->name, GFP_KERNEL);
+ if (!cell->name)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * nvmem_add_one_cell() - Add one cell information to an nvmem device
+ *
+ * @nvmem: nvmem device to add cells to.
+ * @info: nvmem cell info to add to the device
+ *
+ * Return: 0 or negative error code on failure.
+ */
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info)
+{
+ struct nvmem_cell_entry *cell;
+ int rval;
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell)
+ return -ENOMEM;
+
+ rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
+ if (rval) {
+ kfree(cell);
+ return rval;
+ }
+
+ nvmem_cell_entry_add(cell);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
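
A hedged provider-side sketch of the call above, with made-up names and offsets; the core recomputes bytes from nbits/bit_offset and rejects offsets that are not aligned to the device stride:

#include <linux/nvmem-provider.h>

static int example_add_board_rev(struct nvmem_device *nvmem)
{
	struct nvmem_cell_info info = {
		.name		= "board-rev",	/* hypothetical cell name */
		.offset		= 0x40,		/* must respect the device stride */
		.bytes		= 2,
		.bit_offset	= 4,
		.nbits		= 6,
	};

	return nvmem_add_one_cell(nvmem, &info);
}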
+
+/**
+ * nvmem_add_cells() - Add cell information to an nvmem device
+ *
+ * @nvmem: nvmem device to add cells to.
+ * @info: nvmem cell info to add to the device
+ * @ncells: number of cells in info
+ *
+ * Return: 0 or negative error code on failure.
+ */
+static int nvmem_add_cells(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ int ncells)
+{
+ int i, rval;
+
+ for (i = 0; i < ncells; i++) {
+ rval = nvmem_add_one_cell(nvmem, &info[i]);
+ if (rval)
+ return rval;
+ }
+
+ return 0;
+}
+
+/**
+ * nvmem_register_notifier() - Register a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be called on nvmem events.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_register_notifier);
+
+/**
+ * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be unregistered.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
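
A hedged sketch of a listener for these notifications (all names hypothetical); the chain passes the affected nvmem device or cell entry as the notifier data, using the action codes seen throughout this file (NVMEM_ADD, NVMEM_REMOVE, NVMEM_CELL_ADD, ...):

#include <linux/notifier.h>
#include <linux/nvmem-consumer.h>

static int example_nvmem_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	if (event == NVMEM_ADD) {
		struct nvmem_device *nvmem = data;

		pr_info("nvmem provider %s registered\n", nvmem_dev_name(nvmem));
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nvmem_nb = {
	.notifier_call = example_nvmem_event,
};

/* e.g. from module init: nvmem_register_notifier(&example_nvmem_nb); */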
+
+static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
+{
+ const struct nvmem_cell_info *info;
+ struct nvmem_cell_table *table;
+ struct nvmem_cell_entry *cell;
+ int rval = 0, i;
+
+ mutex_lock(&nvmem_cell_mutex);
+ list_for_each_entry(table, &nvmem_cell_tables, node) {
+ if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
+ for (i = 0; i < table->ncells; i++) {
+ info = &table->cells[i];
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
+ if (rval) {
+ kfree(cell);
+ goto out;
+ }
+
+ nvmem_cell_entry_add(cell);
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&nvmem_cell_mutex);
+ return rval;
+}
+
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
+{
+ struct nvmem_cell_entry *iter, *cell = NULL;
+
+ mutex_lock(&nvmem_mutex);
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (strcmp(cell_id, iter->name) == 0) {
+ cell = iter;
+ break;
+ }
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ return cell;
+}
+
+static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
+{
+ unsigned int cur = 0;
+ const struct nvmem_keepout *keepout = nvmem->keepout;
+ const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
+
+ while (keepout < keepoutend) {
+ /* Ensure keepouts are sorted and don't overlap. */
+ if (keepout->start < cur) {
+ dev_err(&nvmem->dev,
+ "Keepout regions aren't sorted or overlap.\n");
+
+ return -ERANGE;
+ }
+
+ if (keepout->end < keepout->start) {
+ dev_err(&nvmem->dev,
+ "Invalid keepout region.\n");
+
+ return -EINVAL;
+ }
+
+ /*
+ * Validate keepouts (and holes between) don't violate
+ * word_size constraints.
+ */
+ if ((keepout->end - keepout->start < nvmem->word_size) ||
+ ((keepout->start != cur) &&
+ (keepout->start - cur < nvmem->word_size))) {
+
+ dev_err(&nvmem->dev,
+ "Keepout regions violate word_size constraints.\n");
+
+ return -ERANGE;
+ }
+
+ /* Validate keepouts don't violate stride (alignment). */
+ if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
+ !IS_ALIGNED(keepout->end, nvmem->stride)) {
+
+ dev_err(&nvmem->dev,
+ "Keepout regions violate stride.\n");
+
+ return -EINVAL;
+ }
+
+ cur = keepout->end;
+ keepout++;
+ }
+
+ return 0;
+}
+
+static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
+{
+ struct nvmem_layout *layout = nvmem->layout;
+ struct device *dev = &nvmem->dev;
+ struct device_node *child;
+ const __be32 *addr;
+ int len, ret;
+
+ for_each_child_of_node(np, child) {
+ struct nvmem_cell_info info = {0};
+
+ addr = of_get_property(child, "reg", &len);
+ if (!addr)
+ continue;
+ if (len < 2 * sizeof(u32)) {
+ dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ info.offset = be32_to_cpup(addr++);
+ info.bytes = be32_to_cpup(addr);
+ info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
+
+ addr = of_get_property(child, "bits", &len);
+ if (addr && len == (2 * sizeof(u32))) {
+ info.bit_offset = be32_to_cpup(addr++);
+ info.nbits = be32_to_cpup(addr);
+ }
+
+ info.np = of_node_get(child);
+
+ if (layout && layout->fixup_cell_info)
+ layout->fixup_cell_info(nvmem, layout, &info);
+
+ ret = nvmem_add_one_cell(nvmem, &info);
+ kfree(info.name);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
+{
+ return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
+}
+
+static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
+{
+ struct device_node *layout_np;
+ int err = 0;
+
+ layout_np = of_nvmem_layout_get_container(nvmem);
+ if (!layout_np)
+ return 0;
+
+ if (of_device_is_compatible(layout_np, "fixed-layout"))
+ err = nvmem_add_cells_from_dt(nvmem, layout_np);
+
+ of_node_put(layout_np);
+
+ return err;
+}
+
+int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
+{
+ layout->owner = owner;
+
+ spin_lock(&nvmem_layout_lock);
+ list_add(&layout->node, &nvmem_layouts);
+ spin_unlock(&nvmem_layout_lock);
+
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__nvmem_layout_register);
+
+void nvmem_layout_unregister(struct nvmem_layout *layout)
+{
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
+
+ spin_lock(&nvmem_layout_lock);
+ list_del(&layout->node);
+ spin_unlock(&nvmem_layout_lock);
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
+
+static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
+{
+ struct device_node *layout_np;
+ struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
+
+ layout_np = of_nvmem_layout_get_container(nvmem);
+ if (!layout_np)
+ return NULL;
+
+ /* Fixed layouts don't have a matching driver */
+ if (of_device_is_compatible(layout_np, "fixed-layout")) {
+ of_node_put(layout_np);
+ return NULL;
+ }
+
+ /*
+ * In case the nvmem device was built-in while the layout was built as a
+ * module, we shall manually request the layout driver loading otherwise
+ * we'll never have any match.
+ */
+ of_request_module(layout_np);
+
+ spin_lock(&nvmem_layout_lock);
+
+ list_for_each_entry(l, &nvmem_layouts, node) {
+ if (of_match_node(l->of_match_table, layout_np)) {
+ if (try_module_get(l->owner))
+ layout = l;
+
+ break;
+ }
+ }
+
+ spin_unlock(&nvmem_layout_lock);
+ of_node_put(layout_np);
+
+ return layout;
+}
+
+static void nvmem_layout_put(struct nvmem_layout *layout)
+{
+ if (layout)
+ module_put(layout->owner);
+}
+
+static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
+{
+ struct nvmem_layout *layout = nvmem->layout;
+ int ret;
+
+ if (layout && layout->add_cells) {
+ ret = layout->add_cells(&nvmem->dev, nvmem, layout);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_layout_get_container() - Get OF node to layout container.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+ return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
+}
+EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
+#endif
+
+const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
+ struct nvmem_layout *layout)
+{
+ struct device_node __maybe_unused *layout_np;
+ const struct of_device_id *match;
+
+ layout_np = of_nvmem_layout_get_container(nvmem);
+ match = of_match_node(layout->of_match_table, layout_np);
+
+ return match ? match->data : NULL;
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
+
+/**
+ * nvmem_register() - Register a nvmem device for given nvmem_config.
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ *
+ * @config: nvmem device configuration with which nvmem device is created.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+{
+ struct nvmem_device *nvmem;
+ int rval;
+
+ if (!config->dev)
+ return ERR_PTR(-EINVAL);
+
+ if (!config->reg_read && !config->reg_write)
+ return ERR_PTR(-EINVAL);
+
+ nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
+ if (!nvmem)
+ return ERR_PTR(-ENOMEM);
+
+ rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
+ if (rval < 0) {
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
+
+ nvmem->id = rval;
+
+ nvmem->dev.type = &nvmem_provider_type;
+ nvmem->dev.bus = &nvmem_bus_type;
+ nvmem->dev.parent = config->dev;
+
+ device_initialize(&nvmem->dev);
+
+ if (!config->ignore_wp)
+ nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(nvmem->wp_gpio)) {
+ rval = PTR_ERR(nvmem->wp_gpio);
+ nvmem->wp_gpio = NULL;
+ goto err_put_device;
+ }
+
+ kref_init(&nvmem->refcnt);
+ INIT_LIST_HEAD(&nvmem->cells);
+
+ nvmem->owner = config->owner;
+ if (!nvmem->owner && config->dev->driver)
+ nvmem->owner = config->dev->driver->owner;
+ nvmem->stride = config->stride ?: 1;
+ nvmem->word_size = config->word_size ?: 1;
+ nvmem->size = config->size;
+ nvmem->root_only = config->root_only;
+ nvmem->priv = config->priv;
+ nvmem->type = config->type;
+ nvmem->reg_read = config->reg_read;
+ nvmem->reg_write = config->reg_write;
+ nvmem->keepout = config->keepout;
+ nvmem->nkeepout = config->nkeepout;
+ if (config->of_node)
+ nvmem->dev.of_node = config->of_node;
+ else if (!config->no_of_node)
+ nvmem->dev.of_node = config->dev->of_node;
+
+ switch (config->id) {
+ case NVMEM_DEVID_NONE:
+ rval = dev_set_name(&nvmem->dev, "%s", config->name);
+ break;
+ case NVMEM_DEVID_AUTO:
+ rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+ break;
+ default:
+ rval = dev_set_name(&nvmem->dev, "%s%d",
+ config->name ? : "nvmem",
+ config->name ? config->id : nvmem->id);
+ break;
+ }
+
+ if (rval)
+ goto err_put_device;
+
+ nvmem->read_only = device_property_present(config->dev, "read-only") ||
+ config->read_only || !nvmem->reg_write;
+
+#ifdef CONFIG_NVMEM_SYSFS
+ nvmem->dev.groups = nvmem_dev_groups;
+#endif
+
+ if (nvmem->nkeepout) {
+ rval = nvmem_validate_keepouts(nvmem);
+ if (rval)
+ goto err_put_device;
+ }
+
+ if (config->compat) {
+ rval = nvmem_sysfs_setup_compat(nvmem, config);
+ if (rval)
+ goto err_put_device;
+ }
+
+ /*
+ * If the driver supplied a layout by config->layout, the module
+ * pointer will be NULL and nvmem_layout_put() will be a noop.
+ */
+ nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
+ if (IS_ERR(nvmem->layout)) {
+ rval = PTR_ERR(nvmem->layout);
+ nvmem->layout = NULL;
+
+ if (rval == -EPROBE_DEFER)
+ goto err_teardown_compat;
+ }
+
+ if (config->cells) {
+ rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
+ if (rval)
+ goto err_remove_cells;
+ }
+
+ rval = nvmem_add_cells_from_table(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = nvmem_add_cells_from_legacy_of(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = nvmem_add_cells_from_fixed_layout(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = nvmem_add_cells_from_layout(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+ rval = device_add(&nvmem->dev);
+ if (rval)
+ goto err_remove_cells;
+
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
+
+ return nvmem;
+
+err_remove_cells:
+ nvmem_device_remove_all_cells(nvmem);
+ nvmem_layout_put(nvmem->layout);
+err_teardown_compat:
+ if (config->compat)
+ nvmem_sysfs_remove_compat(nvmem, config);
+err_put_device:
+ put_device(&nvmem->dev);
+
+ return ERR_PTR(rval);
+}
+EXPORT_SYMBOL_GPL(nvmem_register);
+
+static void nvmem_device_release(struct kref *kref)
+{
+ struct nvmem_device *nvmem;
+
+ nvmem = container_of(kref, struct nvmem_device, refcnt);
+
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
+
+ if (nvmem->flags & FLAG_COMPAT)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+
+ nvmem_device_remove_all_cells(nvmem);
+ nvmem_layout_put(nvmem->layout);
+ device_unregister(&nvmem->dev);
+}
+
+/**
+ * nvmem_unregister() - Unregister previously registered nvmem device
+ *
+ * @nvmem: Pointer to previously registered nvmem device.
+ */
+void nvmem_unregister(struct nvmem_device *nvmem)
+{
+ if (nvmem)
+ kref_put(&nvmem->refcnt, nvmem_device_release);
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister);
+
+static void devm_nvmem_unregister(void *nvmem)
+{
+ nvmem_unregister(nvmem);
+}
+
+/**
+ * devm_nvmem_register() - Register a managed nvmem device for given
+ * nvmem_config.
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ *
+ * @dev: Device that uses the nvmem device.
+ * @config: nvmem device configuration with which nvmem device is created.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
+ * on success.
+ */
+struct nvmem_device *devm_nvmem_register(struct device *dev,
+ const struct nvmem_config *config)
+{
+ struct nvmem_device *nvmem;
+ int ret;
+
+ nvmem = nvmem_register(config);
+ if (IS_ERR(nvmem))
+ return nvmem;
+
+ ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return nvmem;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_register);
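
Tying the registration path together, a minimal provider sketch (entirely hypothetical driver; only the fields nvmem_register() actually consumes are shown). The context pointer handed back to reg_read is config->priv:

#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *base;
};

/* Hypothetical backend: the device is a plain memory-mapped byte array. */
static int example_reg_read(void *context, unsigned int offset,
			    void *val, size_t bytes)
{
	struct example_priv *priv = context;

	memcpy_fromio(val, priv->base + offset, bytes);
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct nvmem_config config = {
		.name		= "example-nvmem",	/* hypothetical */
		.id		= NVMEM_DEVID_AUTO,
		.type		= NVMEM_TYPE_OTP,
		.read_only	= true,
		.word_size	= 1,
		.stride		= 1,
		.size		= SZ_1K,		/* hypothetical size */
		.reg_read	= example_reg_read,
	};

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	config.dev = &pdev->dev;
	config.priv = priv;

	return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
}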
+
+static struct nvmem_device *__nvmem_device_get(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct nvmem_device *nvmem = NULL;
+ struct device *dev;
+
+ mutex_lock(&nvmem_mutex);
+ dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
+ if (dev)
+ nvmem = to_nvmem_device(dev);
+ mutex_unlock(&nvmem_mutex);
+ if (!nvmem)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!try_module_get(nvmem->owner)) {
+ dev_err(&nvmem->dev,
+ "could not increase module refcount for cell %s\n",
+ nvmem_dev_name(nvmem));
+
+ put_device(&nvmem->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ kref_get(&nvmem->refcnt);
+
+ return nvmem;
+}
+
+static void __nvmem_device_put(struct nvmem_device *nvmem)
+{
+ put_device(&nvmem->dev);
+ module_put(nvmem->owner);
+ kref_put(&nvmem->refcnt, nvmem_device_release);
+}
+
+#if IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @np: Device tree node that uses the nvmem device.
+ * @id: nvmem name from nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+{
+ struct device_node *nvmem_np;
+ struct nvmem_device *nvmem;
+ int index = 0;
+
+ if (id)
+ index = of_property_match_string(np, "nvmem-names", id);
+
+ nvmem_np = of_parse_phandle(np, "nvmem", index);
+ if (!nvmem_np)
+ return ERR_PTR(-ENOENT);
+
+ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
+ of_node_put(nvmem_np);
+ return nvmem;
+}
+EXPORT_SYMBOL_GPL(of_nvmem_device_get);
+#endif
+
+/**
+ * nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @dev: Device that uses the nvmem device.
+ * @dev_name: name of the requested nvmem device.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
+{
+ if (dev->of_node) { /* try dt first */
+ struct nvmem_device *nvmem;
+
+ nvmem = of_nvmem_device_get(dev->of_node, dev_name);
+
+ if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
+ return nvmem;
+
+ }
+
+ return __nvmem_device_get((void *)dev_name, device_match_name);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_get);
+
+/**
+ * nvmem_device_find() - Find nvmem device with matching function
+ *
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ return __nvmem_device_get(data, match);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_find);
+
+static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
+{
+ struct nvmem_device **nvmem = res;
+
+ if (WARN_ON(!nvmem || !*nvmem))
+ return 0;
+
+ return *nvmem == data;
+}
+
+static void devm_nvmem_device_release(struct device *dev, void *res)
+{
+ nvmem_device_put(*(struct nvmem_device **)res);
+}
+
+/**
+ * devm_nvmem_device_put() - put an already-obtained nvmem device
+ *
+ * @dev: Device that uses the nvmem device.
+ * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
+ * that needs to be released.
+ */
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_nvmem_device_release,
+ devm_nvmem_device_match, nvmem);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
+
+/**
+ * nvmem_device_put() - put an already-obtained nvmem device
+ *
+ * @nvmem: pointer to nvmem device that needs to be released.
+ */
+void nvmem_device_put(struct nvmem_device *nvmem)
+{
+ __nvmem_device_put(nvmem);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+/**
+ * devm_nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @dev: Device that requests the nvmem device.
+ * @id: name id for the requested nvmem device.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be released automatically once the
+ * device is freed.
+ */
+struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+{
+ struct nvmem_device **ptr, *nvmem;
+
+ ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ nvmem = nvmem_device_get(dev, id);
+ if (!IS_ERR(nvmem)) {
+ *ptr = nvmem;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return nvmem;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
+
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
+ const char *id, int index)
+{
+ struct nvmem_cell *cell;
+ const char *name = NULL;
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell)
+ return ERR_PTR(-ENOMEM);
+
+ if (id) {
+ name = kstrdup_const(id, GFP_KERNEL);
+ if (!name) {
+ kfree(cell);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ cell->id = name;
+ cell->entry = entry;
+ cell->index = index;
+
+ return cell;
+}
+
+static struct nvmem_cell *
+nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
+{
+ struct nvmem_cell_entry *cell_entry;
+ struct nvmem_cell *cell = ERR_PTR(-ENOENT);
+ struct nvmem_cell_lookup *lookup;
+ struct nvmem_device *nvmem;
+ const char *dev_id;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ dev_id = dev_name(dev);
+
+ mutex_lock(&nvmem_lookup_mutex);
+
+ list_for_each_entry(lookup, &nvmem_lookup_list, node) {
+ if ((strcmp(lookup->dev_id, dev_id) == 0) &&
+ (strcmp(lookup->con_id, con_id) == 0)) {
+ /* This is the right entry. */
+ nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
+ device_match_name);
+ if (IS_ERR(nvmem)) {
+ /* Provider may not be registered yet. */
+ cell = ERR_CAST(nvmem);
+ break;
+ }
+
+ cell_entry = nvmem_find_cell_entry_by_name(nvmem,
+ lookup->cell_name);
+ if (!cell_entry) {
+ __nvmem_device_put(nvmem);
+ cell = ERR_PTR(-ENOENT);
+ } else {
+ cell = nvmem_create_cell(cell_entry, con_id, 0);
+ if (IS_ERR(cell))
+ __nvmem_device_put(nvmem);
+ }
+ break;
+ }
+ }
+
+ mutex_unlock(&nvmem_lookup_mutex);
+ return cell;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
+{
+ struct nvmem_cell_entry *iter, *cell = NULL;
+
+ mutex_lock(&nvmem_mutex);
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (np == iter->np) {
+ cell = iter;
+ break;
+ }
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ return cell;
+}
+
+/**
+ * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
+ *
+ * @np: Device tree node that uses the nvmem cell.
+ * @id: nvmem cell name from nvmem-cell-names property, or NULL
+ * for the cell at index 0 (the lone cell with no accompanying
+ * nvmem-cell-names property).
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell must be released with
+ * nvmem_cell_put().
+ */
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
+{
+ struct device_node *cell_np, *nvmem_np;
+ struct nvmem_device *nvmem;
+ struct nvmem_cell_entry *cell_entry;
+ struct nvmem_cell *cell;
+ struct of_phandle_args cell_spec;
+ int index = 0;
+ int cell_index = 0;
+ int ret;
+
+ /* if cell name exists, find index to the name */
+ if (id)
+ index = of_property_match_string(np, "nvmem-cell-names", id);
+
+ ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
+ "#nvmem-cell-cells",
+ index, &cell_spec);
+ if (ret)
+ return ERR_PTR(-ENOENT);
+
+ if (cell_spec.args_count > 1)
+ return ERR_PTR(-EINVAL);
+
+ cell_np = cell_spec.np;
+ if (cell_spec.args_count)
+ cell_index = cell_spec.args[0];
+
+ nvmem_np = of_get_parent(cell_np);
+ if (!nvmem_np) {
+ of_node_put(cell_np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* nvmem layouts produce cells within the nvmem-layout container */
+ if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
+ nvmem_np = of_get_next_parent(nvmem_np);
+ if (!nvmem_np) {
+ of_node_put(cell_np);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
+ of_node_put(nvmem_np);
+ if (IS_ERR(nvmem)) {
+ of_node_put(cell_np);
+ return ERR_CAST(nvmem);
+ }
+
+ cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
+ of_node_put(cell_np);
+ if (!cell_entry) {
+ __nvmem_device_put(nvmem);
+ return ERR_PTR(-ENOENT);
+ }
+
+ cell = nvmem_create_cell(cell_entry, id, cell_index);
+ if (IS_ERR(cell))
+ __nvmem_device_put(nvmem);
+
+ return cell;
+}
+EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
+#endif
+
+/**
+ * nvmem_cell_get() - Get nvmem cell of device from a given cell name
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name to get (this corresponds with the name from the
+ * nvmem-cell-names property for DT systems and with the con_id from
+ * the lookup entry for non-DT systems).
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell must be released with
+ * nvmem_cell_put().
+ */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
+{
+ struct nvmem_cell *cell;
+
+ if (dev->of_node) { /* try dt first */
+ cell = of_nvmem_cell_get(dev->of_node, id);
+ if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+ return cell;
+ }
+
+ /* NULL cell id only allowed for device tree; invalid otherwise */
+ if (!id)
+ return ERR_PTR(-EINVAL);
+
+ return nvmem_cell_get_from_lookup(dev, id);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_get);
+
+static void devm_nvmem_cell_release(struct device *dev, void *res)
+{
+ nvmem_cell_put(*(struct nvmem_cell **)res);
+}
+
+/**
+ * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name id to get.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell will be freed automatically
+ * once the device is freed.
+ */
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
+{
+ struct nvmem_cell **ptr, *cell;
+
+ ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ cell = nvmem_cell_get(dev, id);
+ if (!IS_ERR(cell)) {
+ *ptr = cell;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return cell;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
+
+static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
+{
+ struct nvmem_cell **c = res;
+
+ if (WARN_ON(!c || !*c))
+ return 0;
+
+ return *c == data;
+}
+
+/**
+ * devm_nvmem_cell_put() - Release previously allocated nvmem cell
+ * from devm_nvmem_cell_get.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
+ */
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_nvmem_cell_release,
+ devm_nvmem_cell_match, cell);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_nvmem_cell_put);
+
+/**
+ * nvmem_cell_put() - Release previously allocated nvmem cell.
+ *
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get().
+ */
+void nvmem_cell_put(struct nvmem_cell *cell)
+{
+ struct nvmem_device *nvmem = cell->entry->nvmem;
+
+ if (cell->id)
+ kfree_const(cell->id);
+
+ kfree(cell);
+ __nvmem_device_put(nvmem);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_put);
+
+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
+{
+ u8 *p, *b;
+ int i, extra, bit_offset = cell->bit_offset;
+
+ p = b = buf;
+ if (bit_offset) {
+ /* First shift */
+ *b++ >>= bit_offset;
+
+ /* setup rest of the bytes if any */
+ for (i = 1; i < cell->bytes; i++) {
+ /* Get bits from next byte and shift them towards msb */
+ *p |= *b << (BITS_PER_BYTE - bit_offset);
+
+ p = b;
+ *b++ >>= bit_offset;
+ }
+ } else {
+ /* point to the msb */
+ p += cell->bytes - 1;
+ }
+
+ /* result fits in less bytes */
+ extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
+ while (--extra >= 0)
+ *p-- = 0;
+
+ /* clear msb bits if any leftover in the last byte */
+ if (cell->nbits % BITS_PER_BYTE)
+ *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
+}
+
+static int __nvmem_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_entry *cell,
+ void *buf, size_t *len, const char *id, int index)
+{
+ int rc;
+
+ rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
+
+ if (rc)
+ return rc;
+
+ /* shift bits in-place */
+ if (cell->bit_offset || cell->nbits)
+ nvmem_shift_read_buffer_in_place(cell, buf);
+
+ if (cell->read_post_process) {
+ rc = cell->read_post_process(cell->priv, id, index,
+ cell->offset, buf, cell->raw_len);
+ if (rc)
+ return rc;
+ }
+
+ if (len)
+ *len = cell->bytes;
+
+ return 0;
+}
+
+/**
+ * nvmem_cell_read() - Read a given nvmem cell
+ *
+ * @cell: nvmem cell to be read.
+ * @len: pointer to length of cell which will be populated on successful read;
+ * can be NULL.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
+ * buffer should be freed by the consumer with a kfree().
+ */
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+ struct nvmem_cell_entry *entry = cell->entry;
+ struct nvmem_device *nvmem = entry->nvmem;
+ u8 *buf;
+ int rc;
+
+ if (!nvmem)
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
+ if (rc) {
+ kfree(buf);
+ return ERR_PTR(rc);
+ }
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read);
+
+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
+ u8 *_buf, int len)
+{
+ struct nvmem_device *nvmem = cell->nvmem;
+ int i, rc, nbits, bit_offset = cell->bit_offset;
+ u8 v, *p, *buf, *b, pbyte, pbits;
+
+ nbits = cell->nbits;
+ buf = kzalloc(cell->bytes, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(buf, _buf, len);
+ p = b = buf;
+
+ if (bit_offset) {
+ pbyte = *b;
+ *b <<= bit_offset;
+
+ /* setup the first byte with lsb bits from nvmem */
+ rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
+ if (rc)
+ goto err;
+ *b++ |= GENMASK(bit_offset - 1, 0) & v;
+
+ /* setup rest of the byte if any */
+ for (i = 1; i < cell->bytes; i++) {
+ /* Get last byte bits and shift them towards lsb */
+ pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
+ pbyte = *b;
+ p = b;
+ *b <<= bit_offset;
+ *b++ |= pbits;
+ }
+ }
+
+ /* if it's not end on byte boundary */
+ if ((nbits + bit_offset) % BITS_PER_BYTE) {
+ /* setup the last byte with msb bits from nvmem */
+ rc = nvmem_reg_read(nvmem,
+ cell->offset + cell->bytes - 1, &v, 1);
+ if (rc)
+ goto err;
+ *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
+
+ }
+
+ return buf;
+err:
+ kfree(buf);
+ return ERR_PTR(rc);
+}
+
+static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
+{
+ struct nvmem_device *nvmem = cell->nvmem;
+ int rc;
+
+ if (!nvmem || nvmem->read_only ||
+ (cell->bit_offset == 0 && len != cell->bytes))
+ return -EINVAL;
+
+ /*
+ * Any cells which have a read_post_process hook are read-only because
+ * we cannot reverse the operation and it might affect other cells,
+ * too.
+ */
+ if (cell->read_post_process)
+ return -EINVAL;
+
+ if (cell->bit_offset || cell->nbits) {
+ buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+ }
+
+ rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
+
+ /* free the tmp buffer */
+ if (cell->bit_offset || cell->nbits)
+ kfree(buf);
+
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of buffer to be written to nvmem cell.
+ *
+ * Return: length of bytes written or negative on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+ return __nvmem_cell_entry_write(cell->entry, buf, len);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_write);
+
+static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
+ void *val, size_t count)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+
+ cell = nvmem_cell_get(dev, cell_id);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ return PTR_ERR(buf);
+ }
+ if (len != count) {
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return -EINVAL;
+ }
+ memcpy(val, buf, count);
+ kfree(buf);
+ nvmem_cell_put(cell);
+
+ return 0;
+}
+
+/**
+ * nvmem_cell_read_u8() - Read a cell value as a u8
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
+
+/**
+ * nvmem_cell_read_u16() - Read a cell value as a u16
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
+
+/**
+ * nvmem_cell_read_u32() - Read a cell value as a u32
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
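
Where a cell is known to hold exactly one 32-bit word, the helper above replaces the whole get/read/put sequence. A hedged consumer sketch with a hypothetical cell name; per nvmem_cell_read_common() it returns -EINVAL if the cell is not exactly sizeof(u32) bytes:

/* Hypothetical: fetch a 4-byte calibration cell published by the provider. */
static int example_get_calibration(struct device *dev, u32 *calib)
{
	return nvmem_cell_read_u32(dev, "calibration", calib);
}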
+
+/**
+ * nvmem_cell_read_u64() - Read a cell value as a u64
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
+
+static const void *nvmem_cell_read_variable_common(struct device *dev,
+ const char *cell_id,
+ size_t max_len, size_t *len)
+{
+ struct nvmem_cell *cell;
+ int nbits;
+ void *buf;
+
+ cell = nvmem_cell_get(dev, cell_id);
+ if (IS_ERR(cell))
+ return cell;
+
+ nbits = cell->entry->nbits;
+ buf = nvmem_cell_read(cell, len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(buf))
+ return buf;
+
+ /*
+ * If nbits is set then nvmem_cell_read() can significantly exaggerate
+ * the length of the real data. Throw away the extra junk.
+ */
+ if (nbits)
+ *len = DIV_ROUND_UP(nbits, 8);
+
+ if (*len > max_len) {
+ kfree(buf);
+ return ERR_PTR(-ERANGE);
+ }
+
+ return buf;
+}
+
+/**
+ * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+ u32 *val)
+{
+ size_t len;
+ const u8 *buf;
+ int i;
+
+ buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* Copy w/ implicit endian conversion */
+ *val = 0;
+ for (i = 0; i < len; i++)
+ *val |= buf[i] << (8 * i);
+
+ kfree(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
+
+/**
+ * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+ u64 *val)
+{
+ size_t len;
+ const u8 *buf;
+ int i;
+
+ buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* Copy w/ implicit endian conversion */
+ *val = 0;
+ for (i = 0; i < len; i++)
+ *val |= (uint64_t)buf[i] << (8 * i);
+
+ kfree(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
+
+/**
+ * nvmem_device_cell_read() - Read a given nvmem device and cell
+ *
+ * @nvmem: nvmem device to read from.
+ * @info: nvmem cell info to be read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: length of successful bytes read on success and negative
+ * error code on error.
+ */
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf)
+{
+ struct nvmem_cell_entry cell;
+ int rc;
+ ssize_t len;
+
+ if (!nvmem)
+ return -EINVAL;
+
+ rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
+ if (rc)
+ return rc;
+
+ rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
+ if (rc)
+ return rc;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
+
+/**
+ * nvmem_device_cell_write() - Write cell to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @info: nvmem cell info to be written.
+ * @buf: buffer to be written to cell.
+ *
+ * Return: length of bytes written or negative error code on failure.
+ */
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf)
+{
+ struct nvmem_cell_entry cell;
+ int rc;
+
+ if (!nvmem)
+ return -EINVAL;
+
+ rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
+ if (rc)
+ return rc;
+
+ return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
+
+/**
+ * nvmem_device_read() - Read from a given nvmem device
+ *
+ * @nvmem: nvmem device to read from.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: length of successful bytes read on success and negative
+ * error code on error.
+ */
+int nvmem_device_read(struct nvmem_device *nvmem,
+ unsigned int offset,
+ size_t bytes, void *buf)
+{
+ int rc;
+
+ if (!nvmem)
+ return -EINVAL;
+
+ rc = nvmem_reg_read(nvmem, offset, buf, bytes);
+
+ if (rc)
+ return rc;
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_read);
+
+/**
+ * nvmem_device_write() - Write cell to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to write.
+ * @buf: buffer to be written.
+ *
+ * Return: length of bytes written or negative error code on failure.
+ */
+int nvmem_device_write(struct nvmem_device *nvmem,
+ unsigned int offset,
+ size_t bytes, void *buf)
+{
+ int rc;
+
+ if (!nvmem)
+ return -EINVAL;
+
+ rc = nvmem_reg_write(nvmem, offset, buf, bytes);
+
+ if (rc)
+ return rc;
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_write);
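+
+/*
+ * Illustrative sketch of raw device access, assuming a writable nvmem handle;
+ * the offset and buffer size are arbitrary. Both helpers return the number of
+ * bytes transferred on success.
+ *
+ * u8 blob[16];
+ * int ret;
+ *
+ * ret = nvmem_device_read(nvmem, 0x40, sizeof(blob), blob);
+ * if (ret != sizeof(blob))
+ * return ret < 0 ? ret : -EIO;
+ *
+ * blob[0] |= BIT(0);
+ * ret = nvmem_device_write(nvmem, 0x40, sizeof(blob), blob);
+ * if (ret < 0)
+ * return ret;
+ */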
+
+/**
+ * nvmem_add_cell_table() - register a table of cell info entries
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_add_cell_table(struct nvmem_cell_table *table)
+{
+ mutex_lock(&nvmem_cell_mutex);
+ list_add_tail(&table->node, &nvmem_cell_tables);
+ mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
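+
+/*
+ * Illustrative sketch of a provider-side cell table; the nvmem device name
+ * and cell geometry are invented. Such a table is typically registered from
+ * board or provider code before any consumer looks the cells up.
+ *
+ * static struct nvmem_cell_info example_cells[] = {
+ * {
+ * .name = "mac-address",
+ * .offset = 0x24,
+ * .bytes = 6,
+ * },
+ * };
+ *
+ * static struct nvmem_cell_table example_cell_table = {
+ * .nvmem_name = "example-otp0",
+ * .cells = example_cells,
+ * .ncells = ARRAY_SIZE(example_cells),
+ * };
+ *
+ * nvmem_add_cell_table(&example_cell_table);
+ */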
+
+/**
+ * nvmem_del_cell_table() - remove a previously registered cell info table
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_del_cell_table(struct nvmem_cell_table *table)
+{
+ mutex_lock(&nvmem_cell_mutex);
+ list_del(&table->node);
+ mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
+
+/**
+ * nvmem_add_cell_lookups() - register a list of cell lookup entries
+ *
+ * @entries: array of cell lookup entries
+ * @nentries: number of cell lookup entries in the array
+ */
+void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
+{
+ int i;
+
+ mutex_lock(&nvmem_lookup_mutex);
+ for (i = 0; i < nentries; i++)
+ list_add_tail(&entries[i].node, &nvmem_lookup_list);
+ mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
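+
+/*
+ * Illustrative sketch of a lookup list; the device and connection ids are
+ * hypothetical. A lookup lets a consumer matching dev_id resolve the cell
+ * by con_id without a devicetree reference.
+ *
+ * static struct nvmem_cell_lookup example_lookups[] = {
+ * {
+ * .nvmem_name = "example-otp0",
+ * .cell_name = "mac-address",
+ * .dev_id = "example-eth.0",
+ * .con_id = "mac-address",
+ * },
+ * };
+ *
+ * nvmem_add_cell_lookups(example_lookups, ARRAY_SIZE(example_lookups));
+ */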
+
+/**
+ * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
+ * entries
+ *
+ * @entries: array of cell lookup entries
+ * @nentries: number of cell lookup entries in the array
+ */
+void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
+{
+ int i;
+
+ mutex_lock(&nvmem_lookup_mutex);
+ for (i = 0; i < nentries; i++)
+ list_del(&entries[i].node);
+ mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
+
+/**
+ * nvmem_dev_name() - Get the name of a given nvmem device.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: name of the nvmem device.
+ */
+const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+ return dev_name(&nvmem->dev);
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
+static int __init nvmem_init(void)
+{
+ return bus_register(&nvmem_bus_type);
+}
+
+static void __exit nvmem_exit(void)
+{
+ bus_unregister(&nvmem_bus_type);
+}
+
+subsys_initcall(nvmem_init);
+module_exit(nvmem_exit);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("nvmem Driver Core");
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
new file mode 100644
index 0000000000..f13bbd1640
--- /dev/null
+++ b/drivers/nvmem/imx-iim.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i.MX IIM driver
+ *
+ * Copyright (c) 2017 Pengutronix, Michael Grzeschik <m.grzeschik@pengutronix.de>
+ *
+ * Based on the barebox iim driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
+ * Orex Computed Radiography
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define IIM_BANK_BASE(n) (0x800 + 0x400 * (n))
+
+struct imx_iim_drvdata {
+ unsigned int nregs;
+};
+
+struct iim_priv {
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static int imx_iim_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct iim_priv *iim = context;
+ int i, ret;
+ u8 *buf8 = buf;
+
+ ret = clk_prepare_enable(iim->clk);
+ if (ret)
+ return ret;
+
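+ /*
+ * Each IIM bank holds 32 fuse words and every word lives in its own
+ * 32-bit register, so bits 5 and up of the byte offset select the bank,
+ * the low five bits select the word, and only the least significant
+ * byte of each register is kept.
+ */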
+ for (i = offset; i < offset + bytes; i++) {
+ int bank = i >> 5;
+ int reg = i & 0x1f;
+
+ *buf8++ = readl(iim->base + IIM_BANK_BASE(bank) + reg * 4);
+ }
+
+ clk_disable_unprepare(iim->clk);
+
+ return 0;
+}
+
+static struct imx_iim_drvdata imx27_drvdata = {
+ .nregs = 2 * 32,
+};
+
+static struct imx_iim_drvdata imx25_imx31_imx35_drvdata = {
+ .nregs = 3 * 32,
+};
+
+static struct imx_iim_drvdata imx51_drvdata = {
+ .nregs = 4 * 32,
+};
+
+static struct imx_iim_drvdata imx53_drvdata = {
+ .nregs = 4 * 32 + 16,
+};
+
+static const struct of_device_id imx_iim_dt_ids[] = {
+ {
+ .compatible = "fsl,imx25-iim",
+ .data = &imx25_imx31_imx35_drvdata,
+ }, {
+ .compatible = "fsl,imx27-iim",
+ .data = &imx27_drvdata,
+ }, {
+ .compatible = "fsl,imx31-iim",
+ .data = &imx25_imx31_imx35_drvdata,
+ }, {
+ .compatible = "fsl,imx35-iim",
+ .data = &imx25_imx31_imx35_drvdata,
+ }, {
+ .compatible = "fsl,imx51-iim",
+ .data = &imx51_drvdata,
+ }, {
+ .compatible = "fsl,imx53-iim",
+ .data = &imx53_drvdata,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, imx_iim_dt_ids);
+
+static int imx_iim_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iim_priv *iim;
+ struct nvmem_device *nvmem;
+ struct nvmem_config cfg = {};
+ const struct imx_iim_drvdata *drvdata = NULL;
+
+ iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
+ if (!iim)
+ return -ENOMEM;
+
+ iim->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iim->base))
+ return PTR_ERR(iim->base);
+
+ drvdata = of_device_get_match_data(&pdev->dev);
+
+ iim->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(iim->clk))
+ return PTR_ERR(iim->clk);
+
+ cfg.name = "imx-iim",
+ cfg.read_only = true,
+ cfg.word_size = 1,
+ cfg.stride = 1,
+ cfg.reg_read = imx_iim_read,
+ cfg.dev = dev;
+ cfg.size = drvdata->nregs;
+ cfg.priv = iim;
+
+ nvmem = devm_nvmem_register(dev, &cfg);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver imx_iim_driver = {
+ .probe = imx_iim_probe,
+ .driver = {
+ .name = "imx-iim",
+ .of_match_table = imx_iim_dt_ids,
+ },
+};
+module_platform_driver(imx_iim_driver);
+
+MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX IIM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
new file mode 100644
index 0000000000..cf920542f9
--- /dev/null
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i.MX9 OCOTP fusebox driver
+ *
+ * Copyright 2023 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum fuse_type {
+ FUSE_FSB = 1,
+ FUSE_ELE = 2,
+ FUSE_INVALID = -1
+};
+
+struct ocotp_map_entry {
+ u32 start; /* start word */
+ u32 num; /* num words */
+ enum fuse_type type;
+};
+
+struct ocotp_devtype_data {
+ u32 reg_off;
+ char *name;
+ u32 size;
+ u32 num_entry;
+ u32 flag;
+ nvmem_reg_read_t reg_read;
+ struct ocotp_map_entry entry[];
+};
+
+struct imx_ocotp_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct nvmem_config config;
+ struct mutex lock;
+ const struct ocotp_devtype_data *data;
+};
+
+static enum fuse_type imx_ocotp_fuse_type(void *context, u32 index)
+{
+ struct imx_ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ u32 start, end;
+ int i;
+
+ for (i = 0; i < data->num_entry; i++) {
+ start = data->entry[i].start;
+ end = data->entry[i].start + data->entry[i].num;
+
+ if (index >= start && index < end)
+ return data->entry[i].type;
+ }
+
+ return FUSE_INVALID;
+}
+
+static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, size_t bytes)
+{
+ struct imx_ocotp_priv *priv = context;
+ void __iomem *reg = priv->base + priv->data->reg_off;
+ u32 count, index, num_bytes;
+ enum fuse_type type;
+ u32 *buf;
+ void *p;
+ int i;
+
+ index = offset;
+ num_bytes = round_up(bytes, 4);
+ count = num_bytes >> 2;
+
+ if (count > ((priv->data->size >> 2) - index))
+ count = (priv->data->size >> 2) - index;
+
+ p = kzalloc(num_bytes, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ mutex_lock(&priv->lock);
+
+ buf = p;
+
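+ /*
+ * Fuse words owned by the EdgeLock Enclave, or falling outside any
+ * mapped range, are not readable through this window; report them as
+ * zero instead of touching the register.
+ */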
+ for (i = index; i < (index + count); i++) {
+ type = imx_ocotp_fuse_type(context, i);
+ if (type == FUSE_INVALID || type == FUSE_ELE) {
+ *buf++ = 0;
+ continue;
+ }
+
+ *buf++ = readl_relaxed(reg + (i << 2));
+ }
+
+ memcpy(val, (u8 *)p, bytes);
+
+ mutex_unlock(&priv->lock);
+
+ kfree(p);
+
+ return 0;
+};
+
+static int imx_ele_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_ocotp_priv *priv;
+ struct nvmem_device *nvmem;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->config.dev = dev;
+ priv->config.name = "ELE-OCOTP";
+ priv->config.id = NVMEM_DEVID_AUTO;
+ priv->config.owner = THIS_MODULE;
+ priv->config.size = priv->data->size;
+ priv->config.reg_read = priv->data->reg_read;
+ priv->config.word_size = 4;
+ priv->config.stride = 1;
+ priv->config.priv = priv;
+ priv->config.read_only = true;
+ mutex_init(&priv->lock);
+
+ nvmem = devm_nvmem_register(dev, &priv->config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ return 0;
+}
+
+static const struct ocotp_devtype_data imx93_ocotp_data = {
+ .reg_off = 0x8000,
+ .reg_read = imx_ocotp_reg_read,
+ .size = 2048,
+ .num_entry = 6,
+ .entry = {
+ { 0, 52, FUSE_FSB },
+ { 63, 1, FUSE_ELE},
+ { 128, 16, FUSE_ELE },
+ { 182, 1, FUSE_ELE },
+ { 188, 1, FUSE_ELE },
+ { 312, 200, FUSE_FSB }
+ },
+};
+
+static const struct of_device_id imx_ele_ocotp_dt_ids[] = {
+ { .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids);
+
+static struct platform_driver imx_ele_ocotp_driver = {
+ .driver = {
+ .name = "imx_ele_ocotp",
+ .of_match_table = imx_ele_ocotp_dt_ids,
+ },
+ .probe = imx_ele_ocotp_probe,
+};
+module_platform_driver(imx_ele_ocotp_driver);
+
+MODULE_DESCRIPTION("i.MX OCOTP/ELE driver");
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
new file mode 100644
index 0000000000..c38d9c1c3f
--- /dev/null
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 OCOTP fusebox driver
+ *
+ * Copyright 2019 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define IMX_SIP_OTP_WRITE 0xc200000B
+
+enum ocotp_devtype {
+ IMX8QXP,
+ IMX8QM,
+};
+
+#define ECC_REGION BIT(0)
+#define HOLE_REGION BIT(1)
+
+struct ocotp_region {
+ u32 start;
+ u32 end;
+ u32 flag;
+};
+
+struct ocotp_devtype_data {
+ int devtype;
+ int nregs;
+ u32 num_region;
+ struct ocotp_region region[];
+};
+
+struct ocotp_priv {
+ struct device *dev;
+ const struct ocotp_devtype_data *data;
+ struct imx_sc_ipc *nvmem_ipc;
+};
+
+struct imx_sc_msg_misc_fuse_read {
+ struct imx_sc_rpc_msg hdr;
+ u32 word;
+} __packed;
+
+static DEFINE_MUTEX(scu_ocotp_mutex);
+
+static struct ocotp_devtype_data imx8qxp_data = {
+ .devtype = IMX8QXP,
+ .nregs = 800,
+ .num_region = 3,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x110, 0x21F, HOLE_REGION},
+ {0x220, 0x31F, ECC_REGION},
+ },
+};
+
+static struct ocotp_devtype_data imx8qm_data = {
+ .devtype = IMX8QM,
+ .nregs = 800,
+ .num_region = 2,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x1a0, 0x1ff, ECC_REGION},
+ },
+};
+
+static bool in_hole(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & HOLE_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool in_ecc(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & ECC_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
+ u32 *val)
+{
+ struct imx_sc_msg_misc_fuse_read msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ;
+ hdr->size = 2;
+
+ msg.word = word;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return ret;
+
+ *val = msg.word;
+
+ return 0;
+}
+
+static int imx_scu_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ u32 count, index, num_bytes;
+ u32 *buf;
+ void *p;
+ int i, ret;
+
+ index = offset;
+ num_bytes = round_up(bytes, 4);
+ count = num_bytes >> 2;
+
+ if (count > (priv->data->nregs - index))
+ count = priv->data->nregs - index;
+
+ p = kzalloc(num_bytes, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ mutex_lock(&scu_ocotp_mutex);
+
+ buf = p;
+
+ for (i = index; i < (index + count); i++) {
+ if (in_hole(context, i)) {
+ *buf++ = 0;
+ continue;
+ }
+
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
+ if (ret) {
+ mutex_unlock(&scu_ocotp_mutex);
+ kfree(p);
+ return ret;
+ }
+ buf++;
+ }
+
+ memcpy(val, (u8 *)p, bytes);
+
+ mutex_unlock(&scu_ocotp_mutex);
+
+ kfree(p);
+
+ return 0;
+}
+
+static int imx_scu_ocotp_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ struct arm_smccc_res res;
+ u32 *buf = val;
+ u32 tmp;
+ u32 index;
+ int ret;
+
+ /* allow only writing one complete OTP word at a time */
+ if (bytes != 4)
+ return -EINVAL;
+
+ index = offset;
+
+ if (in_hole(context, index))
+ return -EINVAL;
+
+ if (in_ecc(context, index)) {
+ pr_warn("ECC region, only program once\n");
+ mutex_lock(&scu_ocotp_mutex);
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp);
+ mutex_unlock(&scu_ocotp_mutex);
+ if (ret)
+ return ret;
+ if (tmp) {
+ pr_warn("ECC region, already has value: %x\n", tmp);
+ return -EIO;
+ }
+ }
+
+ mutex_lock(&scu_ocotp_mutex);
+
+ arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res);
+
+ mutex_unlock(&scu_ocotp_mutex);
+
+ return res.a0;
+}
+
+static struct nvmem_config imx_scu_ocotp_nvmem_config = {
+ .name = "imx-scu-ocotp",
+ .read_only = false,
+ .word_size = 4,
+ .stride = 1,
+ .owner = THIS_MODULE,
+ .reg_read = imx_scu_ocotp_read,
+ .reg_write = imx_scu_ocotp_write,
+};
+
+static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data },
+ { .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids);
+
+static int imx_scu_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocotp_priv *priv;
+ struct nvmem_device *nvmem;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = imx_scu_get_handle(&priv->nvmem_ipc);
+ if (ret)
+ return ret;
+
+ priv->data = of_device_get_match_data(dev);
+ priv->dev = dev;
+ imx_scu_ocotp_nvmem_config.size = 4 * priv->data->nregs;
+ imx_scu_ocotp_nvmem_config.dev = dev;
+ imx_scu_ocotp_nvmem_config.priv = priv;
+ nvmem = devm_nvmem_register(dev, &imx_scu_ocotp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver imx_scu_ocotp_driver = {
+ .probe = imx_scu_ocotp_probe,
+ .driver = {
+ .name = "imx_scu_ocotp",
+ .of_match_table = imx_scu_ocotp_dt_ids,
+ },
+};
+module_platform_driver(imx_scu_ocotp_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("i.MX8 SCU OCOTP fuse box driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
new file mode 100644
index 0000000000..e8b6f19492
--- /dev/null
+++ b/drivers/nvmem/imx-ocotp.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i.MX6 OCOTP fusebox driver
+ *
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * Copyright 2019 NXP
+ *
+ * Based on the barebox ocotp driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
+ * Orex Computed Radiography
+ *
+ * Write support based on the fsl_otp driver,
+ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the
+ * OTP Bank0 Word0
+ */
+#define IMX_OCOTP_OFFSET_PER_WORD 0x10 /* Offset between the start addr
+ * of two consecutive OTP words.
+ */
+
+#define IMX_OCOTP_ADDR_CTRL 0x0000
+#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
+#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
+#define IMX_OCOTP_ADDR_TIMING 0x0010
+#define IMX_OCOTP_ADDR_DATA0 0x0020
+#define IMX_OCOTP_ADDR_DATA1 0x0030
+#define IMX_OCOTP_ADDR_DATA2 0x0040
+#define IMX_OCOTP_ADDR_DATA3 0x0050
+
+#define IMX_OCOTP_BM_CTRL_ADDR 0x000000FF
+#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
+#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+
+#define IMX_OCOTP_BM_CTRL_ADDR_8MP 0x000001FF
+#define IMX_OCOTP_BM_CTRL_BUSY_8MP 0x00000200
+#define IMX_OCOTP_BM_CTRL_ERROR_8MP 0x00000400
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP 0x00000800
+
+#define IMX_OCOTP_BM_CTRL_DEFAULT \
+ { \
+ .bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \
+ .bm_busy = IMX_OCOTP_BM_CTRL_BUSY, \
+ .bm_error = IMX_OCOTP_BM_CTRL_ERROR, \
+ .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
+ }
+
+#define IMX_OCOTP_BM_CTRL_8MP \
+ { \
+ .bm_addr = IMX_OCOTP_BM_CTRL_ADDR_8MP, \
+ .bm_busy = IMX_OCOTP_BM_CTRL_BUSY_8MP, \
+ .bm_error = IMX_OCOTP_BM_CTRL_ERROR_8MP, \
+ .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP,\
+ }
+
+#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
+#define TIMING_STROBE_READ_NS 37 /* Min time before read */
+#define TIMING_RELAX_NS 17
+#define DEF_FSOURCE 1001 /* > 1000 ns */
+#define DEF_STROBE_PROG 10000 /* IPG clocks */
+#define IMX_OCOTP_WR_UNLOCK 0x3E770000
+#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
+
+static DEFINE_MUTEX(ocotp_mutex);
+
+struct ocotp_priv {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ const struct ocotp_params *params;
+ struct nvmem_config *config;
+};
+
+struct ocotp_ctrl_reg {
+ u32 bm_addr;
+ u32 bm_busy;
+ u32 bm_error;
+ u32 bm_rel_shadows;
+};
+
+struct ocotp_params {
+ unsigned int nregs;
+ unsigned int bank_address_words;
+ void (*set_timing)(struct ocotp_priv *priv);
+ struct ocotp_ctrl_reg ctrl;
+};
+
+static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
+{
+ int count;
+ u32 c, mask;
+ u32 bm_ctrl_busy, bm_ctrl_error;
+ void __iomem *base = priv->base;
+
+ bm_ctrl_busy = priv->params->ctrl.bm_busy;
+ bm_ctrl_error = priv->params->ctrl.bm_error;
+
+ mask = bm_ctrl_busy | bm_ctrl_error | flags;
+
+ for (count = 10000; count >= 0; count--) {
+ c = readl(base + IMX_OCOTP_ADDR_CTRL);
+ if (!(c & mask))
+ break;
+ cpu_relax();
+ }
+
+ if (count < 0) {
+ /* HW_OCOTP_CTRL[ERROR] will be set under the following
+ * conditions:
+ * - A write is performed to a shadow register during a shadow
+ * reload (essentially, while HW_OCOTP_CTRL[RELOAD_SHADOWS] is
+ * set). In addition, the contents of the shadow register shall
+ * not be updated.
+ * - A write is performed to a shadow register which has been
+ * locked.
+ * - A read is performed from a shadow register which has been
+ * read locked.
+ * - A program is performed to a fuse word which has been locked.
+ * - A read is performed from a fuse word which has been read
+ * locked.
+ */
+ if (c & bm_ctrl_error)
+ return -EPERM;
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv)
+{
+ u32 c, bm_ctrl_error;
+ void __iomem *base = priv->base;
+
+ bm_ctrl_error = priv->params->ctrl.bm_error;
+
+ c = readl(base + IMX_OCOTP_ADDR_CTRL);
+ if (!(c & bm_ctrl_error))
+ return;
+
+ writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR);
+}
+
+static int imx_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ unsigned int count;
+ u8 *buf, *p;
+ int i, ret;
+ u32 index, num_bytes;
+
+ index = offset >> 2;
+ num_bytes = round_up((offset % 4) + bytes, 4);
+ count = num_bytes >> 2;
+
+ if (count > (priv->params->nregs - index))
+ count = priv->params->nregs - index;
+
+ p = kzalloc(num_bytes, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ mutex_lock(&ocotp_mutex);
+
+ buf = p;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ mutex_unlock(&ocotp_mutex);
+ dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+ kfree(p);
+ return ret;
+ }
+
+ ret = imx_ocotp_wait_for_busy(priv, 0);
+ if (ret < 0) {
+ dev_err(priv->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+
+ for (i = index; i < (index + count); i++) {
+ *(u32 *)buf = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 +
+ i * IMX_OCOTP_OFFSET_PER_WORD);
+
+ /* 47.3.1.2
+ * For "read locked" registers 0xBADABADA will be returned and
+ * HW_OCOTP_CTRL[ERROR] will be set. It must be cleared by
+ * software before any new write, read or reload access can be
+ * issued
+ */
+ if (*((u32 *)buf) == IMX_OCOTP_READ_LOCKED_VAL)
+ imx_ocotp_clr_err_if_set(priv);
+
+ buf += 4;
+ }
+
+ index = offset % 4;
+ memcpy(val, &p[index], bytes);
+
+read_end:
+ clk_disable_unprepare(priv->clk);
+ mutex_unlock(&ocotp_mutex);
+
+ kfree(p);
+
+ return ret;
+}
+
+static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ unsigned int offset, void *data, size_t bytes)
+{
+ u8 *buf = data;
+ int i;
+
+ /* The mac-address cell is stored in the fuses in reversed byte
+ * order, so flip it into canonical order for consumers.
+ */
+ if (id && !strcmp(id, "mac-address"))
+ for (i = 0; i < bytes / 2; i++)
+ swap(buf[i], buf[bytes - i - 1]);
+
+ return 0;
+}
+
+static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate;
+ unsigned long strobe_read, relax, strobe_prog;
+ u32 timing;
+
+ /* 47.3.1.3.1
+ * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
+ * fields with timing values to match the current frequency of the
+ * ipg_clk. OTP writes will work at maximum bus frequencies as long
+ * as the HW_OCOTP_TIMING parameters are set correctly.
+ *
+ * Note: there are minimum timings required to ensure an OTP fuse burns
+ * correctly that are independent of the ipg_clk. Those values are not
+ * formally documented anywhere however, working from the minimum
+ * timings given in u-boot we can say:
+ *
+ * - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10
+ * microseconds feels about right as representative of a minimum time
+ * to physically burn out a fuse.
+ *
+ * - Minimum STROBE_READ, i.e. the time to wait post OTP fuse burn before
+ * performing another read, is 37 nanoseconds.
+ *
+ * - Minimum RELAX timing is 17 nanoseconds. This final RELAX minimum
+ * timing is not entirely clear; the documentation says "This
+ * count value specifies the time to add to all default timing
+ * parameters other than the Tpgm and Trd. It is given in number
+ * of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG
+ * and STROBE_READ respectively. What the other timing parameters
+ * are though, is not specified. Experience shows a zero RELAX
+ * value will mess up a re-load of the shadow registers post OTP
+ * burn.
+ */
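+ /*
+ * Worked example (assuming a typical 66 MHz ipg_clk, illustrative only):
+ * relax = DIV_ROUND_UP(66000000 * 17, 1000000000) - 1 = 1,
+ * strobe_read = DIV_ROUND_UP(66000000 * 37, 1000000000) + 2 * (1 + 1) - 1 = 6,
+ * strobe_prog = DIV_ROUND_CLOSEST(66000000 * 10, 1000000) + 2 * (1 + 1) - 1 = 663.
+ */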
+ clk_rate = clk_get_rate(priv->clk);
+
+ relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1;
+ strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS,
+ 1000000000);
+ strobe_read += 2 * (relax + 1) - 1;
+ strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US,
+ 1000000);
+ strobe_prog += 2 * (relax + 1) - 1;
+
+ timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000;
+ timing |= strobe_prog & 0x00000FFF;
+ timing |= (relax << 12) & 0x0000F000;
+ timing |= (strobe_read << 16) & 0x003F0000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
+static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate;
+ u64 fsource, strobe_prog;
+ u32 timing;
+
+ /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
+ * 6.4.3.3
+ */
+ clk_rate = clk_get_rate(priv->clk);
+ fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE,
+ NSEC_PER_SEC) + 1;
+ strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG,
+ NSEC_PER_SEC) + 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (fsource << 12) & 0x000FF000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
+static int imx_ocotp_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ u32 *buf = val;
+ int ret;
+
+ u32 ctrl;
+ u8 waddr;
+ u8 word = 0;
+
+ /* allow only writing one complete OTP word at a time */
+ if ((bytes != priv->config->word_size) ||
+ (offset % priv->config->word_size))
+ return -EINVAL;
+
+ mutex_lock(&ocotp_mutex);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ mutex_unlock(&ocotp_mutex);
+ dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+ return ret;
+ }
+
+ /* Setup the write timing values */
+ priv->params->set_timing(priv);
+
+ /* 47.3.1.3.2
+ * Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
+ * Overlapped accesses are not supported by the controller. Any pending
+ * write or reload must be completed before a write access can be
+ * requested.
+ */
+ ret = imx_ocotp_wait_for_busy(priv, 0);
+ if (ret < 0) {
+ dev_err(priv->dev, "timeout during timing setup\n");
+ goto write_end;
+ }
+
+ /* 47.3.1.3.3
+ * Write the requested address to HW_OCOTP_CTRL[ADDR] and program the
+ * unlock code into HW_OCOTP_CTRL[WR_UNLOCK]. This must be programmed
+ * for each write access. The lock code is documented in the register
+ * description. Both the unlock code and address can be written in the
+ * same operation.
+ */
+ if (priv->params->bank_address_words != 0) {
+ /*
+ * In banked/i.MX7 mode the OTP register bank goes into waddr
+ * see i.MX 7Solo Applications Processor Reference Manual, Rev.
+ * 0.1 section 6.4.3.1
+ */
+ offset = offset / priv->config->word_size;
+ waddr = offset / priv->params->bank_address_words;
+ word = offset & (priv->params->bank_address_words - 1);
+ } else {
+ /*
+ * Non-banked i.MX6 mode.
+ * OTP write/read address specifies one of 128 word address
+ * locations
+ */
+ waddr = offset / 4;
+ }
+
+ ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
+ ctrl &= ~priv->params->ctrl.bm_addr;
+ ctrl |= waddr & priv->params->ctrl.bm_addr;
+ ctrl |= IMX_OCOTP_WR_UNLOCK;
+
+ writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL);
+
+ /* 47.3.1.3.4
+ * Write the data to the HW_OCOTP_DATA register. This will automatically
+ * set HW_OCOTP_CTRL[BUSY] and clear HW_OCOTP_CTRL[WR_UNLOCK]. To
+ * protect programming same OTP bit twice, before program OCOTP will
+ * automatically read fuse value in OTP and use read value to mask
+ * program data. The controller will use masked program data to program
+ * a 32-bit word in the OTP per the address in HW_OCOTP_CTRL[ADDR]. Bit
+ * fields with 1's will result in that OTP bit being programmed. Bit
+ * fields with 0's will be ignored. At the same time that the write is
+ * accepted, the controller makes an internal copy of
+ * HW_OCOTP_CTRL[ADDR] which cannot be updated until the next write
+ * sequence is initiated. This copy guarantees that erroneous writes to
+ * HW_OCOTP_CTRL[ADDR] will not affect an active write operation. It
+ * should also be noted that during the programming HW_OCOTP_DATA will
+ * shift right (with zero fill). This shifting is required to program
+ * the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
+ * modified.
+ * Note: on i.MX7 there are four data fields to write for banked write
+ * with the fuse blowing operation only taking place after data0
+ * has been written. This is why data0 must always be the last
+ * register written.
+ */
+ if (priv->params->bank_address_words != 0) {
+ /* Banked/i.MX7 mode */
+ switch (word) {
+ case 0:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 1:
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 2:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 3:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ }
+ } else {
+ /* Non-banked i.MX6 mode */
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ }
+
+ /* 47.4.1.4.5
+ * Once complete, the controller will clear BUSY. A write request to a
+ * protected or locked region will result in no OTP access and no
+ * setting of HW_OCOTP_CTRL[BUSY]. In addition HW_OCOTP_CTRL[ERROR] will
+ * be set. It must be cleared by software before any new write access
+ * can be issued.
+ */
+ ret = imx_ocotp_wait_for_busy(priv, 0);
+ if (ret < 0) {
+ if (ret == -EPERM) {
+ dev_err(priv->dev, "failed write to locked region");
+ imx_ocotp_clr_err_if_set(priv);
+ } else {
+ dev_err(priv->dev, "timeout during data write\n");
+ }
+ goto write_end;
+ }
+
+ /* 47.3.1.4
+ * Write Postamble: Due to internal electrical characteristics of the
+ * OTP during writes, all OTP operations following a write must be
+ * separated by 2 us after the clearing of HW_OCOTP_CTRL_BUSY following
+ * the write.
+ */
+ udelay(2);
+
+ /* reload all shadow registers */
+ writel(priv->params->ctrl.bm_rel_shadows,
+ priv->base + IMX_OCOTP_ADDR_CTRL_SET);
+ ret = imx_ocotp_wait_for_busy(priv,
+ priv->params->ctrl.bm_rel_shadows);
+ if (ret < 0)
+ dev_err(priv->dev, "timeout during shadow register reload\n");
+
+write_end:
+ clk_disable_unprepare(priv->clk);
+ mutex_unlock(&ocotp_mutex);
+ return ret < 0 ? ret : bytes;
+}
+
+static struct nvmem_config imx_ocotp_nvmem_config = {
+ .name = "imx-ocotp",
+ .read_only = false,
+ .word_size = 4,
+ .stride = 1,
+ .reg_read = imx_ocotp_read,
+ .reg_write = imx_ocotp_write,
+};
+
+static const struct ocotp_params imx6q_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx6sl_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx6sll_params = {
+ .nregs = 80,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx6sx_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx6ul_params = {
+ .nregs = 144,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx6ull_params = {
+ .nregs = 80,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx7d_params = {
+ .nregs = 64,
+ .bank_address_words = 4,
+ .set_timing = imx_ocotp_set_imx7_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx7ulp_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx8mq_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx8mm_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx8mn_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+};
+
+static const struct ocotp_params imx8mp_params = {
+ .nregs = 384,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_8MP,
+};
+
+static const struct of_device_id imx_ocotp_dt_ids[] = {
+ { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
+ { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
+ { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
+ { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params },
+ { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
+ { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
+ { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
+ { .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
+ { .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
+ { .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
+ { .compatible = "fsl,imx8mp-ocotp", .data = &imx8mp_params },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
+
+static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_layout *layout,
+ struct nvmem_cell_info *cell)
+{
+ cell->read_post_process = imx_ocotp_cell_pp;
+}
+
+static struct nvmem_layout imx_ocotp_layout = {
+ .fixup_cell_info = imx_ocotp_fixup_cell_info,
+};
+
+static int imx_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocotp_priv *priv;
+ struct nvmem_device *nvmem;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->params = of_device_get_match_data(&pdev->dev);
+ imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
+ imx_ocotp_nvmem_config.dev = dev;
+ imx_ocotp_nvmem_config.priv = priv;
+ imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
+
+ priv->config = &imx_ocotp_nvmem_config;
+
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv);
+ clk_disable_unprepare(priv->clk);
+
+ nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver imx_ocotp_driver = {
+ .probe = imx_ocotp_probe,
+ .driver = {
+ .name = "imx_ocotp",
+ .of_match_table = imx_ocotp_dt_ids,
+ },
+};
+module_platform_driver(imx_ocotp_driver);
+
+MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c
new file mode 100644
index 0000000000..0b01b840ed
--- /dev/null
+++ b/drivers/nvmem/jz4780-efuse.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * JZ4780 EFUSE Memory Support driver
+ *
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 H. Nikolaus Schaller <hns@goldelico.com>
+ */
+
+/*
+ * Currently supports the JZ4780 efuse, which has 8K programmable bits.
+ * Efuse is separated into seven segments as below:
+ *
+ * -----------------------------------------------------------------------
+ * | 64 bit | 128 bit | 128 bit | 3520 bit | 8 bit | 2296 bit | 2048 bit |
+ * -----------------------------------------------------------------------
+ *
+ * The rom itself is accessed using a 9 bit address line and an 8 word wide bus
+ * which reads/writes based on strobes. The strobe is configured in the config
+ * register and is based on number of cycles of the bus clock.
+ *
+ * The driver supports reads only, as the writes are done in the factory.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/timer.h>
+
+#define JZ_EFUCTRL (0x0) /* Control Register */
+#define JZ_EFUCFG (0x4) /* Configure Register*/
+#define JZ_EFUSTATE (0x8) /* Status Register */
+#define JZ_EFUDATA(n) (0xC + (n) * 4)
+
+/* We read 32 byte chunks to avoid complexity in the driver. */
+#define JZ_EFU_READ_SIZE 32
+
+#define EFUCTRL_ADDR_MASK 0x3FF
+#define EFUCTRL_ADDR_SHIFT 21
+#define EFUCTRL_LEN_MASK 0x1F
+#define EFUCTRL_LEN_SHIFT 16
+#define EFUCTRL_PG_EN BIT(15)
+#define EFUCTRL_WR_EN BIT(1)
+#define EFUCTRL_RD_EN BIT(0)
+
+#define EFUCFG_INT_EN BIT(31)
+#define EFUCFG_RD_ADJ_MASK 0xF
+#define EFUCFG_RD_ADJ_SHIFT 20
+#define EFUCFG_RD_STR_MASK 0xF
+#define EFUCFG_RD_STR_SHIFT 16
+#define EFUCFG_WR_ADJ_MASK 0xF
+#define EFUCFG_WR_ADJ_SHIFT 12
+#define EFUCFG_WR_STR_MASK 0xFFF
+#define EFUCFG_WR_STR_SHIFT 0
+
+#define EFUSTATE_WR_DONE BIT(1)
+#define EFUSTATE_RD_DONE BIT(0)
+
+struct jz4780_efuse {
+ struct device *dev;
+ struct regmap *map;
+ struct clk *clk;
+};
+
+/* main entry point */
+static int jz4780_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct jz4780_efuse *efuse = context;
+
+ while (bytes > 0) {
+ size_t start = offset & ~(JZ_EFU_READ_SIZE - 1);
+ size_t chunk = min(bytes, (start + JZ_EFU_READ_SIZE)
+ - offset);
+ char buf[JZ_EFU_READ_SIZE];
+ unsigned int tmp;
+ u32 ctrl;
+ int ret;
+
+ ctrl = (start << EFUCTRL_ADDR_SHIFT)
+ | ((JZ_EFU_READ_SIZE - 1) << EFUCTRL_LEN_SHIFT)
+ | EFUCTRL_RD_EN;
+
+ regmap_update_bits(efuse->map, JZ_EFUCTRL,
+ (EFUCTRL_ADDR_MASK << EFUCTRL_ADDR_SHIFT) |
+ (EFUCTRL_LEN_MASK << EFUCTRL_LEN_SHIFT) |
+ EFUCTRL_PG_EN | EFUCTRL_WR_EN |
+ EFUCTRL_RD_EN,
+ ctrl);
+
+ ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE,
+ tmp, tmp & EFUSTATE_RD_DONE,
+ 1 * MSEC_PER_SEC,
+ 50 * MSEC_PER_SEC);
+ if (ret < 0) {
+ dev_err(efuse->dev, "Time out while reading efuse data");
+ return ret;
+ }
+
+ ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0),
+ buf, JZ_EFU_READ_SIZE / sizeof(u32));
+ if (ret < 0)
+ return ret;
+
+ memcpy(val, &buf[offset - start], chunk);
+
+ val += chunk;
+ offset += chunk;
+ bytes -= chunk;
+ }
+
+ return 0;
+}
+
+static struct nvmem_config jz4780_efuse_nvmem_config = {
+ .name = "jz4780-efuse",
+ .size = 1024,
+ .word_size = 1,
+ .stride = 1,
+ .owner = THIS_MODULE,
+ .reg_read = jz4780_efuse_read,
+};
+
+static const struct regmap_config jz4780_efuse_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = JZ_EFUDATA(7),
+};
+
+static void clk_disable_unprepare_helper(void *clock)
+{
+ clk_disable_unprepare(clock);
+}
+
+static int jz4780_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct jz4780_efuse *efuse;
+ struct nvmem_config cfg;
+ unsigned long clk_rate;
+ unsigned long rd_adj;
+ unsigned long rd_strobe;
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ int ret;
+
+ efuse = devm_kzalloc(dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ efuse->map = devm_regmap_init_mmio(dev, regs,
+ &jz4780_efuse_regmap_config);
+ if (IS_ERR(efuse->map))
+ return PTR_ERR(efuse->map);
+
+ efuse->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(efuse->clk))
+ return PTR_ERR(efuse->clk);
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ clk_disable_unprepare_helper,
+ efuse->clk);
+ if (ret < 0)
+ return ret;
+
+ clk_rate = clk_get_rate(efuse->clk);
+
+ efuse->dev = dev;
+
+ /*
+ * rd_adj and rd_strobe are 4 bit values
+ * conditions:
+ * bus clk_period * (rd_adj + 1) > 6.5ns
+ * bus clk_period * (rd_adj + 5 + rd_strobe) > 35ns
+ * i.e. rd_adj >= 6.5ns / clk_period
+ * i.e. rd_strobe >= 35 ns / clk_period - 5 - rd_adj + 1
+ * constants:
+ * 1 / 6.5ns == 153846154 Hz
+ * 1 / 35ns == 28571429 Hz
+ */
+
+ rd_adj = clk_rate / 153846154;
+ rd_strobe = clk_rate / 28571429 - 5 - rd_adj + 1;
+
+ if (rd_adj > EFUCFG_RD_ADJ_MASK ||
+ rd_strobe > EFUCFG_RD_STR_MASK) {
+ dev_err(&pdev->dev, "Cannot set clock configuration\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(efuse->map, JZ_EFUCFG,
+ (EFUCFG_RD_ADJ_MASK << EFUCFG_RD_ADJ_SHIFT) |
+ (EFUCFG_RD_STR_MASK << EFUCFG_RD_STR_SHIFT),
+ (rd_adj << EFUCFG_RD_ADJ_SHIFT) |
+ (rd_strobe << EFUCFG_RD_STR_SHIFT));
+
+ cfg = jz4780_efuse_nvmem_config;
+ cfg.dev = &pdev->dev;
+ cfg.priv = efuse;
+
+ nvmem = devm_nvmem_register(dev, &cfg);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id jz4780_efuse_match[] = {
+ { .compatible = "ingenic,jz4780-efuse" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jz4780_efuse_match);
+
+static struct platform_driver jz4780_efuse_driver = {
+ .probe = jz4780_efuse_probe,
+ .driver = {
+ .name = "jz4780-efuse",
+ .of_match_table = jz4780_efuse_match,
+ },
+};
+module_platform_driver(jz4780_efuse_driver);
+
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4780 efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lan9662-otpc.c b/drivers/nvmem/lan9662-otpc.c
new file mode 100644
index 0000000000..56fc19f092
--- /dev/null
+++ b/drivers/nvmem/lan9662-otpc.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define OTP_OTP_PWR_DN(t) (t + 0x00)
+#define OTP_OTP_PWR_DN_OTP_PWRDN_N BIT(0)
+#define OTP_OTP_ADDR_HI(t) (t + 0x04)
+#define OTP_OTP_ADDR_LO(t) (t + 0x08)
+#define OTP_OTP_PRGM_DATA(t) (t + 0x10)
+#define OTP_OTP_PRGM_MODE(t) (t + 0x14)
+#define OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE BIT(0)
+#define OTP_OTP_RD_DATA(t) (t + 0x18)
+#define OTP_OTP_FUNC_CMD(t) (t + 0x20)
+#define OTP_OTP_FUNC_CMD_OTP_PROGRAM BIT(1)
+#define OTP_OTP_FUNC_CMD_OTP_READ BIT(0)
+#define OTP_OTP_CMD_GO(t) (t + 0x28)
+#define OTP_OTP_CMD_GO_OTP_GO BIT(0)
+#define OTP_OTP_PASS_FAIL(t) (t + 0x2c)
+#define OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED BIT(3)
+#define OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED BIT(2)
+#define OTP_OTP_PASS_FAIL_OTP_FAIL BIT(0)
+#define OTP_OTP_STATUS(t) (t + 0x30)
+#define OTP_OTP_STATUS_OTP_CPUMPEN BIT(1)
+#define OTP_OTP_STATUS_OTP_BUSY BIT(0)
+
+#define OTP_MEM_SIZE 8192
+#define OTP_SLEEP_US 10
+#define OTP_TIMEOUT_US 500000
+
+struct lan9662_otp {
+ struct device *dev;
+ void __iomem *base;
+};
+
+static int lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
+{
+ u32 val;
+
+ return readl_poll_timeout(reg, val, !(val & flag),
+ OTP_SLEEP_US, OTP_TIMEOUT_US);
+}
+
+static int lan9662_otp_power(struct lan9662_otp *otp, bool up)
+{
+ void __iomem *pwrdn = OTP_OTP_PWR_DN(otp->base);
+
+ if (up) {
+ writel(readl(pwrdn) & ~OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+ OTP_OTP_STATUS_OTP_CPUMPEN))
+ return -ETIMEDOUT;
+ } else {
+ writel(readl(pwrdn) | OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+ }
+
+ return 0;
+}
+
+static int lan9662_otp_execute(struct lan9662_otp *otp)
+{
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_CMD_GO(otp->base),
+ OTP_OTP_CMD_GO_OTP_GO))
+ return -ETIMEDOUT;
+
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+ OTP_OTP_STATUS_OTP_BUSY))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void lan9662_otp_set_address(struct lan9662_otp *otp, u32 offset)
+{
+ writel(0xff & (offset >> 8), OTP_OTP_ADDR_HI(otp->base));
+ writel(0xff & offset, OTP_OTP_ADDR_LO(otp->base));
+}
+
+static int lan9662_otp_read_byte(struct lan9662_otp *otp, u32 offset, u8 *dst)
+{
+ u32 pass;
+ int rc;
+
+ lan9662_otp_set_address(otp, offset);
+ writel(OTP_OTP_FUNC_CMD_OTP_READ, OTP_OTP_FUNC_CMD(otp->base));
+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
+ rc = lan9662_otp_execute(otp);
+ if (!rc) {
+ pass = readl(OTP_OTP_PASS_FAIL(otp->base));
+ if (pass & OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED)
+ return -EACCES;
+ *dst = (u8) readl(OTP_OTP_RD_DATA(otp->base));
+ }
+ return rc;
+}
+
+static int lan9662_otp_write_byte(struct lan9662_otp *otp, u32 offset, u8 data)
+{
+ u32 pass;
+ int rc;
+
+ lan9662_otp_set_address(otp, offset);
+ writel(OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE, OTP_OTP_PRGM_MODE(otp->base));
+ writel(data, OTP_OTP_PRGM_DATA(otp->base));
+ writel(OTP_OTP_FUNC_CMD_OTP_PROGRAM, OTP_OTP_FUNC_CMD(otp->base));
+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
+
+ rc = lan9662_otp_execute(otp);
+ if (!rc) {
+ pass = readl(OTP_OTP_PASS_FAIL(otp->base));
+ if (pass & OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED)
+ return -EACCES;
+ if (pass & OTP_OTP_PASS_FAIL_OTP_FAIL)
+ return -EIO;
+ }
+ return rc;
+}
+
+static int lan9662_otp_read(void *context, unsigned int offset,
+ void *_val, size_t bytes)
+{
+ struct lan9662_otp *otp = context;
+ u8 *val = _val;
+ uint8_t data;
+ int i, rc = 0;
+
+ lan9662_otp_power(otp, true);
+ for (i = 0; i < bytes; i++) {
+ rc = lan9662_otp_read_byte(otp, offset + i, &data);
+ if (rc < 0)
+ break;
+ *val++ = data;
+ }
+ lan9662_otp_power(otp, false);
+
+ return rc;
+}
+
+static int lan9662_otp_write(void *context, unsigned int offset,
+ void *_val, size_t bytes)
+{
+ struct lan9662_otp *otp = context;
+ u8 *val = _val;
+ u8 data, newdata;
+ int i, rc = 0;
+
+ lan9662_otp_power(otp, true);
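+ /*
+ * OTP bits can only be programmed from 0 to 1, so each byte is OR-merged
+ * with what the array already holds and rewritten only when that changes
+ * something; all-zero input bytes are skipped outright.
+ */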
+ for (i = 0; i < bytes; i++) {
+ /* Skip zero bytes */
+ if (val[i]) {
+ rc = lan9662_otp_read_byte(otp, offset + i, &data);
+ if (rc < 0)
+ break;
+
+ newdata = data | val[i];
+ if (newdata == data)
+ continue;
+
+ rc = lan9662_otp_write_byte(otp, offset + i,
+ newdata);
+ if (rc < 0)
+ break;
+ }
+ }
+ lan9662_otp_power(otp, false);
+
+ return rc;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "lan9662-otp",
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = lan9662_otp_read,
+ .reg_write = lan9662_otp_write,
+ .size = OTP_MEM_SIZE,
+};
+
+static int lan9662_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct lan9662_otp *otp;
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id lan9662_otp_match[] = {
+ { .compatible = "microchip,lan9662-otpc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lan9662_otp_match);
+
+static struct platform_driver lan9662_otp_driver = {
+ .probe = lan9662_otp_probe,
+ .driver = {
+ .name = "lan9662-otp",
+ .of_match_table = lan9662_otp_match,
+ },
+};
+module_platform_driver(lan9662_otp_driver);
+
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_DESCRIPTION("lan9662 OTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/layerscape-sfp.c b/drivers/nvmem/layerscape-sfp.c
new file mode 100644
index 0000000000..e2b4245619
--- /dev/null
+++ b/drivers/nvmem/layerscape-sfp.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Layerscape SFP driver
+ *
+ * Copyright (c) 2022 Michael Walle <michael@walle.cc>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200
+
+struct layerscape_sfp_priv {
+ struct regmap *regmap;
+};
+
+struct layerscape_sfp_data {
+ int size;
+ enum regmap_endian endian;
+};
+
+static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct layerscape_sfp_priv *priv = context;
+
+ return regmap_bulk_read(priv->regmap,
+ LAYERSCAPE_SFP_OTP_OFFSET + offset, val,
+ bytes / 4);
+}
+
+static struct nvmem_config layerscape_sfp_nvmem_config = {
+ .name = "fsl-sfp",
+ .reg_read = layerscape_sfp_read,
+ .word_size = 4,
+ .stride = 4,
+};
+
+static int layerscape_sfp_probe(struct platform_device *pdev)
+{
+ const struct layerscape_sfp_data *data;
+ struct layerscape_sfp_priv *priv;
+ struct nvmem_device *nvmem;
+ struct regmap_config config = { 0 };
+ void __iomem *base;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ data = device_get_match_data(&pdev->dev);
+ config.reg_bits = 32;
+ config.reg_stride = 4;
+ config.val_bits = 32;
+ config.val_format_endian = data->endian;
+ config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4;
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ layerscape_sfp_nvmem_config.size = data->size;
+ layerscape_sfp_nvmem_config.dev = &pdev->dev;
+ layerscape_sfp_nvmem_config.priv = priv;
+
+ nvmem = devm_nvmem_register(&pdev->dev, &layerscape_sfp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct layerscape_sfp_data ls1021a_data = {
+ .size = 0x88,
+ .endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct layerscape_sfp_data ls1028a_data = {
+ .size = 0x88,
+ .endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct of_device_id layerscape_sfp_dt_ids[] = {
+ { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data },
+ { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, layerscape_sfp_dt_ids);
+
+static struct platform_driver layerscape_sfp_driver = {
+ .probe = layerscape_sfp_probe,
+ .driver = {
+ .name = "layerscape_sfp",
+ .of_match_table = layerscape_sfp_dt_ids,
+ },
+};
+module_platform_driver(layerscape_sfp_driver);
+
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("Layerscape Security Fuse Processor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig
new file mode 100644
index 0000000000..7ff1ee1c1f
--- /dev/null
+++ b/drivers/nvmem/layouts/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Layout Types"
+
+config NVMEM_LAYOUT_SL28_VPD
+ tristate "Kontron sl28 VPD layout support"
+ select CRC8
+ help
+ Say Y here if you want to support the VPD layout of the Kontron
+ SMARC-sAL28 boards.
+
+ If unsure, say N.
+
+config NVMEM_LAYOUT_ONIE_TLV
+ tristate "ONIE tlv support"
+ select CRC32
+ help
+ Say Y here if you want to support the Open Compute Project ONIE
+ Type-Length-Value standard table.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/nvmem/layouts/Makefile b/drivers/nvmem/layouts/Makefile
new file mode 100644
index 0000000000..2974bd7d33
--- /dev/null
+++ b/drivers/nvmem/layouts/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for nvmem layouts.
+#
+
+obj-$(CONFIG_NVMEM_LAYOUT_SL28_VPD) += sl28vpd.o
+obj-$(CONFIG_NVMEM_LAYOUT_ONIE_TLV) += onie-tlv.o
diff --git a/drivers/nvmem/layouts/onie-tlv.c b/drivers/nvmem/layouts/onie-tlv.c
new file mode 100644
index 0000000000..59fc87ccfc
--- /dev/null
+++ b/drivers/nvmem/layouts/onie-tlv.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ONIE tlv NVMEM cells provider
+ *
+ * Copyright (C) 2022 Open Compute Group ONIE
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ * Based on the nvmem driver written by: Vadym Kochan <vadym.kochan@plvision.eu>
+ * Inspired by the first layout written by: Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+
+#define ONIE_TLV_MAX_LEN 2048
+#define ONIE_TLV_CRC_FIELD_SZ 6
+#define ONIE_TLV_CRC_SZ 4
+#define ONIE_TLV_HDR_ID "TlvInfo"
+
+struct onie_tlv_hdr {
+ u8 id[8];
+ u8 version;
+ __be16 data_len;
+} __packed;
+
+struct onie_tlv {
+ u8 type;
+ u8 len;
+} __packed;
+
+static const char *onie_tlv_cell_name(u8 type)
+{
+ switch (type) {
+ case 0x21:
+ return "product-name";
+ case 0x22:
+ return "part-number";
+ case 0x23:
+ return "serial-number";
+ case 0x24:
+ return "mac-address";
+ case 0x25:
+ return "manufacture-date";
+ case 0x26:
+ return "device-version";
+ case 0x27:
+ return "label-revision";
+ case 0x28:
+ return "platform-name";
+ case 0x29:
+ return "onie-version";
+ case 0x2A:
+ return "num-macs";
+ case 0x2B:
+ return "manufacturer";
+ case 0x2C:
+ return "country-code";
+ case 0x2D:
+ return "vendor";
+ case 0x2E:
+ return "diag-version";
+ case 0x2F:
+ return "service-tag";
+ case 0xFD:
+ return "vendor-extension";
+ case 0xFE:
+ return "crc32";
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static int onie_tlv_mac_read_cb(void *priv, const char *id, int index,
+ unsigned int offset, void *buf,
+ size_t bytes)
+{
+ eth_addr_add(buf, index);
+
+ return 0;
+}
+
+static nvmem_cell_post_process_t onie_tlv_read_cb(u8 type, u8 *buf)
+{
+ switch (type) {
+ case 0x24:
+ return &onie_tlv_mac_read_cb;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static int onie_tlv_add_cells(struct device *dev, struct nvmem_device *nvmem,
+ size_t data_len, u8 *data)
+{
+ struct nvmem_cell_info cell = {};
+ struct device_node *layout;
+ struct onie_tlv tlv;
+ unsigned int hdr_len = sizeof(struct onie_tlv_hdr);
+ unsigned int offset = 0;
+ int ret;
+
+ layout = of_nvmem_layout_get_container(nvmem);
+ if (!layout)
+ return -ENOENT;
+
+ while (offset < data_len) {
+ memcpy(&tlv, data + offset, sizeof(tlv));
+ if (offset + tlv.len >= data_len) {
+ dev_err(dev, "Out of bounds field (0x%x bytes at 0x%x)\n",
+ tlv.len, hdr_len + offset);
+ break;
+ }
+
+ cell.name = onie_tlv_cell_name(tlv.type);
+ if (!cell.name) {
+ /* Unknown type: skip the field but keep walking the table */
+ offset += sizeof(tlv) + tlv.len;
+ continue;
+ }
+
+ cell.offset = hdr_len + offset + sizeof(tlv.type) + sizeof(tlv.len);
+ cell.bytes = tlv.len;
+ cell.np = of_get_child_by_name(layout, cell.name);
+ cell.read_post_process = onie_tlv_read_cb(tlv.type, data + offset + sizeof(tlv));
+
+ ret = nvmem_add_one_cell(nvmem, &cell);
+ if (ret) {
+ of_node_put(layout);
+ return ret;
+ }
+
+ offset += sizeof(tlv) + tlv.len;
+ }
+
+ of_node_put(layout);
+
+ return 0;
+}
+
+static bool onie_tlv_hdr_is_valid(struct device *dev, struct onie_tlv_hdr *hdr)
+{
+ if (memcmp(hdr->id, ONIE_TLV_HDR_ID, sizeof(hdr->id))) {
+ dev_err(dev, "Invalid header\n");
+ return false;
+ }
+
+ if (hdr->version != 0x1) {
+ dev_err(dev, "Invalid version number\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *table)
+{
+ struct onie_tlv crc_hdr;
+ u32 read_crc, calc_crc;
+ __be32 crc_be;
+
+ memcpy(&crc_hdr, table + table_len - ONIE_TLV_CRC_FIELD_SZ, sizeof(crc_hdr));
+ if (crc_hdr.type != 0xfe || crc_hdr.len != ONIE_TLV_CRC_SZ) {
+ dev_err(dev, "Invalid CRC field\n");
+ return false;
+ }
+
+	/*
+	 * The table contains a JAMCRC, which is XOR'ed compared to the
+	 * original CRC32 implementation as known in the Ethernet world.
+	 */
+ memcpy(&crc_be, table + table_len - ONIE_TLV_CRC_SZ, ONIE_TLV_CRC_SZ);
+ read_crc = be32_to_cpu(crc_be);
+ calc_crc = crc32(~0, table, table_len - ONIE_TLV_CRC_SZ) ^ 0xFFFFFFFF;
+ if (read_crc != calc_crc) {
+ dev_err(dev, "Invalid CRC read: 0x%08x, expected: 0x%08x\n",
+ read_crc, calc_crc);
+ return false;
+ }
+
+ return true;
+}
+
+static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem,
+ struct nvmem_layout *layout)
+{
+ struct onie_tlv_hdr hdr;
+ size_t table_len, data_len, hdr_len;
+ u8 *table, *data;
+ int ret;
+
+ ret = nvmem_device_read(nvmem, 0, sizeof(hdr), &hdr);
+ if (ret < 0)
+ return ret;
+
+ if (!onie_tlv_hdr_is_valid(dev, &hdr)) {
+ dev_err(dev, "Invalid ONIE TLV header\n");
+ return -EINVAL;
+ }
+
+ hdr_len = sizeof(hdr.id) + sizeof(hdr.version) + sizeof(hdr.data_len);
+ data_len = be16_to_cpu(hdr.data_len);
+ table_len = hdr_len + data_len;
+ if (table_len > ONIE_TLV_MAX_LEN) {
+ dev_err(dev, "Invalid ONIE TLV data length\n");
+ return -EINVAL;
+ }
+
+ table = devm_kmalloc(dev, table_len, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+	ret = nvmem_device_read(nvmem, 0, table_len, table);
+	if (ret < 0)
+		return ret;
+	if (ret != table_len)
+		return -EIO;
+
+ if (!onie_tlv_crc_is_valid(dev, table_len, table))
+ return -EINVAL;
+
+ data = table + hdr_len;
+ ret = onie_tlv_add_cells(dev, nvmem, data_len, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id onie_tlv_of_match_table[] = {
+ { .compatible = "onie,tlv-layout", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, onie_tlv_of_match_table);
+
+static struct nvmem_layout onie_tlv_layout = {
+ .name = "ONIE tlv layout",
+ .of_match_table = onie_tlv_of_match_table,
+ .add_cells = onie_tlv_parse_table,
+};
+module_nvmem_layout_driver(onie_tlv_layout);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("NVMEM layout driver for Onie TLV table parsing");
diff --git a/drivers/nvmem/layouts/sl28vpd.c b/drivers/nvmem/layouts/sl28vpd.c
new file mode 100644
index 0000000000..05671371f6
--- /dev/null
+++ b/drivers/nvmem/layouts/sl28vpd.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/crc8.h>
+#include <linux/etherdevice.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <uapi/linux/if_ether.h>
+
+#define SL28VPD_MAGIC 'V'
+
+struct sl28vpd_header {
+ u8 magic;
+ u8 version;
+} __packed;
+
+struct sl28vpd_v1 {
+ struct sl28vpd_header header;
+ char serial_number[15];
+ u8 base_mac_address[ETH_ALEN];
+ u8 crc8;
+} __packed;
+
+static int sl28vpd_mac_address_pp(void *priv, const char *id, int index,
+ unsigned int offset, void *buf,
+ size_t bytes)
+{
+ if (bytes != ETH_ALEN)
+ return -EINVAL;
+
+ if (index < 0)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(buf))
+ return -EINVAL;
+
+ eth_addr_add(buf, index);
+
+ return 0;
+}
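+
+/*
+ * Illustrative example for the post-processing above (hypothetical values):
+ * with a stored base MAC address of 00:11:22:33:44:55, a consumer requesting
+ * cell index 2 gets 00:11:22:33:44:57, i.e. eth_addr_add() offsets the base
+ * address by the requested index.
+ */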
+
+static const struct nvmem_cell_info sl28vpd_v1_entries[] = {
+ {
+ .name = "serial-number",
+ .offset = offsetof(struct sl28vpd_v1, serial_number),
+ .bytes = sizeof_field(struct sl28vpd_v1, serial_number),
+ },
+ {
+ .name = "base-mac-address",
+ .offset = offsetof(struct sl28vpd_v1, base_mac_address),
+ .bytes = sizeof_field(struct sl28vpd_v1, base_mac_address),
+ .read_post_process = sl28vpd_mac_address_pp,
+ },
+};
+
+static int sl28vpd_v1_check_crc(struct device *dev, struct nvmem_device *nvmem)
+{
+ struct sl28vpd_v1 data_v1;
+ u8 table[CRC8_TABLE_SIZE];
+ int ret;
+ u8 crc;
+
+ crc8_populate_msb(table, 0x07);
+
+ ret = nvmem_device_read(nvmem, 0, sizeof(data_v1), &data_v1);
+ if (ret < 0)
+ return ret;
+ else if (ret != sizeof(data_v1))
+ return -EIO;
+
+ crc = crc8(table, (void *)&data_v1, sizeof(data_v1) - 1, 0);
+
+ if (crc != data_v1.crc8) {
+ dev_err(dev,
+ "Checksum is invalid (got %02x, expected %02x).\n",
+ crc, data_v1.crc8);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem,
+ struct nvmem_layout *layout)
+{
+ const struct nvmem_cell_info *pinfo;
+ struct nvmem_cell_info info = {0};
+ struct device_node *layout_np;
+ struct sl28vpd_header hdr;
+ int ret, i;
+
+ /* check header */
+ ret = nvmem_device_read(nvmem, 0, sizeof(hdr), &hdr);
+ if (ret < 0)
+ return ret;
+ else if (ret != sizeof(hdr))
+ return -EIO;
+
+ if (hdr.magic != SL28VPD_MAGIC) {
+ dev_err(dev, "Invalid magic value (%02x)\n", hdr.magic);
+ return -EINVAL;
+ }
+
+ if (hdr.version != 1) {
+ dev_err(dev, "Version %d is unsupported.\n", hdr.version);
+ return -EINVAL;
+ }
+
+ ret = sl28vpd_v1_check_crc(dev, nvmem);
+ if (ret)
+ return ret;
+
+ layout_np = of_nvmem_layout_get_container(nvmem);
+ if (!layout_np)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(sl28vpd_v1_entries); i++) {
+ pinfo = &sl28vpd_v1_entries[i];
+
+ info.name = pinfo->name;
+ info.offset = pinfo->offset;
+ info.bytes = pinfo->bytes;
+ info.read_post_process = pinfo->read_post_process;
+ info.np = of_get_child_by_name(layout_np, pinfo->name);
+
+ ret = nvmem_add_one_cell(nvmem, &info);
+ if (ret) {
+ of_node_put(layout_np);
+ return ret;
+ }
+ }
+
+ of_node_put(layout_np);
+
+ return 0;
+}
+
+static const struct of_device_id sl28vpd_of_match_table[] = {
+ { .compatible = "kontron,sl28-vpd" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sl28vpd_of_match_table);
+
+static struct nvmem_layout sl28vpd_layout = {
+ .name = "sl28-vpd",
+ .of_match_table = sl28vpd_of_match_table,
+ .add_cells = sl28vpd_add_cells,
+};
+module_nvmem_layout_driver(sl28vpd_layout);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("NVMEM layout driver for the VPD of Kontron sl28 boards");
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
new file mode 100644
index 0000000000..a0275b29af
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Registers */
+#define LPC18XX_EEPROM_AUTOPROG 0x00c
+#define LPC18XX_EEPROM_AUTOPROG_WORD 0x1
+
+#define LPC18XX_EEPROM_CLKDIV 0x014
+
+#define LPC18XX_EEPROM_PWRDWN 0x018
+#define LPC18XX_EEPROM_PWRDWN_NO 0x0
+#define LPC18XX_EEPROM_PWRDWN_YES 0x1
+
+#define LPC18XX_EEPROM_INTSTAT 0xfe0
+#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2)
+
+#define LPC18XX_EEPROM_INTSTATCLR 0xfe8
+#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2)
+
+/* Fixed page size (bytes) */
+#define LPC18XX_EEPROM_PAGE_SIZE 0x80
+
+/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */
+#define LPC18XX_EEPROM_CLOCK_HZ 1500000
+
+/* EEPROM requires 3 ms of erase/program time between each write */
+#define LPC18XX_EEPROM_PROGRAM_TIME 3
+
+struct lpc18xx_eeprom_dev {
+ struct clk *clk;
+ void __iomem *reg_base;
+ void __iomem *mem_base;
+ struct nvmem_device *nvmem;
+ unsigned reg_bytes;
+ unsigned val_bytes;
+ int size;
+};
+
+static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
+ u32 reg, u32 val)
+{
+ writel(val, eeprom->reg_base + reg);
+}
+
+static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom,
+ u32 reg)
+{
+ return readl(eeprom->reg_base + reg);
+}
+
+static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
+{
+ unsigned long end;
+ u32 val;
+
+ /* Wait until EEPROM program operation has finished */
+ end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10);
+
+ while (time_is_after_jiffies(end)) {
+ val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT);
+
+ if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) {
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR,
+ LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST);
+ return 0;
+ }
+
+ usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC,
+ (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg,
+ void *val, size_t bytes)
+{
+ struct lpc18xx_eeprom_dev *eeprom = context;
+ unsigned int offset = reg;
+ int ret;
+
+ /*
+ * The last page contains the EEPROM initialization data and is not
+ * writable.
+ */
+ if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) ||
+ (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE))
+ return -EINVAL;
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_NO);
+
+ /* Wait 100 us while the EEPROM wakes up */
+ usleep_range(100, 200);
+
+ while (bytes) {
+ writel(*(u32 *)val, eeprom->mem_base + offset);
+ ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
+ if (ret < 0)
+ return ret;
+
+ bytes -= eeprom->val_bytes;
+ val += eeprom->val_bytes;
+ offset += eeprom->val_bytes;
+ }
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_YES);
+
+ return 0;
+}
+
+static int lpc18xx_eeprom_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct lpc18xx_eeprom_dev *eeprom = context;
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_NO);
+
+ /* Wait 100 us while the EEPROM wakes up */
+ usleep_range(100, 200);
+
+ while (bytes) {
+ *(u32 *)val = readl(eeprom->mem_base + offset);
+ bytes -= eeprom->val_bytes;
+ val += eeprom->val_bytes;
+ offset += eeprom->val_bytes;
+ }
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_YES);
+
+ return 0;
+}
+
+static struct nvmem_config lpc18xx_nvmem_config = {
+ .name = "lpc18xx-eeprom",
+ .stride = 4,
+ .word_size = 4,
+ .reg_read = lpc18xx_eeprom_read,
+ .reg_write = lpc18xx_eeprom_gather_write,
+};
+
+static int lpc18xx_eeprom_probe(struct platform_device *pdev)
+{
+ struct lpc18xx_eeprom_dev *eeprom;
+ struct device *dev = &pdev->dev;
+ struct reset_control *rst;
+ unsigned long clk_rate;
+ struct resource *res;
+ int ret;
+
+ eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
+ eeprom->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(eeprom->reg_base))
+ return PTR_ERR(eeprom->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+ eeprom->mem_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(eeprom->mem_base))
+ return PTR_ERR(eeprom->mem_base);
+
+ eeprom->clk = devm_clk_get(&pdev->dev, "eeprom");
+ if (IS_ERR(eeprom->clk)) {
+ dev_err(&pdev->dev, "failed to get eeprom clock\n");
+ return PTR_ERR(eeprom->clk);
+ }
+
+ ret = clk_prepare_enable(eeprom->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret);
+ return ret;
+ }
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
+ ret = PTR_ERR(rst);
+ goto err_clk;
+ }
+
+ ret = reset_control_assert(rst);
+ if (ret < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", ret);
+ goto err_clk;
+ }
+
+ eeprom->val_bytes = 4;
+ eeprom->reg_bytes = 4;
+
+	/*
+	 * The EEPROM clock is derived by dividing the system bus clock by the
+	 * division factor programmed in the divider register (the value is
+	 * stored minus one).
+	 */
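+	/*
+	 * Worked example (hypothetical bus clock): a 180 MHz system bus clock
+	 * gives DIV_ROUND_UP(180000000, 1500000) - 1 = 119, i.e. a
+	 * divide-by-120 and a 1.5 MHz EEPROM clock.
+	 */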
+ clk_rate = clk_get_rate(eeprom->clk);
+ clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
+
+ /*
+ * Writing a single word to the page will start the erase/program cycle
+ * automatically
+ */
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
+ LPC18XX_EEPROM_AUTOPROG_WORD);
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_YES);
+
+ eeprom->size = resource_size(res);
+ lpc18xx_nvmem_config.size = resource_size(res);
+ lpc18xx_nvmem_config.dev = dev;
+ lpc18xx_nvmem_config.priv = eeprom;
+
+ eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
+ if (IS_ERR(eeprom->nvmem)) {
+ ret = PTR_ERR(eeprom->nvmem);
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, eeprom);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(eeprom->clk);
+
+ return ret;
+}
+
+static int lpc18xx_eeprom_remove(struct platform_device *pdev)
+{
+ struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(eeprom->clk);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_eeprom_of_match[] = {
+ { .compatible = "nxp,lpc1857-eeprom" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
+
+static struct platform_driver lpc18xx_eeprom_driver = {
+ .probe = lpc18xx_eeprom_probe,
+ .remove = lpc18xx_eeprom_remove,
+ .driver = {
+ .name = "lpc18xx-eeprom",
+ .of_match_table = lpc18xx_eeprom_of_match,
+ },
+};
+
+module_platform_driver(lpc18xx_eeprom_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
new file mode 100644
index 0000000000..adc9948e7b
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NXP LPC18xx/43xx OTP memory NVMEM driver
+ *
+ * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on the imx ocotp driver,
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * TODO: add support for writing OTP register via API in boot ROM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * LPC18xx OTP memory contains 4 banks of 4 32-bit words each. Bank 0 starts
+ * at offset 0 from the base.
+ *
+ * Bank 0 contains the part ID on flashless devices and is reserved on
+ * devices with flash.
+ * Banks 1 and 2 are general purpose or AES key storage for secure devices.
+ * Bank 3 contains control data, USB ID and general purpose words.
+ */
+#define LPC18XX_OTP_NUM_BANKS 4
+#define LPC18XX_OTP_WORDS_PER_BANK 4
+#define LPC18XX_OTP_WORD_SIZE sizeof(u32)
+#define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \
+ LPC18XX_OTP_WORDS_PER_BANK * \
+ LPC18XX_OTP_WORD_SIZE)
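+
+/*
+ * Illustrative mapping (derived from the layout described above): each bank
+ * spans 16 bytes, so a read at byte offset 0x10 returns the first word of
+ * bank 1, a read at 0x34 the second word of bank 3, and so on.
+ */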
+
+struct lpc18xx_otp {
+ void __iomem *base;
+};
+
+static int lpc18xx_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct lpc18xx_otp *otp = context;
+ unsigned int count = bytes >> 2;
+ u32 index = offset >> 2;
+ u32 *buf = val;
+ int i;
+
+	if (count > (LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index))
+		count = LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index;
+
+ for (i = index; i < (index + count); i++)
+ *buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
+
+ return 0;
+}
+
+static struct nvmem_config lpc18xx_otp_nvmem_config = {
+ .name = "lpc18xx-otp",
+ .read_only = true,
+ .word_size = LPC18XX_OTP_WORD_SIZE,
+ .stride = LPC18XX_OTP_WORD_SIZE,
+ .reg_read = lpc18xx_otp_read,
+};
+
+static int lpc18xx_otp_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct lpc18xx_otp *otp;
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
+ lpc18xx_otp_nvmem_config.dev = &pdev->dev;
+ lpc18xx_otp_nvmem_config.priv = otp;
+
+ nvmem = devm_nvmem_register(&pdev->dev, &lpc18xx_otp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id lpc18xx_otp_dt_ids[] = {
+ { .compatible = "nxp,lpc1850-otp" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
+
+static struct platform_driver lpc18xx_otp_driver = {
+ .probe = lpc18xx_otp_probe,
+ .driver = {
+ .name = "lpc18xx_otp",
+ .of_match_table = lpc18xx_otp_dt_ids,
+ },
+};
+module_platform_driver(lpc18xx_otp_driver);
+
+MODULE_AUTHOR("Joachim Eastwoood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
new file mode 100644
index 0000000000..d6b533497c
--- /dev/null
+++ b/drivers/nvmem/meson-efuse.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Meson GX eFuse Driver
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+static int meson_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct meson_sm_firmware *fw = context;
+
+ return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
+ bytes, 0, 0, 0);
+}
+
+static int meson_efuse_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct meson_sm_firmware *fw = context;
+
+ return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+ bytes, 0, 0, 0);
+}
+
+static const struct of_device_id meson_efuse_match[] = {
+ { .compatible = "amlogic,meson-gxbb-efuse", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_efuse_match);
+
+static int meson_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_sm_firmware *fw;
+ struct device_node *sm_np;
+ struct nvmem_device *nvmem;
+ struct nvmem_config *econfig;
+ struct clk *clk;
+ unsigned int size;
+ int ret;
+
+ sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
+ if (!sm_np) {
+ dev_err(&pdev->dev, "no secure-monitor node\n");
+ return -ENODEV;
+ }
+
+ fw = meson_sm_get(sm_np);
+ of_node_put(sm_np);
+ if (!fw)
+ return -EPROBE_DEFER;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get efuse gate");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable gate");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret) {
+ dev_err(dev, "failed to add disable callback");
+ return ret;
+ }
+
+ if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
+ dev_err(dev, "failed to get max user");
+ return -EINVAL;
+ }
+
+ econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
+ if (!econfig)
+ return -ENOMEM;
+
+ econfig->dev = dev;
+ econfig->name = dev_name(dev);
+ econfig->stride = 1;
+ econfig->word_size = 1;
+ econfig->reg_read = meson_efuse_read;
+ econfig->reg_write = meson_efuse_write;
+ econfig->size = size;
+ econfig->priv = fw;
+
+ nvmem = devm_nvmem_register(&pdev->dev, econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver meson_efuse_driver = {
+ .probe = meson_efuse_probe,
+ .driver = {
+ .name = "meson-efuse",
+ .of_match_table = meson_efuse_match,
+ },
+};
+
+module_platform_driver(meson_efuse_driver);
+
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
new file mode 100644
index 0000000000..d6d7aeda31
--- /dev/null
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Meson6, Meson8 and Meson8b eFuse Driver
+ *
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#define MESON_MX_EFUSE_CNTL1 0x04
+#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24)
+#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0)
+
+#define MESON_MX_EFUSE_CNTL2 0x08
+
+#define MESON_MX_EFUSE_CNTL4 0x10
+#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10)
+
+struct meson_mx_efuse_platform_data {
+ const char *name;
+ unsigned int word_size;
+};
+
+struct meson_mx_efuse {
+ void __iomem *base;
+ struct clk *core_clk;
+ struct nvmem_device *nvmem;
+ struct nvmem_config config;
+};
+
+static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg,
+ u32 mask, u32 set)
+{
+ u32 data;
+
+ data = readl(efuse->base + reg);
+ data &= ~mask;
+ data |= (set & mask);
+
+ writel(data, efuse->base + reg);
+}
+
+static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse)
+{
+ int err;
+
+ err = clk_prepare_enable(efuse->core_clk);
+ if (err)
+ return err;
+
+ /* power up the efuse */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0);
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4,
+ MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0);
+
+ return 0;
+}
+
+static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse)
+{
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE);
+
+ clk_disable_unprepare(efuse->core_clk);
+}
+
+static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse,
+ unsigned int addr, u32 *value)
+{
+ int err;
+ u32 regval;
+
+ /* write the address to read */
+ regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval);
+
+ /* inform the hardware that we changed the address */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0);
+
+ /* start the read process */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0);
+
+ /*
+ * perform a dummy read to ensure that the HW has the RD_BUSY bit set
+ * when polling for the status below.
+ */
+ readl(efuse->base + MESON_MX_EFUSE_CNTL1);
+
+ err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1,
+ regval,
+ (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)),
+ 1, 1000);
+ if (err) {
+ dev_err(efuse->config.dev,
+ "Timeout while reading efuse address %u\n", addr);
+ return err;
+ }
+
+ *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2);
+
+ return 0;
+}
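+
+/*
+ * Illustrative addressing for the read below (hypothetical offset): with a
+ * word size of 4 (Meson8/8b), a 4-byte read at offset 8 translates to eFuse
+ * word address 8 / 4 = 2; on Meson6 (word size 1) offsets map directly to
+ * byte addresses.
+ */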
+
+static int meson_mx_efuse_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct meson_mx_efuse *efuse = context;
+ u32 tmp;
+ int err, i, addr;
+
+ err = meson_mx_efuse_hw_enable(efuse);
+ if (err)
+ return err;
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
+
+ for (i = 0; i < bytes; i += efuse->config.word_size) {
+ addr = (offset + i) / efuse->config.word_size;
+
+ err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
+ if (err)
+ break;
+
+ memcpy(buf + i, &tmp,
+ min_t(size_t, bytes - i, efuse->config.word_size));
+ }
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0);
+
+ meson_mx_efuse_hw_disable(efuse);
+
+ return err;
+}
+
+static const struct meson_mx_efuse_platform_data meson6_efuse_data = {
+ .name = "meson6-efuse",
+ .word_size = 1,
+};
+
+static const struct meson_mx_efuse_platform_data meson8_efuse_data = {
+ .name = "meson8-efuse",
+ .word_size = 4,
+};
+
+static const struct meson_mx_efuse_platform_data meson8b_efuse_data = {
+ .name = "meson8b-efuse",
+ .word_size = 4,
+};
+
+static const struct of_device_id meson_mx_efuse_match[] = {
+ { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data },
+ { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data },
+ { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_mx_efuse_match);
+
+static int meson_mx_efuse_probe(struct platform_device *pdev)
+{
+ const struct meson_mx_efuse_platform_data *drvdata;
+ struct meson_mx_efuse *efuse;
+
+ drvdata = of_device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ efuse->config.name = drvdata->name;
+ efuse->config.owner = THIS_MODULE;
+ efuse->config.dev = &pdev->dev;
+ efuse->config.priv = efuse;
+ efuse->config.stride = drvdata->word_size;
+ efuse->config.word_size = drvdata->word_size;
+ efuse->config.size = SZ_512;
+ efuse->config.read_only = true;
+ efuse->config.reg_read = meson_mx_efuse_read;
+
+ efuse->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(efuse->core_clk)) {
+ dev_err(&pdev->dev, "Failed to get core clock\n");
+ return PTR_ERR(efuse->core_clk);
+ }
+
+ efuse->nvmem = devm_nvmem_register(&pdev->dev, &efuse->config);
+
+ return PTR_ERR_OR_ZERO(efuse->nvmem);
+}
+
+static struct platform_driver meson_mx_efuse_driver = {
+ .probe = meson_mx_efuse_probe,
+ .driver = {
+ .name = "meson-mx-efuse",
+ .of_match_table = meson_mx_efuse_match,
+ },
+};
+
+module_platform_driver(meson_mx_efuse_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/microchip-otpc.c b/drivers/nvmem/microchip-otpc.c
new file mode 100644
index 0000000000..436e0dc4f3
--- /dev/null
+++ b/drivers/nvmem/microchip-otpc.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OTP Memory controller
+ *
+ * Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define MCHP_OTPC_CR (0x0)
+#define MCHP_OTPC_CR_READ BIT(6)
+#define MCHP_OTPC_MR (0x4)
+#define MCHP_OTPC_MR_ADDR GENMASK(31, 16)
+#define MCHP_OTPC_AR (0x8)
+#define MCHP_OTPC_SR (0xc)
+#define MCHP_OTPC_SR_READ BIT(6)
+#define MCHP_OTPC_HR (0x20)
+#define MCHP_OTPC_HR_SIZE GENMASK(15, 8)
+#define MCHP_OTPC_DR (0x24)
+
+#define MCHP_OTPC_NAME "mchp-otpc"
+#define MCHP_OTPC_SIZE (11 * 1024)
+
+/**
+ * struct mchp_otpc - OTPC private data structure
+ * @base: base address
+ * @dev: struct device pointer
+ * @packets: list of packets in OTP memory
+ * @npackets: number of packets in OTP memory
+ */
+struct mchp_otpc {
+ void __iomem *base;
+ struct device *dev;
+ struct list_head packets;
+ u32 npackets;
+};
+
+/**
+ * struct mchp_otpc_packet - OTPC packet data structure
+ * @list: list head
+ * @id: packet ID
+ * @offset: packet offset (in words) in OTP memory
+ */
+struct mchp_otpc_packet {
+ struct list_head list;
+ u32 id;
+ u32 offset;
+};
+
+static struct mchp_otpc_packet *mchp_otpc_id_to_packet(struct mchp_otpc *otpc,
+ u32 id)
+{
+ struct mchp_otpc_packet *packet;
+
+ if (id >= otpc->npackets)
+ return NULL;
+
+ list_for_each_entry(packet, &otpc->packets, list) {
+ if (packet->id == id)
+ return packet;
+ }
+
+ return NULL;
+}
+
+static int mchp_otpc_prepare_read(struct mchp_otpc *otpc,
+ unsigned int offset)
+{
+ u32 tmp;
+
+ /* Set address. */
+ tmp = readl_relaxed(otpc->base + MCHP_OTPC_MR);
+ tmp &= ~MCHP_OTPC_MR_ADDR;
+ tmp |= FIELD_PREP(MCHP_OTPC_MR_ADDR, offset);
+ writel_relaxed(tmp, otpc->base + MCHP_OTPC_MR);
+
+ /* Set read. */
+ tmp = readl_relaxed(otpc->base + MCHP_OTPC_CR);
+ tmp |= MCHP_OTPC_CR_READ;
+ writel_relaxed(tmp, otpc->base + MCHP_OTPC_CR);
+
+ /* Wait for packet to be transferred into temporary buffers. */
+ return read_poll_timeout(readl_relaxed, tmp, !(tmp & MCHP_OTPC_SR_READ),
+ 10000, 2000, false, otpc->base + MCHP_OTPC_SR);
+}
+
+/*
+ * OTPC memory is organized into packets. Each packet contains a header and
+ * a payload. The header is 4 bytes long and contains the size of the payload.
+ * The payload size varies. The memory layout is as follows:
+ *
+ * Memory offset Memory footprint Packet ID
+ * ------------- ---------------- ---------
+ *
+ * 0x0 +------------+ <-- packet 0
+ * | header 0 |
+ * 0x4 +------------+
+ * | payload 0 |
+ * . .
+ * . ... .
+ * . .
+ * offset1 +------------+ <-- packet 1
+ * | header 1 |
+ * offset1 + 0x4 +------------+
+ * | payload 1 |
+ * . .
+ * . ... .
+ * . .
+ * offset2 +------------+ <-- packet 2
+ * . .
+ * . ... .
+ * . .
+ * offsetN +------------+ <-- packet N
+ * | header N |
+ * offsetN + 0x4 +------------+
+ * | payload N |
+ * . .
+ * . ... .
+ * . .
+ * +------------+
+ *
+ * where offset1, offset2, ..., offsetN depend on the sizes of payload 0,
+ * payload 1, ..., payload N-1.
+ *
+ * Memory is accessed on a per-packet basis: the control registers are
+ * programmed with an offset address (anywhere within a packet's range) and
+ * the data registers are then updated by the controller with the contents of
+ * that packet. E.g. if the control registers are programmed with any address
+ * in the range [offset1, offset2), the data registers are updated by the
+ * controller with packet 1. Header data is accessible through the
+ * MCHP_OTPC_HR register. Payload data is accessible through the MCHP_OTPC_DR
+ * and MCHP_OTPC_AR registers. There is no direct mapping between the offset
+ * requested by software and the offset returned by hardware.
+ *
+ * Because of this, the read function returns the first requested bytes of
+ * the packet. The user has to be aware of the memory layout before issuing
+ * the read request.
+ */
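+/*
+ * Illustrative usage (hypothetical packet id): a consumer reading at NVMEM
+ * offset 8 (stride 4, hence packet id 8 / 4 = 2) gets back packet 2, starting
+ * with its header word followed by as many payload words as were requested.
+ */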
+static int mchp_otpc_read(void *priv, unsigned int off, void *val,
+ size_t bytes)
+{
+ struct mchp_otpc *otpc = priv;
+ struct mchp_otpc_packet *packet;
+ u32 *buf = val;
+ u32 offset;
+ size_t len = 0;
+ int ret, payload_size;
+
+	/*
+	 * The nvmem core only passes offsets that are multiples of the
+	 * stride (4). Inside the driver, packets are identified by
+	 * consecutive unsigned integers, so divide off by 4 before passing
+	 * it to mchp_otpc_id_to_packet().
+	 */
+ packet = mchp_otpc_id_to_packet(otpc, off / 4);
+ if (!packet)
+ return -EINVAL;
+ offset = packet->offset;
+
+ while (len < bytes) {
+ ret = mchp_otpc_prepare_read(otpc, offset);
+ if (ret)
+ return ret;
+
+ /* Read and save header content. */
+ *buf++ = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+ len += sizeof(*buf);
+ offset++;
+ if (len >= bytes)
+ break;
+
+ /* Read and save payload content. */
+ payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, *(buf - 1));
+ writel_relaxed(0UL, otpc->base + MCHP_OTPC_AR);
+ do {
+ *buf++ = readl_relaxed(otpc->base + MCHP_OTPC_DR);
+ len += sizeof(*buf);
+ offset++;
+ payload_size--;
+ } while (payload_size >= 0 && len < bytes);
+ }
+
+ return 0;
+}
+
+static int mchp_otpc_init_packets_list(struct mchp_otpc *otpc, u32 *size)
+{
+ struct mchp_otpc_packet *packet;
+ u32 word, word_pos = 0, id = 0, npackets = 0, payload_size;
+ int ret;
+
+ INIT_LIST_HEAD(&otpc->packets);
+ *size = 0;
+
+ while (*size < MCHP_OTPC_SIZE) {
+ ret = mchp_otpc_prepare_read(otpc, word_pos);
+ if (ret)
+ return ret;
+
+ word = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+ payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, word);
+ if (!payload_size)
+ break;
+
+ packet = devm_kzalloc(otpc->dev, sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->id = id++;
+ packet->offset = word_pos;
+ INIT_LIST_HEAD(&packet->list);
+ list_add_tail(&packet->list, &otpc->packets);
+
+		/* Count size by adding header and payload sizes. */
+ *size += 4 * (payload_size + 1);
+ /* Next word: this packet (header, payload) position + 1. */
+ word_pos += payload_size + 2;
+
+ npackets++;
+ }
+
+ otpc->npackets = npackets;
+
+ return 0;
+}
+
+static struct nvmem_config mchp_nvmem_config = {
+ .name = MCHP_OTPC_NAME,
+ .type = NVMEM_TYPE_OTP,
+ .read_only = true,
+ .word_size = 4,
+ .stride = 4,
+ .reg_read = mchp_otpc_read,
+};
+
+static int mchp_otpc_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct mchp_otpc *otpc;
+ u32 size;
+ int ret;
+
+ otpc = devm_kzalloc(&pdev->dev, sizeof(*otpc), GFP_KERNEL);
+ if (!otpc)
+ return -ENOMEM;
+
+ otpc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otpc->base))
+ return PTR_ERR(otpc->base);
+
+ otpc->dev = &pdev->dev;
+ ret = mchp_otpc_init_packets_list(otpc, &size);
+ if (ret)
+ return ret;
+
+ mchp_nvmem_config.dev = otpc->dev;
+ mchp_nvmem_config.size = size;
+ mchp_nvmem_config.priv = otpc;
+ nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id __maybe_unused mchp_otpc_ids[] = {
+ { .compatible = "microchip,sama7g5-otpc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mchp_otpc_ids);
+
+static struct platform_driver mchp_otpc_driver = {
+ .probe = mchp_otpc_probe,
+ .driver = {
+ .name = MCHP_OTPC_NAME,
+ .of_match_table = of_match_ptr(mchp_otpc_ids),
+ },
+};
+module_platform_driver(mchp_otpc_driver);
+
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
+MODULE_DESCRIPTION("Microchip SAMA7G5 OTPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
new file mode 100644
index 0000000000..b36cd0dcc8
--- /dev/null
+++ b/drivers/nvmem/mtk-efuse.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/io.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct mtk_efuse_pdata {
+ bool uses_post_processing;
+};
+
+struct mtk_efuse_priv {
+ void __iomem *base;
+};
+
+static int mtk_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct mtk_efuse_priv *priv = context;
+ void __iomem *addr = priv->base + reg;
+ u8 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes; i++, val++)
+ *val = readb(addr + i);
+
+ return 0;
+}
+
+static int mtk_efuse_gpu_speedbin_pp(void *context, const char *id, int index,
+ unsigned int offset, void *data, size_t bytes)
+{
+ u8 *val = data;
+
+ if (val[0] < 8)
+ val[0] = BIT(val[0]);
+
+ return 0;
+}
+
+static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_layout *layout,
+ struct nvmem_cell_info *cell)
+{
+ size_t sz = strlen(cell->name);
+
+ /*
+ * On some SoCs, the GPU speedbin is not read as bitmask but as
+ * a number with range [0-7] (max 3 bits): post process to use
+ * it in OPP tables to describe supported-hw.
+ */
+ if (cell->nbits <= 3 &&
+ strncmp(cell->name, "gpu-speedbin", min(sz, strlen("gpu-speedbin"))) == 0)
+ cell->read_post_process = mtk_efuse_gpu_speedbin_pp;
+}
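+
+/*
+ * Illustrative example of the post-processing above (hypothetical fuse
+ * value): a raw gpu-speedbin value of 2 becomes BIT(2) = 0x4, which the OPP
+ * core can then match against an opp-supported-hw bitmask.
+ */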
+
+static struct nvmem_layout mtk_efuse_layout = {
+ .fixup_cell_info = mtk_efuse_fixup_cell_info,
+};
+
+static int mtk_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = {};
+ struct mtk_efuse_priv *priv;
+ const struct mtk_efuse_pdata *pdata;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ pdata = device_get_match_data(dev);
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.reg_read = mtk_reg_read;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ if (pdata->uses_post_processing)
+ econfig.layout = &mtk_efuse_layout;
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct mtk_efuse_pdata mtk_mt8186_efuse_pdata = {
+ .uses_post_processing = true,
+};
+
+static const struct mtk_efuse_pdata mtk_efuse_pdata = {
+ .uses_post_processing = false,
+};
+
+static const struct of_device_id mtk_efuse_of_match[] = {
+ { .compatible = "mediatek,mt8173-efuse", .data = &mtk_efuse_pdata },
+ { .compatible = "mediatek,mt8186-efuse", .data = &mtk_mt8186_efuse_pdata },
+ { .compatible = "mediatek,efuse", .data = &mtk_efuse_pdata },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
+
+static struct platform_driver mtk_efuse_driver = {
+ .probe = mtk_efuse_probe,
+ .driver = {
+ .name = "mediatek,efuse",
+ .of_match_table = mtk_efuse_of_match,
+ },
+};
+
+static int __init mtk_efuse_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mtk_efuse_driver);
+ if (ret) {
+ pr_err("Failed to register efuse driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mtk_efuse_exit(void)
+{
+	platform_driver_unregister(&mtk_efuse_driver);
+}
+
+subsys_initcall(mtk_efuse_init);
+module_exit(mtk_efuse_exit);
+
+MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek EFUSE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
new file mode 100644
index 0000000000..588ab56d75
--- /dev/null
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale MXS On-Chip OTP driver
+ *
+ * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com>
+ *
+ * Based on the driver from Huang Shijie and Christoph G. Baumann
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/stmp_device.h>
+
+/* OCOTP registers and bits */
+
+#define BM_OCOTP_CTRL_RD_BANK_OPEN BIT(12)
+#define BM_OCOTP_CTRL_ERROR BIT(9)
+#define BM_OCOTP_CTRL_BUSY BIT(8)
+
+#define OCOTP_TIMEOUT 10000
+#define OCOTP_DATA_OFFSET 0x20
+
+struct mxs_ocotp {
+ struct clk *clk;
+ void __iomem *base;
+ struct nvmem_device *nvmem;
+};
+
+static int mxs_ocotp_wait(struct mxs_ocotp *otp)
+{
+ int timeout = OCOTP_TIMEOUT;
+ unsigned int status = 0;
+
+ while (timeout--) {
+ status = readl(otp->base);
+
+ if (!(status & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)))
+ break;
+
+ cpu_relax();
+ }
+
+ if (status & BM_OCOTP_CTRL_BUSY)
+ return -EBUSY;
+ else if (status & BM_OCOTP_CTRL_ERROR)
+ return -EIO;
+
+ return 0;
+}
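+
+/*
+ * Illustrative read layout for mxs_ocotp_read() below: with a stride of 16,
+ * a 16-byte read at offset 0x20 yields the OCOTP word stored at 0x20 followed
+ * by three zero words, since only offsets that are >= 0x20 and 16-byte
+ * aligned map to real OTP data.
+ */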
+
+static int mxs_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct mxs_ocotp *otp = context;
+ u32 *buf = val;
+ int ret;
+
+ ret = clk_enable(otp->clk);
+ if (ret)
+ return ret;
+
+ writel(BM_OCOTP_CTRL_ERROR, otp->base + STMP_OFFSET_REG_CLR);
+
+ ret = mxs_ocotp_wait(otp);
+ if (ret)
+ goto disable_clk;
+
+ /* open OCOTP banks for read */
+ writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_SET);
+
+	/* wait approximately 33 hclk cycles */
+ udelay(1);
+
+ ret = mxs_ocotp_wait(otp);
+ if (ret)
+ goto close_banks;
+
+ while (bytes) {
+ if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
+ /* fill up non-data register */
+ *buf++ = 0;
+ } else {
+ *buf++ = readl(otp->base + offset);
+ }
+
+ bytes -= 4;
+ offset += 4;
+ }
+
+close_banks:
+ /* close banks for power saving */
+ writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_CLR);
+
+disable_clk:
+ clk_disable(otp->clk);
+
+ return ret;
+}
+
+static struct nvmem_config ocotp_config = {
+ .name = "mxs-ocotp",
+ .stride = 16,
+ .word_size = 4,
+ .reg_read = mxs_ocotp_read,
+};
+
+struct mxs_data {
+ int size;
+};
+
+static const struct mxs_data imx23_data = {
+ .size = 0x220,
+};
+
+static const struct mxs_data imx28_data = {
+ .size = 0x2a0,
+};
+
+static const struct of_device_id mxs_ocotp_match[] = {
+ { .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
+ { .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
+ { /* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
+
+static void mxs_ocotp_action(void *data)
+{
+ clk_unprepare(data);
+}
+
+static int mxs_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct mxs_data *data;
+ struct mxs_ocotp *otp;
+ const struct of_device_id *match;
+ int ret;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(otp->clk))
+ return PTR_ERR(otp->clk);
+
+ ret = clk_prepare(otp->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_ocotp_action, otp->clk);
+ if (ret)
+ return ret;
+
+ data = match->data;
+
+ ocotp_config.size = data->size;
+ ocotp_config.priv = otp;
+ ocotp_config.dev = dev;
+ otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
+ if (IS_ERR(otp->nvmem))
+ return PTR_ERR(otp->nvmem);
+
+ platform_set_drvdata(pdev, otp);
+
+ return 0;
+}
+
+static struct platform_driver mxs_ocotp_driver = {
+ .probe = mxs_ocotp_probe,
+ .driver = {
+ .name = "mxs-ocotp",
+ .of_match_table = mxs_ocotp_match,
+ },
+};
+
+module_platform_driver(mxs_ocotp_driver);
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net");
+MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/nintendo-otp.c b/drivers/nvmem/nintendo-otp.c
new file mode 100644
index 0000000000..355e7f1fc6
--- /dev/null
+++ b/drivers/nvmem/nintendo-otp.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Nintendo Wii and Wii U OTP driver
+ *
+ * This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
+ *
+ * This memory contains common and per-console keys, signatures and
+ * related data required to access peripherals.
+ *
+ * Based on reversed documentation from https://wiiubrew.org/wiki/Hardware/OTP
+ *
+ * Copyright (C) 2021 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define HW_OTPCMD 0
+#define HW_OTPDATA 4
+#define OTP_READ 0x80000000
+#define BANK_SIZE 128
+#define WORD_SIZE 4
+
+struct nintendo_otp_priv {
+ void __iomem *regs;
+};
+
+struct nintendo_otp_devtype_data {
+ const char *name;
+ unsigned int num_banks;
+};
+
+static const struct nintendo_otp_devtype_data hollywood_otp_data = {
+ .name = "wii-otp",
+ .num_banks = 1,
+};
+
+static const struct nintendo_otp_devtype_data latte_otp_data = {
+ .name = "wiiu-otp",
+ .num_banks = 8,
+};
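+
+/*
+ * Illustrative command encoding for the read below (hypothetical offset): a
+ * read at byte offset 0x84 issues OTP_READ | (1 << 8) | 1 = 0x80000101, i.e.
+ * bank 1, word 1, and the result is then fetched from HW_OTPDATA.
+ */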
+
+static int nintendo_otp_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct nintendo_otp_priv *priv = context;
+ u32 *val = _val;
+ int words = bytes / WORD_SIZE;
+ u32 bank, addr;
+
+ while (words--) {
+ bank = (reg / BANK_SIZE) << 8;
+ addr = (reg / WORD_SIZE) % (BANK_SIZE / WORD_SIZE);
+ iowrite32be(OTP_READ | bank | addr, priv->regs + HW_OTPCMD);
+ *val++ = ioread32be(priv->regs + HW_OTPDATA);
+ reg += WORD_SIZE;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nintendo_otp_of_table[] = {
+ { .compatible = "nintendo,hollywood-otp", .data = &hollywood_otp_data },
+ { .compatible = "nintendo,latte-otp", .data = &latte_otp_data },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, nintendo_otp_of_table);
+
+static int nintendo_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id =
+ of_match_device(nintendo_otp_of_table, dev);
+ struct nvmem_device *nvmem;
+ struct nintendo_otp_priv *priv;
+
+ struct nvmem_config config = {
+ .stride = WORD_SIZE,
+ .word_size = WORD_SIZE,
+ .reg_read = nintendo_otp_reg_read,
+ .read_only = true,
+ .root_only = true,
+ };
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ if (of_id->data) {
+ const struct nintendo_otp_devtype_data *data = of_id->data;
+ config.name = data->name;
+ config.size = data->num_banks * BANK_SIZE;
+ }
+
+ config.dev = dev;
+ config.priv = priv;
+
+ nvmem = devm_nvmem_register(dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver nintendo_otp_driver = {
+ .probe = nintendo_otp_probe,
+ .driver = {
+ .name = "nintendo-otp",
+ .of_match_table = nintendo_otp_of_table,
+ },
+};
+module_platform_driver(nintendo_otp_driver);
+MODULE_AUTHOR("Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>");
+MODULE_DESCRIPTION("Nintendo Wii and Wii U OTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
new file mode 100644
index 0000000000..70f2d4f2ef
--- /dev/null
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define SDAM_MEM_START 0x40
+#define REGISTER_MAP_ID 0x40
+#define REGISTER_MAP_VERSION 0x41
+#define SDAM_SIZE 0x44
+#define SDAM_PBS_TRIG_SET 0xE5
+#define SDAM_PBS_TRIG_CLR 0xE6
+
+struct sdam_chip {
+ struct regmap *regmap;
+ struct nvmem_config sdam_config;
+ unsigned int base;
+ unsigned int size;
+};
+
+/* read only register offsets */
+static const u8 sdam_ro_map[] = {
+ REGISTER_MAP_ID,
+ REGISTER_MAP_VERSION,
+ SDAM_SIZE
+};
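+
+/*
+ * Illustrative access window (hypothetical SDAM size): if the SDAM_SIZE
+ * register reads 2, the probe below computes 2 * 32 = 64 bytes, so offsets
+ * 0x40-0x7f are valid for arbitrary-length accesses, while the PBS trigger
+ * registers at 0xE5/0xE6 only accept single-byte accesses.
+ */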
+
+static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset,
+ size_t len)
+{
+ unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
+
+ if (!len)
+ return false;
+
+ if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
+ && (offset + len - 1) <= sdam_mem_end)
+ return true;
+ else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
+ && (len == 1))
+ return true;
+
+ return false;
+}
+
+static bool sdam_is_ro(unsigned int offset, size_t len)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
+ if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
+ return true;
+
+ return false;
+}
+
+static int sdam_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ struct device *dev = sdam->sdam_config.dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+ dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ struct device *dev = sdam->sdam_config.dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+ dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ if (sdam_is_ro(offset, bytes)) {
+ dev_err(dev, "Invalid write offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_probe(struct platform_device *pdev)
+{
+ struct sdam_chip *sdam;
+ struct nvmem_device *nvmem;
+ unsigned int val;
+ int rc;
+
+ sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
+ if (!sdam)
+ return -ENOMEM;
+
+ sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sdam->regmap) {
+ dev_err(&pdev->dev, "Failed to get regmap handle\n");
+ return -ENXIO;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc);
+ return -EINVAL;
+ }
+ sdam->size = val * 32;
+
+ sdam->sdam_config.dev = &pdev->dev;
+ sdam->sdam_config.name = "spmi_sdam";
+ sdam->sdam_config.id = NVMEM_DEVID_AUTO;
+ sdam->sdam_config.owner = THIS_MODULE;
+ sdam->sdam_config.stride = 1;
+ sdam->sdam_config.word_size = 1;
+ sdam->sdam_config.reg_read = sdam_read;
+ sdam->sdam_config.reg_write = sdam_write;
+ sdam->sdam_config.priv = sdam;
+
+ nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev,
+ "Failed to register SDAM nvmem device rc=%ld\n",
+ PTR_ERR(nvmem));
+ return -ENXIO;
+ }
+ dev_dbg(&pdev->dev,
+ "SDAM base=%#x size=%u registered successfully\n",
+ sdam->base, sdam->size);
+
+ return 0;
+}
+
+static const struct of_device_id sdam_match_table[] = {
+ { .compatible = "qcom,spmi-sdam" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdam_match_table);
+
+static struct platform_driver sdam_driver = {
+ .driver = {
+ .name = "qcom,spmi-sdam",
+ .of_match_table = sdam_match_table,
+ },
+ .probe = sdam_probe,
+};
+module_platform_driver(sdam_driver);
+
+MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
new file mode 100644
index 0000000000..14814cba2d
--- /dev/null
+++ b/drivers/nvmem/qfprom.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+/* Blow timer clock frequency in MHz */
+#define QFPROM_BLOW_TIMER_OFFSET 0x03c
+
+/* Amount of time required to hold charge to blow the fuse, in microseconds */
+#define QFPROM_FUSE_BLOW_POLL_US 100
+#define QFPROM_FUSE_BLOW_TIMEOUT_US 10000
+
+#define QFPROM_BLOW_STATUS_OFFSET 0x048
+#define QFPROM_BLOW_STATUS_BUSY 0x1
+#define QFPROM_BLOW_STATUS_READY 0x0
+
+#define QFPROM_ACCEL_OFFSET 0x044
+
+#define QFPROM_VERSION_OFFSET 0x0
+#define QFPROM_MAJOR_VERSION_SHIFT 28
+#define QFPROM_MAJOR_VERSION_MASK GENMASK(31, QFPROM_MAJOR_VERSION_SHIFT)
+#define QFPROM_MINOR_VERSION_SHIFT 16
+#define QFPROM_MINOR_VERSION_MASK GENMASK(27, QFPROM_MINOR_VERSION_SHIFT)
+
+static bool read_raw_data;
+module_param(read_raw_data, bool, 0644);
+MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data");
+
+/**
+ * struct qfprom_soc_data - config that varies from SoC to SoC.
+ *
+ * @accel_value: Should contain qfprom accel value.
+ * @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow.
+ * @qfprom_blow_set_freq: The frequency that must be set when we start
+ * fuse blowing.
+ * @qfprom_blow_uV: LDO voltage to be set when doing efuse blow
+ */
+struct qfprom_soc_data {
+ u32 accel_value;
+ u32 qfprom_blow_timer_value;
+ u32 qfprom_blow_set_freq;
+ int qfprom_blow_uV;
+};
+
+/**
+ * struct qfprom_priv - structure holding qfprom attributes
+ *
+ * @qfpraw: iomapped memory space for qfprom-efuse raw address space.
+ * @qfpconf: iomapped memory space for qfprom-efuse configuration address
+ * space.
+ * @qfpcorrected: iomapped memory space for qfprom corrected address space.
+ * @qfpsecurity: iomapped memory space for qfprom security control space.
+ * @dev: qfprom device structure.
+ * @secclk: Clock supply.
+ * @vcc: Regulator supply.
+ * @soc_data: Data for things that vary from SoC to SoC.
+ */
+struct qfprom_priv {
+ void __iomem *qfpraw;
+ void __iomem *qfpconf;
+ void __iomem *qfpcorrected;
+ void __iomem *qfpsecurity;
+ struct device *dev;
+ struct clk *secclk;
+ struct regulator *vcc;
+ const struct qfprom_soc_data *soc_data;
+};
+
+/**
+ * struct qfprom_touched_values - saved values to restore after blowing
+ *
+ * @clk_rate: The rate the clock was at before blowing.
+ * @accel_val: The value of the accel reg before blowing.
+ * @timer_val: The value of the timer before blowing.
+ */
+struct qfprom_touched_values {
+ unsigned long clk_rate;
+ u32 accel_val;
+ u32 timer_val;
+};
+
+/**
+ * struct qfprom_soc_compatible_data - Data matched against the SoC
+ * compatible string.
+ *
+ * @keepout: Array of keepout regions for this SoC.
+ * @nkeepout: Number of elements in the keepout array.
+ */
+struct qfprom_soc_compatible_data {
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
+};
+
+static const struct nvmem_keepout sc7180_qfprom_keepout[] = {
+ {.start = 0x128, .end = 0x148},
+ {.start = 0x220, .end = 0x228}
+};
+
+static const struct qfprom_soc_compatible_data sc7180_qfprom = {
+ .keepout = sc7180_qfprom_keepout,
+ .nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout)
+};
+
+static const struct nvmem_keepout sc7280_qfprom_keepout[] = {
+ {.start = 0x128, .end = 0x148},
+ {.start = 0x238, .end = 0x248}
+};
+
+static const struct qfprom_soc_compatible_data sc7280_qfprom = {
+ .keepout = sc7280_qfprom_keepout,
+ .nkeepout = ARRAY_SIZE(sc7280_qfprom_keepout)
+};
+
+/**
+ * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
+ * @priv: Our driver data.
+ * @old: The data that was stashed from before fuse blowing.
+ *
+ * Resets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors but doesn't return an error code
+ * since there's not much we can do upon failure.
+ */
+static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
+ const struct qfprom_touched_values *old)
+{
+ int ret;
+
+ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
+
+ dev_pm_genpd_set_performance_state(priv->dev, 0);
+ pm_runtime_put(priv->dev);
+
+ /*
+ * This may be a shared rail and may be able to run at a lower rate
+ * when we're not blowing fuses. At the moment, the regulator framework
+ * applies voltage constraints even on disabled rails, so remove our
+ * constraints and allow the rail to be adjusted by other users.
+ */
+ ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
+ if (ret)
+ dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
+
+ ret = regulator_disable(priv->vcc);
+ if (ret)
+ dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
+
+ ret = clk_set_rate(priv->secclk, old->clk_rate);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to set clock rate for disable (ignoring)\n");
+
+ clk_disable_unprepare(priv->secclk);
+}
+
+/**
+ * qfprom_enable_fuse_blowing() - Enable fuse blowing.
+ * @priv: Our driver data.
+ * @old: We'll stash stuff here to use when disabling.
+ *
+ * Sets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors so caller doesn't need to.
+ *
+ * Return: 0 or -err.
+ */
+static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
+ struct qfprom_touched_values *old)
+{
+ int ret;
+ int qfprom_blow_uV = priv->soc_data->qfprom_blow_uV;
+
+ ret = clk_prepare_enable(priv->secclk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ old->clk_rate = clk_get_rate(priv->secclk);
+ ret = clk_set_rate(priv->secclk, priv->soc_data->qfprom_blow_set_freq);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set clock rate for enable\n");
+ goto err_clk_prepared;
+ }
+
+ /*
+ * Hardware requires a minimum voltage for fuse blowing.
+ * This may be a shared rail so don't specify a maximum.
+ * Regulator constraints will cap to the actual maximum.
+ */
+ ret = regulator_set_voltage(priv->vcc, qfprom_blow_uV, INT_MAX);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set %duV\n", qfprom_blow_uV);
+ goto err_clk_rate_set;
+ }
+
+ ret = regulator_enable(priv->vcc);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable regulator\n");
+ goto err_clk_rate_set;
+ }
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to enable power-domain\n");
+ goto err_reg_enable;
+ }
+ dev_pm_genpd_set_performance_state(priv->dev, INT_MAX);
+
+ old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET);
+ writel(priv->soc_data->qfprom_blow_timer_value,
+ priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(priv->soc_data->accel_value,
+ priv->qfpconf + QFPROM_ACCEL_OFFSET);
+
+ return 0;
+
+err_reg_enable:
+ regulator_disable(priv->vcc);
+err_clk_rate_set:
+ clk_set_rate(priv->secclk, old->clk_rate);
+err_clk_prepared:
+ clk_disable_unprepare(priv->secclk);
+ return ret;
+}
+
+/**
+ * qfprom_reg_write() - Write to fuses.
+ * @context: Our driver data.
+ * @reg: The offset to write at.
+ * @_val: Pointer to data to write.
+ * @bytes: The number of bytes to write.
+ *
+ * Writes to fuses. WARNING: THIS IS PERMANENT.
+ *
+ * Return: 0 or -err.
+ */
+static int qfprom_reg_write(void *context, unsigned int reg, void *_val,
+ size_t bytes)
+{
+ struct qfprom_priv *priv = context;
+ struct qfprom_touched_values old;
+ int words = bytes / 4;
+ u32 *value = _val;
+ u32 blow_status;
+ int ret;
+ int i;
+
+ dev_dbg(priv->dev,
+ "Writing to raw qfprom region: %#010x of size: %zu\n",
+ reg, bytes);
+
+ /*
+ * The hardware only allows us to write a word at a time, but we can
+ * read a byte at a time. Until the nvmem framework allows a separate
+ * word_size and stride for reading vs. writing, we'll enforce that here.
+ */
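+ /*
+ * Illustrative example (hypothetical values): a 6-byte write, or a
+ * write at offset 0x12, is rejected by the checks below; callers
+ * must use word-aligned offsets and whole-word lengths.
+ */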
+ if (bytes % 4) {
+ dev_err(priv->dev,
+ "%zu is not an integral number of words\n", bytes);
+ return -EINVAL;
+ }
+ if (reg % 4) {
+ dev_err(priv->dev,
+ "Invalid offset: %#x. Must be word aligned\n", reg);
+ return -EINVAL;
+ }
+
+ ret = qfprom_enable_fuse_blowing(priv, &old);
+ if (ret)
+ return ret;
+
+ ret = readl_relaxed_poll_timeout(
+ priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
+ blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
+ QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
+
+ if (ret) {
+ dev_err(priv->dev,
+ "Timeout waiting for initial ready; aborting.\n");
+ goto exit_enabled_fuse_blowing;
+ }
+
+ for (i = 0; i < words; i++)
+ writel(value[i], priv->qfpraw + reg + (i * 4));
+
+ ret = readl_relaxed_poll_timeout(
+ priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
+ blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
+ QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
+
+ /* Give an error, but not much we can do in this case */
+ if (ret)
+ dev_err(priv->dev, "Timeout waiting for finish.\n");
+
+exit_enabled_fuse_blowing:
+ qfprom_disable_fuse_blowing(priv, &old);
+
+ return ret;
+}
+
+static int qfprom_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct qfprom_priv *priv = context;
+ u8 *val = _val;
+ int i = 0, words = bytes;
+ void __iomem *base = priv->qfpcorrected;
+
+ if (read_raw_data && priv->qfpraw)
+ base = priv->qfpraw;
+
+ while (words--)
+ *val++ = readb(base + reg + i++);
+
+ return 0;
+}
+
+static void qfprom_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static const struct qfprom_soc_data qfprom_7_8_data = {
+ .accel_value = 0xD10,
+ .qfprom_blow_timer_value = 25,
+ .qfprom_blow_set_freq = 4800000,
+ .qfprom_blow_uV = 1800000,
+};
+
+static const struct qfprom_soc_data qfprom_7_15_data = {
+ .accel_value = 0xD08,
+ .qfprom_blow_timer_value = 24,
+ .qfprom_blow_set_freq = 4800000,
+ .qfprom_blow_uV = 1900000,
+};
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+ struct nvmem_config econfig = {
+ .name = "qfprom",
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+ .reg_read = qfprom_reg_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ const struct qfprom_soc_compatible_data *soc_data;
+ struct qfprom_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* The corrected section is always provided */
+ priv->qfpcorrected = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->qfpcorrected))
+ return PTR_ERR(priv->qfpcorrected);
+
+ econfig.size = resource_size(res);
+ econfig.dev = dev;
+ econfig.priv = priv;
+
+ priv->dev = dev;
+ soc_data = device_get_match_data(dev);
+ if (soc_data) {
+ econfig.keepout = soc_data->keepout;
+ econfig.nkeepout = soc_data->nkeepout;
+ }
+
+ /*
+ * If more than one region is provided then the OS has the ability
+ * to write.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ u32 version;
+ int major_version, minor_version;
+
+ priv->qfpraw = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpraw))
+ return PTR_ERR(priv->qfpraw);
+ priv->qfpconf = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(priv->qfpconf))
+ return PTR_ERR(priv->qfpconf);
+ priv->qfpsecurity = devm_platform_ioremap_resource(pdev, 3);
+ if (IS_ERR(priv->qfpsecurity))
+ return PTR_ERR(priv->qfpsecurity);
+
+ version = readl(priv->qfpsecurity + QFPROM_VERSION_OFFSET);
+ major_version = (version & QFPROM_MAJOR_VERSION_MASK) >>
+ QFPROM_MAJOR_VERSION_SHIFT;
+ minor_version = (version & QFPROM_MINOR_VERSION_MASK) >>
+ QFPROM_MINOR_VERSION_SHIFT;
+
+ if (major_version == 7 && minor_version == 8)
+ priv->soc_data = &qfprom_7_8_data;
+ else if (major_version == 7 && minor_version == 15)
+ priv->soc_data = &qfprom_7_15_data;
+
+ priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(priv->vcc))
+ return PTR_ERR(priv->vcc);
+
+ priv->secclk = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->secclk))
+ return dev_err_probe(dev, PTR_ERR(priv->secclk), "Error getting clock\n");
+
+ /* Only enable writing if we have SoC data. */
+ if (priv->soc_data)
+ econfig.reg_write = qfprom_reg_write;
+ }
+
+ pm_runtime_enable(dev);
+ ret = devm_add_action_or_reset(dev, qfprom_runtime_disable, dev);
+ if (ret)
+ return ret;
+
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id qfprom_of_match[] = {
+ { .compatible = "qcom,qfprom",},
+ { .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom},
+ { .compatible = "qcom,sc7280-qfprom", .data = &sc7280_qfprom},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qfprom_of_match);
+
+static struct platform_driver qfprom_driver = {
+ .probe = qfprom_probe,
+ .driver = {
+ .name = "qcom,qfprom",
+ .of_match_table = qfprom_of_match,
+ },
+};
+module_platform_driver(qfprom_driver);
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm QFPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/qoriq-efuse.c b/drivers/nvmem/qoriq-efuse.c
new file mode 100644
index 0000000000..e7fd04d6dd
--- /dev/null
+++ b/drivers/nvmem/qoriq-efuse.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Westermo Network Technologies AB
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct qoriq_efuse_priv {
+ void __iomem *base;
+};
+
+static int qoriq_efuse_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct qoriq_efuse_priv *priv = context;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ __ioread32_copy(val, priv->base + offset, bytes / 4);
+
+ /* Ignore trailing bytes (there shouldn't be any) */
+
+ return 0;
+}
+
+static int qoriq_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
+ .read_only = true,
+ .reg_read = qoriq_efuse_read,
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .name = "qoriq_efuse_read",
+ .id = NVMEM_DEVID_AUTO,
+ .root_only = true,
+ };
+ struct qoriq_efuse_priv *priv;
+ struct nvmem_device *nvmem;
+ struct resource *res;
+
+ priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ config.size = resource_size(res);
+ config.priv = priv;
+ nvmem = devm_nvmem_register(config.dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id qoriq_efuse_of_match[] = {
+ { .compatible = "fsl,t1023-sfp", },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qoriq_efuse_of_match);
+
+static struct platform_driver qoriq_efuse_driver = {
+ .probe = qoriq_efuse_probe,
+ .driver = {
+ .name = "qoriq-efuse",
+ .of_match_table = qoriq_efuse_of_match,
+ },
+};
+module_platform_driver(qoriq_efuse_driver);
+
+MODULE_AUTHOR("Richard Alpe <richard.alpe@bit42.se>");
+MODULE_DESCRIPTION("NXP QorIQ Security Fuse Processor (SFP) Reader");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
new file mode 100644
index 0000000000..df6a1c594b
--- /dev/null
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * EEPROM driver for RAVE SP
+ *
+ * Copyright (C) 2018 Zodiac Inflight Innovations
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+/**
+ * enum rave_sp_eeprom_access_type - Supported types of EEPROM access
+ *
+ * @RAVE_SP_EEPROM_WRITE: EEPROM write
+ * @RAVE_SP_EEPROM_READ: EEPROM read
+ */
+enum rave_sp_eeprom_access_type {
+ RAVE_SP_EEPROM_WRITE = 0,
+ RAVE_SP_EEPROM_READ = 1,
+};
+
+/**
+ * enum rave_sp_eeprom_header_size - EEPROM command header sizes
+ *
+ * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (<= 8K)
+ * @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K)
+ */
+enum rave_sp_eeprom_header_size {
+ RAVE_SP_EEPROM_HEADER_SMALL = 4U,
+ RAVE_SP_EEPROM_HEADER_BIG = 5U,
+};
+#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG
+
+#define RAVE_SP_EEPROM_PAGE_SIZE 32U
+
+/**
+ * struct rave_sp_eeprom_page - RAVE SP EEPROM page
+ *
+ * @type: Access type (see enum rave_sp_eeprom_access_type)
+ * @success: Success flag (Success = 1, Failure = 0)
+ * @data: Read data
+ *
+ * Note this structure corresponds to RSP_*_EEPROM payload from RAVE
+ * SP ICD
+ */
+struct rave_sp_eeprom_page {
+ u8 type;
+ u8 success;
+ u8 data[RAVE_SP_EEPROM_PAGE_SIZE];
+} __packed;
+
+/**
+ * struct rave_sp_eeprom - RAVE SP EEPROM device
+ *
+ * @sp: Pointer to parent RAVE SP device
+ * @mutex: Lock protecting access to EEPROM
+ * @address: EEPROM device address
+ * @header_size: Size of EEPROM command header for this device
+ * @dev: Pointer to corresponding struct device used for logging
+ */
+struct rave_sp_eeprom {
+ struct rave_sp *sp;
+ struct mutex mutex;
+ u8 address;
+ unsigned int header_size;
+ struct device *dev;
+};
+
+/**
+ * rave_sp_eeprom_io - Low-level part of EEPROM page access
+ *
+ * @eeprom: EEPROM device to access
+ * @type: EEPROM access type (read or write)
+ * @idx: number of the EEPROM page
+ * @page: Data to write or buffer to store result (via page->data)
+ *
+ * This function does all of the low-level work required to perform an
+ * EEPROM access. This includes formatting the correct command payload,
+ * sending it and checking the received results.
+ *
+ * Returns zero in case of success or negative error code in
+ * case of failure.
+ */
+static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom,
+ enum rave_sp_eeprom_access_type type,
+ u16 idx,
+ struct rave_sp_eeprom_page *page)
+{
+ const bool is_write = type == RAVE_SP_EEPROM_WRITE;
+ const unsigned int data_size = is_write ? sizeof(page->data) : 0;
+ const unsigned int cmd_size = eeprom->header_size + data_size;
+ const unsigned int rsp_size =
+ is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
+ unsigned int offset = 0;
+ u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)];
+ int ret;
+
+ if (WARN_ON(cmd_size > sizeof(cmd)))
+ return -EINVAL;
+
+ cmd[offset++] = eeprom->address;
+ cmd[offset++] = 0;
+ cmd[offset++] = type;
+ cmd[offset++] = idx;
+
+ /*
+ * If there's still room in this command's header, it means we
+ * are talking to an EEPROM that uses 16-bit page numbers, so we
+ * have to specify the index's MSB in the payload as well.
+ */
+ if (offset < eeprom->header_size)
+ cmd[offset++] = idx >> 8;
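+ /*
+ * Illustrative example (hypothetical values): for a "big" device
+ * (header_size == 5) and page index 0x0123, the header becomes
+ * { address, 0, type, 0x23, 0x01 }, i.e. the index LSB in cmd[3]
+ * and the MSB in cmd[4].
+ */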
+ /*
+ * Copy the data to write into the command buffer first. In case
+ * of a read, data_size is zero and the memcpy becomes a no-op.
+ */
+ memcpy(&cmd[offset], page->data, data_size);
+
+ ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size);
+ if (ret)
+ return ret;
+
+ if (page->type != type)
+ return -EPROTO;
+
+ if (!page->success)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * rave_sp_eeprom_page_access - Access single EEPROM page
+ *
+ * @eeprom: EEPROM device to access
+ * @type: Access type to perform (read or write)
+ * @offset: Offset within EEPROM to access
+ * @data: Data buffer
+ * @data_len: Size of the data buffer
+ *
+ * This function performs a generic access to a single page or a
+ * portion thereof. Requested access MUST NOT cross the EEPROM page
+ * boundary.
+ *
+ * Returns zero in case of success or negative error code in
+ * case of failure.
+ */
+static int
+rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom,
+ enum rave_sp_eeprom_access_type type,
+ unsigned int offset, u8 *data,
+ size_t data_len)
+{
+ const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;
+ const unsigned int page_nr = offset / RAVE_SP_EEPROM_PAGE_SIZE;
+ struct rave_sp_eeprom_page page;
+ int ret;
+
+ /*
+ * This function will not work if the data access we've been asked
+ * to do crosses an EEPROM page boundary. Normally this should
+ * never happen and getting here would indicate a bug in the code.
+ */
+ if (WARN_ON(data_len > sizeof(page.data) - page_offset))
+ return -EINVAL;
+
+ if (type == RAVE_SP_EEPROM_WRITE) {
+ /*
+ * If doing a partial write we need to do a read first
+ * to fill the rest of the page with correct data.
+ */
+ if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) {
+ ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ,
+ page_nr, &page);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(&page.data[page_offset], data, data_len);
+ }
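+ /*
+ * Illustrative example (hypothetical values): a 4-byte write at
+ * page offset 10 first reads the full 32-byte page above, patches
+ * bytes 10..13, and then writes the whole page back via the call
+ * below.
+ */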
+
+ ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page);
+ if (ret)
+ return ret;
+
+ /*
+ * Since we receive the result of the read via the 'page.data'
+ * buffer, we need to copy it to 'data'.
+ */
+ if (type == RAVE_SP_EEPROM_READ)
+ memcpy(data, &page.data[page_offset], data_len);
+
+ return 0;
+}
+
+/**
+ * rave_sp_eeprom_access - Access EEPROM data
+ *
+ * @eeprom: EEPROM device to access
+ * @type: Access type to perform (read or write)
+ * @offset: Offset within EEPROM to access
+ * @data: Data buffer
+ * @data_len: Size of the data buffer
+ *
+ * This function performs a generic access (either read or write) at an
+ * arbitrary offset (not necessarily page aligned) and of arbitrary
+ * length (not constrained by the EEPROM page size).
+ *
+ * Returns zero in case of success or negative error code in case of
+ * failure.
+ */
+static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom,
+ enum rave_sp_eeprom_access_type type,
+ unsigned int offset, u8 *data,
+ unsigned int data_len)
+{
+ unsigned int residue;
+ unsigned int chunk;
+ unsigned int head;
+ int ret;
+
+ mutex_lock(&eeprom->mutex);
+
+ head = offset % RAVE_SP_EEPROM_PAGE_SIZE;
+ residue = data_len;
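+ /*
+ * Illustrative example (hypothetical values): a 40-byte access
+ * starting at offset 20 has head == 20, so the first chunk covers
+ * the 12 bytes up to the 32-byte page boundary and the second
+ * chunk covers the remaining 28 bytes.
+ */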
+
+ do {
+ /*
+ * On the first iteration, if we are doing an access that is
+ * not 32-byte aligned, we need to access only the data up
+ * to the page boundary to avoid crossing it in
+ * rave_sp_eeprom_page_access().
+ */
+ if (unlikely(head)) {
+ chunk = RAVE_SP_EEPROM_PAGE_SIZE - head;
+ /*
+ * This can only happen once per
+ * rave_sp_eeprom_access() call, so we set
+ * head to zero to process all the other
+ * iterations normally.
+ */
+ head = 0;
+ } else {
+ chunk = RAVE_SP_EEPROM_PAGE_SIZE;
+ }
+
+ /*
+ * We should never read more than 'residue' bytes.
+ */
+ chunk = min(chunk, residue);
+ ret = rave_sp_eeprom_page_access(eeprom, type, offset,
+ data, chunk);
+ if (ret)
+ goto out;
+
+ residue -= chunk;
+ offset += chunk;
+ data += chunk;
+ } while (residue);
+out:
+ mutex_unlock(&eeprom->mutex);
+ return ret;
+}
+
+static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset,
+ void *val, size_t bytes)
+{
+ return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ,
+ offset, val, bytes);
+}
+
+static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset,
+ void *val, size_t bytes)
+{
+ return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE,
+ offset, val, bytes);
+}
+
+static int rave_sp_eeprom_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct device_node *np = dev->of_node;
+ struct nvmem_config config = { 0 };
+ struct rave_sp_eeprom *eeprom;
+ struct nvmem_device *nvmem;
+ u32 reg[2], size;
+
+ if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) {
+ dev_err(dev, "Failed to parse \"reg\" property\n");
+ return -EINVAL;
+ }
+
+ size = reg[1];
+ /*
+ * Per the ICD, we have no more than 2 bytes to specify the
+ * EEPROM page.
+ */
+ if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) {
+ dev_err(dev, "Specified size is too big\n");
+ return -EINVAL;
+ }
+
+ eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ eeprom->address = reg[0];
+ eeprom->sp = sp;
+ eeprom->dev = dev;
+
+ if (size > SZ_8K)
+ eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG;
+ else
+ eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL;
+
+ mutex_init(&eeprom->mutex);
+
+ config.id = -1;
+ of_property_read_string(np, "zii,eeprom-name", &config.name);
+ config.priv = eeprom;
+ config.dev = dev;
+ config.size = size;
+ config.reg_read = rave_sp_eeprom_reg_read;
+ config.reg_write = rave_sp_eeprom_reg_write;
+ config.word_size = 1;
+ config.stride = 1;
+
+ nvmem = devm_nvmem_register(dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id rave_sp_eeprom_of_match[] = {
+ { .compatible = "zii,rave-sp-eeprom" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match);
+
+static struct platform_driver rave_sp_eeprom_driver = {
+ .probe = rave_sp_eeprom_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_eeprom_of_match,
+ },
+};
+module_platform_driver(rave_sp_eeprom_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP EEPROM driver");
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
new file mode 100644
index 0000000000..752d0bf444
--- /dev/null
+++ b/drivers/nvmem/rmem.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+
+struct rmem {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ struct reserved_mem *mem;
+
+ phys_addr_t size;
+};
+
+static int rmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rmem *priv = context;
+ size_t available = priv->mem->size;
+ loff_t off = offset;
+ void *addr;
+ int count;
+
+ /*
+ * Only map the reserved memory at this point to avoid potential rogue
+ * kernel threads inadvertently modifying it. Based on the current
+ * use cases for this driver, the performance hit isn't a concern.
+ * Nor is it likely to be, given the nature of the subsystem. Most nvmem
+ * devices operate over slow buses to begin with.
+ *
+ * An alternative would be setting the memory as RO, set_memory_ro(),
+ * but as of Dec 2020 this isn't possible on arm64.
+ */
+ addr = memremap(priv->mem->base, available, MEMREMAP_WB);
+ if (!addr) {
+ dev_err(priv->dev, "Failed to remap memory region\n");
+ return -ENOMEM;
+ }
+
+ count = memory_read_from_buffer(val, bytes, &off, addr, available);
+
+ memunmap(addr);
+
+ return count;
+}
+
+static int rmem_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = { };
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *mem;
+ struct rmem *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ mem = of_reserved_mem_lookup(dev->of_node);
+ if (!mem) {
+ dev_err(dev, "Failed to lookup reserved memory\n");
+ return -EINVAL;
+ }
+ priv->mem = mem;
+
+ config.dev = dev;
+ config.priv = priv;
+ config.name = "rmem";
+ config.id = NVMEM_DEVID_AUTO;
+ config.size = mem->size;
+ config.reg_read = rmem_read;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id rmem_match[] = {
+ { .compatible = "nvmem-rmem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rmem_match);
+
+static struct platform_driver rmem_driver = {
+ .probe = rmem_probe,
+ .driver = {
+ .name = "rmem",
+ .of_match_table = rmem_match,
+ },
+};
+module_platform_driver(rmem_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
+MODULE_DESCRIPTION("Reserved Memory Based nvmem Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
new file mode 100644
index 0000000000..4004c5bece
--- /dev/null
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip eFuse Driver
+ *
+ * Copyright (c) 2015 Rockchip Electronics Co. Ltd.
+ * Author: Caesar Wang <wxt@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define RK3288_A_SHIFT 6
+#define RK3288_A_MASK 0x3ff
+#define RK3288_PGENB BIT(3)
+#define RK3288_LOAD BIT(2)
+#define RK3288_STROBE BIT(1)
+#define RK3288_CSB BIT(0)
+
+#define RK3328_SECURE_SIZES 96
+#define RK3328_INT_STATUS 0x0018
+#define RK3328_DOUT 0x0020
+#define RK3328_AUTO_CTRL 0x0024
+#define RK3328_INT_FINISH BIT(0)
+#define RK3328_AUTO_ENB BIT(0)
+#define RK3328_AUTO_RD BIT(1)
+
+#define RK3399_A_SHIFT 16
+#define RK3399_A_MASK 0x3ff
+#define RK3399_NBYTES 4
+#define RK3399_STROBSFTSEL BIT(9)
+#define RK3399_RSB BIT(7)
+#define RK3399_PD BIT(5)
+#define RK3399_PGENB BIT(3)
+#define RK3399_LOAD BIT(2)
+#define RK3399_STROBE BIT(1)
+#define RK3399_CSB BIT(0)
+
+#define REG_EFUSE_CTRL 0x0000
+#define REG_EFUSE_DOUT 0x0004
+
+struct rockchip_efuse_chip {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_efuse_chip *efuse = context;
+ u8 *buf = val;
+ int ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0) {
+ dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
+ return ret;
+ }
+
+ writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ while (bytes--) {
+ writel(readl(efuse->base + REG_EFUSE_CTRL) &
+ (~(RK3288_A_MASK << RK3288_A_SHIFT)),
+ efuse->base + REG_EFUSE_CTRL);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) |
+ ((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT),
+ efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) |
+ RK3288_STROBE, efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ *buf++ = readb(efuse->base + REG_EFUSE_DOUT);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) &
+ (~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ }
+
+ /* Switch to standby mode */
+ writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL);
+
+ clk_disable_unprepare(efuse->clk);
+
+ return 0;
+}
+
+static int rockchip_rk3328_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_efuse_chip *efuse = context;
+ unsigned int addr_start, addr_end, addr_offset, addr_len;
+ u32 out_value, status;
+ u8 *buf;
+ int ret, i = 0;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0) {
+ dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
+ return ret;
+ }
+
+ /* 128-byte efuse: 96 bytes secure, 32 bytes non-secure */
+ offset += RK3328_SECURE_SIZES;
+ addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_offset = offset % RK3399_NBYTES;
+ addr_len = addr_end - addr_start;
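+ /*
+ * Illustrative example (hypothetical values): a 3-byte read at
+ * nvmem offset 2 becomes offset 98 after the secure area is
+ * skipped, giving addr_start = 24, addr_end = 26 and
+ * addr_offset = 2, so two 4-byte words are read into the bounce
+ * buffer below.
+ */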
+
+ buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
+ GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto nomem;
+ }
+
+ while (addr_len--) {
+ writel(RK3328_AUTO_RD | RK3328_AUTO_ENB |
+ ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
+ efuse->base + RK3328_AUTO_CTRL);
+ udelay(4);
+ status = readl(efuse->base + RK3328_INT_STATUS);
+ if (!(status & RK3328_INT_FINISH)) {
+ ret = -EIO;
+ goto err;
+ }
+ out_value = readl(efuse->base + RK3328_DOUT);
+ writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS);
+
+ memcpy(&buf[i], &out_value, RK3399_NBYTES);
+ i += RK3399_NBYTES;
+ }
+
+ memcpy(val, buf + addr_offset, bytes);
+err:
+ kfree(buf);
+nomem:
+ clk_disable_unprepare(efuse->clk);
+
+ return ret;
+}
+
+static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_efuse_chip *efuse = context;
+ unsigned int addr_start, addr_end, addr_offset, addr_len;
+ u32 out_value;
+ u8 *buf;
+ int ret, i = 0;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0) {
+ dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
+ return ret;
+ }
+
+ addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_offset = offset % RK3399_NBYTES;
+ addr_len = addr_end - addr_start;
+
+ buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
+ GFP_KERNEL);
+ if (!buf) {
+ clk_disable_unprepare(efuse->clk);
+ return -ENOMEM;
+ }
+
+ writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB,
+ efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ while (addr_len--) {
+ writel(readl(efuse->base + REG_EFUSE_CTRL) | RK3399_STROBE |
+ ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
+ efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ out_value = readl(efuse->base + REG_EFUSE_DOUT);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) & (~RK3399_STROBE),
+ efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+
+ memcpy(&buf[i], &out_value, RK3399_NBYTES);
+ i += RK3399_NBYTES;
+ }
+
+ /* Switch to standby mode */
+ writel(RK3399_PD | RK3399_CSB, efuse->base + REG_EFUSE_CTRL);
+
+ memcpy(val, buf + addr_offset, bytes);
+
+ kfree(buf);
+
+ clk_disable_unprepare(efuse->clk);
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "rockchip-efuse",
+ .stride = 1,
+ .word_size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id rockchip_efuse_match[] = {
+ /* deprecated but kept around for dts binding compatibility */
+ {
+ .compatible = "rockchip,rockchip-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3066a-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3188-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3228-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3288-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3368-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3328-efuse",
+ .data = (void *)&rockchip_rk3328_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3399-efuse",
+ .data = (void *)&rockchip_rk3399_efuse_read,
+ },
+ { /* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
+
+static int rockchip_efuse_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct rockchip_efuse_chip *efuse;
+ const void *data;
+ struct device *dev = &pdev->dev;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ efuse = devm_kzalloc(dev, sizeof(struct rockchip_efuse_chip),
+ GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ efuse->clk = devm_clk_get(dev, "pclk_efuse");
+ if (IS_ERR(efuse->clk))
+ return PTR_ERR(efuse->clk);
+
+ efuse->dev = dev;
+ if (of_property_read_u32(dev->of_node, "rockchip,efuse-size",
+ &econfig.size))
+ econfig.size = resource_size(res);
+ econfig.reg_read = data;
+ econfig.priv = efuse;
+ econfig.dev = efuse->dev;
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver rockchip_efuse_driver = {
+ .probe = rockchip_efuse_probe,
+ .driver = {
+ .name = "rockchip-efuse",
+ .of_match_table = rockchip_efuse_match,
+ },
+};
+
+module_platform_driver(rockchip_efuse_driver);
+MODULE_DESCRIPTION("rockchip_efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
new file mode 100644
index 0000000000..cb9aa54283
--- /dev/null
+++ b/drivers/nvmem/rockchip-otp.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip OTP Driver
+ *
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* OTP Register Offsets */
+#define OTPC_SBPI_CTRL 0x0020
+#define OTPC_SBPI_CMD_VALID_PRE 0x0024
+#define OTPC_SBPI_CS_VALID_PRE 0x0028
+#define OTPC_SBPI_STATUS 0x002C
+#define OTPC_USER_CTRL 0x0100
+#define OTPC_USER_ADDR 0x0104
+#define OTPC_USER_ENABLE 0x0108
+#define OTPC_USER_Q 0x0124
+#define OTPC_INT_STATUS 0x0304
+#define OTPC_SBPI_CMD0_OFFSET 0x1000
+#define OTPC_SBPI_CMD1_OFFSET 0x1004
+
+/* OTP Register bits and masks */
+#define OTPC_USER_ADDR_MASK GENMASK(31, 16)
+#define OTPC_USE_USER BIT(0)
+#define OTPC_USE_USER_MASK GENMASK(16, 16)
+#define OTPC_USER_FSM_ENABLE BIT(0)
+#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16)
+#define OTPC_SBPI_DONE BIT(1)
+#define OTPC_USER_DONE BIT(2)
+
+#define SBPI_DAP_ADDR 0x02
+#define SBPI_DAP_ADDR_SHIFT 8
+#define SBPI_DAP_ADDR_MASK GENMASK(31, 24)
+#define SBPI_CMD_VALID_MASK GENMASK(31, 16)
+#define SBPI_DAP_CMD_WRF 0xC0
+#define SBPI_DAP_REG_ECC 0x3A
+#define SBPI_ECC_ENABLE 0x00
+#define SBPI_ECC_DISABLE 0x09
+#define SBPI_ENABLE BIT(0)
+#define SBPI_ENABLE_MASK GENMASK(16, 16)
+
+#define OTPC_TIMEOUT 10000
+
+/* RK3588 Register */
+#define RK3588_OTPC_AUTO_CTRL 0x04
+#define RK3588_OTPC_AUTO_EN 0x08
+#define RK3588_OTPC_INT_ST 0x84
+#define RK3588_OTPC_DOUT0 0x20
+#define RK3588_NO_SECURE_OFFSET 0x300
+#define RK3588_NBYTES 4
+#define RK3588_BURST_NUM 1
+#define RK3588_BURST_SHIFT 8
+#define RK3588_ADDR_SHIFT 16
+#define RK3588_AUTO_EN BIT(0)
+#define RK3588_RD_DONE BIT(1)
+
+struct rockchip_data {
+ int size;
+ const char * const *clks;
+ int num_clks;
+ nvmem_reg_read_t reg_read;
+};
+
+struct rockchip_otp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data *clks;
+ struct reset_control *rst;
+ const struct rockchip_data *data;
+};
+
+static int rockchip_otp_reset(struct rockchip_otp *otp)
+{
+ int ret;
+
+ ret = reset_control_assert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to assert otp phy %d\n", ret);
+ return ret;
+ }
+
+ udelay(2);
+
+ ret = reset_control_deassert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to deassert otp phy %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_otp_wait_status(struct rockchip_otp *otp,
+ unsigned int reg, u32 flag)
+{
+ u32 status = 0;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(otp->base + reg, status,
+ (status & flag), 1, OTPC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* clear the interrupt status */
+ writel(flag, otp->base + reg);
+
+ return 0;
+}
+
+static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
+{
+ int ret = 0;
+
+ writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT),
+ otp->base + OTPC_SBPI_CTRL);
+
+ writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE);
+ writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC,
+ otp->base + OTPC_SBPI_CMD0_OFFSET);
+ if (enable)
+ writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+ else
+ writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+
+ writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
+
+ ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_SBPI_DONE);
+ if (ret < 0)
+ dev_err(otp->dev, "timeout during ecc_enable\n");
+
+ return ret;
+}
+
+static int px30_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ u8 *buf = val;
+ int ret;
+
+ ret = rockchip_otp_reset(otp);
+ if (ret) {
+ dev_err(otp->dev, "failed to reset otp phy\n");
+ return ret;
+ }
+
+ ret = rockchip_otp_ecc_enable(otp, false);
+ if (ret < 0) {
+ dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
+ return ret;
+ }
+
+ writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+ udelay(5);
+ while (bytes--) {
+ writel(offset++ | OTPC_USER_ADDR_MASK,
+ otp->base + OTPC_USER_ADDR);
+ writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
+ otp->base + OTPC_USER_ENABLE);
+ ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_USER_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+ *buf++ = readb(otp->base + OTPC_USER_Q);
+ }
+
+read_end:
+ writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+
+ return ret;
+}
+
+static int rk3588_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ unsigned int addr_start, addr_end, addr_len;
+ int ret, i = 0;
+ u32 data;
+ u8 *buf;
+
+ addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES;
+ addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES;
+ addr_len = addr_end - addr_start;
+ addr_start += RK3588_NO_SECURE_OFFSET;
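+ /*
+ * Illustrative example (hypothetical values): a 2-byte read at
+ * offset 5 gives addr_start = 1 and addr_end = 2 (one 4-byte
+ * word); after adding the non-secure offset the hardware word
+ * index is 0x301, and the two requested bytes are copied from the
+ * bounce buffer at the end.
+ */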
+
+ buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (addr_len--) {
+ writel((addr_start << RK3588_ADDR_SHIFT) |
+ (RK3588_BURST_NUM << RK3588_BURST_SHIFT),
+ otp->base + RK3588_OTPC_AUTO_CTRL);
+ writel(RK3588_AUTO_EN, otp->base + RK3588_OTPC_AUTO_EN);
+
+ ret = rockchip_otp_wait_status(otp, RK3588_OTPC_INT_ST,
+ RK3588_RD_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+
+ data = readl(otp->base + RK3588_OTPC_DOUT0);
+ memcpy(&buf[i], &data, RK3588_NBYTES);
+
+ i += RK3588_NBYTES;
+ addr_start++;
+ }
+
+ memcpy(val, buf + offset % RK3588_NBYTES, bytes);
+
+read_end:
+ kfree(buf);
+
+ return ret;
+}
+
+static int rockchip_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ int ret;
+
+ if (!otp->data || !otp->data->reg_read)
+ return -EINVAL;
+
+ ret = clk_bulk_prepare_enable(otp->data->num_clks, otp->clks);
+ if (ret < 0) {
+ dev_err(otp->dev, "failed to prepare/enable clks\n");
+ return ret;
+ }
+
+ ret = otp->data->reg_read(context, offset, val, bytes);
+
+ clk_bulk_disable_unprepare(otp->data->num_clks, otp->clks);
+
+ return ret;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "rockchip-otp",
+ .owner = THIS_MODULE,
+ .read_only = true,
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = rockchip_otp_read,
+};
+
+static const char * const px30_otp_clocks[] = {
+ "otp", "apb_pclk", "phy",
+};
+
+static const struct rockchip_data px30_data = {
+ .size = 0x40,
+ .clks = px30_otp_clocks,
+ .num_clks = ARRAY_SIZE(px30_otp_clocks),
+ .reg_read = px30_otp_read,
+};
+
+static const char * const rk3588_otp_clocks[] = {
+ "otp", "apb_pclk", "phy", "arb",
+};
+
+static const struct rockchip_data rk3588_data = {
+ .size = 0x400,
+ .clks = rk3588_otp_clocks,
+ .num_clks = ARRAY_SIZE(rk3588_otp_clocks),
+ .reg_read = rk3588_otp_read,
+};
+
+static const struct of_device_id rockchip_otp_match[] = {
+ {
+ .compatible = "rockchip,px30-otp",
+ .data = &px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3308-otp",
+ .data = &px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3588-otp",
+ .data = &rk3588_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_otp_match);
+
+static int rockchip_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_otp *otp;
+ const struct rockchip_data *data;
+ struct nvmem_device *nvmem;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "failed to get match data\n");
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
+ GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->data = data;
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return dev_err_probe(dev, PTR_ERR(otp->base),
+ "failed to ioremap resource\n");
+
+ otp->clks = devm_kcalloc(dev, data->num_clks, sizeof(*otp->clks),
+ GFP_KERNEL);
+ if (!otp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_clks; ++i)
+ otp->clks[i].id = data->clks[i];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, otp->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+
+ otp->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(otp->rst))
+ return dev_err_probe(dev, PTR_ERR(otp->rst),
+ "failed to get resets\n");
+
+ otp_config.size = data->size;
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &otp_config);
+ if (IS_ERR(nvmem))
+ return dev_err_probe(dev, PTR_ERR(nvmem),
+ "failed to register nvmem device\n");
+ return 0;
+}
+
+static struct platform_driver rockchip_otp_driver = {
+ .probe = rockchip_otp_probe,
+ .driver = {
+ .name = "rockchip-otp",
+ .of_match_table = rockchip_otp_match,
+ },
+};
+
+module_platform_driver(rockchip_otp_driver);
+MODULE_DESCRIPTION("Rockchip OTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
new file mode 100644
index 0000000000..2210da40df
--- /dev/null
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/hwspinlock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/nvmem-provider.h>
+
+/* PMIC global registers definition */
+#define SC27XX_MODULE_EN 0xc08
+#define SC2730_MODULE_EN 0x1808
+#define SC27XX_EFUSE_EN BIT(6)
+
+/* Efuse controller registers definition */
+#define SC27XX_EFUSE_GLB_CTRL 0x0
+#define SC27XX_EFUSE_DATA_RD 0x4
+#define SC27XX_EFUSE_DATA_WR 0x8
+#define SC27XX_EFUSE_BLOCK_INDEX 0xc
+#define SC27XX_EFUSE_MODE_CTRL 0x10
+#define SC27XX_EFUSE_STATUS 0x14
+#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20
+#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24
+#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28
+
+/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */
+#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0)
+
+/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */
+#define SC27XX_EFUSE_PG_START BIT(0)
+#define SC27XX_EFUSE_RD_START BIT(1)
+#define SC27XX_EFUSE_CLR_RDDONE BIT(2)
+
+/* Bits definitions for SC27XX_EFUSE_STATUS register */
+#define SC27XX_EFUSE_PGM_BUSY BIT(0)
+#define SC27XX_EFUSE_READ_BUSY BIT(1)
+#define SC27XX_EFUSE_STANDBY BIT(2)
+#define SC27XX_EFUSE_GLOBAL_PROT BIT(3)
+#define SC27XX_EFUSE_RD_DONE BIT(4)
+
+/* Block number and block width (bytes) definitions */
+#define SC27XX_EFUSE_BLOCK_MAX 32
+#define SC27XX_EFUSE_BLOCK_WIDTH 2
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000
+
+/* Timeout (us) of polling the status */
+#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
+#define SC27XX_EFUSE_POLL_DELAY_US 10000
+
+/*
+ * Since different PMICs of the SC27xx series can have different
+ * register addresses, we save the address in the device data structure.
+ */
+struct sc27xx_efuse_variant_data {
+ u32 module_en;
+};
+
+struct sc27xx_efuse {
+ struct device *dev;
+ struct regmap *regmap;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ u32 base;
+ const struct sc27xx_efuse_variant_data *var_data;
+};
+
+static const struct sc27xx_efuse_variant_data sc2731_edata = {
+ .module_en = SC27XX_MODULE_EN,
+};
+
+static const struct sc27xx_efuse_variant_data sc2730_edata = {
+ .module_en = SC2730_MODULE_EN,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the single efuse
+ * controller, so we need one hardware spinlock to synchronize between
+ * them.
+ */
+static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SC27XX_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timed out getting the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read_poll_timeout(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_STATUS,
+ val, (val & bits),
+ SC27XX_EFUSE_POLL_DELAY_US,
+ SC27XX_EFUSE_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timed out waiting for the efuse status\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sc27xx_efuse *efuse = context;
+ u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH;
+ u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+ int ret;
+
+ if (blk_index > SC27XX_EFUSE_BLOCK_MAX ||
+ bytes > SC27XX_EFUSE_BLOCK_WIDTH)
+ return -EINVAL;
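+ /*
+ * Illustrative example (hypothetical values): a 1-byte read at
+ * nvmem offset 5 maps to block 2 with blk_offset = 8, so the
+ * requested byte is taken from bits 15:8 of the 2-byte block when
+ * the result is shifted at the end of this function.
+ */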
+
+ ret = sc27xx_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ /* Enable the efuse controller. */
+ ret = regmap_update_bits(efuse->regmap, efuse->var_data->module_en,
+ SC27XX_EFUSE_EN, SC27XX_EFUSE_EN);
+ if (ret)
+ goto unlock_efuse;
+
+ /*
+ * Before reading, we should ensure the efuse controller is in
+ * standby state.
+ */
+ ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY);
+ if (ret)
+ goto disable_efuse;
+
+ /* Set the block address to be read. */
+ ret = regmap_write(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
+ blk_index & SC27XX_EFUSE_BLOCK_MASK);
+ if (ret)
+ goto disable_efuse;
+
+ /* Start reading process from efuse memory. */
+ ret = regmap_update_bits(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_MODE_CTRL,
+ SC27XX_EFUSE_RD_START,
+ SC27XX_EFUSE_RD_START);
+ if (ret)
+ goto disable_efuse;
+
+ /*
+ * Poll the read-done status to make sure the read has completed,
+ * which means the data can be read out now.
+ */
+ ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE);
+ if (ret)
+ goto disable_efuse;
+
+ /* Read data from efuse memory. */
+ ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD,
+ &buf);
+ if (ret)
+ goto disable_efuse;
+
+ /* Clear the read done flag. */
+ ret = regmap_update_bits(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_MODE_CTRL,
+ SC27XX_EFUSE_CLR_RDDONE,
+ SC27XX_EFUSE_CLR_RDDONE);
+
+disable_efuse:
+ /* Disable the efuse controller after reading. */
+ regmap_update_bits(efuse->regmap, efuse->var_data->module_en, SC27XX_EFUSE_EN, 0);
+unlock_efuse:
+ sc27xx_efuse_unlock(efuse);
+
+ if (!ret) {
+ buf >>= blk_offset;
+ memcpy(val, &buf, bytes);
+ }
+
+ return ret;
+}
+
+static int sc27xx_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_config econfig = { };
+ struct nvmem_device *nvmem;
+ struct sc27xx_efuse *efuse;
+ int ret;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!efuse->regmap) {
+ dev_err(&pdev->dev, "failed to get efuse regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &efuse->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get efuse base address\n");
+ return ret;
+ }
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwspinlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwspinlock\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ efuse->var_data = of_device_get_match_data(&pdev->dev);
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = true;
+ econfig.name = "sc27xx-efuse";
+ econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sc27xx_efuse_read;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sc27xx_efuse_of_match[] = {
+ { .compatible = "sprd,sc2731-efuse", .data = &sc2731_edata},
+ { .compatible = "sprd,sc2730-efuse", .data = &sc2730_edata},
+ { }
+};
+
+static struct platform_driver sc27xx_efuse_driver = {
+ .probe = sc27xx_efuse_probe,
+ .driver = {
+ .name = "sc27xx-efuse",
+ .of_match_table = sc27xx_efuse_of_match,
+ },
+};
+
+module_platform_driver(sc27xx_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sec-qfprom.c b/drivers/nvmem/sec-qfprom.c
new file mode 100644
index 0000000000..e48c2dc0c4
--- /dev/null
+++ b/drivers/nvmem/sec-qfprom.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/**
+ * struct sec_qfprom - structure holding secure qfprom attributes
+ *
+ * @base: starting physical address for secure qfprom corrected address space.
+ * @dev: qfprom device structure.
+ */
+struct sec_qfprom {
+ phys_addr_t base;
+ struct device *dev;
+};
+
+static int sec_qfprom_reg_read(void *context, unsigned int reg, void *_val, size_t bytes)
+{
+ struct sec_qfprom *priv = context;
+ unsigned int i;
+ u8 *val = _val;
+ u32 read_val;
+ u8 *tmp;
+
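+ /*
+ * Illustrative example (hypothetical values): a 3-byte read at
+ * reg == 2 does one SCM read of the word at offset 0 and copies
+ * bytes 2 and 3, then another SCM read at offset 4 and copies
+ * byte 0.
+ */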
+ for (i = 0; i < bytes; i++, reg++) {
+ if (i == 0 || reg % 4 == 0) {
+ if (qcom_scm_io_readl(priv->base + (reg & ~3), &read_val)) {
+ dev_err(priv->dev, "Couldn't access fuse register\n");
+ return -EINVAL;
+ }
+ tmp = (u8 *)&read_val;
+ }
+
+ val[i] = tmp[reg & 3];
+ }
+
+ return 0;
+}
+
+static int sec_qfprom_probe(struct platform_device *pdev)
+{
+ struct nvmem_config econfig = {
+ .name = "sec-qfprom",
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+ .reg_read = sec_qfprom_reg_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct sec_qfprom *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ priv->base = res->start;
+
+ econfig.size = resource_size(res);
+ econfig.dev = dev;
+ econfig.priv = priv;
+
+ priv->dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id sec_qfprom_of_match[] = {
+ { .compatible = "qcom,sec-qfprom" },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sec_qfprom_of_match);
+
+static struct platform_driver qfprom_driver = {
+ .probe = sec_qfprom_probe,
+ .driver = {
+ .name = "qcom_sec_qfprom",
+ .of_match_table = sec_qfprom_of_match,
+ },
+};
+module_platform_driver(qfprom_driver);
+MODULE_DESCRIPTION("Qualcomm Secure QFPROM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c
new file mode 100644
index 0000000000..89c2711232
--- /dev/null
+++ b/drivers/nvmem/snvs_lpgpr.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IMX6Q_SNVS_HPLR 0x00
+#define IMX6Q_SNVS_LPLR 0x34
+#define IMX6Q_SNVS_LPGPR 0x68
+
+#define IMX7D_SNVS_HPLR 0x00
+#define IMX7D_SNVS_LPLR 0x34
+#define IMX7D_SNVS_LPGPR 0x90
+
+#define IMX_GPR_SL BIT(5)
+#define IMX_GPR_HL BIT(5)
+
+struct snvs_lpgpr_cfg {
+ int offset;
+ int offset_hplr;
+ int offset_lplr;
+ int size;
+};
+
+struct snvs_lpgpr_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct nvmem_config cfg;
+ const struct snvs_lpgpr_cfg *dcfg;
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
+ .offset = IMX6Q_SNVS_LPGPR,
+ .offset_hplr = IMX6Q_SNVS_HPLR,
+ .offset_lplr = IMX6Q_SNVS_LPLR,
+ .size = 4,
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx7d = {
+ .offset = IMX7D_SNVS_LPGPR,
+ .offset_hplr = IMX7D_SNVS_HPLR,
+ .offset_lplr = IMX7D_SNVS_LPLR,
+ .size = 16,
+};
+
+static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+ unsigned int lock_reg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX_GPR_SL)
+ return -EPERM;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX_GPR_HL)
+ return -EPERM;
+
+ return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
+ bytes / 4);
+}
+
+static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+
+ return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
+ val, bytes / 4);
+}
+
+static int snvs_lpgpr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *syscon_node;
+ struct snvs_lpgpr_priv *priv;
+ struct nvmem_config *cfg;
+ struct nvmem_device *nvmem;
+ const struct snvs_lpgpr_cfg *dcfg;
+
+ if (!node)
+ return -ENOENT;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ syscon_node = of_get_parent(node);
+ if (!syscon_node)
+ return -ENODEV;
+
+ priv->regmap = syscon_node_to_regmap(syscon_node);
+ of_node_put(syscon_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->dcfg = dcfg;
+
+ cfg = &priv->cfg;
+ cfg->priv = priv;
+ cfg->name = dev_name(dev);
+ cfg->dev = dev;
+ cfg->stride = 4;
+ cfg->word_size = 4;
+ cfg->size = dcfg->size;
+ cfg->owner = THIS_MODULE;
+ cfg->reg_read = snvs_lpgpr_read;
+ cfg->reg_write = snvs_lpgpr_write;
+
+ nvmem = devm_nvmem_register(dev, cfg);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id snvs_lpgpr_dt_ids[] = {
+ { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx6ul-snvs-lpgpr",
+ .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx7d-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx7d },
+ { },
+};
+MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
+
+static struct platform_driver snvs_lpgpr_driver = {
+ .probe = snvs_lpgpr_probe,
+ .driver = {
+ .name = "snvs_lpgpr",
+ .of_match_table = snvs_lpgpr_dt_ids,
+ },
+};
+module_platform_driver(snvs_lpgpr_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 and i.MX7 Secure Non-Volatile Storage");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
new file mode 100644
index 0000000000..7e6e31db4b
--- /dev/null
+++ b/drivers/nvmem/sprd-efuse.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE 0x20
+#define SPRD_EFUSE_ERR_FLAG 0x24
+#define SPRD_EFUSE_ERR_CLR 0x28
+#define SPRD_EFUSE_MAGIC_NUM 0x2c
+#define SPRD_EFUSE_FW_CFG 0x50
+#define SPRD_EFUSE_PW_SWT 0x54
+#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON BIT(0)
+#define SPRD_EFUSE_ENK2_ON BIT(1)
+#define SPRD_EFUSE_PROG_EN BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH 4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
+ * and we can only access the normal efuse from the kernel. So define the
+ * normal block offset index and the number of normal blocks.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
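+/*
+ * Illustrative mapping implied by the values above: with a block offset
+ * of 72 and 24 normal blocks, the kernel-accessible blocks correspond to
+ * hardware efuse blocks 72..95.
+ */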
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
+
+/*
+ * Different Spreadtrum SoCs can have different normal block numbers and
+ * offsets, and some SoCs support a block-double feature: when reading or
+ * writing data to efuse memory, the controller stores the data twice in
+ * case one copy becomes corrupted after a long period.
+ *
+ * Thus we should save these parameters in the device data structure.
+ */
+struct sprd_efuse_variant_data {
+ u32 blk_nums;
+ u32 blk_offset;
+ bool blk_double;
+};
+
+struct sprd_efuse {
+ struct device *dev;
+ struct clk *clk;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ void __iomem *base;
+ const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+ .blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+ .blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+ .blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the single efuse
+ * controller, so we need one hardware spinlock to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SPRD_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timed out getting the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val &= ~SPRD_EFUSE_ENK2_ON;
+ else
+ val &= ~SPRD_EFUSE_ENK1_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+ /* Opening or closing the efuse power needs 1000us to stabilize. */
+ usleep_range(1000, 1200);
+
+ if (en)
+ val |= SPRD_EFUSE_ENK1_ON;
+ else
+ val |= SPRD_EFUSE_ENK2_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+ /* Opening or closing the efuse power needs 1000us to stabilize. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_VDD_EN;
+ else
+ val &= ~SPRD_EFUSE_VDD_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+ /* Wait 1000us after switching the efuse power so it can stabilize. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_LOCK_WR_EN;
+ else
+ val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_AUTO_CHECK_EN;
+ else
+ val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_DOUBLE_EN;
+ else
+ val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val |= SPRD_EFUSE_PROG_EN;
+ else
+ val &= ~SPRD_EFUSE_PROG_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+ bool lock, u32 *data)
+{
+ u32 status;
+ int ret = 0;
+
+ /*
+ * We must set the correct magic number before writing the efuse to
+ * allow programming; other programming is blocked until we clear
+ * the magic number.
+ */
+ writel(SPRD_EFUSE_MAGIC_NUMBER,
+ efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ /*
+ * Power on the efuse, enable programming, and enable double data
+ * if requested.
+ */
+ sprd_efuse_set_prog_power(efuse, true);
+ sprd_efuse_set_prog_en(efuse, true);
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /*
+ * Enable the auto-check function to validate that the programming
+ * succeeded.
+ */
+ if (lock)
+ sprd_efuse_set_auto_check(efuse, true);
+
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable auto-check and data double after programming */
+ if (lock)
+ sprd_efuse_set_auto_check(efuse, false);
+ sprd_efuse_set_data_double(efuse, false);
+
+ /*
+ * Check the efuse error status. If programming succeeded, lock this
+ * efuse block so it cannot be programmed again.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "write error status %u of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ ret = -EBUSY;
+ } else if (lock) {
+ sprd_efuse_set_prog_lock(efuse, lock);
+ writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
+ sprd_efuse_set_prog_lock(efuse, false);
+ }
+
+ sprd_efuse_set_prog_power(efuse, false);
+ writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+ bool doub)
+{
+ u32 status;
+
+ /*
+ * The efuse must be powered on before reading data from it, and is
+ * powered off again once the read completes.
+ */
+ sprd_efuse_set_read_power(efuse, true);
+
+ /* Enable double data if asked */
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /* Start to read data from efuse block */
+ *val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable double data */
+ sprd_efuse_set_data_double(efuse, false);
+
+ /* Power off the efuse */
+ sprd_efuse_set_read_power(efuse, false);
+
+ /*
+ * Check the efuse error status and clear any errors that
+ * occurred.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "read error status %d of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
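+/*
+ * Map an nvmem byte offset onto an efuse block index and a bit shift inside
+ * the 32-bit block. Illustrative example (not from the datasheet): with the
+ * ums312 layout (blk_offset = 72), a read at offset 5 targets hardware block
+ * 5 / 4 + 72 = 73 and the returned word is shifted right by (5 % 4) * 8 = 8
+ * bits before copying 'bytes' bytes to the caller.
+ */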
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+ u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+ u32 data;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+ if (!ret) {
+ data >>= blk_offset;
+ memcpy(val, &data, bytes);
+ }
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ bool lock;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ /*
+ * If the number of bytes to write equals the block width, the whole
+ * block will be programmed. In that case, lock the block so it
+ * cannot be programmed again.
+ *
+ * If the block is only partially programmed, leave it unlocked so it
+ * can still be programmed again later.
+ */
+ if (bytes < SPRD_EFUSE_BLOCK_WIDTH)
+ lock = false;
+ else
+ lock = true;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val);
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = { };
+ struct sprd_efuse *efuse;
+ const struct sprd_efuse_variant_data *pdata;
+ int ret;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwlock\n");
+ return -ENXIO;
+ }
+
+ efuse->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(efuse->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(efuse->clk);
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ efuse->data = pdata;
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = false;
+ econfig.name = "sprd-efuse";
+ econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sprd_efuse_read;
+ econfig.reg_write = sprd_efuse_write;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_efuse_of_match[] = {
+ { .compatible = "sprd,ums312-efuse", .data = &ums312_data },
+ { }
+};
+
+static struct platform_driver sprd_efuse_driver = {
+ .probe = sprd_efuse_probe,
+ .driver = {
+ .name = "sprd-efuse",
+ .of_match_table = sprd_efuse_of_match,
+ },
+};
+
+module_platform_driver(sprd_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum AP efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/stm32-bsec-optee-ta.c b/drivers/nvmem/stm32-bsec-optee-ta.c
new file mode 100644
index 0000000000..f89ce791dd
--- /dev/null
+++ b/drivers/nvmem/stm32-bsec-optee-ta.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/tee_drv.h>
+
+#include "stm32-bsec-optee-ta.h"
+
+/*
+ * Read OTP memory
+ *
+ * [in] value[0].a OTP start offset in byte
+ * [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock)
+ * [out] memref[1].buffer Output buffer to store read values
+ * [out] memref[1].size Size of OTP to be read
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
+ */
+#define PTA_BSEC_READ_MEM 0x0
+
+/*
+ * Write OTP memory
+ *
+ * [in] value[0].a OTP start offset in byte
+ * [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock)
+ * [in] memref[1].buffer Input buffer to read values
+ * [in] memref[1].size Size of OTP to be written
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
+ */
+#define PTA_BSEC_WRITE_MEM 0x1
+
+/* value of PTA_BSEC access type = value[in] b */
+#define SHADOW_ACCESS 0
+#define FUSE_ACCESS 1
+#define LOCK_ACCESS 2
+
+/* Bitfield definition for LOCK status */
+#define LOCK_PERM BIT(30)
+
+/* OP-TEE STM32MP BSEC TA UUID */
+static const uuid_t stm32mp_bsec_ta_uuid =
+ UUID_INIT(0x94cf71ad, 0x80e6, 0x40b5,
+ 0xa7, 0xc6, 0x3d, 0xc5, 0x01, 0xeb, 0x28, 0x03);
+
+/*
+ * Check whether this driver supports the BSEC TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int stm32_bsec_optee_ta_match(struct tee_ioctl_version_data *ver,
+ const void *data)
+{
+ /* Currently this driver only supports GP compliant, OP-TEE based TA */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/* Open a session to OP-TEE for STM32MP BSEC TA */
+static int stm32_bsec_ta_open_session(struct tee_context *ctx, u32 *id)
+{
+ struct tee_ioctl_open_session_arg sess_arg;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ export_uuid(sess_arg.uuid, &stm32mp_bsec_ta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ pr_err("%s: tee_client_open_session failed err:%#x, ret:%#x\n",
+ __func__, sess_arg.ret, rc);
+ if (!rc)
+ rc = -EINVAL;
+ } else {
+ *id = sess_arg.session;
+ }
+
+ return rc;
+}
+
+/* Close a session to OP-TEE for the STM32MP BSEC TA */
+static void stm32_bsec_ta_close_session(void *ctx, u32 id)
+{
+ tee_client_close_session(ctx, id);
+}
+
+/* stm32_bsec_optee_ta_open() - initialize the STM32MP BSEC TA */
+int stm32_bsec_optee_ta_open(struct tee_context **ctx)
+{
+ struct tee_context *tee_ctx;
+ u32 session_id;
+ int rc;
+
+ /* Open context with TEE driver */
+ tee_ctx = tee_client_open_context(NULL, stm32_bsec_optee_ta_match, NULL, NULL);
+ if (IS_ERR(tee_ctx)) {
+ rc = PTR_ERR(tee_ctx);
+ if (rc == -ENOENT)
+ return -EPROBE_DEFER;
+ pr_err("%s: tee_client_open_context failed (%d)\n", __func__, rc);
+
+ return rc;
+ }
+
+ /* Check STM32MP BSEC TA presence */
+ rc = stm32_bsec_ta_open_session(tee_ctx, &session_id);
+ if (rc) {
+ tee_client_close_context(tee_ctx);
+ return rc;
+ }
+
+ stm32_bsec_ta_close_session(tee_ctx, session_id);
+
+ *ctx = tee_ctx;
+
+ return 0;
+}
+
+/* stm32_bsec_optee_ta_close() - release the STM32MP BSEC TA */
+void stm32_bsec_optee_ta_close(void *ctx)
+{
+ tee_client_close_context(ctx);
+}
+
+/* stm32_bsec_optee_ta_read() - nvmem read access using PTA client driver */
+int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct tee_shm *shm;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[2];
+ u8 *shm_buf;
+ u32 start, num_bytes;
+ int ret;
+ u32 session_id;
+
+ ret = stm32_bsec_ta_open_session(ctx, &session_id);
+ if (ret)
+ return ret;
+
+ memset(&arg, 0, sizeof(arg));
+ memset(&param, 0, sizeof(param));
+
+ arg.func = PTA_BSEC_READ_MEM;
+ arg.session = session_id;
+ arg.num_params = 2;
+
+ /* align access on 32bits */
+ start = ALIGN_DOWN(offset, 4);
+ num_bytes = round_up(offset + bytes - start, 4);
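+ /*
+ * Illustrative example: offset = 6, bytes = 3 gives start = 4 and
+ * num_bytes = 8; the copy at the end then skips offset % 4 = 2 bytes.
+ */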
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = start;
+ param[0].u.value.b = SHADOW_ACCESS;
+
+ shm = tee_shm_alloc_kernel_buf(ctx, num_bytes);
+ if (IS_ERR(shm)) {
+ ret = PTR_ERR(shm);
+ goto out_tee_session;
+ }
+
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[1].u.memref.shm = shm;
+ param[1].u.memref.size = num_bytes;
+
+ ret = tee_client_invoke_func(ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n",
+ arg.ret, ret);
+ if (!ret)
+ ret = -EIO;
+ }
+ if (!ret) {
+ shm_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(shm_buf)) {
+ ret = PTR_ERR(shm_buf);
+ pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
+ } else {
+ /* read data from 32 bits aligned buffer */
+ memcpy(buf, &shm_buf[offset % 4], bytes);
+ }
+ }
+
+ tee_shm_free(shm);
+
+out_tee_session:
+ stm32_bsec_ta_close_session(ctx, session_id);
+
+ return ret;
+}
+
+/* stm32_bsec_optee_ta_write() - nvmem write access using PTA client driver */
+int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
+ unsigned int offset, void *buf, size_t bytes)
+{
+ struct tee_shm *shm;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[2];
+ u8 *shm_buf;
+ int ret;
+ u32 session_id;
+
+ /* Allow only writing complete 32-bit aligned words */
+ if ((bytes % 4) || (offset % 4))
+ return -EINVAL;
+
+ ret = stm32_bsec_ta_open_session(ctx, &session_id);
+ if (ret)
+ return ret;
+
+ memset(&arg, 0, sizeof(arg));
+ memset(&param, 0, sizeof(param));
+
+ arg.func = PTA_BSEC_WRITE_MEM;
+ arg.session = session_id;
+ arg.num_params = 2;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = offset;
+ param[0].u.value.b = FUSE_ACCESS;
+
+ shm = tee_shm_alloc_kernel_buf(ctx, bytes);
+ if (IS_ERR(shm)) {
+ ret = PTR_ERR(shm);
+ goto out_tee_session;
+ }
+
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[1].u.memref.shm = shm;
+ param[1].u.memref.size = bytes;
+
+ shm_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(shm_buf)) {
+ ret = PTR_ERR(shm_buf);
+ pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
+ tee_shm_free(shm);
+
+ goto out_tee_session;
+ }
+
+ memcpy(shm_buf, buf, bytes);
+
+ ret = tee_client_invoke_func(ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
+ if (!ret)
+ ret = -EIO;
+ }
+ pr_debug("Write OTPs %d to %zu, ret=%d\n", offset / 4, (offset + bytes) / 4, ret);
+
+ /* Lock the upper OTPs with ECC protection, word programming only */
+ if (!ret && ((offset + bytes) >= (lower * 4))) {
+ u32 start, nb_lock;
+ u32 *lock = (u32 *)shm_buf;
+ int i;
+
+ /*
+ * Don't lock the lower OTPs: they have no ECC protection and use
+ * incremental bit programming, so a second write is allowed.
+ */
+ start = max_t(u32, offset, lower * 4);
+ nb_lock = (offset + bytes - start) / 4;
+
+ param[0].u.value.a = start;
+ param[0].u.value.b = LOCK_ACCESS;
+ param[1].u.memref.size = nb_lock * 4;
+
+ for (i = 0; i < nb_lock; i++)
+ lock[i] = LOCK_PERM;
+
+ ret = tee_client_invoke_func(ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
+ if (!ret)
+ ret = -EIO;
+ }
+ pr_debug("Lock upper OTPs %d to %d, ret=%d\n",
+ start / 4, start / 4 + nb_lock, ret);
+ }
+
+ tee_shm_free(shm);
+
+out_tee_session:
+ stm32_bsec_ta_close_session(ctx, session_id);
+
+ return ret;
+}
diff --git a/drivers/nvmem/stm32-bsec-optee-ta.h b/drivers/nvmem/stm32-bsec-optee-ta.h
new file mode 100644
index 0000000000..3966a05351
--- /dev/null
+++ b/drivers/nvmem/stm32-bsec-optee-ta.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#if IS_ENABLED(CONFIG_NVMEM_STM32_BSEC_OPTEE_TA)
+/**
+ * stm32_bsec_optee_ta_open() - initialize the STM32 BSEC TA
+ * @ctx: the OP-TEE context on success
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_open(struct tee_context **ctx);
+
+/**
+ * stm32_bsec_optee_ta_close() - release the STM32 BSEC TA
+ * @ctx: the OP-TEE context
+ *
+ * This function is used to clean up the OP-TEE resources initialized in
+ * stm32_bsec_optee_ta_open(); it can be used as a callback for
+ * devm_add_action_or_reset().
+ */
+void stm32_bsec_optee_ta_close(void *ctx);
+
+/**
+ * stm32_bsec_optee_ta_read() - nvmem read access using TA client driver
+ * @ctx: the OP-TEE context provided by stm32_bsec_optee_ta_open
+ * @offset: nvmem offset
+ * @buf: buffer to fill with nvmem values
+ * @bytes: number of bytes to read
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
+ void *buf, size_t bytes);
+
+/**
+ * stm32_bsec_optee_ta_write() - nvmem write access using TA client driver
+ * @ctx: the OP-TEE context provided by stm32_bsec_optee_ta_open
+ * @lower: number of lower OTP, not protected by ECC
+ * @offset: nvmem offset
+ * @buf: buffer with nvmem values
+ * @bytes: number of bytes to write
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
+ unsigned int offset, void *buf, size_t bytes);
+
+#else
+
+static inline int stm32_bsec_optee_ta_open(struct tee_context **ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void stm32_bsec_optee_ta_close(void *ctx)
+{
+}
+
+static inline int stm32_bsec_optee_ta_read(struct tee_context *ctx,
+ unsigned int offset, void *buf,
+ size_t bytes)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int stm32_bsec_optee_ta_write(struct tee_context *ctx,
+ unsigned int lower,
+ unsigned int offset, void *buf,
+ size_t bytes)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_NVMEM_STM32_BSEC_OPTEE_TA */
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
new file mode 100644
index 0000000000..0f84044bd1
--- /dev/null
+++ b/drivers/nvmem/stm32-romem.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Factory-programmed memory read access driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com> for STMicroelectronics.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/tee_drv.h>
+
+#include "stm32-bsec-optee-ta.h"
+
+/* BSEC secure service access from non-secure */
+#define STM32_SMC_BSEC 0x82001003
+#define STM32_SMC_READ_SHADOW 0x01
+#define STM32_SMC_PROG_OTP 0x02
+#define STM32_SMC_WRITE_SHADOW 0x03
+#define STM32_SMC_READ_OTP 0x04
+
+/* shadow registers offset */
+#define STM32MP15_BSEC_DATA0 0x200
+
+struct stm32_romem_cfg {
+ int size;
+ u8 lower;
+ bool ta;
+};
+
+struct stm32_romem_priv {
+ void __iomem *base;
+ struct nvmem_config cfg;
+ u8 lower;
+ struct tee_context *ctx;
+};
+
+static int stm32_romem_read(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+ u8 *buf8 = buf;
+ int i;
+
+ for (i = offset; i < offset + bytes; i++)
+ *buf8++ = readb_relaxed(priv->base + i);
+
+ return 0;
+}
+
+static int stm32_bsec_smc(u8 op, u32 otp, u32 data, u32 *result)
+{
+#if IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(STM32_SMC_BSEC, op, otp, data, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return -EIO;
+
+ if (result)
+ *result = (u32)res.a1;
+
+ return 0;
+#else
+ return -ENXIO;
+#endif
+}
+
+static int stm32_bsec_read(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+ struct device *dev = priv->cfg.dev;
+ u32 roffset, rbytes, val;
+ u8 *buf8 = buf, *val8 = (u8 *)&val;
+ int i, j = 0, ret, skip_bytes, size;
+
+ /* Round an unaligned access to 32-bit words */
+ roffset = rounddown(offset, 4);
+ skip_bytes = offset & 0x3;
+ rbytes = roundup(bytes + skip_bytes, 4);
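+ /*
+ * Worked example (illustrative): offset = 6, bytes = 3 gives
+ * roffset = 4, skip_bytes = 2 and rbytes = 8, i.e. two 32-bit words
+ * are read and the first two bytes of the first word are skipped.
+ */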
+
+ if (roffset + rbytes > priv->cfg.size)
+ return -EINVAL;
+
+ for (i = roffset; (i < roffset + rbytes); i += 4) {
+ u32 otp = i >> 2;
+
+ if (otp < priv->lower) {
+ /* read lower data from shadow registers */
+ val = readl_relaxed(
+ priv->base + STM32MP15_BSEC_DATA0 + i);
+ } else {
+ ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, otp, 0,
+ &val);
+ if (ret) {
+ dev_err(dev, "Can't read data%d (%d)\n", otp,
+ ret);
+ return ret;
+ }
+ }
+ /* skip first bytes in case of unaligned read */
+ if (skip_bytes)
+ size = min(bytes, (size_t)(4 - skip_bytes));
+ else
+ size = min(bytes, (size_t)4);
+ memcpy(&buf8[j], &val8[skip_bytes], size);
+ bytes -= size;
+ j += size;
+ skip_bytes = 0;
+ }
+
+ return 0;
+}
+
+static int stm32_bsec_write(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+ struct device *dev = priv->cfg.dev;
+ u32 *buf32 = buf;
+ int ret, i;
+
+ /* Allow only writing complete 32-bit aligned words */
+ if ((bytes % 4) || (offset % 4))
+ return -EINVAL;
+
+ for (i = offset; i < offset + bytes; i += 4) {
+ ret = stm32_bsec_smc(STM32_SMC_PROG_OTP, i >> 2, *buf32++,
+ NULL);
+ if (ret) {
+ dev_err(dev, "Can't write data%d (%d)\n", i >> 2, ret);
+ return ret;
+ }
+ }
+
+ if (offset + bytes >= priv->lower * 4)
+ dev_warn(dev, "Update of upper OTPs with ECC protection (word programming, only once)\n");
+
+ return 0;
+}
+
+static int stm32_bsec_pta_read(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+
+ return stm32_bsec_optee_ta_read(priv->ctx, offset, buf, bytes);
+}
+
+static int stm32_bsec_pta_write(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+
+ return stm32_bsec_optee_ta_write(priv->ctx, priv->lower, offset, buf, bytes);
+}
+
+static bool stm32_bsec_smc_check(void)
+{
+ u32 val;
+ int ret;
+
+ /* Check that OP-TEE supports the BSEC SMC (legacy mode) */
+ ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, 0, 0, &val);
+
+ return !ret;
+}
+
+static bool optee_presence_check(void)
+{
+ struct device_node *np;
+ bool tee_detected = false;
+
+ /* Check that the OP-TEE node is present and available. */
+ np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+ if (np && of_device_is_available(np))
+ tee_detected = true;
+ of_node_put(np);
+
+ return tee_detected;
+}
+
+static int stm32_romem_probe(struct platform_device *pdev)
+{
+ const struct stm32_romem_cfg *cfg;
+ struct device *dev = &pdev->dev;
+ struct stm32_romem_priv *priv;
+ struct resource *res;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->cfg.name = "stm32-romem";
+ priv->cfg.word_size = 1;
+ priv->cfg.stride = 1;
+ priv->cfg.dev = dev;
+ priv->cfg.priv = priv;
+ priv->cfg.owner = THIS_MODULE;
+ priv->cfg.type = NVMEM_TYPE_OTP;
+
+ priv->lower = 0;
+
+ cfg = (const struct stm32_romem_cfg *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
+ if (!cfg) {
+ priv->cfg.read_only = true;
+ priv->cfg.size = resource_size(res);
+ priv->cfg.reg_read = stm32_romem_read;
+ } else {
+ priv->cfg.size = cfg->size;
+ priv->lower = cfg->lower;
+ if (cfg->ta || optee_presence_check()) {
+ rc = stm32_bsec_optee_ta_open(&priv->ctx);
+ if (rc) {
+ /* wait for OP-TEE client driver to be up and ready */
+ if (rc == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /* BSEC PTA is required or SMC not supported */
+ if (cfg->ta || !stm32_bsec_smc_check())
+ return rc;
+ }
+ }
+ if (priv->ctx) {
+ rc = devm_add_action_or_reset(dev, stm32_bsec_optee_ta_close, priv->ctx);
+ if (rc) {
+ dev_err(dev, "devm_add_action_or_reset() failed (%d)\n", rc);
+ return rc;
+ }
+ priv->cfg.reg_read = stm32_bsec_pta_read;
+ priv->cfg.reg_write = stm32_bsec_pta_write;
+ } else {
+ priv->cfg.reg_read = stm32_bsec_read;
+ priv->cfg.reg_write = stm32_bsec_write;
+ }
+ }
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg));
+}
+
+/*
+ * STM32MP15/13 BSEC OTP regions: 4096 OTP bits (with 3072 effective bits)
+ * => 96 x 32-bits data words
+ * - Lower: 1K bits, 2:1 redundancy, incremental bit programming
+ * => 32 (x 32-bits) lower shadow registers = words 0 to 31
+ * - Upper: 2K bits, ECC protection, word programming only
+ * => 64 (x 32-bits) = words 32 to 95
+ */
+static const struct stm32_romem_cfg stm32mp15_bsec_cfg = {
+ .size = 384,
+ .lower = 32,
+ .ta = false,
+};
+
+static const struct stm32_romem_cfg stm32mp13_bsec_cfg = {
+ .size = 384,
+ .lower = 32,
+ .ta = true,
+};
+
+static const struct of_device_id stm32_romem_of_match[] __maybe_unused = {
+ { .compatible = "st,stm32f4-otp", }, {
+ .compatible = "st,stm32mp15-bsec",
+ .data = (void *)&stm32mp15_bsec_cfg,
+ }, {
+ .compatible = "st,stm32mp13-bsec",
+ .data = (void *)&stm32mp13_bsec_cfg,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_romem_of_match);
+
+static struct platform_driver stm32_romem_driver = {
+ .probe = stm32_romem_probe,
+ .driver = {
+ .name = "stm32-romem",
+ .of_match_table = of_match_ptr(stm32_romem_of_match),
+ },
+};
+module_platform_driver(stm32_romem_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 RO-MEM");
+MODULE_ALIAS("platform:nvmem-stm32-romem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
new file mode 100644
index 0000000000..f3a18aa0a6
--- /dev/null
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * The OCOTP driver for Sunplus SP7021
+ *
+ * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+/*
+ * OTP memory
+ * Each bank contains 4 words (32 bits).
+ * Bank 0 starts at offset 0 from the base.
+ */
+
+#define OTP_WORDS_PER_BANK 4
+#define OTP_WORD_SIZE sizeof(u32)
+#define OTP_BIT_ADDR_OF_BANK (8 * OTP_WORD_SIZE * OTP_WORDS_PER_BANK)
+#define QAC628_OTP_NUM_BANKS 8
+#define QAC628_OTP_SIZE (QAC628_OTP_NUM_BANKS * OTP_WORDS_PER_BANK * OTP_WORD_SIZE)
+#define OTP_READ_TIMEOUT_US 200000
+
+/* HB_GPIO */
+#define ADDRESS_8_DATA 0x20
+
+/* OTP_RX */
+#define OTP_CONTROL_2 0x48
+#define OTP_RD_PERIOD GENMASK(15, 8)
+#define OTP_RD_PERIOD_MASK ~GENMASK(15, 8)
+#define CPU_CLOCK FIELD_PREP(OTP_RD_PERIOD, 30)
+#define SEL_BAK_KEY2 BIT(5)
+#define SEL_BAK_KEY2_MASK ~BIT(5)
+#define SW_TRIM_EN BIT(4)
+#define SW_TRIM_EN_MASK ~BIT(4)
+#define SEL_BAK_KEY BIT(3)
+#define SEL_BAK_KEY_MASK ~BIT(3)
+#define OTP_READ BIT(2)
+#define OTP_LOAD_SECURE_DATA BIT(1)
+#define OTP_LOAD_SECURE_DATA_MASK ~BIT(1)
+#define OTP_DO_CRC BIT(0)
+#define OTP_DO_CRC_MASK ~BIT(0)
+#define OTP_STATUS 0x4c
+#define OTP_READ_DONE BIT(4)
+#define OTP_READ_DONE_MASK ~BIT(4)
+#define OTP_LOAD_SECURE_DONE_MASK ~BIT(2)
+#define OTP_READ_ADDRESS 0x50
+
+enum base_type {
+ HB_GPIO,
+ OTPRX,
+ BASEMAX,
+};
+
+struct sp_ocotp_priv {
+ struct device *dev;
+ void __iomem *base[BASEMAX];
+ struct clk *clk;
+};
+
+struct sp_ocotp_data {
+ int size;
+};
+
+static const struct sp_ocotp_data sp_otp_v0 = {
+ .size = QAC628_OTP_SIZE,
+};
+
+static int sp_otp_read_real(struct sp_ocotp_priv *otp, int addr, char *value)
+{
+ unsigned int addr_data;
+ unsigned int byte_shift;
+ unsigned int status;
+ int ret;
+
+ addr_data = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
+ addr_data = addr_data / OTP_WORD_SIZE;
+
+ byte_shift = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
+ byte_shift = byte_shift % OTP_WORD_SIZE;
+
+ addr = addr / (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
+ addr = addr * OTP_BIT_ADDR_OF_BANK;
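+ /*
+ * Illustrative example: addr = 21 falls in bank 1 (banks are
+ * 4 words x 4 bytes = 16 bytes); addr_data = 1 (word within the
+ * bank), byte_shift = 1 (byte within the word) and the bank bit
+ * address written below is 1 * 128 = 128.
+ */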
+
+ writel(readl(otp->base[OTPRX] + OTP_STATUS) & OTP_READ_DONE_MASK &
+ OTP_LOAD_SECURE_DONE_MASK, otp->base[OTPRX] + OTP_STATUS);
+ writel(addr, otp->base[OTPRX] + OTP_READ_ADDRESS);
+ writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) | OTP_READ,
+ otp->base[OTPRX] + OTP_CONTROL_2);
+ writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) & SEL_BAK_KEY2_MASK & SW_TRIM_EN_MASK
+ & SEL_BAK_KEY_MASK & OTP_LOAD_SECURE_DATA_MASK & OTP_DO_CRC_MASK,
+ otp->base[OTPRX] + OTP_CONTROL_2);
+ writel((readl(otp->base[OTPRX] + OTP_CONTROL_2) & OTP_RD_PERIOD_MASK) | CPU_CLOCK,
+ otp->base[OTPRX] + OTP_CONTROL_2);
+
+ ret = readl_poll_timeout(otp->base[OTPRX] + OTP_STATUS, status,
+ status & OTP_READ_DONE, 10, OTP_READ_TIMEOUT_US);
+
+ if (ret < 0)
+ return ret;
+
+ *value = (readl(otp->base[HB_GPIO] + ADDRESS_8_DATA + addr_data * OTP_WORD_SIZE)
+ >> (8 * byte_shift)) & 0xff;
+
+ return ret;
+}
+
+static int sp_ocotp_read(void *priv, unsigned int offset, void *value, size_t bytes)
+{
+ struct sp_ocotp_priv *otp = priv;
+ unsigned int addr;
+ char *buf = value;
+ char val[4];
+ int ret;
+
+ ret = clk_enable(otp->clk);
+ if (ret)
+ return ret;
+
+ *buf = 0;
+ for (addr = offset; addr < (offset + bytes); addr++) {
+ ret = sp_otp_read_real(otp, addr, val);
+ if (ret < 0) {
+ dev_err(otp->dev, "OTP read fail:%d at %d", ret, addr);
+ goto disable_clk;
+ }
+
+ *buf++ = *val;
+ }
+
+disable_clk:
+ clk_disable(otp->clk);
+
+ return ret;
+}
+
+static struct nvmem_config sp_ocotp_nvmem_config = {
+ .name = "sp-ocotp",
+ .read_only = true,
+ .word_size = 1,
+ .size = QAC628_OTP_SIZE,
+ .stride = 1,
+ .reg_read = sp_ocotp_read,
+ .owner = THIS_MODULE,
+};
+
+static int sp_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct sp_ocotp_priv *otp;
+ struct resource *res;
+ int ret;
+
+ otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio");
+ otp->base[HB_GPIO] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(otp->base[HB_GPIO]))
+ return PTR_ERR(otp->base[HB_GPIO]);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx");
+ otp->base[OTPRX] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(otp->base[OTPRX]))
+ return PTR_ERR(otp->base[OTPRX]);
+
+ otp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(otp->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(otp->clk),
+ "devm_clk_get fail\n");
+
+ ret = clk_prepare(otp->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare clk: %d\n", ret);
+ return ret;
+ }
+
+ sp_ocotp_nvmem_config.priv = otp;
+ sp_ocotp_nvmem_config.dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
+ "register nvmem device fail\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, nvmem);
+
+ dev_dbg(dev, "banks:%d x wpb:%d x wsize:%d = %d",
+ (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK,
+ (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
+
+ return 0;
+err:
+ clk_unprepare(otp->clk);
+ return ret;
+}
+
+static const struct of_device_id sp_ocotp_dt_ids[] = {
+ { .compatible = "sunplus,sp7021-ocotp", .data = &sp_otp_v0 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sp_ocotp_dt_ids);
+
+static struct platform_driver sp_otp_driver = {
+ .probe = sp_ocotp_probe,
+ .driver = {
+ .name = "sunplus,sp7021-ocotp",
+ .of_match_table = sp_ocotp_dt_ids,
+ }
+};
+module_platform_driver(sp_otp_driver);
+
+MODULE_AUTHOR("Vincent Shih <vincent.sunplus@gmail.com>");
+MODULE_DESCRIPTION("Sunplus On-Chip OTP driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
new file mode 100644
index 0000000000..5d364d8534
--- /dev/null
+++ b/drivers/nvmem/sunxi_sid.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sunXi SoCs Security ID support.
+ *
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+/* Registers and special values for doing register-based SID readout on H3 */
+#define SUN8I_SID_PRCTL 0x40
+#define SUN8I_SID_RDKEY 0x60
+
+#define SUN8I_SID_OFFSET_MASK 0x1FF
+#define SUN8I_SID_OFFSET_SHIFT 16
+#define SUN8I_SID_OP_LOCK (0xAC << 8)
+#define SUN8I_SID_READ BIT(1)
+
+struct sunxi_sid_cfg {
+ u32 value_offset;
+ u32 size;
+ bool need_register_readout;
+};
+
+struct sunxi_sid {
+ void __iomem *base;
+ u32 value_offset;
+};
+
+static int sunxi_sid_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct sunxi_sid *sid = context;
+ u32 word;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ __ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4);
+
+ val += round_down(bytes, 4);
+ offset += round_down(bytes, 4);
+ bytes = bytes % 4;
+
+ if (!bytes)
+ return 0;
+
+ /* Handle any trailing bytes */
+ word = readl_relaxed(sid->base + sid->value_offset + offset);
+ memcpy(val, &word, bytes);
+
+ return 0;
+}
+
+static int sun8i_sid_register_readout(const struct sunxi_sid *sid,
+ const unsigned int offset,
+ u32 *out)
+{
+ u32 reg_val;
+ int ret;
+
+ /* Set word, lock access, and set read command */
+ reg_val = (offset & SUN8I_SID_OFFSET_MASK)
+ << SUN8I_SID_OFFSET_SHIFT;
+ reg_val |= SUN8I_SID_OP_LOCK | SUN8I_SID_READ;
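+ /*
+ * Illustrative example: for offset 0 the value written below is
+ * 0x0000ac02 (OP_LOCK key 0xAC in bits 15..8, READ in bit 1).
+ */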
+ writel(reg_val, sid->base + SUN8I_SID_PRCTL);
+
+ ret = readl_poll_timeout(sid->base + SUN8I_SID_PRCTL, reg_val,
+ !(reg_val & SUN8I_SID_READ), 100, 250000);
+ if (ret)
+ return ret;
+
+ if (out)
+ *out = readl(sid->base + SUN8I_SID_RDKEY);
+
+ writel(0, sid->base + SUN8I_SID_PRCTL);
+
+ return 0;
+}
+
+/*
+ * On the Allwinner H3, the value at offset 0x200 of the SID controller does
+ * not appear to be reliable at all.
+ * Read it through the registers instead.
+ */
+static int sun8i_sid_read_by_reg(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct sunxi_sid *sid = context;
+ u32 word;
+ int ret;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ while (bytes >= 4) {
+ ret = sun8i_sid_register_readout(sid, offset, val);
+ if (ret)
+ return ret;
+
+ val += 4;
+ offset += 4;
+ bytes -= 4;
+ }
+
+ if (!bytes)
+ return 0;
+
+ /* Handle any trailing bytes */
+ ret = sun8i_sid_register_readout(sid, offset, &word);
+ if (ret)
+ return ret;
+
+ memcpy(val, &word, bytes);
+
+ return 0;
+}
+
+static int sunxi_sid_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvmem_config *nvmem_cfg;
+ struct nvmem_device *nvmem;
+ struct sunxi_sid *sid;
+ int size;
+ char *randomness;
+ const struct sunxi_sid_cfg *cfg;
+
+ sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
+ if (!sid)
+ return -ENOMEM;
+
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+ sid->value_offset = cfg->value_offset;
+
+ sid->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sid->base))
+ return PTR_ERR(sid->base);
+
+ size = cfg->size;
+
+ nvmem_cfg = devm_kzalloc(dev, sizeof(*nvmem_cfg), GFP_KERNEL);
+ if (!nvmem_cfg)
+ return -ENOMEM;
+
+ nvmem_cfg->dev = dev;
+ nvmem_cfg->name = "sunxi-sid";
+ nvmem_cfg->type = NVMEM_TYPE_OTP;
+ nvmem_cfg->read_only = true;
+ nvmem_cfg->size = cfg->size;
+ nvmem_cfg->word_size = 1;
+ nvmem_cfg->stride = 4;
+ nvmem_cfg->priv = sid;
+ if (cfg->need_register_readout)
+ nvmem_cfg->reg_read = sun8i_sid_read_by_reg;
+ else
+ nvmem_cfg->reg_read = sunxi_sid_read;
+
+ nvmem = devm_nvmem_register(dev, nvmem_cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ randomness = kzalloc(size, GFP_KERNEL);
+ if (!randomness)
+ return -ENOMEM;
+
+ nvmem_cfg->reg_read(sid, 0, randomness, size);
+ add_device_randomness(randomness, size);
+ kfree(randomness);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static const struct sunxi_sid_cfg sun4i_a10_cfg = {
+ .size = 0x10,
+};
+
+static const struct sunxi_sid_cfg sun7i_a20_cfg = {
+ .size = 0x200,
+};
+
+static const struct sunxi_sid_cfg sun8i_h3_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+ .need_register_readout = true,
+};
+
+static const struct sunxi_sid_cfg sun50i_a64_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+};
+
+static const struct sunxi_sid_cfg sun50i_h6_cfg = {
+ .value_offset = 0x200,
+ .size = 0x200,
+};
+
+static const struct of_device_id sunxi_sid_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
+ { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
+ { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
+ { .compatible = "allwinner,sun20i-d1-sid", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
+
+static struct platform_driver sunxi_sid_driver = {
+ .probe = sunxi_sid_probe,
+ .driver = {
+ .name = "eeprom-sunxi-sid",
+ .of_match_table = sunxi_sid_of_match,
+ },
+};
+module_platform_driver(sunxi_sid_driver);
+
+MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
+MODULE_DESCRIPTION("Allwinner sunxi security id driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
new file mode 100644
index 0000000000..c4ae94af4a
--- /dev/null
+++ b/drivers/nvmem/u-boot-env.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum u_boot_env_format {
+ U_BOOT_FORMAT_SINGLE,
+ U_BOOT_FORMAT_REDUNDANT,
+ U_BOOT_FORMAT_BROADCOM,
+};
+
+struct u_boot_env {
+ struct device *dev;
+ enum u_boot_env_format format;
+
+ struct mtd_info *mtd;
+
+ /* Cells */
+ struct nvmem_cell_info *cells;
+ int ncells;
+};
+
+struct u_boot_env_image_single {
+ __le32 crc32;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_redundant {
+ __le32 crc32;
+ u8 mark;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_broadcom {
+ __le32 magic;
+ __le32 len;
+ __le32 crc32;
+ DECLARE_FLEX_ARRAY(uint8_t, data);
+} __packed;
+
+static int u_boot_env_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct u_boot_env *priv = context;
+ struct device *dev = priv->dev;
+ size_t bytes_read;
+ int err;
+
+ err = mtd_read(priv->mtd, offset, bytes, &bytes_read, val);
+ if (err && !mtd_is_bitflip(err)) {
+ dev_err(dev, "Failed to read from mtd: %d\n", err);
+ return err;
+ }
+
+ if (bytes_read != bytes) {
+ dev_err(dev, "Failed to read %zu bytes\n", bytes);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
+ unsigned int offset, void *buf, size_t bytes)
+{
+ u8 mac[ETH_ALEN];
+
+ if (bytes != 3 * ETH_ALEN - 1)
+ return -EINVAL;
+
+ if (!mac_pton(buf, mac))
+ return -EINVAL;
+
+ if (index)
+ eth_addr_add(mac, index);
+
+ ether_addr_copy(buf, mac);
+
+ return 0;
+}
+
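+/*
+ * The environment data parsed below is a sequence of NUL-terminated
+ * "name=value" strings ending with an empty string, for example
+ * (illustrative values): "baudrate=115200\0ethaddr=00:11:22:33:44:55\0\0".
+ * Each variable becomes one nvmem cell whose payload is the value string.
+ */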
+static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+{
+ struct device *dev = priv->dev;
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+ int idx;
+
+ priv->ncells = 0;
+ for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
+ priv->ncells++;
+
+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+ if (!priv->cells)
+ return -ENOMEM;
+
+ for (var = data, idx = 0;
+ var < data + data_len && *var;
+ var = value + strlen(value) + 1, idx++) {
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+ priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+ if (!priv->cells[idx].name)
+ return -ENOMEM;
+ priv->cells[idx].offset = data_offset + value - data;
+ priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+ if (!strcmp(var, "ethaddr")) {
+ priv->cells[idx].raw_len = strlen(value);
+ priv->cells[idx].bytes = ETH_ALEN;
+ priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
+ }
+ }
+
+ if (WARN_ON(idx != priv->ncells))
+ priv->ncells = idx;
+
+ return 0;
+}
+
+static int u_boot_env_parse(struct u_boot_env *priv)
+{
+ struct device *dev = priv->dev;
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
+ size_t data_offset;
+ size_t data_len;
+ uint32_t crc32;
+ uint32_t calc;
+ size_t bytes;
+ uint8_t *buf;
+ int err;
+
+ buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
+ if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
+ dev_err(dev, "Failed to read from mtd: %d\n", err);
+ goto err_kfree;
+ }
+
+ switch (priv->format) {
+ case U_BOOT_FORMAT_SINGLE:
+ crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
+ data_offset = offsetof(struct u_boot_env_image_single, data);
+ break;
+ case U_BOOT_FORMAT_REDUNDANT:
+ crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ break;
+ case U_BOOT_FORMAT_BROADCOM:
+ crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ break;
+ }
+ crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
+ crc32_data_len = priv->mtd->size - crc32_data_offset;
+ data_len = priv->mtd->size - data_offset;
+
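+ /*
+ * crc32() is seeded with ~0 and the result inverted, which yields the
+ * standard (IEEE) CRC-32 that is compared against the value stored in
+ * the environment image header.
+ */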
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+ dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
+ err = -EINVAL;
+ goto err_kfree;
+ }
+
+ buf[priv->mtd->size - 1] = '\0';
+ err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
+ if (err)
+ dev_err(dev, "Failed to add cells: %d\n", err);
+
+err_kfree:
+ kfree(buf);
+err_out:
+ return err;
+}
+
+static int u_boot_env_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .name = "u-boot-env",
+ .reg_read = u_boot_env_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct u_boot_env *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ priv->format = (uintptr_t)of_device_get_match_data(dev);
+
+ priv->mtd = of_get_mtd_device_by_node(np);
+ if (IS_ERR(priv->mtd)) {
+ dev_err_probe(dev, PTR_ERR(priv->mtd), "Failed to get %pOF MTD\n", np);
+ return PTR_ERR(priv->mtd);
+ }
+
+ err = u_boot_env_parse(priv);
+ if (err)
+ return err;
+
+ config.dev = dev;
+ config.cells = priv->cells;
+ config.ncells = priv->ncells;
+ config.priv = priv;
+ config.size = priv->mtd->size;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id u_boot_env_of_match_table[] = {
+ { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
+ { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, },
+ {},
+};
+
+static struct platform_driver u_boot_env_driver = {
+ .probe = u_boot_env_probe,
+ .driver = {
+ .name = "u_boot_env",
+ .of_match_table = u_boot_env_of_match_table,
+ },
+};
+module_platform_driver(u_boot_env_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
new file mode 100644
index 0000000000..0a1dbb8053
--- /dev/null
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UniPhier eFuse driver
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct uniphier_efuse_priv {
+ void __iomem *base;
+};
+
+static int uniphier_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct uniphier_efuse_priv *priv = context;
+ u8 *val = _val;
+ int offs;
+
+ for (offs = 0; offs < bytes; offs += sizeof(u8))
+ *val++ = readb(priv->base + reg + offs);
+
+ return 0;
+}
+
+static int uniphier_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = {};
+ struct uniphier_efuse_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = true;
+ econfig.reg_read = uniphier_reg_read;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id uniphier_efuse_of_match[] = {
+ { .compatible = "socionext,uniphier-efuse",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
+
+static struct platform_driver uniphier_efuse_driver = {
+ .probe = uniphier_efuse_probe,
+ .driver = {
+ .name = "uniphier-efuse",
+ .of_match_table = uniphier_efuse_of_match,
+ },
+};
+module_platform_driver(uniphier_efuse_driver);
+
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("UniPhier eFuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
new file mode 100644
index 0000000000..ee9c61ae72
--- /dev/null
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Toradex AG.
+ *
+ * Author: Sanchayan Maity <sanchayan.maity@toradex.com>
+ *
+ * Based on the barebox ocotp driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>
+ * Orex Computed Radiography
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* OCOTP Register Offsets */
+#define OCOTP_CTRL_REG 0x00
+#define OCOTP_CTRL_SET 0x04
+#define OCOTP_CTRL_CLR 0x08
+#define OCOTP_TIMING 0x10
+#define OCOTP_DATA 0x20
+#define OCOTP_READ_CTRL_REG 0x30
+#define OCOTP_READ_FUSE_DATA 0x40
+
+/* OCOTP Register bits and masks */
+#define OCOTP_CTRL_WR_UNLOCK 16
+#define OCOTP_CTRL_WR_UNLOCK_KEY 0x3E77
+#define OCOTP_CTRL_WR_UNLOCK_MASK GENMASK(31, 16)
+#define OCOTP_CTRL_ADDR 0
+#define OCOTP_CTRL_ADDR_MASK GENMASK(6, 0)
+#define OCOTP_CTRL_RELOAD_SHADOWS BIT(10)
+#define OCOTP_CTRL_ERR BIT(9)
+#define OCOTP_CTRL_BUSY BIT(8)
+
+#define OCOTP_TIMING_STROBE_READ 16
+#define OCOTP_TIMING_STROBE_READ_MASK GENMASK(21, 16)
+#define OCOTP_TIMING_RELAX 12
+#define OCOTP_TIMING_RELAX_MASK GENMASK(15, 12)
+#define OCOTP_TIMING_STROBE_PROG 0
+#define OCOTP_TIMING_STROBE_PROG_MASK GENMASK(11, 0)
+
+#define OCOTP_READ_CTRL_READ_FUSE 0x1
+
+#define VF610_OCOTP_TIMEOUT 100000
+
+#define BF(value, field) (((value) << field) & field##_MASK)
+
+#define DEF_RELAX 20
+
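+/*
+ * Each entry maps a byte offset in the OCOTP shadow-register space (as seen
+ * by nvmem consumers) to the corresponding fuse address used by the read
+ * sequence in vf610_ocotp_read(); offsets without an entry read back as 0.
+ */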
+static const int base_to_fuse_addr_mappings[][2] = {
+ {0x400, 0x00},
+ {0x410, 0x01},
+ {0x420, 0x02},
+ {0x450, 0x05},
+ {0x4F0, 0x0F},
+ {0x600, 0x20},
+ {0x610, 0x21},
+ {0x620, 0x22},
+ {0x630, 0x23},
+ {0x640, 0x24},
+ {0x650, 0x25},
+ {0x660, 0x26},
+ {0x670, 0x27},
+ {0x6F0, 0x2F},
+ {0x880, 0x38},
+ {0x890, 0x39},
+ {0x8A0, 0x3A},
+ {0x8B0, 0x3B},
+ {0x8C0, 0x3C},
+ {0x8D0, 0x3D},
+ {0x8E0, 0x3E},
+ {0x8F0, 0x3F},
+ {0xC80, 0x78},
+ {0xC90, 0x79},
+ {0xCA0, 0x7A},
+ {0xCB0, 0x7B},
+ {0xCC0, 0x7C},
+ {0xCD0, 0x7D},
+ {0xCE0, 0x7E},
+ {0xCF0, 0x7F},
+};
+
+struct vf610_ocotp {
+ void __iomem *base;
+ struct clk *clk;
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int timing;
+};
+
+static int vf610_ocotp_wait_busy(void __iomem *base)
+{
+ int timeout = VF610_OCOTP_TIMEOUT;
+
+ while ((readl(base) & OCOTP_CTRL_BUSY) && --timeout)
+ udelay(10);
+
+ if (!timeout) {
+ writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+
+ return 0;
+}
+
+static int vf610_ocotp_calculate_timing(struct vf610_ocotp *ocotp_dev)
+{
+ u32 clk_rate;
+ u32 relax, strobe_read, strobe_prog;
+ u32 timing;
+
+ clk_rate = clk_get_rate(ocotp_dev->clk);
+
+ /* Refer to the OTP read/write timing parameters section in the TRM */
+ relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
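+ /*
+ * Worked example (assuming a 66 MHz OCOTP clock, purely illustrative):
+ * relax = 66000000 / 50000000 - 1 = 0,
+ * strobe_prog = 66000000 / 100000 + 2 * 21 - 1 = 701,
+ * strobe_read = 66000000 / 25000000 + 2 * 21 - 1 = 43.
+ */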
+
+ timing = BF(relax, OCOTP_TIMING_RELAX);
+ timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
+ timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
+
+ return timing;
+}
+
+static int vf610_get_fuse_address(int base_addr_offset)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_to_fuse_addr_mappings); i++) {
+ if (base_to_fuse_addr_mappings[i][0] == base_addr_offset)
+ return base_to_fuse_addr_mappings[i][1];
+ }
+
+ return -EINVAL;
+}
+
+static int vf610_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct vf610_ocotp *ocotp = context;
+ void __iomem *base = ocotp->base;
+ u32 reg, *buf = val;
+ int fuse_addr;
+ int ret;
+
+ while (bytes > 0) {
+ fuse_addr = vf610_get_fuse_address(offset);
+ if (fuse_addr > 0) {
+ writel(ocotp->timing, base + OCOTP_TIMING);
+ ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
+ if (ret)
+ return ret;
+
+ reg = readl(base + OCOTP_CTRL_REG);
+ reg &= ~OCOTP_CTRL_ADDR_MASK;
+ reg &= ~OCOTP_CTRL_WR_UNLOCK_MASK;
+ reg |= BF(fuse_addr, OCOTP_CTRL_ADDR);
+ writel(reg, base + OCOTP_CTRL_REG);
+
+ writel(OCOTP_READ_CTRL_READ_FUSE,
+ base + OCOTP_READ_CTRL_REG);
+ ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
+ if (ret)
+ return ret;
+
+ if (readl(base) & OCOTP_CTRL_ERR) {
+ dev_dbg(ocotp->dev, "Error reading from fuse address %x\n",
+ fuse_addr);
+ writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
+ }
+
+ /*
+ * In case of error, we do not abort and expect to read
+ * 0xBADABADA as mentioned by the TRM. We just read this
+ * value and return.
+ */
+ *buf = readl(base + OCOTP_READ_FUSE_DATA);
+ } else {
+ *buf = 0;
+ }
+
+ buf++;
+ bytes -= 4;
+ offset += 4;
+ }
+
+ return 0;
+}
+
+static struct nvmem_config ocotp_config = {
+ .name = "ocotp",
+ .stride = 4,
+ .word_size = 4,
+ .reg_read = vf610_ocotp_read,
+};
+
+static const struct of_device_id ocotp_of_match[] = {
+ { .compatible = "fsl,vf610-ocotp", },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, ocotp_of_match);
+
+static int vf610_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct vf610_ocotp *ocotp_dev;
+
+ ocotp_dev = devm_kzalloc(dev, sizeof(struct vf610_ocotp), GFP_KERNEL);
+ if (!ocotp_dev)
+ return -ENOMEM;
+
+ ocotp_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ocotp_dev->base))
+ return PTR_ERR(ocotp_dev->base);
+
+ ocotp_dev->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ocotp_dev->clk)) {
+ dev_err(dev, "failed getting clock, err = %ld\n",
+ PTR_ERR(ocotp_dev->clk));
+ return PTR_ERR(ocotp_dev->clk);
+ }
+ ocotp_dev->dev = dev;
+ ocotp_dev->timing = vf610_ocotp_calculate_timing(ocotp_dev);
+
+ ocotp_config.size = resource_size(res);
+ ocotp_config.priv = ocotp_dev;
+ ocotp_config.dev = dev;
+
+ ocotp_dev->nvmem = devm_nvmem_register(dev, &ocotp_config);
+
+ return PTR_ERR_OR_ZERO(ocotp_dev->nvmem);
+}
+
+static struct platform_driver vf610_ocotp_driver = {
+ .probe = vf610_ocotp_probe,
+ .driver = {
+ .name = "vf610-ocotp",
+ .of_match_table = ocotp_of_match,
+ },
+};
+module_platform_driver(vf610_ocotp_driver);
+MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>");
+MODULE_DESCRIPTION("Vybrid OCOTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644
index 0000000000..f49bb9a26d
--- /dev/null
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+ u32 idcode, version;
+ struct zynqmp_nvmem_data *priv = context;
+
+ ret = zynqmp_pm_get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "zynqmp-nvmem",
+ .owner = THIS_MODULE,
+ .word_size = 1,
+ .size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+ { .compatible = "xlnx,zynqmp-nvmem-fw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_nvmem_data *priv;
+
+ priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ econfig.dev = dev;
+ econfig.reg_read = zynqmp_nvmem_read;
+ econfig.priv = priv;
+
+ priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+ .probe = zynqmp_nvmem_probe,
+ .driver = {
+ .name = "zynqmp-nvmem",
+ .of_match_table = zynqmp_nvmem_match,
+ },
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@amd.com>, Nava kishore Manne <nava.kishore.manne@amd.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");