Diffstat:
-rw-r--r--  drivers/nvme/host/Kconfig          121
-rw-r--r--  drivers/nvme/host/Makefile          32
-rw-r--r--  drivers/nvme/host/apple.c         1606
-rw-r--r--  drivers/nvme/host/auth.c          1043
-rw-r--r--  drivers/nvme/host/constants.c      203
-rw-r--r--  drivers/nvme/host/core.c          4803
-rw-r--r--  drivers/nvme/host/fabrics.c       1428
-rw-r--r--  drivers/nvme/host/fabrics.h        230
-rw-r--r--  drivers/nvme/host/fault_inject.c    82
-rw-r--r--  drivers/nvme/host/fc.c            4006
-rw-r--r--  drivers/nvme/host/fc.h             227
-rw-r--r--  drivers/nvme/host/hwmon.c          281
-rw-r--r--  drivers/nvme/host/ioctl.c          985
-rw-r--r--  drivers/nvme/host/multipath.c      964
-rw-r--r--  drivers/nvme/host/nvme.h          1141
-rw-r--r--  drivers/nvme/host/pci.c           3543
-rw-r--r--  drivers/nvme/host/pr.c             315
-rw-r--r--  drivers/nvme/host/rdma.c          2397
-rw-r--r--  drivers/nvme/host/sysfs.c          668
-rw-r--r--  drivers/nvme/host/tcp.c           2673
-rw-r--r--  drivers/nvme/host/trace.c          357
-rw-r--r--  drivers/nvme/host/trace.h          172
-rw-r--r--  drivers/nvme/host/zns.c            249
23 files changed, 27526 insertions, 0 deletions
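
The apple.c driver added below documents an unusual constraint of this controller: the admin and IO queues share one 64-entry tag space, so the admin queue is shrunk to APPLE_NVME_AQ_DEPTH tags and the IO tag set reserves those same tags (see the comment above APPLE_NVME_AQ_DEPTH and apple_nvme_alloc_tagsets() in the diff). As a quick cross-reference, here is a minimal userspace sketch, not part of the patch, that restates that arithmetic using the driver's own constants; the main()/printf wrapper is purely illustrative.

#include <stdio.h>

/* Constants copied from apple.c below. */
#define APPLE_ANS_MAX_QUEUE_DEPTH  64                         /* tag space shared by admin and IO queue */
#define APPLE_NVME_AQ_DEPTH        2                          /* slice of that space given to the admin queue */
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)  /* no tags set aside for async events */

int main(void)
{
        /* Values the driver programs into its blk-mq tag sets. */
        unsigned int admin_tags = APPLE_NVME_AQ_MQ_TAG_DEPTH;    /* admin_tagset.queue_depth */
        unsigned int io_depth   = APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* tagset.queue_depth */
        unsigned int reserved   = APPLE_NVME_AQ_DEPTH;           /* tagset.reserved_tags */

        printf("admin tags: %u\n", admin_tags);
        printf("IO tags: %u usable (depth %u, %u reserved for the admin queue)\n",
               io_depth - reserved, io_depth, reserved);
        return 0;
}

Compiled and run, this reports 1 admin tag and 61 usable IO tags out of the shared 64, matching the queue_depth and reserved_tags values assigned in apple_nvme_alloc_tagsets() further down.
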
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
new file mode 100644
index 0000000000..2f6a7f8c94
--- /dev/null
+++ b/drivers/nvme/host/Kconfig
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NVME_CORE
+	tristate
+	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
+
+config BLK_DEV_NVME
+	tristate "NVM Express block device"
+	depends on PCI && BLOCK
+	select NVME_CORE
+	help
+	  The NVM Express driver is for solid state drives directly
+	  connected to the PCI or PCI Express bus. If you know you
+	  don't have one of these, it is safe to answer N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nvme.
+
+config NVME_MULTIPATH
+	bool "NVMe multipath support"
+	depends on NVME_CORE
+	help
+	  This option enables support for multipath access to NVMe
+	  subsystems. If this option is enabled only a single
+	  /dev/nvmeXnY device will show up for each NVMe namespace,
+	  even if it is accessible through multiple controllers.
+
+config NVME_VERBOSE_ERRORS
+	bool "NVMe verbose error reporting"
+	depends on NVME_CORE
+	help
+	  This option enables verbose reporting for NVMe errors. The
+	  error translation table will grow the kernel image size by
+	  about 4 KB.
+
+config NVME_HWMON
+	bool "NVMe hardware monitoring"
+	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
+	help
+	  This provides support for NVMe hardware monitoring. If enabled,
+	  a hardware monitoring device will be created for each NVMe drive
+	  in the system.
+
+config NVME_FABRICS
+	select NVME_CORE
+	tristate
+
+config NVME_RDMA
+	tristate "NVM Express over Fabrics RDMA host driver"
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
+	select NVME_FABRICS
+	select SG_POOL
+	help
+	  This provides support for the NVMe over Fabrics protocol using
+	  the RDMA (Infiniband, RoCE, iWarp) transport. This allows you
+	  to use remote block devices exported using the NVMe protocol set.
+
+	  To configure a NVMe over Fabrics controller use the nvme-cli tool
+	  from https://github.com/linux-nvme/nvme-cli.
+
+	  If unsure, say N.
+
+config NVME_FC
+	tristate "NVM Express over Fabrics FC host driver"
+	depends on BLOCK
+	depends on HAS_DMA
+	select NVME_FABRICS
+	select SG_POOL
+	help
+	  This provides support for the NVMe over Fabrics protocol using
+	  the FC transport. This allows you to use remote block devices
+	  exported using the NVMe protocol set.
+
+	  To configure a NVMe over Fabrics controller use the nvme-cli tool
+	  from https://github.com/linux-nvme/nvme-cli.
+
+	  If unsure, say N.
+
+config NVME_TCP
+	tristate "NVM Express over Fabrics TCP host driver"
+	depends on INET
+	depends on BLOCK
+	select NVME_FABRICS
+	select CRYPTO
+	select CRYPTO_CRC32C
+	help
+	  This provides support for the NVMe over Fabrics protocol using
+	  the TCP transport. This allows you to use remote block devices
+	  exported using the NVMe protocol set.
+
+	  To configure a NVMe over Fabrics controller use the nvme-cli tool
+	  from https://github.com/linux-nvme/nvme-cli.
+
+	  If unsure, say N.
+
+config NVME_AUTH
+	bool "NVM Express over Fabrics In-Band Authentication"
+	depends on NVME_CORE
+	select NVME_COMMON
+	select CRYPTO
+	select CRYPTO_HMAC
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_DH
+	select CRYPTO_DH_RFC7919_GROUPS
+	help
+	  This provides support for NVMe over Fabrics In-Band Authentication.
+
+	  If unsure, say N.
+
+config NVME_APPLE
+	tristate "Apple ANS2 NVM Express host driver"
+	depends on OF && BLOCK
+	depends on APPLE_RTKIT && APPLE_SART
+	depends on ARCH_APPLE || COMPILE_TEST
+	select NVME_CORE
+	help
+	  This provides support for the NVMe controller embedded in Apple SoCs
+	  such as the M1.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nvme-apple.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
new file mode 100644
index 0000000000..c7c3cf202d
--- /dev/null
+++ b/drivers/nvme/host/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_CORE) += nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
+obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
+obj-$(CONFIG_NVME_FC) += nvme-fc.o
+obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
+obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
+
+nvme-core-y += core.o ioctl.o sysfs.o pr.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
+nvme-core-$(CONFIG_TRACING) += trace.o
+nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
+nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
+nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
+
+nvme-y += pci.o
+
+nvme-fabrics-y += fabrics.o
+
+nvme-rdma-y += rdma.o
+
+nvme-fc-y += fc.o
+
+nvme-tcp-y += tcp.o
+
+nvme-apple-y += apple.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
new file mode 100644
index 0000000000..596bb11eeb
--- /dev/null
+++ b/drivers/nvme/host/apple.c
@@ -0,0 +1,1606 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple ANS NVM Express device driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on the pci.c NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * and on the rdma.c NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */ + +#include <linux/async.h> +#include <linux/blkdev.h> +#include <linux/blk-mq.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/interrupt.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/once.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/soc/apple/rtkit.h> +#include <linux/soc/apple/sart.h> +#include <linux/reset.h> +#include <linux/time64.h> + +#include "nvme.h" + +#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC +#define APPLE_ANS_MAX_QUEUE_DEPTH 64 + +#define APPLE_ANS_COPROC_CPU_CONTROL 0x44 +#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4) + +#define APPLE_ANS_ACQ_DB 0x1004 +#define APPLE_ANS_IOCQ_DB 0x100c + +#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210 + +#define APPLE_ANS_BOOT_STATUS 0x1300 +#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55 + +#define APPLE_ANS_UNKNOWN_CTRL 0x24008 +#define APPLE_ANS_PRP_NULL_CHECK BIT(11) + +#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908 +#define APPLE_ANS_LINEAR_SQ_EN BIT(0) + +#define APPLE_ANS_LINEAR_ASQ_DB 0x2490c +#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910 + +#define APPLE_NVMMU_NUM_TCBS 0x28100 +#define APPLE_NVMMU_ASQ_TCB_BASE 0x28108 +#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110 +#define APPLE_NVMMU_TCB_INVAL 0x28118 +#define APPLE_NVMMU_TCB_STAT 0x28120 + +/* + * This controller is a bit weird in the way command tags works: Both the + * admin and the IO queue share the same tag space. Additionally, tags + * cannot be higher than 0x40 which effectively limits the combined + * queue depth to 0x40. Instead of wasting half of that on the admin queue + * which gets much less traffic we instead reduce its size here. + * The controller also doesn't support async event such that no space must + * be reserved for NVME_NR_AEN_COMMANDS. + */ +#define APPLE_NVME_AQ_DEPTH 2 +#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1) + +/* + * These can be higher, but we need to ensure that any command doesn't + * require an sg allocation that needs more than a page of data. + */ +#define NVME_MAX_KB_SZ 4096 +#define NVME_MAX_SEGS 127 + +/* + * This controller comes with an embedded IOMMU known as NVMMU. + * The NVMMU is pointed to an array of TCBs indexed by the command tag. + * Each command must be configured inside this structure before it's allowed + * to execute, including commands that don't require DMA transfers. + * + * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the + * admin queue): Those commands must still be added to the NVMMU but the DMA + * buffers cannot be represented as PRPs and must instead be allowed using SART. + * + * Programming the PRPs to the same values as those in the submission queue + * looks rather silly at first. This hardware is however designed for a kernel + * that runs the NVMMU code in a higher exception level than the NVMe driver. + * In that setting the NVMe driver first programs the submission queue entry + * and then executes a hypercall to the code that is allowed to program the + * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while + * verifying that they don't point to kernel text, data, pagetables, or similar + * protected areas before programming the TCB to point to this shadow copy. 
+ * Since Linux doesn't do any of that we may as well just point both the queue + * and the TCB PRP pointer to the same memory. + */ +struct apple_nvmmu_tcb { + u8 opcode; + +#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0) +#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1) + u8 dma_flags; + + u8 command_id; + u8 _unk0; + __le16 length; + u8 _unk1[18]; + __le64 prp1; + __le64 prp2; + u8 _unk2[16]; + u8 aes_iv[8]; + u8 _aes_unk[64]; +}; + +/* + * The Apple NVMe controller only supports a single admin and a single IO queue + * which are both limited to 64 entries and share a single interrupt. + * + * The completion queue works as usual. The submission "queue" instead is + * an array indexed by the command tag on this hardware. Commands must also be + * present in the NVMMU's tcb array. They are triggered by writing their tag to + * a MMIO register. + */ +struct apple_nvme_queue { + struct nvme_command *sqes; + struct nvme_completion *cqes; + struct apple_nvmmu_tcb *tcbs; + + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + dma_addr_t tcb_dma_addr; + + u32 __iomem *sq_db; + u32 __iomem *cq_db; + + u16 cq_head; + u8 cq_phase; + + bool is_adminq; + bool enabled; +}; + +/* + * The apple_nvme_iod describes the data in an I/O. + * + * The sg pointer contains the list of PRP chunk allocations in addition + * to the actual struct scatterlist. + */ +struct apple_nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + struct apple_nvme_queue *q; + int npages; /* In the PRP list. 0 means small pool in use */ + int nents; /* Used in scatterlist */ + dma_addr_t first_dma; + unsigned int dma_len; /* length of single DMA segment mapping */ + struct scatterlist *sg; +}; + +struct apple_nvme { + struct device *dev; + + void __iomem *mmio_coproc; + void __iomem *mmio_nvme; + + struct device **pd_dev; + struct device_link **pd_link; + int pd_count; + + struct apple_sart *sart; + struct apple_rtkit *rtk; + struct reset_control *reset; + + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + mempool_t *iod_mempool; + + struct nvme_ctrl ctrl; + struct work_struct remove_work; + + struct apple_nvme_queue adminq; + struct apple_nvme_queue ioq; + + struct blk_mq_tag_set admin_tagset; + struct blk_mq_tag_set tagset; + + int irq; + spinlock_t lock; +}; + +static_assert(sizeof(struct nvme_command) == 64); +static_assert(sizeof(struct apple_nvmmu_tcb) == 128); + +static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct apple_nvme, ctrl); +} + +static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return container_of(q, struct apple_nvme, adminq); + + return container_of(q, struct apple_nvme, ioq); +} + +static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return APPLE_NVME_AQ_DEPTH; + + return APPLE_ANS_MAX_QUEUE_DEPTH; +} + +static void apple_nvme_rtkit_crashed(void *cookie) +{ + struct apple_nvme *anv = cookie; + + dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot"); + nvme_reset_ctrl(&anv->ctrl); +} + +static int apple_nvme_sart_dma_setup(void *cookie, + struct apple_rtkit_shmem *bfr) +{ + struct apple_nvme *anv = cookie; + int ret; + + if (bfr->iova) + return -EINVAL; + if (!bfr->size) + return -EINVAL; + + bfr->buffer = + dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL); + if (!bfr->buffer) + return -ENOMEM; + + ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size); + if (ret) { + 
dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); + bfr->buffer = NULL; + return -ENOMEM; + } + + return 0; +} + +static void apple_nvme_sart_dma_destroy(void *cookie, + struct apple_rtkit_shmem *bfr) +{ + struct apple_nvme *anv = cookie; + + apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size); + dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); +} + +static const struct apple_rtkit_ops apple_nvme_rtkit_ops = { + .crashed = apple_nvme_rtkit_crashed, + .shmem_setup = apple_nvme_sart_dma_setup, + .shmem_destroy = apple_nvme_sart_dma_destroy, +}; + +static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + + writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL); + if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT)) + dev_warn_ratelimited(anv->dev, + "NVMMU TCB invalidation failed\n"); +} + +static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, + struct nvme_command *cmd) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + u32 tag = nvme_tag_from_cid(cmd->common.command_id); + struct apple_nvmmu_tcb *tcb = &q->tcbs[tag]; + + tcb->opcode = cmd->common.opcode; + tcb->prp1 = cmd->common.dptr.prp1; + tcb->prp2 = cmd->common.dptr.prp2; + tcb->length = cmd->rw.length; + tcb->command_id = tag; + + if (nvme_is_write(cmd)) + tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE; + else + tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE; + + memcpy(&q->sqes[tag], cmd, sizeof(*cmd)); + + /* + * This lock here doesn't make much sense at a first glace but + * removing it will result in occasional missed completetion + * interrupts even though the commands still appear on the CQ. + * It's unclear why this happens but our best guess is that + * there is a bug in the firmware triggered when a new command + * is issued while we're inside the irq handler between the + * NVMMU invalidation (and making the tag available again) + * and the final CQ update. + */ + spin_lock_irq(&anv->lock); + writel(tag, q->sq_db); + spin_unlock_irq(&anv->lock); +} + +/* + * From pci.c: + * Will slightly overestimate the number of pages needed. This is OK + * as it only leads to a small amount of wasted memory for the lifetime of + * the I/O. 
+ */ +static inline size_t apple_nvme_iod_alloc_size(void) +{ + const unsigned int nprps = DIV_ROUND_UP( + NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE); + const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); + const size_t alloc_size = sizeof(__le64 *) * npages + + sizeof(struct scatterlist) * NVME_MAX_SEGS; + + return alloc_size; +} + +static void **apple_nvme_iod_list(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + + return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); +} + +static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req) +{ + const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = apple_nvme_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(anv->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; + } +} + +static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if (iod->dma_len) { + dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; + } + + WARN_ON_ONCE(!iod->nents); + + dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); + if (iod->npages == 0) + dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0], + iod->first_dma); + else + apple_nvme_free_prps(anv, req); + mempool_free(iod->sg, anv->iod_mempool); +} + +static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + +static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv, + struct request *req, + struct nvme_rw_command *cmnd) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct dma_pool *pool; + int length = blk_rq_payload_bytes(req); + struct scatterlist *sg = iod->sg; + int dma_len = sg_dma_len(sg); + u64 dma_addr = sg_dma_address(sg); + int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); + __le64 *prp_list; + void **list = apple_nvme_iod_list(req); + dma_addr_t prp_dma; + int nprps, i; + + length -= (NVME_CTRL_PAGE_SIZE - offset); + if (length <= 0) { + iod->first_dma = 0; + goto done; + } + + dma_len -= (NVME_CTRL_PAGE_SIZE - offset); + if (dma_len) { + dma_addr += (NVME_CTRL_PAGE_SIZE - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= NVME_CTRL_PAGE_SIZE) { + iod->first_dma = dma_addr; + goto done; + } + + nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); + if (nprps <= (256 / 8)) { + pool = anv->prp_small_pool; + iod->npages = 0; + } else { + pool = anv->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + iod->first_dma = dma_addr; + iod->npages = -1; + return BLK_STS_RESOURCE; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + i = 0; + for (;;) { + if (i == NVME_CTRL_PAGE_SIZE >> 3) { + __le64 *old_prp_list = prp_list; + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) + goto free_prps; + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 
1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= NVME_CTRL_PAGE_SIZE; + dma_addr += NVME_CTRL_PAGE_SIZE; + length -= NVME_CTRL_PAGE_SIZE; + if (length <= 0) + break; + if (dma_len > 0) + continue; + if (unlikely(dma_len < 0)) + goto bad_sgl; + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } +done: + cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); + return BLK_STS_OK; +free_prps: + apple_nvme_free_prps(anv, req); + return BLK_STS_RESOURCE; +bad_sgl: + WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents), + "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), + iod->nents); + return BLK_STS_IOERR; +} + +static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv, + struct request *req, + struct nvme_rw_command *cmnd, + struct bio_vec *bv) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); + unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; + + iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0); + if (dma_mapping_error(anv->dev, iod->first_dma)) + return BLK_STS_RESOURCE; + iod->dma_len = bv->bv_len; + + cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); + if (bv->bv_len > first_prp_len) + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); + return BLK_STS_OK; +} + +static blk_status_t apple_nvme_map_data(struct apple_nvme *anv, + struct request *req, + struct nvme_command *cmnd) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + blk_status_t ret = BLK_STS_RESOURCE; + int nr_mapped; + + if (blk_rq_nr_phys_segments(req) == 1) { + struct bio_vec bv = req_bvec(req); + + if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) + return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw, + &bv); + } + + iod->dma_len = 0; + iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC); + if (!iod->sg) + return BLK_STS_RESOURCE; + sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); + iod->nents = blk_rq_map_sg(req->q, req, iod->sg); + if (!iod->nents) + goto out_free_sg; + + nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents, + rq_dma_dir(req), DMA_ATTR_NO_WARN); + if (!nr_mapped) + goto out_free_sg; + + ret = apple_nvme_setup_prps(anv, req, &cmnd->rw); + if (ret != BLK_STS_OK) + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); +out_free_sg: + mempool_free(iod->sg, anv->iod_mempool); + return ret; +} + +static __always_inline void apple_nvme_unmap_rq(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct apple_nvme *anv = queue_to_apple_nvme(iod->q); + + if (blk_rq_nr_phys_segments(req)) + apple_nvme_unmap_data(anv, req); +} + +static void apple_nvme_complete_rq(struct request *req) +{ + apple_nvme_unmap_rq(req); + nvme_complete_rq(req); +} + +static void apple_nvme_complete_batch(struct io_comp_batch *iob) +{ + nvme_complete_batch(iob, apple_nvme_unmap_rq); +} + +static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q) +{ + struct nvme_completion *hcqe = &q->cqes[q->cq_head]; + + return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase; +} + +static inline struct blk_mq_tags * +apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return anv->admin_tagset.tags[0]; + else + return anv->tagset.tags[0]; +} + +static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, + struct 
io_comp_batch *iob, u16 idx) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct nvme_completion *cqe = &q->cqes[idx]; + __u16 command_id = READ_ONCE(cqe->command_id); + struct request *req; + + apple_nvmmu_inval(q, command_id); + + req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id); + if (unlikely(!req)) { + dev_warn(anv->dev, "invalid id %d completed", command_id); + return; + } + + if (!nvme_try_complete_req(req, cqe->status, cqe->result) && + !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + apple_nvme_complete_batch)) + apple_nvme_complete_rq(req); +} + +static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q) +{ + u32 tmp = q->cq_head + 1; + + if (tmp == apple_nvme_queue_depth(q)) { + q->cq_head = 0; + q->cq_phase ^= 1; + } else { + q->cq_head = tmp; + } +} + +static bool apple_nvme_poll_cq(struct apple_nvme_queue *q, + struct io_comp_batch *iob) +{ + bool found = false; + + while (apple_nvme_cqe_pending(q)) { + found = true; + + /* + * load-load control dependency between phase and the rest of + * the cqe requires a full read memory barrier + */ + dma_rmb(); + apple_nvme_handle_cqe(q, iob, q->cq_head); + apple_nvme_update_cq_head(q); + } + + if (found) + writel(q->cq_head, q->cq_db); + + return found; +} + +static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force) +{ + bool found; + DEFINE_IO_COMP_BATCH(iob); + + if (!READ_ONCE(q->enabled) && !force) + return false; + + found = apple_nvme_poll_cq(q, &iob); + + if (!rq_list_empty(iob.req_list)) + apple_nvme_complete_batch(&iob); + + return found; +} + +static irqreturn_t apple_nvme_irq(int irq, void *data) +{ + struct apple_nvme *anv = data; + bool handled = false; + unsigned long flags; + + spin_lock_irqsave(&anv->lock, flags); + if (apple_nvme_handle_cq(&anv->ioq, false)) + handled = true; + if (apple_nvme_handle_cq(&anv->adminq, false)) + handled = true; + spin_unlock_irqrestore(&anv->lock, flags); + + if (handled) + return IRQ_HANDLED; + return IRQ_NONE; +} + +static int apple_nvme_create_cq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. + */ + c.create_cq.opcode = nvme_admin_create_cq; + c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr); + c.create_cq.cqid = cpu_to_le16(1); + c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED); + c.create_cq.irq_vector = cpu_to_le16(0); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_remove_cq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + c.delete_queue.opcode = nvme_admin_delete_cq; + c.delete_queue.qid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_create_sq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. 
+ */ + c.create_sq.opcode = nvme_admin_create_sq; + c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr); + c.create_sq.sqid = cpu_to_le16(1); + c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG); + c.create_sq.cqid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_remove_sq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + c.delete_queue.opcode = nvme_admin_delete_sq; + c.delete_queue.qid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct apple_nvme_queue *q = hctx->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct request *req = bd->rq; + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_command *cmnd = &iod->cmd; + blk_status_t ret; + + iod->npages = -1; + iod->nents = 0; + + /* + * We should not need to do this, but we're still using this to + * ensure we can drain requests on a dying queue. + */ + if (unlikely(!READ_ONCE(q->enabled))) + return BLK_STS_IOERR; + + if (!nvme_check_ready(&anv->ctrl, req, true)) + return nvme_fail_nonready_command(&anv->ctrl, req); + + ret = nvme_setup_cmd(ns, req); + if (ret) + return ret; + + if (blk_rq_nr_phys_segments(req)) { + ret = apple_nvme_map_data(anv, req, cmnd); + if (ret) + goto out_free_cmd; + } + + nvme_start_request(req); + apple_nvme_submit_cmd(q, cmnd); + return BLK_STS_OK; + +out_free_cmd: + nvme_cleanup_cmd(req); + return ret; +} + +static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + hctx->driver_data = data; + return 0; +} + +static int apple_nvme_init_request(struct blk_mq_tag_set *set, + struct request *req, unsigned int hctx_idx, + unsigned int numa_node) +{ + struct apple_nvme_queue *q = set->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_request *nreq = nvme_req(req); + + iod->q = q; + nreq->ctrl = &anv->ctrl; + nreq->cmd = &iod->cmd; + + return 0; +} + +static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) +{ + u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); + bool dead = false, freeze = false; + unsigned long flags; + + if (apple_rtkit_is_crashed(anv->rtk)) + dead = true; + if (!(csts & NVME_CSTS_RDY)) + dead = true; + if (csts & NVME_CSTS_CFS) + dead = true; + + if (anv->ctrl.state == NVME_CTRL_LIVE || + anv->ctrl.state == NVME_CTRL_RESETTING) { + freeze = true; + nvme_start_freeze(&anv->ctrl); + } + + /* + * Give the controller a chance to complete all entered requests if + * doing a safe shutdown. + */ + if (!dead && shutdown && freeze) + nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT); + + nvme_quiesce_io_queues(&anv->ctrl); + + if (!dead) { + if (READ_ONCE(anv->ioq.enabled)) { + apple_nvme_remove_sq(anv); + apple_nvme_remove_cq(anv); + } + + /* + * Always disable the NVMe controller after shutdown. + * We need to do this to bring it back up later anyway, and we + * can't do it while the firmware is not running (e.g. in the + * resume reset path before RTKit is initialized), so for Apple + * controllers it makes sense to unconditionally do it here. 
+ * Additionally, this sequence of events is reliable, while + * others (like disabling after bringing back the firmware on + * resume) seem to run into trouble under some circumstances. + * + * Both U-Boot and m1n1 also use this convention (i.e. an ANS + * NVMe controller is handed off with firmware shut down, in an + * NVMe disabled state, after a clean shutdown). + */ + if (shutdown) + nvme_disable_ctrl(&anv->ctrl, shutdown); + nvme_disable_ctrl(&anv->ctrl, false); + } + + WRITE_ONCE(anv->ioq.enabled, false); + WRITE_ONCE(anv->adminq.enabled, false); + mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */ + nvme_quiesce_admin_queue(&anv->ctrl); + + /* last chance to complete any requests before nvme_cancel_request */ + spin_lock_irqsave(&anv->lock, flags); + apple_nvme_handle_cq(&anv->ioq, true); + apple_nvme_handle_cq(&anv->adminq, true); + spin_unlock_irqrestore(&anv->lock, flags); + + nvme_cancel_tagset(&anv->ctrl); + nvme_cancel_admin_tagset(&anv->ctrl); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. + */ + if (shutdown) { + nvme_unquiesce_io_queues(&anv->ctrl); + nvme_unquiesce_admin_queue(&anv->ctrl); + } +} + +static enum blk_eh_timer_return apple_nvme_timeout(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct apple_nvme_queue *q = iod->q; + struct apple_nvme *anv = queue_to_apple_nvme(q); + unsigned long flags; + u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); + + if (anv->ctrl.state != NVME_CTRL_LIVE) { + /* + * From rdma.c: + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. 
+ */ + dev_warn(anv->dev, + "I/O %d(aq:%d) timeout while not in live state\n", + req->tag, q->is_adminq); + if (blk_mq_request_started(req) && + !blk_mq_request_completed(req)) { + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + blk_mq_complete_request(req); + } + return BLK_EH_DONE; + } + + /* check if we just missed an interrupt if we're still alive */ + if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) { + spin_lock_irqsave(&anv->lock, flags); + apple_nvme_handle_cq(q, false); + spin_unlock_irqrestore(&anv->lock, flags); + if (blk_mq_request_completed(req)) { + dev_warn(anv->dev, + "I/O %d(aq:%d) timeout: completion polled\n", + req->tag, q->is_adminq); + return BLK_EH_DONE; + } + } + + /* + * aborting commands isn't supported which leaves a full reset as our + * only option here + */ + dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n", + req->tag, q->is_adminq); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + apple_nvme_disable(anv, false); + nvme_reset_ctrl(&anv->ctrl); + return BLK_EH_DONE; +} + +static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx, + struct io_comp_batch *iob) +{ + struct apple_nvme_queue *q = hctx->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + bool found; + unsigned long flags; + + spin_lock_irqsave(&anv->lock, flags); + found = apple_nvme_poll_cq(q, iob); + spin_unlock_irqrestore(&anv->lock, flags); + + return found; +} + +static const struct blk_mq_ops apple_nvme_mq_admin_ops = { + .queue_rq = apple_nvme_queue_rq, + .complete = apple_nvme_complete_rq, + .init_hctx = apple_nvme_init_hctx, + .init_request = apple_nvme_init_request, + .timeout = apple_nvme_timeout, +}; + +static const struct blk_mq_ops apple_nvme_mq_ops = { + .queue_rq = apple_nvme_queue_rq, + .complete = apple_nvme_complete_rq, + .init_hctx = apple_nvme_init_hctx, + .init_request = apple_nvme_init_request, + .timeout = apple_nvme_timeout, + .poll = apple_nvme_poll, +}; + +static void apple_nvme_init_queue(struct apple_nvme_queue *q) +{ + unsigned int depth = apple_nvme_queue_depth(q); + + q->cq_head = 0; + q->cq_phase = 1; + memset(q->tcbs, 0, + APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb)); + memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); + WRITE_ONCE(q->enabled, true); + wmb(); /* ensure the first interrupt sees the initialization */ +} + +static void apple_nvme_reset_work(struct work_struct *work) +{ + unsigned int nr_io_queues = 1; + int ret; + u32 boot_status, aqa; + struct apple_nvme *anv = + container_of(work, struct apple_nvme, ctrl.reset_work); + + if (anv->ctrl.state != NVME_CTRL_RESETTING) { + dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", + anv->ctrl.state); + ret = -ENODEV; + goto out; + } + + /* there's unfortunately no known way to recover if RTKit crashed :( */ + if (apple_rtkit_is_crashed(anv->rtk)) { + dev_err(anv->dev, + "RTKit has crashed without any way to recover."); + ret = -EIO; + goto out; + } + + /* RTKit must be shut down cleanly for the (soft)-reset to work */ + if (apple_rtkit_is_running(anv->rtk)) { + /* reset the controller if it is enabled */ + if (anv->ctrl.ctrl_config & NVME_CC_ENABLE) + apple_nvme_disable(anv, false); + dev_dbg(anv->dev, "Trying to shut down RTKit before reset."); + ret = apple_rtkit_shutdown(anv->rtk); + if (ret) + goto out; + } + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + ret = reset_control_assert(anv->reset); + if (ret) + goto out; + + ret = apple_rtkit_reinit(anv->rtk); + if (ret) + 
goto out; + + ret = reset_control_deassert(anv->reset); + if (ret) + goto out; + + writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, + anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + ret = apple_rtkit_boot(anv->rtk); + if (ret) { + dev_err(anv->dev, "ANS did not boot"); + goto out; + } + + ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS, + boot_status, + boot_status == APPLE_ANS_BOOT_STATUS_OK, + USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT); + if (ret) { + dev_err(anv->dev, "ANS did not initialize"); + goto out; + } + + dev_dbg(anv->dev, "ANS booted successfully."); + + /* + * Limit the max command size to prevent iod->sg allocations going + * over a single page. + */ + anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1, + dma_max_mapping_size(anv->dev) >> 9); + anv->ctrl.max_segments = NVME_MAX_SEGS; + + dma_set_max_seg_size(anv->dev, 0xffffffff); + + /* + * Enable NVMMU and linear submission queues. + * While we could keep those disabled and pretend this is slightly + * more common NVMe controller we'd still need some quirks (e.g. + * sq entries will be 128 bytes) and Apple might drop support for + * that mode in the future. + */ + writel(APPLE_ANS_LINEAR_SQ_EN, + anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); + + /* Allow as many pending command as possible for both queues */ + writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16), + anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL); + + /* Setup the NVMMU for the maximum admin and IO queue depth */ + writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1, + anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); + + /* + * This is probably a chicken bit: without it all commands where any PRP + * is set to zero (including those that don't use that field) fail and + * the co-processor complains about "completed with err BAD_CMD-" or + * a "NULL_PRP_PTR_ERR" in the syslog + */ + writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & + ~APPLE_ANS_PRP_NULL_CHECK, + anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); + + /* Setup the admin queue */ + aqa = APPLE_NVME_AQ_DEPTH - 1; + aqa |= aqa << 16; + writel(aqa, anv->mmio_nvme + NVME_REG_AQA); + writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ); + writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ); + + /* Setup NVMMU for both queues */ + writeq(anv->adminq.tcb_dma_addr, + anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); + writeq(anv->ioq.tcb_dma_addr, + anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); + + anv->ctrl.sqsize = + APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */ + anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP); + + dev_dbg(anv->dev, "Enabling controller now"); + ret = nvme_enable_ctrl(&anv->ctrl); + if (ret) + goto out; + + dev_dbg(anv->dev, "Starting admin queue"); + apple_nvme_init_queue(&anv->adminq); + nvme_unquiesce_admin_queue(&anv->ctrl); + + if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(anv->ctrl.device, + "failed to mark controller CONNECTING\n"); + ret = -ENODEV; + goto out; + } + + ret = nvme_init_ctrl_finish(&anv->ctrl, false); + if (ret) + goto out; + + dev_dbg(anv->dev, "Creating IOCQ"); + ret = apple_nvme_create_cq(anv); + if (ret) + goto out; + dev_dbg(anv->dev, "Creating IOSQ"); + ret = apple_nvme_create_sq(anv); + if (ret) + goto out_remove_cq; + + apple_nvme_init_queue(&anv->ioq); + nr_io_queues = 1; + ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues); + if (ret) + goto out_remove_sq; + if (nr_io_queues != 1) { + ret = -ENXIO; + goto out_remove_sq; + } + + anv->ctrl.queue_count = nr_io_queues + 1; + + 
nvme_unquiesce_io_queues(&anv->ctrl); + nvme_wait_freeze(&anv->ctrl); + blk_mq_update_nr_hw_queues(&anv->tagset, 1); + nvme_unfreeze(&anv->ctrl); + + if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) { + dev_warn(anv->ctrl.device, + "failed to mark controller live state\n"); + ret = -ENODEV; + goto out_remove_sq; + } + + nvme_start_ctrl(&anv->ctrl); + + dev_dbg(anv->dev, "ANS boot and NVMe init completed."); + return; + +out_remove_sq: + apple_nvme_remove_sq(anv); +out_remove_cq: + apple_nvme_remove_cq(anv); +out: + dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret); + nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); + nvme_get_ctrl(&anv->ctrl); + apple_nvme_disable(anv, false); + nvme_mark_namespaces_dead(&anv->ctrl); + if (!queue_work(nvme_wq, &anv->remove_work)) + nvme_put_ctrl(&anv->ctrl); +} + +static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work) +{ + struct apple_nvme *anv = + container_of(work, struct apple_nvme, remove_work); + + nvme_put_ctrl(&anv->ctrl); + device_release_driver(anv->dev); +} + +static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) +{ + *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) +{ + writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) +{ + *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + struct device *dev = ctrl_to_apple_nvme(ctrl)->dev; + + return snprintf(buf, size, "%s\n", dev_name(dev)); +} + +static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl) +{ + struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl); + + if (anv->ctrl.admin_q) + blk_put_queue(anv->ctrl.admin_q); + put_device(anv->dev); +} + +static const struct nvme_ctrl_ops nvme_ctrl_ops = { + .name = "apple-nvme", + .module = THIS_MODULE, + .flags = 0, + .reg_read32 = apple_nvme_reg_read32, + .reg_write32 = apple_nvme_reg_write32, + .reg_read64 = apple_nvme_reg_read64, + .free_ctrl = apple_nvme_free_ctrl, + .get_address = apple_nvme_get_address, +}; + +static void apple_nvme_async_probe(void *data, async_cookie_t cookie) +{ + struct apple_nvme *anv = data; + + flush_work(&anv->ctrl.reset_work); + flush_work(&anv->ctrl.scan_work); + nvme_put_ctrl(&anv->ctrl); +} + +static void devm_apple_nvme_put_tag_set(void *data) +{ + blk_mq_free_tag_set(data); +} + +static int apple_nvme_alloc_tagsets(struct apple_nvme *anv) +{ + int ret; + + anv->admin_tagset.ops = &apple_nvme_mq_admin_ops; + anv->admin_tagset.nr_hw_queues = 1; + anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH; + anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; + anv->admin_tagset.numa_node = NUMA_NO_NODE; + anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod); + anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED; + anv->admin_tagset.driver_data = &anv->adminq; + + ret = blk_mq_alloc_tag_set(&anv->admin_tagset); + if (ret) + return ret; + ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set, + &anv->admin_tagset); + if (ret) + return ret; + + anv->tagset.ops = &apple_nvme_mq_ops; + anv->tagset.nr_hw_queues = 1; + anv->tagset.nr_maps = 1; + /* + * Tags are used as an index to the NVMMU and must be unique across + * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which + * must be marked as reserved in the IO queue. 
+ */ + anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; + anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1; + anv->tagset.timeout = NVME_IO_TIMEOUT; + anv->tagset.numa_node = NUMA_NO_NODE; + anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); + anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE; + anv->tagset.driver_data = &anv->ioq; + + ret = blk_mq_alloc_tag_set(&anv->tagset); + if (ret) + return ret; + ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set, + &anv->tagset); + if (ret) + return ret; + + anv->ctrl.admin_tagset = &anv->admin_tagset; + anv->ctrl.tagset = &anv->tagset; + + return 0; +} + +static int apple_nvme_queue_alloc(struct apple_nvme *anv, + struct apple_nvme_queue *q) +{ + unsigned int depth = apple_nvme_queue_depth(q); + + q->cqes = dmam_alloc_coherent(anv->dev, + depth * sizeof(struct nvme_completion), + &q->cq_dma_addr, GFP_KERNEL); + if (!q->cqes) + return -ENOMEM; + + q->sqes = dmam_alloc_coherent(anv->dev, + depth * sizeof(struct nvme_command), + &q->sq_dma_addr, GFP_KERNEL); + if (!q->sqes) + return -ENOMEM; + + /* + * We need the maximum queue depth here because the NVMMU only has a + * single depth configuration shared between both queues. + */ + q->tcbs = dmam_alloc_coherent(anv->dev, + APPLE_ANS_MAX_QUEUE_DEPTH * + sizeof(struct apple_nvmmu_tcb), + &q->tcb_dma_addr, GFP_KERNEL); + if (!q->tcbs) + return -ENOMEM; + + /* + * initialize phase to make sure the allocated and empty memory + * doesn't look like a full cq already. + */ + q->cq_phase = 1; + return 0; +} + +static void apple_nvme_detach_genpd(struct apple_nvme *anv) +{ + int i; + + if (anv->pd_count <= 1) + return; + + for (i = anv->pd_count - 1; i >= 0; i--) { + if (anv->pd_link[i]) + device_link_del(anv->pd_link[i]); + if (!IS_ERR_OR_NULL(anv->pd_dev[i])) + dev_pm_domain_detach(anv->pd_dev[i], true); + } +} + +static int apple_nvme_attach_genpd(struct apple_nvme *anv) +{ + struct device *dev = anv->dev; + int i; + + anv->pd_count = of_count_phandle_with_args( + dev->of_node, "power-domains", "#power-domain-cells"); + if (anv->pd_count <= 1) + return 0; + + anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev), + GFP_KERNEL); + if (!anv->pd_dev) + return -ENOMEM; + + anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link), + GFP_KERNEL); + if (!anv->pd_link) + return -ENOMEM; + + for (i = 0; i < anv->pd_count; i++) { + anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); + if (IS_ERR(anv->pd_dev[i])) { + apple_nvme_detach_genpd(anv); + return PTR_ERR(anv->pd_dev[i]); + } + + anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i], + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!anv->pd_link[i]) { + apple_nvme_detach_genpd(anv); + return -EINVAL; + } + } + + return 0; +} + +static void devm_apple_nvme_mempool_destroy(void *data) +{ + mempool_destroy(data); +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_nvme *anv; + int ret; + + anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); + if (!anv) + return -ENOMEM; + + anv->dev = get_device(dev); + anv->adminq.is_adminq = true; + platform_set_drvdata(pdev, anv); + + ret = apple_nvme_attach_genpd(anv); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to attach power domains"); + goto put_dev; + } + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { + ret = -ENXIO; + goto put_dev; + } + + anv->irq = platform_get_irq(pdev, 0); + if (anv->irq < 0) { + ret = anv->irq; + goto put_dev; + } + if (!anv->irq) { + ret = -ENXIO; 
+ goto put_dev; + } + + anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans"); + if (IS_ERR(anv->mmio_coproc)) { + ret = PTR_ERR(anv->mmio_coproc); + goto put_dev; + } + anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme"); + if (IS_ERR(anv->mmio_nvme)) { + ret = PTR_ERR(anv->mmio_nvme); + goto put_dev; + } + + anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; + anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; + + anv->sart = devm_apple_sart_get(dev); + if (IS_ERR(anv->sart)) { + ret = dev_err_probe(dev, PTR_ERR(anv->sart), + "Failed to initialize SART"); + goto put_dev; + } + + anv->reset = devm_reset_control_array_get_exclusive(anv->dev); + if (IS_ERR(anv->reset)) { + ret = dev_err_probe(dev, PTR_ERR(anv->reset), + "Failed to get reset control"); + goto put_dev; + } + + INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work); + INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work); + spin_lock_init(&anv->lock); + + ret = apple_nvme_queue_alloc(anv, &anv->adminq); + if (ret) + goto put_dev; + ret = apple_nvme_queue_alloc(anv, &anv->ioq); + if (ret) + goto put_dev; + + anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev, + NVME_CTRL_PAGE_SIZE, + NVME_CTRL_PAGE_SIZE, 0); + if (!anv->prp_page_pool) { + ret = -ENOMEM; + goto put_dev; + } + + anv->prp_small_pool = + dmam_pool_create("prp list 256", anv->dev, 256, 256, 0); + if (!anv->prp_small_pool) { + ret = -ENOMEM; + goto put_dev; + } + + WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE); + anv->iod_mempool = + mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size()); + if (!anv->iod_mempool) { + ret = -ENOMEM; + goto put_dev; + } + ret = devm_add_action_or_reset(anv->dev, + devm_apple_nvme_mempool_destroy, anv->iod_mempool); + if (ret) + goto put_dev; + + ret = apple_nvme_alloc_tagsets(anv); + if (ret) + goto put_dev; + + ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0, + "nvme-apple", anv); + if (ret) { + dev_err_probe(dev, ret, "Failed to request IRQ"); + goto put_dev; + } + + anv->rtk = + devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops); + if (IS_ERR(anv->rtk)) { + ret = dev_err_probe(dev, PTR_ERR(anv->rtk), + "Failed to initialize RTKit"); + goto put_dev; + } + + ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops, + NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS); + if (ret) { + dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl"); + goto put_dev; + } + + anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset); + if (IS_ERR(anv->ctrl.admin_q)) { + ret = -ENOMEM; + goto put_dev; + } + + nvme_reset_ctrl(&anv->ctrl); + async_schedule(apple_nvme_async_probe, anv); + + return 0; + +put_dev: + put_device(anv->dev); + return ret; +} + +static int apple_nvme_remove(struct platform_device *pdev) +{ + struct apple_nvme *anv = platform_get_drvdata(pdev); + + nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); + flush_work(&anv->ctrl.reset_work); + nvme_stop_ctrl(&anv->ctrl); + nvme_remove_namespaces(&anv->ctrl); + apple_nvme_disable(anv, true); + nvme_uninit_ctrl(&anv->ctrl); + + if (apple_rtkit_is_running(anv->rtk)) + apple_rtkit_shutdown(anv->rtk); + + apple_nvme_detach_genpd(anv); + + return 0; +} + +static void apple_nvme_shutdown(struct platform_device *pdev) +{ + struct apple_nvme *anv = platform_get_drvdata(pdev); + + apple_nvme_disable(anv, true); + if (apple_rtkit_is_running(anv->rtk)) + 
apple_rtkit_shutdown(anv->rtk); +} + +static int apple_nvme_resume(struct device *dev) +{ + struct apple_nvme *anv = dev_get_drvdata(dev); + + return nvme_reset_ctrl(&anv->ctrl); +} + +static int apple_nvme_suspend(struct device *dev) +{ + struct apple_nvme *anv = dev_get_drvdata(dev); + int ret = 0; + + apple_nvme_disable(anv, true); + + if (apple_rtkit_is_running(anv->rtk)) + ret = apple_rtkit_shutdown(anv->rtk); + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend, + apple_nvme_resume); + +static const struct of_device_id apple_nvme_of_match[] = { + { .compatible = "apple,nvme-ans2" }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_nvme_of_match); + +static struct platform_driver apple_nvme_driver = { + .driver = { + .name = "nvme-apple", + .of_match_table = apple_nvme_of_match, + .pm = pm_sleep_ptr(&apple_nvme_pm_ops), + }, + .probe = apple_nvme_probe, + .remove = apple_nvme_remove, + .shutdown = apple_nvme_shutdown, +}; +module_platform_driver(apple_nvme_driver); + +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c new file mode 100644 index 0000000000..811541ce20 --- /dev/null +++ b/drivers/nvme/host/auth.c @@ -0,0 +1,1043 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020 Hannes Reinecke, SUSE Linux + */ + +#include <linux/crc32.h> +#include <linux/base64.h> +#include <linux/prandom.h> +#include <asm/unaligned.h> +#include <crypto/hash.h> +#include <crypto/dh.h> +#include "nvme.h" +#include "fabrics.h" +#include <linux/nvme-auth.h> + +#define CHAP_BUF_SIZE 4096 +static struct kmem_cache *nvme_chap_buf_cache; +static mempool_t *nvme_chap_buf_pool; + +struct nvme_dhchap_queue_context { + struct list_head entry; + struct work_struct auth_work; + struct nvme_ctrl *ctrl; + struct crypto_shash *shash_tfm; + struct crypto_kpp *dh_tfm; + void *buf; + int qid; + int error; + u32 s1; + u32 s2; + u16 transaction; + u8 status; + u8 dhgroup_id; + u8 hash_id; + size_t hash_len; + u8 c1[64]; + u8 c2[64]; + u8 response[64]; + u8 *host_response; + u8 *ctrl_key; + u8 *host_key; + u8 *sess_key; + int ctrl_key_len; + int host_key_len; + int sess_key_len; +}; + +static struct workqueue_struct *nvme_auth_wq; + +#define nvme_auth_flags_from_qid(qid) \ + (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED +#define nvme_auth_queue_from_qid(ctrl, qid) \ + (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q + +static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl) +{ + return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues + + ctrl->opts->nr_poll_queues + 1; +} + +static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, + void *data, size_t data_len, bool auth_send) +{ + struct nvme_command cmd = {}; + blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); + struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); + int ret; + + cmd.auth_common.opcode = nvme_fabrics_command; + cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER; + cmd.auth_common.spsp0 = 0x01; + cmd.auth_common.spsp1 = 0x01; + if (auth_send) { + cmd.auth_send.fctype = nvme_fabrics_type_auth_send; + cmd.auth_send.tl = cpu_to_le32(data_len); + } else { + cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive; + cmd.auth_receive.al = cpu_to_le32(data_len); + } + + ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len, + qid == 0 ? 
NVME_QID_ANY : qid, + 0, flags); + if (ret > 0) + dev_warn(ctrl->device, + "qid %d auth_send failed with status %d\n", qid, ret); + else if (ret < 0) + dev_err(ctrl->device, + "qid %d auth_send failed with error %d\n", qid, ret); + return ret; +} + +static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid, + struct nvmf_auth_dhchap_failure_data *data, + u16 transaction, u8 expected_msg) +{ + dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n", + __func__, qid, data->auth_type, data->auth_id); + + if (data->auth_type == NVME_AUTH_COMMON_MESSAGES && + data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { + return data->rescode_exp; + } + if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES || + data->auth_id != expected_msg) { + dev_warn(ctrl->device, + "qid %d invalid message %02x/%02x\n", + qid, data->auth_type, data->auth_id); + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; + } + if (le16_to_cpu(data->t_id) != transaction) { + dev_warn(ctrl->device, + "qid %d invalid transaction ID %d\n", + qid, le16_to_cpu(data->t_id)); + return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; + } + return 0; +} + +static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_negotiate_data *data = chap->buf; + size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol); + + if (size > CHAP_BUF_SIZE) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EINVAL; + } + memset((u8 *)chap->buf, 0, size); + data->auth_type = NVME_AUTH_COMMON_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; + data->t_id = cpu_to_le16(chap->transaction); + data->sc_c = 0; /* No secure channel concatenation */ + data->napd = 1; + data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID; + data->auth_protocol[0].dhchap.halen = 3; + data->auth_protocol[0].dhchap.dhlen = 6; + data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256; + data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384; + data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512; + data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL; + data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048; + data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072; + data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096; + data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; + data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; + + return size; +} + +static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_challenge_data *data = chap->buf; + u16 dhvlen = le16_to_cpu(data->dhvlen); + size_t size = sizeof(*data) + data->hl + dhvlen; + const char *gid_name = nvme_auth_dhgroup_name(data->dhgid); + const char *hmac_name, *kpp_name; + + if (size > CHAP_BUF_SIZE) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EINVAL; + } + + hmac_name = nvme_auth_hmac_name(data->hashid); + if (!hmac_name) { + dev_warn(ctrl->device, + "qid %d: invalid HASH ID %d\n", + chap->qid, data->hashid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + return -EPROTO; + } + + if (chap->hash_id == data->hashid && chap->shash_tfm && + !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) && + crypto_shash_digestsize(chap->shash_tfm) == data->hl) { + dev_dbg(ctrl->device, + "qid %d: reuse existing hash %s\n", + chap->qid, hmac_name); + goto select_kpp; + } + 
+ /* Reset if hash cannot be reused */ + if (chap->shash_tfm) { + crypto_free_shash(chap->shash_tfm); + chap->hash_id = 0; + chap->hash_len = 0; + } + chap->shash_tfm = crypto_alloc_shash(hmac_name, 0, + CRYPTO_ALG_ALLOCATES_MEMORY); + if (IS_ERR(chap->shash_tfm)) { + dev_warn(ctrl->device, + "qid %d: failed to allocate hash %s, error %ld\n", + chap->qid, hmac_name, PTR_ERR(chap->shash_tfm)); + chap->shash_tfm = NULL; + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ENOMEM; + } + + if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) { + dev_warn(ctrl->device, + "qid %d: invalid hash length %d\n", + chap->qid, data->hl); + crypto_free_shash(chap->shash_tfm); + chap->shash_tfm = NULL; + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + return -EPROTO; + } + + chap->hash_id = data->hashid; + chap->hash_len = data->hl; + dev_dbg(ctrl->device, "qid %d: selected hash %s\n", + chap->qid, hmac_name); + +select_kpp: + kpp_name = nvme_auth_dhgroup_kpp(data->dhgid); + if (!kpp_name) { + dev_warn(ctrl->device, + "qid %d: invalid DH group id %d\n", + chap->qid, data->dhgid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + /* Leave previous dh_tfm intact */ + return -EPROTO; + } + + if (chap->dhgroup_id == data->dhgid && + (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) { + dev_dbg(ctrl->device, + "qid %d: reuse existing DH group %s\n", + chap->qid, gid_name); + goto skip_kpp; + } + + /* Reset dh_tfm if it can't be reused */ + if (chap->dh_tfm) { + crypto_free_kpp(chap->dh_tfm); + chap->dh_tfm = NULL; + } + + if (data->dhgid != NVME_AUTH_DHGROUP_NULL) { + if (dhvlen == 0) { + dev_warn(ctrl->device, + "qid %d: empty DH value\n", + chap->qid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + return -EPROTO; + } + + chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0); + if (IS_ERR(chap->dh_tfm)) { + int ret = PTR_ERR(chap->dh_tfm); + + dev_warn(ctrl->device, + "qid %d: error %d initializing DH group %s\n", + chap->qid, ret, gid_name); + chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + chap->dh_tfm = NULL; + return ret; + } + dev_dbg(ctrl->device, "qid %d: selected DH group %s\n", + chap->qid, gid_name); + } else if (dhvlen != 0) { + dev_warn(ctrl->device, + "qid %d: invalid DH value for NULL DH\n", + chap->qid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EPROTO; + } + chap->dhgroup_id = data->dhgid; + +skip_kpp: + chap->s1 = le32_to_cpu(data->seqnum); + memcpy(chap->c1, data->cval, chap->hash_len); + if (dhvlen) { + chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL); + if (!chap->ctrl_key) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ENOMEM; + } + chap->ctrl_key_len = dhvlen; + memcpy(chap->ctrl_key, data->cval + chap->hash_len, + dhvlen); + dev_dbg(ctrl->device, "ctrl public key %*ph\n", + (int)chap->ctrl_key_len, chap->ctrl_key); + } + + return 0; +} + +static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_reply_data *data = chap->buf; + size_t size = sizeof(*data); + + size += 2 * chap->hash_len; + + if (chap->host_key_len) + size += chap->host_key_len; + + if (size > CHAP_BUF_SIZE) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EINVAL; + } + + memset(chap->buf, 0, size); + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY; + data->t_id = cpu_to_le16(chap->transaction); + data->hl = chap->hash_len; + data->dhvlen = cpu_to_le16(chap->host_key_len); 
+ memcpy(data->rval, chap->response, chap->hash_len); + if (ctrl->ctrl_key) { + get_random_bytes(chap->c2, chap->hash_len); + data->cvalid = 1; + chap->s2 = nvme_auth_get_seqnum(); + memcpy(data->rval + chap->hash_len, chap->c2, + chap->hash_len); + dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n", + __func__, chap->qid, (int)chap->hash_len, chap->c2); + } else { + memset(chap->c2, 0, chap->hash_len); + chap->s2 = 0; + } + data->seqnum = cpu_to_le32(chap->s2); + if (chap->host_key_len) { + dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n", + __func__, chap->qid, + chap->host_key_len, chap->host_key); + memcpy(data->rval + 2 * chap->hash_len, chap->host_key, + chap->host_key_len); + } + + return size; +} + +static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_success1_data *data = chap->buf; + size_t size = sizeof(*data); + + if (chap->s2) + size += chap->hash_len; + + if (size > CHAP_BUF_SIZE) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return -EINVAL; + } + + if (data->hl != chap->hash_len) { + dev_warn(ctrl->device, + "qid %d: invalid hash length %u\n", + chap->qid, data->hl); + chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; + return -EPROTO; + } + + /* Just print out information for the admin queue */ + if (chap->qid == 0) + dev_info(ctrl->device, + "qid 0: authenticated with hash %s dhgroup %s\n", + nvme_auth_hmac_name(chap->hash_id), + nvme_auth_dhgroup_name(chap->dhgroup_id)); + + if (!data->rvalid) + return 0; + + /* Validate controller response */ + if (memcmp(chap->response, data->rval, data->hl)) { + dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n", + __func__, chap->qid, (int)chap->hash_len, data->rval); + dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n", + __func__, chap->qid, (int)chap->hash_len, + chap->response); + dev_warn(ctrl->device, + "qid %d: controller authentication failed\n", + chap->qid); + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ECONNREFUSED; + } + + /* Just print out information for the admin queue */ + if (chap->qid == 0) + dev_info(ctrl->device, + "qid 0: controller authenticated\n"); + return 0; +} + +static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_success2_data *data = chap->buf; + size_t size = sizeof(*data); + + memset(chap->buf, 0, size); + data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; + data->t_id = cpu_to_le16(chap->transaction); + + return size; +} + +static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + struct nvmf_auth_dhchap_failure_data *data = chap->buf; + size_t size = sizeof(*data); + + memset(chap->buf, 0, size); + data->auth_type = NVME_AUTH_COMMON_MESSAGES; + data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; + data->t_id = cpu_to_le16(chap->transaction); + data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; + data->rescode_exp = chap->status; + + return size; +} + +static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + SHASH_DESC_ON_STACK(shash, chap->shash_tfm); + u8 buf[4], *challenge = chap->c1; + int ret; + + dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n", + __func__, chap->qid, chap->s1, chap->transaction); + + if (!chap->host_response) { + chap->host_response = 
nvme_auth_transform_key(ctrl->host_key, + ctrl->opts->host->nqn); + if (IS_ERR(chap->host_response)) { + ret = PTR_ERR(chap->host_response); + chap->host_response = NULL; + return ret; + } + } else { + dev_dbg(ctrl->device, "%s: qid %d re-using host response\n", + __func__, chap->qid); + } + + ret = crypto_shash_setkey(chap->shash_tfm, + chap->host_response, ctrl->host_key->len); + if (ret) { + dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", + chap->qid, ret); + goto out; + } + + if (chap->dh_tfm) { + challenge = kmalloc(chap->hash_len, GFP_KERNEL); + if (!challenge) { + ret = -ENOMEM; + goto out; + } + ret = nvme_auth_augmented_challenge(chap->hash_id, + chap->sess_key, + chap->sess_key_len, + chap->c1, challenge, + chap->hash_len); + if (ret) + goto out; + } + + shash->tfm = chap->shash_tfm; + ret = crypto_shash_init(shash); + if (ret) + goto out; + ret = crypto_shash_update(shash, challenge, chap->hash_len); + if (ret) + goto out; + put_unaligned_le32(chap->s1, buf); + ret = crypto_shash_update(shash, buf, 4); + if (ret) + goto out; + put_unaligned_le16(chap->transaction, buf); + ret = crypto_shash_update(shash, buf, 2); + if (ret) + goto out; + memset(buf, 0, sizeof(buf)); + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, "HostHost", 8); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->host->nqn, + strlen(ctrl->opts->host->nqn)); + if (ret) + goto out; + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, + strlen(ctrl->opts->subsysnqn)); + if (ret) + goto out; + ret = crypto_shash_final(shash, chap->response); +out: + if (challenge != chap->c1) + kfree(challenge); + return ret; +} + +static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + SHASH_DESC_ON_STACK(shash, chap->shash_tfm); + u8 *ctrl_response; + u8 buf[4], *challenge = chap->c2; + int ret; + + ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, + ctrl->opts->subsysnqn); + if (IS_ERR(ctrl_response)) { + ret = PTR_ERR(ctrl_response); + return ret; + } + + ret = crypto_shash_setkey(chap->shash_tfm, + ctrl_response, ctrl->ctrl_key->len); + if (ret) { + dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", + chap->qid, ret); + goto out; + } + + if (chap->dh_tfm) { + challenge = kmalloc(chap->hash_len, GFP_KERNEL); + if (!challenge) { + ret = -ENOMEM; + goto out; + } + ret = nvme_auth_augmented_challenge(chap->hash_id, + chap->sess_key, + chap->sess_key_len, + chap->c2, challenge, + chap->hash_len); + if (ret) + goto out; + } + dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n", + __func__, chap->qid, chap->s2, chap->transaction); + dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n", + __func__, chap->qid, (int)chap->hash_len, challenge); + dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n", + __func__, chap->qid, ctrl->opts->subsysnqn); + dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n", + __func__, chap->qid, ctrl->opts->host->nqn); + shash->tfm = chap->shash_tfm; + ret = crypto_shash_init(shash); + if (ret) + goto out; + ret = crypto_shash_update(shash, challenge, chap->hash_len); + if (ret) + goto out; + put_unaligned_le32(chap->s2, buf); + ret = crypto_shash_update(shash, buf, 4); + if (ret) + goto out; + put_unaligned_le16(chap->transaction, buf); + ret = crypto_shash_update(shash, buf, 2); + if (ret) + goto out; + memset(buf, 0, 4); + ret = crypto_shash_update(shash, 
buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, "Controller", 10); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, + strlen(ctrl->opts->subsysnqn)); + if (ret) + goto out; + ret = crypto_shash_update(shash, buf, 1); + if (ret) + goto out; + ret = crypto_shash_update(shash, ctrl->opts->host->nqn, + strlen(ctrl->opts->host->nqn)); + if (ret) + goto out; + ret = crypto_shash_final(shash, chap->response); +out: + if (challenge != chap->c2) + kfree(challenge); + kfree(ctrl_response); + return ret; +} + +static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + int ret; + + if (chap->host_key && chap->host_key_len) { + dev_dbg(ctrl->device, + "qid %d: reusing host key\n", chap->qid); + goto gen_sesskey; + } + ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id); + if (ret < 0) { + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return ret; + } + + chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm); + + chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL); + if (!chap->host_key) { + chap->host_key_len = 0; + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ENOMEM; + } + ret = nvme_auth_gen_pubkey(chap->dh_tfm, + chap->host_key, chap->host_key_len); + if (ret) { + dev_dbg(ctrl->device, + "failed to generate public key, error %d\n", ret); + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return ret; + } + +gen_sesskey: + chap->sess_key_len = chap->host_key_len; + chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL); + if (!chap->sess_key) { + chap->sess_key_len = 0; + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + return -ENOMEM; + } + + ret = nvme_auth_gen_shared_secret(chap->dh_tfm, + chap->ctrl_key, chap->ctrl_key_len, + chap->sess_key, chap->sess_key_len); + if (ret) { + dev_dbg(ctrl->device, + "failed to generate shared secret, error %d\n", ret); + chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; + return ret; + } + dev_dbg(ctrl->device, "shared secret %*ph\n", + (int)chap->sess_key_len, chap->sess_key); + return 0; +} + +static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) +{ + kfree_sensitive(chap->host_response); + chap->host_response = NULL; + kfree_sensitive(chap->host_key); + chap->host_key = NULL; + chap->host_key_len = 0; + kfree_sensitive(chap->ctrl_key); + chap->ctrl_key = NULL; + chap->ctrl_key_len = 0; + kfree_sensitive(chap->sess_key); + chap->sess_key = NULL; + chap->sess_key_len = 0; + chap->status = 0; + chap->error = 0; + chap->s1 = 0; + chap->s2 = 0; + chap->transaction = 0; + memset(chap->c1, 0, sizeof(chap->c1)); + memset(chap->c2, 0, sizeof(chap->c2)); + mempool_free(chap->buf, nvme_chap_buf_pool); + chap->buf = NULL; +} + +static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap) +{ + nvme_auth_reset_dhchap(chap); + if (chap->shash_tfm) + crypto_free_shash(chap->shash_tfm); + if (chap->dh_tfm) + crypto_free_kpp(chap->dh_tfm); +} + +static void nvme_queue_auth_work(struct work_struct *work) +{ + struct nvme_dhchap_queue_context *chap = + container_of(work, struct nvme_dhchap_queue_context, auth_work); + struct nvme_ctrl *ctrl = chap->ctrl; + size_t tl; + int ret = 0; + + /* + * Allocate a large enough buffer for the entire negotiation: + * 4k is enough to ffdhe8192. 
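+	 * (an ffdhe8192 public value is 1024 bytes, so even the challenge and
+	 * reply payloads that carry it fit comfortably in a 4k buffer).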
+ */ + chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL); + if (!chap->buf) { + chap->error = -ENOMEM; + return; + } + + chap->transaction = ctrl->transaction++; + + /* DH-HMAC-CHAP Step 1: send negotiate */ + dev_dbg(ctrl->device, "%s: qid %d send negotiate\n", + __func__, chap->qid); + ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap); + if (ret < 0) { + chap->error = ret; + return; + } + tl = ret; + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + if (ret) { + chap->error = ret; + return; + } + + /* DH-HMAC-CHAP Step 2: receive challenge */ + dev_dbg(ctrl->device, "%s: qid %d receive challenge\n", + __func__, chap->qid); + + memset(chap->buf, 0, CHAP_BUF_SIZE); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE, + false); + if (ret) { + dev_warn(ctrl->device, + "qid %d failed to receive challenge, %s %d\n", + chap->qid, ret < 0 ? "error" : "nvme status", ret); + chap->error = ret; + return; + } + ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction, + NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE); + if (ret) { + chap->status = ret; + chap->error = -ECONNREFUSED; + return; + } + + ret = nvme_auth_process_dhchap_challenge(ctrl, chap); + if (ret) { + /* Invalid challenge parameters */ + chap->error = ret; + goto fail2; + } + + if (chap->ctrl_key_len) { + dev_dbg(ctrl->device, + "%s: qid %d DH exponential\n", + __func__, chap->qid); + ret = nvme_auth_dhchap_exponential(ctrl, chap); + if (ret) { + chap->error = ret; + goto fail2; + } + } + + dev_dbg(ctrl->device, "%s: qid %d host response\n", + __func__, chap->qid); + mutex_lock(&ctrl->dhchap_auth_mutex); + ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); + if (ret) { + mutex_unlock(&ctrl->dhchap_auth_mutex); + chap->error = ret; + goto fail2; + } + mutex_unlock(&ctrl->dhchap_auth_mutex); + + /* DH-HMAC-CHAP Step 3: send reply */ + dev_dbg(ctrl->device, "%s: qid %d send reply\n", + __func__, chap->qid); + ret = nvme_auth_set_dhchap_reply_data(ctrl, chap); + if (ret < 0) { + chap->error = ret; + goto fail2; + } + + tl = ret; + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + if (ret) { + chap->error = ret; + goto fail2; + } + + /* DH-HMAC-CHAP Step 4: receive success1 */ + dev_dbg(ctrl->device, "%s: qid %d receive success1\n", + __func__, chap->qid); + + memset(chap->buf, 0, CHAP_BUF_SIZE); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE, + false); + if (ret) { + dev_warn(ctrl->device, + "qid %d failed to receive success1, %s %d\n", + chap->qid, ret < 0 ? 
"error" : "nvme status", ret); + chap->error = ret; + return; + } + ret = nvme_auth_receive_validate(ctrl, chap->qid, + chap->buf, chap->transaction, + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1); + if (ret) { + chap->status = ret; + chap->error = -ECONNREFUSED; + return; + } + + mutex_lock(&ctrl->dhchap_auth_mutex); + if (ctrl->ctrl_key) { + dev_dbg(ctrl->device, + "%s: qid %d controller response\n", + __func__, chap->qid); + ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap); + if (ret) { + mutex_unlock(&ctrl->dhchap_auth_mutex); + chap->error = ret; + goto fail2; + } + } + mutex_unlock(&ctrl->dhchap_auth_mutex); + + ret = nvme_auth_process_dhchap_success1(ctrl, chap); + if (ret) { + /* Controller authentication failed */ + chap->error = -ECONNREFUSED; + goto fail2; + } + + if (chap->s2) { + /* DH-HMAC-CHAP Step 5: send success2 */ + dev_dbg(ctrl->device, "%s: qid %d send success2\n", + __func__, chap->qid); + tl = nvme_auth_set_dhchap_success2_data(ctrl, chap); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + if (ret) + chap->error = ret; + } + if (!ret) { + chap->error = 0; + return; + } + +fail2: + if (chap->status == 0) + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; + dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", + __func__, chap->qid, chap->status); + tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); + ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); + /* + * only update error if send failure2 failed and no other + * error had been set during authentication. + */ + if (ret && !chap->error) + chap->error = ret; +} + +int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) +{ + struct nvme_dhchap_queue_context *chap; + + if (!ctrl->host_key) { + dev_warn(ctrl->device, "qid %d: no key\n", qid); + return -ENOKEY; + } + + if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) { + dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid); + return -ENOKEY; + } + + chap = &ctrl->dhchap_ctxs[qid]; + cancel_work_sync(&chap->auth_work); + queue_work(nvme_auth_wq, &chap->auth_work); + return 0; +} +EXPORT_SYMBOL_GPL(nvme_auth_negotiate); + +int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) +{ + struct nvme_dhchap_queue_context *chap; + int ret; + + chap = &ctrl->dhchap_ctxs[qid]; + flush_work(&chap->auth_work); + ret = chap->error; + /* clear sensitive info */ + nvme_auth_reset_dhchap(chap); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_wait); + +static void nvme_ctrl_auth_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, dhchap_auth_work); + int ret, q; + + /* + * If the ctrl is no connected, bail as reconnect will handle + * authentication. + */ + if (ctrl->state != NVME_CTRL_LIVE) + return; + + /* Authenticate admin queue first */ + ret = nvme_auth_negotiate(ctrl, 0); + if (ret) { + dev_warn(ctrl->device, + "qid 0: error %d setting up authentication\n", ret); + return; + } + ret = nvme_auth_wait(ctrl, 0); + if (ret) { + dev_warn(ctrl->device, + "qid 0: authentication failed\n"); + return; + } + + for (q = 1; q < ctrl->queue_count; q++) { + ret = nvme_auth_negotiate(ctrl, q); + if (ret) { + dev_warn(ctrl->device, + "qid %d: error %d setting up authentication\n", + q, ret); + break; + } + } + + /* + * Failure is a soft-state; credentials remain valid until + * the controller terminates the connection. 
+ */ + for (q = 1; q < ctrl->queue_count; q++) { + ret = nvme_auth_wait(ctrl, q); + if (ret) + dev_warn(ctrl->device, + "qid %d: authentication failed\n", q); + } +} + +int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) +{ + struct nvme_dhchap_queue_context *chap; + int i, ret; + + mutex_init(&ctrl->dhchap_auth_mutex); + INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work); + if (!ctrl->opts) + return 0; + ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret, + &ctrl->host_key); + if (ret) + return ret; + ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, + &ctrl->ctrl_key); + if (ret) + goto err_free_dhchap_secret; + + if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret) + return 0; + + ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl), + sizeof(*chap), GFP_KERNEL); + if (!ctrl->dhchap_ctxs) { + ret = -ENOMEM; + goto err_free_dhchap_ctrl_secret; + } + + for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) { + chap = &ctrl->dhchap_ctxs[i]; + chap->qid = i; + chap->ctrl = ctrl; + INIT_WORK(&chap->auth_work, nvme_queue_auth_work); + } + + return 0; +err_free_dhchap_ctrl_secret: + nvme_auth_free_key(ctrl->ctrl_key); + ctrl->ctrl_key = NULL; +err_free_dhchap_secret: + nvme_auth_free_key(ctrl->host_key); + ctrl->host_key = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl); + +void nvme_auth_stop(struct nvme_ctrl *ctrl) +{ + cancel_work_sync(&ctrl->dhchap_auth_work); +} +EXPORT_SYMBOL_GPL(nvme_auth_stop); + +void nvme_auth_free(struct nvme_ctrl *ctrl) +{ + int i; + + if (ctrl->dhchap_ctxs) { + for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) + nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); + kfree(ctrl->dhchap_ctxs); + } + if (ctrl->host_key) { + nvme_auth_free_key(ctrl->host_key); + ctrl->host_key = NULL; + } + if (ctrl->ctrl_key) { + nvme_auth_free_key(ctrl->ctrl_key); + ctrl->ctrl_key = NULL; + } +} +EXPORT_SYMBOL_GPL(nvme_auth_free); + +int __init nvme_init_auth(void) +{ + nvme_auth_wq = alloc_workqueue("nvme-auth-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + if (!nvme_auth_wq) + return -ENOMEM; + + nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache", + CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL); + if (!nvme_chap_buf_cache) + goto err_destroy_workqueue; + + nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab, + mempool_free_slab, nvme_chap_buf_cache); + if (!nvme_chap_buf_pool) + goto err_destroy_chap_buf_cache; + + return 0; +err_destroy_chap_buf_cache: + kmem_cache_destroy(nvme_chap_buf_cache); +err_destroy_workqueue: + destroy_workqueue(nvme_auth_wq); + return -ENOMEM; +} + +void __exit nvme_exit_auth(void) +{ + mempool_destroy(nvme_chap_buf_pool); + kmem_cache_destroy(nvme_chap_buf_cache); + destroy_workqueue(nvme_auth_wq); +} diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c new file mode 100644 index 0000000000..20f46c2308 --- /dev/null +++ b/drivers/nvme/host/constants.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM Express device driver verbose errors + * Copyright (c) 2022, Oracle and/or its affiliates + */ + +#include "nvme.h" + +static const char * const nvme_ops[] = { + [nvme_cmd_flush] = "Flush", + [nvme_cmd_write] = "Write", + [nvme_cmd_read] = "Read", + [nvme_cmd_write_uncor] = "Write Uncorrectable", + [nvme_cmd_compare] = "Compare", + [nvme_cmd_write_zeroes] = "Write Zeroes", + [nvme_cmd_dsm] = "Dataset Management", + [nvme_cmd_verify] = "Verify", + [nvme_cmd_resv_register] = "Reservation Register", + [nvme_cmd_resv_report] = "Reservation Report", + [nvme_cmd_resv_acquire] = 
"Reservation Acquire", + [nvme_cmd_resv_release] = "Reservation Release", + [nvme_cmd_zone_mgmt_send] = "Zone Management Send", + [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", + [nvme_cmd_zone_append] = "Zone Append", +}; + +static const char * const nvme_admin_ops[] = { + [nvme_admin_delete_sq] = "Delete SQ", + [nvme_admin_create_sq] = "Create SQ", + [nvme_admin_get_log_page] = "Get Log Page", + [nvme_admin_delete_cq] = "Delete CQ", + [nvme_admin_create_cq] = "Create CQ", + [nvme_admin_identify] = "Identify", + [nvme_admin_abort_cmd] = "Abort Command", + [nvme_admin_set_features] = "Set Features", + [nvme_admin_get_features] = "Get Features", + [nvme_admin_async_event] = "Async Event", + [nvme_admin_ns_mgmt] = "Namespace Management", + [nvme_admin_activate_fw] = "Activate Firmware", + [nvme_admin_download_fw] = "Download Firmware", + [nvme_admin_dev_self_test] = "Device Self Test", + [nvme_admin_ns_attach] = "Namespace Attach", + [nvme_admin_keep_alive] = "Keep Alive", + [nvme_admin_directive_send] = "Directive Send", + [nvme_admin_directive_recv] = "Directive Receive", + [nvme_admin_virtual_mgmt] = "Virtual Management", + [nvme_admin_nvme_mi_send] = "NVMe Send MI", + [nvme_admin_nvme_mi_recv] = "NVMe Receive MI", + [nvme_admin_dbbuf] = "Doorbell Buffer Config", + [nvme_admin_format_nvm] = "Format NVM", + [nvme_admin_security_send] = "Security Send", + [nvme_admin_security_recv] = "Security Receive", + [nvme_admin_sanitize_nvm] = "Sanitize NVM", + [nvme_admin_get_lba_status] = "Get LBA Status", +}; + +static const char * const nvme_fabrics_ops[] = { + [nvme_fabrics_type_property_set] = "Property Set", + [nvme_fabrics_type_property_get] = "Property Get", + [nvme_fabrics_type_connect] = "Connect", + [nvme_fabrics_type_auth_send] = "Authentication Send", + [nvme_fabrics_type_auth_receive] = "Authentication Receive", +}; + +static const char * const nvme_statuses[] = { + [NVME_SC_SUCCESS] = "Success", + [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode", + [NVME_SC_INVALID_FIELD] = "Invalid Field in Command", + [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict", + [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error", + [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification", + [NVME_SC_INTERNAL] = "Internal Error", + [NVME_SC_ABORT_REQ] = "Command Abort Requested", + [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion", + [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command", + [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command", + [NVME_SC_INVALID_NS] = "Invalid Namespace or Format", + [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error", + [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor", + [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors", + [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid", + [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid", + [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid", + [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer", + [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid", + [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded", + [NVME_SC_OP_DENIED] = "Operation Denied", + [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid", + [NVME_SC_RESERVED] = "Reserved", + [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format", + [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired", + [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid", + [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to 
Preempt and Abort", + [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed", + [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress", + [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid", + [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB", + [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected", + [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted", + [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error", + [NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY] = "Admin Command Media Not Ready", + [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set", + [NVME_SC_LBA_RANGE] = "LBA Out of Range", + [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded", + [NVME_SC_NS_NOT_READY] = "Namespace Not Ready", + [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict", + [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress", + [NVME_SC_CQ_INVALID] = "Completion Queue Invalid", + [NVME_SC_QID_INVALID] = "Invalid Queue Identifier", + [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size", + [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded", + [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */ + [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded", + [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot", + [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image", + [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector", + [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page", + [NVME_SC_INVALID_FORMAT] = "Invalid Format", + [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset", + [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion", + [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable", + [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable", + [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific", + [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset", + [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset", + [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation", + [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited", + [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range", + [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity", + [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable", + [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached", + [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private", + [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached", + [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported", + [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid", + [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress", + [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited", + [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier", + [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State", + [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources", + [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier", + [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited", + [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid", + [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed", + [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes", + [NVME_SC_INVALID_PI] = "Invalid Protection Information", + [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range", + [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported", + [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error", + [NVME_SC_ZONE_FULL] = "Zone Is Full", + [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read 
Only", + [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline", + [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write", + [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones", + [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones", + [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition", + [NVME_SC_WRITE_FAULT] = "Write Fault", + [NVME_SC_READ_ERROR] = "Unrecovered Read Error", + [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error", + [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error", + [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error", + [NVME_SC_COMPARE_FAILED] = "Compare Failure", + [NVME_SC_ACCESS_DENIED] = "Access Denied", + [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block", + [NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error", + [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss", + [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible", + [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition", + [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error", + [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error", + [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command", +}; + +const unsigned char *nvme_get_error_status_str(u16 status) +{ + status &= 0x7ff; + if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) + return nvme_statuses[status & 0x7ff]; + return "Unknown"; +} + +const unsigned char *nvme_get_opcode_str(u8 opcode) +{ + if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode]) + return nvme_ops[opcode]; + return "Unknown"; +} +EXPORT_SYMBOL_GPL(nvme_get_opcode_str); + +const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +{ + if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode]) + return nvme_admin_ops[opcode]; + return "Unknown"; +} +EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str); + +const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) { + if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode]) + return nvme_fabrics_ops[opcode]; + return "Unknown"; +} +EXPORT_SYMBOL_GPL(nvme_get_fabrics_opcode_str); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c new file mode 100644 index 0000000000..d4564a2517 --- /dev/null +++ b/drivers/nvme/host/core.c @@ -0,0 +1,4803 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM Express device driver + * Copyright (c) 2011-2014, Intel Corporation. 
+ */ + +#include <linux/blkdev.h> +#include <linux/blk-mq.h> +#include <linux/blk-integrity.h> +#include <linux/compat.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/hdreg.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/backing-dev.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pr.h> +#include <linux/ptrace.h> +#include <linux/nvme_ioctl.h> +#include <linux/pm_qos.h> +#include <asm/unaligned.h> + +#include "nvme.h" +#include "fabrics.h" +#include <linux/nvme-auth.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" + +#define NVME_MINORS (1U << MINORBITS) + +struct nvme_ns_info { + struct nvme_ns_ids ids; + u32 nsid; + __le32 anagrpid; + bool is_shared; + bool is_readonly; + bool is_ready; + bool is_removed; +}; + +unsigned int admin_timeout = 60; +module_param(admin_timeout, uint, 0644); +MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); +EXPORT_SYMBOL_GPL(admin_timeout); + +unsigned int nvme_io_timeout = 30; +module_param_named(io_timeout, nvme_io_timeout, uint, 0644); +MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); +EXPORT_SYMBOL_GPL(nvme_io_timeout); + +static unsigned char shutdown_timeout = 5; +module_param(shutdown_timeout, byte, 0644); +MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); + +static u8 nvme_max_retries = 5; +module_param_named(max_retries, nvme_max_retries, byte, 0644); +MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); + +static unsigned long default_ps_max_latency_us = 100000; +module_param(default_ps_max_latency_us, ulong, 0644); +MODULE_PARM_DESC(default_ps_max_latency_us, + "max power saving latency for new devices; use PM QOS to change per device"); + +static bool force_apst; +module_param(force_apst, bool, 0644); +MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); + +static unsigned long apst_primary_timeout_ms = 100; +module_param(apst_primary_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(apst_primary_timeout_ms, + "primary APST timeout in ms"); + +static unsigned long apst_secondary_timeout_ms = 2000; +module_param(apst_secondary_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(apst_secondary_timeout_ms, + "secondary APST timeout in ms"); + +static unsigned long apst_primary_latency_tol_us = 15000; +module_param(apst_primary_latency_tol_us, ulong, 0644); +MODULE_PARM_DESC(apst_primary_latency_tol_us, + "primary APST latency tolerance in us"); + +static unsigned long apst_secondary_latency_tol_us = 100000; +module_param(apst_secondary_latency_tol_us, ulong, 0644); +MODULE_PARM_DESC(apst_secondary_latency_tol_us, + "secondary APST latency tolerance in us"); + +/* + * nvme_wq - hosts nvme related works that are not reset or delete + * nvme_reset_wq - hosts nvme reset works + * nvme_delete_wq - hosts nvme delete works + * + * nvme_wq will host works such as scan, aen handling, fw activation, + * keep-alive, periodic reconnects etc. nvme_reset_wq + * runs reset works which also flush works hosted on nvme_wq for + * serialization purposes. nvme_delete_wq host controller deletion + * works which flush reset works for serialization. 
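+ * Put differently: deletion flushes reset, and reset flushes nvme_wq,
+ * so each level can assume the lower-level works have quiesced.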
+ */ +struct workqueue_struct *nvme_wq; +EXPORT_SYMBOL_GPL(nvme_wq); + +struct workqueue_struct *nvme_reset_wq; +EXPORT_SYMBOL_GPL(nvme_reset_wq); + +struct workqueue_struct *nvme_delete_wq; +EXPORT_SYMBOL_GPL(nvme_delete_wq); + +static LIST_HEAD(nvme_subsystems); +static DEFINE_MUTEX(nvme_subsystems_lock); + +static DEFINE_IDA(nvme_instance_ida); +static dev_t nvme_ctrl_base_chr_devt; +static struct class *nvme_class; +static struct class *nvme_subsys_class; + +static DEFINE_IDA(nvme_ns_chr_minor_ida); +static dev_t nvme_ns_chr_devt; +static struct class *nvme_ns_chr_class; + +static void nvme_put_subsystem(struct nvme_subsystem *subsys); +static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, + unsigned nsid); +static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, + struct nvme_command *cmd); + +void nvme_queue_scan(struct nvme_ctrl *ctrl) +{ + /* + * Only new queue scan work when admin and IO queues are both alive + */ + if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset) + queue_work(nvme_wq, &ctrl->scan_work); +} + +/* + * Use this function to proceed with scheduling reset_work for a controller + * that had previously been set to the resetting state. This is intended for + * code paths that can't be interrupted by other reset attempts. A hot removal + * may prevent this from succeeding. + */ +int nvme_try_sched_reset(struct nvme_ctrl *ctrl) +{ + if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING) + return -EBUSY; + if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) + return -EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(nvme_try_sched_reset); + +static void nvme_failfast_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), + struct nvme_ctrl, failfast_work); + + if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING) + return; + + set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); + dev_info(ctrl->device, "failfast expired\n"); + nvme_kick_requeue_lists(ctrl); +} + +static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) +{ + if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) + return; + + schedule_delayed_work(&ctrl->failfast_work, + ctrl->opts->fast_io_fail_tmo * HZ); +} + +static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) +{ + if (!ctrl->opts) + return; + + cancel_delayed_work_sync(&ctrl->failfast_work); + clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); +} + + +int nvme_reset_ctrl(struct nvme_ctrl *ctrl) +{ + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) + return -EBUSY; + if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) + return -EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(nvme_reset_ctrl); + +int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) +{ + int ret; + + ret = nvme_reset_ctrl(ctrl); + if (!ret) { + flush_work(&ctrl->reset_work); + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) + ret = -ENETRESET; + } + + return ret; +} + +static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) +{ + dev_info(ctrl->device, + "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl)); + + flush_work(&ctrl->reset_work); + nvme_stop_ctrl(ctrl); + nvme_remove_namespaces(ctrl); + ctrl->ops->delete_ctrl(ctrl); + nvme_uninit_ctrl(ctrl); +} + +static void nvme_delete_ctrl_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, delete_work); + + nvme_do_delete_ctrl(ctrl); +} + +int nvme_delete_ctrl(struct nvme_ctrl *ctrl) +{ + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) + return -EBUSY; + if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) + return 
-EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(nvme_delete_ctrl); + +void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) +{ + /* + * Keep a reference until nvme_do_delete_ctrl() complete, + * since ->delete_ctrl can free the controller. + */ + nvme_get_ctrl(ctrl); + if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) + nvme_do_delete_ctrl(ctrl); + nvme_put_ctrl(ctrl); +} + +static blk_status_t nvme_error_status(u16 status) +{ + switch (status & 0x7ff) { + case NVME_SC_SUCCESS: + return BLK_STS_OK; + case NVME_SC_CAP_EXCEEDED: + return BLK_STS_NOSPC; + case NVME_SC_LBA_RANGE: + case NVME_SC_CMD_INTERRUPTED: + case NVME_SC_NS_NOT_READY: + return BLK_STS_TARGET; + case NVME_SC_BAD_ATTRIBUTES: + case NVME_SC_ONCS_NOT_SUPPORTED: + case NVME_SC_INVALID_OPCODE: + case NVME_SC_INVALID_FIELD: + case NVME_SC_INVALID_NS: + return BLK_STS_NOTSUPP; + case NVME_SC_WRITE_FAULT: + case NVME_SC_READ_ERROR: + case NVME_SC_UNWRITTEN_BLOCK: + case NVME_SC_ACCESS_DENIED: + case NVME_SC_READ_ONLY: + case NVME_SC_COMPARE_FAILED: + return BLK_STS_MEDIUM; + case NVME_SC_GUARD_CHECK: + case NVME_SC_APPTAG_CHECK: + case NVME_SC_REFTAG_CHECK: + case NVME_SC_INVALID_PI: + return BLK_STS_PROTECTION; + case NVME_SC_RESERVATION_CONFLICT: + return BLK_STS_RESV_CONFLICT; + case NVME_SC_HOST_PATH_ERROR: + return BLK_STS_TRANSPORT; + case NVME_SC_ZONE_TOO_MANY_ACTIVE: + return BLK_STS_ZONE_ACTIVE_RESOURCE; + case NVME_SC_ZONE_TOO_MANY_OPEN: + return BLK_STS_ZONE_OPEN_RESOURCE; + default: + return BLK_STS_IOERR; + } +} + +static void nvme_retry_req(struct request *req) +{ + unsigned long delay = 0; + u16 crd; + + /* The mask and shift result must be <= 3 */ + crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; + if (crd) + delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; + + nvme_req(req)->retries++; + blk_mq_requeue_request(req, false); + blk_mq_delay_kick_requeue_list(req->q, delay); +} + +static void nvme_log_error(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + struct nvme_request *nr = nvme_req(req); + + if (ns) { + pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", + ns->disk ? ns->disk->disk_name : "?", + nvme_get_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), + (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, + nvme_get_error_status_str(nr->status), + nr->status >> 8 & 7, /* Status Code Type */ + nr->status & 0xff, /* Status Code */ + nr->status & NVME_SC_MORE ? "MORE " : "", + nr->status & NVME_SC_DNR ? "DNR " : ""); + return; + } + + pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", + dev_name(nr->ctrl->device), + nvme_get_admin_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + nvme_get_error_status_str(nr->status), + nr->status >> 8 & 7, /* Status Code Type */ + nr->status & 0xff, /* Status Code */ + nr->status & NVME_SC_MORE ? "MORE " : "", + nr->status & NVME_SC_DNR ? 
"DNR " : ""); +} + +enum nvme_disposition { + COMPLETE, + RETRY, + FAILOVER, + AUTHENTICATE, +}; + +static inline enum nvme_disposition nvme_decide_disposition(struct request *req) +{ + if (likely(nvme_req(req)->status == 0)) + return COMPLETE; + + if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) + return AUTHENTICATE; + + if (blk_noretry_request(req) || + (nvme_req(req)->status & NVME_SC_DNR) || + nvme_req(req)->retries >= nvme_max_retries) + return COMPLETE; + + if (req->cmd_flags & REQ_NVME_MPATH) { + if (nvme_is_path_error(nvme_req(req)->status) || + blk_queue_dying(req->q)) + return FAILOVER; + } else { + if (blk_queue_dying(req->q)) + return COMPLETE; + } + + return RETRY; +} + +static inline void nvme_end_req_zoned(struct request *req) +{ + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + req_op(req) == REQ_OP_ZONE_APPEND) + req->__sector = nvme_lba_to_sect(req->q->queuedata, + le64_to_cpu(nvme_req(req)->result.u64)); +} + +static inline void nvme_end_req(struct request *req) +{ + blk_status_t status = nvme_error_status(nvme_req(req)->status); + + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) + nvme_log_error(req); + nvme_end_req_zoned(req); + nvme_trace_bio_complete(req); + if (req->cmd_flags & REQ_NVME_MPATH) + nvme_mpath_end_request(req); + blk_mq_end_request(req, status); +} + +void nvme_complete_rq(struct request *req) +{ + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; + + trace_nvme_complete_rq(req); + nvme_cleanup_cmd(req); + + /* + * Completions of long-running commands should not be able to + * defer sending of periodic keep alives, since the controller + * may have completed processing such commands a long time ago + * (arbitrarily close to command submission time). + * req->deadline - req->timeout is the command submission time + * in jiffies. + */ + if (ctrl->kas && + req->deadline - req->timeout >= ctrl->ka_last_check_time) + ctrl->comp_seen = true; + + switch (nvme_decide_disposition(req)) { + case COMPLETE: + nvme_end_req(req); + return; + case RETRY: + nvme_retry_req(req); + return; + case FAILOVER: + nvme_failover_req(req); + return; + case AUTHENTICATE: +#ifdef CONFIG_NVME_AUTH + queue_work(nvme_wq, &ctrl->dhchap_auth_work); + nvme_retry_req(req); +#else + nvme_end_req(req); +#endif + return; + } +} +EXPORT_SYMBOL_GPL(nvme_complete_rq); + +void nvme_complete_batch_req(struct request *req) +{ + trace_nvme_complete_rq(req); + nvme_cleanup_cmd(req); + nvme_end_req_zoned(req); +} +EXPORT_SYMBOL_GPL(nvme_complete_batch_req); + +/* + * Called to unwind from ->queue_rq on a failed command submission so that the + * multipathing code gets called to potentially failover to another path. + * The caller needs to unwind all transport specific resource allocations and + * must return propagate the return value. 
+ */ +blk_status_t nvme_host_path_error(struct request *req) +{ + nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; + blk_mq_set_request_complete(req); + nvme_complete_rq(req); + return BLK_STS_OK; +} +EXPORT_SYMBOL_GPL(nvme_host_path_error); + +bool nvme_cancel_request(struct request *req, void *data) +{ + dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, + "Cancelling I/O %d", req->tag); + + /* don't abort one completed or idle request */ + if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) + return true; + + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + blk_mq_complete_request(req); + return true; +} +EXPORT_SYMBOL_GPL(nvme_cancel_request); + +void nvme_cancel_tagset(struct nvme_ctrl *ctrl) +{ + if (ctrl->tagset) { + blk_mq_tagset_busy_iter(ctrl->tagset, + nvme_cancel_request, ctrl); + blk_mq_tagset_wait_completed_request(ctrl->tagset); + } +} +EXPORT_SYMBOL_GPL(nvme_cancel_tagset); + +void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) +{ + if (ctrl->admin_tagset) { + blk_mq_tagset_busy_iter(ctrl->admin_tagset, + nvme_cancel_request, ctrl); + blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); + } +} +EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); + +bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, + enum nvme_ctrl_state new_state) +{ + enum nvme_ctrl_state old_state; + unsigned long flags; + bool changed = false; + + spin_lock_irqsave(&ctrl->lock, flags); + + old_state = nvme_ctrl_state(ctrl); + switch (new_state) { + case NVME_CTRL_LIVE: + switch (old_state) { + case NVME_CTRL_NEW: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + changed = true; + fallthrough; + default: + break; + } + break; + case NVME_CTRL_RESETTING: + switch (old_state) { + case NVME_CTRL_NEW: + case NVME_CTRL_LIVE: + changed = true; + fallthrough; + default: + break; + } + break; + case NVME_CTRL_CONNECTING: + switch (old_state) { + case NVME_CTRL_NEW: + case NVME_CTRL_RESETTING: + changed = true; + fallthrough; + default: + break; + } + break; + case NVME_CTRL_DELETING: + switch (old_state) { + case NVME_CTRL_LIVE: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + changed = true; + fallthrough; + default: + break; + } + break; + case NVME_CTRL_DELETING_NOIO: + switch (old_state) { + case NVME_CTRL_DELETING: + case NVME_CTRL_DEAD: + changed = true; + fallthrough; + default: + break; + } + break; + case NVME_CTRL_DEAD: + switch (old_state) { + case NVME_CTRL_DELETING: + changed = true; + fallthrough; + default: + break; + } + break; + default: + break; + } + + if (changed) { + WRITE_ONCE(ctrl->state, new_state); + wake_up_all(&ctrl->state_wq); + } + + spin_unlock_irqrestore(&ctrl->lock, flags); + if (!changed) + return false; + + if (new_state == NVME_CTRL_LIVE) { + if (old_state == NVME_CTRL_CONNECTING) + nvme_stop_failfast_work(ctrl); + nvme_kick_requeue_lists(ctrl); + } else if (new_state == NVME_CTRL_CONNECTING && + old_state == NVME_CTRL_RESETTING) { + nvme_start_failfast_work(ctrl); + } + return changed; +} +EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); + +/* + * Returns true for sink states that can't ever transition back to live. 
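+ * (DELETING, DELETING_NOIO and DEAD; every other state can still
+ * transition to LIVE).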
+ */ +static bool nvme_state_terminal(struct nvme_ctrl *ctrl) +{ + switch (nvme_ctrl_state(ctrl)) { + case NVME_CTRL_NEW: + case NVME_CTRL_LIVE: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + return false; + case NVME_CTRL_DELETING: + case NVME_CTRL_DELETING_NOIO: + case NVME_CTRL_DEAD: + return true; + default: + WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); + return true; + } +} + +/* + * Waits for the controller state to be resetting, or returns false if it is + * not possible to ever transition to that state. + */ +bool nvme_wait_reset(struct nvme_ctrl *ctrl) +{ + wait_event(ctrl->state_wq, + nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || + nvme_state_terminal(ctrl)); + return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING; +} +EXPORT_SYMBOL_GPL(nvme_wait_reset); + +static void nvme_free_ns_head(struct kref *ref) +{ + struct nvme_ns_head *head = + container_of(ref, struct nvme_ns_head, ref); + + nvme_mpath_remove_disk(head); + ida_free(&head->subsys->ns_ida, head->instance); + cleanup_srcu_struct(&head->srcu); + nvme_put_subsystem(head->subsys); + kfree(head); +} + +bool nvme_tryget_ns_head(struct nvme_ns_head *head) +{ + return kref_get_unless_zero(&head->ref); +} + +void nvme_put_ns_head(struct nvme_ns_head *head) +{ + kref_put(&head->ref, nvme_free_ns_head); +} + +static void nvme_free_ns(struct kref *kref) +{ + struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); + + put_disk(ns->disk); + nvme_put_ns_head(ns->head); + nvme_put_ctrl(ns->ctrl); + kfree(ns); +} + +static inline bool nvme_get_ns(struct nvme_ns *ns) +{ + return kref_get_unless_zero(&ns->kref); +} + +void nvme_put_ns(struct nvme_ns *ns) +{ + kref_put(&ns->kref, nvme_free_ns); +} +EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU); + +static inline void nvme_clear_nvme_request(struct request *req) +{ + nvme_req(req)->status = 0; + nvme_req(req)->retries = 0; + nvme_req(req)->flags = 0; + req->rq_flags |= RQF_DONTPREP; +} + +/* initialize a passthrough request */ +void nvme_init_request(struct request *req, struct nvme_command *cmd) +{ + if (req->q->queuedata) + req->timeout = NVME_IO_TIMEOUT; + else /* no queuedata implies admin queue */ + req->timeout = NVME_ADMIN_TIMEOUT; + + /* passthru commands should let the driver set the SGL flags */ + cmd->common.flags &= ~NVME_CMD_SGL_ALL; + + req->cmd_flags |= REQ_FAILFAST_DRIVER; + if (req->mq_hctx->type == HCTX_TYPE_POLL) + req->cmd_flags |= REQ_POLLED; + nvme_clear_nvme_request(req); + req->rq_flags |= RQF_QUIET; + memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); +} +EXPORT_SYMBOL_GPL(nvme_init_request); + +/* + * For something we're not in a state to send to the device the default action + * is to busy it and retry it after the controller state is recovered. However, + * if the controller is deleting or if anything is marked for failfast or + * nvme multipath it is immediately failed. + * + * Note: commands used to initialize the controller will be marked for failfast. + * Note: nvme cli/ioctl commands are marked for failfast. 
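+ * Such marked commands are failed immediately via nvme_host_path_error()
+ * below instead of being requeued against a controller that may never
+ * come back.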
+ */ +blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, + struct request *rq) +{ + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + if (state != NVME_CTRL_DELETING_NOIO && + state != NVME_CTRL_DELETING && + state != NVME_CTRL_DEAD && + !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && + !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) + return BLK_STS_RESOURCE; + return nvme_host_path_error(rq); +} +EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); + +bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, + bool queue_live) +{ + struct nvme_request *req = nvme_req(rq); + + /* + * currently we have a problem sending passthru commands + * on the admin_q if the controller is not LIVE because we can't + * make sure that they are going out after the admin connect, + * controller enable and/or other commands in the initialization + * sequence. until the controller will be LIVE, fail with + * BLK_STS_RESOURCE so that they will be rescheduled. + */ + if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) + return false; + + if (ctrl->ops->flags & NVME_F_FABRICS) { + /* + * Only allow commands on a live queue, except for the connect + * command, which is require to set the queue live in the + * appropinquate states. + */ + switch (nvme_ctrl_state(ctrl)) { + case NVME_CTRL_CONNECTING: + if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && + (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || + req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || + req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) + return true; + break; + default: + break; + case NVME_CTRL_DEAD: + return false; + } + } + + return queue_live; +} +EXPORT_SYMBOL_GPL(__nvme_check_ready); + +static inline void nvme_setup_flush(struct nvme_ns *ns, + struct nvme_command *cmnd) +{ + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->common.opcode = nvme_cmd_flush; + cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); +} + +static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, + struct nvme_command *cmnd) +{ + unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; + struct nvme_dsm_range *range; + struct bio *bio; + + /* + * Some devices do not consider the DSM 'Number of Ranges' field when + * determining how much data to DMA. Always allocate memory for maximum + * number of segments to prevent device reading beyond end of buffer. + */ + static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; + + range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); + if (!range) { + /* + * If we fail allocation our range, fallback to the controller + * discard page. If that's also busy, it's safe to return + * busy, as we know we can make progress once that's freed. 
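+		 * The fallback page is released in nvme_cleanup_cmd() once the
+		 * request completes, which clears discard_page_busy again.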
+ */ + if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) + return BLK_STS_RESOURCE; + + range = page_address(ns->ctrl->discard_page); + } + + if (queue_max_discard_segments(req->q) == 1) { + u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req)); + u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9); + + range[0].cattr = cpu_to_le32(0); + range[0].nlb = cpu_to_le32(nlb); + range[0].slba = cpu_to_le64(slba); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + + if (n < segments) { + range[n].cattr = cpu_to_le32(0); + range[n].nlb = cpu_to_le32(nlb); + range[n].slba = cpu_to_le64(slba); + } + n++; + } + } + + if (WARN_ON_ONCE(n != segments)) { + if (virt_to_page(range) == ns->ctrl->discard_page) + clear_bit_unlock(0, &ns->ctrl->discard_page_busy); + else + kfree(range); + return BLK_STS_IOERR; + } + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->dsm.opcode = nvme_cmd_dsm; + cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->dsm.nr = cpu_to_le32(segments - 1); + cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + bvec_set_virt(&req->special_vec, range, alloc_size); + req->rq_flags |= RQF_SPECIAL_PAYLOAD; + + return BLK_STS_OK; +} + +static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, + struct request *req) +{ + u32 upper, lower; + u64 ref48; + + /* both rw and write zeroes share the same reftag format */ + switch (ns->guard_type) { + case NVME_NVM_NS_16B_GUARD: + cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); + break; + case NVME_NVM_NS_64B_GUARD: + ref48 = ext_pi_ref_tag(req); + lower = lower_32_bits(ref48); + upper = upper_32_bits(ref48); + + cmnd->rw.reftag = cpu_to_le32(lower); + cmnd->rw.cdw3 = cpu_to_le32(upper); + break; + default: + break; + } +} + +static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd) +{ + memset(cmnd, 0, sizeof(*cmnd)); + + if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) + return nvme_setup_discard(ns, req, cmnd); + + cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; + cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->write_zeroes.slba = + cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + cmnd->write_zeroes.length = + cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + + if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC)) + cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); + + if (nvme_ns_has_pi(ns)) { + cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); + + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + nvme_set_ref_tag(ns, cmnd, req); + break; + } + } + + return BLK_STS_OK; +} + +static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd, + enum nvme_opcode op) +{ + u16 control = 0; + u32 dsmgmt = 0; + + if (req->cmd_flags & REQ_FUA) + control |= NVME_RW_FUA; + if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) + control |= NVME_RW_LR; + + if (req->cmd_flags & REQ_RAHEAD) + dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + + cmnd->rw.opcode = op; + cmnd->rw.flags = 0; + cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->rw.cdw2 = 0; + cmnd->rw.cdw3 = 0; + cmnd->rw.metadata = 0; + cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cmnd->rw.reftag = 0; + cmnd->rw.apptag = 0; + cmnd->rw.appmask = 0; + + if (ns->ms) 
{
+		/*
+		 * If formatted with metadata, the block layer always provides a
+		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
+		 * we enable the PRACT bit for protection information or set the
+		 * namespace capacity to zero to prevent any I/O.
+		 */
+		if (!blk_integrity_rq(req)) {
+			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+				return BLK_STS_NOTSUPP;
+			control |= NVME_RW_PRINFO_PRACT;
+		}
+
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			if (op == nvme_cmd_zone_append)
+				control |= NVME_RW_APPEND_PIREMAP;
+			nvme_set_ref_tag(ns, cmnd, req);
+			break;
+		}
+	}
+
+	cmnd->rw.control = cpu_to_le16(control);
+	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	return 0;
+}
+
+void nvme_cleanup_cmd(struct request *req)
+{
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
+		if (req->special_vec.bv_page == ctrl->discard_page)
+			clear_bit_unlock(0, &ctrl->discard_page_busy);
+		else
+			kfree(bvec_virt(&req->special_vec));
+	}
+}
+EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
+{
+	struct nvme_command *cmd = nvme_req(req)->cmd;
+	blk_status_t ret = BLK_STS_OK;
+
+	if (!(req->rq_flags & RQF_DONTPREP))
+		nvme_clear_nvme_request(req);
+
+	switch (req_op(req)) {
+	case REQ_OP_DRV_IN:
+	case REQ_OP_DRV_OUT:
+		/* these are set up prior to execution in nvme_init_request() */
+		break;
+	case REQ_OP_FLUSH:
+		nvme_setup_flush(ns, cmd);
+		break;
+	case REQ_OP_ZONE_RESET_ALL:
+	case REQ_OP_ZONE_RESET:
+		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
+		break;
+	case REQ_OP_ZONE_OPEN:
+		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
+		break;
+	case REQ_OP_ZONE_CLOSE:
+		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
+		break;
+	case REQ_OP_ZONE_FINISH:
+		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
+		break;
+	case REQ_OP_WRITE_ZEROES:
+		ret = nvme_setup_write_zeroes(ns, req, cmd);
+		break;
+	case REQ_OP_DISCARD:
+		ret = nvme_setup_discard(ns, req, cmd);
+		break;
+	case REQ_OP_READ:
+		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
+		break;
+	case REQ_OP_WRITE:
+		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
+		break;
+	case REQ_OP_ZONE_APPEND:
+		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return BLK_STS_IOERR;
+	}
+
+	cmd->common.command_id = nvme_cid(req);
+	trace_nvme_setup_cmd(req, cmd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+
+/*
+ * Return values:
+ * 0: success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+int nvme_execute_rq(struct request *rq, bool at_head)
+{
+	blk_status_t status;
+
+	status = blk_execute_rq(rq, at_head);
+	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+		return -EINTR;
+	if (nvme_req(rq)->status)
+		return nvme_req(rq)->status;
+	return blk_status_to_errno(status);
+}
+EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
+
+/*
+ * Returns 0 on success.  If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+		union nvme_result *result, void *buffer, unsigned bufflen,
+		int qid, int at_head, blk_mq_req_flags_t flags)
+{
+	struct request *req;
+	int ret;
+
+	if (qid == NVME_QID_ANY)
+		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+	else
+		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+						qid - 1);
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	nvme_init_request(req, cmd);
+
+	if (buffer && bufflen) {
+		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+		if (ret)
+			goto out;
+	}
+
+	ret = nvme_execute_rq(req, at_head);
+	if (result && ret >= 0)
+		*result = nvme_req(req)->result;
+ out:
+	blk_mq_free_request(req);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
+
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void *buffer, unsigned bufflen)
+{
+	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
+			NVME_QID_ANY, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+	u32 effects = 0;
+
+	if (ns) {
+		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+			dev_warn_once(ctrl->device,
+				"IO command:%02x has unusual effects:%08x\n",
+				opcode, effects);
+
+		/*
+		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
+		 * which would deadlock when done on an I/O command.  Note that
+		 * we already warn about an unusual effect above.
+		 */
+		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
+	} else {
+		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+	}
+
+	return effects;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
+
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+	u32 effects = nvme_command_effects(ctrl, ns, opcode);
+
+	/*
+	 * For simplicity, IO to all namespaces is quiesced even if the command
+	 * effects say only one namespace is affected.
+	 */
+	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+		mutex_lock(&ctrl->scan_lock);
+		mutex_lock(&ctrl->subsys->lock);
+		nvme_mpath_start_freeze(ctrl->subsys);
+		nvme_mpath_wait_freeze(ctrl->subsys);
+		nvme_start_freeze(ctrl);
+		nvme_wait_freeze(ctrl);
+	}
+	return effects;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
+
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+		struct nvme_command *cmd, int status)
+{
+	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+		nvme_unfreeze(ctrl);
+		nvme_mpath_unfreeze(ctrl->subsys);
+		mutex_unlock(&ctrl->subsys->lock);
+		mutex_unlock(&ctrl->scan_lock);
+	}
+	if (effects & NVME_CMD_EFFECTS_CCC) {
+		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
+				      &ctrl->flags)) {
+			dev_info(ctrl->device,
+"controller capabilities changed, reset may be required to take effect.\n");
+		}
+	}
+	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
+		nvme_queue_scan(ctrl);
+		flush_work(&ctrl->scan_work);
+	}
+	if (ns)
+		return;
+
+	switch (cmd->common.opcode) {
+	case nvme_admin_set_features:
+		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
+		case NVME_FEAT_KATO:
+			/*
+			 * The keep alive command interval on the host should be
+			 * updated when KATO is modified by Set Features
+			 * commands.
+ */ + if (!status) + nvme_update_keep_alive(ctrl, cmd); + break; + default: + break; + } + break; + default: + break; + } +} +EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); + +/* + * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: + * + * The host should send Keep Alive commands at half of the Keep Alive Timeout + * accounting for transport roundtrip times [..]. + */ +static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) +{ + unsigned long delay = ctrl->kato * HZ / 2; + + /* + * When using Traffic Based Keep Alive, we need to run + * nvme_keep_alive_work at twice the normal frequency, as one + * command completion can postpone sending a keep alive command + * by up to twice the delay between runs. + */ + if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) + delay /= 2; + return delay; +} + +static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) +{ + queue_delayed_work(nvme_wq, &ctrl->ka_work, + nvme_keep_alive_work_period(ctrl)); +} + +static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, + blk_status_t status) +{ + struct nvme_ctrl *ctrl = rq->end_io_data; + unsigned long flags; + bool startka = false; + unsigned long rtt = jiffies - (rq->deadline - rq->timeout); + unsigned long delay = nvme_keep_alive_work_period(ctrl); + + /* + * Subtract off the keepalive RTT so nvme_keep_alive_work runs + * at the desired frequency. + */ + if (rtt <= delay) { + delay -= rtt; + } else { + dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", + jiffies_to_msecs(rtt)); + delay = 0; + } + + blk_mq_free_request(rq); + + if (status) { + dev_err(ctrl->device, + "failed nvme_keep_alive_end_io error=%d\n", + status); + return RQ_END_IO_NONE; + } + + ctrl->ka_last_check_time = jiffies; + ctrl->comp_seen = false; + spin_lock_irqsave(&ctrl->lock, flags); + if (ctrl->state == NVME_CTRL_LIVE || + ctrl->state == NVME_CTRL_CONNECTING) + startka = true; + spin_unlock_irqrestore(&ctrl->lock, flags); + if (startka) + queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); + return RQ_END_IO_NONE; +} + +static void nvme_keep_alive_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), + struct nvme_ctrl, ka_work); + bool comp_seen = ctrl->comp_seen; + struct request *rq; + + ctrl->ka_last_check_time = jiffies; + + if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { + dev_dbg(ctrl->device, + "reschedule traffic based keep-alive timer\n"); + ctrl->comp_seen = false; + nvme_queue_keep_alive_work(ctrl); + return; + } + + rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd), + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + if (IS_ERR(rq)) { + /* allocation failure, reset the controller */ + dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); + nvme_reset_ctrl(ctrl); + return; + } + nvme_init_request(rq, &ctrl->ka_cmd); + + rq->timeout = ctrl->kato * HZ; + rq->end_io = nvme_keep_alive_end_io; + rq->end_io_data = ctrl; + blk_execute_rq_nowait(rq, false); +} + +static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) +{ + if (unlikely(ctrl->kato == 0)) + return; + + nvme_queue_keep_alive_work(ctrl); +} + +void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) +{ + if (unlikely(ctrl->kato == 0)) + return; + + cancel_delayed_work_sync(&ctrl->ka_work); +} +EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); + +static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, + struct nvme_command *cmd) +{ + unsigned int new_kato = + DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); + + dev_info(ctrl->device, + "keep 
alive interval updated from %u ms to %u ms\n", + ctrl->kato * 1000 / 2, new_kato * 1000 / 2); + + nvme_stop_keep_alive(ctrl); + ctrl->kato = new_kato; + nvme_start_keep_alive(ctrl); +} + +/* + * In NVMe 1.0 the CNS field was just a binary controller or namespace + * flag, thus sending any new CNS opcodes has a big chance of not working. + * Qemu unfortunately had that bug after reporting a 1.1 version compliance + * (but not for any later version). + */ +static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) +{ + if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) + return ctrl->vs < NVME_VS(1, 2, 0); + return ctrl->vs < NVME_VS(1, 1, 0); +} + +static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) +{ + struct nvme_command c = { }; + int error; + + /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ + c.identify.opcode = nvme_admin_identify; + c.identify.cns = NVME_ID_CNS_CTRL; + + *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); + if (!*id) + return -ENOMEM; + + error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, + sizeof(struct nvme_id_ctrl)); + if (error) + kfree(*id); + return error; +} + +static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, + struct nvme_ns_id_desc *cur, bool *csi_seen) +{ + const char *warn_str = "ctrl returned bogus length:"; + void *data = cur; + + switch (cur->nidt) { + case NVME_NIDT_EUI64: + if (cur->nidl != NVME_NIDT_EUI64_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", + warn_str, cur->nidl); + return -1; + } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_EUI64_LEN; + memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); + return NVME_NIDT_EUI64_LEN; + case NVME_NIDT_NGUID: + if (cur->nidl != NVME_NIDT_NGUID_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", + warn_str, cur->nidl); + return -1; + } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_NGUID_LEN; + memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); + return NVME_NIDT_NGUID_LEN; + case NVME_NIDT_UUID: + if (cur->nidl != NVME_NIDT_UUID_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", + warn_str, cur->nidl); + return -1; + } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_UUID_LEN; + uuid_copy(&ids->uuid, data + sizeof(*cur)); + return NVME_NIDT_UUID_LEN; + case NVME_NIDT_CSI: + if (cur->nidl != NVME_NIDT_CSI_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", + warn_str, cur->nidl); + return -1; + } + memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); + *csi_seen = true; + return NVME_NIDT_CSI_LEN; + default: + /* Skip unknown types */ + return cur->nidl; + } +} + +static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info) +{ + struct nvme_command c = { }; + bool csi_seen = false; + int status, pos, len; + void *data; + + if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) + return 0; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(info->nsid); + c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; + + data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); + if (!data) + return -ENOMEM; + + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, + NVME_IDENTIFY_DATA_SIZE); + if (status) { + dev_warn(ctrl->device, + "Identify Descriptors failed (nsid=%u, status=0x%x)\n", + info->nsid, status); + goto free_data; + } + + for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { + struct nvme_ns_id_desc 
*cur = data + pos; + + if (cur->nidl == 0) + break; + + len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen); + if (len < 0) + break; + + len += sizeof(*cur); + } + + if (nvme_multi_css(ctrl) && !csi_seen) { + dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", + info->nsid); + status = -EINVAL; + } + +free_data: + kfree(data); + return status; +} + +static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, + struct nvme_id_ns **id) +{ + struct nvme_command c = { }; + int error; + + /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(nsid); + c.identify.cns = NVME_ID_CNS_NS; + + *id = kmalloc(sizeof(**id), GFP_KERNEL); + if (!*id) + return -ENOMEM; + + error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); + if (error) { + dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); + kfree(*id); + } + return error; +} + +static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info) +{ + struct nvme_ns_ids *ids = &info->ids; + struct nvme_id_ns *id; + int ret; + + ret = nvme_identify_ns(ctrl, info->nsid, &id); + if (ret) + return ret; + + if (id->ncap == 0) { + /* namespace not allocated or attached */ + info->is_removed = true; + ret = -ENODEV; + goto error; + } + + info->anagrpid = id->anagrpid; + info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; + info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; + info->is_ready = true; + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { + dev_info(ctrl->device, + "Ignoring bogus Namespace Identifiers\n"); + } else { + if (ctrl->vs >= NVME_VS(1, 1, 0) && + !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0) && + !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); + } + +error: + kfree(id); + return ret; +} + +static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info) +{ + struct nvme_id_ns_cs_indep *id; + struct nvme_command c = { + .identify.opcode = nvme_admin_identify, + .identify.nsid = cpu_to_le32(info->nsid), + .identify.cns = NVME_ID_CNS_NS_CS_INDEP, + }; + int ret; + + id = kmalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return -ENOMEM; + + ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); + if (!ret) { + info->anagrpid = id->anagrpid; + info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; + info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; + info->is_ready = id->nstat & NVME_NSTAT_NRDY; + } + kfree(id); + return ret; +} + +static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, + unsigned int dword11, void *buffer, size_t buflen, u32 *result) +{ + union nvme_result res = { 0 }; + struct nvme_command c = { }; + int ret; + + c.features.opcode = op; + c.features.fid = cpu_to_le32(fid); + c.features.dword11 = cpu_to_le32(dword11); + + ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, + buffer, buflen, NVME_QID_ANY, 0, 0); + if (ret >= 0 && result) + *result = le32_to_cpu(res.u32); + return ret; +} + +int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, + unsigned int dword11, void *buffer, size_t buflen, + u32 *result) +{ + return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, + buflen, result); +} +EXPORT_SYMBOL_GPL(nvme_set_features); + +int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, + unsigned int dword11, void *buffer, size_t buflen, + u32 
*result) +{ + return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, + buflen, result); +} +EXPORT_SYMBOL_GPL(nvme_get_features); + +int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) +{ + u32 q_count = (*count - 1) | ((*count - 1) << 16); + u32 result; + int status, nr_io_queues; + + status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, + &result); + if (status < 0) + return status; + + /* + * Degraded controllers might return an error when setting the queue + * count. We still want to be able to bring them online and offer + * access to the admin queue, as that might be only way to fix them up. + */ + if (status > 0) { + dev_err(ctrl->device, "Could not set queue count (%d)\n", status); + *count = 0; + } else { + nr_io_queues = min(result & 0xffff, result >> 16) + 1; + *count = min(*count, nr_io_queues); + } + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_set_queue_count); + +#define NVME_AEN_SUPPORTED \ + (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ + NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) + +static void nvme_enable_aen(struct nvme_ctrl *ctrl) +{ + u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; + int status; + + if (!supported_aens) + return; + + status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, + NULL, 0, &result); + if (status) + dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", + supported_aens); + + queue_work(nvme_wq, &ctrl->async_event_work); +} + +static int nvme_ns_open(struct nvme_ns *ns) +{ + + /* should never be called due to GENHD_FL_HIDDEN */ + if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) + goto fail; + if (!nvme_get_ns(ns)) + goto fail; + if (!try_module_get(ns->ctrl->ops->module)) + goto fail_put_ns; + + return 0; + +fail_put_ns: + nvme_put_ns(ns); +fail: + return -ENXIO; +} + +static void nvme_ns_release(struct nvme_ns *ns) +{ + + module_put(ns->ctrl->ops->module); + nvme_put_ns(ns); +} + +static int nvme_open(struct gendisk *disk, blk_mode_t mode) +{ + return nvme_ns_open(disk->private_data); +} + +static void nvme_release(struct gendisk *disk) +{ + nvme_ns_release(disk->private_data); +} + +int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + /* some standard values */ + geo->heads = 1 << 6; + geo->sectors = 1 << 5; + geo->cylinders = get_capacity(bdev->bd_disk) >> 11; + return 0; +} + +#ifdef CONFIG_BLK_DEV_INTEGRITY +static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, + u32 max_integrity_segments) +{ + struct blk_integrity integrity = { }; + + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE3: + switch (ns->guard_type) { + case NVME_NVM_NS_16B_GUARD: + integrity.profile = &t10_pi_type3_crc; + integrity.tag_size = sizeof(u16) + sizeof(u32); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + case NVME_NVM_NS_64B_GUARD: + integrity.profile = &ext_pi_type3_crc64; + integrity.tag_size = sizeof(u16) + 6; + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + default: + integrity.profile = NULL; + break; + } + break; + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + switch (ns->guard_type) { + case NVME_NVM_NS_16B_GUARD: + integrity.profile = &t10_pi_type1_crc; + integrity.tag_size = sizeof(u16); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + case NVME_NVM_NS_64B_GUARD: + integrity.profile = &ext_pi_type1_crc64; + integrity.tag_size = sizeof(u16); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + default: + integrity.profile = NULL; + break; + } + break; + default: + 
integrity.profile = NULL; + break; + } + + integrity.tuple_size = ns->ms; + blk_integrity_register(disk, &integrity); + blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); +} +#else +static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, + u32 max_integrity_segments) +{ +} +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + struct request_queue *queue = disk->queue; + u32 size = queue_logical_block_size(queue); + + if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) + ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl); + + if (ctrl->max_discard_sectors == 0) { + blk_queue_max_discard_sectors(queue, 0); + return; + } + + BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < + NVME_DSM_MAX_RANGES); + + queue->limits.discard_granularity = size; + + /* If discard is already enabled, don't reset queue limits */ + if (queue->limits.max_discard_sectors) + return; + + blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); + blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); + + if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) + blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); +} + +static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) +{ + return uuid_equal(&a->uuid, &b->uuid) && + memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && + memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && + a->csi == b->csi; +} + +static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) +{ + bool first = id->dps & NVME_NS_DPS_PI_FIRST; + unsigned lbaf = nvme_lbaf_index(id->flbas); + struct nvme_ctrl *ctrl = ns->ctrl; + struct nvme_command c = { }; + struct nvme_id_ns_nvm *nvm; + int ret = 0; + u32 elbaf; + + ns->pi_size = 0; + ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { + ns->pi_size = sizeof(struct t10_pi_tuple); + ns->guard_type = NVME_NVM_NS_16B_GUARD; + goto set_pi; + } + + nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); + if (!nvm) + return -ENOMEM; + + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(ns->head->ns_id); + c.identify.cns = NVME_ID_CNS_CS_NS; + c.identify.csi = NVME_CSI_NVM; + + ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); + if (ret) + goto free_data; + + elbaf = le32_to_cpu(nvm->elbaf[lbaf]); + + /* no support for storage tag formats right now */ + if (nvme_elbaf_sts(elbaf)) + goto free_data; + + ns->guard_type = nvme_elbaf_guard_type(elbaf); + switch (ns->guard_type) { + case NVME_NVM_NS_64B_GUARD: + ns->pi_size = sizeof(struct crc64_pi_tuple); + break; + case NVME_NVM_NS_16B_GUARD: + ns->pi_size = sizeof(struct t10_pi_tuple); + break; + default: + break; + } + +free_data: + kfree(nvm); +set_pi: + if (ns->pi_size && (first || ns->ms == ns->pi_size)) + ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; + else + ns->pi_type = 0; + + return ret; +} + +static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + int ret; + + ret = nvme_init_ms(ns, id); + if (ret) + return ret; + + ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); + if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) + return 0; + + if (ctrl->ops->flags & NVME_F_FABRICS) { + /* + * The NVMe over Fabrics specification only supports metadata as + * part of the extended data LBA. 
We rely on HCA/HBA support to + * remap the separate metadata buffer from the block layer. + */ + if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) + return 0; + + ns->features |= NVME_NS_EXT_LBAS; + + /* + * The current fabrics transport drivers support namespace + * metadata formats only if nvme_ns_has_pi() returns true. + * Suppress support for all other formats so the namespace will + * have a 0 capacity and not be usable through the block stack. + * + * Note, this check will need to be modified if any drivers + * gain the ability to use other metadata formats. + */ + if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) + ns->features |= NVME_NS_METADATA_SUPPORTED; + } else { + /* + * For PCIe controllers, we can't easily remap the separate + * metadata buffer from the block layer and thus require a + * separate metadata buffer for block layer metadata/PI support. + * We allow extended LBAs for the passthrough interface, though. + */ + if (id->flbas & NVME_NS_FLBAS_META_EXT) + ns->features |= NVME_NS_EXT_LBAS; + else + ns->features |= NVME_NS_METADATA_SUPPORTED; + } + return 0; +} + +static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, + struct request_queue *q) +{ + bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; + + if (ctrl->max_hw_sectors) { + u32 max_segments = + (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; + + max_segments = min_not_zero(max_segments, ctrl->max_segments); + blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); + blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); + } + blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); + blk_queue_dma_alignment(q, 3); + blk_queue_write_cache(q, vwc, vwc); +} + +static void nvme_update_disk_info(struct gendisk *disk, + struct nvme_ns *ns, struct nvme_id_ns *id) +{ + sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); + u32 bs = 1U << ns->lba_shift; + u32 atomic_bs, phys_bs, io_opt = 0; + + /* + * The block layer can't support LBA sizes larger than the page size + * or smaller than a sector size yet, so catch this early and don't + * allow block I/O. + */ + if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) { + capacity = 0; + bs = (1 << 9); + } + + blk_integrity_unregister(disk); + + atomic_bs = phys_bs = bs; + if (id->nabo == 0) { + /* + * Bit 1 indicates whether NAWUPF is defined for this namespace + * and whether it should be used instead of AWUPF. If NAWUPF == + * 0 then AWUPF must be used instead. + */ + if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) + atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; + else + atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + } + + if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { + /* NPWG = Namespace Preferred Write Granularity */ + phys_bs = bs * (1 + le16_to_cpu(id->npwg)); + /* NOWS = Namespace Optimal Write Size */ + io_opt = bs * (1 + le16_to_cpu(id->nows)); + } + + blk_queue_logical_block_size(disk->queue, bs); + /* + * Linux filesystems assume writing a single physical block is + * an atomic operation. Hence limit the physical block size to the + * value of the Atomic Write Unit Power Fail parameter. + */ + blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); + blk_queue_io_min(disk->queue, phys_bs); + blk_queue_io_opt(disk->queue, io_opt); + + /* + * Register a metadata profile for PI, or the plain non-integrity NVMe + * metadata masquerading as Type 0 if supported, otherwise reject block + * I/O to namespaces with metadata except when the namespace supports + * PI, as it can strip/insert in that case. 
+ */ + if (ns->ms) { + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + (ns->features & NVME_NS_METADATA_SUPPORTED)) + nvme_init_integrity(disk, ns, + ns->ctrl->max_integrity_segments); + else if (!nvme_ns_has_pi(ns)) + capacity = 0; + } + + set_capacity_and_notify(disk, capacity); + + nvme_config_discard(disk, ns); + blk_queue_max_write_zeroes_sectors(disk->queue, + ns->ctrl->max_zeroes_sectors); +} + +static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); +} + +static inline bool nvme_first_scan(struct gendisk *disk) +{ + /* nvme_alloc_ns() scans the disk prior to adding it */ + return !disk_live(disk); +} + +static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + u32 iob; + + if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && + is_power_of_2(ctrl->max_hw_sectors)) + iob = ctrl->max_hw_sectors; + else + iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); + + if (!iob) + return; + + if (!is_power_of_2(iob)) { + if (nvme_first_scan(ns->disk)) + pr_warn("%s: ignoring unaligned IO boundary:%u\n", + ns->disk->disk_name, iob); + return; + } + + if (blk_queue_is_zoned(ns->disk->queue)) { + if (nvme_first_scan(ns->disk)) + pr_warn("%s: ignoring zoned namespace IO boundary\n", + ns->disk->disk_name); + return; + } + + blk_queue_chunk_sectors(ns->queue, iob); +} + +static int nvme_update_ns_info_generic(struct nvme_ns *ns, + struct nvme_ns_info *info) +{ + blk_mq_freeze_queue(ns->disk->queue); + nvme_set_queue_limits(ns->ctrl, ns->queue); + set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); + blk_mq_unfreeze_queue(ns->disk->queue); + + if (nvme_ns_head_multipath(ns->head)) { + blk_mq_freeze_queue(ns->head->disk->queue); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + blk_stack_limits(&ns->head->disk->queue->limits, + &ns->queue->limits, 0); + ns->head->disk->flags |= GENHD_FL_HIDDEN; + blk_mq_unfreeze_queue(ns->head->disk->queue); + } + + /* Hide the block-interface for these devices */ + ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); + + return 0; +} + +static int nvme_update_ns_info_block(struct nvme_ns *ns, + struct nvme_ns_info *info) +{ + struct nvme_id_ns *id; + unsigned lbaf; + int ret; + + ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); + if (ret) + return ret; + + if (id->ncap == 0) { + /* namespace not allocated or attached */ + info->is_removed = true; + ret = -ENODEV; + goto error; + } + + blk_mq_freeze_queue(ns->disk->queue); + lbaf = nvme_lbaf_index(id->flbas); + ns->lba_shift = id->lbaf[lbaf].ds; + nvme_set_queue_limits(ns->ctrl, ns->queue); + + ret = nvme_configure_metadata(ns, id); + if (ret < 0) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } + nvme_set_chunk_sectors(ns, id); + nvme_update_disk_info(ns->disk, ns, id); + + if (ns->head->ids.csi == NVME_CSI_ZNS) { + ret = nvme_update_zone_info(ns, lbaf); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } + } + + /* + * Only set the DEAC bit if the device guarantees that reads from + * deallocated data return zeroes. While the DEAC bit does not + * require that, it must be a no-op if reads from deallocated data + * do not return zeroes. 
+ */ + if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) + ns->features |= NVME_NS_DEAC; + set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); + set_bit(NVME_NS_READY, &ns->flags); + blk_mq_unfreeze_queue(ns->disk->queue); + + if (blk_queue_is_zoned(ns->queue)) { + ret = nvme_revalidate_zones(ns); + if (ret && !nvme_first_scan(ns->disk)) + goto out; + } + + if (nvme_ns_head_multipath(ns->head)) { + blk_mq_freeze_queue(ns->head->disk->queue); + nvme_update_disk_info(ns->head->disk, ns, id); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + blk_stack_limits(&ns->head->disk->queue->limits, + &ns->queue->limits, 0); + disk_update_readahead(ns->head->disk); + blk_mq_unfreeze_queue(ns->head->disk->queue); + } + + ret = 0; +out: + /* + * If probing fails due an unsupported feature, hide the block device, + * but still allow other access. + */ + if (ret == -ENODEV) { + ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); + ret = 0; + } + +error: + kfree(id); + return ret; +} + +static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + switch (info->ids.csi) { + case NVME_CSI_ZNS: + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + dev_info(ns->ctrl->device, + "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", + info->nsid); + return nvme_update_ns_info_generic(ns, info); + } + return nvme_update_ns_info_block(ns, info); + case NVME_CSI_NVM: + return nvme_update_ns_info_block(ns, info); + default: + dev_info(ns->ctrl->device, + "block device for nsid %u not supported (csi %u)\n", + info->nsid, info->ids.csi); + return nvme_update_ns_info_generic(ns, info); + } +} + +#ifdef CONFIG_BLK_SED_OPAL +static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, + bool send) +{ + struct nvme_ctrl *ctrl = data; + struct nvme_command cmd = { }; + + if (send) + cmd.common.opcode = nvme_admin_security_send; + else + cmd.common.opcode = nvme_admin_security_recv; + cmd.common.nsid = 0; + cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); + cmd.common.cdw11 = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + NVME_QID_ANY, 1, 0); +} + +static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) +{ + if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { + if (!ctrl->opal_dev) + ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); + else if (was_suspended) + opal_unlock_from_suspend(ctrl->opal_dev); + } else { + free_opal_dev(ctrl->opal_dev); + ctrl->opal_dev = NULL; + } +} +#else +static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) +{ +} +#endif /* CONFIG_BLK_SED_OPAL */ + +#ifdef CONFIG_BLK_DEV_ZONED +static int nvme_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, + data); +} +#else +#define nvme_report_zones NULL +#endif /* CONFIG_BLK_DEV_ZONED */ + +const struct block_device_operations nvme_bdev_ops = { + .owner = THIS_MODULE, + .ioctl = nvme_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, + .open = nvme_open, + .release = nvme_release, + .getgeo = nvme_getgeo, + .report_zones = nvme_report_zones, + .pr_ops = &nvme_pr_ops, +}; + +static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, + u32 timeout, const char *op) +{ + unsigned long timeout_jiffies = jiffies + timeout * HZ; + u32 csts; + int ret; + + while ((ret = 
ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { + if (csts == ~0) + return -ENODEV; + if ((csts & mask) == val) + break; + + usleep_range(1000, 2000); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout_jiffies)) { + dev_err(ctrl->device, + "Device not ready; aborting %s, CSTS=0x%x\n", + op, csts); + return -ENODEV; + } + } + + return ret; +} + +int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) +{ + int ret; + + ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; + if (shutdown) + ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; + else + ctrl->ctrl_config &= ~NVME_CC_ENABLE; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); + if (ret) + return ret; + + if (shutdown) { + return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, + NVME_CSTS_SHST_CMPLT, + ctrl->shutdown_timeout, "shutdown"); + } + if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) + msleep(NVME_QUIRK_DELAY_AMOUNT); + return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, + (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); +} +EXPORT_SYMBOL_GPL(nvme_disable_ctrl); + +int nvme_enable_ctrl(struct nvme_ctrl *ctrl) +{ + unsigned dev_page_min; + u32 timeout; + int ret; + + ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); + if (ret) { + dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); + return ret; + } + dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; + + if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { + dev_err(ctrl->device, + "Minimum device page size %u too large for host (%u)\n", + 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); + return -ENODEV; + } + + if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) + ctrl->ctrl_config = NVME_CC_CSS_CSI; + else + ctrl->ctrl_config = NVME_CC_CSS_NVM; + + if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) + ctrl->ctrl_config |= NVME_CC_CRIME; + + ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; + ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; + ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); + if (ret) + return ret; + + /* Flush write to device (required if transport is PCI) */ + ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); + if (ret) + return ret; + + /* CAP value may change after initial CC write */ + ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); + if (ret) + return ret; + + timeout = NVME_CAP_TIMEOUT(ctrl->cap); + if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { + u32 crto, ready_timeout; + + ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); + if (ret) { + dev_err(ctrl->device, "Reading CRTO failed (%d)\n", + ret); + return ret; + } + + /* + * CRTO should always be greater or equal to CAP.TO, but some + * devices are known to get this wrong. Use the larger of the + * two values. 
+ */ + if (ctrl->ctrl_config & NVME_CC_CRIME) + ready_timeout = NVME_CRTO_CRIMT(crto); + else + ready_timeout = NVME_CRTO_CRWMT(crto); + + if (ready_timeout < timeout) + dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", + crto, ctrl->cap); + else + timeout = ready_timeout; + } + + ctrl->ctrl_config |= NVME_CC_ENABLE; + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); + if (ret) + return ret; + return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, + (timeout + 1) / 2, "initialisation"); +} +EXPORT_SYMBOL_GPL(nvme_enable_ctrl); + +static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) +{ + __le64 ts; + int ret; + + if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) + return 0; + + ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); + ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), + NULL); + if (ret) + dev_warn_once(ctrl->device, + "could not set timestamp (%d)\n", ret); + return ret; +} + +static int nvme_configure_host_options(struct nvme_ctrl *ctrl) +{ + struct nvme_feat_host_behavior *host; + u8 acre = 0, lbafee = 0; + int ret; + + /* Don't bother enabling the feature if retry delay is not reported */ + if (ctrl->crdt[0]) + acre = NVME_ENABLE_ACRE; + if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) + lbafee = NVME_ENABLE_LBAFEE; + + if (!acre && !lbafee) + return 0; + + host = kzalloc(sizeof(*host), GFP_KERNEL); + if (!host) + return 0; + + host->acre = acre; + host->lbafee = lbafee; + ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, + host, sizeof(*host), NULL); + kfree(host); + return ret; +} + +/* + * The function checks whether the given total (exlat + enlat) latency of + * a power state allows the latter to be used as an APST transition target. + * It does so by comparing the latency to the primary and secondary latency + * tolerances defined by module params. If there's a match, the corresponding + * timeout value is returned and the matching tolerance index (1 or 2) is + * reported. + */ +static bool nvme_apst_get_transition_time(u64 total_latency, + u64 *transition_time, unsigned *last_index) +{ + if (total_latency <= apst_primary_latency_tol_us) { + if (*last_index == 1) + return false; + *last_index = 1; + *transition_time = apst_primary_timeout_ms; + return true; + } + if (apst_secondary_timeout_ms && + total_latency <= apst_secondary_latency_tol_us) { + if (*last_index <= 2) + return false; + *last_index = 2; + *transition_time = apst_secondary_timeout_ms; + return true; + } + return false; +} + +/* + * APST (Autonomous Power State Transition) lets us program a table of power + * state transitions that the controller will perform automatically. + * + * Depending on module params, one of the two supported techniques will be used: + * + * - If the parameters provide explicit timeouts and tolerances, they will be + * used to build a table with up to 2 non-operational states to transition to. + * The default parameter values were selected based on the values used by + * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic + * regeneration of the APST table in the event of switching between external + * and battery power, the timeouts and tolerances reflect a compromise + * between values used by Microsoft for AC and battery scenarios. + * - If not, we'll configure the table with a simple heuristic: we are willing + * to spend at most 2% of the time transitioning between power states. 
+ * Therefore, when running in any given state, we will enter the next + * lower-power non-operational state after waiting 50 * (enlat + exlat) + * microseconds, as long as that state's exit latency is under the requested + * maximum latency. + * + * We will not autonomously enter any non-operational state for which the total + * latency exceeds ps_max_latency_us. + * + * Users can set ps_max_latency_us to zero to turn off APST. + */ +static int nvme_configure_apst(struct nvme_ctrl *ctrl) +{ + struct nvme_feat_auto_pst *table; + unsigned apste = 0; + u64 max_lat_us = 0; + __le64 target = 0; + int max_ps = -1; + int state; + int ret; + unsigned last_lt_index = UINT_MAX; + + /* + * If APST isn't supported or if we haven't been initialized yet, + * then don't do anything. + */ + if (!ctrl->apsta) + return 0; + + if (ctrl->npss > 31) { + dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); + return 0; + } + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return 0; + + if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { + /* Turn off APST. */ + dev_dbg(ctrl->device, "APST disabled\n"); + goto done; + } + + /* + * Walk through all states from lowest- to highest-power. + * According to the spec, lower-numbered states use more power. NPSS, + * despite the name, is the index of the lowest-power state, not the + * number of states. + */ + for (state = (int)ctrl->npss; state >= 0; state--) { + u64 total_latency_us, exit_latency_us, transition_ms; + + if (target) + table->entries[state] = target; + + /* + * Don't allow transitions to the deepest state if it's quirked + * off. + */ + if (state == ctrl->npss && + (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) + continue; + + /* + * Is this state a useful non-operational state for higher-power + * states to autonomously transition to? + */ + if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) + continue; + + exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); + if (exit_latency_us > ctrl->ps_max_latency_us) + continue; + + total_latency_us = exit_latency_us + + le32_to_cpu(ctrl->psd[state].entry_lat); + + /* + * This state is good. It can be used as the APST idle target + * for higher power states. 
+ */ + if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { + if (!nvme_apst_get_transition_time(total_latency_us, + &transition_ms, &last_lt_index)) + continue; + } else { + transition_ms = total_latency_us + 19; + do_div(transition_ms, 20); + if (transition_ms > (1 << 24) - 1) + transition_ms = (1 << 24) - 1; + } + + target = cpu_to_le64((state << 3) | (transition_ms << 8)); + if (max_ps == -1) + max_ps = state; + if (total_latency_us > max_lat_us) + max_lat_us = total_latency_us; + } + + if (max_ps == -1) + dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); + else + dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", + max_ps, max_lat_us, (int)sizeof(*table), table); + apste = 1; + +done: + ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, + table, sizeof(*table), NULL); + if (ret) + dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); + kfree(table); + return ret; +} + +static void nvme_set_latency_tolerance(struct device *dev, s32 val) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + u64 latency; + + switch (val) { + case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: + case PM_QOS_LATENCY_ANY: + latency = U64_MAX; + break; + + default: + latency = val; + } + + if (ctrl->ps_max_latency_us != latency) { + ctrl->ps_max_latency_us = latency; + if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) + nvme_configure_apst(ctrl); + } +} + +struct nvme_core_quirk_entry { + /* + * NVMe model and firmware strings are padded with spaces. For + * simplicity, strings in the quirk table are padded with NULLs + * instead. + */ + u16 vid; + const char *mn; + const char *fr; + unsigned long quirks; +}; + +static const struct nvme_core_quirk_entry core_quirks[] = { + { + /* + * This Toshiba device seems to die using any APST states. See: + * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 + */ + .vid = 0x1179, + .mn = "THNSF5256GPUK TOSHIBA", + .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * This LiteON CL1-3D*-Q11 firmware version has a race + * condition associated with actions related to suspend to idle + * LiteON has resolved the problem in future firmware + */ + .vid = 0x14a4, + .fr = "22301111", + .quirks = NVME_QUIRK_SIMPLE_SUSPEND, + }, + { + /* + * This Kioxia CD6-V Series / HPE PE8030 device times out and + * aborts I/O during any load, but more easily reproducible + * with discards (fstrim). + * + * The device is left in a state where it is also not possible + * to use "nvme set-feature" to disable APST, but booting with + * nvme_core.default_ps_max_latency=0 works. + */ + .vid = 0x1e0f, + .mn = "KCD6XVUL6T40", + .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * The external Samsung X5 SSD fails initialization without a + * delay before checking if it is ready and has a whole set of + * other problems. To make this even more interesting, it + * shares the PCI ID with internal Samsung 970 Evo Plus that + * does not need or want these quirks. + */ + .vid = 0x144d, + .mn = "Samsung Portable SSD X5", + .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, + } +}; + +/* match is null-terminated but idstr is space-padded. 
*/ +static bool string_matches(const char *idstr, const char *match, size_t len) +{ + size_t matchlen; + + if (!match) + return true; + + matchlen = strlen(match); + WARN_ON_ONCE(matchlen > len); + + if (memcmp(idstr, match, matchlen)) + return false; + + for (; matchlen < len; matchlen++) + if (idstr[matchlen] != ' ') + return false; + + return true; +} + +static bool quirk_matches(const struct nvme_id_ctrl *id, + const struct nvme_core_quirk_entry *q) +{ + return q->vid == le16_to_cpu(id->vid) && + string_matches(id->mn, q->mn, sizeof(id->mn)) && + string_matches(id->fr, q->fr, sizeof(id->fr)); +} + +static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, + struct nvme_id_ctrl *id) +{ + size_t nqnlen; + int off; + + if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { + nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); + if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { + strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); + return; + } + + if (ctrl->vs >= NVME_VS(1, 2, 1)) + dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); + } + + /* + * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe + * Base Specification 2.0. It is slightly different from the format + * specified there due to historic reasons, and we can't change it now. + */ + off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, + "nqn.2014.08.org.nvmexpress:%04x%04x", + le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); + memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); + off += sizeof(id->sn); + memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); + off += sizeof(id->mn); + memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); +} + +static void nvme_release_subsystem(struct device *dev) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + if (subsys->instance >= 0) + ida_free(&nvme_instance_ida, subsys->instance); + kfree(subsys); +} + +static void nvme_destroy_subsystem(struct kref *ref) +{ + struct nvme_subsystem *subsys = + container_of(ref, struct nvme_subsystem, ref); + + mutex_lock(&nvme_subsystems_lock); + list_del(&subsys->entry); + mutex_unlock(&nvme_subsystems_lock); + + ida_destroy(&subsys->ns_ida); + device_del(&subsys->dev); + put_device(&subsys->dev); +} + +static void nvme_put_subsystem(struct nvme_subsystem *subsys) +{ + kref_put(&subsys->ref, nvme_destroy_subsystem); +} + +static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) +{ + struct nvme_subsystem *subsys; + + lockdep_assert_held(&nvme_subsystems_lock); + + /* + * Fail matches for discovery subsystems. This results + * in each discovery controller bound to a unique subsystem. + * This avoids issues with validating controller values + * that can only be true when there is a single unique subsystem. + * There may be multiple and completely independent entities + * that provide discovery controllers. 
+ */ + if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) + return NULL; + + list_for_each_entry(subsys, &nvme_subsystems, entry) { + if (strcmp(subsys->subnqn, subsysnqn)) + continue; + if (!kref_get_unless_zero(&subsys->ref)) + continue; + return subsys; + } + + return NULL; +} + +static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) +{ + return ctrl->opts && ctrl->opts->discovery_nqn; +} + +static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, + struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + struct nvme_ctrl *tmp; + + lockdep_assert_held(&nvme_subsystems_lock); + + list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { + if (nvme_state_terminal(tmp)) + continue; + + if (tmp->cntlid == ctrl->cntlid) { + dev_err(ctrl->device, + "Duplicate cntlid %u with %s, subsys %s, rejecting\n", + ctrl->cntlid, dev_name(tmp->device), + subsys->subnqn); + return false; + } + + if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + nvme_discovery_ctrl(ctrl)) + continue; + + dev_err(ctrl->device, + "Subsystem does not support multiple controllers\n"); + return false; + } + + return true; +} + +static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + struct nvme_subsystem *subsys, *found; + int ret; + + subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); + if (!subsys) + return -ENOMEM; + + subsys->instance = -1; + mutex_init(&subsys->lock); + kref_init(&subsys->ref); + INIT_LIST_HEAD(&subsys->ctrls); + INIT_LIST_HEAD(&subsys->nsheads); + nvme_init_subnqn(subsys, ctrl, id); + memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); + memcpy(subsys->model, id->mn, sizeof(subsys->model)); + subsys->vendor_id = le16_to_cpu(id->vid); + subsys->cmic = id->cmic; + + /* Versions prior to 1.4 don't necessarily report a valid type */ + if (id->cntrltype == NVME_CTRL_DISC || + !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) + subsys->subtype = NVME_NQN_DISC; + else + subsys->subtype = NVME_NQN_NVME; + + if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { + dev_err(ctrl->device, + "Subsystem %s is not a discovery controller", + subsys->subnqn); + kfree(subsys); + return -EINVAL; + } + subsys->awupf = le16_to_cpu(id->awupf); + nvme_mpath_default_iopolicy(subsys); + + subsys->dev.class = nvme_subsys_class; + subsys->dev.release = nvme_release_subsystem; + subsys->dev.groups = nvme_subsys_attrs_groups; + dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); + device_initialize(&subsys->dev); + + mutex_lock(&nvme_subsystems_lock); + found = __nvme_find_get_subsystem(subsys->subnqn); + if (found) { + put_device(&subsys->dev); + subsys = found; + + if (!nvme_validate_cntlid(subsys, ctrl, id)) { + ret = -EINVAL; + goto out_put_subsystem; + } + } else { + ret = device_add(&subsys->dev); + if (ret) { + dev_err(ctrl->device, + "failed to register subsystem device.\n"); + put_device(&subsys->dev); + goto out_unlock; + } + ida_init(&subsys->ns_ida); + list_add_tail(&subsys->entry, &nvme_subsystems); + } + + ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, + dev_name(ctrl->device)); + if (ret) { + dev_err(ctrl->device, + "failed to create sysfs link from subsystem.\n"); + goto out_put_subsystem; + } + + if (!found) + subsys->instance = ctrl->instance; + ctrl->subsys = subsys; + list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); + mutex_unlock(&nvme_subsystems_lock); + return 0; + +out_put_subsystem: + nvme_put_subsystem(subsys); +out_unlock: + mutex_unlock(&nvme_subsystems_lock); + return ret; +} + +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 
log_page, u8 lsp, u8 csi,
+		void *log, size_t size, u64 offset)
+{
+	struct nvme_command c = { };
+	u32 dwlen = nvme_bytes_to_numd(size);
+
+	c.get_log_page.opcode = nvme_admin_get_log_page;
+	c.get_log_page.nsid = cpu_to_le32(nsid);
+	c.get_log_page.lid = log_page;
+	c.get_log_page.lsp = lsp;
+	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
+	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
+	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
+	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+	c.get_log_page.csi = csi;
+
+	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
+}
+
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+		struct nvme_effects_log **log)
+{
+	struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
+	int ret;
+
+	if (cel)
+		goto out;
+
+	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
+	if (!cel)
+		return -ENOMEM;
+
+	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
+			cel, sizeof(*cel), 0);
+	if (ret) {
+		kfree(cel);
+		return ret;
+	}
+
+	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+out:
+	*log = cel;
+	return 0;
+}
+
+static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
+{
+	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
+
+	if (check_shl_overflow(1U, units + page_shift - 9, &val))
+		return UINT_MAX;
+	return val;
+}
+
+static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+{
+	struct nvme_command c = { };
+	struct nvme_id_ctrl_nvm *id;
+	int ret;
+
+	if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
+		ctrl->max_discard_sectors = UINT_MAX;
+		ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
+	} else {
+		ctrl->max_discard_sectors = 0;
+		ctrl->max_discard_segments = 0;
+	}
+
+	/*
+	 * Even though the NVMe spec explicitly states that MDTS is not
+	 * applicable to write-zeroes, we are cautious and limit the size to
+	 * the controller's max_hw_sectors value, which is based on the MDTS
+	 * field and possibly other limiting factors.
+	 */
+	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
+	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
+		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
+	else
+		ctrl->max_zeroes_sectors = 0;
+
+	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+	    nvme_ctrl_limited_cns(ctrl) ||
+	    test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
+		return 0;
+
+	id = kzalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		return -ENOMEM;
+
+	c.identify.opcode = nvme_admin_identify;
+	c.identify.cns = NVME_ID_CNS_CS_CTRL;
+	c.identify.csi = NVME_CSI_NVM;
+
+	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+	if (ret)
+		goto free_data;
+
+	if (id->dmrl)
+		ctrl->max_discard_segments = id->dmrl;
+	ctrl->dmrsl = le32_to_cpu(id->dmrsl);
+	if (id->wzsl)
+		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
+
+free_data:
+	if (ret > 0)
+		set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
+	kfree(id);
+	return ret;
+}
+
+static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
+{
+	struct nvme_effects_log *log = ctrl->effects;
+
+	log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+						NVME_CMD_EFFECTS_NCC |
+						NVME_CMD_EFFECTS_CSE_MASK);
+	log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+						NVME_CMD_EFFECTS_CSE_MASK);
+
+	/*
+	 * The spec says the result of a security receive command depends on
+	 * the previous security send command. As such, many vendors log this
+	 * command as one to be submitted only when no other commands to the
+	 * same namespace are outstanding.
The intention is to tell the host to + * prevent mixing security send and receive. + * + * This driver can only enforce such exclusive access against IO + * queues, though. We are not readily able to enforce such a rule for + * two commands to the admin queue, which is the only queue that + * matters for this command. + * + * Rather than blindly freezing the IO queues for this effect that + * doesn't even apply to IO, mask it off. + */ + log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); + + log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); + log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); + log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); +} + +static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + int ret = 0; + + if (ctrl->effects) + return 0; + + if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { + ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); + if (ret < 0) + return ret; + } + + if (!ctrl->effects) { + ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); + if (!ctrl->effects) + return -ENOMEM; + xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); + } + + nvme_init_known_nvm_effects(ctrl); + return 0; +} + +static int nvme_init_identify(struct nvme_ctrl *ctrl) +{ + struct nvme_id_ctrl *id; + u32 max_hw_sectors; + bool prev_apst_enabled; + int ret; + + ret = nvme_identify_ctrl(ctrl, &id); + if (ret) { + dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); + return -EIO; + } + + if (!(ctrl->ops->flags & NVME_F_FABRICS)) + ctrl->cntlid = le16_to_cpu(id->cntlid); + + if (!ctrl->identified) { + unsigned int i; + + /* + * Check for quirks. Quirk can depend on firmware version, + * so, in principle, the set of quirks present can change + * across a reset. As a possible future enhancement, we + * could re-scan for quirks every time we reinitialize + * the device, but we'd have to make sure that the driver + * behaves intelligently if the quirks change. 
+ */ + for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { + if (quirk_matches(id, &core_quirks[i])) + ctrl->quirks |= core_quirks[i].quirks; + } + + ret = nvme_init_subsystem(ctrl, id); + if (ret) + goto out_free; + + ret = nvme_init_effects(ctrl, id); + if (ret) + goto out_free; + } + memcpy(ctrl->subsys->firmware_rev, id->fr, + sizeof(ctrl->subsys->firmware_rev)); + + if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { + dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); + ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; + } + + ctrl->crdt[0] = le16_to_cpu(id->crdt1); + ctrl->crdt[1] = le16_to_cpu(id->crdt2); + ctrl->crdt[2] = le16_to_cpu(id->crdt3); + + ctrl->oacs = le16_to_cpu(id->oacs); + ctrl->oncs = le16_to_cpu(id->oncs); + ctrl->mtfa = le16_to_cpu(id->mtfa); + ctrl->oaes = le32_to_cpu(id->oaes); + ctrl->wctemp = le16_to_cpu(id->wctemp); + ctrl->cctemp = le16_to_cpu(id->cctemp); + + atomic_set(&ctrl->abort_limit, id->acl + 1); + ctrl->vwc = id->vwc; + if (id->mdts) + max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); + else + max_hw_sectors = UINT_MAX; + ctrl->max_hw_sectors = + min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); + + nvme_set_queue_limits(ctrl, ctrl->admin_q); + ctrl->sgls = le32_to_cpu(id->sgls); + ctrl->kas = le16_to_cpu(id->kas); + ctrl->max_namespaces = le32_to_cpu(id->mnan); + ctrl->ctratt = le32_to_cpu(id->ctratt); + + ctrl->cntrltype = id->cntrltype; + ctrl->dctype = id->dctype; + + if (id->rtd3e) { + /* us -> s */ + u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; + + ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, + shutdown_timeout, 60); + + if (ctrl->shutdown_timeout != shutdown_timeout) + dev_info(ctrl->device, + "Shutdown timeout set to %u seconds\n", + ctrl->shutdown_timeout); + } else + ctrl->shutdown_timeout = shutdown_timeout; + + ctrl->npss = id->npss; + ctrl->apsta = id->apsta; + prev_apst_enabled = ctrl->apst_enabled; + if (ctrl->quirks & NVME_QUIRK_NO_APST) { + if (force_apst && id->apsta) { + dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); + ctrl->apst_enabled = true; + } else { + ctrl->apst_enabled = false; + } + } else { + ctrl->apst_enabled = id->apsta; + } + memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); + + if (ctrl->ops->flags & NVME_F_FABRICS) { + ctrl->icdoff = le16_to_cpu(id->icdoff); + ctrl->ioccsz = le32_to_cpu(id->ioccsz); + ctrl->iorcsz = le32_to_cpu(id->iorcsz); + ctrl->maxcmd = le16_to_cpu(id->maxcmd); + + /* + * In fabrics we need to verify the cntlid matches the + * admin connect + */ + if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { + dev_err(ctrl->device, + "Mismatching cntlid: Connect %u vs Identify " + "%u, rejecting\n", + ctrl->cntlid, le16_to_cpu(id->cntlid)); + ret = -EINVAL; + goto out_free; + } + + if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { + dev_err(ctrl->device, + "keep-alive support is mandatory for fabrics\n"); + ret = -EINVAL; + goto out_free; + } + } else { + ctrl->hmpre = le32_to_cpu(id->hmpre); + ctrl->hmmin = le32_to_cpu(id->hmmin); + ctrl->hmminds = le32_to_cpu(id->hmminds); + ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); + } + + ret = nvme_mpath_init_identify(ctrl, id); + if (ret < 0) + goto out_free; + + if (ctrl->apst_enabled && !prev_apst_enabled) + dev_pm_qos_expose_latency_tolerance(ctrl->device); + else if (!ctrl->apst_enabled && prev_apst_enabled) + dev_pm_qos_hide_latency_tolerance(ctrl->device); + +out_free: + kfree(id); + return ret; +} + +/* + * 
Initialize the cached copies of the Identify data and various controller + * register in our nvme_ctrl structure. This should be called as soon as + * the admin queue is fully up and running. + */ +int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) +{ + int ret; + + ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); + if (ret) { + dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); + return ret; + } + + ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); + + if (ctrl->vs >= NVME_VS(1, 1, 0)) + ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); + + ret = nvme_init_identify(ctrl); + if (ret) + return ret; + + ret = nvme_configure_apst(ctrl); + if (ret < 0) + return ret; + + ret = nvme_configure_timestamp(ctrl); + if (ret < 0) + return ret; + + ret = nvme_configure_host_options(ctrl); + if (ret < 0) + return ret; + + nvme_configure_opal(ctrl, was_suspended); + + if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { + /* + * Do not return errors unless we are in a controller reset, + * the controller works perfectly fine without hwmon. + */ + ret = nvme_hwmon_init(ctrl); + if (ret == -EINTR) + return ret; + } + + clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); + ctrl->identified = true; + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); + +static int nvme_dev_open(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + switch (nvme_ctrl_state(ctrl)) { + case NVME_CTRL_LIVE: + break; + default: + return -EWOULDBLOCK; + } + + nvme_get_ctrl(ctrl); + if (!try_module_get(ctrl->ops->module)) { + nvme_put_ctrl(ctrl); + return -EINVAL; + } + + file->private_data = ctrl; + return 0; +} + +static int nvme_dev_release(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + module_put(ctrl->ops->module); + nvme_put_ctrl(ctrl); + return 0; +} + +static const struct file_operations nvme_dev_fops = { + .owner = THIS_MODULE, + .open = nvme_dev_open, + .release = nvme_dev_release, + .unlocked_ioctl = nvme_dev_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .uring_cmd = nvme_dev_uring_cmd, +}; + +static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, + unsigned nsid) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&ctrl->subsys->lock); + + list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { + /* + * Private namespaces can share NSIDs under some conditions. + * In that case we can't use the same ns_head for namespaces + * with the same NSID. 
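+ * For example, two controllers in the same subsystem may each expose a
+ * private namespace with NSID 1; those namespaces must not share an
+ * ns_head despite the matching NSID.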
+ */ + if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) + continue; + if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) + return h; + } + + return NULL; +} + +static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, + struct nvme_ns_ids *ids) +{ + bool has_uuid = !uuid_is_null(&ids->uuid); + bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); + bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + + list_for_each_entry(h, &subsys->nsheads, entry) { + if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) + return -EINVAL; + if (has_nguid && + memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) + return -EINVAL; + if (has_eui64 && + memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) + return -EINVAL; + } + + return 0; +} + +static void nvme_cdev_rel(struct device *dev) +{ + ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); +} + +void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) +{ + cdev_device_del(cdev, cdev_device); + put_device(cdev_device); +} + +int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, + const struct file_operations *fops, struct module *owner) +{ + int minor, ret; + + minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); + if (minor < 0) + return minor; + cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); + cdev_device->class = nvme_ns_chr_class; + cdev_device->release = nvme_cdev_rel; + device_initialize(cdev_device); + cdev_init(cdev, fops); + cdev->owner = owner; + ret = cdev_device_add(cdev, cdev_device); + if (ret) + put_device(cdev_device); + + return ret; +} + +static int nvme_ns_chr_open(struct inode *inode, struct file *file) +{ + return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); +} + +static int nvme_ns_chr_release(struct inode *inode, struct file *file) +{ + nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); + return 0; +} + +static const struct file_operations nvme_ns_chr_fops = { + .owner = THIS_MODULE, + .open = nvme_ns_chr_open, + .release = nvme_ns_chr_release, + .unlocked_ioctl = nvme_ns_chr_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .uring_cmd = nvme_ns_chr_uring_cmd, + .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, +}; + +static int nvme_add_ns_cdev(struct nvme_ns *ns) +{ + int ret; + + ns->cdev_device.parent = ns->ctrl->device; + ret = dev_set_name(&ns->cdev_device, "ng%dn%d", + ns->ctrl->instance, ns->head->instance); + if (ret) + return ret; + + return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, + ns->ctrl->ops->module); +} + +static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info) +{ + struct nvme_ns_head *head; + size_t size = sizeof(*head); + int ret = -ENOMEM; + +#ifdef CONFIG_NVME_MULTIPATH + size += num_possible_nodes() * sizeof(struct nvme_ns *); +#endif + + head = kzalloc(size, GFP_KERNEL); + if (!head) + goto out; + ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); + if (ret < 0) + goto out_free_head; + head->instance = ret; + INIT_LIST_HEAD(&head->list); + ret = init_srcu_struct(&head->srcu); + if (ret) + goto out_ida_remove; + head->subsys = ctrl->subsys; + head->ns_id = info->nsid; + head->ids = info->ids; + head->shared = info->is_shared; + kref_init(&head->ref); + + if (head->ids.csi) { + ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); + if (ret) + goto out_cleanup_srcu; + } else + head->effects = ctrl->effects; + + ret = 
nvme_mpath_alloc_disk(ctrl, head); + if (ret) + goto out_cleanup_srcu; + + list_add_tail(&head->entry, &ctrl->subsys->nsheads); + + kref_get(&ctrl->subsys->ref); + + return head; +out_cleanup_srcu: + cleanup_srcu_struct(&head->srcu); +out_ida_remove: + ida_free(&ctrl->subsys->ns_ida, head->instance); +out_free_head: + kfree(head); +out: + if (ret > 0) + ret = blk_status_to_errno(nvme_error_status(ret)); + return ERR_PTR(ret); +} + +static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, + struct nvme_ns_ids *ids) +{ + struct nvme_subsystem *s; + int ret = 0; + + /* + * Note that this check is racy as we try to avoid holding the global + * lock over the whole ns_head creation. But it is only intended as + * a sanity check anyway. + */ + mutex_lock(&nvme_subsystems_lock); + list_for_each_entry(s, &nvme_subsystems, entry) { + if (s == this) + continue; + mutex_lock(&s->lock); + ret = nvme_subsys_check_duplicate_ids(s, ids); + mutex_unlock(&s->lock); + if (ret) + break; + } + mutex_unlock(&nvme_subsystems_lock); + + return ret; +} + +static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + struct nvme_ns_head *head = NULL; + int ret; + + ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); + if (ret) { + /* + * We've found two different namespaces on two different + * subsystems that report the same ID. This is pretty nasty + * for anything that actually requires unique device + * identification. In the kernel we need this for multipathing, + * and in user space the /dev/disk/by-id/ links rely on it. + * + * If the device also claims to be multi-path capable back off + * here now and refuse the probe the second device as this is a + * recipe for data corruption. If not this is probably a + * cheap consumer device if on the PCIe bus, so let the user + * proceed and use the shiny toy, but warn that with changing + * probing order (which due to our async probing could just be + * device taking longer to startup) the other device could show + * up at any time. 
+ */ + nvme_print_device_info(ctrl); + if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ + ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && + info->is_shared)) { + dev_err(ctrl->device, + "ignoring nsid %d because of duplicate IDs\n", + info->nsid); + return ret; + } + + dev_err(ctrl->device, + "clearing duplicate IDs for nsid %d\n", info->nsid); + dev_err(ctrl->device, + "use of /dev/disk/by-id/ may cause data corruption\n"); + memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); + memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); + memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); + ctrl->quirks |= NVME_QUIRK_BOGUS_NID; + } + + mutex_lock(&ctrl->subsys->lock); + head = nvme_find_ns_head(ctrl, info->nsid); + if (!head) { + ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); + if (ret) { + dev_err(ctrl->device, + "duplicate IDs in subsystem for nsid %d\n", + info->nsid); + goto out_unlock; + } + head = nvme_alloc_ns_head(ctrl, info); + if (IS_ERR(head)) { + ret = PTR_ERR(head); + goto out_unlock; + } + } else { + ret = -EINVAL; + if (!info->is_shared || !head->shared) { + dev_err(ctrl->device, + "Duplicate unshared namespace %d\n", + info->nsid); + goto out_put_ns_head; + } + if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { + dev_err(ctrl->device, + "IDs don't match for shared namespace %d\n", + info->nsid); + goto out_put_ns_head; + } + + if (!multipath) { + dev_warn(ctrl->device, + "Found shared namespace %d, but multipathing not supported.\n", + info->nsid); + dev_warn_once(ctrl->device, + "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); + } + } + + list_add_tail_rcu(&ns->siblings, &head->list); + ns->head = head; + mutex_unlock(&ctrl->subsys->lock); + return 0; + +out_put_ns_head: + nvme_put_ns_head(head); +out_unlock: + mutex_unlock(&ctrl->subsys->lock); + return ret; +} + +struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) +{ + struct nvme_ns *ns, *ret = NULL; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) { + if (ns->head->ns_id == nsid) { + if (!nvme_get_ns(ns)) + continue; + ret = ns; + break; + } + if (ns->head->ns_id > nsid) + break; + } + up_read(&ctrl->namespaces_rwsem); + return ret; +} +EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); + +/* + * Add the namespace to the controller list while keeping the list ordered. 
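+ * For example, with NSIDs 1, 2 and 5 already on the list, adding NSID 3
+ * walks the list from the tail and links the new entry right after 2,
+ * so the list stays sorted by NSID without a full re-sort.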
+ */ +static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) +{ + struct nvme_ns *tmp; + + list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { + if (tmp->head->ns_id < ns->head->ns_id) { + list_add(&ns->list, &tmp->list); + return; + } + } + list_add(&ns->list, &ns->ctrl->namespaces); +} + +static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) +{ + struct nvme_ns *ns; + struct gendisk *disk; + int node = ctrl->numa_node; + + ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); + if (!ns) + return; + + disk = blk_mq_alloc_disk(ctrl->tagset, ns); + if (IS_ERR(disk)) + goto out_free_ns; + disk->fops = &nvme_bdev_ops; + disk->private_data = ns; + + ns->disk = disk; + ns->queue = disk->queue; + + if (ctrl->opts && ctrl->opts->data_digest) + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); + + blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); + if (ctrl->ops->supports_pci_p2pdma && + ctrl->ops->supports_pci_p2pdma(ctrl)) + blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); + + ns->ctrl = ctrl; + kref_init(&ns->kref); + + if (nvme_init_ns_head(ns, info)) + goto out_cleanup_disk; + + /* + * If multipathing is enabled, the device name for all disks and not + * just those that represent shared namespaces needs to be based on the + * subsystem instance. Using the controller instance for private + * namespaces could lead to naming collisions between shared and private + * namespaces if they don't use a common numbering scheme. + * + * If multipathing is not enabled, disk names must use the controller + * instance as shared namespaces will show up as multiple block + * devices. + */ + if (nvme_ns_head_multipath(ns->head)) { + sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, + ctrl->instance, ns->head->instance); + disk->flags |= GENHD_FL_HIDDEN; + } else if (multipath) { + sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, + ns->head->instance); + } else { + sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, + ns->head->instance); + } + + if (nvme_update_ns_info(ns, info)) + goto out_unlink_ns; + + down_write(&ctrl->namespaces_rwsem); + /* + * Ensure that no namespaces are added to the ctrl list after the queues + * are frozen, thereby avoiding a deadlock between scan and reset. 
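+ * If the controller is already frozen the allocation is backed out
+ * instead; a later rescan, queued once the reset path completes, can
+ * pick the namespace up again.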
+ */ + if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { + up_write(&ctrl->namespaces_rwsem); + goto out_unlink_ns; + } + nvme_ns_add_to_ctrl_list(ns); + up_write(&ctrl->namespaces_rwsem); + nvme_get_ctrl(ctrl); + + if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) + goto out_cleanup_ns_from_list; + + if (!nvme_ns_head_multipath(ns->head)) + nvme_add_ns_cdev(ns); + + nvme_mpath_add_disk(ns, info->anagrpid); + nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); + + return; + + out_cleanup_ns_from_list: + nvme_put_ctrl(ctrl); + down_write(&ctrl->namespaces_rwsem); + list_del_init(&ns->list); + up_write(&ctrl->namespaces_rwsem); + out_unlink_ns: + mutex_lock(&ctrl->subsys->lock); + list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) + list_del_init(&ns->head->entry); + mutex_unlock(&ctrl->subsys->lock); + nvme_put_ns_head(ns->head); + out_cleanup_disk: + put_disk(disk); + out_free_ns: + kfree(ns); +} + +static void nvme_ns_remove(struct nvme_ns *ns) +{ + bool last_path = false; + + if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) + return; + + clear_bit(NVME_NS_READY, &ns->flags); + set_capacity(ns->disk, 0); + nvme_fault_inject_fini(&ns->fault_inject); + + /* + * Ensure that !NVME_NS_READY is seen by other threads to prevent + * this ns going back into current_path. + */ + synchronize_srcu(&ns->head->srcu); + + /* wait for concurrent submissions */ + if (nvme_mpath_clear_current_path(ns)) + synchronize_srcu(&ns->head->srcu); + + mutex_lock(&ns->ctrl->subsys->lock); + list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) { + list_del_init(&ns->head->entry); + last_path = true; + } + mutex_unlock(&ns->ctrl->subsys->lock); + + /* guarantee not available in head->list */ + synchronize_srcu(&ns->head->srcu); + + if (!nvme_ns_head_multipath(ns->head)) + nvme_cdev_del(&ns->cdev, &ns->cdev_device); + del_gendisk(ns->disk); + + down_write(&ns->ctrl->namespaces_rwsem); + list_del_init(&ns->list); + up_write(&ns->ctrl->namespaces_rwsem); + + if (last_path) + nvme_mpath_shutdown_disk(ns->head); + nvme_put_ns(ns); +} + +static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) +{ + struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); + + if (ns) { + nvme_ns_remove(ns); + nvme_put_ns(ns); + } +} + +static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; + + if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { + dev_err(ns->ctrl->device, + "identifiers changed for nsid %d\n", ns->head->ns_id); + goto out; + } + + ret = nvme_update_ns_info(ns, info); +out: + /* + * Only remove the namespace if we got a fatal error back from the + * device, otherwise ignore the error and just move on. + * + * TODO: we should probably schedule a delayed retry here. + */ + if (ret > 0 && (ret & NVME_SC_DNR)) + nvme_ns_remove(ns); +} + +static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) +{ + struct nvme_ns_info info = { .nsid = nsid }; + struct nvme_ns *ns; + int ret; + + if (nvme_identify_ns_descs(ctrl, &info)) + return; + + if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { + dev_warn(ctrl->device, + "command set not reported for nsid: %d\n", nsid); + return; + } + + /* + * If available try to use the Command Set Idependent Identify Namespace + * data structure to find all the generic information that is needed to + * set up a namespace. If not fall back to the legacy version. 
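+ * (The CS-independent structure is preferred when CAP.CRIMS is set:
+ * such a controller can report ready before its namespaces are, and
+ * only this structure carries the per-namespace ready state. It is also
+ * the only option for command sets other than NVM and ZNS.)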
+ */ + if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || + (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) + ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); + else + ret = nvme_ns_info_from_identify(ctrl, &info); + + if (info.is_removed) + nvme_ns_remove_by_nsid(ctrl, nsid); + + /* + * Ignore the namespace if it is not ready. We will get an AEN once it + * becomes ready and restart the scan. + */ + if (ret || !info.is_ready) + return; + + ns = nvme_find_get_ns(ctrl, nsid); + if (ns) { + nvme_validate_ns(ns, &info); + nvme_put_ns(ns); + } else { + nvme_alloc_ns(ctrl, &info); + } +} + +static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, + unsigned nsid) +{ + struct nvme_ns *ns, *next; + LIST_HEAD(rm_list); + + down_write(&ctrl->namespaces_rwsem); + list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { + if (ns->head->ns_id > nsid) + list_move_tail(&ns->list, &rm_list); + } + up_write(&ctrl->namespaces_rwsem); + + list_for_each_entry_safe(ns, next, &rm_list, list) + nvme_ns_remove(ns); + +} + +static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) +{ + const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); + __le32 *ns_list; + u32 prev = 0; + int ret = 0, i; + + ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); + if (!ns_list) + return -ENOMEM; + + for (;;) { + struct nvme_command cmd = { + .identify.opcode = nvme_admin_identify, + .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, + .identify.nsid = cpu_to_le32(prev), + }; + + ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, + NVME_IDENTIFY_DATA_SIZE); + if (ret) { + dev_warn(ctrl->device, + "Identify NS List failed (status=0x%x)\n", ret); + goto free; + } + + for (i = 0; i < nr_entries; i++) { + u32 nsid = le32_to_cpu(ns_list[i]); + + if (!nsid) /* end of the list? */ + goto out; + nvme_scan_ns(ctrl, nsid); + while (++prev < nsid) + nvme_ns_remove_by_nsid(ctrl, prev); + } + } + out: + nvme_remove_invalid_namespaces(ctrl, prev); + free: + kfree(ns_list); + return ret; +} + +static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) +{ + struct nvme_id_ctrl *id; + u32 nn, i; + + if (nvme_identify_ctrl(ctrl, &id)) + return; + nn = le32_to_cpu(id->nn); + kfree(id); + + for (i = 1; i <= nn; i++) + nvme_scan_ns(ctrl, i); + + nvme_remove_invalid_namespaces(ctrl, nn); +} + +static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) +{ + size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); + __le32 *log; + int error; + + log = kzalloc(log_size, GFP_KERNEL); + if (!log) + return; + + /* + * We need to read the log to clear the AEN, but we don't want to rely + * on it for the changed namespace information as userspace could have + * raced with us in reading the log page, which could cause us to miss + * updates. + */ + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, + NVME_CSI_NVM, log, log_size, 0); + if (error) + dev_warn(ctrl->device, + "reading changed ns log failed: %d\n", error); + + kfree(log); +} + +static void nvme_scan_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, scan_work); + int ret; + + /* No tagset on a live ctrl means IO queues could not created */ + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset) + return; + + /* + * Identify controller limits can change at controller reset due to + * new firmware download, even though it is not common we cannot ignore + * such scenario. 
Controller's non-mdts limits are reported in the unit + * of logical blocks that is dependent on the format of attached + * namespace. Hence re-read the limits at the time of ns allocation. + */ + ret = nvme_init_non_mdts_limits(ctrl); + if (ret < 0) { + dev_warn(ctrl->device, + "reading non-mdts-limits failed: %d\n", ret); + return; + } + + if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { + dev_info(ctrl->device, "rescanning namespaces.\n"); + nvme_clear_changed_ns_log(ctrl); + } + + mutex_lock(&ctrl->scan_lock); + if (nvme_ctrl_limited_cns(ctrl)) { + nvme_scan_ns_sequential(ctrl); + } else { + /* + * Fall back to sequential scan if DNR is set to handle broken + * devices which should support Identify NS List (as per the VS + * they report) but don't actually support it. + */ + ret = nvme_scan_ns_list(ctrl); + if (ret > 0 && ret & NVME_SC_DNR) + nvme_scan_ns_sequential(ctrl); + } + mutex_unlock(&ctrl->scan_lock); +} + +/* + * This function iterates the namespace list unlocked to allow recovery from + * controller failure. It is up to the caller to ensure the namespace list is + * not modified by scan work while this function is executing. + */ +void nvme_remove_namespaces(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns, *next; + LIST_HEAD(ns_list); + + /* + * make sure to requeue I/O to all namespaces as these + * might result from the scan itself and must complete + * for the scan_work to make progress + */ + nvme_mpath_clear_ctrl_paths(ctrl); + + /* + * Unquiesce io queues so any pending IO won't hang, especially + * those submitted from scan work + */ + nvme_unquiesce_io_queues(ctrl); + + /* prevent racing with ns scanning */ + flush_work(&ctrl->scan_work); + + /* + * The dead states indicates the controller was not gracefully + * disconnected. In that case, we won't be able to flush any data while + * removing the namespaces' disks; fail all the queues now to avoid + * potentially having to clean up the failed sync later. 
+ */ + if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD) + nvme_mark_namespaces_dead(ctrl); + + /* this is a no-op when called from the controller reset handler */ + nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); + + down_write(&ctrl->namespaces_rwsem); + list_splice_init(&ctrl->namespaces, &ns_list); + up_write(&ctrl->namespaces_rwsem); + + list_for_each_entry_safe(ns, next, &ns_list, list) + nvme_ns_remove(ns); +} +EXPORT_SYMBOL_GPL(nvme_remove_namespaces); + +static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct nvme_ctrl *ctrl = + container_of(dev, struct nvme_ctrl, ctrl_device); + struct nvmf_ctrl_options *opts = ctrl->opts; + int ret; + + ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); + if (ret) + return ret; + + if (opts) { + ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); + if (ret) + return ret; + + ret = add_uevent_var(env, "NVME_TRSVCID=%s", + opts->trsvcid ?: "none"); + if (ret) + return ret; + + ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", + opts->host_traddr ?: "none"); + if (ret) + return ret; + + ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", + opts->host_iface ?: "none"); + } + return ret; +} + +static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) +{ + char *envp[2] = { envdata, NULL }; + + kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); +} + +static void nvme_aen_uevent(struct nvme_ctrl *ctrl) +{ + char *envp[2] = { NULL, NULL }; + u32 aen_result = ctrl->aen_result; + + ctrl->aen_result = 0; + if (!aen_result) + return; + + envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); + if (!envp[0]) + return; + kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); + kfree(envp[0]); +} + +static void nvme_async_event_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, async_event_work); + + nvme_aen_uevent(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. 
+ */ + if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); +} + +static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) +{ + + u32 csts; + + if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) + return false; + + if (csts == ~0) + return false; + + return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); +} + +static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) +{ + struct nvme_fw_slot_info_log *log; + + log = kmalloc(sizeof(*log), GFP_KERNEL); + if (!log) + return; + + if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, + log, sizeof(*log), 0)) + dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); + kfree(log); +} + +static void nvme_fw_act_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = container_of(work, + struct nvme_ctrl, fw_act_work); + unsigned long fw_act_timeout; + + nvme_auth_stop(ctrl); + + if (ctrl->mtfa) + fw_act_timeout = jiffies + + msecs_to_jiffies(ctrl->mtfa * 100); + else + fw_act_timeout = jiffies + + msecs_to_jiffies(admin_timeout * 1000); + + nvme_quiesce_io_queues(ctrl); + while (nvme_ctrl_pp_status(ctrl)) { + if (time_after(jiffies, fw_act_timeout)) { + dev_warn(ctrl->device, + "Fw activation timeout, reset controller\n"); + nvme_try_sched_reset(ctrl); + return; + } + msleep(100); + } + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + return; + + nvme_unquiesce_io_queues(ctrl); + /* read FW slot information to clear the AER */ + nvme_get_fw_slot_info(ctrl); + + queue_work(nvme_wq, &ctrl->async_event_work); +} + +static u32 nvme_aer_type(u32 result) +{ + return result & 0x7; +} + +static u32 nvme_aer_subtype(u32 result) +{ + return (result & 0xff00) >> 8; +} + +static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) +{ + u32 aer_notice_type = nvme_aer_subtype(result); + bool requeue = true; + + switch (aer_notice_type) { + case NVME_AER_NOTICE_NS_CHANGED: + set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); + nvme_queue_scan(ctrl); + break; + case NVME_AER_NOTICE_FW_ACT_STARTING: + /* + * We are (ab)using the RESETTING state to prevent subsequent + * recovery actions from interfering with the controller's + * firmware activation. + */ + if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { + requeue = false; + queue_work(nvme_wq, &ctrl->fw_act_work); + } + break; +#ifdef CONFIG_NVME_MULTIPATH + case NVME_AER_NOTICE_ANA: + if (!ctrl->ana_log_buf) + break; + queue_work(nvme_wq, &ctrl->ana_work); + break; +#endif + case NVME_AER_NOTICE_DISC_CHANGED: + ctrl->aen_result = result; + break; + default: + dev_warn(ctrl->device, "async event result %08x\n", result); + } + return requeue; +} + +static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) +{ + dev_warn(ctrl->device, "resetting controller due to AER\n"); + nvme_reset_ctrl(ctrl); +} + +void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, + volatile union nvme_result *res) +{ + u32 result = le32_to_cpu(res->u32); + u32 aer_type = nvme_aer_type(result); + u32 aer_subtype = nvme_aer_subtype(result); + bool requeue = true; + + if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) + return; + + trace_nvme_async_event(ctrl, result); + switch (aer_type) { + case NVME_AER_NOTICE: + requeue = nvme_handle_aen_notice(ctrl, result); + break; + case NVME_AER_ERROR: + /* + * For a persistent internal error, don't run async_event_work + * to submit a new AER. The controller reset will do it. 
+ */ + if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { + nvme_handle_aer_persistent_error(ctrl); + return; + } + fallthrough; + case NVME_AER_SMART: + case NVME_AER_CSS: + case NVME_AER_VS: + ctrl->aen_result = result; + break; + default: + break; + } + + if (requeue) + queue_work(nvme_wq, &ctrl->async_event_work); +} +EXPORT_SYMBOL_GPL(nvme_complete_async_event); + +int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, unsigned int cmd_size) +{ + int ret; + + memset(set, 0, sizeof(*set)); + set->ops = ops; + set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; + if (ctrl->ops->flags & NVME_F_FABRICS) + set->reserved_tags = NVMF_RESERVED_TAGS; + set->numa_node = ctrl->numa_node; + set->flags = BLK_MQ_F_NO_SCHED; + if (ctrl->ops->flags & NVME_F_BLOCKING) + set->flags |= BLK_MQ_F_BLOCKING; + set->cmd_size = cmd_size; + set->driver_data = ctrl; + set->nr_hw_queues = 1; + set->timeout = NVME_ADMIN_TIMEOUT; + ret = blk_mq_alloc_tag_set(set); + if (ret) + return ret; + + ctrl->admin_q = blk_mq_init_queue(set); + if (IS_ERR(ctrl->admin_q)) { + ret = PTR_ERR(ctrl->admin_q); + goto out_free_tagset; + } + + if (ctrl->ops->flags & NVME_F_FABRICS) { + ctrl->fabrics_q = blk_mq_init_queue(set); + if (IS_ERR(ctrl->fabrics_q)) { + ret = PTR_ERR(ctrl->fabrics_q); + goto out_cleanup_admin_q; + } + } + + ctrl->admin_tagset = set; + return 0; + +out_cleanup_admin_q: + blk_mq_destroy_queue(ctrl->admin_q); + blk_put_queue(ctrl->admin_q); +out_free_tagset: + blk_mq_free_tag_set(set); + ctrl->admin_q = NULL; + ctrl->fabrics_q = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); + +void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) +{ + blk_mq_destroy_queue(ctrl->admin_q); + blk_put_queue(ctrl->admin_q); + if (ctrl->ops->flags & NVME_F_FABRICS) { + blk_mq_destroy_queue(ctrl->fabrics_q); + blk_put_queue(ctrl->fabrics_q); + } + blk_mq_free_tag_set(ctrl->admin_tagset); +} +EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); + +int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, unsigned int nr_maps, + unsigned int cmd_size) +{ + int ret; + + memset(set, 0, sizeof(*set)); + set->ops = ops; + set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); + /* + * Some Apple controllers requires tags to be unique across admin and + * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. 
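+ * With NVME_AQ_DEPTH (32) tags reserved, regular I/O requests are only
+ * ever handed tag values of 32 and up, so they cannot collide with a
+ * tag in use on the admin queue.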
+ */ + if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) + set->reserved_tags = NVME_AQ_DEPTH; + else if (ctrl->ops->flags & NVME_F_FABRICS) + set->reserved_tags = NVMF_RESERVED_TAGS; + set->numa_node = ctrl->numa_node; + set->flags = BLK_MQ_F_SHOULD_MERGE; + if (ctrl->ops->flags & NVME_F_BLOCKING) + set->flags |= BLK_MQ_F_BLOCKING; + set->cmd_size = cmd_size, + set->driver_data = ctrl; + set->nr_hw_queues = ctrl->queue_count - 1; + set->timeout = NVME_IO_TIMEOUT; + set->nr_maps = nr_maps; + ret = blk_mq_alloc_tag_set(set); + if (ret) + return ret; + + if (ctrl->ops->flags & NVME_F_FABRICS) { + ctrl->connect_q = blk_mq_init_queue(set); + if (IS_ERR(ctrl->connect_q)) { + ret = PTR_ERR(ctrl->connect_q); + goto out_free_tag_set; + } + blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, + ctrl->connect_q); + } + + ctrl->tagset = set; + return 0; + +out_free_tag_set: + blk_mq_free_tag_set(set); + ctrl->connect_q = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); + +void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) +{ + if (ctrl->ops->flags & NVME_F_FABRICS) { + blk_mq_destroy_queue(ctrl->connect_q); + blk_put_queue(ctrl->connect_q); + } + blk_mq_free_tag_set(ctrl->tagset); +} +EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); + +void nvme_stop_ctrl(struct nvme_ctrl *ctrl) +{ + nvme_mpath_stop(ctrl); + nvme_auth_stop(ctrl); + nvme_stop_keep_alive(ctrl); + nvme_stop_failfast_work(ctrl); + flush_work(&ctrl->async_event_work); + cancel_work_sync(&ctrl->fw_act_work); + if (ctrl->ops->stop_ctrl) + ctrl->ops->stop_ctrl(ctrl); +} +EXPORT_SYMBOL_GPL(nvme_stop_ctrl); + +void nvme_start_ctrl(struct nvme_ctrl *ctrl) +{ + nvme_start_keep_alive(ctrl); + + nvme_enable_aen(ctrl); + + /* + * persistent discovery controllers need to send indication to userspace + * to re-read the discovery log page to learn about possible changes + * that were missed. We identify persistent discovery controllers by + * checking that they started once before, hence are reconnecting back. 
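+ * (User space, for instance nvme-cli's autoconnect udev rules, can
+ * react to the NVME_EVENT=rediscover uevent sent below by re-reading
+ * the discovery log.)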
+ */ + if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && + nvme_discovery_ctrl(ctrl)) + nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); + + if (ctrl->queue_count > 1) { + nvme_queue_scan(ctrl); + nvme_unquiesce_io_queues(ctrl); + nvme_mpath_update(ctrl); + } + + nvme_change_uevent(ctrl, "NVME_EVENT=connected"); + set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); +} +EXPORT_SYMBOL_GPL(nvme_start_ctrl); + +void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) +{ + nvme_hwmon_exit(ctrl); + nvme_fault_inject_fini(&ctrl->fault_inject); + dev_pm_qos_hide_latency_tolerance(ctrl->device); + cdev_device_del(&ctrl->cdev, ctrl->device); + nvme_put_ctrl(ctrl); +} +EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); + +static void nvme_free_cels(struct nvme_ctrl *ctrl) +{ + struct nvme_effects_log *cel; + unsigned long i; + + xa_for_each(&ctrl->cels, i, cel) { + xa_erase(&ctrl->cels, i); + kfree(cel); + } + + xa_destroy(&ctrl->cels); +} + +static void nvme_free_ctrl(struct device *dev) +{ + struct nvme_ctrl *ctrl = + container_of(dev, struct nvme_ctrl, ctrl_device); + struct nvme_subsystem *subsys = ctrl->subsys; + + if (!subsys || ctrl->instance != subsys->instance) + ida_free(&nvme_instance_ida, ctrl->instance); + + nvme_free_cels(ctrl); + nvme_mpath_uninit(ctrl); + nvme_auth_stop(ctrl); + nvme_auth_free(ctrl); + __free_page(ctrl->discard_page); + free_opal_dev(ctrl->opal_dev); + + if (subsys) { + mutex_lock(&nvme_subsystems_lock); + list_del(&ctrl->subsys_entry); + sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); + mutex_unlock(&nvme_subsystems_lock); + } + + ctrl->ops->free_ctrl(ctrl); + + if (subsys) + nvme_put_subsystem(subsys); +} + +/* + * Initialize a NVMe controller structures. This needs to be called during + * earliest initialization so that we have the initialized structured around + * during probing. 
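+ * (The PCI driver calls this from its probe path and the fabrics
+ * transports from their create_ctrl() callbacks, in both cases before
+ * the admin queue is set up.)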
+ */ +int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, + const struct nvme_ctrl_ops *ops, unsigned long quirks) +{ + int ret; + + WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); + clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); + spin_lock_init(&ctrl->lock); + mutex_init(&ctrl->scan_lock); + INIT_LIST_HEAD(&ctrl->namespaces); + xa_init(&ctrl->cels); + init_rwsem(&ctrl->namespaces_rwsem); + ctrl->dev = dev; + ctrl->ops = ops; + ctrl->quirks = quirks; + ctrl->numa_node = NUMA_NO_NODE; + INIT_WORK(&ctrl->scan_work, nvme_scan_work); + INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); + INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); + INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); + init_waitqueue_head(&ctrl->state_wq); + + INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); + INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); + memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); + ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; + + BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > + PAGE_SIZE); + ctrl->discard_page = alloc_page(GFP_KERNEL); + if (!ctrl->discard_page) { + ret = -ENOMEM; + goto out; + } + + ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); + if (ret < 0) + goto out; + ctrl->instance = ret; + + device_initialize(&ctrl->ctrl_device); + ctrl->device = &ctrl->ctrl_device; + ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), + ctrl->instance); + ctrl->device->class = nvme_class; + ctrl->device->parent = ctrl->dev; + if (ops->dev_attr_groups) + ctrl->device->groups = ops->dev_attr_groups; + else + ctrl->device->groups = nvme_dev_attr_groups; + ctrl->device->release = nvme_free_ctrl; + dev_set_drvdata(ctrl->device, ctrl); + ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); + if (ret) + goto out_release_instance; + + nvme_get_ctrl(ctrl); + cdev_init(&ctrl->cdev, &nvme_dev_fops); + ctrl->cdev.owner = ops->module; + ret = cdev_device_add(&ctrl->cdev, ctrl->device); + if (ret) + goto out_free_name; + + /* + * Initialize latency tolerance controls. The sysfs files won't + * be visible to userspace unless the device actually supports APST. 
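+ * Once exposed, the value can be tuned through the device's
+ * power/pm_qos_latency_tolerance_us attribute, which calls back into
+ * nvme_set_latency_tolerance() and re-evaluates APST.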
+ */ + ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; + dev_pm_qos_update_user_latency_tolerance(ctrl->device, + min(default_ps_max_latency_us, (unsigned long)S32_MAX)); + + nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); + nvme_mpath_init_ctrl(ctrl); + ret = nvme_auth_init_ctrl(ctrl); + if (ret) + goto out_free_cdev; + + return 0; +out_free_cdev: + nvme_fault_inject_fini(&ctrl->fault_inject); + dev_pm_qos_hide_latency_tolerance(ctrl->device); + cdev_device_del(&ctrl->cdev, ctrl->device); +out_free_name: + nvme_put_ctrl(ctrl); + kfree_const(ctrl->device->kobj.name); +out_release_instance: + ida_free(&nvme_instance_ida, ctrl->instance); +out: + if (ctrl->discard_page) + __free_page(ctrl->discard_page); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_init_ctrl); + +/* let I/O to all namespaces fail in preparation for surprise removal */ +void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mark_disk_dead(ns->disk); + up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); + +void nvme_unfreeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_unfreeze_queue(ns->queue); + up_read(&ctrl->namespaces_rwsem); + clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); +} +EXPORT_SYMBOL_GPL(nvme_unfreeze); + +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) { + timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); + if (timeout <= 0) + break; + } + up_read(&ctrl->namespaces_rwsem); + return timeout; +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); + +void nvme_wait_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_wait(ns->queue); + up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze); + +void nvme_start_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + set_bit(NVME_CTRL_FROZEN, &ctrl->flags); + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_freeze_queue_start(ns->queue); + up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_start_freeze); + +void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) +{ + if (!ctrl->tagset) + return; + if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) + blk_mq_quiesce_tagset(ctrl->tagset); + else + blk_mq_wait_quiesce_done(ctrl->tagset); +} +EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); + +void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) +{ + if (!ctrl->tagset) + return; + if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) + blk_mq_unquiesce_tagset(ctrl->tagset); +} +EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); + +void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) +{ + if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) + blk_mq_quiesce_queue(ctrl->admin_q); + else + blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); +} +EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); + +void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) +{ + if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) + blk_mq_unquiesce_queue(ctrl->admin_q); +} +EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); + +void nvme_sync_io_queues(struct nvme_ctrl 
*ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_sync_queue(ns->queue); + up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_sync_io_queues); + +void nvme_sync_queues(struct nvme_ctrl *ctrl) +{ + nvme_sync_io_queues(ctrl); + if (ctrl->admin_q) + blk_sync_queue(ctrl->admin_q); +} +EXPORT_SYMBOL_GPL(nvme_sync_queues); + +struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) +{ + if (file->f_op != &nvme_dev_fops) + return NULL; + return file->private_data; +} +EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); + +/* + * Check we didn't inadvertently grow the command structure sizes: + */ +static inline void _nvme_check_size(void) +{ + BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); + BUILD_BUG_ON(sizeof(struct nvme_features) != 64); + BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); + BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != + NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); + BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); + BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); + BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); +} + + +static int __init nvme_core_init(void) +{ + int result = -ENOMEM; + + _nvme_check_size(); + + nvme_wq = alloc_workqueue("nvme-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + if (!nvme_wq) + goto out; + + nvme_reset_wq = alloc_workqueue("nvme-reset-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + if (!nvme_reset_wq) + goto destroy_wq; + + nvme_delete_wq = alloc_workqueue("nvme-delete-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + if (!nvme_delete_wq) + goto destroy_reset_wq; + + result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, + NVME_MINORS, "nvme"); + if (result < 0) + goto destroy_delete_wq; + + nvme_class = class_create("nvme"); + if (IS_ERR(nvme_class)) { + result = PTR_ERR(nvme_class); + goto unregister_chrdev; + } + nvme_class->dev_uevent = nvme_class_uevent; + + nvme_subsys_class = class_create("nvme-subsystem"); + if (IS_ERR(nvme_subsys_class)) { + result = PTR_ERR(nvme_subsys_class); + goto destroy_class; + } + + result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, + "nvme-generic"); + if (result < 0) + goto destroy_subsys_class; + + nvme_ns_chr_class = class_create("nvme-generic"); + if (IS_ERR(nvme_ns_chr_class)) { + result = PTR_ERR(nvme_ns_chr_class); + goto unregister_generic_ns; + } + + result = nvme_init_auth(); + if (result) + goto destroy_ns_chr; + return 0; + +destroy_ns_chr: + class_destroy(nvme_ns_chr_class); 
+unregister_generic_ns: + unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); +destroy_subsys_class: + class_destroy(nvme_subsys_class); +destroy_class: + class_destroy(nvme_class); +unregister_chrdev: + unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); +destroy_delete_wq: + destroy_workqueue(nvme_delete_wq); +destroy_reset_wq: + destroy_workqueue(nvme_reset_wq); +destroy_wq: + destroy_workqueue(nvme_wq); +out: + return result; +} + +static void __exit nvme_core_exit(void) +{ + nvme_exit_auth(); + class_destroy(nvme_ns_chr_class); + class_destroy(nvme_subsys_class); + class_destroy(nvme_class); + unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); + unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); + destroy_workqueue(nvme_delete_wq); + destroy_workqueue(nvme_reset_wq); + destroy_workqueue(nvme_wq); + ida_destroy(&nvme_ns_chr_minor_ida); + ida_destroy(&nvme_instance_ida); +} + +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); +module_init(nvme_core_init); +module_exit(nvme_core_exit); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c new file mode 100644 index 0000000000..92ba315cfe --- /dev/null +++ b/drivers/nvme/host/fabrics.c @@ -0,0 +1,1428 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics common host code. + * Copyright (c) 2015-2016 HGST, a Western Digital Company. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/parser.h> +#include <linux/seq_file.h> +#include "nvme.h" +#include "fabrics.h" + +static LIST_HEAD(nvmf_transports); +static DECLARE_RWSEM(nvmf_transports_rwsem); + +static LIST_HEAD(nvmf_hosts); +static DEFINE_MUTEX(nvmf_hosts_mutex); + +static struct nvmf_host *nvmf_default_host; + +static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id) +{ + struct nvmf_host *host; + + host = kmalloc(sizeof(*host), GFP_KERNEL); + if (!host) + return NULL; + + kref_init(&host->ref); + uuid_copy(&host->id, id); + strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE); + + return host; +} + +static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id) +{ + struct nvmf_host *host; + + mutex_lock(&nvmf_hosts_mutex); + + /* + * We have defined a host as how it is perceived by the target. + * Therefore, we don't allow different Host NQNs with the same Host ID. + * Similarly, we do not allow the usage of the same Host NQN with + * different Host IDs. This'll maintain unambiguous host identification. 
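+ * E.g. a connect attempt that reuses an existing hostnqn with a new
+ * hostid (or the reverse) is rejected with -EINVAL instead of silently
+ * creating a second host entry.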
+ */ + list_for_each_entry(host, &nvmf_hosts, list) { + bool same_hostnqn = !strcmp(host->nqn, hostnqn); + bool same_hostid = uuid_equal(&host->id, id); + + if (same_hostnqn && same_hostid) { + kref_get(&host->ref); + goto out_unlock; + } + if (same_hostnqn) { + pr_err("found same hostnqn %s but different hostid %pUb\n", + hostnqn, id); + host = ERR_PTR(-EINVAL); + goto out_unlock; + } + if (same_hostid) { + pr_err("found same hostid %pUb but different hostnqn %s\n", + id, hostnqn); + host = ERR_PTR(-EINVAL); + goto out_unlock; + } + } + + host = nvmf_host_alloc(hostnqn, id); + if (!host) { + host = ERR_PTR(-ENOMEM); + goto out_unlock; + } + + list_add_tail(&host->list, &nvmf_hosts); +out_unlock: + mutex_unlock(&nvmf_hosts_mutex); + return host; +} + +static struct nvmf_host *nvmf_host_default(void) +{ + struct nvmf_host *host; + char nqn[NVMF_NQN_SIZE]; + uuid_t id; + + uuid_gen(&id); + snprintf(nqn, NVMF_NQN_SIZE, + "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); + + host = nvmf_host_alloc(nqn, &id); + if (!host) + return NULL; + + mutex_lock(&nvmf_hosts_mutex); + list_add_tail(&host->list, &nvmf_hosts); + mutex_unlock(&nvmf_hosts_mutex); + + return host; +} + +static void nvmf_host_destroy(struct kref *ref) +{ + struct nvmf_host *host = container_of(ref, struct nvmf_host, ref); + + mutex_lock(&nvmf_hosts_mutex); + list_del(&host->list); + mutex_unlock(&nvmf_hosts_mutex); + + kfree(host); +} + +static void nvmf_host_put(struct nvmf_host *host) +{ + if (host) + kref_put(&host->ref, nvmf_host_destroy); +} + +/** + * nvmf_get_address() - Get address/port + * @ctrl: Host NVMe controller instance which we got the address + * @buf: OUTPUT parameter that will contain the address/port + * @size: buffer size + */ +int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + int len = 0; + + if (ctrl->opts->mask & NVMF_OPT_TRADDR) + len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr); + if (ctrl->opts->mask & NVMF_OPT_TRSVCID) + len += scnprintf(buf + len, size - len, "%strsvcid=%s", + (len) ? "," : "", ctrl->opts->trsvcid); + if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR) + len += scnprintf(buf + len, size - len, "%shost_traddr=%s", + (len) ? "," : "", ctrl->opts->host_traddr); + if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE) + len += scnprintf(buf + len, size - len, "%shost_iface=%s", + (len) ? "," : "", ctrl->opts->host_iface); + len += scnprintf(buf + len, size - len, "\n"); + + return len; +} +EXPORT_SYMBOL_GPL(nvmf_get_address); + +/** + * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function. + * @ctrl: Host NVMe controller instance maintaining the admin + * queue used to submit the property read command to + * the allocated NVMe controller resource on the target system. + * @off: Starting offset value of the targeted property + * register (see the fabrics section of the NVMe standard). + * @val: OUTPUT parameter that will contain the value of + * the property after a successful read. + * + * Used by the host system to retrieve a 32-bit capsule property value + * from an NVMe controller on the target system. + * + * ("Capsule property" is an "PCIe register concept" applied to the + * NVMe fabrics space.) 
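+ *
+ * Fabrics transports wire this up as their ->reg_read32() hook, so the
+ * core can, for example, poll NVME_REG_CSTS for CSTS.RDY while enabling
+ * the controller over a fabric.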
+ * + * Return: + * 0: successful read + * > 0: NVMe error status code + * < 0: Linux errno error code + */ +int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) +{ + struct nvme_command cmd = { }; + union nvme_result res; + int ret; + + cmd.prop_get.opcode = nvme_fabrics_command; + cmd.prop_get.fctype = nvme_fabrics_type_property_get; + cmd.prop_get.offset = cpu_to_le32(off); + + ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, + NVME_QID_ANY, 0, 0); + + if (ret >= 0) + *val = le64_to_cpu(res.u64); + if (unlikely(ret != 0)) + dev_err(ctrl->device, + "Property Get error: %d, offset %#x\n", + ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + + return ret; +} +EXPORT_SYMBOL_GPL(nvmf_reg_read32); + +/** + * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function. + * @ctrl: Host NVMe controller instance maintaining the admin + * queue used to submit the property read command to + * the allocated controller resource on the target system. + * @off: Starting offset value of the targeted property + * register (see the fabrics section of the NVMe standard). + * @val: OUTPUT parameter that will contain the value of + * the property after a successful read. + * + * Used by the host system to retrieve a 64-bit capsule property value + * from an NVMe controller on the target system. + * + * ("Capsule property" is an "PCIe register concept" applied to the + * NVMe fabrics space.) + * + * Return: + * 0: successful read + * > 0: NVMe error status code + * < 0: Linux errno error code + */ +int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) +{ + struct nvme_command cmd = { }; + union nvme_result res; + int ret; + + cmd.prop_get.opcode = nvme_fabrics_command; + cmd.prop_get.fctype = nvme_fabrics_type_property_get; + cmd.prop_get.attrib = 1; + cmd.prop_get.offset = cpu_to_le32(off); + + ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, + NVME_QID_ANY, 0, 0); + + if (ret >= 0) + *val = le64_to_cpu(res.u64); + if (unlikely(ret != 0)) + dev_err(ctrl->device, + "Property Get error: %d, offset %#x\n", + ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + return ret; +} +EXPORT_SYMBOL_GPL(nvmf_reg_read64); + +/** + * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function. + * @ctrl: Host NVMe controller instance maintaining the admin + * queue used to submit the property read command to + * the allocated NVMe controller resource on the target system. + * @off: Starting offset value of the targeted property + * register (see the fabrics section of the NVMe standard). + * @val: Input parameter that contains the value to be + * written to the property. + * + * Used by the NVMe host system to write a 32-bit capsule property value + * to an NVMe controller on the target system. + * + * ("Capsule property" is an "PCIe register concept" applied to the + * NVMe fabrics space.) + * + * Return: + * 0: successful write + * > 0: NVMe error status code + * < 0: Linux errno error code + */ +int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) +{ + struct nvme_command cmd = { }; + int ret; + + cmd.prop_set.opcode = nvme_fabrics_command; + cmd.prop_set.fctype = nvme_fabrics_type_property_set; + cmd.prop_set.attrib = 0; + cmd.prop_set.offset = cpu_to_le32(off); + cmd.prop_set.value = cpu_to_le64(val); + + ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, + NVME_QID_ANY, 0, 0); + if (unlikely(ret)) + dev_err(ctrl->device, + "Property Set error: %d, offset %#x\n", + ret > 0 ? 
ret & ~NVME_SC_DNR : ret, off); + return ret; +} +EXPORT_SYMBOL_GPL(nvmf_reg_write32); + +/** + * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for + * connect() errors. + * @ctrl: The specific /dev/nvmeX device that had the error. + * @errval: Error code to be decoded in a more human-friendly + * printout. + * @offset: For use with the NVMe error code + * NVME_SC_CONNECT_INVALID_PARAM. + * @cmd: This is the SQE portion of a submission capsule. + * @data: This is the "Data" portion of a submission capsule. + */ +static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, + int errval, int offset, struct nvme_command *cmd, + struct nvmf_connect_data *data) +{ + int err_sctype = errval & ~NVME_SC_DNR; + + if (errval < 0) { + dev_err(ctrl->device, + "Connect command failed, errno: %d\n", errval); + return; + } + + switch (err_sctype) { + case NVME_SC_CONNECT_INVALID_PARAM: + if (offset >> 16) { + char *inv_data = "Connect Invalid Data Parameter"; + + switch (offset & 0xffff) { + case (offsetof(struct nvmf_connect_data, cntlid)): + dev_err(ctrl->device, + "%s, cntlid: %d\n", + inv_data, data->cntlid); + break; + case (offsetof(struct nvmf_connect_data, hostnqn)): + dev_err(ctrl->device, + "%s, hostnqn \"%s\"\n", + inv_data, data->hostnqn); + break; + case (offsetof(struct nvmf_connect_data, subsysnqn)): + dev_err(ctrl->device, + "%s, subsysnqn \"%s\"\n", + inv_data, data->subsysnqn); + break; + default: + dev_err(ctrl->device, + "%s, starting byte offset: %d\n", + inv_data, offset & 0xffff); + break; + } + } else { + char *inv_sqe = "Connect Invalid SQE Parameter"; + + switch (offset) { + case (offsetof(struct nvmf_connect_command, qid)): + dev_err(ctrl->device, + "%s, qid %d\n", + inv_sqe, cmd->connect.qid); + break; + default: + dev_err(ctrl->device, + "%s, starting byte offset: %d\n", + inv_sqe, offset); + } + } + break; + case NVME_SC_CONNECT_INVALID_HOST: + dev_err(ctrl->device, + "Connect for subsystem %s is not allowed, hostnqn: %s\n", + data->subsysnqn, data->hostnqn); + break; + case NVME_SC_CONNECT_CTRL_BUSY: + dev_err(ctrl->device, + "Connect command failed: controller is busy or not available\n"); + break; + case NVME_SC_CONNECT_FORMAT: + dev_err(ctrl->device, + "Connect incompatible format: %d", + cmd->connect.recfmt); + break; + case NVME_SC_HOST_PATH_ERROR: + dev_err(ctrl->device, + "Connect command failed: host path error\n"); + break; + case NVME_SC_AUTH_REQUIRED: + dev_err(ctrl->device, + "Connect command failed: authentication required\n"); + break; + default: + dev_err(ctrl->device, + "Connect command failed, error wo/DNR bit: %d\n", + err_sctype); + break; + } +} + +static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl, + u16 cntlid) +{ + struct nvmf_connect_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + uuid_copy(&data->hostid, &ctrl->opts->host->id); + data->cntlid = cpu_to_le16(cntlid); + strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); + strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); + + return data; +} + +static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid, + struct nvme_command *cmd) +{ + cmd->connect.opcode = nvme_fabrics_command; + cmd->connect.fctype = nvme_fabrics_type_connect; + cmd->connect.qid = cpu_to_le16(qid); + + if (qid) { + cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize); + } else { + cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); + + /* + * set keep-alive timeout in seconds granularity (ms * 1000) + */ + 
cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000); + } + + if (ctrl->opts->disable_sqflow) + cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW; +} + +/** + * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect" + * API function. + * @ctrl: Host nvme controller instance used to request + * a new NVMe controller allocation on the target + * system and establish an NVMe Admin connection to + * that controller. + * + * This function enables an NVMe host device to request a new allocation of + * an NVMe controller resource on a target system as well establish a + * fabrics-protocol connection of the NVMe Admin queue between the + * host system device and the allocated NVMe controller on the + * target system via a NVMe Fabrics "Connect" command. + * + * Return: + * 0: success + * > 0: NVMe error status code + * < 0: Linux errno error code + * + */ +int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) +{ + struct nvme_command cmd = { }; + union nvme_result res; + struct nvmf_connect_data *data; + int ret; + u32 result; + + nvmf_connect_cmd_prep(ctrl, 0, &cmd); + + data = nvmf_connect_data_prep(ctrl, 0xffff); + if (!data) + return -ENOMEM; + + ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, + data, sizeof(*data), NVME_QID_ANY, 1, + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + if (ret) { + nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), + &cmd, data); + goto out_free_data; + } + + result = le32_to_cpu(res.u32); + ctrl->cntlid = result & 0xFFFF; + if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { + /* Secure concatenation is not implemented */ + if (result & NVME_CONNECT_AUTHREQ_ASCR) { + dev_warn(ctrl->device, + "qid 0: secure concatenation is not supported\n"); + ret = NVME_SC_AUTH_REQUIRED; + goto out_free_data; + } + /* Authentication required */ + ret = nvme_auth_negotiate(ctrl, 0); + if (ret) { + dev_warn(ctrl->device, + "qid 0: authentication setup failed\n"); + ret = NVME_SC_AUTH_REQUIRED; + goto out_free_data; + } + ret = nvme_auth_wait(ctrl, 0); + if (ret) + dev_warn(ctrl->device, + "qid 0: authentication failed\n"); + else + dev_info(ctrl->device, + "qid 0: authenticated\n"); + } +out_free_data: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue); + +/** + * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect" + * API function. + * @ctrl: Host nvme controller instance used to establish an + * NVMe I/O queue connection to the already allocated NVMe + * controller on the target system. + * @qid: NVMe I/O queue number for the new I/O connection between + * host and target (note qid == 0 is illegal as this is + * the Admin queue, per NVMe standard). + * + * This function issues a fabrics-protocol connection + * of a NVMe I/O queue (via NVMe Fabrics "Connect" command) + * between the host system device and the allocated NVMe controller + * on the target system. 
+ * + * Return: + * 0: success + * > 0: NVMe error status code + * < 0: Linux errno error code + */ +int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) +{ + struct nvme_command cmd = { }; + struct nvmf_connect_data *data; + union nvme_result res; + int ret; + u32 result; + + nvmf_connect_cmd_prep(ctrl, qid, &cmd); + + data = nvmf_connect_data_prep(ctrl, ctrl->cntlid); + if (!data) + return -ENOMEM; + + ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, + data, sizeof(*data), qid, 1, + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + if (ret) { + nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), + &cmd, data); + } + result = le32_to_cpu(res.u32); + if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { + /* Secure concatenation is not implemented */ + if (result & NVME_CONNECT_AUTHREQ_ASCR) { + dev_warn(ctrl->device, + "qid 0: secure concatenation is not supported\n"); + ret = NVME_SC_AUTH_REQUIRED; + goto out_free_data; + } + /* Authentication required */ + ret = nvme_auth_negotiate(ctrl, qid); + if (ret) { + dev_warn(ctrl->device, + "qid %d: authentication setup failed\n", qid); + ret = NVME_SC_AUTH_REQUIRED; + } else { + ret = nvme_auth_wait(ctrl, qid); + if (ret) + dev_warn(ctrl->device, + "qid %u: authentication failed\n", qid); + } + } +out_free_data: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); + +bool nvmf_should_reconnect(struct nvme_ctrl *ctrl) +{ + if (ctrl->opts->max_reconnects == -1 || + ctrl->nr_reconnects < ctrl->opts->max_reconnects) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(nvmf_should_reconnect); + +/** + * nvmf_register_transport() - NVMe Fabrics Library registration function. + * @ops: Transport ops instance to be registered to the + * common fabrics library. + * + * API function that registers the type of specific transport fabric + * being implemented to the common NVMe fabrics library. Part of + * the overall init sequence of starting up a fabrics driver. + */ +int nvmf_register_transport(struct nvmf_transport_ops *ops) +{ + if (!ops->create_ctrl) + return -EINVAL; + + down_write(&nvmf_transports_rwsem); + list_add_tail(&ops->entry, &nvmf_transports); + up_write(&nvmf_transports_rwsem); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmf_register_transport); + +/** + * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function. + * @ops: Transport ops instance to be unregistered from the + * common fabrics library. + * + * Fabrics API function that unregisters the type of specific transport + * fabric being implemented from the common NVMe fabrics library. + * Part of the overall exit sequence of unloading the implemented driver. 
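/*
 * Editorial illustration, not part of this patch: skeleton of a transport
 * module registering itself with the fabrics library.  The "demo" transport,
 * its option masks and the stub create_ctrl() are placeholders; the
 * nvmf_register_transport()/nvmf_unregister_transport() calls and the
 * nvmf_transport_ops fields are the interface declared in fabrics.h.
 */
#include <linux/module.h>

#include "nvme.h"
#include "fabrics.h"

static struct nvme_ctrl *demo_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	/* a real transport would allocate and start a controller here */
	return ERR_PTR(-ENOSYS);
}

static struct nvmf_transport_ops demo_transport_ops = {
	.name		= "demo",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID,
	.create_ctrl	= demo_create_ctrl,
};

static int __init demo_init(void)
{
	return nvmf_register_transport(&demo_transport_ops);
}

static void __exit demo_exit(void)
{
	nvmf_unregister_transport(&demo_transport_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");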
+ */ +void nvmf_unregister_transport(struct nvmf_transport_ops *ops) +{ + down_write(&nvmf_transports_rwsem); + list_del(&ops->entry); + up_write(&nvmf_transports_rwsem); +} +EXPORT_SYMBOL_GPL(nvmf_unregister_transport); + +static struct nvmf_transport_ops *nvmf_lookup_transport( + struct nvmf_ctrl_options *opts) +{ + struct nvmf_transport_ops *ops; + + lockdep_assert_held(&nvmf_transports_rwsem); + + list_for_each_entry(ops, &nvmf_transports, entry) { + if (strcmp(ops->name, opts->transport) == 0) + return ops; + } + + return NULL; +} + +static const match_table_t opt_tokens = { + { NVMF_OPT_TRANSPORT, "transport=%s" }, + { NVMF_OPT_TRADDR, "traddr=%s" }, + { NVMF_OPT_TRSVCID, "trsvcid=%s" }, + { NVMF_OPT_NQN, "nqn=%s" }, + { NVMF_OPT_QUEUE_SIZE, "queue_size=%d" }, + { NVMF_OPT_NR_IO_QUEUES, "nr_io_queues=%d" }, + { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" }, + { NVMF_OPT_CTRL_LOSS_TMO, "ctrl_loss_tmo=%d" }, + { NVMF_OPT_KATO, "keep_alive_tmo=%d" }, + { NVMF_OPT_HOSTNQN, "hostnqn=%s" }, + { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, + { NVMF_OPT_HOST_IFACE, "host_iface=%s" }, + { NVMF_OPT_HOST_ID, "hostid=%s" }, + { NVMF_OPT_DUP_CONNECT, "duplicate_connect" }, + { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" }, + { NVMF_OPT_HDR_DIGEST, "hdr_digest" }, + { NVMF_OPT_DATA_DIGEST, "data_digest" }, + { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" }, + { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" }, + { NVMF_OPT_TOS, "tos=%d" }, + { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, + { NVMF_OPT_DISCOVERY, "discovery" }, +#ifdef CONFIG_NVME_HOST_AUTH + { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" }, + { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" }, +#endif + { NVMF_OPT_ERR, NULL } +}; + +static int nvmf_parse_options(struct nvmf_ctrl_options *opts, + const char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + int token, ret = 0; + size_t nqnlen = 0; + int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO; + uuid_t hostid; + char hostnqn[NVMF_NQN_SIZE]; + + /* Set defaults */ + opts->queue_size = NVMF_DEF_QUEUE_SIZE; + opts->nr_io_queues = num_online_cpus(); + opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; + opts->kato = 0; + opts->duplicate_connect = false; + opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO; + opts->hdr_digest = false; + opts->data_digest = false; + opts->tos = -1; /* < 0 == use transport default */ + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + /* use default host if not given by user space */ + uuid_copy(&hostid, &nvmf_default_host->id); + strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE); + + while ((p = strsep(&o, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, opt_tokens, args); + opts->mask |= token; + switch (token) { + case NVMF_OPT_TRANSPORT: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->transport); + opts->transport = p; + break; + case NVMF_OPT_NQN: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->subsysnqn); + opts->subsysnqn = p; + nqnlen = strlen(opts->subsysnqn); + if (nqnlen >= NVMF_NQN_SIZE) { + pr_err("%s needs to be < %d bytes\n", + opts->subsysnqn, NVMF_NQN_SIZE); + ret = -EINVAL; + goto out; + } + opts->discovery_nqn = + !(strcmp(opts->subsysnqn, + NVME_DISC_SUBSYS_NAME)); + break; + case NVMF_OPT_TRADDR: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->traddr); + opts->traddr = p; + break; + case NVMF_OPT_TRSVCID: + p = match_strdup(args); + if (!p) { + ret = 
-ENOMEM; + goto out; + } + kfree(opts->trsvcid); + opts->trsvcid = p; + break; + case NVMF_OPT_QUEUE_SIZE: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + if (token < NVMF_MIN_QUEUE_SIZE || + token > NVMF_MAX_QUEUE_SIZE) { + pr_err("Invalid queue_size %d\n", token); + ret = -EINVAL; + goto out; + } + opts->queue_size = token; + break; + case NVMF_OPT_NR_IO_QUEUES: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + if (token <= 0) { + pr_err("Invalid number of IOQs %d\n", token); + ret = -EINVAL; + goto out; + } + if (opts->discovery_nqn) { + pr_debug("Ignoring nr_io_queues value for discovery controller\n"); + break; + } + + opts->nr_io_queues = min_t(unsigned int, + num_online_cpus(), token); + break; + case NVMF_OPT_KATO: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + + if (token < 0) { + pr_err("Invalid keep_alive_tmo %d\n", token); + ret = -EINVAL; + goto out; + } else if (token == 0 && !opts->discovery_nqn) { + /* Allowed for debug */ + pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); + } + opts->kato = token; + break; + case NVMF_OPT_CTRL_LOSS_TMO: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + + if (token < 0) + pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n"); + ctrl_loss_tmo = token; + break; + case NVMF_OPT_FAIL_FAST_TMO: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + + if (token >= 0) + pr_warn("I/O fail on reconnect controller after %d sec\n", + token); + else + token = -1; + + opts->fast_io_fail_tmo = token; + break; + case NVMF_OPT_HOSTNQN: + if (opts->host) { + pr_err("hostnqn already user-assigned: %s\n", + opts->host->nqn); + ret = -EADDRINUSE; + goto out; + } + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + nqnlen = strlen(p); + if (nqnlen >= NVMF_NQN_SIZE) { + pr_err("%s needs to be < %d bytes\n", + p, NVMF_NQN_SIZE); + kfree(p); + ret = -EINVAL; + goto out; + } + strscpy(hostnqn, p, NVMF_NQN_SIZE); + kfree(p); + break; + case NVMF_OPT_RECONNECT_DELAY: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + if (token <= 0) { + pr_err("Invalid reconnect_delay %d\n", token); + ret = -EINVAL; + goto out; + } + opts->reconnect_delay = token; + break; + case NVMF_OPT_HOST_TRADDR: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->host_traddr); + opts->host_traddr = p; + break; + case NVMF_OPT_HOST_IFACE: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->host_iface); + opts->host_iface = p; + break; + case NVMF_OPT_HOST_ID: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = uuid_parse(p, &hostid); + if (ret) { + pr_err("Invalid hostid %s\n", p); + ret = -EINVAL; + kfree(p); + goto out; + } + kfree(p); + break; + case NVMF_OPT_DUP_CONNECT: + opts->duplicate_connect = true; + break; + case NVMF_OPT_DISABLE_SQFLOW: + opts->disable_sqflow = true; + break; + case NVMF_OPT_HDR_DIGEST: + opts->hdr_digest = true; + break; + case NVMF_OPT_DATA_DIGEST: + opts->data_digest = true; + break; + case NVMF_OPT_NR_WRITE_QUEUES: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + if (token <= 0) { + pr_err("Invalid nr_write_queues %d\n", token); + ret = -EINVAL; + goto out; + } + opts->nr_write_queues = token; + break; + case NVMF_OPT_NR_POLL_QUEUES: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + if (token <= 0) { + pr_err("Invalid nr_poll_queues %d\n", token); + ret = -EINVAL; + goto out; + } + 
opts->nr_poll_queues = token; + break; + case NVMF_OPT_TOS: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + if (token < 0) { + pr_err("Invalid type of service %d\n", token); + ret = -EINVAL; + goto out; + } + if (token > 255) { + pr_warn("Clamping type of service to 255\n"); + token = 255; + } + opts->tos = token; + break; + case NVMF_OPT_DISCOVERY: + opts->discovery_nqn = true; + break; + case NVMF_OPT_DHCHAP_SECRET: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) { + pr_err("Invalid DH-CHAP secret %s\n", p); + ret = -EINVAL; + goto out; + } + kfree(opts->dhchap_secret); + opts->dhchap_secret = p; + break; + case NVMF_OPT_DHCHAP_CTRL_SECRET: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) { + pr_err("Invalid DH-CHAP secret %s\n", p); + ret = -EINVAL; + goto out; + } + kfree(opts->dhchap_ctrl_secret); + opts->dhchap_ctrl_secret = p; + break; + default: + pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n", + p); + ret = -EINVAL; + goto out; + } + } + + if (opts->discovery_nqn) { + opts->nr_io_queues = 0; + opts->nr_write_queues = 0; + opts->nr_poll_queues = 0; + opts->duplicate_connect = true; + } else { + if (!opts->kato) + opts->kato = NVME_DEFAULT_KATO; + } + if (ctrl_loss_tmo < 0) { + opts->max_reconnects = -1; + } else { + opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, + opts->reconnect_delay); + if (ctrl_loss_tmo < opts->fast_io_fail_tmo) + pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n", + opts->fast_io_fail_tmo, ctrl_loss_tmo); + } + + opts->host = nvmf_host_add(hostnqn, &hostid); + if (IS_ERR(opts->host)) { + ret = PTR_ERR(opts->host); + opts->host = NULL; + goto out; + } + +out: + kfree(options); + return ret; +} + +void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues, + u32 io_queues[HCTX_MAX_TYPES]) +{ + if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { + /* + * separate read/write queues + * hand out dedicated default queues only after we have + * sufficient read queues. + */ + io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; + nr_io_queues -= io_queues[HCTX_TYPE_READ]; + io_queues[HCTX_TYPE_DEFAULT] = + min(opts->nr_write_queues, nr_io_queues); + nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT]; + } else { + /* + * shared read/write queues + * either no write queues were requested, or we don't have + * sufficient queue count to have dedicated default queues. 
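/*
 * Editorial illustration, not part of this patch: the reconnect budget
 * computed above, reproduced as plain userspace arithmetic.  With the
 * defaults from fabrics.h (ctrl_loss_tmo = 600 s, reconnect_delay = 10 s)
 * a controller gets 60 reconnect attempts before nvmf_should_reconnect()
 * starts returning false.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int ctrl_loss_tmo = 600;	/* NVMF_DEF_CTRL_LOSS_TMO */
	int reconnect_delay = 10;	/* NVMF_DEF_RECONNECT_DELAY */
	int max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay);

	printf("max_reconnects = %d\n", max_reconnects);	/* prints 60 */
	return 0;
}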
+ */ + io_queues[HCTX_TYPE_DEFAULT] = + min(opts->nr_io_queues, nr_io_queues); + nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT]; + } + + if (opts->nr_poll_queues && nr_io_queues) { + /* map dedicated poll queues only if we have queues left */ + io_queues[HCTX_TYPE_POLL] = + min(opts->nr_poll_queues, nr_io_queues); + } +} +EXPORT_SYMBOL_GPL(nvmf_set_io_queues); + +void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl, + u32 io_queues[HCTX_MAX_TYPES]) +{ + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) { + /* separate read/write queues */ + set->map[HCTX_TYPE_DEFAULT].nr_queues = + io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; + set->map[HCTX_TYPE_READ].nr_queues = + io_queues[HCTX_TYPE_READ]; + set->map[HCTX_TYPE_READ].queue_offset = + io_queues[HCTX_TYPE_DEFAULT]; + } else { + /* shared read/write queues */ + set->map[HCTX_TYPE_DEFAULT].nr_queues = + io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; + set->map[HCTX_TYPE_READ].nr_queues = + io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_READ].queue_offset = 0; + } + + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); + if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) { + /* map dedicated poll queues only if we have queues left */ + set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL]; + set->map[HCTX_TYPE_POLL].queue_offset = + io_queues[HCTX_TYPE_DEFAULT] + + io_queues[HCTX_TYPE_READ]; + blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); + } + + dev_info(ctrl->device, + "mapped %d/%d/%d default/read/poll queues.\n", + io_queues[HCTX_TYPE_DEFAULT], + io_queues[HCTX_TYPE_READ], + io_queues[HCTX_TYPE_POLL]); +} +EXPORT_SYMBOL_GPL(nvmf_map_queues); + +static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, + unsigned int required_opts) +{ + if ((opts->mask & required_opts) != required_opts) { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { + if ((opt_tokens[i].token & required_opts) && + !(opt_tokens[i].token & opts->mask)) { + pr_warn("missing parameter '%s'\n", + opt_tokens[i].pattern); + } + } + + return -EINVAL; + } + + return 0; +} + +bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, + struct nvmf_ctrl_options *opts) +{ + if (!nvmf_ctlr_matches_baseopts(ctrl, opts) || + strcmp(opts->traddr, ctrl->opts->traddr) || + strcmp(opts->trsvcid, ctrl->opts->trsvcid)) + return false; + + /* + * Checking the local address or host interfaces is rough. + * + * In most cases, none is specified and the host port or + * host interface is selected by the stack. + * + * Assume no match if: + * - local address or host interface is specified and address + * or host interface is not the same + * - local address or host interface is not specified but + * remote is, or vice versa (admin using specific + * host_traddr/host_iface when it matters). 
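/*
 * Editorial illustration, not part of this patch: the read/write/poll queue
 * split performed by nvmf_set_io_queues() above, mirrored as userspace
 * arithmetic.  The sample numbers (8 available queues; nr_io_queues=4,
 * nr_write_queues=2, nr_poll_queues=2 requested) are made up.
 */
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int avail = 8;		/* queues granted by the target */
	unsigned int opt_io = 4, opt_write = 2, opt_poll = 2;
	unsigned int q_default, q_read = 0, q_poll = 0;

	if (opt_write && opt_io < avail) {
		/* dedicated read queues first, then default (write) queues */
		q_read = opt_io;
		avail -= q_read;
		q_default = MIN(opt_write, avail);
		avail -= q_default;
	} else {
		/* shared read/write queues */
		q_default = MIN(opt_io, avail);
		avail -= q_default;
	}
	if (opt_poll && avail)
		q_poll = MIN(opt_poll, avail);

	/* prints "2/4/2 default/read/poll queues" for this example */
	printf("%u/%u/%u default/read/poll queues\n",
	       q_default, q_read, q_poll);
	return 0;
}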
+ */ + if ((opts->mask & NVMF_OPT_HOST_TRADDR) && + (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) { + if (strcmp(opts->host_traddr, ctrl->opts->host_traddr)) + return false; + } else if ((opts->mask & NVMF_OPT_HOST_TRADDR) || + (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) { + return false; + } + + if ((opts->mask & NVMF_OPT_HOST_IFACE) && + (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) { + if (strcmp(opts->host_iface, ctrl->opts->host_iface)) + return false; + } else if ((opts->mask & NVMF_OPT_HOST_IFACE) || + (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) { + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(nvmf_ip_options_match); + +static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts, + unsigned int allowed_opts) +{ + if (opts->mask & ~allowed_opts) { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { + if ((opt_tokens[i].token & opts->mask) && + (opt_tokens[i].token & ~allowed_opts)) { + pr_warn("invalid parameter '%s'\n", + opt_tokens[i].pattern); + } + } + + return -EINVAL; + } + + return 0; +} + +void nvmf_free_options(struct nvmf_ctrl_options *opts) +{ + nvmf_host_put(opts->host); + kfree(opts->transport); + kfree(opts->traddr); + kfree(opts->trsvcid); + kfree(opts->subsysnqn); + kfree(opts->host_traddr); + kfree(opts->host_iface); + kfree(opts->dhchap_secret); + kfree(opts->dhchap_ctrl_secret); + kfree(opts); +} +EXPORT_SYMBOL_GPL(nvmf_free_options); + +#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN) +#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \ + NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \ + NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\ + NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\ + NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\ + NVMF_OPT_DHCHAP_CTRL_SECRET) + +static struct nvme_ctrl * +nvmf_create_ctrl(struct device *dev, const char *buf) +{ + struct nvmf_ctrl_options *opts; + struct nvmf_transport_ops *ops; + struct nvme_ctrl *ctrl; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + ret = nvmf_parse_options(opts, buf); + if (ret) + goto out_free_opts; + + + request_module("nvme-%s", opts->transport); + + /* + * Check the generic options first as we need a valid transport for + * the lookup below. Then clear the generic flags so that transport + * drivers don't have to care about them. 
+ */ + ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS); + if (ret) + goto out_free_opts; + opts->mask &= ~NVMF_REQUIRED_OPTS; + + down_read(&nvmf_transports_rwsem); + ops = nvmf_lookup_transport(opts); + if (!ops) { + pr_info("no handler found for transport %s.\n", + opts->transport); + ret = -EINVAL; + goto out_unlock; + } + + if (!try_module_get(ops->module)) { + ret = -EBUSY; + goto out_unlock; + } + up_read(&nvmf_transports_rwsem); + + ret = nvmf_check_required_opts(opts, ops->required_opts); + if (ret) + goto out_module_put; + ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS | + ops->allowed_opts | ops->required_opts); + if (ret) + goto out_module_put; + + ctrl = ops->create_ctrl(dev, opts); + if (IS_ERR(ctrl)) { + ret = PTR_ERR(ctrl); + goto out_module_put; + } + + module_put(ops->module); + return ctrl; + +out_module_put: + module_put(ops->module); + goto out_free_opts; +out_unlock: + up_read(&nvmf_transports_rwsem); +out_free_opts: + nvmf_free_options(opts); + return ERR_PTR(ret); +} + +static struct class *nvmf_class; +static struct device *nvmf_device; +static DEFINE_MUTEX(nvmf_dev_mutex); + +static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *pos) +{ + struct seq_file *seq_file = file->private_data; + struct nvme_ctrl *ctrl; + const char *buf; + int ret = 0; + + if (count > PAGE_SIZE) + return -ENOMEM; + + buf = memdup_user_nul(ubuf, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + mutex_lock(&nvmf_dev_mutex); + if (seq_file->private) { + ret = -EINVAL; + goto out_unlock; + } + + ctrl = nvmf_create_ctrl(nvmf_device, buf); + if (IS_ERR(ctrl)) { + ret = PTR_ERR(ctrl); + goto out_unlock; + } + + seq_file->private = ctrl; + +out_unlock: + mutex_unlock(&nvmf_dev_mutex); + kfree(buf); + return ret ? ret : count; +} + +static void __nvmf_concat_opt_tokens(struct seq_file *seq_file) +{ + const struct match_token *tok; + int idx; + + /* + * Add dummy entries for instance and cntlid to + * signal an invalid/non-existing controller + */ + seq_puts(seq_file, "instance=-1,cntlid=-1"); + for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) { + tok = &opt_tokens[idx]; + if (tok->token == NVMF_OPT_ERR) + continue; + seq_puts(seq_file, ","); + seq_puts(seq_file, tok->pattern); + } + seq_puts(seq_file, "\n"); +} + +static int nvmf_dev_show(struct seq_file *seq_file, void *private) +{ + struct nvme_ctrl *ctrl; + + mutex_lock(&nvmf_dev_mutex); + ctrl = seq_file->private; + if (!ctrl) { + __nvmf_concat_opt_tokens(seq_file); + goto out_unlock; + } + + seq_printf(seq_file, "instance=%d,cntlid=%d\n", + ctrl->instance, ctrl->cntlid); + +out_unlock: + mutex_unlock(&nvmf_dev_mutex); + return 0; +} + +static int nvmf_dev_open(struct inode *inode, struct file *file) +{ + /* + * The miscdevice code initializes file->private_data, but doesn't + * make use of it later. 
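/*
 * Editorial illustration, not part of this patch: what nvme-cli effectively
 * does with the misc device above - write one connect string, then read the
 * same fd back to learn the instance and cntlid of the new controller.  The
 * transport address and subsystem NQN are made-up example values, and the
 * program needs root privileges and a reachable target.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char connect[] =
		"transport=tcp,traddr=192.168.1.10,trsvcid=4420,"
		"nqn=nqn.2014-08.org.example:subsys1,nr_io_queues=8";
	char reply[128];
	ssize_t n;
	int fd;

	fd = open("/dev/nvme-fabrics", O_RDWR);
	if (fd < 0 || write(fd, connect, strlen(connect)) < 0) {
		perror("nvme-fabrics");
		return 1;
	}
	n = read(fd, reply, sizeof(reply) - 1);	/* "instance=N,cntlid=M" */
	if (n > 0) {
		reply[n] = '\0';
		printf("%s", reply);
	}
	close(fd);
	return 0;
}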
+ */ + file->private_data = NULL; + return single_open(file, nvmf_dev_show, NULL); +} + +static int nvmf_dev_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq_file = file->private_data; + struct nvme_ctrl *ctrl = seq_file->private; + + if (ctrl) + nvme_put_ctrl(ctrl); + return single_release(inode, file); +} + +static const struct file_operations nvmf_dev_fops = { + .owner = THIS_MODULE, + .write = nvmf_dev_write, + .read = seq_read, + .open = nvmf_dev_open, + .release = nvmf_dev_release, +}; + +static struct miscdevice nvmf_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "nvme-fabrics", + .fops = &nvmf_dev_fops, +}; + +static int __init nvmf_init(void) +{ + int ret; + + nvmf_default_host = nvmf_host_default(); + if (!nvmf_default_host) + return -ENOMEM; + + nvmf_class = class_create("nvme-fabrics"); + if (IS_ERR(nvmf_class)) { + pr_err("couldn't register class nvme-fabrics\n"); + ret = PTR_ERR(nvmf_class); + goto out_free_host; + } + + nvmf_device = + device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl"); + if (IS_ERR(nvmf_device)) { + pr_err("couldn't create nvme-fabrics device!\n"); + ret = PTR_ERR(nvmf_device); + goto out_destroy_class; + } + + ret = misc_register(&nvmf_misc); + if (ret) { + pr_err("couldn't register misc device: %d\n", ret); + goto out_destroy_device; + } + + return 0; + +out_destroy_device: + device_destroy(nvmf_class, MKDEV(0, 0)); +out_destroy_class: + class_destroy(nvmf_class); +out_free_host: + nvmf_host_put(nvmf_default_host); + return ret; +} + +static void __exit nvmf_exit(void) +{ + misc_deregister(&nvmf_misc); + device_destroy(nvmf_class, MKDEV(0, 0)); + class_destroy(nvmf_class); + nvmf_host_put(nvmf_default_host); + + BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64); + BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64); + BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64); + BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64); + BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64); + BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64); + BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024); + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8); + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16); + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16); + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16); + BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16); +} + +MODULE_LICENSE("GPL v2"); + +module_init(nvmf_init); +module_exit(nvmf_exit); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h new file mode 100644 index 0000000000..82e7a27ffb --- /dev/null +++ b/drivers/nvme/host/fabrics.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NVMe over Fabrics common host code. + * Copyright (c) 2015-2016 HGST, a Western Digital Company. + */ +#ifndef _NVME_FABRICS_H +#define _NVME_FABRICS_H 1 + +#include <linux/in.h> +#include <linux/inet.h> + +#define NVMF_MIN_QUEUE_SIZE 16 +#define NVMF_MAX_QUEUE_SIZE 1024 +#define NVMF_DEF_QUEUE_SIZE 128 +#define NVMF_DEF_RECONNECT_DELAY 10 +/* default to 600 seconds of reconnect attempts before giving up */ +#define NVMF_DEF_CTRL_LOSS_TMO 600 +/* default is -1: the fail fast mechanism is disabled */ +#define NVMF_DEF_FAIL_FAST_TMO -1 + +/* + * Reserved one command for internal usage. This command is used for sending + * the connect command, as well as for the keep alive command on the admin + * queue once live. 
+ */ +#define NVMF_RESERVED_TAGS 1 + +/* + * Define a host as seen by the target. We allocate one at boot, but also + * allow the override it when creating controllers. This is both to provide + * persistence of the Host NQN over multiple boots, and to allow using + * multiple ones, for example in a container scenario. Because we must not + * use different Host NQNs with the same Host ID we generate a Host ID and + * use this structure to keep track of the relation between the two. + */ +struct nvmf_host { + struct kref ref; + struct list_head list; + char nqn[NVMF_NQN_SIZE]; + uuid_t id; +}; + +/** + * enum nvmf_parsing_opts - used to define the sysfs parsing options used. + */ +enum { + NVMF_OPT_ERR = 0, + NVMF_OPT_TRANSPORT = 1 << 0, + NVMF_OPT_NQN = 1 << 1, + NVMF_OPT_TRADDR = 1 << 2, + NVMF_OPT_TRSVCID = 1 << 3, + NVMF_OPT_QUEUE_SIZE = 1 << 4, + NVMF_OPT_NR_IO_QUEUES = 1 << 5, + NVMF_OPT_TL_RETRY_COUNT = 1 << 6, + NVMF_OPT_KATO = 1 << 7, + NVMF_OPT_HOSTNQN = 1 << 8, + NVMF_OPT_RECONNECT_DELAY = 1 << 9, + NVMF_OPT_HOST_TRADDR = 1 << 10, + NVMF_OPT_CTRL_LOSS_TMO = 1 << 11, + NVMF_OPT_HOST_ID = 1 << 12, + NVMF_OPT_DUP_CONNECT = 1 << 13, + NVMF_OPT_DISABLE_SQFLOW = 1 << 14, + NVMF_OPT_HDR_DIGEST = 1 << 15, + NVMF_OPT_DATA_DIGEST = 1 << 16, + NVMF_OPT_NR_WRITE_QUEUES = 1 << 17, + NVMF_OPT_NR_POLL_QUEUES = 1 << 18, + NVMF_OPT_TOS = 1 << 19, + NVMF_OPT_FAIL_FAST_TMO = 1 << 20, + NVMF_OPT_HOST_IFACE = 1 << 21, + NVMF_OPT_DISCOVERY = 1 << 22, + NVMF_OPT_DHCHAP_SECRET = 1 << 23, + NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24, +}; + +/** + * struct nvmf_ctrl_options - Used to hold the options specified + * with the parsing opts enum. + * @mask: Used by the fabrics library to parse through sysfs options + * on adding a NVMe controller. + * @max_reconnects: maximum number of allowed reconnect attempts before removing + * the controller, (-1) means reconnect forever, zero means remove + * immediately; + * @transport: Holds the fabric transport "technology name" (for a lack of + * better description) that will be used by an NVMe controller + * being added. + * @subsysnqn: Hold the fully qualified NQN subystem name (format defined + * in the NVMe specification, "NVMe Qualified Names"). + * @traddr: The transport-specific TRADDR field for a port on the + * subsystem which is adding a controller. + * @trsvcid: The transport-specific TRSVCID field for a port on the + * subsystem which is adding a controller. + * @host_traddr: A transport-specific field identifying the NVME host port + * to use for the connection to the controller. + * @host_iface: A transport-specific field identifying the NVME host + * interface to use for the connection to the controller. + * @queue_size: Number of IO queue elements. + * @nr_io_queues: Number of controller IO queues that will be established. + * @reconnect_delay: Time between two consecutive reconnect attempts. + * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN. + * @kato: Keep-alive timeout. + * @host: Virtual NVMe host, contains the NQN and Host ID. 
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
+ * @disable_sqflow: disable controller sq flow control
+ * @hdr_digest: generate/verify header digest (TCP)
+ * @data_digest: generate/verify data digest (TCP)
+ * @nr_write_queues: number of queues for write I/O
+ * @nr_poll_queues: number of queues for polling I/O
+ * @tos: type of service
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds
+ */
+struct nvmf_ctrl_options {
+ unsigned mask;
+ int max_reconnects;
+ char *transport;
+ char *subsysnqn;
+ char *traddr;
+ char *trsvcid;
+ char *host_traddr;
+ char *host_iface;
+ size_t queue_size;
+ unsigned int nr_io_queues;
+ unsigned int reconnect_delay;
+ bool discovery_nqn;
+ bool duplicate_connect;
+ unsigned int kato;
+ struct nvmf_host *host;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
+ bool disable_sqflow;
+ bool hdr_digest;
+ bool data_digest;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
+ int tos;
+ int fast_io_fail_tmo;
+};
+
+/*
+ * struct nvmf_transport_ops - used to register a specific
+ * fabric implementation of NVMe fabrics.
+ * @entry: Used by the fabrics library to add the new
+ * registration entry to its linked-list internal tree.
+ * @module: Transport module reference
+ * @name: Name of the NVMe fabric driver implementation.
+ * @required_opts: sysfs command-line options that must be specified
+ * when adding a new NVMe controller.
+ * @allowed_opts: sysfs command-line options that can be specified
+ * when adding a new NVMe controller.
+ * @create_ctrl(): function pointer to the transport-specific routine
+ * that allocates a controller and brings up the fabric
+ * connection to it, i.e. everything needed to connect to an
+ * NVMe controller using that fabric technology.
+ *
+ * Notes:
+ * 1. At minimum, 'required_opts' and 'allowed_opts' should
+ * be set to the same enum parsing options defined earlier.
+ * 2. create_ctrl() must be defined (even if it does nothing)
+ * 3. struct nvmf_transport_ops must be statically allocated in the
+ * module's .bss section so that a pure module_get on @module
+ * prevents the memory from being freed.
+ */ +struct nvmf_transport_ops { + struct list_head entry; + struct module *module; + const char *name; + int required_opts; + int allowed_opts; + struct nvme_ctrl *(*create_ctrl)(struct device *dev, + struct nvmf_ctrl_options *opts); +}; + +static inline bool +nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, + struct nvmf_ctrl_options *opts) +{ + if (ctrl->state == NVME_CTRL_DELETING || + ctrl->state == NVME_CTRL_DELETING_NOIO || + ctrl->state == NVME_CTRL_DEAD || + strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || + strcmp(opts->host->nqn, ctrl->opts->host->nqn) || + !uuid_equal(&opts->host->id, &ctrl->opts->host->id)) + return false; + + return true; +} + +static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl) +{ + if (!ctrl->subsys || + !strcmp(ctrl->opts->subsysnqn, NVME_DISC_SUBSYS_NAME)) + return ctrl->opts->subsysnqn; + return ctrl->subsys->subnqn; +} + +static inline void nvmf_complete_timed_out_request(struct request *rq) +{ + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } +} + +static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts) +{ + return min(opts->nr_io_queues, num_online_cpus()) + + min(opts->nr_write_queues, num_online_cpus()) + + min(opts->nr_poll_queues, num_online_cpus()); +} + +int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val); +int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val); +int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val); +int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl); +int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid); +int nvmf_register_transport(struct nvmf_transport_ops *ops); +void nvmf_unregister_transport(struct nvmf_transport_ops *ops); +void nvmf_free_options(struct nvmf_ctrl_options *opts); +int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); +bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); +bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, + struct nvmf_ctrl_options *opts); +void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues, + u32 io_queues[HCTX_MAX_TYPES]); +void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl, + u32 io_queues[HCTX_MAX_TYPES]); + +#endif /* _NVME_FABRICS_H */ diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c new file mode 100644 index 0000000000..1ba10a5c65 --- /dev/null +++ b/drivers/nvme/host/fault_inject.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * fault injection support for nvme. 
+ * + * Copyright (c) 2018, Oracle and/or its affiliates + */ + +#include <linux/moduleparam.h> +#include "nvme.h" + +static DECLARE_FAULT_ATTR(fail_default_attr); +/* optional fault injection attributes boot time option: + * nvme_core.fail_request=<interval>,<probability>,<space>,<times> + */ +static char *fail_request; +module_param(fail_request, charp, 0000); + +void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj, + const char *dev_name) +{ + struct dentry *dir, *parent; + struct fault_attr *attr = &fault_inj->attr; + + /* set default fault injection attribute */ + if (fail_request) + setup_fault_attr(&fail_default_attr, fail_request); + + /* create debugfs directory and attribute */ + parent = debugfs_create_dir(dev_name, NULL); + if (IS_ERR(parent)) { + pr_warn("%s: failed to create debugfs directory\n", dev_name); + return; + } + + *attr = fail_default_attr; + dir = fault_create_debugfs_attr("fault_inject", parent, attr); + if (IS_ERR(dir)) { + pr_warn("%s: failed to create debugfs attr\n", dev_name); + debugfs_remove_recursive(parent); + return; + } + fault_inj->parent = parent; + + /* create debugfs for status code and dont_retry */ + fault_inj->status = NVME_SC_INVALID_OPCODE; + fault_inj->dont_retry = true; + debugfs_create_x16("status", 0600, dir, &fault_inj->status); + debugfs_create_bool("dont_retry", 0600, dir, &fault_inj->dont_retry); +} + +void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject) +{ + /* remove debugfs directories */ + debugfs_remove_recursive(fault_inject->parent); +} + +void nvme_should_fail(struct request *req) +{ + struct gendisk *disk = req->q->disk; + struct nvme_fault_inject *fault_inject = NULL; + u16 status; + + if (disk) { + struct nvme_ns *ns = disk->private_data; + + if (ns) + fault_inject = &ns->fault_inject; + else + WARN_ONCE(1, "No namespace found for request\n"); + } else { + fault_inject = &nvme_req(req)->ctrl->fault_inject; + } + + if (fault_inject && should_fail(&fault_inject->attr, 1)) { + /* inject status code and DNR bit */ + status = fault_inject->status; + if (fault_inject->dont_retry) + status |= NVME_SC_DNR; + nvme_req(req)->status = status; + } +} +EXPORT_SYMBOL_GPL(nvme_should_fail); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c new file mode 100644 index 0000000000..46cce0ec35 --- /dev/null +++ b/drivers/nvme/host/fc.c @@ -0,0 +1,4006 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/module.h> +#include <linux/parser.h> +#include <uapi/scsi/fc/fc_fs.h> +#include <uapi/scsi/fc/fc_els.h> +#include <linux/delay.h> +#include <linux/overflow.h> +#include <linux/blk-cgroup.h> +#include "nvme.h" +#include "fabrics.h" +#include <linux/nvme-fc-driver.h> +#include <linux/nvme-fc.h> +#include "fc.h" +#include <scsi/scsi_transport_fc.h> +#include <linux/blk-mq-pci.h> + +/* *************************** Data Structures/Defines ****************** */ + + +enum nvme_fc_queue_flags { + NVME_FC_Q_CONNECTED = 0, + NVME_FC_Q_LIVE, +}; + +#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */ +#define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnects + * when connected and a + * connection failure. 
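/*
 * Editorial illustration, not part of this patch: poking the debugfs knobs
 * created by nvme_fault_inject_init() from userspace.  The example assumes
 * debugfs is mounted at /sys/kernel/debug and that the attributes were
 * created for controller "nvme0"; the chosen values (fail every request,
 * three times, without the DNR bit) are arbitrary.
 */
#include <stdio.h>

static int put(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/nvme0/fault_inject";
	char path[256];

	snprintf(path, sizeof(path), "%s/probability", dir);
	put(path, "100");		/* fail 100% of requests ... */
	snprintf(path, sizeof(path), "%s/times", dir);
	put(path, "3");			/* ... but only three times */
	snprintf(path, sizeof(path), "%s/dont_retry", dir);
	put(path, "N");			/* clear DNR so the host may retry */
	return 0;
}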
+ */ + +struct nvme_fc_queue { + struct nvme_fc_ctrl *ctrl; + struct device *dev; + struct blk_mq_hw_ctx *hctx; + void *lldd_handle; + size_t cmnd_capsule_len; + u32 qnum; + u32 rqcnt; + u32 seqno; + + u64 connection_id; + atomic_t csn; + + unsigned long flags; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +enum nvme_fcop_flags { + FCOP_FLAGS_TERMIO = (1 << 0), + FCOP_FLAGS_AEN = (1 << 1), +}; + +struct nvmefc_ls_req_op { + struct nvmefc_ls_req ls_req; + + struct nvme_fc_rport *rport; + struct nvme_fc_queue *queue; + struct request *rq; + u32 flags; + + int ls_error; + struct completion ls_done; + struct list_head lsreq_list; /* rport->ls_req_list */ + bool req_queued; +}; + +struct nvmefc_ls_rcv_op { + struct nvme_fc_rport *rport; + struct nvmefc_ls_rsp *lsrsp; + union nvmefc_ls_requests *rqstbuf; + union nvmefc_ls_responses *rspbuf; + u16 rqstdatalen; + bool handled; + dma_addr_t rspdma; + struct list_head lsrcv_list; /* rport->ls_rcv_list */ +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +enum nvme_fcpop_state { + FCPOP_STATE_UNINIT = 0, + FCPOP_STATE_IDLE = 1, + FCPOP_STATE_ACTIVE = 2, + FCPOP_STATE_ABORTED = 3, + FCPOP_STATE_COMPLETE = 4, +}; + +struct nvme_fc_fcp_op { + struct nvme_request nreq; /* + * nvme/host/core.c + * requires this to be + * the 1st element in the + * private structure + * associated with the + * request. + */ + struct nvmefc_fcp_req fcp_req; + + struct nvme_fc_ctrl *ctrl; + struct nvme_fc_queue *queue; + struct request *rq; + + atomic_t state; + u32 flags; + u32 rqno; + u32 nents; + + struct nvme_fc_cmd_iu cmd_iu; + struct nvme_fc_ersp_iu rsp_iu; +}; + +struct nvme_fcp_op_w_sgl { + struct nvme_fc_fcp_op op; + struct scatterlist sgl[NVME_INLINE_SG_CNT]; + uint8_t priv[]; +}; + +struct nvme_fc_lport { + struct nvme_fc_local_port localport; + + struct ida endp_cnt; + struct list_head port_list; /* nvme_fc_port_list */ + struct list_head endp_list; + struct device *dev; /* physical device for dma */ + struct nvme_fc_port_template *ops; + struct kref ref; + atomic_t act_rport_cnt; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +struct nvme_fc_rport { + struct nvme_fc_remote_port remoteport; + + struct list_head endp_list; /* for lport->endp_list */ + struct list_head ctrl_list; + struct list_head ls_req_list; + struct list_head ls_rcv_list; + struct list_head disc_list; + struct device *dev; /* physical device for dma */ + struct nvme_fc_lport *lport; + spinlock_t lock; + struct kref ref; + atomic_t act_ctrl_cnt; + unsigned long dev_loss_end; + struct work_struct lsrcv_work; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +/* fc_ctrl flags values - specified as bit positions */ +#define ASSOC_ACTIVE 0 +#define ASSOC_FAILED 1 +#define FCCTRL_TERMIO 2 + +struct nvme_fc_ctrl { + spinlock_t lock; + struct nvme_fc_queue *queues; + struct device *dev; + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + u32 cnum; + + bool ioq_live; + u64 association_id; + struct nvmefc_ls_rcv_op *rcv_disconn; + + struct list_head ctrl_list; /* rport->ctrl_list */ + + struct blk_mq_tag_set admin_tag_set; + struct blk_mq_tag_set tag_set; + + struct work_struct ioerr_work; + struct delayed_work connect_work; + + struct kref ref; + unsigned long flags; + u32 iocnt; + wait_queue_head_t ioabort_wait; + + struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; + + struct nvme_ctrl ctrl; +}; + +static inline struct nvme_fc_ctrl * +to_fc_ctrl(struct nvme_ctrl *ctrl) +{ + return 
container_of(ctrl, struct nvme_fc_ctrl, ctrl); +} + +static inline struct nvme_fc_lport * +localport_to_lport(struct nvme_fc_local_port *portptr) +{ + return container_of(portptr, struct nvme_fc_lport, localport); +} + +static inline struct nvme_fc_rport * +remoteport_to_rport(struct nvme_fc_remote_port *portptr) +{ + return container_of(portptr, struct nvme_fc_rport, remoteport); +} + +static inline struct nvmefc_ls_req_op * +ls_req_to_lsop(struct nvmefc_ls_req *lsreq) +{ + return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); +} + +static inline struct nvme_fc_fcp_op * +fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) +{ + return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); +} + + + +/* *************************** Globals **************************** */ + + +static DEFINE_SPINLOCK(nvme_fc_lock); + +static LIST_HEAD(nvme_fc_lport_list); +static DEFINE_IDA(nvme_fc_local_port_cnt); +static DEFINE_IDA(nvme_fc_ctrl_cnt); + +static struct workqueue_struct *nvme_fc_wq; + +static bool nvme_fc_waiting_to_unload; +static DECLARE_COMPLETION(nvme_fc_unload_proceed); + +/* + * These items are short-term. They will eventually be moved into + * a generic FC class. See comments in module init. + */ +static struct device *fc_udev_device; + +static void nvme_fc_complete_rq(struct request *rq); + +/* *********************** FC-NVME Port Management ************************ */ + +static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, + struct nvme_fc_queue *, unsigned int); + +static void nvme_fc_handle_ls_rqst_work(struct work_struct *work); + + +static void +nvme_fc_free_lport(struct kref *ref) +{ + struct nvme_fc_lport *lport = + container_of(ref, struct nvme_fc_lport, ref); + unsigned long flags; + + WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); + WARN_ON(!list_empty(&lport->endp_list)); + + /* remove from transport list */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_del(&lport->port_list); + if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) + complete(&nvme_fc_unload_proceed); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num); + ida_destroy(&lport->endp_cnt); + + put_device(lport->dev); + + kfree(lport); +} + +static void +nvme_fc_lport_put(struct nvme_fc_lport *lport) +{ + kref_put(&lport->ref, nvme_fc_free_lport); +} + +static int +nvme_fc_lport_get(struct nvme_fc_lport *lport) +{ + return kref_get_unless_zero(&lport->ref); +} + + +static struct nvme_fc_lport * +nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo, + struct nvme_fc_port_template *ops, + struct device *dev) +{ + struct nvme_fc_lport *lport; + unsigned long flags; + + spin_lock_irqsave(&nvme_fc_lock, flags); + + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + if (lport->localport.node_name != pinfo->node_name || + lport->localport.port_name != pinfo->port_name) + continue; + + if (lport->dev != dev) { + lport = ERR_PTR(-EXDEV); + goto out_done; + } + + if (lport->localport.port_state != FC_OBJSTATE_DELETED) { + lport = ERR_PTR(-EEXIST); + goto out_done; + } + + if (!nvme_fc_lport_get(lport)) { + /* + * fails if ref cnt already 0. 
If so, + * act as if lport already deleted + */ + lport = NULL; + goto out_done; + } + + /* resume the lport */ + + lport->ops = ops; + lport->localport.port_role = pinfo->port_role; + lport->localport.port_id = pinfo->port_id; + lport->localport.port_state = FC_OBJSTATE_ONLINE; + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return lport; + } + + lport = NULL; + +out_done: + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return lport; +} + +/** + * nvme_fc_register_localport - transport entry point called by an + * LLDD to register the existence of a NVME + * host FC port. + * @pinfo: pointer to information about the port to be registered + * @template: LLDD entrypoints and operational parameters for the port + * @dev: physical hardware device node port corresponds to. Will be + * used for DMA mappings + * @portptr: pointer to a local port pointer. Upon success, the routine + * will allocate a nvme_fc_local_port structure and place its + * address in the local port pointer. Upon failure, local port + * pointer will be set to 0. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. + */ +int +nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + struct nvme_fc_port_template *template, + struct device *dev, + struct nvme_fc_local_port **portptr) +{ + struct nvme_fc_lport *newrec; + unsigned long flags; + int ret, idx; + + if (!template->localport_delete || !template->remoteport_delete || + !template->ls_req || !template->fcp_io || + !template->ls_abort || !template->fcp_abort || + !template->max_hw_queues || !template->max_sgl_segments || + !template->max_dif_sgl_segments || !template->dma_boundary) { + ret = -EINVAL; + goto out_reghost_failed; + } + + /* + * look to see if there is already a localport that had been + * deregistered and in the process of waiting for all the + * references to fully be removed. If the references haven't + * expired, we can simply re-enable the localport. Remoteports + * and controller reconnections should resume naturally. 
+ */ + newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); + + /* found an lport, but something about its state is bad */ + if (IS_ERR(newrec)) { + ret = PTR_ERR(newrec); + goto out_reghost_failed; + + /* found existing lport, which was resumed */ + } else if (newrec) { + *portptr = &newrec->localport; + return 0; + } + + /* nothing found - allocate a new localport struct */ + + newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), + GFP_KERNEL); + if (!newrec) { + ret = -ENOMEM; + goto out_reghost_failed; + } + + idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_fail_kfree; + } + + if (!get_device(dev) && dev) { + ret = -ENODEV; + goto out_ida_put; + } + + INIT_LIST_HEAD(&newrec->port_list); + INIT_LIST_HEAD(&newrec->endp_list); + kref_init(&newrec->ref); + atomic_set(&newrec->act_rport_cnt, 0); + newrec->ops = template; + newrec->dev = dev; + ida_init(&newrec->endp_cnt); + if (template->local_priv_sz) + newrec->localport.private = &newrec[1]; + else + newrec->localport.private = NULL; + newrec->localport.node_name = pinfo->node_name; + newrec->localport.port_name = pinfo->port_name; + newrec->localport.port_role = pinfo->port_role; + newrec->localport.port_id = pinfo->port_id; + newrec->localport.port_state = FC_OBJSTATE_ONLINE; + newrec->localport.port_num = idx; + + spin_lock_irqsave(&nvme_fc_lock, flags); + list_add_tail(&newrec->port_list, &nvme_fc_lport_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + if (dev) + dma_set_seg_boundary(dev, template->dma_boundary); + + *portptr = &newrec->localport; + return 0; + +out_ida_put: + ida_free(&nvme_fc_local_port_cnt, idx); +out_fail_kfree: + kfree(newrec); +out_reghost_failed: + *portptr = NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(nvme_fc_register_localport); + +/** + * nvme_fc_unregister_localport - transport entry point called by an + * LLDD to deregister/remove a previously + * registered a NVME host FC port. + * @portptr: pointer to the (registered) local port that is to be deregistered. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. 
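/*
 * Editorial illustration, not part of this patch: the minimal shape of an
 * LLDD registering a local FC port with the code above.  All demo_* names,
 * the WWNN/WWPN/port_id values and the template limits are placeholders;
 * the numeric template fields shown are the ones
 * nvme_fc_register_localport() insists on, and FC_PORT_ROLE_NVME_INITIATOR
 * is assumed to come from <linux/nvme-fc-driver.h>.
 */
#include <linux/nvme-fc-driver.h>

static struct nvme_fc_port_template demo_fc_template = {
	/*
	 * .localport_delete, .remoteport_delete, .ls_req, .fcp_io,
	 * .ls_abort and .fcp_abort must also be wired up to real LLDD
	 * handlers - the registration above rejects a template without them.
	 */
	.max_hw_queues		= 4,
	.max_sgl_segments	= 128,
	.max_dif_sgl_segments	= 128,
	.dma_boundary		= 0xFFFFFFFF,
};

static int demo_register_hba(struct device *dev,
			     struct nvme_fc_local_port **lport)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= 0x20000090fa000001ULL,	/* WWNN */
		.port_name	= 0x10000090fa000001ULL,	/* WWPN */
		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
		.port_id	= 0x010200,
	};

	return nvme_fc_register_localport(&pinfo, &demo_fc_template,
					  dev, lport);
}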
+ */ +int +nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) +{ + struct nvme_fc_lport *lport = localport_to_lport(portptr); + unsigned long flags; + + if (!portptr) + return -EINVAL; + + spin_lock_irqsave(&nvme_fc_lock, flags); + + if (portptr->port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&nvme_fc_lock, flags); + return -EINVAL; + } + portptr->port_state = FC_OBJSTATE_DELETED; + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + if (atomic_read(&lport->act_rport_cnt) == 0) + lport->ops->localport_delete(&lport->localport); + + nvme_fc_lport_put(lport); + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); + +/* + * TRADDR strings, per FC-NVME are fixed format: + * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters + * udev event will only differ by prefix of what field is + * being specified: + * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters + * 19 + 43 + null_fudge = 64 characters + */ +#define FCNVME_TRADDR_LENGTH 64 + +static void +nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport, + struct nvme_fc_rport *rport) +{ + char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/ + char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/ + char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL }; + + if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) + return; + + snprintf(hostaddr, sizeof(hostaddr), + "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", + lport->localport.node_name, lport->localport.port_name); + snprintf(tgtaddr, sizeof(tgtaddr), + "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", + rport->remoteport.node_name, rport->remoteport.port_name); + kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); +} + +static void +nvme_fc_free_rport(struct kref *ref) +{ + struct nvme_fc_rport *rport = + container_of(ref, struct nvme_fc_rport, ref); + struct nvme_fc_lport *lport = + localport_to_lport(rport->remoteport.localport); + unsigned long flags; + + WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); + WARN_ON(!list_empty(&rport->ctrl_list)); + + /* remove from lport list */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_del(&rport->endp_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + WARN_ON(!list_empty(&rport->disc_list)); + ida_free(&lport->endp_cnt, rport->remoteport.port_num); + + kfree(rport); + + nvme_fc_lport_put(lport); +} + +static void +nvme_fc_rport_put(struct nvme_fc_rport *rport) +{ + kref_put(&rport->ref, nvme_fc_free_rport); +} + +static int +nvme_fc_rport_get(struct nvme_fc_rport *rport) +{ + return kref_get_unless_zero(&rport->ref); +} + +static void +nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) +{ + switch (nvme_ctrl_state(&ctrl->ctrl)) { + case NVME_CTRL_NEW: + case NVME_CTRL_CONNECTING: + /* + * As all reconnects were suppressed, schedule a + * connect. + */ + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: connectivity re-established. " + "Attempting reconnect\n", ctrl->cnum); + + queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); + break; + + case NVME_CTRL_RESETTING: + /* + * Controller is already in the process of terminating the + * association. No need to do anything further. The reconnect + * step will naturally occur after the reset completes. 
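/*
 * Editorial illustration, not part of this patch: building the fixed-format
 * FC-NVME transport address described above, the same layout used for the
 * NVMEFC_TRADDR=/NVMEFC_HOST_TRADDR= udev payloads.  The WWNN/WWPN values
 * are made up.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t wwnn = 0x20000090fa000001ULL;
	uint64_t wwpn = 0x10000090fa000001ULL;
	char traddr[64];	/* FCNVME_TRADDR_LENGTH */

	snprintf(traddr, sizeof(traddr),
		 "nn-0x%016" PRIx64 ":pn-0x%016" PRIx64, wwnn, wwpn);
	printf("%s\n", traddr);
	return 0;
}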
+ */ + break; + + default: + /* no action to take - let it delete */ + break; + } +} + +static struct nvme_fc_rport * +nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, + struct nvme_fc_port_info *pinfo) +{ + struct nvme_fc_rport *rport; + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + + spin_lock_irqsave(&nvme_fc_lock, flags); + + list_for_each_entry(rport, &lport->endp_list, endp_list) { + if (rport->remoteport.node_name != pinfo->node_name || + rport->remoteport.port_name != pinfo->port_name) + continue; + + if (!nvme_fc_rport_get(rport)) { + rport = ERR_PTR(-ENOLCK); + goto out_done; + } + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + spin_lock_irqsave(&rport->lock, flags); + + /* has it been unregistered */ + if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { + /* means lldd called us twice */ + spin_unlock_irqrestore(&rport->lock, flags); + nvme_fc_rport_put(rport); + return ERR_PTR(-ESTALE); + } + + rport->remoteport.port_role = pinfo->port_role; + rport->remoteport.port_id = pinfo->port_id; + rport->remoteport.port_state = FC_OBJSTATE_ONLINE; + rport->dev_loss_end = 0; + + /* + * kick off a reconnect attempt on all associations to the + * remote port. A successful reconnects will resume i/o. + */ + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) + nvme_fc_resume_controller(ctrl); + + spin_unlock_irqrestore(&rport->lock, flags); + + return rport; + } + + rport = NULL; + +out_done: + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return rport; +} + +static inline void +__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport, + struct nvme_fc_port_info *pinfo) +{ + if (pinfo->dev_loss_tmo) + rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; + else + rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; +} + +/** + * nvme_fc_register_remoteport - transport entry point called by an + * LLDD to register the existence of a NVME + * subsystem FC port on its fabric. + * @localport: pointer to the (registered) local port that the remote + * subsystem port is connected to. + * @pinfo: pointer to information about the port to be registered + * @portptr: pointer to a remote port pointer. Upon success, the routine + * will allocate a nvme_fc_remote_port structure and place its + * address in the remote port pointer. Upon failure, remote port + * pointer will be set to 0. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. + */ +int +nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, + struct nvme_fc_port_info *pinfo, + struct nvme_fc_remote_port **portptr) +{ + struct nvme_fc_lport *lport = localport_to_lport(localport); + struct nvme_fc_rport *newrec; + unsigned long flags; + int ret, idx; + + if (!nvme_fc_lport_get(lport)) { + ret = -ESHUTDOWN; + goto out_reghost_failed; + } + + /* + * look to see if there is already a remoteport that is waiting + * for a reconnect (within dev_loss_tmo) with the same WWN's. + * If so, transition to it and reconnect. 
+ */ + newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo); + + /* found an rport, but something about its state is bad */ + if (IS_ERR(newrec)) { + ret = PTR_ERR(newrec); + goto out_lport_put; + + /* found existing rport, which was resumed */ + } else if (newrec) { + nvme_fc_lport_put(lport); + __nvme_fc_set_dev_loss_tmo(newrec, pinfo); + nvme_fc_signal_discovery_scan(lport, newrec); + *portptr = &newrec->remoteport; + return 0; + } + + /* nothing found - allocate a new remoteport struct */ + + newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), + GFP_KERNEL); + if (!newrec) { + ret = -ENOMEM; + goto out_lport_put; + } + + idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_kfree_rport; + } + + INIT_LIST_HEAD(&newrec->endp_list); + INIT_LIST_HEAD(&newrec->ctrl_list); + INIT_LIST_HEAD(&newrec->ls_req_list); + INIT_LIST_HEAD(&newrec->disc_list); + kref_init(&newrec->ref); + atomic_set(&newrec->act_ctrl_cnt, 0); + spin_lock_init(&newrec->lock); + newrec->remoteport.localport = &lport->localport; + INIT_LIST_HEAD(&newrec->ls_rcv_list); + newrec->dev = lport->dev; + newrec->lport = lport; + if (lport->ops->remote_priv_sz) + newrec->remoteport.private = &newrec[1]; + else + newrec->remoteport.private = NULL; + newrec->remoteport.port_role = pinfo->port_role; + newrec->remoteport.node_name = pinfo->node_name; + newrec->remoteport.port_name = pinfo->port_name; + newrec->remoteport.port_id = pinfo->port_id; + newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; + newrec->remoteport.port_num = idx; + __nvme_fc_set_dev_loss_tmo(newrec, pinfo); + INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); + + spin_lock_irqsave(&nvme_fc_lock, flags); + list_add_tail(&newrec->endp_list, &lport->endp_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + nvme_fc_signal_discovery_scan(lport, newrec); + + *portptr = &newrec->remoteport; + return 0; + +out_kfree_rport: + kfree(newrec); +out_lport_put: + nvme_fc_lport_put(lport); +out_reghost_failed: + *portptr = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); + +static int +nvme_fc_abort_lsops(struct nvme_fc_rport *rport) +{ + struct nvmefc_ls_req_op *lsop; + unsigned long flags; + +restart: + spin_lock_irqsave(&rport->lock, flags); + + list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { + if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { + lsop->flags |= FCOP_FLAGS_TERMIO; + spin_unlock_irqrestore(&rport->lock, flags); + rport->lport->ops->ls_abort(&rport->lport->localport, + &rport->remoteport, + &lsop->ls_req); + goto restart; + } + } + spin_unlock_irqrestore(&rport->lock, flags); + + return 0; +} + +static void +nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) +{ + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: controller connectivity lost. Awaiting " + "Reconnect", ctrl->cnum); + + switch (nvme_ctrl_state(&ctrl->ctrl)) { + case NVME_CTRL_NEW: + case NVME_CTRL_LIVE: + /* + * Schedule a controller reset. The reset will terminate the + * association and schedule the reconnect timer. Reconnects + * will be attempted until either the ctlr_loss_tmo + * (max_retries * connect_delay) expires or the remoteport's + * dev_loss_tmo expires. + */ + if (nvme_reset_ctrl(&ctrl->ctrl)) { + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: Couldn't schedule reset.\n", + ctrl->cnum); + nvme_delete_ctrl(&ctrl->ctrl); + } + break; + + case NVME_CTRL_CONNECTING: + /* + * The association has already been terminated and the + * controller is attempting reconnects. 
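/*
 * Editorial illustration, not part of this patch: the call an LLDD would
 * make once it discovers an NVMe-capable target port on the fabric,
 * matching the registration path above.  The demo_* name is hypothetical
 * and the role constants are assumed to come from <linux/nvme-fc-driver.h>.
 */
#include <linux/nvme-fc-driver.h>

static int demo_target_port_found(struct nvme_fc_local_port *localport,
				  u64 wwnn, u64 wwpn, u32 port_id,
				  struct nvme_fc_remote_port **remoteport)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_id	= port_id,
		.port_role	= FC_PORT_ROLE_NVME_TARGET |
				  FC_PORT_ROLE_NVME_DISCOVERY,
		.dev_loss_tmo	= 60,	/* 0 would mean "use the default" */
	};

	return nvme_fc_register_remoteport(localport, &pinfo, remoteport);
}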
No need to do anything + * futher. Reconnects will be attempted until either the + * ctlr_loss_tmo (max_retries * connect_delay) expires or the + * remoteport's dev_loss_tmo expires. + */ + break; + + case NVME_CTRL_RESETTING: + /* + * Controller is already in the process of terminating the + * association. No need to do anything further. The reconnect + * step will kick in naturally after the association is + * terminated. + */ + break; + + case NVME_CTRL_DELETING: + case NVME_CTRL_DELETING_NOIO: + default: + /* no action to take - let it delete */ + break; + } +} + +/** + * nvme_fc_unregister_remoteport - transport entry point called by an + * LLDD to deregister/remove a previously + * registered a NVME subsystem FC port. + * @portptr: pointer to the (registered) remote port that is to be + * deregistered. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. + */ +int +nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) +{ + struct nvme_fc_rport *rport = remoteport_to_rport(portptr); + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + + if (!portptr) + return -EINVAL; + + spin_lock_irqsave(&rport->lock, flags); + + if (portptr->port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&rport->lock, flags); + return -EINVAL; + } + portptr->port_state = FC_OBJSTATE_DELETED; + + rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); + + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + /* if dev_loss_tmo==0, dev loss is immediate */ + if (!portptr->dev_loss_tmo) { + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: controller connectivity lost.\n", + ctrl->cnum); + nvme_delete_ctrl(&ctrl->ctrl); + } else + nvme_fc_ctrl_connectivity_loss(ctrl); + } + + spin_unlock_irqrestore(&rport->lock, flags); + + nvme_fc_abort_lsops(rport); + + if (atomic_read(&rport->act_ctrl_cnt) == 0) + rport->lport->ops->remoteport_delete(portptr); + + /* + * release the reference, which will allow, if all controllers + * go away, which should only occur after dev_loss_tmo occurs, + * for the rport to be torn down. + */ + nvme_fc_rport_put(rport); + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); + +/** + * nvme_fc_rescan_remoteport - transport entry point called by an + * LLDD to request a nvme device rescan. + * @remoteport: pointer to the (registered) remote port that is to be + * rescanned. + * + * Returns: N/A + */ +void +nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport) +{ + struct nvme_fc_rport *rport = remoteport_to_rport(remoteport); + + nvme_fc_signal_discovery_scan(rport->lport, rport); +} +EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport); + +int +nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr, + u32 dev_loss_tmo) +{ + struct nvme_fc_rport *rport = remoteport_to_rport(portptr); + unsigned long flags; + + spin_lock_irqsave(&rport->lock, flags); + + if (portptr->port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&rport->lock, flags); + return -EINVAL; + } + + /* a dev_loss_tmo of 0 (immediate) is allowed to be set */ + rport->remoteport.dev_loss_tmo = dev_loss_tmo; + + spin_unlock_irqrestore(&rport->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); + + +/* *********************** FC-NVME DMA Handling **************************** */ + +/* + * The fcloop device passes in a NULL device pointer. Real LLD's will + * pass in a valid device pointer. 
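/*
 * Editorial illustration, not part of this patch: two follow-up calls an
 * LLDD might make on an already-registered remote port - shrinking
 * dev_loss_tmo and asking for a namespace rescan.  "remoteport" is assumed
 * to be the pointer returned by nvme_fc_register_remoteport().
 */
static void demo_remoteport_tuning(struct nvme_fc_remote_port *remoteport)
{
	/* give up on a lost port after 30 seconds instead of the 60 s default */
	if (nvme_fc_set_remoteport_devloss(remoteport, 30))
		pr_warn("demo: port no longer online, devloss not updated\n");

	/* ask the host to redo discovery/scanning against this port */
	nvme_fc_rescan_remoteport(remoteport);
}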
If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (those that return just a dma address), we noop
+ * them, returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ?
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); +} + +static inline void +fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + if (dev) + dma_unmap_sg(dev, sg, nents, dir); +} + +/* *********************** FC-NVME LS Handling **************************** */ + +static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); +static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); + +static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); + +static void +__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) +{ + struct nvme_fc_rport *rport = lsop->rport; + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + + spin_lock_irqsave(&rport->lock, flags); + + if (!lsop->req_queued) { + spin_unlock_irqrestore(&rport->lock, flags); + return; + } + + list_del(&lsop->lsreq_list); + + lsop->req_queued = false; + + spin_unlock_irqrestore(&rport->lock, flags); + + fc_dma_unmap_single(rport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + + nvme_fc_rport_put(rport); +} + +static int +__nvme_fc_send_ls_req(struct nvme_fc_rport *rport, + struct nvmefc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + int ret = 0; + + if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) + return -ECONNREFUSED; + + if (!nvme_fc_rport_get(rport)) + return -ESHUTDOWN; + + lsreq->done = done; + lsop->rport = rport; + lsop->req_queued = false; + INIT_LIST_HEAD(&lsop->lsreq_list); + init_completion(&lsop->ls_done); + + lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, + lsreq->rqstlen + lsreq->rsplen, + DMA_BIDIRECTIONAL); + if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { + ret = -EFAULT; + goto out_putrport; + } + lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; + + spin_lock_irqsave(&rport->lock, flags); + + list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); + + lsop->req_queued = true; + + spin_unlock_irqrestore(&rport->lock, flags); + + ret = rport->lport->ops->ls_req(&rport->lport->localport, + &rport->remoteport, lsreq); + if (ret) + goto out_unlink; + + return 0; + +out_unlink: + lsop->ls_error = ret; + spin_lock_irqsave(&rport->lock, flags); + lsop->req_queued = false; + list_del(&lsop->lsreq_list); + spin_unlock_irqrestore(&rport->lock, flags); + fc_dma_unmap_single(rport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); +out_putrport: + nvme_fc_rport_put(rport); + + return ret; +} + +static void +nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) +{ + struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); + + lsop->ls_error = status; + complete(&lsop->ls_done); +} + +static int +nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; + int ret; + + ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); + + if (!ret) { + /* + * No timeout/not interruptible as we need the struct + * to exist until the lldd calls us back. Thus mandate + * wait until driver calls back. lldd responsible for + * the timeout action + */ + wait_for_completion(&lsop->ls_done); + + __nvme_fc_finish_ls_req(lsop); + + ret = lsop->ls_error; + } + + if (ret) + return ret; + + /* ACC or RJT payload ? 
*/ + if (rjt->w0.ls_cmd == FCNVME_LS_RJT) + return -ENXIO; + + return 0; +} + +static int +nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, + struct nvmefc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + /* don't wait for completion */ + + return __nvme_fc_send_ls_req(rport, lsop, done); +} + +static int +nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) +{ + struct nvmefc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; + struct fcnvme_ls_cr_assoc_acc *assoc_acc; + unsigned long flags; + int ret, fcret = 0; + + lsop = kzalloc((sizeof(*lsop) + + sizeof(*assoc_rqst) + sizeof(*assoc_acc) + + ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); + if (!lsop) { + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: send Create Association failed: ENOMEM\n", + ctrl->cnum); + ret = -ENOMEM; + goto out_no_memory; + } + + assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1]; + assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; + lsreq = &lsop->ls_req; + if (ctrl->lport->ops->lsrqst_priv_sz) + lsreq->private = &assoc_acc[1]; + else + lsreq->private = NULL; + + assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; + assoc_rqst->desc_list_len = + cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); + + assoc_rqst->assoc_cmd.desc_tag = + cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); + assoc_rqst->assoc_cmd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); + + assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); + assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); + /* Linux supports only Dynamic controllers */ + assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); + uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); + strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, + min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); + strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, + min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); + + lsop->queue = queue; + lsreq->rqstaddr = assoc_rqst; + lsreq->rqstlen = sizeof(*assoc_rqst); + lsreq->rspaddr = assoc_acc; + lsreq->rsplen = sizeof(*assoc_acc); + lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; + + ret = nvme_fc_send_ls_req(ctrl->rport, lsop); + if (ret) + goto out_free_buffer; + + /* process connect LS completion */ + + /* validate the ACC response */ + if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) + fcret = VERR_LSACC; + else if (assoc_acc->hdr.desc_list_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_cr_assoc_acc))) + fcret = VERR_CR_ASSOC_ACC_LEN; + else if (assoc_acc->hdr.rqst.desc_tag != + cpu_to_be32(FCNVME_LSDESC_RQST)) + fcret = VERR_LSDESC_RQST; + else if (assoc_acc->hdr.rqst.desc_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) + fcret = VERR_LSDESC_RQST_LEN; + else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) + fcret = VERR_CR_ASSOC; + else if (assoc_acc->associd.desc_tag != + cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) + fcret = VERR_ASSOC_ID; + else if (assoc_acc->associd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id))) + fcret = VERR_ASSOC_ID_LEN; + else if (assoc_acc->connectid.desc_tag != + cpu_to_be32(FCNVME_LSDESC_CONN_ID)) + fcret = VERR_CONN_ID; + else if (assoc_acc->connectid.desc_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) + fcret = VERR_CONN_ID_LEN; + + if (fcret) { + ret = -EBADF; + dev_err(ctrl->dev, + "q %d Create Association LS failed: 
%s\n", + queue->qnum, validation_errors[fcret]); + } else { + spin_lock_irqsave(&ctrl->lock, flags); + ctrl->association_id = + be64_to_cpu(assoc_acc->associd.association_id); + queue->connection_id = + be64_to_cpu(assoc_acc->connectid.connection_id); + set_bit(NVME_FC_Q_CONNECTED, &queue->flags); + spin_unlock_irqrestore(&ctrl->lock, flags); + } + +out_free_buffer: + kfree(lsop); +out_no_memory: + if (ret) + dev_err(ctrl->dev, + "queue %d connect admin queue failed (%d).\n", + queue->qnum, ret); + return ret; +} + +static int +nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, + u16 qsize, u16 ersp_ratio) +{ + struct nvmefc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct fcnvme_ls_cr_conn_rqst *conn_rqst; + struct fcnvme_ls_cr_conn_acc *conn_acc; + int ret, fcret = 0; + + lsop = kzalloc((sizeof(*lsop) + + sizeof(*conn_rqst) + sizeof(*conn_acc) + + ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); + if (!lsop) { + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", + ctrl->cnum); + ret = -ENOMEM; + goto out_no_memory; + } + + conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1]; + conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; + lsreq = &lsop->ls_req; + if (ctrl->lport->ops->lsrqst_priv_sz) + lsreq->private = (void *)&conn_acc[1]; + else + lsreq->private = NULL; + + conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; + conn_rqst->desc_list_len = cpu_to_be32( + sizeof(struct fcnvme_lsdesc_assoc_id) + + sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); + + conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); + conn_rqst->associd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id)); + conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); + conn_rqst->connect_cmd.desc_tag = + cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); + conn_rqst->connect_cmd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); + conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); + conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); + conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); + + lsop->queue = queue; + lsreq->rqstaddr = conn_rqst; + lsreq->rqstlen = sizeof(*conn_rqst); + lsreq->rspaddr = conn_acc; + lsreq->rsplen = sizeof(*conn_acc); + lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; + + ret = nvme_fc_send_ls_req(ctrl->rport, lsop); + if (ret) + goto out_free_buffer; + + /* process connect LS completion */ + + /* validate the ACC response */ + if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) + fcret = VERR_LSACC; + else if (conn_acc->hdr.desc_list_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) + fcret = VERR_CR_CONN_ACC_LEN; + else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) + fcret = VERR_LSDESC_RQST; + else if (conn_acc->hdr.rqst.desc_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) + fcret = VERR_LSDESC_RQST_LEN; + else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) + fcret = VERR_CR_CONN; + else if (conn_acc->connectid.desc_tag != + cpu_to_be32(FCNVME_LSDESC_CONN_ID)) + fcret = VERR_CONN_ID; + else if (conn_acc->connectid.desc_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) + fcret = VERR_CONN_ID_LEN; + + if (fcret) { + ret = -EBADF; + dev_err(ctrl->dev, + "q %d Create I/O Connection LS failed: %s\n", + queue->qnum, validation_errors[fcret]); + } else { + queue->connection_id = + be64_to_cpu(conn_acc->connectid.connection_id); + set_bit(NVME_FC_Q_CONNECTED, 
&queue->flags); + } + +out_free_buffer: + kfree(lsop); +out_no_memory: + if (ret) + dev_err(ctrl->dev, + "queue %d connect I/O queue failed (%d).\n", + queue->qnum, ret); + return ret; +} + +static void +nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) +{ + struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); + + __nvme_fc_finish_ls_req(lsop); + + /* fc-nvme initiator doesn't care about success or failure of cmd */ + + kfree(lsop); +} + +/* + * This routine sends a FC-NVME LS to disconnect (aka terminate) + * the FC-NVME Association. Terminating the association also + * terminates the FC-NVME connections (per queue, both admin and io + * queues) that are part of the association. E.g. things are torn + * down, and the related FC-NVME Association ID and Connection IDs + * become invalid. + * + * The behavior of the fc-nvme initiator is such that it's + * understanding of the association and connections will implicitly + * be torn down. The action is implicit as it may be due to a loss of + * connectivity with the fc-nvme target, so you may never get a + * response even if you tried. As such, the action of this routine + * is to asynchronously send the LS, ignore any results of the LS, and + * continue on with terminating the association. If the fc-nvme target + * is present and receives the LS, it too can tear down. + */ +static void +nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) +{ + struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; + struct fcnvme_ls_disconnect_assoc_acc *discon_acc; + struct nvmefc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + int ret; + + lsop = kzalloc((sizeof(*lsop) + + sizeof(*discon_rqst) + sizeof(*discon_acc) + + ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); + if (!lsop) { + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: send Disconnect Association " + "failed: ENOMEM\n", + ctrl->cnum); + return; + } + + discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; + discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; + lsreq = &lsop->ls_req; + if (ctrl->lport->ops->lsrqst_priv_sz) + lsreq->private = (void *)&discon_acc[1]; + else + lsreq->private = NULL; + + nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, + ctrl->association_id); + + ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, + nvme_fc_disconnect_assoc_done); + if (ret) + kfree(lsop); +} + +static void +nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +{ + struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; + struct nvme_fc_rport *rport = lsop->rport; + struct nvme_fc_lport *lport = rport->lport; + unsigned long flags; + + spin_lock_irqsave(&rport->lock, flags); + list_del(&lsop->lsrcv_list); + spin_unlock_irqrestore(&rport->lock, flags); + + fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, + sizeof(*lsop->rspbuf), DMA_TO_DEVICE); + fc_dma_unmap_single(lport->dev, lsop->rspdma, + sizeof(*lsop->rspbuf), DMA_TO_DEVICE); + + kfree(lsop->rspbuf); + kfree(lsop->rqstbuf); + kfree(lsop); + + nvme_fc_rport_put(rport); +} + +static void +nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) +{ + struct nvme_fc_rport *rport = lsop->rport; + struct nvme_fc_lport *lport = rport->lport; + struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; + int ret; + + fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, + sizeof(*lsop->rspbuf), DMA_TO_DEVICE); + + ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, + lsop->lsrsp); + if (ret) { + dev_warn(lport->dev, + "LLDD rejected LS RSP xmt: LS %d status %d\n", + w0->ls_cmd, 
ret); + nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); + return; + } +} + +static struct nvme_fc_ctrl * +nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, + struct nvmefc_ls_rcv_op *lsop) +{ + struct fcnvme_ls_disconnect_assoc_rqst *rqst = + &lsop->rqstbuf->rq_dis_assoc; + struct nvme_fc_ctrl *ctrl, *ret = NULL; + struct nvmefc_ls_rcv_op *oldls = NULL; + u64 association_id = be64_to_cpu(rqst->associd.association_id); + unsigned long flags; + + spin_lock_irqsave(&rport->lock, flags); + + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + if (!nvme_fc_ctrl_get(ctrl)) + continue; + spin_lock(&ctrl->lock); + if (association_id == ctrl->association_id) { + oldls = ctrl->rcv_disconn; + ctrl->rcv_disconn = lsop; + ret = ctrl; + } + spin_unlock(&ctrl->lock); + if (ret) + /* leave the ctrl get reference */ + break; + nvme_fc_ctrl_put(ctrl); + } + + spin_unlock_irqrestore(&rport->lock, flags); + + /* transmit a response for anything that was pending */ + if (oldls) { + dev_info(rport->lport->dev, + "NVME-FC{%d}: Multiple Disconnect Association " + "LS's received\n", ctrl->cnum); + /* overwrite good response with bogus failure */ + oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, + sizeof(*oldls->rspbuf), + rqst->w0.ls_cmd, + FCNVME_RJT_RC_UNAB, + FCNVME_RJT_EXP_NONE, 0); + nvme_fc_xmt_ls_rsp(oldls); + } + + return ret; +} + +/* + * returns true to mean LS handled and ls_rsp can be sent + * returns false to defer ls_rsp xmt (will be done as part of + * association termination) + */ +static bool +nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) +{ + struct nvme_fc_rport *rport = lsop->rport; + struct fcnvme_ls_disconnect_assoc_rqst *rqst = + &lsop->rqstbuf->rq_dis_assoc; + struct fcnvme_ls_disconnect_assoc_acc *acc = + &lsop->rspbuf->rsp_dis_assoc; + struct nvme_fc_ctrl *ctrl = NULL; + int ret = 0; + + memset(acc, 0, sizeof(*acc)); + + ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); + if (!ret) { + /* match an active association */ + ctrl = nvme_fc_match_disconn_ls(rport, lsop); + if (!ctrl) + ret = VERR_NO_ASSOC; + } + + if (ret) { + dev_info(rport->lport->dev, + "Disconnect LS failed: %s\n", + validation_errors[ret]); + lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, + (ret == VERR_NO_ASSOC) ? + FCNVME_RJT_RC_INV_ASSOC : + FCNVME_RJT_RC_LOGIC, + FCNVME_RJT_EXP_NONE, 0); + return true; + } + + /* format an ACCept response */ + + lsop->lsrsp->rsplen = sizeof(*acc); + + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_disconnect_assoc_acc)), + FCNVME_LS_DISCONNECT_ASSOC); + + /* + * the transmit of the response will occur after the exchanges + * for the association have been ABTS'd by + * nvme_fc_delete_association(). + */ + + /* fail the association */ + nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); + + /* release the reference taken by nvme_fc_match_disconn_ls() */ + nvme_fc_ctrl_put(ctrl); + + return false; +} + +/* + * Actual Processing routine for received FC-NVME LS Requests from the LLD + * returns true if a response should be sent afterward, false if rsp will + * be sent asynchronously. + */ +static bool +nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) +{ + struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; + bool ret = true; + + lsop->lsrsp->nvme_fc_private = lsop; + lsop->lsrsp->rspbuf = lsop->rspbuf; + lsop->lsrsp->rspdma = lsop->rspdma; + lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; + /* Be preventative. 
handlers will later set to valid length */ + lsop->lsrsp->rsplen = 0; + + /* + * handlers: + * parse request input, execute the request, and format the + * LS response + */ + switch (w0->ls_cmd) { + case FCNVME_LS_DISCONNECT_ASSOC: + ret = nvme_fc_ls_disconnect_assoc(lsop); + break; + case FCNVME_LS_DISCONNECT_CONN: + lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, + sizeof(*lsop->rspbuf), w0->ls_cmd, + FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); + break; + case FCNVME_LS_CREATE_ASSOCIATION: + case FCNVME_LS_CREATE_CONNECTION: + lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, + sizeof(*lsop->rspbuf), w0->ls_cmd, + FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); + break; + default: + lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, + sizeof(*lsop->rspbuf), w0->ls_cmd, + FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); + break; + } + + return(ret); +} + +static void +nvme_fc_handle_ls_rqst_work(struct work_struct *work) +{ + struct nvme_fc_rport *rport = + container_of(work, struct nvme_fc_rport, lsrcv_work); + struct fcnvme_ls_rqst_w0 *w0; + struct nvmefc_ls_rcv_op *lsop; + unsigned long flags; + bool sendrsp; + +restart: + sendrsp = true; + spin_lock_irqsave(&rport->lock, flags); + list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { + if (lsop->handled) + continue; + + lsop->handled = true; + if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&rport->lock, flags); + sendrsp = nvme_fc_handle_ls_rqst(lsop); + } else { + spin_unlock_irqrestore(&rport->lock, flags); + w0 = &lsop->rqstbuf->w0; + lsop->lsrsp->rsplen = nvme_fc_format_rjt( + lsop->rspbuf, + sizeof(*lsop->rspbuf), + w0->ls_cmd, + FCNVME_RJT_RC_UNAB, + FCNVME_RJT_EXP_NONE, 0); + } + if (sendrsp) + nvme_fc_xmt_ls_rsp(lsop); + goto restart; + } + spin_unlock_irqrestore(&rport->lock, flags); +} + +static +void nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport, + struct fcnvme_ls_rqst_w0 *w0) +{ + dev_info(lport->dev, "RCV %s LS failed: No memory\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : ""); +} + +/** + * nvme_fc_rcv_ls_req - transport entry point called by an LLDD + * upon the reception of a NVME LS request. + * + * The nvme-fc layer will copy payload to an internal structure for + * processing. As such, upon completion of the routine, the LLDD may + * immediately free/reuse the LS request buffer passed in the call. + * + * If this routine returns error, the LLDD should abort the exchange. + * + * @portptr: pointer to the (registered) remote port that the LS + * was received from. The remoteport is associated with + * a specific localport. + * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be + * used to reference the exchange corresponding to the LS + * when issuing an ls response. + * @lsreqbuf: pointer to the buffer containing the LS Request + * @lsreqbuf_len: length, in bytes, of the received LS request + */ +int +nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, + struct nvmefc_ls_rsp *lsrsp, + void *lsreqbuf, u32 lsreqbuf_len) +{ + struct nvme_fc_rport *rport = remoteport_to_rport(portptr); + struct nvme_fc_lport *lport = rport->lport; + struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; + struct nvmefc_ls_rcv_op *lsop; + unsigned long flags; + int ret; + + nvme_fc_rport_get(rport); + + /* validate there's a routine to transmit a response */ + if (!lport->ops->xmt_ls_rsp) { + dev_info(lport->dev, + "RCV %s LS failed: no LLDD xmt_ls_rsp\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
+ nvmefc_ls_names[w0->ls_cmd] : ""); + ret = -EINVAL; + goto out_put; + } + + if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { + dev_info(lport->dev, + "RCV %s LS failed: payload too large\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : ""); + ret = -E2BIG; + goto out_put; + } + + lsop = kzalloc(sizeof(*lsop), GFP_KERNEL); + if (!lsop) { + nvme_fc_rcv_ls_req_err_msg(lport, w0); + ret = -ENOMEM; + goto out_put; + } + + lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL); + lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL); + if (!lsop->rqstbuf || !lsop->rspbuf) { + nvme_fc_rcv_ls_req_err_msg(lport, w0); + ret = -ENOMEM; + goto out_free; + } + + lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, + sizeof(*lsop->rspbuf), + DMA_TO_DEVICE); + if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { + dev_info(lport->dev, + "RCV %s LS failed: DMA mapping failure\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : ""); + ret = -EFAULT; + goto out_free; + } + + lsop->rport = rport; + lsop->lsrsp = lsrsp; + + memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); + lsop->rqstdatalen = lsreqbuf_len; + + spin_lock_irqsave(&rport->lock, flags); + if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&rport->lock, flags); + ret = -ENOTCONN; + goto out_unmap; + } + list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); + spin_unlock_irqrestore(&rport->lock, flags); + + schedule_work(&rport->lsrcv_work); + + return 0; + +out_unmap: + fc_dma_unmap_single(lport->dev, lsop->rspdma, + sizeof(*lsop->rspbuf), DMA_TO_DEVICE); +out_free: + kfree(lsop->rspbuf); + kfree(lsop->rqstbuf); + kfree(lsop); +out_put: + nvme_fc_rport_put(rport); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); + + +/* *********************** NVME Ctrl Routines **************************** */ + +static void +__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_fcp_op *op) +{ + fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, + sizeof(op->rsp_iu), DMA_FROM_DEVICE); + fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, + sizeof(op->cmd_iu), DMA_TO_DEVICE); + + atomic_set(&op->state, FCPOP_STATE_UNINIT); +} + +static void +nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx) +{ + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + + return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op); +} + +static int +__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) +{ + unsigned long flags; + int opstate; + + spin_lock_irqsave(&ctrl->lock, flags); + opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); + if (opstate != FCPOP_STATE_ACTIVE) + atomic_set(&op->state, opstate); + else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { + op->flags |= FCOP_FLAGS_TERMIO; + ctrl->iocnt++; + } + spin_unlock_irqrestore(&ctrl->lock, flags); + + if (opstate != FCPOP_STATE_ACTIVE) + return -ECANCELED; + + ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, + &ctrl->rport->remoteport, + op->queue->lldd_handle, + &op->fcp_req); + + return 0; +} + +static void +nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; + int i; + + /* ensure we've initialized the ops once */ + if (!(aen_op->flags & FCOP_FLAGS_AEN)) + return; + + for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) + __nvme_fc_abort_op(ctrl, aen_op); +} + +static inline void +__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, + struct 
nvme_fc_fcp_op *op, int opstate) +{ + unsigned long flags; + + if (opstate == FCPOP_STATE_ABORTED) { + spin_lock_irqsave(&ctrl->lock, flags); + if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && + op->flags & FCOP_FLAGS_TERMIO) { + if (!--ctrl->iocnt) + wake_up(&ctrl->ioabort_wait); + } + spin_unlock_irqrestore(&ctrl->lock, flags); + } +} + +static void +nvme_fc_ctrl_ioerr_work(struct work_struct *work) +{ + struct nvme_fc_ctrl *ctrl = + container_of(work, struct nvme_fc_ctrl, ioerr_work); + + nvme_fc_error_recovery(ctrl, "transport detected io error"); +} + +/* + * nvme_fc_io_getuuid - Routine called to get the appid field + * associated with request by the lldd + * @req:IO request from nvme fc to driver + * Returns: UUID if there is an appid associated with VM or + * NULL if the user/libvirt has not set the appid to VM + */ +char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req) +{ + struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); + struct request *rq = op->rq; + + if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio) + return NULL; + return blkcg_get_fc_appid(rq->bio); +} +EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid); + +static void +nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) +{ + struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); + struct request *rq = op->rq; + struct nvmefc_fcp_req *freq = &op->fcp_req; + struct nvme_fc_ctrl *ctrl = op->ctrl; + struct nvme_fc_queue *queue = op->queue; + struct nvme_completion *cqe = &op->rsp_iu.cqe; + struct nvme_command *sqe = &op->cmd_iu.sqe; + __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); + union nvme_result result; + bool terminate_assoc = true; + int opstate; + + /* + * WARNING: + * The current linux implementation of a nvme controller + * allocates a single tag set for all io queues and sizes + * the io queues to fully hold all possible tags. Thus, the + * implementation does not reference or care about the sqhd + * value as it never needs to use the sqhd/sqtail pointers + * for submission pacing. + * + * This affects the FC-NVME implementation in two ways: + * 1) As the value doesn't matter, we don't need to waste + * cycles extracting it from ERSPs and stamping it in the + * cases where the transport fabricates CQEs on successful + * completions. + * 2) The FC-NVME implementation requires that delivery of + * ERSP completions are to go back to the nvme layer in order + * relative to the rsn, such that the sqhd value will always + * be "in order" for the nvme layer. As the nvme layer in + * linux doesn't care about sqhd, there's no need to return + * them in order. + * + * Additionally: + * As the core nvme layer in linux currently does not look at + * every field in the cqe - in cases where the FC transport must + * fabricate a CQE, the following fields will not be set as they + * are not referenced: + * cqe.sqid, cqe.sqhd, cqe.command_id + * + * Failure or error of an individual i/o, in a transport + * detected fashion unrelated to the nvme completion status, + * potentially cause the initiator and target sides to get out + * of sync on SQ head/tail (aka outstanding io count allowed). + * Per FC-NVME spec, failure of an individual command requires + * the connection to be terminated, which in turn requires the + * association to be terminated. 
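+ *
+ * In short, the handling below is:
+ *   - rcv_rsplen of 0 or NVME_FC_SIZEOF_ZEROS_RSP: success with no CQE
+ *     payload; a zeroed result is fabricated after checking the
+ *     transferred length against the CMD IU data_len.
+ *   - rcv_rsplen of sizeof(struct nvme_fc_ersp_iu): a full ERSP; the
+ *     CQE status and result are taken from the IU after validation.
+ *   - any other length, or any validation failure, is treated as a
+ *     transport error (NVME_SC_HOST_PATH_ERROR) and leaves
+ *     terminate_assoc set so the association gets torn down.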
+ */ + + opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); + + fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, + sizeof(op->rsp_iu), DMA_FROM_DEVICE); + + if (opstate == FCPOP_STATE_ABORTED) + status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); + else if (freq->status) { + status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: io failed due to lldd error %d\n", + ctrl->cnum, freq->status); + } + + /* + * For the linux implementation, if we have an unsuccesful + * status, they blk-mq layer can typically be called with the + * non-zero status and the content of the cqe isn't important. + */ + if (status) + goto done; + + /* + * command completed successfully relative to the wire + * protocol. However, validate anything received and + * extract the status and result from the cqe (create it + * where necessary). + */ + + switch (freq->rcv_rsplen) { + + case 0: + case NVME_FC_SIZEOF_ZEROS_RSP: + /* + * No response payload or 12 bytes of payload (which + * should all be zeros) are considered successful and + * no payload in the CQE by the transport. + */ + if (freq->transferred_length != + be32_to_cpu(op->cmd_iu.data_len)) { + status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: io failed due to bad transfer " + "length: %d vs expected %d\n", + ctrl->cnum, freq->transferred_length, + be32_to_cpu(op->cmd_iu.data_len)); + goto done; + } + result.u64 = 0; + break; + + case sizeof(struct nvme_fc_ersp_iu): + /* + * The ERSP IU contains a full completion with CQE. + * Validate ERSP IU and look at cqe. + */ + if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != + (freq->rcv_rsplen / 4) || + be32_to_cpu(op->rsp_iu.xfrd_len) != + freq->transferred_length || + op->rsp_iu.ersp_result || + sqe->common.command_id != cqe->command_id)) { + status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " + "iu len %d, xfr len %d vs %d, status code " + "%d, cmdid %d vs %d\n", + ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), + be32_to_cpu(op->rsp_iu.xfrd_len), + freq->transferred_length, + op->rsp_iu.ersp_result, + sqe->common.command_id, + cqe->command_id); + goto done; + } + result = cqe->result; + status = cqe->status; + break; + + default: + status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " + "len %d\n", + ctrl->cnum, freq->rcv_rsplen); + goto done; + } + + terminate_assoc = false; + +done: + if (op->flags & FCOP_FLAGS_AEN) { + nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); + __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); + atomic_set(&op->state, FCPOP_STATE_IDLE); + op->flags = FCOP_FLAGS_AEN; /* clear other flags */ + nvme_fc_ctrl_put(ctrl); + goto check_error; + } + + __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); + if (!nvme_try_complete_req(rq, status, result)) + nvme_fc_complete_rq(rq); + +check_error: + if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) + queue_work(nvme_reset_wq, &ctrl->ioerr_work); +} + +static int +__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, + struct request *rq, u32 rqno) +{ + struct nvme_fcp_op_w_sgl *op_w_sgl = + container_of(op, typeof(*op_w_sgl), op); + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; + int ret = 0; + + memset(op, 0, sizeof(*op)); + op->fcp_req.cmdaddr = &op->cmd_iu; + op->fcp_req.cmdlen = sizeof(op->cmd_iu); + 
op->fcp_req.rspaddr = &op->rsp_iu; + op->fcp_req.rsplen = sizeof(op->rsp_iu); + op->fcp_req.done = nvme_fc_fcpio_done; + op->ctrl = ctrl; + op->queue = queue; + op->rq = rq; + op->rqno = rqno; + + cmdiu->format_id = NVME_CMD_FORMAT_ID; + cmdiu->fc_id = NVME_CMD_FC_ID; + cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); + if (queue->qnum) + cmdiu->rsv_cat = fccmnd_set_cat_css(0, + (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); + else + cmdiu->rsv_cat = fccmnd_set_cat_admin(0); + + op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, + &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); + if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { + dev_err(ctrl->dev, + "FCP Op failed - cmdiu dma mapping failed.\n"); + ret = -EFAULT; + goto out_on_error; + } + + op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, + &op->rsp_iu, sizeof(op->rsp_iu), + DMA_FROM_DEVICE); + if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { + dev_err(ctrl->dev, + "FCP Op failed - rspiu dma mapping failed.\n"); + ret = -EFAULT; + } + + atomic_set(&op->state, FCPOP_STATE_IDLE); +out_on_error: + return ret; +} + +static int +nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, + unsigned int hctx_idx, unsigned int numa_node) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); + struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); + int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; + struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; + int res; + + res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); + if (res) + return res; + op->op.fcp_req.first_sgl = op->sgl; + op->op.fcp_req.private = &op->priv[0]; + nvme_req(rq)->ctrl = &ctrl->ctrl; + nvme_req(rq)->cmd = &op->op.cmd_iu.sqe; + return res; +} + +static int +nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_fcp_op *aen_op; + struct nvme_fc_cmd_iu *cmdiu; + struct nvme_command *sqe; + void *private = NULL; + int i, ret; + + aen_op = ctrl->aen_ops; + for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { + if (ctrl->lport->ops->fcprqst_priv_sz) { + private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, + GFP_KERNEL); + if (!private) + return -ENOMEM; + } + + cmdiu = &aen_op->cmd_iu; + sqe = &cmdiu->sqe; + ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], + aen_op, (struct request *)NULL, + (NVME_AQ_BLK_MQ_DEPTH + i)); + if (ret) { + kfree(private); + return ret; + } + + aen_op->flags = FCOP_FLAGS_AEN; + aen_op->fcp_req.private = private; + + memset(sqe, 0, sizeof(*sqe)); + sqe->common.opcode = nvme_admin_async_event; + /* Note: core layer may overwrite the sqe.command_id value */ + sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; + } + return 0; +} + +static void +nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_fcp_op *aen_op; + int i; + + cancel_work_sync(&ctrl->ctrl.async_event_work); + aen_op = ctrl->aen_ops; + for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { + __nvme_fc_exit_request(ctrl, aen_op); + + kfree(aen_op->fcp_req.private); + aen_op->fcp_req.private = NULL; + } +} + +static inline int +__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data); + struct nvme_fc_queue *queue = &ctrl->queues[qidx]; + + hctx->driver_data = queue; + queue->hctx = hctx; + return 0; +} + +static int +nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) +{ + return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1); +} + +static int 
+nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + return __nvme_fc_init_hctx(hctx, data, hctx_idx); +} + +static void +nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) +{ + struct nvme_fc_queue *queue; + + queue = &ctrl->queues[idx]; + memset(queue, 0, sizeof(*queue)); + queue->ctrl = ctrl; + queue->qnum = idx; + atomic_set(&queue->csn, 0); + queue->dev = ctrl->dev; + + if (idx > 0) + queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; + else + queue->cmnd_capsule_len = sizeof(struct nvme_command); + + /* + * Considered whether we should allocate buffers for all SQEs + * and CQEs and dma map them - mapping their respective entries + * into the request structures (kernel vm addr and dma address) + * thus the driver could use the buffers/mappings directly. + * It only makes sense if the LLDD would use them for its + * messaging api. It's very unlikely most adapter api's would use + * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload + * structures were used instead. + */ +} + +/* + * This routine terminates a queue at the transport level. + * The transport has already ensured that all outstanding ios on + * the queue have been terminated. + * The transport will send a Disconnect LS request to terminate + * the queue's connection. Termination of the admin queue will also + * terminate the association at the target. + */ +static void +nvme_fc_free_queue(struct nvme_fc_queue *queue) +{ + if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) + return; + + clear_bit(NVME_FC_Q_LIVE, &queue->flags); + /* + * Current implementation never disconnects a single queue. + * It always terminates a whole association. So there is never + * a disconnect(queue) LS sent to the target. + */ + + queue->connection_id = 0; + atomic_set(&queue->csn, 0); +} + +static void +__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, unsigned int qidx) +{ + if (ctrl->lport->ops->delete_queue) + ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, + queue->lldd_handle); + queue->lldd_handle = NULL; +} + +static void +nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->ctrl.queue_count; i++) + nvme_fc_free_queue(&ctrl->queues[i]); +} + +static int +__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) +{ + int ret = 0; + + queue->lldd_handle = NULL; + if (ctrl->lport->ops->create_queue) + ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, + qidx, qsize, &queue->lldd_handle); + + return ret; +} + +static void +nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; + int i; + + for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) + __nvme_fc_delete_hw_queue(ctrl, queue, i); +} + +static int +nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) +{ + struct nvme_fc_queue *queue = &ctrl->queues[1]; + int i, ret; + + for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { + ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); + if (ret) + goto delete_queues; + } + + return 0; + +delete_queues: + for (; i > 0; i--) + __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); + return ret; +} + +static int +nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) +{ + int i, ret = 0; + + for (i = 1; i < ctrl->ctrl.queue_count; i++) { + ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, + (qsize / 5)); + if (ret) + 
break; + ret = nvmf_connect_io_queue(&ctrl->ctrl, i); + if (ret) + break; + + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); + } + + return ret; +} + +static void +nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->ctrl.queue_count; i++) + nvme_fc_init_queue(ctrl, i); +} + +static void +nvme_fc_ctrl_free(struct kref *ref) +{ + struct nvme_fc_ctrl *ctrl = + container_of(ref, struct nvme_fc_ctrl, ref); + unsigned long flags; + + if (ctrl->ctrl.tagset) + nvme_remove_io_tag_set(&ctrl->ctrl); + + /* remove from rport list */ + spin_lock_irqsave(&ctrl->rport->lock, flags); + list_del(&ctrl->ctrl_list); + spin_unlock_irqrestore(&ctrl->rport->lock, flags); + + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvme_remove_admin_tag_set(&ctrl->ctrl); + + kfree(ctrl->queues); + + put_device(ctrl->dev); + nvme_fc_rport_put(ctrl->rport); + + ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); + if (ctrl->ctrl.opts) + nvmf_free_options(ctrl->ctrl.opts); + kfree(ctrl); +} + +static void +nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) +{ + kref_put(&ctrl->ref, nvme_fc_ctrl_free); +} + +static int +nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) +{ + return kref_get_unless_zero(&ctrl->ref); +} + +/* + * All accesses from nvme core layer done - can now free the + * controller. Called after last nvme_put_ctrl() call + */ +static void +nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + + WARN_ON(nctrl != &ctrl->ctrl); + + nvme_fc_ctrl_put(ctrl); +} + +/* + * This routine is used by the transport when it needs to find active + * io on a queue that is to be terminated. The transport uses + * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke + * this routine to kill them on a 1 by 1 basis. + * + * As FC allocates FC exchange for each io, the transport must contact + * the LLDD to terminate the exchange, thus releasing the FC exchange. + * After terminating the exchange the LLDD will call the transport's + * normal io done path for the request, but it will have an aborted + * status. The done path will return the io request back to the block + * layer with an error status. + */ +static bool nvme_fc_terminate_exchange(struct request *req, void *data) +{ + struct nvme_ctrl *nctrl = data; + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); + + op->nreq.flags |= NVME_REQ_CANCELLED; + __nvme_fc_abort_op(ctrl, op); + return true; +} + +/* + * This routine runs through all outstanding commands on the association + * and aborts them. This routine is typically be called by the + * delete_association routine. It is also called due to an error during + * reconnect. In that scenario, it is most likely a command that initializes + * the controller, including fabric Connect commands on io queues, that + * may have timed out or failed thus the io must be killed for the connect + * thread to see the error. + */ +static void +__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) +{ + int q; + + /* + * if aborting io, the queues are no longer good, mark them + * all as not live. + */ + if (ctrl->ctrl.queue_count > 1) { + for (q = 1; q < ctrl->ctrl.queue_count; q++) + clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); + } + clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); + + /* + * If io queues are present, stop them and terminate all outstanding + * ios on them. 
As FC allocates FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+ * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us which ios are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_quiesce_io_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ if (start_queues)
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
+ }
+
+ /*
+ * Other transports, which don't have link-level contexts bound
+ * to sqe's, would try to gracefully shutdown the controller by
+ * writing the registers for shutdown and polling (call
+ * nvme_disable_ctrl()). Given a bunch of i/o was potentially
+ * just aborted and we will wait on those contexts, and given
+ * there was no indication of how live the controller is on the
+ * link, don't send more io to create more contexts for the
+ * shutdown. Let the controller fail via keepalive failure if
+ * it's still present.
+ */
+
+ /*
+ * clean up the admin queue. Same thing as above.
+ */
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ if (start_queues)
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+}
+
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+{
+ /*
+ * If an error (io timeout, etc) occurred while (re)connecting, if
+ * the remote port requested termination of the association
+ * (disconnect_ls), or if an error (timeout or abort) occurred on
+ * an io while creating the controller: abort any ios on the
+ * association and let the create_association error path resolve
+ * things.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport error during (re)connect\n",
+ ctrl->cnum);
+ return;
+ }
+
+ /* Otherwise, only proceed if in LIVE state - e.g. on first error */
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ return;
+
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport association event: %s\n",
+ ctrl->cnum, errmsg);
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
+
+ nvme_reset_ctrl(&ctrl->ctrl);
+}
+
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+
+ /*
+ * Attempt to abort the offending command. Command completion
+ * will detect the aborted io and will fail the connection.
+ */
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "x%08x/x%08x\n",
+ ctrl->cnum, op->queue->qnum, sqe->common.opcode,
+ sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ if (__nvme_fc_abort_op(ctrl, op))
+ nvme_fc_error_recovery(ctrl, "io timeout abort failed");
+
+ /*
+ * The io abort has been initiated. Have the reset timer
+ * restarted and the abort completion will complete the io
+ * shortly. This avoids a synchronous wait while the abort finishes.
+ */
+ return BLK_EH_RESET_TIMER;
+}
+
+static int
+nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ int ret;
+
+ freq->sg_cnt = 0;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return 0;
+
+ freq->sg_table.sgl = freq->first_sgl;
+ ret = sg_alloc_table_chained(&freq->sg_table,
+ blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+ NVME_INLINE_SG_CNT);
+ if (ret)
+ return -ENOMEM;
+
+ op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+ WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
+ freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
+ op->nents, rq_dma_dir(rq));
+ if (unlikely(freq->sg_cnt <= 0)) {
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
+ freq->sg_cnt = 0;
+ return -EFAULT;
+ }
+
+ /*
+ * TODO: blk_integrity_rq(rq) for DIF
+ */
+ return 0;
+}
+
+static void
+nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+
+ if (!freq->sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
+ rq_dma_dir(rq));
+
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
+
+ freq->sg_cnt = 0;
+}
+
+/*
+ * In FC, the queue is a logical thing. At transport connect, the target
+ * creates its "queue" and returns a handle that is to be given to the
+ * target whenever it posts something to the corresponding SQ. When an
+ * SQE is sent on an SQ, FC effectively considers the SQE, or rather the
+ * command contained within the SQE, an io, and assigns a FC exchange
+ * to it. The SQE and the associated SQ handle are sent in the initial
+ * CMD IU sent on the exchange. All transfers relative to the io occur
+ * as part of the exchange. The CQE is the last thing for the io,
+ * which is transferred (explicitly or implicitly) with the RSP IU
+ * sent on the exchange. After the CQE is received, the FC exchange is
+ * terminated and the exchange may be reused for a different io.
+ *
+ * The transport-to-LLDD api has the transport make a new fcp io
+ * request to the LLDD. The LLDD then allocates a FC exchange
+ * resource and transfers the command. The LLDD will then process all
+ * steps to complete the io. Upon completion, the transport done routine
+ * is called.
+ *
+ * So - while the operation is outstanding to the LLDD, there is a link
+ * level FC exchange resource that is also outstanding. This must be
+ * considered in all cleanup operations.
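+ *
+ * A rough sketch of the per-io flow built on top of this:
+ *
+ *   nvme_fc_queue_rq() -> nvme_fc_start_fcp_op(): format the CMD IU,
+ *       map data via nvme_fc_map_data(), then hand the io to the LLDD
+ *       through lport->ops->fcp_io(), which binds it to an exchange.
+ *   LLDD completion -> nvme_fc_fcpio_done(): validate the ERSP (or
+ *       fabricate a CQE for short responses) and complete the blk-mq
+ *       request.
+ *   timeout/teardown -> __nvme_fc_abort_op() asks the LLDD to abort
+ *       the exchange; the io still completes through
+ *       nvme_fc_fcpio_done(), just with an aborted status.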
+ */ +static blk_status_t +nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, + struct nvme_fc_fcp_op *op, u32 data_len, + enum nvmefc_fcp_datadir io_dir) +{ + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; + struct nvme_command *sqe = &cmdiu->sqe; + int ret, opstate; + + /* + * before attempting to send the io, check to see if we believe + * the target device is present + */ + if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) + return BLK_STS_RESOURCE; + + if (!nvme_fc_ctrl_get(ctrl)) + return BLK_STS_IOERR; + + /* format the FC-NVME CMD IU and fcp_req */ + cmdiu->connection_id = cpu_to_be64(queue->connection_id); + cmdiu->data_len = cpu_to_be32(data_len); + switch (io_dir) { + case NVMEFC_FCP_WRITE: + cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; + break; + case NVMEFC_FCP_READ: + cmdiu->flags = FCNVME_CMD_FLAGS_READ; + break; + case NVMEFC_FCP_NODATA: + cmdiu->flags = 0; + break; + } + op->fcp_req.payload_length = data_len; + op->fcp_req.io_dir = io_dir; + op->fcp_req.transferred_length = 0; + op->fcp_req.rcv_rsplen = 0; + op->fcp_req.status = NVME_SC_SUCCESS; + op->fcp_req.sqid = cpu_to_le16(queue->qnum); + + /* + * validate per fabric rules, set fields mandated by fabric spec + * as well as those by FC-NVME spec. + */ + WARN_ON_ONCE(sqe->common.metadata); + sqe->common.flags |= NVME_CMD_SGL_METABUF; + + /* + * format SQE DPTR field per FC-NVME rules: + * type=0x5 Transport SGL Data Block Descriptor + * subtype=0xA Transport-specific value + * address=0 + * length=length of the data series + */ + sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | + NVME_SGL_FMT_TRANSPORT_A; + sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); + sqe->rw.dptr.sgl.addr = 0; + + if (!(op->flags & FCOP_FLAGS_AEN)) { + ret = nvme_fc_map_data(ctrl, op->rq, op); + if (ret < 0) { + nvme_cleanup_cmd(op->rq); + nvme_fc_ctrl_put(ctrl); + if (ret == -ENOMEM || ret == -EAGAIN) + return BLK_STS_RESOURCE; + return BLK_STS_IOERR; + } + } + + fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, + sizeof(op->cmd_iu), DMA_TO_DEVICE); + + atomic_set(&op->state, FCPOP_STATE_ACTIVE); + + if (!(op->flags & FCOP_FLAGS_AEN)) + nvme_start_request(op->rq); + + cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); + ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, + &ctrl->rport->remoteport, + queue->lldd_handle, &op->fcp_req); + + if (ret) { + /* + * If the lld fails to send the command is there an issue with + * the csn value? If the command that fails is the Connect, + * no - as the connection won't be live. If it is a command + * post-connect, it's possible a gap in csn may be created. + * Does this matter? As Linux initiators don't send fused + * commands, no. The gap would exist, but as there's nothing + * that depends on csn order to be delivered on the target + * side, it shouldn't hurt. It would be difficult for a + * target to even detect the csn gap as it has no idea when the + * cmd with the csn was supposed to arrive. 
+ */ + opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); + __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); + + if (!(op->flags & FCOP_FLAGS_AEN)) { + nvme_fc_unmap_data(ctrl, op->rq, op); + nvme_cleanup_cmd(op->rq); + } + + nvme_fc_ctrl_put(ctrl); + + if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && + ret != -EBUSY) + return BLK_STS_IOERR; + + return BLK_STS_RESOURCE; + } + + return BLK_STS_OK; +} + +static blk_status_t +nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct nvme_fc_queue *queue = hctx->driver_data; + struct nvme_fc_ctrl *ctrl = queue->ctrl; + struct request *rq = bd->rq; + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + enum nvmefc_fcp_datadir io_dir; + bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); + u32 data_len; + blk_status_t ret; + + if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || + !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) + return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); + + ret = nvme_setup_cmd(ns, rq); + if (ret) + return ret; + + /* + * nvme core doesn't quite treat the rq opaquely. Commands such + * as WRITE ZEROES will return a non-zero rq payload_bytes yet + * there is no actual payload to be transferred. + * To get it right, key data transmission on there being 1 or + * more physical segments in the sg list. If there is no + * physical segments, there is no payload. + */ + if (blk_rq_nr_phys_segments(rq)) { + data_len = blk_rq_payload_bytes(rq); + io_dir = ((rq_data_dir(rq) == WRITE) ? + NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); + } else { + data_len = 0; + io_dir = NVMEFC_FCP_NODATA; + } + + + return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); +} + +static void +nvme_fc_submit_async_event(struct nvme_ctrl *arg) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); + struct nvme_fc_fcp_op *aen_op; + blk_status_t ret; + + if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) + return; + + aen_op = &ctrl->aen_ops[0]; + + ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, + NVMEFC_FCP_NODATA); + if (ret) + dev_err(ctrl->ctrl.device, + "failed async event work\n"); +} + +static void +nvme_fc_complete_rq(struct request *rq) +{ + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_ctrl *ctrl = op->ctrl; + + atomic_set(&op->state, FCPOP_STATE_IDLE); + op->flags &= ~FCOP_FLAGS_TERMIO; + + nvme_fc_unmap_data(ctrl, rq, op); + nvme_complete_rq(rq); + nvme_fc_ctrl_put(ctrl); +} + +static void nvme_fc_map_queues(struct blk_mq_tag_set *set) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); + int i; + + for (i = 0; i < set->nr_maps; i++) { + struct blk_mq_queue_map *map = &set->map[i]; + + if (!map->nr_queues) { + WARN_ON(i == HCTX_TYPE_DEFAULT); + continue; + } + + /* Call LLDD map queue functionality if defined */ + if (ctrl->lport->ops->map_queues) + ctrl->lport->ops->map_queues(&ctrl->lport->localport, + map); + else + blk_mq_map_queues(map); + } +} + +static const struct blk_mq_ops nvme_fc_mq_ops = { + .queue_rq = nvme_fc_queue_rq, + .complete = nvme_fc_complete_rq, + .init_request = nvme_fc_init_request, + .exit_request = nvme_fc_exit_request, + .init_hctx = nvme_fc_init_hctx, + .timeout = nvme_fc_timeout, + .map_queues = nvme_fc_map_queues, +}; + +static int +nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; + int ret; + + nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), + 
ctrl->lport->ops->max_hw_queues); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret) { + dev_info(ctrl->ctrl.device, + "set_queue_count failed: %d\n", ret); + return ret; + } + + ctrl->ctrl.queue_count = nr_io_queues + 1; + if (!nr_io_queues) + return 0; + + nvme_fc_init_io_queues(ctrl); + + ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, + &nvme_fc_mq_ops, 1, + struct_size_t(struct nvme_fcp_op_w_sgl, priv, + ctrl->lport->ops->fcprqst_priv_sz)); + if (ret) + return ret; + + ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); + if (ret) + goto out_cleanup_tagset; + + ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); + if (ret) + goto out_delete_hw_queues; + + ctrl->ioq_live = true; + + return 0; + +out_delete_hw_queues: + nvme_fc_delete_hw_io_queues(ctrl); +out_cleanup_tagset: + nvme_remove_io_tag_set(&ctrl->ctrl); + nvme_fc_free_io_queues(ctrl); + + /* force put free routine to ignore io queues */ + ctrl->ctrl.tagset = NULL; + + return ret; +} + +static int +nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; + unsigned int nr_io_queues; + int ret; + + nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), + ctrl->lport->ops->max_hw_queues); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret) { + dev_info(ctrl->ctrl.device, + "set_queue_count failed: %d\n", ret); + return ret; + } + + if (!nr_io_queues && prior_ioq_cnt) { + dev_info(ctrl->ctrl.device, + "Fail Reconnect: At least 1 io queue " + "required (was %d)\n", prior_ioq_cnt); + return -ENOSPC; + } + + ctrl->ctrl.queue_count = nr_io_queues + 1; + /* check for io queues existing */ + if (ctrl->ctrl.queue_count == 1) + return 0; + + if (prior_ioq_cnt != nr_io_queues) { + dev_info(ctrl->ctrl.device, + "reconnect: revising io queue count from %d to %d\n", + prior_ioq_cnt, nr_io_queues); + blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); + } + + ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); + if (ret) + goto out_free_io_queues; + + ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); + if (ret) + goto out_delete_hw_queues; + + return 0; + +out_delete_hw_queues: + nvme_fc_delete_hw_io_queues(ctrl); +out_free_io_queues: + nvme_fc_free_io_queues(ctrl); + return ret; +} + +static void +nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) +{ + struct nvme_fc_lport *lport = rport->lport; + + atomic_inc(&lport->act_rport_cnt); +} + +static void +nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) +{ + struct nvme_fc_lport *lport = rport->lport; + u32 cnt; + + cnt = atomic_dec_return(&lport->act_rport_cnt); + if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) + lport->ops->localport_delete(&lport->localport); +} + +static int +nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_rport *rport = ctrl->rport; + u32 cnt; + + if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) + return 1; + + cnt = atomic_inc_return(&rport->act_ctrl_cnt); + if (cnt == 1) + nvme_fc_rport_active_on_lport(rport); + + return 0; +} + +static int +nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_rport *rport = ctrl->rport; + struct nvme_fc_lport *lport = rport->lport; + u32 cnt; + + /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ + + cnt = atomic_dec_return(&rport->act_ctrl_cnt); + if (cnt == 0) { + if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 
+ lport->ops->remoteport_delete(&rport->remoteport); + nvme_fc_rport_inactive_on_lport(rport); + } + + return 0; +} + +/* + * This routine restarts the controller on the host side, and + * on the link side, recreates the controller association. + */ +static int +nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + struct nvmefc_ls_rcv_op *disls = NULL; + unsigned long flags; + int ret; + bool changed; + + ++ctrl->ctrl.nr_reconnects; + + if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) + return -ENODEV; + + if (nvme_fc_ctlr_active_on_rport(ctrl)) + return -ENOTUNIQ; + + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: create association : host wwpn 0x%016llx " + " rport wwpn 0x%016llx: NQN \"%s\"\n", + ctrl->cnum, ctrl->lport->localport.port_name, + ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); + + clear_bit(ASSOC_FAILED, &ctrl->flags); + + /* + * Create the admin queue + */ + + ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, + NVME_AQ_DEPTH); + if (ret) + goto out_free_queue; + + ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], + NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); + if (ret) + goto out_delete_hw_queue; + + ret = nvmf_connect_admin_queue(&ctrl->ctrl); + if (ret) + goto out_disconnect_admin_queue; + + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); + + /* + * Check controller capabilities + * + * todo:- add code to check if ctrl attributes changed from + * prior connection values + */ + + ret = nvme_enable_ctrl(&ctrl->ctrl); + if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) + ret = -EIO; + if (ret) + goto out_disconnect_admin_queue; + + ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; + ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << + (ilog2(SZ_4K) - 9); + + nvme_unquiesce_admin_queue(&ctrl->ctrl); + + ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); + if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) + ret = -EIO; + if (ret) + goto out_disconnect_admin_queue; + + /* sanity checks */ + + /* FC-NVME does not have other data in the capsule */ + if (ctrl->ctrl.icdoff) { + dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", + ctrl->ctrl.icdoff); + ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + goto out_disconnect_admin_queue; + } + + /* FC-NVME supports normal SGL Data Block Descriptors */ + if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { + dev_err(ctrl->ctrl.device, + "Mandatory sgls are not supported!\n"); + ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + goto out_disconnect_admin_queue; + } + + if (opts->queue_size > ctrl->ctrl.maxcmd) { + /* warn if maxcmd is lower than queue_size */ + dev_warn(ctrl->ctrl.device, + "queue_size %zu > ctrl maxcmd %u, reducing " + "to maxcmd\n", + opts->queue_size, ctrl->ctrl.maxcmd); + opts->queue_size = ctrl->ctrl.maxcmd; + ctrl->ctrl.sqsize = opts->queue_size - 1; + } + + ret = nvme_fc_init_aen_ops(ctrl); + if (ret) + goto out_term_aen_ops; + + /* + * Create the io queues + */ + + if (ctrl->ctrl.queue_count > 1) { + if (!ctrl->ioq_live) + ret = nvme_fc_create_io_queues(ctrl); + else + ret = nvme_fc_recreate_io_queues(ctrl); + } + if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) + ret = -EIO; + if (ret) + goto out_term_aen_ops; + + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); + + ctrl->ctrl.nr_reconnects = 0; + + if (changed) + nvme_start_ctrl(&ctrl->ctrl); + + return 0; /* Success */ + +out_term_aen_ops: + nvme_fc_term_aen_ops(ctrl); +out_disconnect_admin_queue: + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: 
create_assoc failed, assoc_id %llx ret %d\n", + ctrl->cnum, ctrl->association_id, ret); + /* send a Disconnect(association) LS to fc-nvme target */ + nvme_fc_xmt_disconnect_assoc(ctrl); + spin_lock_irqsave(&ctrl->lock, flags); + ctrl->association_id = 0; + disls = ctrl->rcv_disconn; + ctrl->rcv_disconn = NULL; + spin_unlock_irqrestore(&ctrl->lock, flags); + if (disls) + nvme_fc_xmt_ls_rsp(disls); +out_delete_hw_queue: + __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); +out_free_queue: + nvme_fc_free_queue(&ctrl->queues[0]); + clear_bit(ASSOC_ACTIVE, &ctrl->flags); + nvme_fc_ctlr_inactive_on_rport(ctrl); + + return ret; +} + + +/* + * This routine stops operation of the controller on the host side. + * On the host os stack side: Admin and IO queues are stopped, + * outstanding ios on them terminated via FC ABTS. + * On the link side: the association is terminated. + */ +static void +nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) +{ + struct nvmefc_ls_rcv_op *disls = NULL; + unsigned long flags; + + if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) + return; + + spin_lock_irqsave(&ctrl->lock, flags); + set_bit(FCCTRL_TERMIO, &ctrl->flags); + ctrl->iocnt = 0; + spin_unlock_irqrestore(&ctrl->lock, flags); + + __nvme_fc_abort_outstanding_ios(ctrl, false); + + /* kill the aens as they are a separate path */ + nvme_fc_abort_aen_ops(ctrl); + + /* wait for all io that had to be aborted */ + spin_lock_irq(&ctrl->lock); + wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); + clear_bit(FCCTRL_TERMIO, &ctrl->flags); + spin_unlock_irq(&ctrl->lock); + + nvme_fc_term_aen_ops(ctrl); + + /* + * send a Disconnect(association) LS to fc-nvme target + * Note: could have been sent at top of process, but + * cleaner on link traffic if after the aborts complete. + * Note: if association doesn't exist, association_id will be 0 + */ + if (ctrl->association_id) + nvme_fc_xmt_disconnect_assoc(ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + ctrl->association_id = 0; + disls = ctrl->rcv_disconn; + ctrl->rcv_disconn = NULL; + spin_unlock_irqrestore(&ctrl->lock, flags); + if (disls) + /* + * if a Disconnect Request was waiting for a response, send + * now that all ABTS's have been issued (and are complete). + */ + nvme_fc_xmt_ls_rsp(disls); + + if (ctrl->ctrl.tagset) { + nvme_fc_delete_hw_io_queues(ctrl); + nvme_fc_free_io_queues(ctrl); + } + + __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); + nvme_fc_free_queue(&ctrl->queues[0]); + + /* re-enable the admin_q so anything new can fast fail */ + nvme_unquiesce_admin_queue(&ctrl->ctrl); + + /* resume the io queues so that things will fast fail */ + nvme_unquiesce_io_queues(&ctrl->ctrl); + + nvme_fc_ctlr_inactive_on_rport(ctrl); +} + +static void +nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + + cancel_work_sync(&ctrl->ioerr_work); + cancel_delayed_work_sync(&ctrl->connect_work); + /* + * kill the association on the link side. 
this will block + * waiting for io to terminate + */ + nvme_fc_delete_association(ctrl); +} + +static void +nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) +{ + struct nvme_fc_rport *rport = ctrl->rport; + struct nvme_fc_remote_port *portptr = &rport->remoteport; + unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; + bool recon = true; + + if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING) + return; + + if (portptr->port_state == FC_OBJSTATE_ONLINE) { + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", + ctrl->cnum, status); + if (status > 0 && (status & NVME_SC_DNR)) + recon = false; + } else if (time_after_eq(jiffies, rport->dev_loss_end)) + recon = false; + + if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { + if (portptr->port_state == FC_OBJSTATE_ONLINE) + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: Reconnect attempt in %ld " + "seconds\n", + ctrl->cnum, recon_delay / HZ); + else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) + recon_delay = rport->dev_loss_end - jiffies; + + queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); + } else { + if (portptr->port_state == FC_OBJSTATE_ONLINE) { + if (status > 0 && (status & NVME_SC_DNR)) + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: reconnect failure\n", + ctrl->cnum); + else + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: Max reconnect attempts " + "(%d) reached.\n", + ctrl->cnum, ctrl->ctrl.nr_reconnects); + } else + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: dev_loss_tmo (%d) expired " + "while waiting for remoteport connectivity.\n", + ctrl->cnum, min_t(int, portptr->dev_loss_tmo, + (ctrl->ctrl.opts->max_reconnects * + ctrl->ctrl.opts->reconnect_delay))); + WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); + } +} + +static void +nvme_fc_reset_ctrl_work(struct work_struct *work) +{ + struct nvme_fc_ctrl *ctrl = + container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); + + nvme_stop_ctrl(&ctrl->ctrl); + + /* will block will waiting for io to terminate */ + nvme_fc_delete_association(ctrl); + + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) + dev_err(ctrl->ctrl.device, + "NVME-FC{%d}: error_recovery: Couldn't change state " + "to CONNECTING\n", ctrl->cnum); + + if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { + if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { + dev_err(ctrl->ctrl.device, + "NVME-FC{%d}: failed to schedule connect " + "after reset\n", ctrl->cnum); + } else { + flush_delayed_work(&ctrl->connect_work); + } + } else { + nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); + } +} + + +static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { + .name = "fc", + .module = THIS_MODULE, + .flags = NVME_F_FABRICS, + .reg_read32 = nvmf_reg_read32, + .reg_read64 = nvmf_reg_read64, + .reg_write32 = nvmf_reg_write32, + .free_ctrl = nvme_fc_nvme_ctrl_freed, + .submit_async_event = nvme_fc_submit_async_event, + .delete_ctrl = nvme_fc_delete_ctrl, + .get_address = nvmf_get_address, +}; + +static void +nvme_fc_connect_ctrl_work(struct work_struct *work) +{ + int ret; + + struct nvme_fc_ctrl *ctrl = + container_of(to_delayed_work(work), + struct nvme_fc_ctrl, connect_work); + + ret = nvme_fc_create_association(ctrl); + if (ret) + nvme_fc_reconnect_or_delete(ctrl, ret); + else + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: controller connect complete\n", + ctrl->cnum); +} + + +static const struct blk_mq_ops nvme_fc_admin_mq_ops = { + .queue_rq = nvme_fc_queue_rq, + .complete = nvme_fc_complete_rq, + .init_request = 
nvme_fc_init_request, + .exit_request = nvme_fc_exit_request, + .init_hctx = nvme_fc_init_admin_hctx, + .timeout = nvme_fc_timeout, +}; + + +/* + * Fails a controller request if it matches an existing controller + * (association) with the same tuple: + * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> + * + * The ports don't need to be compared as they are intrinsically + * already matched by the port pointers supplied. + */ +static bool +nvme_fc_existing_controller(struct nvme_fc_rport *rport, + struct nvmf_ctrl_options *opts) +{ + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&rport->lock, flags); + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); + if (found) + break; + } + spin_unlock_irqrestore(&rport->lock, flags); + + return found; +} + +static struct nvme_ctrl * +nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, + struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) +{ + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + int ret, idx, ctrl_loss_tmo; + + if (!(rport->remoteport.port_role & + (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { + ret = -EBADR; + goto out_fail; + } + + if (!opts->duplicate_connect && + nvme_fc_existing_controller(rport, opts)) { + ret = -EALREADY; + goto out_fail; + } + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + ret = -ENOMEM; + goto out_fail; + } + + idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_free_ctrl; + } + + /* + * if ctrl_loss_tmo is being enforced and the default reconnect delay + * is being used, change to a shorter reconnect delay for FC. + */ + if (opts->max_reconnects != -1 && + opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && + opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { + ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; + opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; + opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, + opts->reconnect_delay); + } + + ctrl->ctrl.opts = opts; + ctrl->ctrl.nr_reconnects = 0; + if (lport->dev) + ctrl->ctrl.numa_node = dev_to_node(lport->dev); + else + ctrl->ctrl.numa_node = NUMA_NO_NODE; + INIT_LIST_HEAD(&ctrl->ctrl_list); + ctrl->lport = lport; + ctrl->rport = rport; + ctrl->dev = lport->dev; + ctrl->cnum = idx; + ctrl->ioq_live = false; + init_waitqueue_head(&ctrl->ioabort_wait); + + get_device(ctrl->dev); + kref_init(&ctrl->ref); + + INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); + INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); + INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); + spin_lock_init(&ctrl->lock); + + /* io queue count */ + ctrl->ctrl.queue_count = min_t(unsigned int, + opts->nr_io_queues, + lport->ops->max_hw_queues); + ctrl->ctrl.queue_count++; /* +1 for admin queue */ + + ctrl->ctrl.sqsize = opts->queue_size - 1; + ctrl->ctrl.kato = opts->kato; + ctrl->ctrl.cntlid = 0xffff; + + ret = -ENOMEM; + ctrl->queues = kcalloc(ctrl->ctrl.queue_count, + sizeof(struct nvme_fc_queue), GFP_KERNEL); + if (!ctrl->queues) + goto out_free_ida; + + nvme_fc_init_queue(ctrl, 0); + + /* + * Would have been nice to init io queues tag set as well. + * However, we require interaction from the controller + * for max io queue count before we can do so. + * Defer this to the connect path. 
+ */ + + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); + if (ret) + goto out_free_queues; + + /* at this point, teardown path changes to ref counting on nvme ctrl */ + + ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, + &nvme_fc_admin_mq_ops, + struct_size_t(struct nvme_fcp_op_w_sgl, priv, + ctrl->lport->ops->fcprqst_priv_sz)); + if (ret) + goto fail_ctrl; + + spin_lock_irqsave(&rport->lock, flags); + list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); + spin_unlock_irqrestore(&rport->lock, flags); + + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || + !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + dev_err(ctrl->ctrl.device, + "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); + goto fail_ctrl; + } + + if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { + dev_err(ctrl->ctrl.device, + "NVME-FC{%d}: failed to schedule initial connect\n", + ctrl->cnum); + goto fail_ctrl; + } + + flush_delayed_work(&ctrl->connect_work); + + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", + ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl)); + + return &ctrl->ctrl; + +fail_ctrl: + nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); + cancel_work_sync(&ctrl->ioerr_work); + cancel_work_sync(&ctrl->ctrl.reset_work); + cancel_delayed_work_sync(&ctrl->connect_work); + + ctrl->ctrl.opts = NULL; + + /* initiate nvme ctrl ref counting teardown */ + nvme_uninit_ctrl(&ctrl->ctrl); + + /* Remove core ctrl ref. */ + nvme_put_ctrl(&ctrl->ctrl); + + /* as we're past the point where we transition to the ref + * counting teardown path, if we return a bad pointer here, + * the calling routine, thinking it's prior to the + * transition, will do an rport put. Since the teardown + * path also does a rport put, we do an extra get here to + * so proper order/teardown happens. + */ + nvme_fc_rport_get(rport); + + return ERR_PTR(-EIO); + +out_free_queues: + kfree(ctrl->queues); +out_free_ida: + put_device(ctrl->dev); + ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_free_ctrl: + kfree(ctrl); +out_fail: + /* exit via here doesn't follow ctlr ref points */ + return ERR_PTR(ret); +} + + +struct nvmet_fc_traddr { + u64 nn; + u64 pn; +}; + +static int +__nvme_fc_parse_u64(substring_t *sstr, u64 *val) +{ + u64 token64; + + if (match_u64(sstr, &token64)) + return -EINVAL; + *val = token64; + + return 0; +} + +/* + * This routine validates and extracts the WWN's from the TRADDR string. + * As kernel parsers need the 0x to determine number base, universally + * build string to parse with 0x prefix before parsing name strings. 
+ */ +static int +nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) +{ + char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; + substring_t wwn = { name, &name[sizeof(name)-1] }; + int nnoffset, pnoffset; + + /* validate if string is one of the 2 allowed formats */ + if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && + !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && + !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], + "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { + nnoffset = NVME_FC_TRADDR_OXNNLEN; + pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + + NVME_FC_TRADDR_OXNNLEN; + } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && + !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && + !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], + "pn-", NVME_FC_TRADDR_NNLEN))) { + nnoffset = NVME_FC_TRADDR_NNLEN; + pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; + } else + goto out_einval; + + name[0] = '0'; + name[1] = 'x'; + name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; + + memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); + if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) + goto out_einval; + + memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); + if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) + goto out_einval; + + return 0; + +out_einval: + pr_warn("%s: bad traddr string\n", __func__); + return -EINVAL; +} + +static struct nvme_ctrl * +nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) +{ + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + struct nvme_ctrl *ctrl; + struct nvmet_fc_traddr laddr = { 0L, 0L }; + struct nvmet_fc_traddr raddr = { 0L, 0L }; + unsigned long flags; + int ret; + + ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); + if (ret || !raddr.nn || !raddr.pn) + return ERR_PTR(-EINVAL); + + ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); + if (ret || !laddr.nn || !laddr.pn) + return ERR_PTR(-EINVAL); + + /* find the host and remote ports to connect together */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + if (lport->localport.node_name != laddr.nn || + lport->localport.port_name != laddr.pn || + lport->localport.port_state != FC_OBJSTATE_ONLINE) + continue; + + list_for_each_entry(rport, &lport->endp_list, endp_list) { + if (rport->remoteport.node_name != raddr.nn || + rport->remoteport.port_name != raddr.pn || + rport->remoteport.port_state != FC_OBJSTATE_ONLINE) + continue; + + /* if fail to get reference fall through. Will error */ + if (!nvme_fc_rport_get(rport)) + break; + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); + if (IS_ERR(ctrl)) + nvme_fc_rport_put(rport); + return ctrl; + } + } + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + pr_warn("%s: %s - %s combination not found\n", + __func__, opts->traddr, opts->host_traddr); + return ERR_PTR(-ENOENT); +} + + +static struct nvmf_transport_ops nvme_fc_transport = { + .name = "fc", + .module = THIS_MODULE, + .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, + .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, + .create_ctrl = nvme_fc_create_ctrl, +}; + +/* Arbitrary successive failures max. 
With lots of subsystems could be high */ +#define DISCOVERY_MAX_FAIL 20 + +static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long flags; + LIST_HEAD(local_disc_list); + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + int failcnt = 0; + + spin_lock_irqsave(&nvme_fc_lock, flags); +restart: + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + list_for_each_entry(rport, &lport->endp_list, endp_list) { + if (!nvme_fc_lport_get(lport)) + continue; + if (!nvme_fc_rport_get(rport)) { + /* + * This is a temporary condition. Upon restart + * this rport will be gone from the list. + * + * Revert the lport put and retry. Anything + * added to the list already will be skipped (as + * they are no longer list_empty). Loops should + * resume at rports that were not yet seen. + */ + nvme_fc_lport_put(lport); + + if (failcnt++ < DISCOVERY_MAX_FAIL) + goto restart; + + pr_err("nvme_discovery: too many reference " + "failures\n"); + goto process_local_list; + } + if (list_empty(&rport->disc_list)) + list_add_tail(&rport->disc_list, + &local_disc_list); + } + } + +process_local_list: + while (!list_empty(&local_disc_list)) { + rport = list_first_entry(&local_disc_list, + struct nvme_fc_rport, disc_list); + list_del_init(&rport->disc_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + lport = rport->lport; + /* signal discovery. Won't hurt if it repeats */ + nvme_fc_signal_discovery_scan(lport, rport); + nvme_fc_rport_put(rport); + nvme_fc_lport_put(lport); + + spin_lock_irqsave(&nvme_fc_lock, flags); + } + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return count; +} + +static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); + +#ifdef CONFIG_BLK_CGROUP_FC_APPID +/* Parse the cgroup id from a buf and return the length of cgrpid */ +static int fc_parse_cgrpid(const char *buf, u64 *id) +{ + char cgrp_id[16+1]; + int cgrpid_len, j; + + memset(cgrp_id, 0x0, sizeof(cgrp_id)); + for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) { + if (buf[cgrpid_len] != ':') + cgrp_id[cgrpid_len] = buf[cgrpid_len]; + else { + j = 1; + break; + } + } + if (!j) + return -EINVAL; + if (kstrtou64(cgrp_id, 16, id) < 0) + return -EINVAL; + return cgrpid_len; +} + +/* + * Parse and update the appid in the blkcg associated with the cgroupid. 
+ */ +static ssize_t fc_appid_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + size_t orig_count = count; + u64 cgrp_id; + int appid_len = 0; + int cgrpid_len = 0; + char app_id[FC_APPID_LEN]; + int ret = 0; + + if (buf[count-1] == '\n') + count--; + + if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':'))) + return -EINVAL; + + cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id); + if (cgrpid_len < 0) + return -EINVAL; + appid_len = count - cgrpid_len - 1; + if (appid_len > FC_APPID_LEN) + return -EINVAL; + + memset(app_id, 0x0, sizeof(app_id)); + memcpy(app_id, &buf[cgrpid_len+1], appid_len); + ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id)); + if (ret < 0) + return ret; + return orig_count; +} +static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store); +#endif /* CONFIG_BLK_CGROUP_FC_APPID */ + +static struct attribute *nvme_fc_attrs[] = { + &dev_attr_nvme_discovery.attr, +#ifdef CONFIG_BLK_CGROUP_FC_APPID + &dev_attr_appid_store.attr, +#endif + NULL +}; + +static const struct attribute_group nvme_fc_attr_group = { + .attrs = nvme_fc_attrs, +}; + +static const struct attribute_group *nvme_fc_attr_groups[] = { + &nvme_fc_attr_group, + NULL +}; + +static struct class fc_class = { + .name = "fc", + .dev_groups = nvme_fc_attr_groups, +}; + +static int __init nvme_fc_init_module(void) +{ + int ret; + + nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); + if (!nvme_fc_wq) + return -ENOMEM; + + /* + * NOTE: + * It is expected that in the future the kernel will combine + * the FC-isms that are currently under scsi and now being + * added to by NVME into a new standalone FC class. The SCSI + * and NVME protocols and their devices would be under this + * new FC class. + * + * As we need something to post FC-specific udev events to, + * specifically for nvme probe events, start by creating the + * new device class. When the new standalone FC class is + * put in place, this code will move to a more generic + * location for the class. 
+ */ + ret = class_register(&fc_class); + if (ret) { + pr_err("couldn't register class fc\n"); + goto out_destroy_wq; + } + + /* + * Create a device for the FC-centric udev events + */ + fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, + "fc_udev_device"); + if (IS_ERR(fc_udev_device)) { + pr_err("couldn't create fc_udev device!\n"); + ret = PTR_ERR(fc_udev_device); + goto out_destroy_class; + } + + ret = nvmf_register_transport(&nvme_fc_transport); + if (ret) + goto out_destroy_device; + + return 0; + +out_destroy_device: + device_destroy(&fc_class, MKDEV(0, 0)); +out_destroy_class: + class_unregister(&fc_class); +out_destroy_wq: + destroy_workqueue(nvme_fc_wq); + + return ret; +} + +static void +nvme_fc_delete_controllers(struct nvme_fc_rport *rport) +{ + struct nvme_fc_ctrl *ctrl; + + spin_lock(&rport->lock); + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: transport unloading: deleting ctrl\n", + ctrl->cnum); + nvme_delete_ctrl(&ctrl->ctrl); + } + spin_unlock(&rport->lock); +} + +static void +nvme_fc_cleanup_for_unload(void) +{ + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + list_for_each_entry(rport, &lport->endp_list, endp_list) { + nvme_fc_delete_controllers(rport); + } + } +} + +static void __exit nvme_fc_exit_module(void) +{ + unsigned long flags; + bool need_cleanup = false; + + spin_lock_irqsave(&nvme_fc_lock, flags); + nvme_fc_waiting_to_unload = true; + if (!list_empty(&nvme_fc_lport_list)) { + need_cleanup = true; + nvme_fc_cleanup_for_unload(); + } + spin_unlock_irqrestore(&nvme_fc_lock, flags); + if (need_cleanup) { + pr_info("%s: waiting for ctlr deletes\n", __func__); + wait_for_completion(&nvme_fc_unload_proceed); + pr_info("%s: ctrl deletes complete\n", __func__); + } + + nvmf_unregister_transport(&nvme_fc_transport); + + ida_destroy(&nvme_fc_local_port_cnt); + ida_destroy(&nvme_fc_ctrl_cnt); + + device_destroy(&fc_class, MKDEV(0, 0)); + class_unregister(&fc_class); + destroy_workqueue(nvme_fc_wq); +} + +module_init(nvme_fc_init_module); +module_exit(nvme_fc_exit_module); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/fc.h b/drivers/nvme/host/fc.h new file mode 100644 index 0000000000..05ce566f2c --- /dev/null +++ b/drivers/nvme/host/fc.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016, Avago Technologies + */ + +#ifndef _NVME_FC_TRANSPORT_H +#define _NVME_FC_TRANSPORT_H 1 + + +/* + * Common definitions between the nvme_fc (host) transport and + * nvmet_fc (target) transport implementation. 
+ */ + +/* + * ****************** FC-NVME LS HANDLING ****************** + */ + +union nvmefc_ls_requests { + struct fcnvme_ls_rqst_w0 w0; + struct fcnvme_ls_cr_assoc_rqst rq_cr_assoc; + struct fcnvme_ls_cr_conn_rqst rq_cr_conn; + struct fcnvme_ls_disconnect_assoc_rqst rq_dis_assoc; + struct fcnvme_ls_disconnect_conn_rqst rq_dis_conn; +} __aligned(128); /* alignment for other things alloc'd with */ + +union nvmefc_ls_responses { + struct fcnvme_ls_rjt rsp_rjt; + struct fcnvme_ls_cr_assoc_acc rsp_cr_assoc; + struct fcnvme_ls_cr_conn_acc rsp_cr_conn; + struct fcnvme_ls_disconnect_assoc_acc rsp_dis_assoc; + struct fcnvme_ls_disconnect_conn_acc rsp_dis_conn; +} __aligned(128); /* alignment for other things alloc'd with */ + +static inline void +nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd) +{ + struct fcnvme_ls_acc_hdr *acc = buf; + + acc->w0.ls_cmd = ls_cmd; + acc->desc_list_len = desc_len; + acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); + acc->rqst.desc_len = + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); + acc->rqst.w0.ls_cmd = rqst_ls_cmd; +} + +static inline int +nvme_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd, + u8 reason, u8 explanation, u8 vendor) +{ + struct fcnvme_ls_rjt *rjt = buf; + + nvme_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST, + fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)), + ls_cmd); + rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); + rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); + rjt->rjt.reason_code = reason; + rjt->rjt.reason_explanation = explanation; + rjt->rjt.vendor = vendor; + + return sizeof(struct fcnvme_ls_rjt); +} + +/* Validation Error indexes into the string table below */ +enum { + VERR_NO_ERROR = 0, + VERR_CR_ASSOC_LEN = 1, + VERR_CR_ASSOC_RQST_LEN = 2, + VERR_CR_ASSOC_CMD = 3, + VERR_CR_ASSOC_CMD_LEN = 4, + VERR_ERSP_RATIO = 5, + VERR_ASSOC_ALLOC_FAIL = 6, + VERR_QUEUE_ALLOC_FAIL = 7, + VERR_CR_CONN_LEN = 8, + VERR_CR_CONN_RQST_LEN = 9, + VERR_ASSOC_ID = 10, + VERR_ASSOC_ID_LEN = 11, + VERR_NO_ASSOC = 12, + VERR_CONN_ID = 13, + VERR_CONN_ID_LEN = 14, + VERR_INVAL_CONN = 15, + VERR_CR_CONN_CMD = 16, + VERR_CR_CONN_CMD_LEN = 17, + VERR_DISCONN_LEN = 18, + VERR_DISCONN_RQST_LEN = 19, + VERR_DISCONN_CMD = 20, + VERR_DISCONN_CMD_LEN = 21, + VERR_DISCONN_SCOPE = 22, + VERR_RS_LEN = 23, + VERR_RS_RQST_LEN = 24, + VERR_RS_CMD = 25, + VERR_RS_CMD_LEN = 26, + VERR_RS_RCTL = 27, + VERR_RS_RO = 28, + VERR_LSACC = 29, + VERR_LSDESC_RQST = 30, + VERR_LSDESC_RQST_LEN = 31, + VERR_CR_ASSOC = 32, + VERR_CR_ASSOC_ACC_LEN = 33, + VERR_CR_CONN = 34, + VERR_CR_CONN_ACC_LEN = 35, + VERR_DISCONN = 36, + VERR_DISCONN_ACC_LEN = 37, +}; + +static char *validation_errors[] = { + "OK", + "Bad CR_ASSOC Length", + "Bad CR_ASSOC Rqst Length", + "Not CR_ASSOC Cmd", + "Bad CR_ASSOC Cmd Length", + "Bad Ersp Ratio", + "Association Allocation Failed", + "Queue Allocation Failed", + "Bad CR_CONN Length", + "Bad CR_CONN Rqst Length", + "Not Association ID", + "Bad Association ID Length", + "No Association", + "Not Connection ID", + "Bad Connection ID Length", + "Invalid Connection ID", + "Not CR_CONN Cmd", + "Bad CR_CONN Cmd Length", + "Bad DISCONN Length", + "Bad DISCONN Rqst Length", + "Not DISCONN Cmd", + "Bad DISCONN Cmd Length", + "Bad Disconnect Scope", + "Bad RS Length", + "Bad RS Rqst Length", + "Not RS Cmd", + "Bad RS Cmd Length", + "Bad RS R_CTL", + "Bad RS Relative Offset", + "Not LS_ACC", + "Not LSDESC_RQST", + "Bad LSDESC_RQST Length", + "Not CR_ASSOC Rqst", + "Bad CR_ASSOC ACC Length", + "Not 
CR_CONN Rqst", + "Bad CR_CONN ACC Length", + "Not Disconnect Rqst", + "Bad Disconnect ACC Length", +}; + +#define NVME_FC_LAST_LS_CMD_VALUE FCNVME_LS_DISCONNECT_CONN + +static char *nvmefc_ls_names[] = { + "Reserved (0)", + "RJT (1)", + "ACC (2)", + "Create Association", + "Create Connection", + "Disconnect Association", + "Disconnect Connection", +}; + +static inline void +nvmefc_fmt_lsreq_discon_assoc(struct nvmefc_ls_req *lsreq, + struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst, + struct fcnvme_ls_disconnect_assoc_acc *discon_acc, + u64 association_id) +{ + lsreq->rqstaddr = discon_rqst; + lsreq->rqstlen = sizeof(*discon_rqst); + lsreq->rspaddr = discon_acc; + lsreq->rsplen = sizeof(*discon_acc); + lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; + + discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC; + discon_rqst->desc_list_len = cpu_to_be32( + sizeof(struct fcnvme_lsdesc_assoc_id) + + sizeof(struct fcnvme_lsdesc_disconn_cmd)); + + discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); + discon_rqst->associd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id)); + + discon_rqst->associd.association_id = cpu_to_be64(association_id); + + discon_rqst->discon_cmd.desc_tag = cpu_to_be32( + FCNVME_LSDESC_DISCONN_CMD); + discon_rqst->discon_cmd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_disconn_cmd)); +} + +static inline int +nvmefc_vldt_lsreq_discon_assoc(u32 rqstlen, + struct fcnvme_ls_disconnect_assoc_rqst *rqst) +{ + int ret = 0; + + if (rqstlen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst)) + ret = VERR_DISCONN_LEN; + else if (rqst->desc_list_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_disconnect_assoc_rqst))) + ret = VERR_DISCONN_RQST_LEN; + else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) + ret = VERR_ASSOC_ID; + else if (rqst->associd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id))) + ret = VERR_ASSOC_ID_LEN; + else if (rqst->discon_cmd.desc_tag != + cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD)) + ret = VERR_DISCONN_CMD; + else if (rqst->discon_cmd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_disconn_cmd))) + ret = VERR_DISCONN_CMD_LEN; + /* + * As the standard changed on the LS, check if old format and scope + * something other than Association (e.g. 0). 
+ */ + else if (rqst->discon_cmd.rsvd8[0]) + ret = VERR_DISCONN_SCOPE; + + return ret; +} + +#endif /* _NVME_FC_TRANSPORT_H */ diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c new file mode 100644 index 0000000000..8df73a0b39 --- /dev/null +++ b/drivers/nvme/host/hwmon.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM Express hardware monitoring support + * Copyright (c) 2019, Guenter Roeck + */ + +#include <linux/hwmon.h> +#include <linux/units.h> +#include <asm/unaligned.h> + +#include "nvme.h" + +struct nvme_hwmon_data { + struct nvme_ctrl *ctrl; + struct nvme_smart_log *log; + struct mutex read_lock; +}; + +static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, + long *temp) +{ + unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT; + u32 status; + int ret; + + if (under) + threshold |= NVME_TEMP_THRESH_TYPE_UNDER; + + ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0, + &status); + if (ret > 0) + return -EIO; + if (ret < 0) + return ret; + *temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK); + + return 0; +} + +static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, + long temp) +{ + unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT; + int ret; + + temp = millicelsius_to_kelvin(temp); + threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK); + + if (under) + threshold |= NVME_TEMP_THRESH_TYPE_UNDER; + + ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0, + NULL); + if (ret > 0) + return -EIO; + + return ret; +} + +static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) +{ + return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, + NVME_CSI_NVM, data->log, sizeof(*data->log), 0); +} + +static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct nvme_hwmon_data *data = dev_get_drvdata(dev); + struct nvme_smart_log *log = data->log; + int temp; + int err; + + /* + * First handle attributes which don't require us to read + * the smart log. 
+ */ + switch (attr) { + case hwmon_temp_max: + return nvme_get_temp_thresh(data->ctrl, channel, false, val); + case hwmon_temp_min: + return nvme_get_temp_thresh(data->ctrl, channel, true, val); + case hwmon_temp_crit: + *val = kelvin_to_millicelsius(data->ctrl->cctemp); + return 0; + default: + break; + } + + mutex_lock(&data->read_lock); + err = nvme_hwmon_get_smart_log(data); + if (err) + goto unlock; + + switch (attr) { + case hwmon_temp_input: + if (!channel) + temp = get_unaligned_le16(log->temperature); + else + temp = le16_to_cpu(log->temp_sensor[channel - 1]); + *val = kelvin_to_millicelsius(temp); + break; + case hwmon_temp_alarm: + *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE); + break; + default: + err = -EOPNOTSUPP; + break; + } +unlock: + mutex_unlock(&data->read_lock); + return err; +} + +static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct nvme_hwmon_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_temp_max: + return nvme_set_temp_thresh(data->ctrl, channel, false, val); + case hwmon_temp_min: + return nvme_set_temp_thresh(data->ctrl, channel, true, val); + default: + break; + } + + return -EOPNOTSUPP; +} + +static const char * const nvme_hwmon_sensor_names[] = { + "Composite", + "Sensor 1", + "Sensor 2", + "Sensor 3", + "Sensor 4", + "Sensor 5", + "Sensor 6", + "Sensor 7", + "Sensor 8", +}; + +static int nvme_hwmon_read_string(struct device *dev, + enum hwmon_sensor_types type, u32 attr, + int channel, const char **str) +{ + *str = nvme_hwmon_sensor_names[channel]; + return 0; +} + +static umode_t nvme_hwmon_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct nvme_hwmon_data *data = _data; + + switch (attr) { + case hwmon_temp_crit: + if (!channel && data->ctrl->cctemp) + return 0444; + break; + case hwmon_temp_max: + case hwmon_temp_min: + if ((!channel && data->ctrl->wctemp) || + (channel && data->log->temp_sensor[channel - 1] && + !(data->ctrl->quirks & + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) { + if (data->ctrl->quirks & + NVME_QUIRK_NO_TEMP_THRESH_CHANGE) + return 0444; + return 0644; + } + break; + case hwmon_temp_alarm: + if (!channel) + return 0444; + break; + case hwmon_temp_input: + case hwmon_temp_label: + if (!channel || data->log->temp_sensor[channel - 1]) + return 0444; + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *const nvme_hwmon_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops nvme_hwmon_ops = { + .is_visible = nvme_hwmon_is_visible, + .read = nvme_hwmon_read, + .read_string = nvme_hwmon_read_string, + .write = nvme_hwmon_write, +}; + +static const struct hwmon_chip_info nvme_hwmon_chip_info = { + .ops = &nvme_hwmon_ops, + .info = nvme_hwmon_info, +}; + +int nvme_hwmon_init(struct 
nvme_ctrl *ctrl) +{ + struct device *dev = ctrl->device; + struct nvme_hwmon_data *data; + struct device *hwmon; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); + if (!data->log) { + err = -ENOMEM; + goto err_free_data; + } + + data->ctrl = ctrl; + mutex_init(&data->read_lock); + + err = nvme_hwmon_get_smart_log(data); + if (err) { + dev_warn(dev, "Failed to read smart log (error %d)\n", err); + goto err_free_log; + } + + hwmon = hwmon_device_register_with_info(dev, "nvme", + data, &nvme_hwmon_chip_info, + NULL); + if (IS_ERR(hwmon)) { + dev_warn(dev, "Failed to instantiate hwmon device\n"); + err = PTR_ERR(hwmon); + goto err_free_log; + } + ctrl->hwmon_device = hwmon; + return 0; + +err_free_log: + kfree(data->log); +err_free_data: + kfree(data); + return err; +} + +void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ + if (ctrl->hwmon_device) { + struct nvme_hwmon_data *data = + dev_get_drvdata(ctrl->hwmon_device); + + hwmon_device_unregister(ctrl->hwmon_device); + ctrl->hwmon_device = NULL; + kfree(data->log); + kfree(data); + } +} diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c new file mode 100644 index 0000000000..4939ed3563 --- /dev/null +++ b/drivers/nvme/host/ioctl.c @@ -0,0 +1,985 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2014, Intel Corporation. + * Copyright (c) 2017-2021 Christoph Hellwig. + */ +#include <linux/ptrace.h> /* for force_successful_syscall_return */ +#include <linux/nvme_ioctl.h> +#include <linux/io_uring.h> +#include "nvme.h" + +enum { + NVME_IOCTL_VEC = (1 << 0), + NVME_IOCTL_PARTITION = (1 << 1), +}; + +static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, + unsigned int flags, bool open_for_write) +{ + u32 effects; + + /* + * Do not allow unprivileged passthrough on partitions, as that allows an + * escape from the containment of the partition. + */ + if (flags & NVME_IOCTL_PARTITION) + goto admin; + + /* + * Do not allow unprivileged processes to send vendor specific or fabrics + * commands as we can't be sure about their effects. + */ + if (c->common.opcode >= nvme_cmd_vendor_start || + c->common.opcode == nvme_fabrics_command) + goto admin; + + /* + * Do not allow unprivileged passthrough of admin commands except + * for a subset of identify commands that contain information required + * to form proper I/O commands in userspace and do not expose any + * potentially sensitive information. + */ + if (!ns) { + if (c->common.opcode == nvme_admin_identify) { + switch (c->identify.cns) { + case NVME_ID_CNS_NS: + case NVME_ID_CNS_CS_NS: + case NVME_ID_CNS_NS_CS_INDEP: + case NVME_ID_CNS_CS_CTRL: + case NVME_ID_CNS_CTRL: + return true; + } + } + goto admin; + } + + /* + * Check if the controller provides a Commands Supported and Effects log + * and marks this command as supported. If not reject unprivileged + * passthrough. + */ + effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode); + if (!(effects & NVME_CMD_EFFECTS_CSUPP)) + goto admin; + + /* + * Don't allow passthrough for command that have intrusive (or unknown) + * effects. + */ + if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC | + NVME_CMD_EFFECTS_UUID_SEL | + NVME_CMD_EFFECTS_SCOPE_MASK)) + goto admin; + + /* + * Only allow I/O commands that transfer data to the controller or that + * change the logical block contents if the file descriptor is open for + * writing. 
+ */ + if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) && + !open_for_write) + goto admin; + + return true; +admin: + return capable(CAP_SYS_ADMIN); +} + +/* + * Convert integer values from ioctl structures to user pointers, silently + * ignoring the upper bits in the compat case to match behaviour of 32-bit + * kernels. + */ +static void __user *nvme_to_user_ptr(uintptr_t ptrval) +{ + if (in_compat_syscall()) + ptrval = (compat_uptr_t)ptrval; + return (void __user *)ptrval; +} + +static void *nvme_add_user_metadata(struct request *req, void __user *ubuf, + unsigned len, u32 seed) +{ + struct bio_integrity_payload *bip; + int ret = -ENOMEM; + void *buf; + struct bio *bio = req->bio; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + goto out; + + if (req_op(req) == REQ_OP_DRV_OUT) { + ret = -EFAULT; + if (copy_from_user(buf, ubuf, len)) + goto out_free_meta; + } else { + memset(buf, 0, len); + } + + bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); + if (IS_ERR(bip)) { + ret = PTR_ERR(bip); + goto out_free_meta; + } + + bip->bip_iter.bi_sector = seed; + ret = bio_integrity_add_page(bio, virt_to_page(buf), len, + offset_in_page(buf)); + if (ret != len) { + ret = -ENOMEM; + goto out_free_meta; + } + + req->cmd_flags |= REQ_INTEGRITY; + return buf; +out_free_meta: + kfree(buf); +out: + return ERR_PTR(ret); +} + +static int nvme_finish_user_metadata(struct request *req, void __user *ubuf, + void *meta, unsigned len, int ret) +{ + if (!ret && req_op(req) == REQ_OP_DRV_IN && + copy_to_user(ubuf, meta, len)) + ret = -EFAULT; + kfree(meta); + return ret; +} + +static struct request *nvme_alloc_user_request(struct request_queue *q, + struct nvme_command *cmd, blk_opf_t rq_flags, + blk_mq_req_flags_t blk_flags) +{ + struct request *req; + + req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); + if (IS_ERR(req)) + return req; + nvme_init_request(req, cmd); + nvme_req(req)->flags |= NVME_REQ_USERCMD; + return req; +} + +static int nvme_map_user_request(struct request *req, u64 ubuffer, + unsigned bufflen, void __user *meta_buffer, unsigned meta_len, + u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd, + unsigned int flags) +{ + struct request_queue *q = req->q; + struct nvme_ns *ns = q->queuedata; + struct block_device *bdev = ns ? 
ns->disk->part0 : NULL; + struct bio *bio = NULL; + void *meta = NULL; + int ret; + + if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { + struct iov_iter iter; + + /* fixedbufs is only for non-vectored io */ + if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) + return -EINVAL; + ret = io_uring_cmd_import_fixed(ubuffer, bufflen, + rq_data_dir(req), &iter, ioucmd); + if (ret < 0) + goto out; + ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL); + } else { + ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer), + bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0, + 0, rq_data_dir(req)); + } + + if (ret) + goto out; + bio = req->bio; + if (bdev) + bio_set_dev(bio, bdev); + + if (bdev && meta_buffer && meta_len) { + meta = nvme_add_user_metadata(req, meta_buffer, meta_len, + meta_seed); + if (IS_ERR(meta)) { + ret = PTR_ERR(meta); + goto out_unmap; + } + *metap = meta; + } + + return ret; + +out_unmap: + if (bio) + blk_rq_unmap_user(bio); +out: + blk_mq_free_request(req); + return ret; +} + +static int nvme_submit_user_cmd(struct request_queue *q, + struct nvme_command *cmd, u64 ubuffer, unsigned bufflen, + void __user *meta_buffer, unsigned meta_len, u32 meta_seed, + u64 *result, unsigned timeout, unsigned int flags) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_ctrl *ctrl; + struct request *req; + void *meta = NULL; + struct bio *bio; + u32 effects; + int ret; + + req = nvme_alloc_user_request(q, cmd, 0, 0); + if (IS_ERR(req)) + return PTR_ERR(req); + + req->timeout = timeout; + if (ubuffer && bufflen) { + ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer, + meta_len, meta_seed, &meta, NULL, flags); + if (ret) + return ret; + } + + bio = req->bio; + ctrl = nvme_req(req)->ctrl; + + effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); + ret = nvme_execute_rq(req, false); + if (result) + *result = le64_to_cpu(nvme_req(req)->result.u64); + if (meta) + ret = nvme_finish_user_metadata(req, meta_buffer, meta, + meta_len, ret); + if (bio) + blk_rq_unmap_user(bio); + blk_mq_free_request(req); + + if (effects) + nvme_passthru_end(ctrl, ns, effects, cmd, ret); + + return ret; +} + +static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) +{ + struct nvme_user_io io; + struct nvme_command c; + unsigned length, meta_len; + void __user *metadata; + + if (copy_from_user(&io, uio, sizeof(io))) + return -EFAULT; + if (io.flags) + return -EINVAL; + + switch (io.opcode) { + case nvme_cmd_write: + case nvme_cmd_read: + case nvme_cmd_compare: + break; + default: + return -EINVAL; + } + + length = (io.nblocks + 1) << ns->lba_shift; + + if ((io.control & NVME_RW_PRINFO_PRACT) && + ns->ms == sizeof(struct t10_pi_tuple)) { + /* + * Protection information is stripped/inserted by the + * controller. 
+ */ + if (nvme_to_user_ptr(io.metadata)) + return -EINVAL; + meta_len = 0; + metadata = NULL; + } else { + meta_len = (io.nblocks + 1) * ns->ms; + metadata = nvme_to_user_ptr(io.metadata); + } + + if (ns->features & NVME_NS_EXT_LBAS) { + length += meta_len; + meta_len = 0; + } else if (meta_len) { + if ((io.metadata & 3) || !io.metadata) + return -EINVAL; + } + + memset(&c, 0, sizeof(c)); + c.rw.opcode = io.opcode; + c.rw.flags = io.flags; + c.rw.nsid = cpu_to_le32(ns->head->ns_id); + c.rw.slba = cpu_to_le64(io.slba); + c.rw.length = cpu_to_le16(io.nblocks); + c.rw.control = cpu_to_le16(io.control); + c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); + c.rw.reftag = cpu_to_le32(io.reftag); + c.rw.apptag = cpu_to_le16(io.apptag); + c.rw.appmask = cpu_to_le16(io.appmask); + + return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, + meta_len, lower_32_bits(io.slba), NULL, 0, 0); +} + +static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns *ns, __u32 nsid) +{ + if (ns && nsid != ns->head->ns_id) { + dev_err(ctrl->device, + "%s: nsid (%u) in cmd does not match nsid (%u)" + "of namespace\n", + current->comm, nsid, ns->head->ns_id); + return false; + } + + return true; +} + +static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + struct nvme_passthru_cmd __user *ucmd, unsigned int flags, + bool open_for_write) +{ + struct nvme_passthru_cmd cmd; + struct nvme_command c; + unsigned timeout = 0; + u64 result; + int status; + + if (copy_from_user(&cmd, ucmd, sizeof(cmd))) + return -EFAULT; + if (cmd.flags) + return -EINVAL; + if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.common.opcode = cmd.opcode; + c.common.flags = cmd.flags; + c.common.nsid = cpu_to_le32(cmd.nsid); + c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); + c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); + c.common.cdw10 = cpu_to_le32(cmd.cdw10); + c.common.cdw11 = cpu_to_le32(cmd.cdw11); + c.common.cdw12 = cpu_to_le32(cmd.cdw12); + c.common.cdw13 = cpu_to_le32(cmd.cdw13); + c.common.cdw14 = cpu_to_le32(cmd.cdw14); + c.common.cdw15 = cpu_to_le32(cmd.cdw15); + + if (!nvme_cmd_allowed(ns, &c, 0, open_for_write)) + return -EACCES; + + if (cmd.timeout_ms) + timeout = msecs_to_jiffies(cmd.timeout_ms); + + status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, + cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), + cmd.metadata_len, 0, &result, timeout, 0); + + if (status >= 0) { + if (put_user(result, &ucmd->result)) + return -EFAULT; + } + + return status; +} + +static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags, + bool open_for_write) +{ + struct nvme_passthru_cmd64 cmd; + struct nvme_command c; + unsigned timeout = 0; + int status; + + if (copy_from_user(&cmd, ucmd, sizeof(cmd))) + return -EFAULT; + if (cmd.flags) + return -EINVAL; + if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.common.opcode = cmd.opcode; + c.common.flags = cmd.flags; + c.common.nsid = cpu_to_le32(cmd.nsid); + c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); + c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); + c.common.cdw10 = cpu_to_le32(cmd.cdw10); + c.common.cdw11 = cpu_to_le32(cmd.cdw11); + c.common.cdw12 = cpu_to_le32(cmd.cdw12); + c.common.cdw13 = cpu_to_le32(cmd.cdw13); + c.common.cdw14 = cpu_to_le32(cmd.cdw14); + c.common.cdw15 = cpu_to_le32(cmd.cdw15); + + if (!nvme_cmd_allowed(ns, &c, flags, open_for_write)) + return -EACCES; + + if (cmd.timeout_ms) + timeout = msecs_to_jiffies(cmd.timeout_ms); + + status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, + cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), + cmd.metadata_len, 0, &cmd.result, timeout, flags); + + if (status >= 0) { + if (put_user(cmd.result, &ucmd->result)) + return -EFAULT; + } + + return status; +} + +struct nvme_uring_data { + __u64 metadata; + __u64 addr; + __u32 data_len; + __u32 metadata_len; + __u32 timeout_ms; +}; + +/* + * This overlays struct io_uring_cmd pdu. + * Expect build errors if this grows larger than that. 
+ */ +struct nvme_uring_cmd_pdu { + union { + struct bio *bio; + struct request *req; + }; + u32 meta_len; + u32 nvme_status; + union { + struct { + void *meta; /* kernel-resident buffer */ + void __user *meta_buffer; + }; + u64 result; + } u; +}; + +static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( + struct io_uring_cmd *ioucmd) +{ + return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu; +} + +static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd, + unsigned issue_flags) +{ + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + struct request *req = pdu->req; + int status; + u64 result; + + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + status = -EINTR; + else + status = nvme_req(req)->status; + + result = le64_to_cpu(nvme_req(req)->result.u64); + + if (pdu->meta_len) + status = nvme_finish_user_metadata(req, pdu->u.meta_buffer, + pdu->u.meta, pdu->meta_len, status); + if (req->bio) + blk_rq_unmap_user(req->bio); + blk_mq_free_request(req); + + io_uring_cmd_done(ioucmd, status, result, issue_flags); +} + +static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, + unsigned issue_flags) +{ + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + + if (pdu->bio) + blk_rq_unmap_user(pdu->bio); + + io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags); +} + +static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, + blk_status_t err) +{ + struct io_uring_cmd *ioucmd = req->end_io_data; + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + + req->bio = pdu->bio; + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { + pdu->nvme_status = -EINTR; + } else { + pdu->nvme_status = nvme_req(req)->status; + if (!pdu->nvme_status) + pdu->nvme_status = blk_status_to_errno(err); + } + pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64); + + /* + * For iopoll, complete it directly. + * Otherwise, move the completion to task work. + */ + if (blk_rq_is_poll(req)) { + WRITE_ONCE(ioucmd->cookie, NULL); + nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); + } else { + io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); + } + + return RQ_END_IO_FREE; +} + +static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req, + blk_status_t err) +{ + struct io_uring_cmd *ioucmd = req->end_io_data; + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + + req->bio = pdu->bio; + pdu->req = req; + + /* + * For iopoll, complete it directly. + * Otherwise, move the completion to task work. + */ + if (blk_rq_is_poll(req)) { + WRITE_ONCE(ioucmd->cookie, NULL); + nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED); + } else { + io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb); + } + + return RQ_END_IO_NONE; +} + +static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) +{ + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe); + struct request_queue *q = ns ? 
ns->queue : ctrl->admin_q; + struct nvme_uring_data d; + struct nvme_command c; + struct request *req; + blk_opf_t rq_flags = REQ_ALLOC_CACHE; + blk_mq_req_flags_t blk_flags = 0; + void *meta = NULL; + int ret; + + c.common.opcode = READ_ONCE(cmd->opcode); + c.common.flags = READ_ONCE(cmd->flags); + if (c.common.flags) + return -EINVAL; + + c.common.command_id = 0; + c.common.nsid = cpu_to_le32(cmd->nsid); + if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid))) + return -EINVAL; + + c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2)); + c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3)); + c.common.metadata = 0; + c.common.dptr.prp1 = c.common.dptr.prp2 = 0; + c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10)); + c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11)); + c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12)); + c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13)); + c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); + c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); + + if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE)) + return -EACCES; + + d.metadata = READ_ONCE(cmd->metadata); + d.addr = READ_ONCE(cmd->addr); + d.data_len = READ_ONCE(cmd->data_len); + d.metadata_len = READ_ONCE(cmd->metadata_len); + d.timeout_ms = READ_ONCE(cmd->timeout_ms); + + if (issue_flags & IO_URING_F_NONBLOCK) { + rq_flags |= REQ_NOWAIT; + blk_flags = BLK_MQ_REQ_NOWAIT; + } + if (issue_flags & IO_URING_F_IOPOLL) + rq_flags |= REQ_POLLED; + + req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags); + if (IS_ERR(req)) + return PTR_ERR(req); + req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0; + + if (d.addr && d.data_len) { + ret = nvme_map_user_request(req, d.addr, + d.data_len, nvme_to_user_ptr(d.metadata), + d.metadata_len, 0, &meta, ioucmd, vec); + if (ret) + return ret; + } + + if (blk_rq_is_poll(req)) { + ioucmd->flags |= IORING_URING_CMD_POLLED; + WRITE_ONCE(ioucmd->cookie, req); + } + + /* to free bio on completion, as req->bio will be null at that time */ + pdu->bio = req->bio; + pdu->meta_len = d.metadata_len; + req->end_io_data = ioucmd; + if (pdu->meta_len) { + pdu->u.meta = meta; + pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata); + req->end_io = nvme_uring_cmd_end_io_meta; + } else { + req->end_io = nvme_uring_cmd_end_io; + } + blk_execute_rq_nowait(req, false); + return -EIOCBQUEUED; +} + +static bool is_ctrl_ioctl(unsigned int cmd) +{ + if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) + return true; + if (is_sed_ioctl(cmd)) + return true; + return false; +} + +static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd, + void __user *argp, bool open_for_write) +{ + switch (cmd) { + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); + case NVME_IOCTL_ADMIN64_CMD: + return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); + default: + return sed_ioctl(ctrl->opal_dev, cmd, argp); + } +} + +#ifdef COMPAT_FOR_U64_ALIGNMENT +struct nvme_user_io32 { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +} __attribute__((__packed__)); +#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32) +#endif /* COMPAT_FOR_U64_ALIGNMENT */ + +static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, + void __user *argp, unsigned int flags, bool open_for_write) +{ + switch (cmd) { + case NVME_IOCTL_ID: + 
force_successful_syscall_return(); + return ns->head->ns_id; + case NVME_IOCTL_IO_CMD: + return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write); + /* + * struct nvme_user_io can have different padding on some 32-bit ABIs. + * Just accept the compat version as all fields that are used are the + * same size and at the same offset. + */ +#ifdef COMPAT_FOR_U64_ALIGNMENT + case NVME_IOCTL_SUBMIT_IO32: +#endif + case NVME_IOCTL_SUBMIT_IO: + return nvme_submit_io(ns, argp); + case NVME_IOCTL_IO64_CMD_VEC: + flags |= NVME_IOCTL_VEC; + fallthrough; + case NVME_IOCTL_IO64_CMD: + return nvme_user_cmd64(ns->ctrl, ns, argp, flags, + open_for_write); + default: + return -ENOTTY; + } +} + +int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + bool open_for_write = mode & BLK_OPEN_WRITE; + void __user *argp = (void __user *)arg; + unsigned int flags = 0; + + if (bdev_is_partition(bdev)) + flags |= NVME_IOCTL_PARTITION; + + if (is_ctrl_ioctl(cmd)) + return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); + return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); +} + +long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct nvme_ns *ns = + container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev); + bool open_for_write = file->f_mode & FMODE_WRITE; + void __user *argp = (void __user *)arg; + + if (is_ctrl_ioctl(cmd)) + return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); + return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); +} + +static int nvme_uring_cmd_checks(unsigned int issue_flags) +{ + + /* NVMe passthrough requires big SQE/CQE support */ + if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) != + (IO_URING_F_SQE128|IO_URING_F_CQE32)) + return -EOPNOTSUPP; + return 0; +} + +static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd, + unsigned int issue_flags) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + int ret; + + BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu)); + + ret = nvme_uring_cmd_checks(issue_flags); + if (ret) + return ret; + + switch (ioucmd->cmd_op) { + case NVME_URING_CMD_IO: + ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false); + break; + case NVME_URING_CMD_IO_VEC: + ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + +int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) +{ + struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev, + struct nvme_ns, cdev); + + return nvme_ns_uring_cmd(ns, ioucmd, issue_flags); +} + +int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd, + struct io_comp_batch *iob, + unsigned int poll_flags) +{ + struct request *req; + int ret = 0; + + if (!(ioucmd->flags & IORING_URING_CMD_POLLED)) + return 0; + + req = READ_ONCE(ioucmd->cookie); + if (req && blk_rq_is_poll(req)) + ret = blk_rq_poll(req, iob, poll_flags); + return ret; +} +#ifdef CONFIG_NVME_MULTIPATH +static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, + void __user *argp, struct nvme_ns_head *head, int srcu_idx, + bool open_for_write) + __releases(&head->srcu) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + int ret; + + nvme_get_ctrl(ns->ctrl); + srcu_read_unlock(&head->srcu, srcu_idx); + ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); + + nvme_put_ctrl(ctrl); + return ret; +} + +int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t 
mode, + unsigned int cmd, unsigned long arg) +{ + struct nvme_ns_head *head = bdev->bd_disk->private_data; + bool open_for_write = mode & BLK_OPEN_WRITE; + void __user *argp = (void __user *)arg; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + unsigned int flags = 0; + + if (bdev_is_partition(bdev)) + flags |= NVME_IOCTL_PARTITION; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (!ns) + goto out_unlock; + + /* + * Handle ioctls that apply to the controller instead of the namespace + * seperately and drop the ns SRCU reference early. This avoids a + * deadlock when deleting namespaces using the passthrough interface. + */ + if (is_ctrl_ioctl(cmd)) + return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, + open_for_write); + + ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + bool open_for_write = file->f_mode & FMODE_WRITE; + struct cdev *cdev = file_inode(file)->i_cdev; + struct nvme_ns_head *head = + container_of(cdev, struct nvme_ns_head, cdev); + void __user *argp = (void __user *)arg; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (!ns) + goto out_unlock; + + if (is_ctrl_ioctl(cmd)) + return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, + open_for_write); + + ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, + unsigned int issue_flags) +{ + struct cdev *cdev = file_inode(ioucmd->file)->i_cdev; + struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev); + int srcu_idx = srcu_read_lock(&head->srcu); + struct nvme_ns *ns = nvme_find_path(head); + int ret = -EINVAL; + + if (ns) + ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} +#endif /* CONFIG_NVME_MULTIPATH */ + +int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) +{ + struct nvme_ctrl *ctrl = ioucmd->file->private_data; + int ret; + + /* IOPOLL not supported yet */ + if (issue_flags & IO_URING_F_IOPOLL) + return -EOPNOTSUPP; + + ret = nvme_uring_cmd_checks(issue_flags); + if (ret) + return ret; + + switch (ioucmd->cmd_op) { + case NVME_URING_CMD_ADMIN: + ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false); + break; + case NVME_URING_CMD_ADMIN_VEC: + ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + +static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, + bool open_for_write) +{ + struct nvme_ns *ns; + int ret; + + down_read(&ctrl->namespaces_rwsem); + if (list_empty(&ctrl->namespaces)) { + ret = -ENOTTY; + goto out_unlock; + } + + ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); + if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { + dev_warn(ctrl->device, + "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); + ret = -EINVAL; + goto out_unlock; + } + + dev_warn(ctrl->device, + "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); + kref_get(&ns->kref); + up_read(&ctrl->namespaces_rwsem); + + ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write); + nvme_put_ns(ns); + return ret; + +out_unlock: + 
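+	/* error path: namespaces_rwsem is still held here */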
up_read(&ctrl->namespaces_rwsem); + return ret; +} + +long nvme_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + bool open_for_write = file->f_mode & FMODE_WRITE; + struct nvme_ctrl *ctrl = file->private_data; + void __user *argp = (void __user *)arg; + + switch (cmd) { + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); + case NVME_IOCTL_ADMIN64_CMD: + return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); + case NVME_IOCTL_IO_CMD: + return nvme_dev_user_cmd(ctrl, argp, open_for_write); + case NVME_IOCTL_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + dev_warn(ctrl->device, "resetting controller\n"); + return nvme_reset_ctrl_sync(ctrl); + case NVME_IOCTL_SUBSYS_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return nvme_reset_subsystem(ctrl); + case NVME_IOCTL_RESCAN: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + nvme_queue_scan(ctrl); + return 0; + default: + return -ENOTTY; + } +} diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c new file mode 100644 index 0000000000..0a88d7bdc5 --- /dev/null +++ b/drivers/nvme/host/multipath.c @@ -0,0 +1,964 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018 Christoph Hellwig. + */ + +#include <linux/backing-dev.h> +#include <linux/moduleparam.h> +#include <linux/vmalloc.h> +#include <trace/events/block.h> +#include "nvme.h" + +bool multipath = true; +module_param(multipath, bool, 0444); +MODULE_PARM_DESC(multipath, + "turn on native support for multiple controllers per subsystem"); + +static const char *nvme_iopolicy_names[] = { + [NVME_IOPOLICY_NUMA] = "numa", + [NVME_IOPOLICY_RR] = "round-robin", +}; + +static int iopolicy = NVME_IOPOLICY_NUMA; + +static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp) +{ + if (!val) + return -EINVAL; + if (!strncmp(val, "numa", 4)) + iopolicy = NVME_IOPOLICY_NUMA; + else if (!strncmp(val, "round-robin", 11)) + iopolicy = NVME_IOPOLICY_RR; + else + return -EINVAL; + + return 0; +} + +static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp) +{ + return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]); +} + +module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy, + &iopolicy, 0644); +MODULE_PARM_DESC(iopolicy, + "Default multipath I/O policy; 'numa' (default) or 'round-robin'"); + +void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys) +{ + subsys->iopolicy = iopolicy; +} + +void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_mq_unfreeze_queue(h->disk->queue); +} + +void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_mq_freeze_queue_wait(h->disk->queue); +} + +void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_freeze_queue_start(h->disk->queue); +} + +void nvme_failover_req(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + u16 status = nvme_req(req)->status & 0x7ff; + unsigned long flags; + struct bio *bio; + + nvme_mpath_clear_current_path(ns); + + /* + * If we got back an ANA error, we know the controller is alive but not + * ready to serve this namespace. 
Kick of a re-read of the ANA + * information page, and just try any other available path for now. + */ + if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { + set_bit(NVME_NS_ANA_PENDING, &ns->flags); + queue_work(nvme_wq, &ns->ctrl->ana_work); + } + + spin_lock_irqsave(&ns->head->requeue_lock, flags); + for (bio = req->bio; bio; bio = bio->bi_next) { + bio_set_dev(bio, ns->head->disk->part0); + if (bio->bi_opf & REQ_POLLED) { + bio->bi_opf &= ~REQ_POLLED; + bio->bi_cookie = BLK_QC_T_NONE; + } + /* + * The alternate request queue that we may end up submitting + * the bio to may be frozen temporarily, in this case REQ_NOWAIT + * will fail the I/O immediately with EAGAIN to the issuer. + * We are not in the issuer context which cannot block. Clear + * the flag to avoid spurious EAGAIN I/O failures. + */ + bio->bi_opf &= ~REQ_NOWAIT; + } + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); +} + +void nvme_mpath_start_request(struct request *rq) +{ + struct nvme_ns *ns = rq->q->queuedata; + struct gendisk *disk = ns->head->disk; + + if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) + return; + + nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; + nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq), + jiffies); +} +EXPORT_SYMBOL_GPL(nvme_mpath_start_request); + +void nvme_mpath_end_request(struct request *rq) +{ + struct nvme_ns *ns = rq->q->queuedata; + + if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) + return; + bdev_end_io_acct(ns->head->disk->part0, req_op(rq), + blk_rq_bytes(rq) >> SECTOR_SHIFT, + nvme_req(rq)->start_time); +} + +void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) { + if (!ns->head->disk) + continue; + kblockd_schedule_work(&ns->head->requeue_work); + if (ctrl->state == NVME_CTRL_LIVE) + disk_uevent(ns->head->disk, KOBJ_CHANGE); + } + up_read(&ctrl->namespaces_rwsem); +} + +static const char *nvme_ana_state_names[] = { + [0] = "invalid state", + [NVME_ANA_OPTIMIZED] = "optimized", + [NVME_ANA_NONOPTIMIZED] = "non-optimized", + [NVME_ANA_INACCESSIBLE] = "inaccessible", + [NVME_ANA_PERSISTENT_LOSS] = "persistent-loss", + [NVME_ANA_CHANGE] = "change", +}; + +bool nvme_mpath_clear_current_path(struct nvme_ns *ns) +{ + struct nvme_ns_head *head = ns->head; + bool changed = false; + int node; + + if (!head) + goto out; + + for_each_node(node) { + if (ns == rcu_access_pointer(head->current_path[node])) { + rcu_assign_pointer(head->current_path[node], NULL); + changed = true; + } + } +out: + return changed; +} + +void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) { + nvme_mpath_clear_current_path(ns); + kblockd_schedule_work(&ns->head->requeue_work); + } + up_read(&ctrl->namespaces_rwsem); +} + +void nvme_mpath_revalidate_paths(struct nvme_ns *ns) +{ + struct nvme_ns_head *head = ns->head; + sector_t capacity = get_capacity(head->disk); + int node; + int srcu_idx; + + srcu_idx = srcu_read_lock(&head->srcu); + list_for_each_entry_rcu(ns, &head->list, siblings) { + if (capacity != get_capacity(ns->disk)) + clear_bit(NVME_NS_READY, &ns->flags); + } + srcu_read_unlock(&head->srcu, srcu_idx); + + for_each_node(node) + rcu_assign_pointer(head->current_path[node], NULL); + 
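+	/*
+	 * Kick the requeue work so any bios parked on the requeue list are
+	 * resubmitted and pick a freshly validated path via nvme_find_path().
+	 */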
kblockd_schedule_work(&head->requeue_work); +} + +static bool nvme_path_is_disabled(struct nvme_ns *ns) +{ + /* + * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should + * still be able to complete assuming that the controller is connected. + * Otherwise it will fail immediately and return to the requeue list. + */ + if (ns->ctrl->state != NVME_CTRL_LIVE && + ns->ctrl->state != NVME_CTRL_DELETING) + return true; + if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || + !test_bit(NVME_NS_READY, &ns->flags)) + return true; + return false; +} + +static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) +{ + int found_distance = INT_MAX, fallback_distance = INT_MAX, distance; + struct nvme_ns *found = NULL, *fallback = NULL, *ns; + + list_for_each_entry_rcu(ns, &head->list, siblings) { + if (nvme_path_is_disabled(ns)) + continue; + + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) + distance = node_distance(node, ns->ctrl->numa_node); + else + distance = LOCAL_DISTANCE; + + switch (ns->ana_state) { + case NVME_ANA_OPTIMIZED: + if (distance < found_distance) { + found_distance = distance; + found = ns; + } + break; + case NVME_ANA_NONOPTIMIZED: + if (distance < fallback_distance) { + fallback_distance = distance; + fallback = ns; + } + break; + default: + break; + } + } + + if (!found) + found = fallback; + if (found) + rcu_assign_pointer(head->current_path[node], found); + return found; +} + +static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, + struct nvme_ns *ns) +{ + ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, + siblings); + if (ns) + return ns; + return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); +} + +static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, + int node, struct nvme_ns *old) +{ + struct nvme_ns *ns, *found = NULL; + + if (list_is_singular(&head->list)) { + if (nvme_path_is_disabled(old)) + return NULL; + return old; + } + + for (ns = nvme_next_ns(head, old); + ns && ns != old; + ns = nvme_next_ns(head, ns)) { + if (nvme_path_is_disabled(ns)) + continue; + + if (ns->ana_state == NVME_ANA_OPTIMIZED) { + found = ns; + goto out; + } + if (ns->ana_state == NVME_ANA_NONOPTIMIZED) + found = ns; + } + + /* + * The loop above skips the current path for round-robin semantics. + * Fall back to the current path if either: + * - no other optimized path found and current is optimized, + * - no other usable path found and current is usable. 
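+	 * If the current path is unusable as well, fall back to whatever
+	 * non-optimized path the loop found, or return NULL when no path is
+	 * usable at all.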
+ */ + if (!nvme_path_is_disabled(old) && + (old->ana_state == NVME_ANA_OPTIMIZED || + (!found && old->ana_state == NVME_ANA_NONOPTIMIZED))) + return old; + + if (!found) + return NULL; +out: + rcu_assign_pointer(head->current_path[node], found); + return found; +} + +static inline bool nvme_path_is_optimized(struct nvme_ns *ns) +{ + return ns->ctrl->state == NVME_CTRL_LIVE && + ns->ana_state == NVME_ANA_OPTIMIZED; +} + +inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) +{ + int node = numa_node_id(); + struct nvme_ns *ns; + + ns = srcu_dereference(head->current_path[node], &head->srcu); + if (unlikely(!ns)) + return __nvme_find_path(head, node); + + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) + return nvme_round_robin_path(head, node, ns); + if (unlikely(!nvme_path_is_optimized(ns))) + return __nvme_find_path(head, node); + return ns; +} + +static bool nvme_available_path(struct nvme_ns_head *head) +{ + struct nvme_ns *ns; + + list_for_each_entry_rcu(ns, &head->list, siblings) { + if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) + continue; + switch (ns->ctrl->state) { + case NVME_CTRL_LIVE: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + /* fallthru */ + return true; + default: + break; + } + } + return false; +} + +static void nvme_ns_head_submit_bio(struct bio *bio) +{ + struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; + struct device *dev = disk_to_dev(head->disk); + struct nvme_ns *ns; + int srcu_idx; + + /* + * The namespace might be going away and the bio might be moved to a + * different queue via blk_steal_bios(), so we need to use the bio_split + * pool from the original queue to allocate the bvecs from. + */ + bio = bio_split_to_limits(bio); + if (!bio) + return; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (likely(ns)) { + bio_set_dev(bio, ns->disk->part0); + bio->bi_opf |= REQ_NVME_MPATH; + trace_block_bio_remap(bio, disk_devt(ns->head->disk), + bio->bi_iter.bi_sector); + submit_bio_noacct(bio); + } else if (nvme_available_path(head)) { + dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); + + spin_lock_irq(&head->requeue_lock); + bio_list_add(&head->requeue_list, bio); + spin_unlock_irq(&head->requeue_lock); + } else { + dev_warn_ratelimited(dev, "no available path - failing I/O\n"); + + bio_io_error(bio); + } + + srcu_read_unlock(&head->srcu, srcu_idx); +} + +static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode) +{ + if (!nvme_tryget_ns_head(disk->private_data)) + return -ENXIO; + return 0; +} + +static void nvme_ns_head_release(struct gendisk *disk) +{ + nvme_put_ns_head(disk->private_data); +} + +#ifdef CONFIG_BLK_DEV_ZONED +static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct nvme_ns_head *head = disk->private_data; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (ns) + ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} +#else +#define nvme_ns_head_report_zones NULL +#endif /* CONFIG_BLK_DEV_ZONED */ + +const struct block_device_operations nvme_ns_head_ops = { + .owner = THIS_MODULE, + .submit_bio = nvme_ns_head_submit_bio, + .open = nvme_ns_head_open, + .release = nvme_ns_head_release, + .ioctl = nvme_ns_head_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, + .getgeo = nvme_getgeo, + .report_zones = 
nvme_ns_head_report_zones, + .pr_ops = &nvme_pr_ops, +}; + +static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev) +{ + return container_of(cdev, struct nvme_ns_head, cdev); +} + +static int nvme_ns_head_chr_open(struct inode *inode, struct file *file) +{ + if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev))) + return -ENXIO; + return 0; +} + +static int nvme_ns_head_chr_release(struct inode *inode, struct file *file) +{ + nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev)); + return 0; +} + +static const struct file_operations nvme_ns_head_chr_fops = { + .owner = THIS_MODULE, + .open = nvme_ns_head_chr_open, + .release = nvme_ns_head_chr_release, + .unlocked_ioctl = nvme_ns_head_chr_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .uring_cmd = nvme_ns_head_chr_uring_cmd, + .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, +}; + +static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) +{ + int ret; + + head->cdev_device.parent = &head->subsys->dev; + ret = dev_set_name(&head->cdev_device, "ng%dn%d", + head->subsys->instance, head->instance); + if (ret) + return ret; + ret = nvme_cdev_add(&head->cdev, &head->cdev_device, + &nvme_ns_head_chr_fops, THIS_MODULE); + return ret; +} + +static void nvme_requeue_work(struct work_struct *work) +{ + struct nvme_ns_head *head = + container_of(work, struct nvme_ns_head, requeue_work); + struct bio *bio, *next; + + spin_lock_irq(&head->requeue_lock); + next = bio_list_get(&head->requeue_list); + spin_unlock_irq(&head->requeue_lock); + + while ((bio = next) != NULL) { + next = bio->bi_next; + bio->bi_next = NULL; + + submit_bio_noacct(bio); + } +} + +int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) +{ + bool vwc = false; + + mutex_init(&head->lock); + bio_list_init(&head->requeue_list); + spin_lock_init(&head->requeue_lock); + INIT_WORK(&head->requeue_work, nvme_requeue_work); + + /* + * Add a multipath node if the subsystems supports multiple controllers. + * We also do this for private namespaces as the namespace sharing flag + * could change after a rescan. + */ + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + !nvme_is_unique_nsid(ctrl, head) || !multipath) + return 0; + + head->disk = blk_alloc_disk(ctrl->numa_node); + if (!head->disk) + return -ENOMEM; + head->disk->fops = &nvme_ns_head_ops; + head->disk->private_data = head; + sprintf(head->disk->disk_name, "nvme%dn%d", + ctrl->subsys->instance, head->instance); + + blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue); + /* + * This assumes all controllers that refer to a namespace either + * support poll queues or not. That is not a strict guarantee, + * but if the assumption is wrong the effect is only suboptimal + * performance but not correctness problem. 
+ */ + if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && + ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) + blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); + + /* set to a default value of 512 until the disk is validated */ + blk_queue_logical_block_size(head->disk->queue, 512); + blk_set_stacking_limits(&head->disk->queue->limits); + blk_queue_dma_alignment(head->disk->queue, 3); + + /* we need to propagate up the VMC settings */ + if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) + vwc = true; + blk_queue_write_cache(head->disk->queue, vwc, vwc); + return 0; +} + +static void nvme_mpath_set_live(struct nvme_ns *ns) +{ + struct nvme_ns_head *head = ns->head; + int rc; + + if (!head->disk) + return; + + /* + * test_and_set_bit() is used because it is protecting against two nvme + * paths simultaneously calling device_add_disk() on the same namespace + * head. + */ + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + rc = device_add_disk(&head->subsys->dev, head->disk, + nvme_ns_id_attr_groups); + if (rc) { + clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); + return; + } + nvme_add_ns_head_cdev(head); + } + + mutex_lock(&head->lock); + if (nvme_path_is_optimized(ns)) { + int node, srcu_idx; + + srcu_idx = srcu_read_lock(&head->srcu); + for_each_node(node) + __nvme_find_path(head, node); + srcu_read_unlock(&head->srcu, srcu_idx); + } + mutex_unlock(&head->lock); + + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); +} + +static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, + int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *, + void *)) +{ + void *base = ctrl->ana_log_buf; + size_t offset = sizeof(struct nvme_ana_rsp_hdr); + int error, i; + + lockdep_assert_held(&ctrl->ana_lock); + + for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) { + struct nvme_ana_group_desc *desc = base + offset; + u32 nr_nsids; + size_t nsid_buf_size; + + if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc))) + return -EINVAL; + + nr_nsids = le32_to_cpu(desc->nnsids); + nsid_buf_size = flex_array_size(desc, nsids, nr_nsids); + + if (WARN_ON_ONCE(desc->grpid == 0)) + return -EINVAL; + if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax)) + return -EINVAL; + if (WARN_ON_ONCE(desc->state == 0)) + return -EINVAL; + if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE)) + return -EINVAL; + + offset += sizeof(*desc); + if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size)) + return -EINVAL; + + error = cb(ctrl, desc, data); + if (error) + return error; + + offset += nsid_buf_size; + } + + return 0; +} + +static inline bool nvme_state_is_live(enum nvme_ana_state state) +{ + return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED; +} + +static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, + struct nvme_ns *ns) +{ + ns->ana_grpid = le32_to_cpu(desc->grpid); + ns->ana_state = desc->state; + clear_bit(NVME_NS_ANA_PENDING, &ns->flags); + /* + * nvme_mpath_set_live() will trigger I/O to the multipath path device + * and in turn to this path device. However we cannot accept this I/O + * if the controller is not live. This may deadlock if called from + * nvme_mpath_init_identify() and the ctrl will never complete + * initialization, preventing I/O from completing. For this case we + * will reprocess the ANA log page in nvme_mpath_update() once the + * controller is ready. 
+ */ + if (nvme_state_is_live(ns->ana_state) && + ns->ctrl->state == NVME_CTRL_LIVE) + nvme_mpath_set_live(ns); +} + +static int nvme_update_ana_state(struct nvme_ctrl *ctrl, + struct nvme_ana_group_desc *desc, void *data) +{ + u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0; + unsigned *nr_change_groups = data; + struct nvme_ns *ns; + + dev_dbg(ctrl->device, "ANA group %d: %s.\n", + le32_to_cpu(desc->grpid), + nvme_ana_state_names[desc->state]); + + if (desc->state == NVME_ANA_CHANGE) + (*nr_change_groups)++; + + if (!nr_nsids) + return 0; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) { + unsigned nsid; +again: + nsid = le32_to_cpu(desc->nsids[n]); + if (ns->head->ns_id < nsid) + continue; + if (ns->head->ns_id == nsid) + nvme_update_ns_ana_state(desc, ns); + if (++n == nr_nsids) + break; + if (ns->head->ns_id > nsid) + goto again; + } + up_read(&ctrl->namespaces_rwsem); + return 0; +} + +static int nvme_read_ana_log(struct nvme_ctrl *ctrl) +{ + u32 nr_change_groups = 0; + int error; + + mutex_lock(&ctrl->ana_lock); + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM, + ctrl->ana_log_buf, ctrl->ana_log_size, 0); + if (error) { + dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error); + goto out_unlock; + } + + error = nvme_parse_ana_log(ctrl, &nr_change_groups, + nvme_update_ana_state); + if (error) + goto out_unlock; + + /* + * In theory we should have an ANATT timer per group as they might enter + * the change state at different times. But that is a lot of overhead + * just to protect against a target that keeps entering new changes + * states while never finishing previous ones. But we'll still + * eventually time out once all groups are in change state, so this + * isn't a big deal. + * + * We also double the ANATT value to provide some slack for transports + * or AEN processing overhead. 
+ */ + if (nr_change_groups) + mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies); + else + del_timer_sync(&ctrl->anatt_timer); +out_unlock: + mutex_unlock(&ctrl->ana_lock); + return error; +} + +static void nvme_ana_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); + + if (ctrl->state != NVME_CTRL_LIVE) + return; + + nvme_read_ana_log(ctrl); +} + +void nvme_mpath_update(struct nvme_ctrl *ctrl) +{ + u32 nr_change_groups = 0; + + if (!ctrl->ana_log_buf) + return; + + mutex_lock(&ctrl->ana_lock); + nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); + mutex_unlock(&ctrl->ana_lock); +} + +static void nvme_anatt_timeout(struct timer_list *t) +{ + struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); + + dev_info(ctrl->device, "ANATT timeout, resetting controller.\n"); + nvme_reset_ctrl(ctrl); +} + +void nvme_mpath_stop(struct nvme_ctrl *ctrl) +{ + if (!nvme_ctrl_use_ana(ctrl)) + return; + del_timer_sync(&ctrl->anatt_timer); + cancel_work_sync(&ctrl->ana_work); +} + +#define SUBSYS_ATTR_RW(_name, _mode, _show, _store) \ + struct device_attribute subsys_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static ssize_t nvme_subsys_iopolicy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + return sysfs_emit(buf, "%s\n", + nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); +} + +static ssize_t nvme_subsys_iopolicy_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + int i; + + for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) { + if (sysfs_streq(buf, nvme_iopolicy_names[i])) { + WRITE_ONCE(subsys->iopolicy, i); + return count; + } + } + + return -EINVAL; +} +SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR, + nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store); + +static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid); +} +DEVICE_ATTR_RO(ana_grpid); + +static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + + return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); +} +DEVICE_ATTR_RO(ana_state); + +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, + struct nvme_ana_group_desc *desc, void *data) +{ + struct nvme_ana_group_desc *dst = data; + + if (desc->grpid != dst->grpid) + return 0; + + *dst = *desc; + return -ENXIO; /* just break out of the loop */ +} + +void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) +{ + if (nvme_ctrl_use_ana(ns->ctrl)) { + struct nvme_ana_group_desc desc = { + .grpid = anagrpid, + .state = 0, + }; + + mutex_lock(&ns->ctrl->ana_lock); + ns->ana_grpid = le32_to_cpu(anagrpid); + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); + mutex_unlock(&ns->ctrl->ana_lock); + if (desc.state) { + /* found the group desc: update */ + nvme_update_ns_ana_state(&desc, ns); + } else { + /* group desc not found: trigger a re-read */ + set_bit(NVME_NS_ANA_PENDING, &ns->flags); + queue_work(nvme_wq, &ns->ctrl->ana_work); + } + } else { + ns->ana_state = NVME_ANA_OPTIMIZED; + nvme_mpath_set_live(ns); + } + + if (blk_queue_stable_writes(ns->queue) && ns->head->disk) + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, + 
ns->head->disk->queue); +#ifdef CONFIG_BLK_DEV_ZONED + if (blk_queue_is_zoned(ns->queue) && ns->head->disk) + ns->head->disk->nr_zones = ns->disk->nr_zones; +#endif +} + +void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +{ + if (!head->disk) + return; + kblockd_schedule_work(&head->requeue_work); + if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + nvme_cdev_del(&head->cdev, &head->cdev_device); + del_gendisk(head->disk); + } +} + +void nvme_mpath_remove_disk(struct nvme_ns_head *head) +{ + if (!head->disk) + return; + /* make sure all pending bios are cleaned up */ + kblockd_schedule_work(&head->requeue_work); + flush_work(&head->requeue_work); + put_disk(head->disk); +} + +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) +{ + mutex_init(&ctrl->ana_lock); + timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); + INIT_WORK(&ctrl->ana_work, nvme_ana_work); +} + +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT; + size_t ana_log_size; + int error = 0; + + /* check if multipath is enabled and we have the capability */ + if (!multipath || !ctrl->subsys || + !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) + return 0; + + if (!ctrl->max_namespaces || + ctrl->max_namespaces > le32_to_cpu(id->nn)) { + dev_err(ctrl->device, + "Invalid MNAN value %u\n", ctrl->max_namespaces); + return -EINVAL; + } + + ctrl->anacap = id->anacap; + ctrl->anatt = id->anatt; + ctrl->nanagrpid = le32_to_cpu(id->nanagrpid); + ctrl->anagrpmax = le32_to_cpu(id->anagrpmax); + + ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + + ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) + + ctrl->max_namespaces * sizeof(__le32); + if (ana_log_size > max_transfer_size) { + dev_err(ctrl->device, + "ANA log page size (%zd) larger than MDTS (%zd).\n", + ana_log_size, max_transfer_size); + dev_err(ctrl->device, "disabling ANA support.\n"); + goto out_uninit; + } + if (ana_log_size > ctrl->ana_log_size) { + nvme_mpath_stop(ctrl); + nvme_mpath_uninit(ctrl); + ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL); + if (!ctrl->ana_log_buf) + return -ENOMEM; + } + ctrl->ana_log_size = ana_log_size; + error = nvme_read_ana_log(ctrl); + if (error) + goto out_uninit; + return 0; + +out_uninit: + nvme_mpath_uninit(ctrl); + return error; +} + +void nvme_mpath_uninit(struct nvme_ctrl *ctrl) +{ + kvfree(ctrl->ana_log_buf); + ctrl->ana_log_buf = NULL; + ctrl->ana_log_size = 0; +} diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h new file mode 100644 index 0000000000..ba62d42d2a --- /dev/null +++ b/drivers/nvme/host/nvme.h @@ -0,0 +1,1141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2011-2014, Intel Corporation. 
+ */ + +#ifndef _NVME_H +#define _NVME_H + +#include <linux/nvme.h> +#include <linux/cdev.h> +#include <linux/pci.h> +#include <linux/kref.h> +#include <linux/blk-mq.h> +#include <linux/sed-opal.h> +#include <linux/fault-inject.h> +#include <linux/rcupdate.h> +#include <linux/wait.h> +#include <linux/t10-pi.h> + +#include <trace/events/block.h> + +extern const struct pr_ops nvme_pr_ops; + +extern unsigned int nvme_io_timeout; +#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) + +extern unsigned int admin_timeout; +#define NVME_ADMIN_TIMEOUT (admin_timeout * HZ) + +#define NVME_DEFAULT_KATO 5 + +#ifdef CONFIG_ARCH_NO_SG_CHAIN +#define NVME_INLINE_SG_CNT 0 +#define NVME_INLINE_METADATA_SG_CNT 0 +#else +#define NVME_INLINE_SG_CNT 2 +#define NVME_INLINE_METADATA_SG_CNT 1 +#endif + +/* + * Default to a 4K page size, with the intention to update this + * path in the future to accommodate architectures with differing + * kernel and IO page sizes. + */ +#define NVME_CTRL_PAGE_SHIFT 12 +#define NVME_CTRL_PAGE_SIZE (1 << NVME_CTRL_PAGE_SHIFT) + +extern struct workqueue_struct *nvme_wq; +extern struct workqueue_struct *nvme_reset_wq; +extern struct workqueue_struct *nvme_delete_wq; + +/* + * List of workarounds for devices that required behavior not specified in + * the standard. + */ +enum nvme_quirks { + /* + * Prefers I/O aligned to a stripe size specified in a vendor + * specific Identify field. + */ + NVME_QUIRK_STRIPE_SIZE = (1 << 0), + + /* + * The controller doesn't handle Identify value others than 0 or 1 + * correctly. + */ + NVME_QUIRK_IDENTIFY_CNS = (1 << 1), + + /* + * The controller deterministically returns O's on reads to + * logical blocks that deallocate was called on. + */ + NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2), + + /* + * The controller needs a delay before starts checking the device + * readiness, which is done by reading the NVME_CSTS_RDY bit. + */ + NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3), + + /* + * APST should not be used. + */ + NVME_QUIRK_NO_APST = (1 << 4), + + /* + * The deepest sleep state should not be used. + */ + NVME_QUIRK_NO_DEEPEST_PS = (1 << 5), + + /* + * Set MEDIUM priority on SQ creation + */ + NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), + + /* + * Ignore device provided subnqn. + */ + NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8), + + /* + * Broken Write Zeroes. + */ + NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), + + /* + * Force simple suspend/resume path. + */ + NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10), + + /* + * Use only one interrupt vector for all queues + */ + NVME_QUIRK_SINGLE_VECTOR = (1 << 11), + + /* + * Use non-standard 128 bytes SQEs. + */ + NVME_QUIRK_128_BYTES_SQES = (1 << 12), + + /* + * Prevent tag overlap between queues + */ + NVME_QUIRK_SHARED_TAGS = (1 << 13), + + /* + * Don't change the value of the temperature threshold feature + */ + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. + */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), + + /* + * The controller does not properly handle DMA addresses over + * 48 bits. + */ + NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16), + + /* + * The controller requires the command_id value be limited, so skip + * encoding the generation sequence number. + */ + NVME_QUIRK_SKIP_CID_GEN = (1 << 17), + + /* + * Reports garbage in the namespace identifiers (eui64, nguid, uuid). 
+ */ + NVME_QUIRK_BOGUS_NID = (1 << 18), + + /* + * No temperature thresholds for channels other than 0 (Composite). + */ + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19), + + /* + * Disables simple suspend/resume path. + */ + NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20), +}; + +/* + * Common request structure for NVMe passthrough. All drivers must have + * this structure as the first member of their request-private data. + */ +struct nvme_request { + struct nvme_command *cmd; + union nvme_result result; + u8 genctr; + u8 retries; + u8 flags; + u16 status; +#ifdef CONFIG_NVME_MULTIPATH + unsigned long start_time; +#endif + struct nvme_ctrl *ctrl; +}; + +/* + * Mark a bio as coming in through the mpath node. + */ +#define REQ_NVME_MPATH REQ_DRV + +enum { + NVME_REQ_CANCELLED = (1 << 0), + NVME_REQ_USERCMD = (1 << 1), + NVME_MPATH_IO_STATS = (1 << 2), +}; + +static inline struct nvme_request *nvme_req(struct request *req) +{ + return blk_mq_rq_to_pdu(req); +} + +static inline u16 nvme_req_qid(struct request *req) +{ + if (!req->q->queuedata) + return 0; + + return req->mq_hctx->queue_num + 1; +} + +/* The below value is the specific amount of delay needed before checking + * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the + * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was + * found empirically. + */ +#define NVME_QUIRK_DELAY_AMOUNT 2300 + +/* + * enum nvme_ctrl_state: Controller state + * + * @NVME_CTRL_NEW: New controller just allocated, initial state + * @NVME_CTRL_LIVE: Controller is connected and I/O capable + * @NVME_CTRL_RESETTING: Controller is resetting (or scheduled reset) + * @NVME_CTRL_CONNECTING: Controller is disconnected, now connecting the + * transport + * @NVME_CTRL_DELETING: Controller is deleting (or scheduled deletion) + * @NVME_CTRL_DELETING_NOIO: Controller is deleting and I/O is not + * disabled/failed immediately. This state comes + * after all async event processing took place and + * before ns removal and the controller deletion + * progress + * @NVME_CTRL_DEAD: Controller is non-present/unresponsive during + * shutdown or removal. 
In this case we forcibly + * kill all inflight I/O as they have no chance to + * complete + */ +enum nvme_ctrl_state { + NVME_CTRL_NEW, + NVME_CTRL_LIVE, + NVME_CTRL_RESETTING, + NVME_CTRL_CONNECTING, + NVME_CTRL_DELETING, + NVME_CTRL_DELETING_NOIO, + NVME_CTRL_DEAD, +}; + +struct nvme_fault_inject { +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + struct fault_attr attr; + struct dentry *parent; + bool dont_retry; /* DNR, do not retry */ + u16 status; /* status code */ +#endif +}; + +enum nvme_ctrl_flags { + NVME_CTRL_FAILFAST_EXPIRED = 0, + NVME_CTRL_ADMIN_Q_STOPPED = 1, + NVME_CTRL_STARTED_ONCE = 2, + NVME_CTRL_STOPPED = 3, + NVME_CTRL_SKIP_ID_CNS_CS = 4, + NVME_CTRL_DIRTY_CAPABILITY = 5, + NVME_CTRL_FROZEN = 6, +}; + +struct nvme_ctrl { + bool comp_seen; + bool identified; + enum nvme_ctrl_state state; + spinlock_t lock; + struct mutex scan_lock; + const struct nvme_ctrl_ops *ops; + struct request_queue *admin_q; + struct request_queue *connect_q; + struct request_queue *fabrics_q; + struct device *dev; + int instance; + int numa_node; + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; + struct rw_semaphore namespaces_rwsem; + struct device ctrl_device; + struct device *device; /* char device */ +#ifdef CONFIG_NVME_HWMON + struct device *hwmon_device; +#endif + struct cdev cdev; + struct work_struct reset_work; + struct work_struct delete_work; + wait_queue_head_t state_wq; + + struct nvme_subsystem *subsys; + struct list_head subsys_entry; + + struct opal_dev *opal_dev; + + char name[12]; + u16 cntlid; + + u16 mtfa; + u32 ctrl_config; + u32 queue_count; + + u64 cap; + u32 max_hw_sectors; + u32 max_segments; + u32 max_integrity_segments; + u32 max_discard_sectors; + u32 max_discard_segments; + u32 max_zeroes_sectors; +#ifdef CONFIG_BLK_DEV_ZONED + u32 max_zone_append; +#endif + u16 crdt[3]; + u16 oncs; + u32 dmrsl; + u16 oacs; + u16 sqsize; + u32 max_namespaces; + atomic_t abort_limit; + u8 vwc; + u32 vs; + u32 sgls; + u16 kas; + u8 npss; + u8 apsta; + u16 wctemp; + u16 cctemp; + u32 oaes; + u32 aen_result; + u32 ctratt; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; + unsigned long quirks; + struct nvme_id_power_state psd[32]; + struct nvme_effects_log *effects; + struct xarray cels; + struct work_struct scan_work; + struct work_struct async_event_work; + struct delayed_work ka_work; + struct delayed_work failfast_work; + struct nvme_command ka_cmd; + unsigned long ka_last_check_time; + struct work_struct fw_act_work; + unsigned long events; + +#ifdef CONFIG_NVME_MULTIPATH + /* asymmetric namespace access: */ + u8 anacap; + u8 anatt; + u32 anagrpmax; + u32 nanagrpid; + struct mutex ana_lock; + struct nvme_ana_rsp_hdr *ana_log_buf; + size_t ana_log_size; + struct timer_list anatt_timer; + struct work_struct ana_work; +#endif + +#ifdef CONFIG_NVME_AUTH + struct work_struct dhchap_auth_work; + struct mutex dhchap_auth_mutex; + struct nvme_dhchap_queue_context *dhchap_ctxs; + struct nvme_dhchap_key *host_key; + struct nvme_dhchap_key *ctrl_key; + u16 transaction; +#endif + + /* Power saving configuration */ + u64 ps_max_latency_us; + bool apst_enabled; + + /* PCIe only: */ + u16 hmmaxd; + u32 hmpre; + u32 hmmin; + u32 hmminds; + + /* Fabrics only */ + u32 ioccsz; + u32 iorcsz; + u16 icdoff; + u16 maxcmd; + int nr_reconnects; + unsigned long flags; + struct nvmf_ctrl_options *opts; + + struct page *discard_page; + unsigned long discard_page_busy; + + struct nvme_fault_inject fault_inject; + + enum nvme_ctrl_type 
cntrltype; + enum nvme_dctype dctype; +}; + +static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) +{ + return READ_ONCE(ctrl->state); +} + +enum nvme_iopolicy { + NVME_IOPOLICY_NUMA, + NVME_IOPOLICY_RR, +}; + +struct nvme_subsystem { + int instance; + struct device dev; + /* + * Because we unregister the device on the last put we need + * a separate refcount. + */ + struct kref ref; + struct list_head entry; + struct mutex lock; + struct list_head ctrls; + struct list_head nsheads; + char subnqn[NVMF_NQN_SIZE]; + char serial[20]; + char model[40]; + char firmware_rev[8]; + u8 cmic; + enum nvme_subsys_type subtype; + u16 vendor_id; + u16 awupf; /* 0's based awupf value. */ + struct ida ns_ida; +#ifdef CONFIG_NVME_MULTIPATH + enum nvme_iopolicy iopolicy; +#endif +}; + +/* + * Container structure for uniqueue namespace identifiers. + */ +struct nvme_ns_ids { + u8 eui64[8]; + u8 nguid[16]; + uuid_t uuid; + u8 csi; +}; + +/* + * Anchor structure for namespaces. There is one for each namespace in a + * NVMe subsystem that any of our controllers can see, and the namespace + * structure for each controller is chained of it. For private namespaces + * there is a 1:1 relation to our namespace structures, that is ->list + * only ever has a single entry for private namespaces. + */ +struct nvme_ns_head { + struct list_head list; + struct srcu_struct srcu; + struct nvme_subsystem *subsys; + unsigned ns_id; + struct nvme_ns_ids ids; + struct list_head entry; + struct kref ref; + bool shared; + int instance; + struct nvme_effects_log *effects; + + struct cdev cdev; + struct device cdev_device; + + struct gendisk *disk; +#ifdef CONFIG_NVME_MULTIPATH + struct bio_list requeue_list; + spinlock_t requeue_lock; + struct work_struct requeue_work; + struct mutex lock; + unsigned long flags; +#define NVME_NSHEAD_DISK_LIVE 0 + struct nvme_ns __rcu *current_path[]; +#endif +}; + +static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head) +{ + return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk; +} + +enum nvme_ns_features { + NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */ + NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */ + NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */ +}; + +struct nvme_ns { + struct list_head list; + + struct nvme_ctrl *ctrl; + struct request_queue *queue; + struct gendisk *disk; +#ifdef CONFIG_NVME_MULTIPATH + enum nvme_ana_state ana_state; + u32 ana_grpid; +#endif + struct list_head siblings; + struct kref kref; + struct nvme_ns_head *head; + + int lba_shift; + u16 ms; + u16 pi_size; + u16 sgs; + u32 sws; + u8 pi_type; + u8 guard_type; +#ifdef CONFIG_BLK_DEV_ZONED + u64 zsze; +#endif + unsigned long features; + unsigned long flags; +#define NVME_NS_REMOVING 0 +#define NVME_NS_ANA_PENDING 2 +#define NVME_NS_FORCE_RO 3 +#define NVME_NS_READY 4 + + struct cdev cdev; + struct device cdev_device; + + struct nvme_fault_inject fault_inject; + +}; + +/* NVMe ns supports metadata actions by the controller (generate/strip) */ +static inline bool nvme_ns_has_pi(struct nvme_ns *ns) +{ + return ns->pi_type && ns->ms == ns->pi_size; +} + +struct nvme_ctrl_ops { + const char *name; + struct module *module; + unsigned int flags; +#define NVME_F_FABRICS (1 << 0) +#define NVME_F_METADATA_SUPPORTED (1 << 1) +#define NVME_F_BLOCKING (1 << 2) + + const struct attribute_group **dev_attr_groups; + int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); + int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); + int 
(*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); + void (*free_ctrl)(struct nvme_ctrl *ctrl); + void (*submit_async_event)(struct nvme_ctrl *ctrl); + void (*delete_ctrl)(struct nvme_ctrl *ctrl); + void (*stop_ctrl)(struct nvme_ctrl *ctrl); + int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); + void (*print_device_info)(struct nvme_ctrl *ctrl); + bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl); +}; + +/* + * nvme command_id is constructed as such: + * | xxxx | xxxxxxxxxxxx | + * gen request tag + */ +#define nvme_genctr_mask(gen) (gen & 0xf) +#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12) +#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12) +#define nvme_tag_from_cid(cid) (cid & 0xfff) + +static inline u16 nvme_cid(struct request *rq) +{ + return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag; +} + +static inline struct request *nvme_find_rq(struct blk_mq_tags *tags, + u16 command_id) +{ + u8 genctr = nvme_genctr_from_cid(command_id); + u16 tag = nvme_tag_from_cid(command_id); + struct request *rq; + + rq = blk_mq_tag_to_rq(tags, tag); + if (unlikely(!rq)) { + pr_err("could not locate request for tag %#x\n", + tag); + return NULL; + } + if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) { + dev_err(nvme_req(rq)->ctrl->device, + "request %#x genctr mismatch (got %#x expected %#x)\n", + tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr)); + return NULL; + } + return rq; +} + +static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags, + u16 command_id) +{ + return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id)); +} + +/* + * Return the length of the string without the space padding + */ +static inline int nvme_strlen(char *s, int len) +{ + while (s[len - 1] == ' ') + len--; + return len; +} + +static inline void nvme_print_device_info(struct nvme_ctrl *ctrl) +{ + struct nvme_subsystem *subsys = ctrl->subsys; + + if (ctrl->ops->print_device_info) { + ctrl->ops->print_device_info(ctrl); + return; + } + + dev_err(ctrl->device, + "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS +void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj, + const char *dev_name); +void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject); +void nvme_should_fail(struct request *req); +#else +static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj, + const char *dev_name) +{ +} +static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj) +{ +} +static inline void nvme_should_fail(struct request *req) {} +#endif + +bool nvme_wait_reset(struct nvme_ctrl *ctrl); +int nvme_try_sched_reset(struct nvme_ctrl *ctrl); + +static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) +{ + int ret; + + if (!ctrl->subsystem) + return -ENOTTY; + if (!nvme_wait_reset(ctrl)) + return -EBUSY; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); + if (ret) + return ret; + + return nvme_try_sched_reset(ctrl); +} + +/* + * Convert a 512B sector number to a device logical block number. + */ +static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector) +{ + return sector >> (ns->lba_shift - SECTOR_SHIFT); +} + +/* + * Convert a device logical block number to a 512B sector number. 
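+ * For example, a namespace formatted with 4096-byte blocks (lba_shift == 12)
+ * maps LBA 1 to 512-byte sector 8 (1 << (12 - 9)).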
+ */ +static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba) +{ + return lba << (ns->lba_shift - SECTOR_SHIFT); +} + +/* + * Convert byte length to nvme's 0-based num dwords + */ +static inline u32 nvme_bytes_to_numd(size_t len) +{ + return (len >> 2) - 1; +} + +static inline bool nvme_is_ana_error(u16 status) +{ + switch (status & 0x7ff) { + case NVME_SC_ANA_TRANSITION: + case NVME_SC_ANA_INACCESSIBLE: + case NVME_SC_ANA_PERSISTENT_LOSS: + return true; + default: + return false; + } +} + +static inline bool nvme_is_path_error(u16 status) +{ + /* check for a status code type of 'path related status' */ + return (status & 0x700) == 0x300; +} + +/* + * Fill in the status and result information from the CQE, and then figure out + * if blk-mq will need to use IPI magic to complete the request, and if yes do + * so. If not let the caller complete the request without an indirect function + * call. + */ +static inline bool nvme_try_complete_req(struct request *req, __le16 status, + union nvme_result result) +{ + struct nvme_request *rq = nvme_req(req); + struct nvme_ctrl *ctrl = rq->ctrl; + + if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN)) + rq->genctr++; + + rq->status = le16_to_cpu(status) >> 1; + rq->result = result; + /* inject error when permitted by fault injection framework */ + nvme_should_fail(req); + if (unlikely(blk_should_fake_timeout(req->q))) + return true; + return blk_mq_complete_request_remote(req); +} + +static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl) +{ + get_device(ctrl->device); +} + +static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl) +{ + put_device(ctrl->device); +} + +static inline bool nvme_is_aen_req(u16 qid, __u16 command_id) +{ + return !qid && + nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH; +} + +void nvme_complete_rq(struct request *req); +void nvme_complete_batch_req(struct request *req); + +static __always_inline void nvme_complete_batch(struct io_comp_batch *iob, + void (*fn)(struct request *rq)) +{ + struct request *req; + + rq_list_for_each(&iob->req_list, req) { + fn(req); + nvme_complete_batch_req(req); + } + blk_mq_end_request_batch(iob); +} + +blk_status_t nvme_host_path_error(struct request *req); +bool nvme_cancel_request(struct request *req, void *data); +void nvme_cancel_tagset(struct nvme_ctrl *ctrl); +void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); +bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, + enum nvme_ctrl_state new_state); +int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown); +int nvme_enable_ctrl(struct nvme_ctrl *ctrl); +int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, + const struct nvme_ctrl_ops *ops, unsigned long quirks); +void nvme_uninit_ctrl(struct nvme_ctrl *ctrl); +void nvme_start_ctrl(struct nvme_ctrl *ctrl); +void nvme_stop_ctrl(struct nvme_ctrl *ctrl); +int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended); +int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, unsigned int cmd_size); +void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl); +int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, unsigned int nr_maps, + unsigned int cmd_size); +void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl); + +void nvme_remove_namespaces(struct nvme_ctrl *ctrl); + +void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, + volatile union nvme_result *res); + +void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl); +void 
nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl); +void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl); +void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl); +void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl); +void nvme_sync_queues(struct nvme_ctrl *ctrl); +void nvme_sync_io_queues(struct nvme_ctrl *ctrl); +void nvme_unfreeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze(struct nvme_ctrl *ctrl); +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +void nvme_start_freeze(struct nvme_ctrl *ctrl); + +static inline enum req_op nvme_req_op(struct nvme_command *cmd) +{ + return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; +} + +#define NVME_QID_ANY -1 +void nvme_init_request(struct request *req, struct nvme_command *cmd); +void nvme_cleanup_cmd(struct request *req); +blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req); +blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, + struct request *req); +bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, + bool queue_live); + +static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, + bool queue_live) +{ + if (likely(ctrl->state == NVME_CTRL_LIVE)) + return true; + if (ctrl->ops->flags & NVME_F_FABRICS && + ctrl->state == NVME_CTRL_DELETING) + return queue_live; + return __nvme_check_ready(ctrl, rq, queue_live); +} + +/* + * NSID shall be unique for all shared namespaces, or if at least one of the + * following conditions is met: + * 1. Namespace Management is supported by the controller + * 2. ANA is supported by the controller + * 3. NVM Set are supported by the controller + * + * In other case, private namespace are not required to report a unique NSID. + */ +static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head) +{ + return head->shared || + (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) || + (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) || + (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); +} + +int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, + void *buf, unsigned bufflen); +int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, + union nvme_result *result, void *buffer, unsigned bufflen, + int qid, int at_head, + blk_mq_req_flags_t flags); +int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, + unsigned int dword11, void *buffer, size_t buflen, + u32 *result); +int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, + unsigned int dword11, void *buffer, size_t buflen, + u32 *result); +int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); +void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); +int nvme_reset_ctrl(struct nvme_ctrl *ctrl); +int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); +int nvme_delete_ctrl(struct nvme_ctrl *ctrl); +void nvme_queue_scan(struct nvme_ctrl *ctrl); +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, + void *log, size_t size, u64 offset); +bool nvme_tryget_ns_head(struct nvme_ns_head *head); +void nvme_put_ns_head(struct nvme_ns_head *head); +int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, + const struct file_operations *fops, struct module *owner); +void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device); +int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg); +long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned 
int cmd, unsigned long arg); +long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +long nvme_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd, + struct io_comp_batch *iob, unsigned int poll_flags); +int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, + unsigned int issue_flags); +int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, + unsigned int issue_flags); +int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo); +int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); + +extern const struct attribute_group *nvme_ns_id_attr_groups[]; +extern const struct pr_ops nvme_pr_ops; +extern const struct block_device_operations nvme_ns_head_ops; +extern const struct attribute_group nvme_dev_attrs_group; +extern const struct attribute_group *nvme_subsys_attrs_groups[]; +extern const struct attribute_group *nvme_dev_attr_groups[]; +extern const struct block_device_operations nvme_bdev_ops; + +void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); +struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); +#ifdef CONFIG_NVME_MULTIPATH +static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return ctrl->ana_log_buf != NULL; +} + +void nvme_mpath_unfreeze(struct nvme_subsystem *subsys); +void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); +void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); +void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys); +void nvme_failover_req(struct request *req); +void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); +int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); +void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid); +void nvme_mpath_remove_disk(struct nvme_ns_head *head); +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); +void nvme_mpath_update(struct nvme_ctrl *ctrl); +void nvme_mpath_uninit(struct nvme_ctrl *ctrl); +void nvme_mpath_stop(struct nvme_ctrl *ctrl); +bool nvme_mpath_clear_current_path(struct nvme_ns *ns); +void nvme_mpath_revalidate_paths(struct nvme_ns *ns); +void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); +void nvme_mpath_shutdown_disk(struct nvme_ns_head *head); +void nvme_mpath_start_request(struct request *rq); +void nvme_mpath_end_request(struct request *rq); + +static inline void nvme_trace_bio_complete(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + + if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio) + trace_block_bio_complete(ns->head->disk->queue, req->bio); +} + +extern bool multipath; +extern struct device_attribute dev_attr_ana_grpid; +extern struct device_attribute dev_attr_ana_state; +extern struct device_attribute subsys_attr_iopolicy; + +#else +#define multipath false +static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return false; +} +static inline void nvme_failover_req(struct request *req) +{ +} +static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) +{ +} +static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head) +{ + return 0; +} +static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) +{ +} +static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) +{ +} +static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) +{ + return false; +} +static inline void nvme_mpath_revalidate_paths(struct 
nvme_ns *ns) +{ +} +static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) +{ +} +static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +{ +} +static inline void nvme_trace_bio_complete(struct request *req) +{ +} +static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) +{ +} +static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, + struct nvme_id_ctrl *id) +{ + if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) + dev_warn(ctrl->device, +"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); + return 0; +} +static inline void nvme_mpath_update(struct nvme_ctrl *ctrl) +{ +} +static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) +{ +} +static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) +{ +} +static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_start_request(struct request *rq) +{ +} +static inline void nvme_mpath_end_request(struct request *rq) +{ +} +#endif /* CONFIG_NVME_MULTIPATH */ + +int nvme_revalidate_zones(struct nvme_ns *ns); +int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); +#ifdef CONFIG_BLK_DEV_ZONED +int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf); +blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, + struct nvme_command *cmnd, + enum nvme_zone_mgmt_action action); +#else +static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd, + enum nvme_zone_mgmt_action action) +{ + return BLK_STS_NOTSUPP; +} + +static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) +{ + dev_warn(ns->ctrl->device, + "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n"); + return -EPROTONOSUPPORT; +} +#endif + +static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) +{ + return dev_to_disk(dev)->private_data; +} + +#ifdef CONFIG_NVME_HWMON +int nvme_hwmon_init(struct nvme_ctrl *ctrl); +void nvme_hwmon_exit(struct nvme_ctrl *ctrl); +#else +static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl) +{ + return 0; +} + +static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ +} +#endif + +static inline void nvme_start_request(struct request *rq) +{ + if (rq->cmd_flags & REQ_NVME_MPATH) + nvme_mpath_start_request(rq); + blk_mq_start_request(rq); +} + +static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) +{ + return ctrl->sgls & ((1 << 0) | (1 << 1)); +} + +#ifdef CONFIG_NVME_AUTH +int __init nvme_init_auth(void); +void __exit nvme_exit_auth(void); +int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl); +void nvme_auth_stop(struct nvme_ctrl *ctrl); +int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid); +int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid); +void nvme_auth_free(struct nvme_ctrl *ctrl); +#else +static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) +{ + return 0; +} +static inline int __init nvme_init_auth(void) +{ + return 0; +} +static inline void __exit nvme_exit_auth(void) +{ +} +static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}; +static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) +{ + return -EPROTONOSUPPORT; +} +static inline int 
nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) +{ + return NVME_SC_AUTH_REQUIRED; +} +static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}; +#endif + +u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + u8 opcode); +u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); +int nvme_execute_rq(struct request *rq, bool at_head); +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, + struct nvme_command *cmd, int status); +struct nvme_ctrl *nvme_ctrl_from_file(struct file *file); +struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid); +void nvme_put_ns(struct nvme_ns *ns); + +static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) +{ + return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; +} + +#ifdef CONFIG_NVME_VERBOSE_ERRORS +const unsigned char *nvme_get_error_status_str(u16 status); +const unsigned char *nvme_get_opcode_str(u8 opcode); +const unsigned char *nvme_get_admin_opcode_str(u8 opcode); +const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode); +#else /* CONFIG_NVME_VERBOSE_ERRORS */ +static inline const unsigned char *nvme_get_error_status_str(u16 status) +{ + return "I/O Error"; +} +static inline const unsigned char *nvme_get_opcode_str(u8 opcode) +{ + return "I/O Cmd"; +} +static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +{ + return "Admin Cmd"; +} + +static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) +{ + return "Fabrics Cmd"; +} +#endif /* CONFIG_NVME_VERBOSE_ERRORS */ + +static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype) +{ + if (opcode == nvme_fabrics_command) + return nvme_get_fabrics_opcode_str(fctype); + return qid ? nvme_get_opcode_str(opcode) : + nvme_get_admin_opcode_str(opcode); +} +#endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c new file mode 100644 index 0000000000..f8e92404a6 --- /dev/null +++ b/drivers/nvme/host/pci.c @@ -0,0 +1,3543 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM Express device driver + * Copyright (c) 2011-2014, Intel Corporation. + */ + +#include <linux/acpi.h> +#include <linux/async.h> +#include <linux/blkdev.h> +#include <linux/blk-mq.h> +#include <linux/blk-mq-pci.h> +#include <linux/blk-integrity.h> +#include <linux/dmi.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kstrtox.h> +#include <linux/memremap.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/once.h> +#include <linux/pci.h> +#include <linux/suspend.h> +#include <linux/t10-pi.h> +#include <linux/types.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/sed-opal.h> +#include <linux/pci-p2pdma.h> + +#include "trace.h" +#include "nvme.h" + +#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) +#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) + +#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) + +/* + * These can be higher, but we need to ensure that any command doesn't + * require an sg allocation that needs more than a page of data. 
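A standalone sketch (not from this patch, depth invented) of the ring sizes the SQ_SIZE()/CQ_SIZE() macros above produce:

	#include <stdio.h>

	int main(void)
	{
		unsigned int q_depth = 1024;	/* hypothetical I/O queue depth */
		unsigned int sqes = 6;		/* 64-byte submission queue entries */
		unsigned int cqe_size = 16;	/* sizeof(struct nvme_completion) */

		printf("SQ ring %u KiB, CQ ring %u KiB\n",
		       (q_depth << sqes) >> 10,		/* 64 KiB */
		       (q_depth * cqe_size) >> 10);	/* 16 KiB */
		return 0;
	}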
+ */ +#define NVME_MAX_KB_SZ 8192 +#define NVME_MAX_SEGS 128 +#define NVME_MAX_NR_ALLOCATIONS 5 + +static int use_threaded_interrupts; +module_param(use_threaded_interrupts, int, 0444); + +static bool use_cmb_sqes = true; +module_param(use_cmb_sqes, bool, 0444); +MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); + +static unsigned int max_host_mem_size_mb = 128; +module_param(max_host_mem_size_mb, uint, 0444); +MODULE_PARM_DESC(max_host_mem_size_mb, + "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); + +static unsigned int sgl_threshold = SZ_32K; +module_param(sgl_threshold, uint, 0644); +MODULE_PARM_DESC(sgl_threshold, + "Use SGLs when average request segment size is larger or equal to " + "this size. Use 0 to disable SGLs."); + +#define NVME_PCI_MIN_QUEUE_SIZE 2 +#define NVME_PCI_MAX_QUEUE_SIZE 4095 +static int io_queue_depth_set(const char *val, const struct kernel_param *kp); +static const struct kernel_param_ops io_queue_depth_ops = { + .set = io_queue_depth_set, + .get = param_get_uint, +}; + +static unsigned int io_queue_depth = 1024; +module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); +MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); + +static int io_queue_count_set(const char *val, const struct kernel_param *kp) +{ + unsigned int n; + int ret; + + ret = kstrtouint(val, 10, &n); + if (ret != 0 || n > num_possible_cpus()) + return -EINVAL; + return param_set_uint(val, kp); +} + +static const struct kernel_param_ops io_queue_count_ops = { + .set = io_queue_count_set, + .get = param_get_uint, +}; + +static unsigned int write_queues; +module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); +MODULE_PARM_DESC(write_queues, + "Number of queues to use for writes. If not set, reads and writes " + "will share a queue set."); + +static unsigned int poll_queues; +module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); +MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); + +static bool noacpi; +module_param(noacpi, bool, 0444); +MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); + +struct nvme_dev; +struct nvme_queue; + +static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); +static void nvme_delete_io_queues(struct nvme_dev *dev); +static void nvme_update_attrs(struct nvme_dev *dev); + +/* + * Represents an NVM Express device. Each nvme_dev is a PCI function. 
+ */ +struct nvme_dev { + struct nvme_queue *queues; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; + u32 __iomem *dbs; + struct device *dev; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + unsigned online_queues; + unsigned max_qid; + unsigned io_queues[HCTX_MAX_TYPES]; + unsigned int num_vecs; + u32 q_depth; + int io_sqes; + u32 db_stride; + void __iomem *bar; + unsigned long bar_mapped_size; + struct mutex shutdown_lock; + bool subsystem; + u64 cmb_size; + bool cmb_use_sqes; + u32 cmbsz; + u32 cmbloc; + struct nvme_ctrl ctrl; + u32 last_ps; + bool hmb; + + mempool_t *iod_mempool; + + /* shadow doorbell buffer support: */ + __le32 *dbbuf_dbs; + dma_addr_t dbbuf_dbs_dma_addr; + __le32 *dbbuf_eis; + dma_addr_t dbbuf_eis_dma_addr; + + /* host memory buffer support: */ + u64 host_mem_size; + u32 nr_host_mem_descs; + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; +}; + +static int io_queue_depth_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, + NVME_PCI_MAX_QUEUE_SIZE); +} + +static inline unsigned int sq_idx(unsigned int qid, u32 stride) +{ + return qid * 2 * stride; +} + +static inline unsigned int cq_idx(unsigned int qid, u32 stride) +{ + return (qid * 2 + 1) * stride; +} + +static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct nvme_dev, ctrl); +} + +/* + * An NVM Express queue. Each device has at least two (one for admin + * commands and one for I/O commands). + */ +struct nvme_queue { + struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u32 q_depth; + u16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +union nvme_descriptor { + struct nvme_sgl_desc *sg_list; + __le64 *prp_list; +}; + +/* + * The nvme_iod describes the data in an I/O. + * + * The sg pointer contains the list of PRP/SGL chunk allocations in addition + * to the actual struct scatterlist. + */ +struct nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + bool aborted; + s8 nr_allocations; /* PRP list pool allocations. 0 means small + pool in use */ + unsigned int dma_len; /* length of single DMA segment mapping */ + dma_addr_t first_dma; + dma_addr_t meta_dma; + struct sg_table sgt; + union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; +}; + +static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) +{ + return dev->nr_allocated_queues * 8 * dev->db_stride; +} + +static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev) +{ + unsigned int mem_size = nvme_dbbuf_size(dev); + + if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) + return; + + if (dev->dbbuf_dbs) { + /* + * Clear the dbbuf memory so the driver doesn't observe stale + * values from the previous instantiation. 
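A standalone sketch (values invented) of the shadow-doorbell layout implied by nvme_dbbuf_size() and the sq_idx()/cq_idx() helpers above:

	#include <stdio.h>

	int main(void)
	{
		unsigned int db_stride = 1;		/* CAP.DSTRD == 0 */
		unsigned int nr_allocated_queues = 65;	/* admin queue + 64 I/O queues */
		unsigned int qid = 3;

		/* one 4-byte SQ slot and one 4-byte CQ slot per queue, hence "* 8" */
		printf("dbbuf size: %u bytes\n", nr_allocated_queues * 8 * db_stride);
		printf("qid %u: sq slot %u, cq slot %u\n", qid,
		       qid * 2 * db_stride, (qid * 2 + 1) * db_stride);
		return 0;
	}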
+ */ + memset(dev->dbbuf_dbs, 0, mem_size); + memset(dev->dbbuf_eis, 0, mem_size); + return; + } + + dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, + &dev->dbbuf_dbs_dma_addr, + GFP_KERNEL); + if (!dev->dbbuf_dbs) + goto fail; + dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, + &dev->dbbuf_eis_dma_addr, + GFP_KERNEL); + if (!dev->dbbuf_eis) + goto fail_free_dbbuf_dbs; + return; + +fail_free_dbbuf_dbs: + dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, + dev->dbbuf_dbs_dma_addr); + dev->dbbuf_dbs = NULL; +fail: + dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); +} + +static void nvme_dbbuf_dma_free(struct nvme_dev *dev) +{ + unsigned int mem_size = nvme_dbbuf_size(dev); + + if (dev->dbbuf_dbs) { + dma_free_coherent(dev->dev, mem_size, + dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); + dev->dbbuf_dbs = NULL; + } + if (dev->dbbuf_eis) { + dma_free_coherent(dev->dev, mem_size, + dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); + dev->dbbuf_eis = NULL; + } +} + +static void nvme_dbbuf_init(struct nvme_dev *dev, + struct nvme_queue *nvmeq, int qid) +{ + if (!dev->dbbuf_dbs || !qid) + return; + + nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; + nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; + nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; + nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; +} + +static void nvme_dbbuf_free(struct nvme_queue *nvmeq) +{ + if (!nvmeq->qid) + return; + + nvmeq->dbbuf_sq_db = NULL; + nvmeq->dbbuf_cq_db = NULL; + nvmeq->dbbuf_sq_ei = NULL; + nvmeq->dbbuf_cq_ei = NULL; +} + +static void nvme_dbbuf_set(struct nvme_dev *dev) +{ + struct nvme_command c = { }; + unsigned int i; + + if (!dev->dbbuf_dbs) + return; + + c.dbbuf.opcode = nvme_admin_dbbuf; + c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); + c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); + + if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { + dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); + /* Free memory and continue on */ + nvme_dbbuf_dma_free(dev); + + for (i = 1; i <= dev->online_queues; i++) + nvme_dbbuf_free(&dev->queues[i]); + } +} + +static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) +{ + return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); +} + +/* Update dbbuf and return true if an MMIO is required */ +static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, + volatile __le32 *dbbuf_ei) +{ + if (dbbuf_db) { + u16 old_value, event_idx; + + /* + * Ensure that the queue is written before updating + * the doorbell in memory + */ + wmb(); + + old_value = le32_to_cpu(*dbbuf_db); + *dbbuf_db = cpu_to_le32(value); + + /* + * Ensure that the doorbell is updated before reading the event + * index from memory. The controller needs to provide similar + * ordering to ensure the envent index is updated before reading + * the doorbell. + */ + mb(); + + event_idx = le32_to_cpu(*dbbuf_ei); + if (!nvme_dbbuf_need_event(event_idx, value, old_value)) + return false; + } + + return true; +} + +/* + * Will slightly overestimate the number of pages needed. This is OK + * as it only leads to a small amount of wasted memory for the lifetime of + * the I/O. 
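The wrap-safe comparison in nvme_dbbuf_need_event() above is easiest to see with numbers; a standalone sketch, not part of the driver:

	#include <assert.h>

	static int need_event(unsigned short event_idx, unsigned short new,
			      unsigned short old)
	{
		return (unsigned short)(new - event_idx - 1) <
		       (unsigned short)(new - old);
	}

	int main(void)
	{
		/* doorbell moved 10 -> 12: MMIO is needed only if the controller's
		 * event index lies in the range just passed over, [old, new-1] = [10, 11] */
		assert(need_event(10, 12, 10));
		assert(need_event(11, 12, 10));
		assert(!need_event(12, 12, 10));
		assert(!need_event(9, 12, 10));
		/* the u16 casts keep the test correct across the 65535 -> 0 wrap */
		assert(need_event(65535, 1, 65534));
		return 0;
	}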
+ */ +static int nvme_pci_npages_prp(void) +{ + unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; + unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); + return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); +} + +static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_dev *dev = to_nvme_dev(data); + struct nvme_queue *nvmeq = &dev->queues[0]; + + WARN_ON(hctx_idx != 0); + WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); + + hctx->driver_data = nvmeq; + return 0; +} + +static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_dev *dev = to_nvme_dev(data); + struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; + + WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); + hctx->driver_data = nvmeq; + return 0; +} + +static int nvme_pci_init_request(struct blk_mq_tag_set *set, + struct request *req, unsigned int hctx_idx, + unsigned int numa_node) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + nvme_req(req)->ctrl = set->driver_data; + nvme_req(req)->cmd = &iod->cmd; + return 0; +} + +static int queue_irq_offset(struct nvme_dev *dev) +{ + /* if we have more than 1 vec, admin queue offsets us by 1 */ + if (dev->num_vecs > 1) + return 1; + + return 0; +} + +static void nvme_pci_map_queues(struct blk_mq_tag_set *set) +{ + struct nvme_dev *dev = to_nvme_dev(set->driver_data); + int i, qoff, offset; + + offset = queue_irq_offset(dev); + for (i = 0, qoff = 0; i < set->nr_maps; i++) { + struct blk_mq_queue_map *map = &set->map[i]; + + map->nr_queues = dev->io_queues[i]; + if (!map->nr_queues) { + BUG_ON(i == HCTX_TYPE_DEFAULT); + continue; + } + + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL && offset) + blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); + else + blk_mq_map_queues(map); + qoff += map->nr_queues; + offset += map->nr_queues; + } +} + +/* + * Write sq tail if we are asked to, or if the next command would wrap. 
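To see where NVME_MAX_NR_ALLOCATIONS == 5 comes from, here is a standalone re-run of the nvme_pci_npages_prp() arithmetic above (not part of the patch):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int page = 4096;			/* NVME_CTRL_PAGE_SIZE */
		unsigned int max_bytes = 8192 * 1024 + page;	/* NVME_MAX_KB_SZ plus one page for an unaligned start */
		unsigned int nprps = DIV_ROUND_UP(max_bytes, page);	/* 2049 PRP entries */

		/* each 4 KiB PRP-list page holds 511 entries plus one chain pointer */
		printf("list pages: %u\n", DIV_ROUND_UP(8 * nprps, page - 8));	/* prints 5 */
		return 0;
	}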
+ */ +static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) +{ + if (!write_sq) { + u16 next_tail = nvmeq->sq_tail + 1; + + if (next_tail == nvmeq->q_depth) + next_tail = 0; + if (next_tail != nvmeq->last_sq_tail) + return; + } + + if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, + nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) + writel(nvmeq->sq_tail, nvmeq->q_db); + nvmeq->last_sq_tail = nvmeq->sq_tail; +} + +static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, + struct nvme_command *cmd) +{ + memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), + absolute_pointer(cmd), sizeof(*cmd)); + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; +} + +static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) +{ + struct nvme_queue *nvmeq = hctx->driver_data; + + spin_lock(&nvmeq->sq_lock); + if (nvmeq->sq_tail != nvmeq->last_sq_tail) + nvme_write_sq_db(nvmeq, true); + spin_unlock(&nvmeq->sq_lock); +} + +static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, + int nseg) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int avg_seg_size; + + avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); + + if (!nvme_ctrl_sgl_supported(&dev->ctrl)) + return false; + if (!nvmeq->qid) + return false; + if (!sgl_threshold || avg_seg_size < sgl_threshold) + return false; + return true; +} + +static void nvme_free_prps(struct nvme_dev *dev, struct request *req) +{ + const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + + for (i = 0; i < iod->nr_allocations; i++) { + __le64 *prp_list = iod->list[i].prp_list; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; + } +} + +static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if (iod->dma_len) { + dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; + } + + WARN_ON_ONCE(!iod->sgt.nents); + + dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); + + if (iod->nr_allocations == 0) + dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, + iod->first_dma); + else if (iod->nr_allocations == 1) + dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, + iod->first_dma); + else + nvme_free_prps(dev, req); + mempool_free(iod->sgt.sgl, dev->iod_mempool); +} + +static void nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " + "dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + +static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, + struct request *req, struct nvme_rw_command *cmnd) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct dma_pool *pool; + int length = blk_rq_payload_bytes(req); + struct scatterlist *sg = iod->sgt.sgl; + int dma_len = sg_dma_len(sg); + u64 dma_addr = sg_dma_address(sg); + int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); + __le64 *prp_list; + dma_addr_t prp_dma; + int nprps, i; + + length -= (NVME_CTRL_PAGE_SIZE - offset); + if (length <= 0) { + iod->first_dma = 0; + goto done; + } + + dma_len -= (NVME_CTRL_PAGE_SIZE - offset); + if (dma_len) { + dma_addr += (NVME_CTRL_PAGE_SIZE - offset); + } else 
{ + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= NVME_CTRL_PAGE_SIZE) { + iod->first_dma = dma_addr; + goto done; + } + + nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); + if (nprps <= (256 / 8)) { + pool = dev->prp_small_pool; + iod->nr_allocations = 0; + } else { + pool = dev->prp_page_pool; + iod->nr_allocations = 1; + } + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + iod->nr_allocations = -1; + return BLK_STS_RESOURCE; + } + iod->list[0].prp_list = prp_list; + iod->first_dma = prp_dma; + i = 0; + for (;;) { + if (i == NVME_CTRL_PAGE_SIZE >> 3) { + __le64 *old_prp_list = prp_list; + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) + goto free_prps; + iod->list[iod->nr_allocations++].prp_list = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= NVME_CTRL_PAGE_SIZE; + dma_addr += NVME_CTRL_PAGE_SIZE; + length -= NVME_CTRL_PAGE_SIZE; + if (length <= 0) + break; + if (dma_len > 0) + continue; + if (unlikely(dma_len < 0)) + goto bad_sgl; + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } +done: + cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); + return BLK_STS_OK; +free_prps: + nvme_free_prps(dev, req); + return BLK_STS_RESOURCE; +bad_sgl: + WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), + "Invalid SGL for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), iod->sgt.nents); + return BLK_STS_IOERR; +} + +static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, + struct scatterlist *sg) +{ + sge->addr = cpu_to_le64(sg_dma_address(sg)); + sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->type = NVME_SGL_FMT_DATA_DESC << 4; +} + +static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, + dma_addr_t dma_addr, int entries) +{ + sge->addr = cpu_to_le64(dma_addr); + sge->length = cpu_to_le32(entries * sizeof(*sge)); + sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; +} + +static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, + struct request *req, struct nvme_rw_command *cmd) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct dma_pool *pool; + struct nvme_sgl_desc *sg_list; + struct scatterlist *sg = iod->sgt.sgl; + unsigned int entries = iod->sgt.nents; + dma_addr_t sgl_dma; + int i = 0; + + /* setting the transfer type as SGL */ + cmd->flags = NVME_CMD_SGL_METABUF; + + if (entries == 1) { + nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); + return BLK_STS_OK; + } + + if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { + pool = dev->prp_small_pool; + iod->nr_allocations = 0; + } else { + pool = dev->prp_page_pool; + iod->nr_allocations = 1; + } + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); + if (!sg_list) { + iod->nr_allocations = -1; + return BLK_STS_RESOURCE; + } + + iod->list[0].sg_list = sg_list; + iod->first_dma = sgl_dma; + + nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); + do { + nvme_pci_sgl_set_data(&sg_list[i++], sg); + sg = sg_next(sg); + } while (--entries > 0); + + return BLK_STS_OK; +} + +static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, + struct request *req, struct nvme_rw_command *cmnd, + struct bio_vec *bv) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); + unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; + + iod->first_dma = 
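A standalone sketch of the PRP entries the walk in nvme_pci_setup_prps() above would emit for one contiguous 16 KiB segment; the bus address is invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long dma_addr = 0x1000200;	/* hypothetical, 512 bytes into a page */
		int page = 4096;				/* NVME_CTRL_PAGE_SIZE */
		int length = 16384;

		printf("PRP1 = %#llx\n", dma_addr);		/* only PRP1 may be unaligned */
		length -= page - (int)(dma_addr & (page - 1));
		dma_addr = (dma_addr | (page - 1)) + 1;		/* next page boundary */
		/* the rest is page aligned; since more than one page remains,
		 * PRP2 points at a list holding these entries */
		for (; length > 0; length -= page, dma_addr += page)
			printf("list entry = %#llx\n", dma_addr);
		return 0;
	}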
dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); + if (dma_mapping_error(dev->dev, iod->first_dma)) + return BLK_STS_RESOURCE; + iod->dma_len = bv->bv_len; + + cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); + if (bv->bv_len > first_prp_len) + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); + else + cmnd->dptr.prp2 = 0; + return BLK_STS_OK; +} + +static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, + struct request *req, struct nvme_rw_command *cmnd, + struct bio_vec *bv) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); + if (dma_mapping_error(dev->dev, iod->first_dma)) + return BLK_STS_RESOURCE; + iod->dma_len = bv->bv_len; + + cmnd->flags = NVME_CMD_SGL_METABUF; + cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); + cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); + cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; + return BLK_STS_OK; +} + +static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, + struct nvme_command *cmnd) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + blk_status_t ret = BLK_STS_RESOURCE; + int rc; + + if (blk_rq_nr_phys_segments(req) == 1) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct bio_vec bv = req_bvec(req); + + if (!is_pci_p2pdma_page(bv.bv_page)) { + if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) + return nvme_setup_prp_simple(dev, req, + &cmnd->rw, &bv); + + if (nvmeq->qid && sgl_threshold && + nvme_ctrl_sgl_supported(&dev->ctrl)) + return nvme_setup_sgl_simple(dev, req, + &cmnd->rw, &bv); + } + } + + iod->dma_len = 0; + iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); + if (!iod->sgt.sgl) + return BLK_STS_RESOURCE; + sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); + iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); + if (!iod->sgt.orig_nents) + goto out_free_sg; + + rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), + DMA_ATTR_NO_WARN); + if (rc) { + if (rc == -EREMOTEIO) + ret = BLK_STS_TARGET; + goto out_free_sg; + } + + if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) + ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); + else + ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); + if (ret != BLK_STS_OK) + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); +out_free_sg: + mempool_free(iod->sgt.sgl, dev->iod_mempool); + return ret; +} + +static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, + struct nvme_command *cmnd) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), + rq_dma_dir(req), 0); + if (dma_mapping_error(dev->dev, iod->meta_dma)) + return BLK_STS_IOERR; + cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); + return BLK_STS_OK; +} + +static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + blk_status_t ret; + + iod->aborted = false; + iod->nr_allocations = -1; + iod->sgt.nents = 0; + + ret = nvme_setup_cmd(req->q->queuedata, req); + if (ret) + return ret; + + if (blk_rq_nr_phys_segments(req)) { + ret = nvme_map_data(dev, req, &iod->cmd); + if (ret) + goto out_free_cmd; + } + + if (blk_integrity_rq(req)) { + ret = nvme_map_metadata(dev, req, &iod->cmd); + if (ret) + goto out_unmap_data; + } + + nvme_start_request(req); + return BLK_STS_OK; +out_unmap_data: + nvme_unmap_data(dev, req); +out_free_cmd: + nvme_cleanup_cmd(req); + return 
ret; +} + +/* + * NOTE: ns is NULL when called on the admin queue. + */ +static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_queue *nvmeq = hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; + struct request *req = bd->rq; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + blk_status_t ret; + + /* + * We should not need to do this, but we're still using this to + * ensure we can drain requests on a dying queue. + */ + if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) + return BLK_STS_IOERR; + + if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) + return nvme_fail_nonready_command(&dev->ctrl, req); + + ret = nvme_prep_rq(dev, req); + if (unlikely(ret)) + return ret; + spin_lock(&nvmeq->sq_lock); + nvme_sq_copy_cmd(nvmeq, &iod->cmd); + nvme_write_sq_db(nvmeq, bd->last); + spin_unlock(&nvmeq->sq_lock); + return BLK_STS_OK; +} + +static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) +{ + spin_lock(&nvmeq->sq_lock); + while (!rq_list_empty(*rqlist)) { + struct request *req = rq_list_pop(rqlist); + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + nvme_sq_copy_cmd(nvmeq, &iod->cmd); + } + nvme_write_sq_db(nvmeq, true); + spin_unlock(&nvmeq->sq_lock); +} + +static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) +{ + /* + * We should not need to do this, but we're still using this to + * ensure we can drain requests on a dying queue. + */ + if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) + return false; + if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) + return false; + + req->mq_hctx->tags->rqs[req->tag] = req; + return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; +} + +static void nvme_queue_rqs(struct request **rqlist) +{ + struct request *req, *next, *prev = NULL; + struct request *requeue_list = NULL; + + rq_list_for_each_safe(rqlist, req, next) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + + if (!nvme_prep_rq_batch(nvmeq, req)) { + /* detach 'req' and add to remainder list */ + rq_list_move(rqlist, &requeue_list, req, prev); + + req = prev; + if (!req) + continue; + } + + if (!next || req->mq_hctx != next->mq_hctx) { + /* detach rest of list, and submit */ + req->rq_next = NULL; + nvme_submit_cmds(nvmeq, rqlist); + *rqlist = next; + prev = NULL; + } else + prev = req; + } + + *rqlist = requeue_list; +} + +static __always_inline void nvme_pci_unmap_rq(struct request *req) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; + + if (blk_integrity_rq(req)) { + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + dma_unmap_page(dev->dev, iod->meta_dma, + rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); + } + + if (blk_rq_nr_phys_segments(req)) + nvme_unmap_data(dev, req); +} + +static void nvme_pci_complete_rq(struct request *req) +{ + nvme_pci_unmap_rq(req); + nvme_complete_rq(req); +} + +static void nvme_pci_complete_batch(struct io_comp_batch *iob) +{ + nvme_complete_batch(iob, nvme_pci_unmap_rq); +} + +/* We read the CQE phase first to check if the rest of the entry is valid */ +static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) +{ + struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; + + return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; +} + +static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) +{ + u16 head = nvmeq->cq_head; + + if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, + nvmeq->dbbuf_cq_ei)) + writel(head, 
nvmeq->q_db + nvmeq->dev->db_stride); +} + +static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) +{ + if (!nvmeq->qid) + return nvmeq->dev->admin_tagset.tags[0]; + return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; +} + +static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, + struct io_comp_batch *iob, u16 idx) +{ + struct nvme_completion *cqe = &nvmeq->cqes[idx]; + __u16 command_id = READ_ONCE(cqe->command_id); + struct request *req; + + /* + * AEN requests are special as they don't time out and can + * survive any kind of queue freeze and often don't respond to + * aborts. We don't even bother to allocate a struct request + * for them but rather special case them here. + */ + if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { + nvme_complete_async_event(&nvmeq->dev->ctrl, + cqe->status, &cqe->result); + return; + } + + req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); + if (unlikely(!req)) { + dev_warn(nvmeq->dev->ctrl.device, + "invalid id %d completed on queue %d\n", + command_id, le16_to_cpu(cqe->sq_id)); + return; + } + + trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); + if (!nvme_try_complete_req(req, cqe->status, cqe->result) && + !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + nvme_pci_complete_batch)) + nvme_pci_complete_rq(req); +} + +static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) +{ + u32 tmp = nvmeq->cq_head + 1; + + if (tmp == nvmeq->q_depth) { + nvmeq->cq_head = 0; + nvmeq->cq_phase ^= 1; + } else { + nvmeq->cq_head = tmp; + } +} + +static inline int nvme_poll_cq(struct nvme_queue *nvmeq, + struct io_comp_batch *iob) +{ + int found = 0; + + while (nvme_cqe_pending(nvmeq)) { + found++; + /* + * load-load control dependency between phase and the rest of + * the cqe requires a full read memory barrier + */ + dma_rmb(); + nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); + nvme_update_cq_head(nvmeq); + } + + if (found) + nvme_ring_cq_doorbell(nvmeq); + return found; +} + +static irqreturn_t nvme_irq(int irq, void *data) +{ + struct nvme_queue *nvmeq = data; + DEFINE_IO_COMP_BATCH(iob); + + if (nvme_poll_cq(nvmeq, &iob)) { + if (!rq_list_empty(iob.req_list)) + nvme_pci_complete_batch(&iob); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static irqreturn_t nvme_irq_check(int irq, void *data) +{ + struct nvme_queue *nvmeq = data; + + if (nvme_cqe_pending(nvmeq)) + return IRQ_WAKE_THREAD; + return IRQ_NONE; +} + +/* + * Poll for completions for any interrupt driven queue + * Can be called from any context. 
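A standalone sketch (not from the driver) of the phase-tag handshake that nvme_cqe_pending() and nvme_update_cq_head() above rely on:

	#include <assert.h>

	int main(void)
	{
		unsigned short stale_status = 0x0000;	/* CQE left over from the previous pass */
		unsigned short fresh_status = 0x0001;	/* CQE just written by the controller */
		unsigned int cq_phase = 1;		/* host expectation on the first pass */

		assert((fresh_status & 1) == cq_phase);	/* consumed as a new completion */
		assert((stale_status & 1) != cq_phase);	/* ignored until overwritten */
		/* once cq_head wraps back to 0 the host flips cq_phase to 0,
		 * so the roles of the two status values swap on the next pass */
		return 0;
	}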
+ */ +static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) +{ + struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); + + WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); + + disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + nvme_poll_cq(nvmeq, NULL); + enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); +} + +static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) +{ + struct nvme_queue *nvmeq = hctx->driver_data; + bool found; + + if (!nvme_cqe_pending(nvmeq)) + return 0; + + spin_lock(&nvmeq->cq_poll_lock); + found = nvme_poll_cq(nvmeq, iob); + spin_unlock(&nvmeq->cq_poll_lock); + + return found; +} + +static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) +{ + struct nvme_dev *dev = to_nvme_dev(ctrl); + struct nvme_queue *nvmeq = &dev->queues[0]; + struct nvme_command c = { }; + + c.common.opcode = nvme_admin_async_event; + c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; + + spin_lock(&nvmeq->sq_lock); + nvme_sq_copy_cmd(nvmeq, &c); + nvme_write_sq_db(nvmeq, true); + spin_unlock(&nvmeq->sq_lock); +} + +static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) +{ + struct nvme_command c = { }; + + c.delete_queue.opcode = opcode; + c.delete_queue.qid = cpu_to_le16(id); + + return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); +} + +static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, + struct nvme_queue *nvmeq, s16 vector) +{ + struct nvme_command c = { }; + int flags = NVME_QUEUE_PHYS_CONTIG; + + if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) + flags |= NVME_CQ_IRQ_ENABLED; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. + */ + c.create_cq.opcode = nvme_admin_create_cq; + c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); + c.create_cq.cqid = cpu_to_le16(qid); + c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); + c.create_cq.cq_flags = cpu_to_le16(flags); + c.create_cq.irq_vector = cpu_to_le16(vector); + + return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); +} + +static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, + struct nvme_queue *nvmeq) +{ + struct nvme_ctrl *ctrl = &dev->ctrl; + struct nvme_command c = { }; + int flags = NVME_QUEUE_PHYS_CONTIG; + + /* + * Some drives have a bug that auto-enables WRRU if MEDIUM isn't + * set. Since URGENT priority is zeroes, it makes all queues + * URGENT. + */ + if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) + flags |= NVME_SQ_PRIO_MEDIUM; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. 
+ */ + c.create_sq.opcode = nvme_admin_create_sq; + c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); + c.create_sq.sqid = cpu_to_le16(qid); + c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); + c.create_sq.sq_flags = cpu_to_le16(flags); + c.create_sq.cqid = cpu_to_le16(qid); + + return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); +} + +static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) +{ + return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); +} + +static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) +{ + return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); +} + +static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + + dev_warn(nvmeq->dev->ctrl.device, + "Abort status: 0x%x", nvme_req(req)->status); + atomic_inc(&nvmeq->dev->ctrl.abort_limit); + blk_mq_free_request(req); + return RQ_END_IO_NONE; +} + +static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) +{ + /* If true, indicates loss of adapter communication, possibly by a + * NVMe Subsystem reset. + */ + bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); + + /* If there is a reset/reinit ongoing, we shouldn't reset again. */ + switch (nvme_ctrl_state(&dev->ctrl)) { + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + return false; + default: + break; + } + + /* We shouldn't reset unless the controller is on fatal error state + * _or_ if we lost the communication with it. + */ + if (!(csts & NVME_CSTS_CFS) && !nssro) + return false; + + return true; +} + +static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) +{ + /* Read a config register to help see what died. */ + u16 pci_status; + int result; + + result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, + &pci_status); + if (result == PCIBIOS_SUCCESSFUL) + dev_warn(dev->ctrl.device, + "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", + csts, pci_status); + else + dev_warn(dev->ctrl.device, + "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", + csts, result); + + if (csts != ~0) + return; + + dev_warn(dev->ctrl.device, + "Does your device have a faulty power saving mode enabled?\n"); + dev_warn(dev->ctrl.device, + "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); +} + +static enum blk_eh_timer_return nvme_timeout(struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; + struct request *abort_req; + struct nvme_command cmd = { }; + u32 csts = readl(dev->bar + NVME_REG_CSTS); + + /* If PCI error recovery process is happening, we cannot reset or + * the recovery mechanism will surely fail. + */ + mb(); + if (pci_channel_offline(to_pci_dev(dev->dev))) + return BLK_EH_RESET_TIMER; + + /* + * Reset immediately if the controller is failed + */ + if (nvme_should_reset(dev, csts)) { + nvme_warn_reset(dev, csts); + goto disable; + } + + /* + * Did we miss an interrupt? + */ + if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) + nvme_poll(req->mq_hctx, NULL); + else + nvme_poll_irqdisable(nvmeq); + + if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { + dev_warn(dev->ctrl.device, + "I/O %d QID %d timeout, completion polled\n", + req->tag, nvmeq->qid); + return BLK_EH_DONE; + } + + /* + * Shutdown immediately if controller times out while starting. The + * reset work will see the pci device disabled when it gets the forced + * cancellation error. 
All outstanding requests are completed on + * shutdown, so we return BLK_EH_DONE. + */ + switch (nvme_ctrl_state(&dev->ctrl)) { + case NVME_CTRL_CONNECTING: + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); + fallthrough; + case NVME_CTRL_DELETING: + dev_warn_ratelimited(dev->ctrl.device, + "I/O %d QID %d timeout, disable controller\n", + req->tag, nvmeq->qid); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + nvme_dev_disable(dev, true); + return BLK_EH_DONE; + case NVME_CTRL_RESETTING: + return BLK_EH_RESET_TIMER; + default: + break; + } + + /* + * Shutdown the controller immediately and schedule a reset if the + * command was already aborted once before and still hasn't been + * returned to the driver, or if this is the admin queue. + */ + if (!nvmeq->qid || iod->aborted) { + dev_warn(dev->ctrl.device, + "I/O %d QID %d timeout, reset controller\n", + req->tag, nvmeq->qid); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + goto disable; + } + + if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { + atomic_inc(&dev->ctrl.abort_limit); + return BLK_EH_RESET_TIMER; + } + iod->aborted = true; + + cmd.abort.opcode = nvme_admin_abort_cmd; + cmd.abort.cid = nvme_cid(req); + cmd.abort.sqid = cpu_to_le16(nvmeq->qid); + + dev_warn(nvmeq->dev->ctrl.device, + "I/O %d (%s) QID %d timeout, aborting\n", + req->tag, + nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), + nvmeq->qid); + + abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), + BLK_MQ_REQ_NOWAIT); + if (IS_ERR(abort_req)) { + atomic_inc(&dev->ctrl.abort_limit); + return BLK_EH_RESET_TIMER; + } + nvme_init_request(abort_req, &cmd); + + abort_req->end_io = abort_endio; + abort_req->end_io_data = NULL; + blk_execute_rq_nowait(abort_req, false); + + /* + * The aborted req will be completed on receiving the abort req. + * We enable the timer again. If hit twice, it'll cause a device reset, + * as the device then is in a faulty state. 
+ */ + return BLK_EH_RESET_TIMER; + +disable: + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) + return BLK_EH_DONE; + + nvme_dev_disable(dev, false); + if (nvme_try_sched_reset(&dev->ctrl)) + nvme_unquiesce_io_queues(&dev->ctrl); + return BLK_EH_DONE; +} + +static void nvme_free_queue(struct nvme_queue *nvmeq) +{ + dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + if (!nvmeq->sq_cmds) + return; + + if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { + pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), + nvmeq->sq_cmds, SQ_SIZE(nvmeq)); + } else { + dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + } +} + +static void nvme_free_queues(struct nvme_dev *dev, int lowest) +{ + int i; + + for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { + dev->ctrl.queue_count--; + nvme_free_queue(&dev->queues[i]); + } +} + +static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid) +{ + struct nvme_queue *nvmeq = &dev->queues[qid]; + + if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) + return; + + /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ + mb(); + + nvmeq->dev->online_queues--; + if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) + nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); + if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) + pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); +} + +static void nvme_suspend_io_queues(struct nvme_dev *dev) +{ + int i; + + for (i = dev->ctrl.queue_count - 1; i > 0; i--) + nvme_suspend_queue(dev, i); +} + +/* + * Called only on a device that has been disabled and after all other threads + * that can check this device's completion queues have synced, except + * nvme_poll(). This is the last chance for the driver to see a natural + * completion before nvme_cancel_request() terminates all incomplete requests. 
+ */ +static void nvme_reap_pending_cqes(struct nvme_dev *dev) +{ + int i; + + for (i = dev->ctrl.queue_count - 1; i > 0; i--) { + spin_lock(&dev->queues[i].cq_poll_lock); + nvme_poll_cq(&dev->queues[i], NULL); + spin_unlock(&dev->queues[i].cq_poll_lock); + } +} + +static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, + int entry_size) +{ + int q_depth = dev->q_depth; + unsigned q_size_aligned = roundup(q_depth * entry_size, + NVME_CTRL_PAGE_SIZE); + + if (q_size_aligned * nr_io_queues > dev->cmb_size) { + u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); + + mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); + q_depth = div_u64(mem_per_q, entry_size); + + /* + * Ensure the reduced q_depth is above some threshold where it + * would be better to map queues in system memory with the + * original depth + */ + if (q_depth < 64) + return -ENOMEM; + } + + return q_depth; +} + +static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, + int qid) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { + nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); + if (nvmeq->sq_cmds) { + nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, + nvmeq->sq_cmds); + if (nvmeq->sq_dma_addr) { + set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); + return 0; + } + + pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); + } + } + + nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), + &nvmeq->sq_dma_addr, GFP_KERNEL); + if (!nvmeq->sq_cmds) + return -ENOMEM; + return 0; +} + +static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) +{ + struct nvme_queue *nvmeq = &dev->queues[qid]; + + if (dev->ctrl.queue_count > qid) + return 0; + + nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; + nvmeq->q_depth = depth; + nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), + &nvmeq->cq_dma_addr, GFP_KERNEL); + if (!nvmeq->cqes) + goto free_nvmeq; + + if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) + goto free_cqdma; + + nvmeq->dev = dev; + spin_lock_init(&nvmeq->sq_lock); + spin_lock_init(&nvmeq->cq_poll_lock); + nvmeq->cq_head = 0; + nvmeq->cq_phase = 1; + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; + nvmeq->qid = qid; + dev->ctrl.queue_count++; + + return 0; + + free_cqdma: + dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, + nvmeq->cq_dma_addr); + free_nvmeq: + return -ENOMEM; +} + +static int queue_request_irq(struct nvme_queue *nvmeq) +{ + struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); + int nr = nvmeq->dev->ctrl.instance; + + if (use_threaded_interrupts) { + return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, + nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); + } else { + return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, + NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); + } +} + +static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) +{ + struct nvme_dev *dev = nvmeq->dev; + + nvmeq->sq_tail = 0; + nvmeq->last_sq_tail = 0; + nvmeq->cq_head = 0; + nvmeq->cq_phase = 1; + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; + memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); + nvme_dbbuf_init(dev, nvmeq, qid); + dev->online_queues++; + wmb(); /* ensure the first interrupt sees the initialization */ +} + +/* + * Try getting shutdown_lock while setting up IO queues. + */ +static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) +{ + /* + * Give up if the lock is being held by nvme_dev_disable. 
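A standalone re-run of the nvme_cmb_qdepth() shrink above with invented numbers (a 256 KiB CMB shared by eight I/O queues of 64-byte entries):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long cmb_size = 256 * 1024;	/* hypothetical CMB */
		unsigned int page = 4096, entry_size = 64, nr_io_queues = 8;
		unsigned int q_depth = 1024;
		unsigned long long q_size_aligned =
			((unsigned long long)q_depth * entry_size + page - 1) / page * page;

		if (q_size_aligned * nr_io_queues > cmb_size) {
			unsigned long long mem_per_q = cmb_size / nr_io_queues;

			mem_per_q -= mem_per_q % page;		/* round_down() */
			q_depth = mem_per_q / entry_size;	/* 512; below 64 the driver falls back to host memory */
		}
		printf("q_depth = %u\n", q_depth);
		return 0;
	}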
+ */ + if (!mutex_trylock(&dev->shutdown_lock)) + return -ENODEV; + + /* + * Controller is in wrong state, fail early. + */ + if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) { + mutex_unlock(&dev->shutdown_lock); + return -ENODEV; + } + + return 0; +} + +static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) +{ + struct nvme_dev *dev = nvmeq->dev; + int result; + u16 vector = 0; + + clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); + + /* + * A queue's vector matches the queue identifier unless the controller + * has only one vector available. + */ + if (!polled) + vector = dev->num_vecs == 1 ? 0 : qid; + else + set_bit(NVMEQ_POLLED, &nvmeq->flags); + + result = adapter_alloc_cq(dev, qid, nvmeq, vector); + if (result) + return result; + + result = adapter_alloc_sq(dev, qid, nvmeq); + if (result < 0) + return result; + if (result) + goto release_cq; + + nvmeq->cq_vector = vector; + + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + nvme_init_queue(nvmeq, qid); + if (!polled) { + result = queue_request_irq(nvmeq); + if (result < 0) + goto release_sq; + } + + set_bit(NVMEQ_ENABLED, &nvmeq->flags); + mutex_unlock(&dev->shutdown_lock); + return result; + +release_sq: + dev->online_queues--; + mutex_unlock(&dev->shutdown_lock); + adapter_delete_sq(dev, qid); +release_cq: + adapter_delete_cq(dev, qid); + return result; +} + +static const struct blk_mq_ops nvme_mq_admin_ops = { + .queue_rq = nvme_queue_rq, + .complete = nvme_pci_complete_rq, + .init_hctx = nvme_admin_init_hctx, + .init_request = nvme_pci_init_request, + .timeout = nvme_timeout, +}; + +static const struct blk_mq_ops nvme_mq_ops = { + .queue_rq = nvme_queue_rq, + .queue_rqs = nvme_queue_rqs, + .complete = nvme_pci_complete_rq, + .commit_rqs = nvme_commit_rqs, + .init_hctx = nvme_init_hctx, + .init_request = nvme_pci_init_request, + .map_queues = nvme_pci_map_queues, + .timeout = nvme_timeout, + .poll = nvme_poll, +}; + +static void nvme_dev_remove_admin(struct nvme_dev *dev) +{ + if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { + /* + * If the controller was reset during removal, it's possible + * user requests may be waiting on a stopped queue. Start the + * queue to flush these to completion. + */ + nvme_unquiesce_admin_queue(&dev->ctrl); + nvme_remove_admin_tag_set(&dev->ctrl); + } +} + +static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) +{ + return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); +} + +static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (size <= dev->bar_mapped_size) + return 0; + if (size > pci_resource_len(pdev, 0)) + return -ENOMEM; + if (dev->bar) + iounmap(dev->bar); + dev->bar = ioremap(pci_resource_start(pdev, 0), size); + if (!dev->bar) { + dev->bar_mapped_size = 0; + return -ENOMEM; + } + dev->bar_mapped_size = size; + dev->dbs = dev->bar + NVME_REG_DBS; + + return 0; +} + +static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) +{ + int result; + u32 aqa; + struct nvme_queue *nvmeq; + + result = nvme_remap_bar(dev, db_bar_size(dev, 0)); + if (result < 0) + return result; + + dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? + NVME_CAP_NSSRC(dev->ctrl.cap) : 0; + + if (dev->subsystem && + (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) + writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); + + /* + * If the device has been passed off to us in an enabled state, just + * clear the enabled bit. 
The spec says we should set the 'shutdown + * notification bits', but doing so may cause the device to complete + * commands to the admin queue ... and we don't know what memory that + * might be pointing at! + */ + result = nvme_disable_ctrl(&dev->ctrl, false); + if (result < 0) + return result; + + result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); + if (result) + return result; + + dev->ctrl.numa_node = dev_to_node(dev->dev); + + nvmeq = &dev->queues[0]; + aqa = nvmeq->q_depth - 1; + aqa |= aqa << 16; + + writel(aqa, dev->bar + NVME_REG_AQA); + lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); + lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); + + result = nvme_enable_ctrl(&dev->ctrl); + if (result) + return result; + + nvmeq->cq_vector = 0; + nvme_init_queue(nvmeq, 0); + result = queue_request_irq(nvmeq); + if (result) { + dev->online_queues--; + return result; + } + + set_bit(NVMEQ_ENABLED, &nvmeq->flags); + return result; +} + +static int nvme_create_io_queues(struct nvme_dev *dev) +{ + unsigned i, max, rw_queues; + int ret = 0; + + for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { + if (nvme_alloc_queue(dev, i, dev->q_depth)) { + ret = -ENOMEM; + break; + } + } + + max = min(dev->max_qid, dev->ctrl.queue_count - 1); + if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { + rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + + dev->io_queues[HCTX_TYPE_READ]; + } else { + rw_queues = max; + } + + for (i = dev->online_queues; i <= max; i++) { + bool polled = i > rw_queues; + + ret = nvme_create_queue(&dev->queues[i], i, polled); + if (ret) + break; + } + + /* + * Ignore failing Create SQ/CQ commands, we can continue with less + * than the desired amount of queues, and even a controller without + * I/O queues can still be used to issue admin commands. This might + * be useful to upgrade a buggy firmware for example. + */ + return ret >= 0 ? 0 : ret; +} + +static u64 nvme_cmb_size_unit(struct nvme_dev *dev) +{ + u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; + + return 1ULL << (12 + 4 * szu); +} + +static u32 nvme_cmb_size(struct nvme_dev *dev) +{ + return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; +} + +static void nvme_map_cmb(struct nvme_dev *dev) +{ + u64 size, offset; + resource_size_t bar_size; + struct pci_dev *pdev = to_pci_dev(dev->dev); + int bar; + + if (dev->cmb_size) + return; + + if (NVME_CAP_CMBS(dev->ctrl.cap)) + writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); + + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); + if (!dev->cmbsz) + return; + dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); + + size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); + offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); + bar = NVME_CMB_BIR(dev->cmbloc); + bar_size = pci_resource_len(pdev, bar); + + if (offset > bar_size) + return; + + /* + * Tell the controller about the host side address mapping the CMB, + * and enable CMB decoding for the NVMe 1.4+ scheme: + */ + if (NVME_CAP_CMBS(dev->ctrl.cap)) { + hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | + (pci_bus_address(pdev, bar) + offset), + dev->bar + NVME_REG_CMBMSC); + } + + /* + * Controllers may support a CMB size larger than their BAR, + * for example, due to being behind a bridge. 
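A standalone sketch of the CMBSZ decoding done by nvme_cmb_size_unit() and nvme_cmb_size() above; the register fields are invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned int szu = 1;	/* CMBSZ.SZU: 0 = 4 KiB units, 1 = 64 KiB, 2 = 1 MiB, ... */
		unsigned int sz = 512;	/* CMBSZ.SZ: size expressed in SZU units */
		unsigned long long unit = 1ULL << (12 + 4 * szu);

		printf("CMB size = %llu MiB\n", unit * sz >> 20);	/* 32 MiB here */
		return 0;
	}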
Reduce the CMB to + * the reported size of the BAR + */ + if (size > bar_size - offset) + size = bar_size - offset; + + if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { + dev_warn(dev->ctrl.device, + "failed to register the CMB\n"); + return; + } + + dev->cmb_size = size; + dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); + + if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == + (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) + pci_p2pmem_publish(pdev, true); + + nvme_update_attrs(dev); +} + +static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) +{ + u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; + u64 dma_addr = dev->host_mem_descs_dma; + struct nvme_command c = { }; + int ret; + + c.features.opcode = nvme_admin_set_features; + c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); + c.features.dword11 = cpu_to_le32(bits); + c.features.dword12 = cpu_to_le32(host_mem_size); + c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); + c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); + c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); + + ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); + if (ret) { + dev_warn(dev->ctrl.device, + "failed to set host mem (err %d, flags %#x).\n", + ret, bits); + } else + dev->hmb = bits & NVME_HOST_MEM_ENABLE; + + return ret; +} + +static void nvme_free_host_mem(struct nvme_dev *dev) +{ + int i; + + for (i = 0; i < dev->nr_host_mem_descs; i++) { + struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; + size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; + + dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], + le64_to_cpu(desc->addr), + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); + } + + kfree(dev->host_mem_desc_bufs); + dev->host_mem_desc_bufs = NULL; + dma_free_coherent(dev->dev, + dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), + dev->host_mem_descs, dev->host_mem_descs_dma); + dev->host_mem_descs = NULL; + dev->nr_host_mem_descs = 0; +} + +static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, + u32 chunk_size) +{ + struct nvme_host_mem_buf_desc *descs; + u32 max_entries, len; + dma_addr_t descs_dma; + int i = 0; + void **bufs; + u64 size, tmp; + + tmp = (preferred + chunk_size - 1); + do_div(tmp, chunk_size); + max_entries = tmp; + + if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) + max_entries = dev->ctrl.hmmaxd; + + descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), + &descs_dma, GFP_KERNEL); + if (!descs) + goto out; + + bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); + if (!bufs) + goto out_free_descs; + + for (size = 0; size < preferred && i < max_entries; size += len) { + dma_addr_t dma_addr; + + len = min_t(u64, chunk_size, preferred - size); + bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); + if (!bufs[i]) + break; + + descs[i].addr = cpu_to_le64(dma_addr); + descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); + i++; + } + + if (!size) + goto out_free_bufs; + + dev->nr_host_mem_descs = i; + dev->host_mem_size = size; + dev->host_mem_descs = descs; + dev->host_mem_descs_dma = descs_dma; + dev->host_mem_desc_bufs = bufs; + return 0; + +out_free_bufs: + while (--i >= 0) { + size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; + + dma_free_attrs(dev->dev, size, bufs[i], + le64_to_cpu(descs[i].addr), + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); + } + + kfree(bufs); +out_free_descs: + dma_free_coherent(dev->dev, 
max_entries * sizeof(*descs), descs, + descs_dma); +out: + dev->host_mem_descs = NULL; + return -ENOMEM; +} + +static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) +{ + u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); + u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); + u64 chunk_size; + + /* start big and work our way down */ + for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { + if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { + if (!min || dev->host_mem_size >= min) + return 0; + nvme_free_host_mem(dev); + } + } + + return -ENOMEM; +} + +static int nvme_setup_host_mem(struct nvme_dev *dev) +{ + u64 max = (u64)max_host_mem_size_mb * SZ_1M; + u64 preferred = (u64)dev->ctrl.hmpre * 4096; + u64 min = (u64)dev->ctrl.hmmin * 4096; + u32 enable_bits = NVME_HOST_MEM_ENABLE; + int ret; + + if (!dev->ctrl.hmpre) + return 0; + + preferred = min(preferred, max); + if (min > max) { + dev_warn(dev->ctrl.device, + "min host memory (%lld MiB) above limit (%d MiB).\n", + min >> ilog2(SZ_1M), max_host_mem_size_mb); + nvme_free_host_mem(dev); + return 0; + } + + /* + * If we already have a buffer allocated check if we can reuse it. + */ + if (dev->host_mem_descs) { + if (dev->host_mem_size >= min) + enable_bits |= NVME_HOST_MEM_RETURN; + else + nvme_free_host_mem(dev); + } + + if (!dev->host_mem_descs) { + if (nvme_alloc_host_mem(dev, min, preferred)) { + dev_warn(dev->ctrl.device, + "failed to allocate host memory buffer.\n"); + return 0; /* controller must work without HMB */ + } + + dev_info(dev->ctrl.device, + "allocated %lld MiB host memory buffer.\n", + dev->host_mem_size >> ilog2(SZ_1M)); + } + + ret = nvme_set_host_mem(dev, enable_bits); + if (ret) + nvme_free_host_mem(dev); + return ret; +} + +static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", + ndev->cmbloc, ndev->cmbsz); +} +static DEVICE_ATTR_RO(cmb); + +static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%u\n", ndev->cmbloc); +} +static DEVICE_ATTR_RO(cmbloc); + +static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%u\n", ndev->cmbsz); +} +static DEVICE_ATTR_RO(cmbsz); + +static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%d\n", ndev->hmb); +} + +static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); + bool new; + int ret; + + if (kstrtobool(buf, &new) < 0) + return -EINVAL; + + if (new == ndev->hmb) + return count; + + if (new) { + ret = nvme_setup_host_mem(ndev); + } else { + ret = nvme_set_host_mem(ndev, 0); + if (!ret) + nvme_free_host_mem(ndev); + } + + if (ret < 0) + return ret; + + return count; +} +static DEVICE_ATTR_RW(hmb); + +static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct nvme_ctrl *ctrl = + dev_get_drvdata(container_of(kobj, struct device, kobj)); + struct nvme_dev *dev = to_nvme_dev(ctrl); + + if (a == &dev_attr_cmb.attr || 
+ a == &dev_attr_cmbloc.attr || + a == &dev_attr_cmbsz.attr) { + if (!dev->cmbsz) + return 0; + } + if (a == &dev_attr_hmb.attr && !ctrl->hmpre) + return 0; + + return a->mode; +} + +static struct attribute *nvme_pci_attrs[] = { + &dev_attr_cmb.attr, + &dev_attr_cmbloc.attr, + &dev_attr_cmbsz.attr, + &dev_attr_hmb.attr, + NULL, +}; + +static const struct attribute_group nvme_pci_dev_attrs_group = { + .attrs = nvme_pci_attrs, + .is_visible = nvme_pci_attrs_are_visible, +}; + +static const struct attribute_group *nvme_pci_dev_attr_groups[] = { + &nvme_dev_attrs_group, + &nvme_pci_dev_attrs_group, + NULL, +}; + +static void nvme_update_attrs(struct nvme_dev *dev) +{ + sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); +} + +/* + * nirqs is the number of interrupts available for write and read + * queues. The core already reserved an interrupt for the admin queue. + */ +static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) +{ + struct nvme_dev *dev = affd->priv; + unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; + + /* + * If there is no interrupt available for queues, ensure that + * the default queue is set to 1. The affinity set size is + * also set to one, but the irq core ignores it for this case. + * + * If only one interrupt is available or 'write_queue' == 0, combine + * write and read queues. + * + * If 'write_queues' > 0, ensure it leaves room for at least one read + * queue. + */ + if (!nrirqs) { + nrirqs = 1; + nr_read_queues = 0; + } else if (nrirqs == 1 || !nr_write_queues) { + nr_read_queues = 0; + } else if (nr_write_queues >= nrirqs) { + nr_read_queues = 1; + } else { + nr_read_queues = nrirqs - nr_write_queues; + } + + dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; + affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; + dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; + affd->set_size[HCTX_TYPE_READ] = nr_read_queues; + affd->nr_sets = nr_read_queues ? 2 : 1; +} + +static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + struct irq_affinity affd = { + .pre_vectors = 1, + .calc_sets = nvme_calc_irq_sets, + .priv = dev, + }; + unsigned int irq_queues, poll_queues; + + /* + * Poll queues don't need interrupts, but we need at least one I/O queue + * left over for non-polled I/O. + */ + poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); + dev->io_queues[HCTX_TYPE_POLL] = poll_queues; + + /* + * Initialize for the single interrupt case, will be updated in + * nvme_calc_irq_sets(). + */ + dev->io_queues[HCTX_TYPE_DEFAULT] = 1; + dev->io_queues[HCTX_TYPE_READ] = 0; + + /* + * We need interrupts for the admin queue and each non-polled I/O queue, + * but some Apple controllers require all queues to use the first + * vector. + */ + irq_queues = 1; + if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) + irq_queues += (nr_io_queues - poll_queues); + return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, + PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); +} + +static unsigned int nvme_max_io_queues(struct nvme_dev *dev) +{ + /* + * If tags are shared with admin queue (Apple bug), then + * make sure we only use one IO queue. 
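+ * With NVME_QUIRK_SHARED_TAGS the controller exposes a single tag space
+ * for the admin and I/O queues, so extra I/O queues would only compete
+ * for the same command identifiers.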
+ */ + if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) + return 1; + return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; +} + +static int nvme_setup_io_queues(struct nvme_dev *dev) +{ + struct nvme_queue *adminq = &dev->queues[0]; + struct pci_dev *pdev = to_pci_dev(dev->dev); + unsigned int nr_io_queues; + unsigned long size; + int result; + + /* + * Sample the module parameters once at reset time so that we have + * stable values to work with. + */ + dev->nr_write_queues = write_queues; + dev->nr_poll_queues = poll_queues; + + nr_io_queues = dev->nr_allocated_queues - 1; + result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); + if (result < 0) + return result; + + if (nr_io_queues == 0) + return 0; + + /* + * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions + * from set to unset. If there is a window to it is truely freed, + * pci_free_irq_vectors() jumping into this window will crash. + * And take lock to avoid racing with pci_free_irq_vectors() in + * nvme_dev_disable() path. + */ + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); + + if (dev->cmb_use_sqes) { + result = nvme_cmb_qdepth(dev, nr_io_queues, + sizeof(struct nvme_command)); + if (result > 0) { + dev->q_depth = result; + dev->ctrl.sqsize = result - 1; + } else { + dev->cmb_use_sqes = false; + } + } + + do { + size = db_bar_size(dev, nr_io_queues); + result = nvme_remap_bar(dev, size); + if (!result) + break; + if (!--nr_io_queues) { + result = -ENOMEM; + goto out_unlock; + } + } while (1); + adminq->q_db = dev->dbs; + + retry: + /* Deregister the admin queue's interrupt */ + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); + + /* + * If we enable msix early due to not intx, disable it again before + * setting up the full range we need. + */ + pci_free_irq_vectors(pdev); + + result = nvme_setup_irqs(dev, nr_io_queues); + if (result <= 0) { + result = -EIO; + goto out_unlock; + } + + dev->num_vecs = result; + result = max(result - 1, 1); + dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; + + /* + * Should investigate if there's a performance win from allocating + * more queues than interrupt vectors; it might allow the submission + * path to scale better, even if the receive path is limited by the + * number of interrupts. 
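+ *
+ * As things stand the queue count tracks the vector count: if
+ * pci_alloc_irq_vectors_affinity() grants 8 vectors with write_queues=0
+ * and two poll queues set up, num_vecs is 8, seven interrupt-driven I/O
+ * queues are used and max_qid becomes 7 + 2 = 9.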
+ */ + result = queue_request_irq(adminq); + if (result) + goto out_unlock; + set_bit(NVMEQ_ENABLED, &adminq->flags); + mutex_unlock(&dev->shutdown_lock); + + result = nvme_create_io_queues(dev); + if (result || dev->online_queues < 2) + return result; + + if (dev->online_queues - 1 < dev->max_qid) { + nr_io_queues = dev->online_queues - 1; + nvme_delete_io_queues(dev); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + nvme_suspend_io_queues(dev); + goto retry; + } + dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", + dev->io_queues[HCTX_TYPE_DEFAULT], + dev->io_queues[HCTX_TYPE_READ], + dev->io_queues[HCTX_TYPE_POLL]); + return 0; +out_unlock: + mutex_unlock(&dev->shutdown_lock); + return result; +} + +static enum rq_end_io_ret nvme_del_queue_end(struct request *req, + blk_status_t error) +{ + struct nvme_queue *nvmeq = req->end_io_data; + + blk_mq_free_request(req); + complete(&nvmeq->delete_done); + return RQ_END_IO_NONE; +} + +static enum rq_end_io_ret nvme_del_cq_end(struct request *req, + blk_status_t error) +{ + struct nvme_queue *nvmeq = req->end_io_data; + + if (error) + set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); + + return nvme_del_queue_end(req, error); +} + +static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) +{ + struct request_queue *q = nvmeq->dev->ctrl.admin_q; + struct request *req; + struct nvme_command cmd = { }; + + cmd.delete_queue.opcode = opcode; + cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); + + req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); + if (IS_ERR(req)) + return PTR_ERR(req); + nvme_init_request(req, &cmd); + + if (opcode == nvme_admin_delete_cq) + req->end_io = nvme_del_cq_end; + else + req->end_io = nvme_del_queue_end; + req->end_io_data = nvmeq; + + init_completion(&nvmeq->delete_done); + blk_execute_rq_nowait(req, false); + return 0; +} + +static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) +{ + int nr_queues = dev->online_queues - 1, sent = 0; + unsigned long timeout; + + retry: + timeout = NVME_ADMIN_TIMEOUT; + while (nr_queues > 0) { + if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) + break; + nr_queues--; + sent++; + } + while (sent) { + struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; + + timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, + timeout); + if (timeout == 0) + return false; + + sent--; + if (nr_queues) + goto retry; + } + return true; +} + +static void nvme_delete_io_queues(struct nvme_dev *dev) +{ + if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) + __nvme_delete_io_queues(dev, nvme_admin_delete_cq); +} + +static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) +{ + if (dev->io_queues[HCTX_TYPE_POLL]) + return 3; + if (dev->io_queues[HCTX_TYPE_READ]) + return 2; + return 1; +} + +static void nvme_pci_update_nr_queues(struct nvme_dev *dev) +{ + blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); + /* free previously allocated queues that are no longer usable */ + nvme_free_queues(dev, dev->online_queues); +} + +static int nvme_pci_enable(struct nvme_dev *dev) +{ + int result = -ENOMEM; + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (pci_enable_device_mem(pdev)) + return result; + + pci_set_master(pdev); + + if (readl(dev->bar + NVME_REG_CSTS) == -1) { + result = -ENODEV; + goto disable; + } + + /* + * Some devices and/or platforms don't advertise or work with INTx + * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll + * adjust this later. 
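+ * (The admin queue interrupt is requested from this single vector as soon
+ * as nvme_pci_configure_admin_queue() runs, well before
+ * nvme_setup_io_queues() sizes the real vector set.)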
+ */ + result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (result < 0) + goto disable; + + dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); + + dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, + io_queue_depth); + dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); + dev->dbs = dev->bar + 4096; + + /* + * Some Apple controllers require a non-standard SQE size. + * Interestingly they also seem to ignore the CC:IOSQES register + * so we don't bother updating it here. + */ + if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) + dev->io_sqes = 7; + else + dev->io_sqes = NVME_NVM_IOSQES; + + /* + * Temporary fix for the Apple controller found in the MacBook8,1 and + * some MacBook7,1 to avoid controller resets and data loss. + */ + if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { + dev->q_depth = 2; + dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " + "set queue depth=%u to work around controller resets\n", + dev->q_depth); + } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && + (pdev->device == 0xa821 || pdev->device == 0xa822) && + NVME_CAP_MQES(dev->ctrl.cap) == 0) { + dev->q_depth = 64; + dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " + "set queue depth=%u\n", dev->q_depth); + } + + /* + * Controllers with the shared tags quirk need the IO queue to be + * big enough so that we get 32 tags for the admin queue + */ + if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && + (dev->q_depth < (NVME_AQ_DEPTH + 2))) { + dev->q_depth = NVME_AQ_DEPTH + 2; + dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", + dev->q_depth); + } + dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ + + nvme_map_cmb(dev); + + pci_save_state(pdev); + + result = nvme_pci_configure_admin_queue(dev); + if (result) + goto free_irq; + return result; + + free_irq: + pci_free_irq_vectors(pdev); + disable: + pci_disable_device(pdev); + return result; +} + +static void nvme_dev_unmap(struct nvme_dev *dev) +{ + if (dev->bar) + iounmap(dev->bar); + pci_release_mem_regions(to_pci_dev(dev->dev)); +} + +static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + u32 csts; + + if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) + return true; + if (pdev->error_state != pci_channel_io_normal) + return true; + + csts = readl(dev->bar + NVME_REG_CSTS); + return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); +} + +static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) +{ + enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); + struct pci_dev *pdev = to_pci_dev(dev->dev); + bool dead; + + mutex_lock(&dev->shutdown_lock); + dead = nvme_pci_ctrl_is_dead(dev); + if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) { + if (pci_is_enabled(pdev)) + nvme_start_freeze(&dev->ctrl); + /* + * Give the controller a chance to complete all entered requests + * if doing a safe shutdown. 
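+ * (nvme_wait_freeze_timeout() below gives up after NVME_IO_TIMEOUT, so a
+ * dead or wedged controller cannot stall the shutdown path indefinitely.)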
+ */ + if (!dead && shutdown) + nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); + } + + nvme_quiesce_io_queues(&dev->ctrl); + + if (!dead && dev->ctrl.queue_count > 0) { + nvme_delete_io_queues(dev); + nvme_disable_ctrl(&dev->ctrl, shutdown); + nvme_poll_irqdisable(&dev->queues[0]); + } + nvme_suspend_io_queues(dev); + nvme_suspend_queue(dev, 0); + pci_free_irq_vectors(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + nvme_reap_pending_cqes(dev); + + nvme_cancel_tagset(&dev->ctrl); + nvme_cancel_admin_tagset(&dev->ctrl); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. + */ + if (shutdown) { + nvme_unquiesce_io_queues(&dev->ctrl); + if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) + nvme_unquiesce_admin_queue(&dev->ctrl); + } + mutex_unlock(&dev->shutdown_lock); +} + +static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) +{ + if (!nvme_wait_reset(&dev->ctrl)) + return -EBUSY; + nvme_dev_disable(dev, shutdown); + return 0; +} + +static int nvme_setup_prp_pools(struct nvme_dev *dev) +{ + dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, + NVME_CTRL_PAGE_SIZE, + NVME_CTRL_PAGE_SIZE, 0); + if (!dev->prp_page_pool) + return -ENOMEM; + + /* Optimisation for I/Os between 4k and 128k */ + dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, + 256, 256, 0); + if (!dev->prp_small_pool) { + dma_pool_destroy(dev->prp_page_pool); + return -ENOMEM; + } + return 0; +} + +static void nvme_release_prp_pools(struct nvme_dev *dev) +{ + dma_pool_destroy(dev->prp_page_pool); + dma_pool_destroy(dev->prp_small_pool); +} + +static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) +{ + size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; + + dev->iod_mempool = mempool_create_node(1, + mempool_kmalloc, mempool_kfree, + (void *)alloc_size, GFP_KERNEL, + dev_to_node(dev->dev)); + if (!dev->iod_mempool) + return -ENOMEM; + return 0; +} + +static void nvme_free_tagset(struct nvme_dev *dev) +{ + if (dev->tagset.tags) + nvme_remove_io_tag_set(&dev->ctrl); + dev->ctrl.tagset = NULL; +} + +/* pairs with nvme_pci_alloc_dev */ +static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) +{ + struct nvme_dev *dev = to_nvme_dev(ctrl); + + nvme_free_tagset(dev); + put_device(dev->dev); + kfree(dev->queues); + kfree(dev); +} + +static void nvme_reset_work(struct work_struct *work) +{ + struct nvme_dev *dev = + container_of(work, struct nvme_dev, ctrl.reset_work); + bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); + int result; + + if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) { + dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", + dev->ctrl.state); + result = -ENODEV; + goto out; + } + + /* + * If we're called to reset a live controller first shut it down before + * moving on. + */ + if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) + nvme_dev_disable(dev, false); + nvme_sync_queues(&dev->ctrl); + + mutex_lock(&dev->shutdown_lock); + result = nvme_pci_enable(dev); + if (result) + goto out_unlock; + nvme_unquiesce_admin_queue(&dev->ctrl); + mutex_unlock(&dev->shutdown_lock); + + /* + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the + * initializing procedure here. 
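+ * While CONNECTING, nvme_timeout() treats a stuck admin command as an
+ * initialisation failure and tears the controller down instead of trying
+ * to abort it as it would on a live controller.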
+ */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(dev->ctrl.device, + "failed to mark controller CONNECTING\n"); + result = -EBUSY; + goto out; + } + + result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); + if (result) + goto out; + + nvme_dbbuf_dma_alloc(dev); + + result = nvme_setup_host_mem(dev); + if (result < 0) + goto out; + + result = nvme_setup_io_queues(dev); + if (result) + goto out; + + /* + * Freeze and update the number of I/O queues as thos might have + * changed. If there are no I/O queues left after this reset, keep the + * controller around but remove all namespaces. + */ + if (dev->online_queues > 1) { + nvme_unquiesce_io_queues(&dev->ctrl); + nvme_wait_freeze(&dev->ctrl); + nvme_pci_update_nr_queues(dev); + nvme_dbbuf_set(dev); + nvme_unfreeze(&dev->ctrl); + } else { + dev_warn(dev->ctrl.device, "IO queues lost\n"); + nvme_mark_namespaces_dead(&dev->ctrl); + nvme_unquiesce_io_queues(&dev->ctrl); + nvme_remove_namespaces(&dev->ctrl); + nvme_free_tagset(dev); + } + + /* + * If only admin queue live, keep it to do further investigation or + * recovery. + */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { + dev_warn(dev->ctrl.device, + "failed to mark controller live state\n"); + result = -ENODEV; + goto out; + } + + nvme_start_ctrl(&dev->ctrl); + return; + + out_unlock: + mutex_unlock(&dev->shutdown_lock); + out: + /* + * Set state to deleting now to avoid blocking nvme_wait_reset(), which + * may be holding this pci_dev's device lock. + */ + dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", + result); + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); + nvme_dev_disable(dev, true); + nvme_sync_queues(&dev->ctrl); + nvme_mark_namespaces_dead(&dev->ctrl); + nvme_unquiesce_io_queues(&dev->ctrl); + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); +} + +static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) +{ + *val = readl(to_nvme_dev(ctrl)->bar + off); + return 0; +} + +static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) +{ + writel(val, to_nvme_dev(ctrl)->bar + off); + return 0; +} + +static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) +{ + *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); + return 0; +} + +static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); + + return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); +} + +static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) +{ + struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); + struct nvme_subsystem *subsys = ctrl->subsys; + + dev_err(ctrl->device, + "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", + pdev->vendor, pdev->device, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + +static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) +{ + struct nvme_dev *dev = to_nvme_dev(ctrl); + + return dma_pci_p2pdma_supported(dev->dev); +} + +static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { + .name = "pcie", + .module = THIS_MODULE, + .flags = NVME_F_METADATA_SUPPORTED, + .dev_attr_groups = nvme_pci_dev_attr_groups, + .reg_read32 = nvme_pci_reg_read32, + .reg_write32 = nvme_pci_reg_write32, + .reg_read64 = nvme_pci_reg_read64, + .free_ctrl = nvme_pci_free_ctrl, + .submit_async_event = nvme_pci_submit_async_event, + .get_address = nvme_pci_get_address, 
+ .print_device_info = nvme_pci_print_device_info, + .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, +}; + +static int nvme_dev_map(struct nvme_dev *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (pci_request_mem_regions(pdev, "nvme")) + return -ENODEV; + + if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) + goto release; + + return 0; + release: + pci_release_mem_regions(pdev); + return -ENODEV; +} + +static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) +{ + if (pdev->vendor == 0x144d && pdev->device == 0xa802) { + /* + * Several Samsung devices seem to drop off the PCIe bus + * randomly when APST is on and uses the deepest sleep state. + * This has been observed on a Samsung "SM951 NVMe SAMSUNG + * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD + * 950 PRO 256GB", but it seems to be restricted to two Dell + * laptops. + */ + if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && + (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || + dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) + return NVME_QUIRK_NO_DEEPEST_PS; + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { + /* + * Samsung SSD 960 EVO drops off the PCIe bus after system + * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as + * within few minutes after bootup on a Coffee Lake board - + * ASUS PRIME Z370-A + */ + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && + (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || + dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) + return NVME_QUIRK_NO_APST; + } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || + pdev->device == 0xa808 || pdev->device == 0xa809)) || + (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { + /* + * Forcing to use host managed nvme power settings for + * lowest idle power with quick resume latency on + * Samsung and Toshiba SSDs based on suspend behavior + * on Coffee Lake board for LENOVO C640 + */ + if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && + dmi_match(DMI_BOARD_NAME, "LNVNB161216")) + return NVME_QUIRK_SIMPLE_SUSPEND; + } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 || + pdev->device == 0x500f)) { + /* + * Exclude some Kingston NV1 and A2000 devices from + * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a + * lot fo energy with s2idle sleep on some TUXEDO platforms. 
+ */ + if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || + dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") || + dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") || + dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1")) + return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; + } + + return 0; +} + +static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + unsigned long quirks = id->driver_data; + int node = dev_to_node(&pdev->dev); + struct nvme_dev *dev; + int ret = -ENOMEM; + + dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); + if (!dev) + return ERR_PTR(-ENOMEM); + INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); + mutex_init(&dev->shutdown_lock); + + dev->nr_write_queues = write_queues; + dev->nr_poll_queues = poll_queues; + dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; + dev->queues = kcalloc_node(dev->nr_allocated_queues, + sizeof(struct nvme_queue), GFP_KERNEL, node); + if (!dev->queues) + goto out_free_dev; + + dev->dev = get_device(&pdev->dev); + + quirks |= check_vendor_combination_bug(pdev); + if (!noacpi && + !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) && + acpi_storage_d3(&pdev->dev)) { + /* + * Some systems use a bios work around to ask for D3 on + * platforms that support kernel managed suspend. + */ + dev_info(&pdev->dev, + "platform quirk: setting simple suspend\n"); + quirks |= NVME_QUIRK_SIMPLE_SUSPEND; + } + ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, + quirks); + if (ret) + goto out_put_device; + + if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + else + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); + dma_set_max_seg_size(&pdev->dev, 0xffffffff); + + /* + * Limit the max command size to prevent iod->sg allocations going + * over a single page. + */ + dev->ctrl.max_hw_sectors = min_t(u32, + NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); + dev->ctrl.max_segments = NVME_MAX_SEGS; + + /* + * There is no support for SGLs for metadata (yet), so we are limited to + * a single integrity segment for the separate metadata pointer. + */ + dev->ctrl.max_integrity_segments = 1; + return dev; + +out_put_device: + put_device(dev->dev); + kfree(dev->queues); +out_free_dev: + kfree(dev); + return ERR_PTR(ret); +} + +static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct nvme_dev *dev; + int result = -ENOMEM; + + dev = nvme_pci_alloc_dev(pdev, id); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + result = nvme_dev_map(dev); + if (result) + goto out_uninit_ctrl; + + result = nvme_setup_prp_pools(dev); + if (result) + goto out_dev_unmap; + + result = nvme_pci_alloc_iod_mempool(dev); + if (result) + goto out_release_prp_pools; + + dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); + + result = nvme_pci_enable(dev); + if (result) + goto out_release_iod_mempool; + + result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, + &nvme_mq_admin_ops, sizeof(struct nvme_iod)); + if (result) + goto out_disable; + + /* + * Mark the controller as connecting before sending admin commands to + * allow the timeout handler to do the right thing. 
+ */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(dev->ctrl.device, + "failed to mark controller CONNECTING\n"); + result = -EBUSY; + goto out_disable; + } + + result = nvme_init_ctrl_finish(&dev->ctrl, false); + if (result) + goto out_disable; + + nvme_dbbuf_dma_alloc(dev); + + result = nvme_setup_host_mem(dev); + if (result < 0) + goto out_disable; + + result = nvme_setup_io_queues(dev); + if (result) + goto out_disable; + + if (dev->online_queues > 1) { + nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, + nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); + nvme_dbbuf_set(dev); + } + + if (!dev->ctrl.tagset) + dev_warn(dev->ctrl.device, "IO queues not created\n"); + + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { + dev_warn(dev->ctrl.device, + "failed to mark controller live state\n"); + result = -ENODEV; + goto out_disable; + } + + pci_set_drvdata(pdev, dev); + + nvme_start_ctrl(&dev->ctrl); + nvme_put_ctrl(&dev->ctrl); + flush_work(&dev->ctrl.scan_work); + return 0; + +out_disable: + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); + nvme_dev_disable(dev, true); + nvme_free_host_mem(dev); + nvme_dev_remove_admin(dev); + nvme_dbbuf_dma_free(dev); + nvme_free_queues(dev, 0); +out_release_iod_mempool: + mempool_destroy(dev->iod_mempool); +out_release_prp_pools: + nvme_release_prp_pools(dev); +out_dev_unmap: + nvme_dev_unmap(dev); +out_uninit_ctrl: + nvme_uninit_ctrl(&dev->ctrl); + nvme_put_ctrl(&dev->ctrl); + return result; +} + +static void nvme_reset_prepare(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + /* + * We don't need to check the return value from waiting for the reset + * state as pci_dev device lock is held, making it impossible to race + * with ->remove(). + */ + nvme_disable_prepare_reset(dev, false); + nvme_sync_queues(&dev->ctrl); +} + +static void nvme_reset_done(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + if (!nvme_try_sched_reset(&dev->ctrl)) + flush_work(&dev->ctrl.reset_work); +} + +static void nvme_shutdown(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + nvme_disable_prepare_reset(dev, true); +} + +/* + * The driver's remove may be called on a device in a partially initialized + * state. This function must not have any dependencies on the device state in + * order to proceed. 
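+ * (A surprise hot remove, for instance, can arrive while the initial reset
+ * is still running, which is why the reset work is flushed below before
+ * anything is torn down.)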
+ */ +static void nvme_remove(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); + pci_set_drvdata(pdev, NULL); + + if (!pci_device_is_present(pdev)) { + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); + nvme_dev_disable(dev, true); + } + + flush_work(&dev->ctrl.reset_work); + nvme_stop_ctrl(&dev->ctrl); + nvme_remove_namespaces(&dev->ctrl); + nvme_dev_disable(dev, true); + nvme_free_host_mem(dev); + nvme_dev_remove_admin(dev); + nvme_dbbuf_dma_free(dev); + nvme_free_queues(dev, 0); + mempool_destroy(dev->iod_mempool); + nvme_release_prp_pools(dev); + nvme_dev_unmap(dev); + nvme_uninit_ctrl(&dev->ctrl); +} + +#ifdef CONFIG_PM_SLEEP +static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) +{ + return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); +} + +static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) +{ + return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); +} + +static int nvme_resume(struct device *dev) +{ + struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); + struct nvme_ctrl *ctrl = &ndev->ctrl; + + if (ndev->last_ps == U32_MAX || + nvme_set_power_state(ctrl, ndev->last_ps) != 0) + goto reset; + if (ctrl->hmpre && nvme_setup_host_mem(ndev)) + goto reset; + + return 0; +reset: + return nvme_try_sched_reset(ctrl); +} + +static int nvme_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct nvme_dev *ndev = pci_get_drvdata(pdev); + struct nvme_ctrl *ctrl = &ndev->ctrl; + int ret = -EBUSY; + + ndev->last_ps = U32_MAX; + + /* + * The platform does not remove power for a kernel managed suspend so + * use host managed nvme power settings for lowest idle power if + * possible. This should have quicker resume latency than a full device + * shutdown. But if the firmware is involved after the suspend or the + * device does not support any non-default power states, shut down the + * device fully. + * + * If ASPM is not enabled for the device, shut down the device and allow + * the PCI bus layer to put it into D3 in order to take the PCIe link + * down, so as to allow the platform to achieve its minimum low-power + * state (which may not be possible if the link is up). + */ + if (pm_suspend_via_firmware() || !ctrl->npss || + !pcie_aspm_enabled(pdev) || + (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) + return nvme_disable_prepare_reset(ndev, true); + + nvme_start_freeze(ctrl); + nvme_wait_freeze(ctrl); + nvme_sync_queues(ctrl); + + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) + goto unfreeze; + + /* + * Host memory access may not be successful in a system suspend state, + * but the specification allows the controller to access memory in a + * non-operational power state. + */ + if (ndev->hmb) { + ret = nvme_set_host_mem(ndev, 0); + if (ret < 0) + goto unfreeze; + } + + ret = nvme_get_power_state(ctrl, &ndev->last_ps); + if (ret < 0) + goto unfreeze; + + /* + * A saved state prevents pci pm from generically controlling the + * device's power. If we're using protocol specific settings, we don't + * want pci interfering. + */ + pci_save_state(pdev); + + ret = nvme_set_power_state(ctrl, ctrl->npss); + if (ret < 0) + goto unfreeze; + + if (ret) { + /* discard the saved state */ + pci_load_saved_state(pdev, NULL); + + /* + * Clearing npss forces a controller reset on resume. The + * correct value will be rediscovered then. 
+ */ + ret = nvme_disable_prepare_reset(ndev, true); + ctrl->npss = 0; + } +unfreeze: + nvme_unfreeze(ctrl); + return ret; +} + +static int nvme_simple_suspend(struct device *dev) +{ + struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); + + return nvme_disable_prepare_reset(ndev, true); +} + +static int nvme_simple_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct nvme_dev *ndev = pci_get_drvdata(pdev); + + return nvme_try_sched_reset(&ndev->ctrl); +} + +static const struct dev_pm_ops nvme_dev_pm_ops = { + .suspend = nvme_suspend, + .resume = nvme_resume, + .freeze = nvme_simple_suspend, + .thaw = nvme_simple_resume, + .poweroff = nvme_simple_suspend, + .restore = nvme_simple_resume, +}; +#endif /* CONFIG_PM_SLEEP */ + +static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + /* + * A frozen channel requires a reset. When detected, this method will + * shutdown the controller to quiesce. The controller will be restarted + * after the slot reset through driver's slot_reset callback. + */ + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + dev_warn(dev->ctrl.device, + "frozen state error detected, reset controller\n"); + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { + nvme_dev_disable(dev, true); + return PCI_ERS_RESULT_DISCONNECT; + } + nvme_dev_disable(dev, false); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + dev_warn(dev->ctrl.device, + "failure state error detected, request disconnect\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + dev_info(dev->ctrl.device, "restart after slot reset\n"); + pci_restore_state(pdev); + if (!nvme_try_sched_reset(&dev->ctrl)) + nvme_unquiesce_io_queues(&dev->ctrl); + return PCI_ERS_RESULT_RECOVERED; +} + +static void nvme_error_resume(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + flush_work(&dev->ctrl.reset_work); +} + +static const struct pci_error_handlers nvme_err_handler = { + .error_detected = nvme_error_detected, + .slot_reset = nvme_slot_reset, + .resume = nvme_error_resume, + .reset_prepare = nvme_reset_prepare, + .reset_done = nvme_reset_done, +}; + +static const struct pci_device_id nvme_id_table[] = { + { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES, }, + { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES, }, + { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES | + NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES, }, + { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_MEDIUM_PRIO_SQ | + NVME_QUIRK_NO_TEMP_THRESH_CHANGE | + NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ + .driver_data = NVME_QUIRK_IDENTIFY_CNS | + 
NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_NS_DESC_LIST, }, + { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_DISABLE_WRITE_ZEROES| + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, + { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5013), /* 
Kingston KC3000, Kingston FURY Renegade */ + .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, + { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ + .driver_data = NVME_QUIRK_BOGUS_NID | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), + .driver_data = NVME_QUIRK_SINGLE_VECTOR }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), + .driver_data = NVME_QUIRK_SINGLE_VECTOR | + NVME_QUIRK_128_BYTES_SQES | + NVME_QUIRK_SHARED_TAGS | + NVME_QUIRK_SKIP_CID_GEN | + NVME_QUIRK_IDENTIFY_CNS }, + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, nvme_id_table); + 
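+/*
+ * Editorial illustration, not part of the driver: a minimal sketch of how an
+ * nvme_id_table entry and its driver_data quirks are consumed.  pci_match_id()
+ * is the generic PCI core matcher; the helper name below is invented for this
+ * example and only reports whether the matched entry carries
+ * NVME_QUIRK_BOGUS_NID.
+ */
+static void __maybe_unused nvme_quirk_example(struct pci_dev *pdev)
+{
+	const struct pci_device_id *id = pci_match_id(nvme_id_table, pdev);
+
+	/* driver_data holds the NVME_QUIRK_* bits declared in the table above */
+	if (id && (id->driver_data & NVME_QUIRK_BOGUS_NID))
+		dev_info(&pdev->dev,
+			 "example: controller reports unreliable namespace IDs\n");
+}
+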
+static struct pci_driver nvme_driver = { + .name = "nvme", + .id_table = nvme_id_table, + .probe = nvme_probe, + .remove = nvme_remove, + .shutdown = nvme_shutdown, + .driver = { + .probe_type = PROBE_PREFER_ASYNCHRONOUS, +#ifdef CONFIG_PM_SLEEP + .pm = &nvme_dev_pm_ops, +#endif + }, + .sriov_configure = pci_sriov_configure_simple, + .err_handler = &nvme_err_handler, +}; + +static int __init nvme_init(void) +{ + BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); + BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); + BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); + BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); + BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); + BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); + BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); + + return pci_register_driver(&nvme_driver); +} + +static void __exit nvme_exit(void) +{ + pci_unregister_driver(&nvme_driver); + flush_workqueue(nvme_wq); +} + +MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); +module_init(nvme_init); +module_exit(nvme_exit); diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c new file mode 100644 index 0000000000..391b1465eb --- /dev/null +++ b/drivers/nvme/host/pr.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015 Intel Corporation + * Keith Busch <kbusch@kernel.org> + */ +#include <linux/blkdev.h> +#include <linux/pr.h> +#include <asm/unaligned.h> + +#include "nvme.h" + +static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type) +{ + switch (type) { + case PR_WRITE_EXCLUSIVE: + return NVME_PR_WRITE_EXCLUSIVE; + case PR_EXCLUSIVE_ACCESS: + return NVME_PR_EXCLUSIVE_ACCESS; + case PR_WRITE_EXCLUSIVE_REG_ONLY: + return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY; + case PR_EXCLUSIVE_ACCESS_REG_ONLY: + return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY; + case PR_WRITE_EXCLUSIVE_ALL_REGS: + return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS; + case PR_EXCLUSIVE_ACCESS_ALL_REGS: + return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} + +static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type) +{ + switch (type) { + case NVME_PR_WRITE_EXCLUSIVE: + return PR_WRITE_EXCLUSIVE; + case NVME_PR_EXCLUSIVE_ACCESS: + return PR_EXCLUSIVE_ACCESS; + case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY: + return PR_WRITE_EXCLUSIVE_REG_ONLY; + case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY: + return PR_EXCLUSIVE_ACCESS_REG_ONLY; + case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS: + return PR_WRITE_EXCLUSIVE_ALL_REGS; + case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS: + return PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} + +static int nvme_send_ns_head_pr_command(struct block_device *bdev, + struct nvme_command *c, void *data, unsigned int data_len) +{ + struct nvme_ns_head *head = bdev->bd_disk->private_data; + int srcu_idx = srcu_read_lock(&head->srcu); + struct nvme_ns *ns = nvme_find_path(head); + int ret = -EWOULDBLOCK; + + if (ns) { + c->common.nsid = cpu_to_le32(ns->head->ns_id); + ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len); + } + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, + void *data, unsigned int data_len) +{ + c->common.nsid = cpu_to_le32(ns->head->ns_id); + return nvme_submit_sync_cmd(ns->queue, c, data, data_len); +} + +static int nvme_sc_to_pr_err(int nvme_sc) +{ + if (nvme_is_path_error(nvme_sc)) + return PR_STS_PATH_FAILED; + + switch (nvme_sc) { + case NVME_SC_SUCCESS: + return PR_STS_SUCCESS; + case 
NVME_SC_RESERVATION_CONFLICT: + return PR_STS_RESERVATION_CONFLICT; + case NVME_SC_ONCS_NOT_SUPPORTED: + return -EOPNOTSUPP; + case NVME_SC_BAD_ATTRIBUTES: + case NVME_SC_INVALID_OPCODE: + case NVME_SC_INVALID_FIELD: + case NVME_SC_INVALID_NS: + return -EINVAL; + default: + return PR_STS_IOERR; + } +} + +static int nvme_send_pr_command(struct block_device *bdev, + struct nvme_command *c, void *data, unsigned int data_len) +{ + if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && + bdev->bd_disk->fops == &nvme_ns_head_ops) + return nvme_send_ns_head_pr_command(bdev, c, data, data_len); + + return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data, + data_len); +} + +static int nvme_pr_command(struct block_device *bdev, u32 cdw10, + u64 key, u64 sa_key, u8 op) +{ + struct nvme_command c = { }; + u8 data[16] = { 0, }; + int ret; + + put_unaligned_le64(key, &data[0]); + put_unaligned_le64(sa_key, &data[8]); + + c.common.opcode = op; + c.common.cdw10 = cpu_to_le32(cdw10); + + ret = nvme_send_pr_command(bdev, &c, data, sizeof(data)); + if (ret < 0) + return ret; + + return nvme_sc_to_pr_err(ret); +} + +static int nvme_pr_register(struct block_device *bdev, u64 old, + u64 new, unsigned flags) +{ + u32 cdw10; + + if (flags & ~PR_FL_IGNORE_KEY) + return -EOPNOTSUPP; + + cdw10 = old ? 2 : 0; + cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; + cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); +} + +static int nvme_pr_reserve(struct block_device *bdev, u64 key, + enum pr_type type, unsigned flags) +{ + u32 cdw10; + + if (flags & ~PR_FL_IGNORE_KEY) + return -EOPNOTSUPP; + + cdw10 = nvme_pr_type_from_blk(type) << 8; + cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); +} + +static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, + enum pr_type type, bool abort) +{ + u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1); + + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); +} + +static int nvme_pr_clear(struct block_device *bdev, u64 key) +{ + u32 cdw10 = 1 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); +} + +static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) +{ + u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); +} + +static int nvme_pr_resv_report(struct block_device *bdev, void *data, + u32 data_len, bool *eds) +{ + struct nvme_command c = { }; + int ret; + + c.common.opcode = nvme_cmd_resv_report; + c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len)); + c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT); + *eds = true; + +retry: + ret = nvme_send_pr_command(bdev, &c, data, data_len); + if (ret == NVME_SC_HOST_ID_INCONSIST && + c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) { + c.common.cdw11 = 0; + *eds = false; + goto retry; + } + + if (ret < 0) + return ret; + + return nvme_sc_to_pr_err(ret); +} + +static int nvme_pr_read_keys(struct block_device *bdev, + struct pr_keys *keys_info) +{ + u32 rse_len, num_keys = keys_info->num_keys; + struct nvme_reservation_status_ext *rse; + int ret, i; + bool eds; + + /* + * Assume we are using 128-bit host IDs and allocate a buffer large + * enough to get enough keys to fill the return keys buffer. 
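+ * Should the controller actually use 64-bit host IDs,
+ * nvme_pr_resv_report() falls back to the plain data structure and the
+ * loop below reads regctl_ds[] instead of regctl_eds[].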
+ */ + rse_len = struct_size(rse, regctl_eds, num_keys); + rse = kzalloc(rse_len, GFP_KERNEL); + if (!rse) + return -ENOMEM; + + ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); + if (ret) + goto free_rse; + + keys_info->generation = le32_to_cpu(rse->gen); + keys_info->num_keys = get_unaligned_le16(&rse->regctl); + + num_keys = min(num_keys, keys_info->num_keys); + for (i = 0; i < num_keys; i++) { + if (eds) { + keys_info->keys[i] = + le64_to_cpu(rse->regctl_eds[i].rkey); + } else { + struct nvme_reservation_status *rs; + + rs = (struct nvme_reservation_status *)rse; + keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey); + } + } + +free_rse: + kfree(rse); + return ret; +} + +static int nvme_pr_read_reservation(struct block_device *bdev, + struct pr_held_reservation *resv) +{ + struct nvme_reservation_status_ext tmp_rse, *rse; + int ret, i, num_regs; + u32 rse_len; + bool eds; + +get_num_regs: + /* + * Get the number of registrations so we know how big to allocate + * the response buffer. + */ + ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds); + if (ret) + return ret; + + num_regs = get_unaligned_le16(&tmp_rse.regctl); + if (!num_regs) { + resv->generation = le32_to_cpu(tmp_rse.gen); + return 0; + } + + rse_len = struct_size(rse, regctl_eds, num_regs); + rse = kzalloc(rse_len, GFP_KERNEL); + if (!rse) + return -ENOMEM; + + ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); + if (ret) + goto free_rse; + + if (num_regs != get_unaligned_le16(&rse->regctl)) { + kfree(rse); + goto get_num_regs; + } + + resv->generation = le32_to_cpu(rse->gen); + resv->type = block_pr_type_from_nvme(rse->rtype); + + for (i = 0; i < num_regs; i++) { + if (eds) { + if (rse->regctl_eds[i].rcsts) { + resv->key = le64_to_cpu(rse->regctl_eds[i].rkey); + break; + } + } else { + struct nvme_reservation_status *rs; + + rs = (struct nvme_reservation_status *)rse; + if (rs->regctl_ds[i].rcsts) { + resv->key = le64_to_cpu(rs->regctl_ds[i].rkey); + break; + } + } + } + +free_rse: + kfree(rse); + return ret; +} + +const struct pr_ops nvme_pr_ops = { + .pr_register = nvme_pr_register, + .pr_reserve = nvme_pr_reserve, + .pr_release = nvme_pr_release, + .pr_preempt = nvme_pr_preempt, + .pr_clear = nvme_pr_clear, + .pr_read_keys = nvme_pr_read_keys, + .pr_read_reservation = nvme_pr_read_reservation, +}; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c new file mode 100644 index 0000000000..c04317a966 --- /dev/null +++ b/drivers/nvme/host/rdma.c @@ -0,0 +1,2397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics RDMA host code. + * Copyright (c) 2015-2016 HGST, a Western Digital Company. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <rdma/mr_pool.h> +#include <linux/err.h> +#include <linux/string.h> +#include <linux/atomic.h> +#include <linux/blk-mq.h> +#include <linux/blk-integrity.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/nvme.h> +#include <asm/unaligned.h> + +#include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> +#include <linux/nvme-rdma.h> + +#include "nvme.h" +#include "fabrics.h" + + +#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 second */ + +#define NVME_RDMA_MAX_SEGMENTS 256 + +#define NVME_RDMA_MAX_INLINE_SEGMENTS 4 + +#define NVME_RDMA_DATA_SGL_SIZE \ + (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT) +#define NVME_RDMA_METADATA_SGL_SIZE \ + (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT) + +struct nvme_rdma_device { + struct ib_device *dev; + struct ib_pd *pd; + struct kref ref; + struct list_head entry; + unsigned int num_inline_segments; +}; + +struct nvme_rdma_qe { + struct ib_cqe cqe; + void *data; + u64 dma; +}; + +struct nvme_rdma_sgl { + int nents; + struct sg_table sg_table; +}; + +struct nvme_rdma_queue; +struct nvme_rdma_request { + struct nvme_request req; + struct ib_mr *mr; + struct nvme_rdma_qe sqe; + union nvme_result result; + __le16 status; + refcount_t ref; + struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; + u32 num_sge; + struct ib_reg_wr reg_wr; + struct ib_cqe reg_cqe; + struct nvme_rdma_queue *queue; + struct nvme_rdma_sgl data_sgl; + struct nvme_rdma_sgl *metadata_sgl; + bool use_sig_mr; +}; + +enum nvme_rdma_queue_flags { + NVME_RDMA_Q_ALLOCATED = 0, + NVME_RDMA_Q_LIVE = 1, + NVME_RDMA_Q_TR_READY = 2, +}; + +struct nvme_rdma_queue { + struct nvme_rdma_qe *rsp_ring; + int queue_size; + size_t cmnd_capsule_len; + struct nvme_rdma_ctrl *ctrl; + struct nvme_rdma_device *device; + struct ib_cq *ib_cq; + struct ib_qp *qp; + + unsigned long flags; + struct rdma_cm_id *cm_id; + int cm_error; + struct completion cm_done; + bool pi_support; + int cq_size; + struct mutex queue_lock; +}; + +struct nvme_rdma_ctrl { + /* read only in the hot path */ + struct nvme_rdma_queue *queues; + + /* other member variables */ + struct blk_mq_tag_set tag_set; + struct work_struct err_work; + + struct nvme_rdma_qe async_event_sqe; + + struct delayed_work reconnect_work; + + struct list_head list; + + struct blk_mq_tag_set admin_tag_set; + struct nvme_rdma_device *device; + + u32 max_fr_pages; + + struct sockaddr_storage addr; + struct sockaddr_storage src_addr; + + struct nvme_ctrl ctrl; + bool use_inline_data; + u32 io_queues[HCTX_MAX_TYPES]; +}; + +static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct nvme_rdma_ctrl, ctrl); +} + +static LIST_HEAD(device_list); +static DEFINE_MUTEX(device_list_mutex); + +static LIST_HEAD(nvme_rdma_ctrl_list); +static DEFINE_MUTEX(nvme_rdma_ctrl_mutex); + +/* + * Disabling this option makes small I/O goes faster, but is fundamentally + * unsafe. With it turned off we will have to register a global rkey that + * allows read and write access to all physical memory. 
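+ * ("turned off" here means the protection domain is allocated with
+ * IB_PD_UNSAFE_GLOBAL_RKEY in nvme_rdma_find_get_device()).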
+ */
+static bool register_always = true;
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+	 "Use memory registration even for contiguous memory regions");
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+		struct rdma_cm_event *event);
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvme_rdma_complete_rq(struct request *rq);
+
+static const struct blk_mq_ops nvme_rdma_mq_ops;
+static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
+
+static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
+{
+	return queue - queue->ctrl->queues;
+}
+
+static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
+{
+	return nvme_rdma_queue_idx(queue) >
+		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+		queue->ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
+{
+	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+		size_t capsule_size, enum dma_data_direction dir)
+{
+	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
+	kfree(qe->data);
+}
+
+static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+		size_t capsule_size, enum dma_data_direction dir)
+{
+	qe->data = kzalloc(capsule_size, GFP_KERNEL);
+	if (!qe->data)
+		return -ENOMEM;
+
+	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
+	if (ib_dma_mapping_error(ibdev, qe->dma)) {
+		kfree(qe->data);
+		qe->data = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void nvme_rdma_free_ring(struct ib_device *ibdev,
+		struct nvme_rdma_qe *ring, size_t ib_queue_size,
+		size_t capsule_size, enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < ib_queue_size; i++)
+		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
+	kfree(ring);
+}
+
+static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
+		size_t ib_queue_size, size_t capsule_size,
+		enum dma_data_direction dir)
+{
+	struct nvme_rdma_qe *ring;
+	int i;
+
+	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	/*
+	 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
+	 * lifetime. It's safe, since any change in the underlying RDMA device
+	 * will issue error recovery and queue re-creation.
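+	 * The ring entries are posted as receive buffers once the RDMA
+	 * connection is established (see nvme_rdma_conn_established()).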
+ */ + for (i = 0; i < ib_queue_size; i++) { + if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) + goto out_free_ring; + } + + return ring; + +out_free_ring: + nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir); + return NULL; +} + +static void nvme_rdma_qp_event(struct ib_event *event, void *context) +{ + pr_debug("QP event %s (%d)\n", + ib_event_msg(event->event), event->event); + +} + +static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) +{ + int ret; + + ret = wait_for_completion_interruptible(&queue->cm_done); + if (ret) + return ret; + WARN_ON_ONCE(queue->cm_error > 0); + return queue->cm_error; +} + +static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) +{ + struct nvme_rdma_device *dev = queue->device; + struct ib_qp_init_attr init_attr; + int ret; + + memset(&init_attr, 0, sizeof(init_attr)); + init_attr.event_handler = nvme_rdma_qp_event; + /* +1 for drain */ + init_attr.cap.max_send_wr = factor * queue->queue_size + 1; + /* +1 for drain */ + init_attr.cap.max_recv_wr = queue->queue_size + 1; + init_attr.cap.max_recv_sge = 1; + init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; + init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_attr.qp_type = IB_QPT_RC; + init_attr.send_cq = queue->ib_cq; + init_attr.recv_cq = queue->ib_cq; + if (queue->pi_support) + init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; + init_attr.qp_context = queue; + + ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); + + queue->qp = queue->cm_id->qp; + return ret; +} + +static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, + struct request *rq, unsigned int hctx_idx) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + + kfree(req->sqe.data); +} + +static int nvme_rdma_init_request(struct blk_mq_tag_set *set, + struct request *rq, unsigned int hctx_idx, + unsigned int numa_node) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; + struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; + + nvme_req(rq)->ctrl = &ctrl->ctrl; + req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); + if (!req->sqe.data) + return -ENOMEM; + + /* metadata nvme_rdma_sgl struct is located after command's data SGL */ + if (queue->pi_support) + req->metadata_sgl = (void *)nvme_req(rq) + + sizeof(struct nvme_rdma_request) + + NVME_RDMA_DATA_SGL_SIZE; + + req->queue = queue; + nvme_req(rq)->cmd = req->sqe.data; + + return 0; +} + +static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data); + struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; + + BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); + + hctx->driver_data = queue; + return 0; +} + +static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data); + struct nvme_rdma_queue *queue = &ctrl->queues[0]; + + BUG_ON(hctx_idx != 0); + + hctx->driver_data = queue; + return 0; +} + +static void nvme_rdma_free_dev(struct kref *ref) +{ + struct nvme_rdma_device *ndev = + container_of(ref, struct nvme_rdma_device, ref); + + mutex_lock(&device_list_mutex); + list_del(&ndev->entry); + mutex_unlock(&device_list_mutex); + + ib_dealloc_pd(ndev->pd); + kfree(ndev); +} + +static void nvme_rdma_dev_put(struct nvme_rdma_device *dev) +{ + kref_put(&dev->ref, nvme_rdma_free_dev); +} + +static int nvme_rdma_dev_get(struct nvme_rdma_device *dev) +{ + return kref_get_unless_zero(&dev->ref); +} + +static struct nvme_rdma_device * +nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) +{ + struct nvme_rdma_device *ndev; + + mutex_lock(&device_list_mutex); + list_for_each_entry(ndev, &device_list, entry) { + if (ndev->dev->node_guid == cm_id->device->node_guid && + nvme_rdma_dev_get(ndev)) + goto out_unlock; + } + + ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); + if (!ndev) + goto out_err; + + ndev->dev = cm_id->device; + kref_init(&ndev->ref); + + ndev->pd = ib_alloc_pd(ndev->dev, + register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY); + if (IS_ERR(ndev->pd)) + goto out_free_dev; + + if (!(ndev->dev->attrs.device_cap_flags & + IB_DEVICE_MEM_MGT_EXTENSIONS)) { + dev_err(&ndev->dev->dev, + "Memory registrations not supported.\n"); + goto out_free_pd; + } + + ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS, + ndev->dev->attrs.max_send_sge - 1); + list_add(&ndev->entry, &device_list); +out_unlock: + mutex_unlock(&device_list_mutex); + return ndev; + +out_free_pd: + ib_dealloc_pd(ndev->pd); +out_free_dev: + kfree(ndev); +out_err: + mutex_unlock(&device_list_mutex); + return NULL; +} + +static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue) +{ + if (nvme_rdma_poll_queue(queue)) + ib_free_cq(queue->ib_cq); + else + ib_cq_pool_put(queue->ib_cq, queue->cq_size); +} + +static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) +{ + struct nvme_rdma_device *dev; + struct ib_device *ibdev; + + if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) + return; + + dev = queue->device; + ibdev = dev->dev; + + if (queue->pi_support) + ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); + ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); + + /* + * The cm_id object might have been destroyed during RDMA connection + * establishment error flow to avoid getting other cma events, thus + * the destruction of the QP shouldn't use rdma_cm API. 
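+	 * That is why ib_destroy_qp() is called below instead of
+	 * rdma_destroy_qp().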
+ */
+	ib_destroy_qp(queue->qp);
+	nvme_rdma_free_cq(queue);
+
+	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+
+	nvme_rdma_dev_put(dev);
+}
+
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
+{
+	u32 max_page_list_len;
+
+	if (pi_support)
+		max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+	else
+		max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+
+	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
+}
+
+static int nvme_rdma_create_cq(struct ib_device *ibdev,
+		struct nvme_rdma_queue *queue)
+{
+	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
+
+	/*
+	 * Spread I/O queues completion vectors according to their queue index.
+	 * Admin queues can always go on completion vector 0.
+	 */
+	comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
+
+	/* Polling queues need direct cq polling context */
+	if (nvme_rdma_poll_queue(queue))
+		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
+					   comp_vector, IB_POLL_DIRECT);
+	else
+		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
+					      comp_vector, IB_POLL_SOFTIRQ);
+
+	if (IS_ERR(queue->ib_cq)) {
+		ret = PTR_ERR(queue->ib_cq);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
+{
+	struct ib_device *ibdev;
+	const int send_wr_factor = 3;	/* MR, SEND, INV */
+	const int cq_factor = send_wr_factor + 1;	/* + RECV */
+	int ret, pages_per_mr;
+
+	queue->device = nvme_rdma_find_get_device(queue->cm_id);
+	if (!queue->device) {
+		dev_err(queue->cm_id->device->dev.parent,
+			"no client data found!\n");
+		return -ECONNREFUSED;
+	}
+	ibdev = queue->device->dev;
+
+	/* +1 for ib_drain_qp */
+	queue->cq_size = cq_factor * queue->queue_size + 1;
+
+	ret = nvme_rdma_create_cq(ibdev, queue);
+	if (ret)
+		goto out_put_dev;
+
+	ret = nvme_rdma_create_qp(queue, send_wr_factor);
+	if (ret)
+		goto out_destroy_ib_cq;
+
+	queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
+			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+	if (!queue->rsp_ring) {
+		ret = -ENOMEM;
+		goto out_destroy_qp;
+	}
+
+	/*
+	 * Currently we don't use SG_GAPS MR's so if the first entry is
+	 * misaligned we'll end up using two entries for a single data page,
+	 * so one additional entry is required.
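+	 * That extra entry is the "+ 1" added to the result of
+	 * nvme_rdma_get_max_fr_pages() below.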
+ */ + pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; + ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, + queue->queue_size, + IB_MR_TYPE_MEM_REG, + pages_per_mr, 0); + if (ret) { + dev_err(queue->ctrl->ctrl.device, + "failed to initialize MR pool sized %d for QID %d\n", + queue->queue_size, nvme_rdma_queue_idx(queue)); + goto out_destroy_ring; + } + + if (queue->pi_support) { + ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, + queue->queue_size, IB_MR_TYPE_INTEGRITY, + pages_per_mr, pages_per_mr); + if (ret) { + dev_err(queue->ctrl->ctrl.device, + "failed to initialize PI MR pool sized %d for QID %d\n", + queue->queue_size, nvme_rdma_queue_idx(queue)); + goto out_destroy_mr_pool; + } + } + + set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); + + return 0; + +out_destroy_mr_pool: + ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); +out_destroy_ring: + nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, + sizeof(struct nvme_completion), DMA_FROM_DEVICE); +out_destroy_qp: + rdma_destroy_qp(queue->cm_id); +out_destroy_ib_cq: + nvme_rdma_free_cq(queue); +out_put_dev: + nvme_rdma_dev_put(queue->device); + return ret; +} + +static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, + int idx, size_t queue_size) +{ + struct nvme_rdma_queue *queue; + struct sockaddr *src_addr = NULL; + int ret; + + queue = &ctrl->queues[idx]; + mutex_init(&queue->queue_lock); + queue->ctrl = ctrl; + if (idx && ctrl->ctrl.max_integrity_segments) + queue->pi_support = true; + else + queue->pi_support = false; + init_completion(&queue->cm_done); + + if (idx > 0) + queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; + else + queue->cmnd_capsule_len = sizeof(struct nvme_command); + + queue->queue_size = queue_size; + + queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(queue->cm_id)) { + dev_info(ctrl->ctrl.device, + "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); + ret = PTR_ERR(queue->cm_id); + goto out_destroy_mutex; + } + + if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) + src_addr = (struct sockaddr *)&ctrl->src_addr; + + queue->cm_error = -ETIMEDOUT; + ret = rdma_resolve_addr(queue->cm_id, src_addr, + (struct sockaddr *)&ctrl->addr, + NVME_RDMA_CM_TIMEOUT_MS); + if (ret) { + dev_info(ctrl->ctrl.device, + "rdma_resolve_addr failed (%d).\n", ret); + goto out_destroy_cm_id; + } + + ret = nvme_rdma_wait_for_cm(queue); + if (ret) { + dev_info(ctrl->ctrl.device, + "rdma connection establishment failed (%d)\n", ret); + goto out_destroy_cm_id; + } + + set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); + + return 0; + +out_destroy_cm_id: + rdma_destroy_id(queue->cm_id); + nvme_rdma_destroy_queue_ib(queue); +out_destroy_mutex: + mutex_destroy(&queue->queue_lock); + return ret; +} + +static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) +{ + rdma_disconnect(queue->cm_id); + ib_drain_qp(queue->qp); +} + +static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) +{ + if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) + return; + + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) + __nvme_rdma_stop_queue(queue); + mutex_unlock(&queue->queue_lock); +} + +static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) +{ + if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) + return; + + rdma_destroy_id(queue->cm_id); + nvme_rdma_destroy_queue_ib(queue); + mutex_destroy(&queue->queue_lock); +} + +static void nvme_rdma_free_io_queues(struct 
nvme_rdma_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->ctrl.queue_count; i++) + nvme_rdma_free_queue(&ctrl->queues[i]); +} + +static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->ctrl.queue_count; i++) + nvme_rdma_stop_queue(&ctrl->queues[i]); +} + +static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) +{ + struct nvme_rdma_queue *queue = &ctrl->queues[idx]; + int ret; + + if (idx) + ret = nvmf_connect_io_queue(&ctrl->ctrl, idx); + else + ret = nvmf_connect_admin_queue(&ctrl->ctrl); + + if (!ret) { + set_bit(NVME_RDMA_Q_LIVE, &queue->flags); + } else { + if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) + __nvme_rdma_stop_queue(queue); + dev_info(ctrl->ctrl.device, + "failed to connect queue: %d ret=%d\n", idx, ret); + } + return ret; +} + +static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl, + int first, int last) +{ + int i, ret = 0; + + for (i = first; i < last; i++) { + ret = nvme_rdma_start_queue(ctrl, i); + if (ret) + goto out_stop_queues; + } + + return 0; + +out_stop_queues: + for (i--; i >= first; i--) + nvme_rdma_stop_queue(&ctrl->queues[i]); + return ret; +} + +static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; + int i, ret; + + nr_io_queues = nvmf_nr_io_queues(opts); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret) + return ret; + + if (nr_io_queues == 0) { + dev_err(ctrl->ctrl.device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } + + ctrl->ctrl.queue_count = nr_io_queues + 1; + dev_info(ctrl->ctrl.device, + "creating %d I/O queues.\n", nr_io_queues); + + nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues); + for (i = 1; i < ctrl->ctrl.queue_count; i++) { + ret = nvme_rdma_alloc_queue(ctrl, i, + ctrl->ctrl.sqsize + 1); + if (ret) + goto out_free_queues; + } + + return 0; + +out_free_queues: + for (i--; i >= 1; i--) + nvme_rdma_free_queue(&ctrl->queues[i]); + + return ret; +} + +static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl) +{ + unsigned int cmd_size = sizeof(struct nvme_rdma_request) + + NVME_RDMA_DATA_SGL_SIZE; + + if (ctrl->max_integrity_segments) + cmd_size += sizeof(struct nvme_rdma_sgl) + + NVME_RDMA_METADATA_SGL_SIZE; + + return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set, + &nvme_rdma_mq_ops, + ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, + cmd_size); +} + +static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl) +{ + if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } + nvme_rdma_free_queue(&ctrl->queues[0]); +} + +static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, + bool new) +{ + bool pi_capable = false; + int error; + + error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); + if (error) + return error; + + ctrl->device = ctrl->queues[0].device; + ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); + + /* T10-PI support */ + if (ctrl->device->dev->attrs.kernel_cap_flags & + IBK_INTEGRITY_HANDOVER) + pi_capable = true; + + ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, + pi_capable); + + /* + * Bind the async event SQE DMA mapping to the admin queue lifetime. + * It's safe, since any chage in the underlying RDMA device will issue + * error recovery and queue re-creation. 
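+	 * The mapping is released together with the admin queue in
+	 * nvme_rdma_destroy_admin_queue().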
+ */ + error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + if (error) + goto out_free_queue; + + if (new) { + error = nvme_alloc_admin_tag_set(&ctrl->ctrl, + &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops, + sizeof(struct nvme_rdma_request) + + NVME_RDMA_DATA_SGL_SIZE); + if (error) + goto out_free_async_qe; + + } + + error = nvme_rdma_start_queue(ctrl, 0); + if (error) + goto out_remove_admin_tag_set; + + error = nvme_enable_ctrl(&ctrl->ctrl); + if (error) + goto out_stop_queue; + + ctrl->ctrl.max_segments = ctrl->max_fr_pages; + ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); + if (pi_capable) + ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; + else + ctrl->ctrl.max_integrity_segments = 0; + + nvme_unquiesce_admin_queue(&ctrl->ctrl); + + error = nvme_init_ctrl_finish(&ctrl->ctrl, false); + if (error) + goto out_quiesce_queue; + + return 0; + +out_quiesce_queue: + nvme_quiesce_admin_queue(&ctrl->ctrl); + blk_sync_queue(ctrl->ctrl.admin_q); +out_stop_queue: + nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); +out_remove_admin_tag_set: + if (new) + nvme_remove_admin_tag_set(&ctrl->ctrl); +out_free_async_qe: + if (ctrl->async_event_sqe.data) { + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } +out_free_queue: + nvme_rdma_free_queue(&ctrl->queues[0]); + return error; +} + +static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) +{ + int ret, nr_queues; + + ret = nvme_rdma_alloc_io_queues(ctrl); + if (ret) + return ret; + + if (new) { + ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl); + if (ret) + goto out_free_io_queues; + } + + /* + * Only start IO queues for which we have allocated the tagset + * and limitted it to the available queues. On reconnects, the + * queue number might have changed. + */ + nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); + ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues); + if (ret) + goto out_cleanup_tagset; + + if (!new) { + nvme_start_freeze(&ctrl->ctrl); + nvme_unquiesce_io_queues(&ctrl->ctrl); + if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. + */ + ret = -ENODEV; + nvme_unfreeze(&ctrl->ctrl); + goto out_wait_freeze_timed_out; + } + blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, + ctrl->ctrl.queue_count - 1); + nvme_unfreeze(&ctrl->ctrl); + } + + /* + * If the number of queues has increased (reconnect case) + * start all new queues now. 
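+	 * Queues in the range [nr_queues, tag_set.nr_hw_queues] are started
+	 * by the second nvme_rdma_start_io_queues() call below.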
+ */ + ret = nvme_rdma_start_io_queues(ctrl, nr_queues, + ctrl->tag_set.nr_hw_queues + 1); + if (ret) + goto out_wait_freeze_timed_out; + + return 0; + +out_wait_freeze_timed_out: + nvme_quiesce_io_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); +out_cleanup_tagset: + nvme_cancel_tagset(&ctrl->ctrl); + if (new) + nvme_remove_io_tag_set(&ctrl->ctrl); +out_free_io_queues: + nvme_rdma_free_io_queues(ctrl); + return ret; +} + +static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, + bool remove) +{ + nvme_quiesce_admin_queue(&ctrl->ctrl); + blk_sync_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); + if (remove) { + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvme_remove_admin_tag_set(&ctrl->ctrl); + } + nvme_rdma_destroy_admin_queue(ctrl); +} + +static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, + bool remove) +{ + if (ctrl->ctrl.queue_count > 1) { + nvme_quiesce_io_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); + nvme_cancel_tagset(&ctrl->ctrl); + if (remove) { + nvme_unquiesce_io_queues(&ctrl->ctrl); + nvme_remove_io_tag_set(&ctrl->ctrl); + } + nvme_rdma_free_io_queues(ctrl); + } +} + +static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + + flush_work(&ctrl->err_work); + cancel_delayed_work_sync(&ctrl->reconnect_work); +} + +static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + + if (list_empty(&ctrl->list)) + goto free_ctrl; + + mutex_lock(&nvme_rdma_ctrl_mutex); + list_del(&ctrl->list); + mutex_unlock(&nvme_rdma_ctrl_mutex); + + nvmf_free_options(nctrl->opts); +free_ctrl: + kfree(ctrl->queues); + kfree(ctrl); +} + +static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) +{ + enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); + + /* If we are resetting/deleting then do nothing */ + if (state != NVME_CTRL_CONNECTING) { + WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE); + return; + } + + if (nvmf_should_reconnect(&ctrl->ctrl)) { + dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", + ctrl->ctrl.opts->reconnect_delay); + queue_delayed_work(nvme_wq, &ctrl->reconnect_work, + ctrl->ctrl.opts->reconnect_delay * HZ); + } else { + nvme_delete_ctrl(&ctrl->ctrl); + } +} + +static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) +{ + int ret; + bool changed; + + ret = nvme_rdma_configure_admin_queue(ctrl, new); + if (ret) + return ret; + + if (ctrl->ctrl.icdoff) { + ret = -EOPNOTSUPP; + dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); + goto destroy_admin; + } + + if (!(ctrl->ctrl.sgls & (1 << 2))) { + ret = -EOPNOTSUPP; + dev_err(ctrl->ctrl.device, + "Mandatory keyed sgls are not supported!\n"); + goto destroy_admin; + } + + if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { + dev_warn(ctrl->ctrl.device, + "queue_size %zu > ctrl sqsize %u, clamping down\n", + ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); + } + + if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { + dev_warn(ctrl->ctrl.device, + "ctrl sqsize %u > max queue size %u, clamping down\n", + ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); + ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; + } + + if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { + dev_warn(ctrl->ctrl.device, + "sqsize %u > ctrl maxcmd %u, clamping down\n", + ctrl->ctrl.sqsize + 1, 
ctrl->ctrl.maxcmd); + ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; + } + + if (ctrl->ctrl.sgls & (1 << 20)) + ctrl->use_inline_data = true; + + if (ctrl->ctrl.queue_count > 1) { + ret = nvme_rdma_configure_io_queues(ctrl, new); + if (ret) + goto destroy_admin; + } + + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); + if (!changed) { + /* + * state change failure is ok if we started ctrl delete, + * unless we're during creation of a new controller to + * avoid races with teardown flow. + */ + enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); + + WARN_ON_ONCE(state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO); + WARN_ON_ONCE(new); + ret = -EINVAL; + goto destroy_io; + } + + nvme_start_ctrl(&ctrl->ctrl); + return 0; + +destroy_io: + if (ctrl->ctrl.queue_count > 1) { + nvme_quiesce_io_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); + nvme_cancel_tagset(&ctrl->ctrl); + if (new) + nvme_remove_io_tag_set(&ctrl->ctrl); + nvme_rdma_free_io_queues(ctrl); + } +destroy_admin: + nvme_quiesce_admin_queue(&ctrl->ctrl); + blk_sync_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); + if (new) + nvme_remove_admin_tag_set(&ctrl->ctrl); + nvme_rdma_destroy_admin_queue(ctrl); + return ret; +} + +static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) +{ + struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), + struct nvme_rdma_ctrl, reconnect_work); + + ++ctrl->ctrl.nr_reconnects; + + if (nvme_rdma_setup_ctrl(ctrl, false)) + goto requeue; + + dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", + ctrl->ctrl.nr_reconnects); + + ctrl->ctrl.nr_reconnects = 0; + + return; + +requeue: + dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", + ctrl->ctrl.nr_reconnects); + nvme_rdma_reconnect_or_remove(ctrl); +} + +static void nvme_rdma_error_recovery_work(struct work_struct *work) +{ + struct nvme_rdma_ctrl *ctrl = container_of(work, + struct nvme_rdma_ctrl, err_work); + + nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); + nvme_rdma_teardown_io_queues(ctrl, false); + nvme_unquiesce_io_queues(&ctrl->ctrl); + nvme_rdma_teardown_admin_queue(ctrl, false); + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvme_auth_stop(&ctrl->ctrl); + + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + /* state change failure is ok if we started ctrl delete */ + enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); + + WARN_ON_ONCE(state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO); + return; + } + + nvme_rdma_reconnect_or_remove(ctrl); +} + +static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) +{ + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) + return; + + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); + queue_work(nvme_reset_wq, &ctrl->err_work); +} + +static void nvme_rdma_end_request(struct nvme_rdma_request *req) +{ + struct request *rq = blk_mq_rq_from_pdu(req); + + if (!refcount_dec_and_test(&req->ref)) + return; + if (!nvme_try_complete_req(rq, req->status, req->result)) + nvme_rdma_complete_rq(rq); +} + +static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, + const char *op) +{ + struct nvme_rdma_queue *queue = wc->qp->qp_context; + struct nvme_rdma_ctrl *ctrl = queue->ctrl; + + if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE) + dev_info(ctrl->ctrl.device, + "%s for CQE 0x%p failed with status %s (%d)\n", + op, wc->wr_cqe, 
+ ib_wc_status_msg(wc->status), wc->status); + nvme_rdma_error_recovery(ctrl); +} + +static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "MEMREG"); +} + +static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct nvme_rdma_request *req = + container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); + + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); + else + nvme_rdma_end_request(req); +} + +static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, + struct nvme_rdma_request *req) +{ + struct ib_send_wr wr = { + .opcode = IB_WR_LOCAL_INV, + .next = NULL, + .num_sge = 0, + .send_flags = IB_SEND_SIGNALED, + .ex.invalidate_rkey = req->mr->rkey, + }; + + req->reg_cqe.done = nvme_rdma_inv_rkey_done; + wr.wr_cqe = &req->reg_cqe; + + return ib_post_send(queue->qp, &wr, NULL); +} + +static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + + if (blk_integrity_rq(rq)) { + ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, + req->metadata_sgl->nents, rq_dma_dir(rq)); + sg_free_table_chained(&req->metadata_sgl->sg_table, + NVME_INLINE_METADATA_SG_CNT); + } + + ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, + rq_dma_dir(rq)); + sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); +} + +static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, + struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_device *dev = queue->device; + struct ib_device *ibdev = dev->dev; + struct list_head *pool = &queue->qp->rdma_mrs; + + if (!blk_rq_nr_phys_segments(rq)) + return; + + if (req->use_sig_mr) + pool = &queue->qp->sig_mrs; + + if (req->mr) { + ib_mr_pool_put(queue->qp, pool, req->mr); + req->mr = NULL; + } + + nvme_rdma_dma_unmap_req(ibdev, rq); +} + +static int nvme_rdma_set_sg_null(struct nvme_command *c) +{ + struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; + + sg->addr = 0; + put_unaligned_le24(0, sg->length); + put_unaligned_le32(0, sg->key); + sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; + return 0; +} + +static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, + struct nvme_rdma_request *req, struct nvme_command *c, + int count) +{ + struct nvme_sgl_desc *sg = &c->common.dptr.sgl; + struct ib_sge *sge = &req->sge[1]; + struct scatterlist *sgl; + u32 len = 0; + int i; + + for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { + sge->addr = sg_dma_address(sgl); + sge->length = sg_dma_len(sgl); + sge->lkey = queue->device->pd->local_dma_lkey; + len += sge->length; + sge++; + } + + sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); + sg->length = cpu_to_le32(len); + sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; + + req->num_sge += count; + return 0; +} + +static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, + struct nvme_rdma_request *req, struct nvme_command *c) +{ + struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; + + sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); + put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); + put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); + sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; + return 0; +} + +static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, + struct nvme_rdma_request *req, struct nvme_command *c, + int count) +{ 
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; + int nr; + + req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); + if (WARN_ON_ONCE(!req->mr)) + return -EAGAIN; + + /* + * Align the MR to a 4K page size to match the ctrl page size and + * the block virtual boundary. + */ + nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, + SZ_4K); + if (unlikely(nr < count)) { + ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); + req->mr = NULL; + if (nr < 0) + return nr; + return -EINVAL; + } + + ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); + + req->reg_cqe.done = nvme_rdma_memreg_done; + memset(&req->reg_wr, 0, sizeof(req->reg_wr)); + req->reg_wr.wr.opcode = IB_WR_REG_MR; + req->reg_wr.wr.wr_cqe = &req->reg_cqe; + req->reg_wr.wr.num_sge = 0; + req->reg_wr.mr = req->mr; + req->reg_wr.key = req->mr->rkey; + req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + + sg->addr = cpu_to_le64(req->mr->iova); + put_unaligned_le24(req->mr->length, sg->length); + put_unaligned_le32(req->mr->rkey, sg->key); + sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | + NVME_SGL_FMT_INVALIDATE; + + return 0; +} + +static void nvme_rdma_set_sig_domain(struct blk_integrity *bi, + struct nvme_command *cmd, struct ib_sig_domain *domain, + u16 control, u8 pi_type) +{ + domain->sig_type = IB_SIG_TYPE_T10_DIF; + domain->sig.dif.bg_type = IB_T10DIF_CRC; + domain->sig.dif.pi_interval = 1 << bi->interval_exp; + domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); + if (control & NVME_RW_PRINFO_PRCHK_REF) + domain->sig.dif.ref_remap = true; + + domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); + domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); + domain->sig.dif.app_escape = true; + if (pi_type == NVME_NS_DPS_PI_TYPE3) + domain->sig.dif.ref_escape = true; +} + +static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi, + struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs, + u8 pi_type) +{ + u16 control = le16_to_cpu(cmd->rw.control); + + memset(sig_attrs, 0, sizeof(*sig_attrs)); + if (control & NVME_RW_PRINFO_PRACT) { + /* for WRITE_INSERT/READ_STRIP no memory domain */ + sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; + nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, + pi_type); + /* Clear the PRACT bit since HCA will generate/verify the PI */ + control &= ~NVME_RW_PRINFO_PRACT; + cmd->rw.control = cpu_to_le16(control); + } else { + /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ + nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, + pi_type); + nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, + pi_type); + } +} + +static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask) +{ + *mask = 0; + if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) + *mask |= IB_SIG_CHECK_REFTAG; + if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) + *mask |= IB_SIG_CHECK_GUARD; +} + +static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "SIG"); +} + +static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, + struct nvme_rdma_request *req, struct nvme_command *c, + int count, int pi_count) +{ + struct nvme_rdma_sgl *sgl = &req->data_sgl; + struct ib_reg_wr *wr = &req->reg_wr; + struct request *rq = blk_mq_rq_from_pdu(req); + struct nvme_ns *ns = rq->q->queuedata; + struct bio *bio = rq->bio; + struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; + int nr; 
+ + req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); + if (WARN_ON_ONCE(!req->mr)) + return -EAGAIN; + + nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, + req->metadata_sgl->sg_table.sgl, pi_count, NULL, + SZ_4K); + if (unlikely(nr)) + goto mr_put; + + nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, + req->mr->sig_attrs, ns->pi_type); + nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); + + ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); + + req->reg_cqe.done = nvme_rdma_sig_done; + memset(wr, 0, sizeof(*wr)); + wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; + wr->wr.wr_cqe = &req->reg_cqe; + wr->wr.num_sge = 0; + wr->wr.send_flags = 0; + wr->mr = req->mr; + wr->key = req->mr->rkey; + wr->access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + + sg->addr = cpu_to_le64(req->mr->iova); + put_unaligned_le24(req->mr->length, sg->length); + put_unaligned_le32(req->mr->rkey, sg->key); + sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; + + return 0; + +mr_put: + ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); + req->mr = NULL; + if (nr < 0) + return nr; + return -EINVAL; +} + +static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, + int *count, int *pi_count) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + int ret; + + req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); + ret = sg_alloc_table_chained(&req->data_sgl.sg_table, + blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, + NVME_INLINE_SG_CNT); + if (ret) + return -ENOMEM; + + req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, + req->data_sgl.sg_table.sgl); + + *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, + req->data_sgl.nents, rq_dma_dir(rq)); + if (unlikely(*count <= 0)) { + ret = -EIO; + goto out_free_table; + } + + if (blk_integrity_rq(rq)) { + req->metadata_sgl->sg_table.sgl = + (struct scatterlist *)(req->metadata_sgl + 1); + ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, + blk_rq_count_integrity_sg(rq->q, rq->bio), + req->metadata_sgl->sg_table.sgl, + NVME_INLINE_METADATA_SG_CNT); + if (unlikely(ret)) { + ret = -ENOMEM; + goto out_unmap_sg; + } + + req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, + rq->bio, req->metadata_sgl->sg_table.sgl); + *pi_count = ib_dma_map_sg(ibdev, + req->metadata_sgl->sg_table.sgl, + req->metadata_sgl->nents, + rq_dma_dir(rq)); + if (unlikely(*pi_count <= 0)) { + ret = -EIO; + goto out_free_pi_table; + } + } + + return 0; + +out_free_pi_table: + sg_free_table_chained(&req->metadata_sgl->sg_table, + NVME_INLINE_METADATA_SG_CNT); +out_unmap_sg: + ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, + rq_dma_dir(rq)); +out_free_table: + sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); + return ret; +} + +static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, + struct request *rq, struct nvme_command *c) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_device *dev = queue->device; + struct ib_device *ibdev = dev->dev; + int pi_count = 0; + int count, ret; + + req->num_sge = 1; + refcount_set(&req->ref, 2); /* send and recv completions */ + + c->common.flags |= NVME_CMD_SGL_METABUF; + + if (!blk_rq_nr_phys_segments(rq)) + return nvme_rdma_set_sg_null(c); + + ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count); + if (unlikely(ret)) + return ret; + + if (req->use_sig_mr) { + ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); + goto out; + } + + if 
(count <= dev->num_inline_segments) { + if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && + queue->ctrl->use_inline_data && + blk_rq_payload_bytes(rq) <= + nvme_rdma_inline_data_size(queue)) { + ret = nvme_rdma_map_sg_inline(queue, req, c, count); + goto out; + } + + if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { + ret = nvme_rdma_map_sg_single(queue, req, c); + goto out; + } + } + + ret = nvme_rdma_map_sg_fr(queue, req, c, count); +out: + if (unlikely(ret)) + goto out_dma_unmap_req; + + return 0; + +out_dma_unmap_req: + nvme_rdma_dma_unmap_req(ibdev, rq); + return ret; +} + +static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct nvme_rdma_qe *qe = + container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); + struct nvme_rdma_request *req = + container_of(qe, struct nvme_rdma_request, sqe); + + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "SEND"); + else + nvme_rdma_end_request(req); +} + +static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, + struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, + struct ib_send_wr *first) +{ + struct ib_send_wr wr; + int ret; + + sge->addr = qe->dma; + sge->length = sizeof(struct nvme_command); + sge->lkey = queue->device->pd->local_dma_lkey; + + wr.next = NULL; + wr.wr_cqe = &qe->cqe; + wr.sg_list = sge; + wr.num_sge = num_sge; + wr.opcode = IB_WR_SEND; + wr.send_flags = IB_SEND_SIGNALED; + + if (first) + first->next = ≀ + else + first = ≀ + + ret = ib_post_send(queue->qp, first, NULL); + if (unlikely(ret)) { + dev_err(queue->ctrl->ctrl.device, + "%s failed with error code %d\n", __func__, ret); + } + return ret; +} + +static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, + struct nvme_rdma_qe *qe) +{ + struct ib_recv_wr wr; + struct ib_sge list; + int ret; + + list.addr = qe->dma; + list.length = sizeof(struct nvme_completion); + list.lkey = queue->device->pd->local_dma_lkey; + + qe->cqe.done = nvme_rdma_recv_done; + + wr.next = NULL; + wr.wr_cqe = &qe->cqe; + wr.sg_list = &list; + wr.num_sge = 1; + + ret = ib_post_recv(queue->qp, &wr, NULL); + if (unlikely(ret)) { + dev_err(queue->ctrl->ctrl.device, + "%s failed with error code %d\n", __func__, ret); + } + return ret; +} + +static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) +{ + u32 queue_idx = nvme_rdma_queue_idx(queue); + + if (queue_idx == 0) + return queue->ctrl->admin_tag_set.tags[queue_idx]; + return queue->ctrl->tag_set.tags[queue_idx - 1]; +} + +static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "ASYNC"); +} + +static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); + struct nvme_rdma_queue *queue = &ctrl->queues[0]; + struct ib_device *dev = queue->device->dev; + struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; + struct nvme_command *cmd = sqe->data; + struct ib_sge sge; + int ret; + + ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); + + memset(cmd, 0, sizeof(*cmd)); + cmd->common.opcode = nvme_admin_async_event; + cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; + cmd->common.flags |= NVME_CMD_SGL_METABUF; + nvme_rdma_set_sg_null(cmd); + + sqe->cqe.done = nvme_rdma_async_done; + + ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), + DMA_TO_DEVICE); + + ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); + WARN_ON_ONCE(ret); +} + +static void nvme_rdma_process_nvme_rsp(struct 
nvme_rdma_queue *queue, + struct nvme_completion *cqe, struct ib_wc *wc) +{ + struct request *rq; + struct nvme_rdma_request *req; + + rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); + if (!rq) { + dev_err(queue->ctrl->ctrl.device, + "got bad command_id %#x on QP %#x\n", + cqe->command_id, queue->qp->qp_num); + nvme_rdma_error_recovery(queue->ctrl); + return; + } + req = blk_mq_rq_to_pdu(rq); + + req->status = cqe->status; + req->result = cqe->result; + + if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { + if (unlikely(!req->mr || + wc->ex.invalidate_rkey != req->mr->rkey)) { + dev_err(queue->ctrl->ctrl.device, + "Bogus remote invalidation for rkey %#x\n", + req->mr ? req->mr->rkey : 0); + nvme_rdma_error_recovery(queue->ctrl); + } + } else if (req->mr) { + int ret; + + ret = nvme_rdma_inv_rkey(queue, req); + if (unlikely(ret < 0)) { + dev_err(queue->ctrl->ctrl.device, + "Queueing INV WR for rkey %#x failed (%d)\n", + req->mr->rkey, ret); + nvme_rdma_error_recovery(queue->ctrl); + } + /* the local invalidation completion will end the request */ + return; + } + + nvme_rdma_end_request(req); +} + +static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct nvme_rdma_qe *qe = + container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); + struct nvme_rdma_queue *queue = wc->qp->qp_context; + struct ib_device *ibdev = queue->device->dev; + struct nvme_completion *cqe = qe->data; + const size_t len = sizeof(struct nvme_completion); + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + nvme_rdma_wr_error(cq, wc, "RECV"); + return; + } + + /* sanity checking for received data length */ + if (unlikely(wc->byte_len < len)) { + dev_err(queue->ctrl->ctrl.device, + "Unexpected nvme completion length(%d)\n", wc->byte_len); + nvme_rdma_error_recovery(queue->ctrl); + return; + } + + ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); + /* + * AEN requests are special as they don't time out and can + * survive any kind of queue freeze and often don't respond to + * aborts. We don't even bother to allocate a struct request + * for them but rather special case them here. 
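+	 * nvme_is_aen_req() identifies them by command_id, and they are
+	 * completed via nvme_complete_async_event() instead.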
+ */ + if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), + cqe->command_id))) + nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, + &cqe->result); + else + nvme_rdma_process_nvme_rsp(queue, cqe, wc); + ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); + + nvme_rdma_post_recv(queue, qe); +} + +static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) +{ + int ret, i; + + for (i = 0; i < queue->queue_size; i++) { + ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); + if (ret) + return ret; + } + + return 0; +} + +static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, + struct rdma_cm_event *ev) +{ + struct rdma_cm_id *cm_id = queue->cm_id; + int status = ev->status; + const char *rej_msg; + const struct nvme_rdma_cm_rej *rej_data; + u8 rej_data_len; + + rej_msg = rdma_reject_msg(cm_id, status); + rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); + + if (rej_data && rej_data_len >= sizeof(u16)) { + u16 sts = le16_to_cpu(rej_data->sts); + + dev_err(queue->ctrl->ctrl.device, + "Connect rejected: status %d (%s) nvme status %d (%s).\n", + status, rej_msg, sts, nvme_rdma_cm_msg(sts)); + } else { + dev_err(queue->ctrl->ctrl.device, + "Connect rejected: status %d (%s).\n", status, rej_msg); + } + + return -ECONNRESET; +} + +static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) +{ + struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; + int ret; + + ret = nvme_rdma_create_queue_ib(queue); + if (ret) + return ret; + + if (ctrl->opts->tos >= 0) + rdma_set_service_type(queue->cm_id, ctrl->opts->tos); + ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS); + if (ret) { + dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", + queue->cm_error); + goto out_destroy_queue; + } + + return 0; + +out_destroy_queue: + nvme_rdma_destroy_queue_ib(queue); + return ret; +} + +static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) +{ + struct nvme_rdma_ctrl *ctrl = queue->ctrl; + struct rdma_conn_param param = { }; + struct nvme_rdma_cm_req priv = { }; + int ret; + + param.qp_num = queue->qp->qp_num; + param.flow_control = 1; + + param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; + /* maximum retry count */ + param.retry_count = 7; + param.rnr_retry_count = 7; + param.private_data = &priv; + param.private_data_len = sizeof(priv); + + priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); + priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); + /* + * set the admin queue depth to the minimum size + * specified by the Fabrics standard. + */ + if (priv.qid == 0) { + priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH); + priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); + } else { + /* + * current interpretation of the fabrics spec + * is at minimum you make hrqsize sqsize+1, or a + * 1's based representation of sqsize. 
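+		 * So for I/O queues hrqsize is set to the queue size and
+		 * hsqsize to the 0's based sqsize value.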
+ */ + priv.hrqsize = cpu_to_le16(queue->queue_size); + priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); + } + + ret = rdma_connect_locked(queue->cm_id, ¶m); + if (ret) { + dev_err(ctrl->ctrl.device, + "rdma_connect_locked failed (%d).\n", ret); + return ret; + } + + return 0; +} + +static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *ev) +{ + struct nvme_rdma_queue *queue = cm_id->context; + int cm_error = 0; + + dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", + rdma_event_msg(ev->event), ev->event, + ev->status, cm_id); + + switch (ev->event) { + case RDMA_CM_EVENT_ADDR_RESOLVED: + cm_error = nvme_rdma_addr_resolved(queue); + break; + case RDMA_CM_EVENT_ROUTE_RESOLVED: + cm_error = nvme_rdma_route_resolved(queue); + break; + case RDMA_CM_EVENT_ESTABLISHED: + queue->cm_error = nvme_rdma_conn_established(queue); + /* complete cm_done regardless of success/failure */ + complete(&queue->cm_done); + return 0; + case RDMA_CM_EVENT_REJECTED: + cm_error = nvme_rdma_conn_rejected(queue, ev); + break; + case RDMA_CM_EVENT_ROUTE_ERROR: + case RDMA_CM_EVENT_CONNECT_ERROR: + case RDMA_CM_EVENT_UNREACHABLE: + case RDMA_CM_EVENT_ADDR_ERROR: + dev_dbg(queue->ctrl->ctrl.device, + "CM error event %d\n", ev->event); + cm_error = -ECONNRESET; + break; + case RDMA_CM_EVENT_DISCONNECTED: + case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + dev_dbg(queue->ctrl->ctrl.device, + "disconnect received - connection closed\n"); + nvme_rdma_error_recovery(queue->ctrl); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + /* device removal is handled via the ib_client API */ + break; + default: + dev_err(queue->ctrl->ctrl.device, + "Unexpected RDMA CM event (%d)\n", ev->event); + nvme_rdma_error_recovery(queue->ctrl); + break; + } + + if (cm_error) { + queue->cm_error = cm_error; + complete(&queue->cm_done); + } + + return 0; +} + +static void nvme_rdma_complete_timed_out(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + + nvme_rdma_stop_queue(queue); + nvmf_complete_timed_out_request(rq); +} + +static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct nvme_rdma_ctrl *ctrl = queue->ctrl; + + dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", + rq->tag, nvme_rdma_queue_idx(queue)); + + if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) { + /* + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. + */ + nvme_rdma_complete_timed_out(rq); + return BLK_EH_DONE; + } + + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. 
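+	 * Returning BLK_EH_RESET_TIMER keeps the request outstanding while
+	 * the error recovery work tears down the queues and cancels
+	 * outstanding requests.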
+ */ + nvme_rdma_error_recovery(ctrl); + return BLK_EH_RESET_TIMER; +} + +static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct nvme_rdma_queue *queue = hctx->driver_data; + struct request *rq = bd->rq; + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_qe *sqe = &req->sqe; + struct nvme_command *c = nvme_req(rq)->cmd; + struct ib_device *dev; + bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); + blk_status_t ret; + int err; + + WARN_ON_ONCE(rq->tag < 0); + + if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) + return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); + + dev = queue->device->dev; + + req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, + sizeof(struct nvme_command), + DMA_TO_DEVICE); + err = ib_dma_mapping_error(dev, req->sqe.dma); + if (unlikely(err)) + return BLK_STS_RESOURCE; + + ib_dma_sync_single_for_cpu(dev, sqe->dma, + sizeof(struct nvme_command), DMA_TO_DEVICE); + + ret = nvme_setup_cmd(ns, rq); + if (ret) + goto unmap_qe; + + nvme_start_request(rq); + + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + queue->pi_support && + (c->common.opcode == nvme_cmd_write || + c->common.opcode == nvme_cmd_read) && + nvme_ns_has_pi(ns)) + req->use_sig_mr = true; + else + req->use_sig_mr = false; + + err = nvme_rdma_map_data(queue, rq, c); + if (unlikely(err < 0)) { + dev_err(queue->ctrl->ctrl.device, + "Failed to map data (%d)\n", err); + goto err; + } + + sqe->cqe.done = nvme_rdma_send_done; + + ib_dma_sync_single_for_device(dev, sqe->dma, + sizeof(struct nvme_command), DMA_TO_DEVICE); + + err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, + req->mr ? &req->reg_wr.wr : NULL); + if (unlikely(err)) + goto err_unmap; + + return BLK_STS_OK; + +err_unmap: + nvme_rdma_unmap_data(queue, rq); +err: + if (err == -EIO) + ret = nvme_host_path_error(rq); + else if (err == -ENOMEM || err == -EAGAIN) + ret = BLK_STS_RESOURCE; + else + ret = BLK_STS_IOERR; + nvme_cleanup_cmd(rq); +unmap_qe: + ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), + DMA_TO_DEVICE); + return ret; +} + +static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) +{ + struct nvme_rdma_queue *queue = hctx->driver_data; + + return ib_process_cq_direct(queue->ib_cq, -1); +} + +static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req) +{ + struct request *rq = blk_mq_rq_from_pdu(req); + struct ib_mr_status mr_status; + int ret; + + ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + pr_err("ib_check_mr_status failed, ret %d\n", ret); + nvme_req(rq)->status = NVME_SC_INVALID_PI; + return; + } + + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + nvme_req(rq)->status = NVME_SC_GUARD_CHECK; + break; + case IB_SIG_BAD_REFTAG: + nvme_req(rq)->status = NVME_SC_REFTAG_CHECK; + break; + case IB_SIG_BAD_APPTAG: + nvme_req(rq)->status = NVME_SC_APPTAG_CHECK; + break; + } + pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n", + mr_status.sig_err.err_type, mr_status.sig_err.expected, + mr_status.sig_err.actual); + } +} + +static void nvme_rdma_complete_rq(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct ib_device *ibdev = queue->device->dev; + + if (req->use_sig_mr) + nvme_rdma_check_pi_status(req); + + 
nvme_rdma_unmap_data(queue, rq); + ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), + DMA_TO_DEVICE); + nvme_complete_rq(rq); +} + +static void nvme_rdma_map_queues(struct blk_mq_tag_set *set) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); + + nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); +} + +static const struct blk_mq_ops nvme_rdma_mq_ops = { + .queue_rq = nvme_rdma_queue_rq, + .complete = nvme_rdma_complete_rq, + .init_request = nvme_rdma_init_request, + .exit_request = nvme_rdma_exit_request, + .init_hctx = nvme_rdma_init_hctx, + .timeout = nvme_rdma_timeout, + .map_queues = nvme_rdma_map_queues, + .poll = nvme_rdma_poll, +}; + +static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { + .queue_rq = nvme_rdma_queue_rq, + .complete = nvme_rdma_complete_rq, + .init_request = nvme_rdma_init_request, + .exit_request = nvme_rdma_exit_request, + .init_hctx = nvme_rdma_init_admin_hctx, + .timeout = nvme_rdma_timeout, +}; + +static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) +{ + nvme_rdma_teardown_io_queues(ctrl, shutdown); + nvme_quiesce_admin_queue(&ctrl->ctrl); + nvme_disable_ctrl(&ctrl->ctrl, shutdown); + nvme_rdma_teardown_admin_queue(ctrl, shutdown); +} + +static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl) +{ + nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true); +} + +static void nvme_rdma_reset_ctrl_work(struct work_struct *work) +{ + struct nvme_rdma_ctrl *ctrl = + container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); + + nvme_stop_ctrl(&ctrl->ctrl); + nvme_rdma_shutdown_ctrl(ctrl, false); + + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + /* state change failure should never happen */ + WARN_ON_ONCE(1); + return; + } + + if (nvme_rdma_setup_ctrl(ctrl, false)) + goto out_fail; + + return; + +out_fail: + ++ctrl->ctrl.nr_reconnects; + nvme_rdma_reconnect_or_remove(ctrl); +} + +static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { + .name = "rdma", + .module = THIS_MODULE, + .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED, + .reg_read32 = nvmf_reg_read32, + .reg_read64 = nvmf_reg_read64, + .reg_write32 = nvmf_reg_write32, + .free_ctrl = nvme_rdma_free_ctrl, + .submit_async_event = nvme_rdma_submit_async_event, + .delete_ctrl = nvme_rdma_delete_ctrl, + .get_address = nvmf_get_address, + .stop_ctrl = nvme_rdma_stop_ctrl, +}; + +/* + * Fails a connection request if it matches an existing controller + * (association) with the same tuple: + * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN> + * + * if local address is not specified in the request, it will match an + * existing controller with all the other parameters the same and no + * local port address specified as well. + * + * The ports don't need to be compared as they are intrinsically + * already matched by the port pointers supplied. 
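+ * The actual tuple comparison is done by nvmf_ip_options_match().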
+ */ +static bool +nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts) +{ + struct nvme_rdma_ctrl *ctrl; + bool found = false; + + mutex_lock(&nvme_rdma_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { + found = nvmf_ip_options_match(&ctrl->ctrl, opts); + if (found) + break; + } + mutex_unlock(&nvme_rdma_ctrl_mutex); + + return found; +} + +static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, + struct nvmf_ctrl_options *opts) +{ + struct nvme_rdma_ctrl *ctrl; + int ret; + bool changed; + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return ERR_PTR(-ENOMEM); + ctrl->ctrl.opts = opts; + INIT_LIST_HEAD(&ctrl->list); + + if (!(opts->mask & NVMF_OPT_TRSVCID)) { + opts->trsvcid = + kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL); + if (!opts->trsvcid) { + ret = -ENOMEM; + goto out_free_ctrl; + } + opts->mask |= NVMF_OPT_TRSVCID; + } + + ret = inet_pton_with_scope(&init_net, AF_UNSPEC, + opts->traddr, opts->trsvcid, &ctrl->addr); + if (ret) { + pr_err("malformed address passed: %s:%s\n", + opts->traddr, opts->trsvcid); + goto out_free_ctrl; + } + + if (opts->mask & NVMF_OPT_HOST_TRADDR) { + ret = inet_pton_with_scope(&init_net, AF_UNSPEC, + opts->host_traddr, NULL, &ctrl->src_addr); + if (ret) { + pr_err("malformed src address passed: %s\n", + opts->host_traddr); + goto out_free_ctrl; + } + } + + if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) { + ret = -EALREADY; + goto out_free_ctrl; + } + + INIT_DELAYED_WORK(&ctrl->reconnect_work, + nvme_rdma_reconnect_ctrl_work); + INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); + INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); + + ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + + opts->nr_poll_queues + 1; + ctrl->ctrl.sqsize = opts->queue_size - 1; + ctrl->ctrl.kato = opts->kato; + + ret = -ENOMEM; + ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), + GFP_KERNEL); + if (!ctrl->queues) + goto out_free_ctrl; + + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, + 0 /* no quirks, we're perfect! 
*/); + if (ret) + goto out_kfree_queues; + + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); + WARN_ON_ONCE(!changed); + + ret = nvme_rdma_setup_ctrl(ctrl, true); + if (ret) + goto out_uninit_ctrl; + + dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", + nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); + + mutex_lock(&nvme_rdma_ctrl_mutex); + list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); + mutex_unlock(&nvme_rdma_ctrl_mutex); + + return &ctrl->ctrl; + +out_uninit_ctrl: + nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); + if (ret > 0) + ret = -EIO; + return ERR_PTR(ret); +out_kfree_queues: + kfree(ctrl->queues); +out_free_ctrl: + kfree(ctrl); + return ERR_PTR(ret); +} + +static struct nvmf_transport_ops nvme_rdma_transport = { + .name = "rdma", + .module = THIS_MODULE, + .required_opts = NVMF_OPT_TRADDR, + .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY | + NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | + NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | + NVMF_OPT_TOS, + .create_ctrl = nvme_rdma_create_ctrl, +}; + +static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data) +{ + struct nvme_rdma_ctrl *ctrl; + struct nvme_rdma_device *ndev; + bool found = false; + + mutex_lock(&device_list_mutex); + list_for_each_entry(ndev, &device_list, entry) { + if (ndev->dev == ib_device) { + found = true; + break; + } + } + mutex_unlock(&device_list_mutex); + + if (!found) + return; + + /* Delete all controllers using this device */ + mutex_lock(&nvme_rdma_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { + if (ctrl->device->dev != ib_device) + continue; + nvme_delete_ctrl(&ctrl->ctrl); + } + mutex_unlock(&nvme_rdma_ctrl_mutex); + + flush_workqueue(nvme_delete_wq); +} + +static struct ib_client nvme_rdma_ib_client = { + .name = "nvme_rdma", + .remove = nvme_rdma_remove_one +}; + +static int __init nvme_rdma_init_module(void) +{ + int ret; + + ret = ib_register_client(&nvme_rdma_ib_client); + if (ret) + return ret; + + ret = nvmf_register_transport(&nvme_rdma_transport); + if (ret) + goto err_unreg_client; + + return 0; + +err_unreg_client: + ib_unregister_client(&nvme_rdma_ib_client); + return ret; +} + +static void __exit nvme_rdma_cleanup_module(void) +{ + struct nvme_rdma_ctrl *ctrl; + + nvmf_unregister_transport(&nvme_rdma_transport); + ib_unregister_client(&nvme_rdma_ib_client); + + mutex_lock(&nvme_rdma_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) + nvme_delete_ctrl(&ctrl->ctrl); + mutex_unlock(&nvme_rdma_ctrl_mutex); + flush_workqueue(nvme_delete_wq); +} + +module_init(nvme_rdma_init_module); +module_exit(nvme_rdma_cleanup_module); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c new file mode 100644 index 0000000000..212e1b05d2 --- /dev/null +++ b/drivers/nvme/host/sysfs.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sysfs interface for the NVMe core driver. + * + * Copyright (c) 2011-2014, Intel Corporation. 
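+ *
+ * Exposes controller, namespace and subsystem attributes to user space,
+ * e.g. reading /sys/class/nvme/nvme0/state reports the controller state
+ * ("live", "resetting", "connecting", ...).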
+ */ + +#include <linux/nvme-auth.h> + +#include "nvme.h" +#include "fabrics.h" + +static ssize_t nvme_sysfs_reset(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + int ret; + + ret = nvme_reset_ctrl_sync(ctrl); + if (ret < 0) + return ret; + return count; +} +static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); + +static ssize_t nvme_sysfs_rescan(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + nvme_queue_scan(ctrl); + return count; +} +static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); + +static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + + if (disk->fops == &nvme_bdev_ops) + return nvme_get_ns_from_dev(dev)->head; + else + return disk->private_data; +} + +static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + struct nvme_ns_ids *ids = &head->ids; + struct nvme_subsystem *subsys = head->subsys; + int serial_len = sizeof(subsys->serial); + int model_len = sizeof(subsys->model); + + if (!uuid_is_null(&ids->uuid)) + return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); + + if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); + + if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); + + while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || + subsys->serial[serial_len - 1] == '\0')) + serial_len--; + while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || + subsys->model[model_len - 1] == '\0')) + model_len--; + + return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, + serial_len, subsys->serial, model_len, subsys->model, + head->ns_id); +} +static DEVICE_ATTR_RO(wwid); + +static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); +} +static DEVICE_ATTR_RO(nguid); + +static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; + + /* For backward compatibility expose the NGUID to userspace if + * we have no UUID set + */ + if (uuid_is_null(&ids->uuid)) { + dev_warn_once(dev, + "No UUID available providing old NGUID\n"); + return sysfs_emit(buf, "%pU\n", ids->nguid); + } + return sysfs_emit(buf, "%pU\n", &ids->uuid); +} +static DEVICE_ATTR_RO(uuid); + +static ssize_t eui_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); +} +static DEVICE_ATTR_RO(eui); + +static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); +} +static DEVICE_ATTR_RO(nsid); + +static struct attribute *nvme_ns_id_attrs[] = { + &dev_attr_wwid.attr, + &dev_attr_uuid.attr, + &dev_attr_nguid.attr, + &dev_attr_eui.attr, + &dev_attr_nsid.attr, +#ifdef CONFIG_NVME_MULTIPATH + &dev_attr_ana_grpid.attr, + &dev_attr_ana_state.attr, +#endif + NULL, +}; + +static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvme_ns_ids *ids = 
&dev_to_ns_head(dev)->ids; + + if (a == &dev_attr_uuid.attr) { + if (uuid_is_null(&ids->uuid) && + !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + return 0; + } + if (a == &dev_attr_nguid.attr) { + if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + return 0; + } + if (a == &dev_attr_eui.attr) { + if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + return 0; + } +#ifdef CONFIG_NVME_MULTIPATH + if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { + if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ + return 0; + if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) + return 0; + } +#endif + return a->mode; +} + +static const struct attribute_group nvme_ns_id_attr_group = { + .attrs = nvme_ns_id_attrs, + .is_visible = nvme_ns_id_attrs_are_visible, +}; + +const struct attribute_group *nvme_ns_id_attr_groups[] = { + &nvme_ns_id_attr_group, + NULL, +}; + +#define nvme_show_str_function(field) \ +static ssize_t field##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ + return sysfs_emit(buf, "%.*s\n", \ + (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); + +nvme_show_str_function(model); +nvme_show_str_function(serial); +nvme_show_str_function(firmware_rev); + +#define nvme_show_int_function(field) \ +static ssize_t field##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ + return sysfs_emit(buf, "%d\n", ctrl->field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); + +nvme_show_int_function(cntlid); +nvme_show_int_function(numa_node); +nvme_show_int_function(queue_count); +nvme_show_int_function(sqsize); +nvme_show_int_function(kato); + +static ssize_t nvme_sysfs_delete(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) + return -EBUSY; + + if (device_remove_file_self(dev, attr)) + nvme_delete_ctrl_sync(ctrl); + return count; +} +static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); + +static ssize_t nvme_sysfs_show_transport(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ctrl->ops->name); +} +static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); + +static ssize_t nvme_sysfs_show_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + static const char *const state_name[] = { + [NVME_CTRL_NEW] = "new", + [NVME_CTRL_LIVE] = "live", + [NVME_CTRL_RESETTING] = "resetting", + [NVME_CTRL_CONNECTING] = "connecting", + [NVME_CTRL_DELETING] = "deleting", + [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", + [NVME_CTRL_DEAD] = "dead", + }; + + if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && + state_name[ctrl->state]) + return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); + + return sysfs_emit(buf, "unknown state\n"); +} + +static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); + +static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); +} +static DEVICE_ATTR(subsysnqn, S_IRUGO, 
nvme_sysfs_show_subsysnqn, NULL); + +static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); +} +static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); + +static ssize_t nvme_sysfs_show_hostid(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); +} +static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); + +static ssize_t nvme_sysfs_show_address(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); +} +static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); + +static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (ctrl->opts->max_reconnects == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", + opts->max_reconnects * opts->reconnect_delay); +} + +static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + int ctrl_loss_tmo, err; + + err = kstrtoint(buf, 10, &ctrl_loss_tmo); + if (err) + return -EINVAL; + + if (ctrl_loss_tmo < 0) + opts->max_reconnects = -1; + else + opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, + opts->reconnect_delay); + return count; +} +static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, + nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); + +static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->opts->reconnect_delay == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); +} + +static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + unsigned int v; + int err; + + err = kstrtou32(buf, 10, &v); + if (err) + return err; + + ctrl->opts->reconnect_delay = v; + return count; +} +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, + nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); + +static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->opts->fast_io_fail_tmo == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); +} + +static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + int fast_io_fail_tmo, err; + + err = kstrtoint(buf, 10, &fast_io_fail_tmo); + if (err) + return -EINVAL; + + if (fast_io_fail_tmo < 0) + opts->fast_io_fail_tmo = -1; + else + opts->fast_io_fail_tmo = fast_io_fail_tmo; + return count; +} +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); + +static 
ssize_t cntrltype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const char * const type[] = { + [NVME_CTRL_IO] = "io\n", + [NVME_CTRL_DISC] = "discovery\n", + [NVME_CTRL_ADMIN] = "admin\n", + }; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) + return sysfs_emit(buf, "reserved\n"); + + return sysfs_emit(buf, type[ctrl->cntrltype]); +} +static DEVICE_ATTR_RO(cntrltype); + +static ssize_t dctype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const char * const type[] = { + [NVME_DCTYPE_NOT_REPORTED] = "none\n", + [NVME_DCTYPE_DDC] = "ddc\n", + [NVME_DCTYPE_CDC] = "cdc\n", + }; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) + return sysfs_emit(buf, "reserved\n"); + + return sysfs_emit(buf, type[ctrl->dctype]); +} +static DEVICE_ATTR_RO(dctype); + +#ifdef CONFIG_NVME_AUTH +static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (!opts->dhchap_secret) + return sysfs_emit(buf, "none\n"); + return sysfs_emit(buf, "%s\n", opts->dhchap_secret); +} + +static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + char *dhchap_secret; + + if (!ctrl->opts->dhchap_secret) + return -EINVAL; + if (count < 7) + return -EINVAL; + if (memcmp(buf, "DHHC-1:", 7)) + return -EINVAL; + + dhchap_secret = kzalloc(count + 1, GFP_KERNEL); + if (!dhchap_secret) + return -ENOMEM; + memcpy(dhchap_secret, buf, count); + nvme_auth_stop(ctrl); + if (strcmp(dhchap_secret, opts->dhchap_secret)) { + struct nvme_dhchap_key *key, *host_key; + int ret; + + ret = nvme_auth_generate_key(dhchap_secret, &key); + if (ret) { + kfree(dhchap_secret); + return ret; + } + kfree(opts->dhchap_secret); + opts->dhchap_secret = dhchap_secret; + host_key = ctrl->host_key; + mutex_lock(&ctrl->dhchap_auth_mutex); + ctrl->host_key = key; + mutex_unlock(&ctrl->dhchap_auth_mutex); + nvme_auth_free_key(host_key); + } else + kfree(dhchap_secret); + /* Start re-authentication */ + dev_info(ctrl->device, "re-authenticating controller\n"); + queue_work(nvme_wq, &ctrl->dhchap_auth_work); + + return count; +} + +static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, + nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); + +static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (!opts->dhchap_ctrl_secret) + return sysfs_emit(buf, "none\n"); + return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret); +} + +static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + char *dhchap_secret; + + if (!ctrl->opts->dhchap_ctrl_secret) + return -EINVAL; + if (count < 7) + return -EINVAL; + if (memcmp(buf, "DHHC-1:", 7)) + return -EINVAL; + + dhchap_secret = kzalloc(count + 1, GFP_KERNEL); + if (!dhchap_secret) + return -ENOMEM; + memcpy(dhchap_secret, buf, count); + nvme_auth_stop(ctrl); + if 
(strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { + struct nvme_dhchap_key *key, *ctrl_key; + int ret; + + ret = nvme_auth_generate_key(dhchap_secret, &key); + if (ret) { + kfree(dhchap_secret); + return ret; + } + kfree(opts->dhchap_ctrl_secret); + opts->dhchap_ctrl_secret = dhchap_secret; + ctrl_key = ctrl->ctrl_key; + mutex_lock(&ctrl->dhchap_auth_mutex); + ctrl->ctrl_key = key; + mutex_unlock(&ctrl->dhchap_auth_mutex); + nvme_auth_free_key(ctrl_key); + } else + kfree(dhchap_secret); + /* Start re-authentication */ + dev_info(ctrl->device, "re-authenticating controller\n"); + queue_work(nvme_wq, &ctrl->dhchap_auth_work); + + return count; +} + +static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, + nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); +#endif + +static struct attribute *nvme_dev_attrs[] = { + &dev_attr_reset_controller.attr, + &dev_attr_rescan_controller.attr, + &dev_attr_model.attr, + &dev_attr_serial.attr, + &dev_attr_firmware_rev.attr, + &dev_attr_cntlid.attr, + &dev_attr_delete_controller.attr, + &dev_attr_transport.attr, + &dev_attr_subsysnqn.attr, + &dev_attr_address.attr, + &dev_attr_state.attr, + &dev_attr_numa_node.attr, + &dev_attr_queue_count.attr, + &dev_attr_sqsize.attr, + &dev_attr_hostnqn.attr, + &dev_attr_hostid.attr, + &dev_attr_ctrl_loss_tmo.attr, + &dev_attr_reconnect_delay.attr, + &dev_attr_fast_io_fail_tmo.attr, + &dev_attr_kato.attr, + &dev_attr_cntrltype.attr, + &dev_attr_dctype.attr, +#ifdef CONFIG_NVME_AUTH + &dev_attr_dhchap_secret.attr, + &dev_attr_dhchap_ctrl_secret.attr, +#endif + NULL +}; + +static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) + return 0; + if (a == &dev_attr_address.attr && !ctrl->ops->get_address) + return 0; + if (a == &dev_attr_hostnqn.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_hostid.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) + return 0; +#ifdef CONFIG_NVME_AUTH + if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) + return 0; +#endif + + return a->mode; +} + +const struct attribute_group nvme_dev_attrs_group = { + .attrs = nvme_dev_attrs, + .is_visible = nvme_dev_attrs_are_visible, +}; +EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); + +const struct attribute_group *nvme_dev_attr_groups[] = { + &nvme_dev_attrs_group, + NULL, +}; + +#define SUBSYS_ATTR_RO(_name, _mode, _show) \ + struct device_attribute subsys_attr_##_name = \ + __ATTR(_name, _mode, _show, NULL) + +static ssize_t nvme_subsys_show_nqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + return sysfs_emit(buf, "%s\n", subsys->subnqn); +} +static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); + +static ssize_t nvme_subsys_show_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + switch (subsys->subtype) { + case NVME_NQN_DISC: + return sysfs_emit(buf, "discovery\n"); + case NVME_NQN_NVME: + return sysfs_emit(buf, "nvm\n"); + 
default: + return sysfs_emit(buf, "reserved\n"); + } +} +static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type); + +#define nvme_subsys_show_str_function(field) \ +static ssize_t subsys_##field##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct nvme_subsystem *subsys = \ + container_of(dev, struct nvme_subsystem, dev); \ + return sysfs_emit(buf, "%.*s\n", \ + (int)sizeof(subsys->field), subsys->field); \ +} \ +static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); + +nvme_subsys_show_str_function(model); +nvme_subsys_show_str_function(serial); +nvme_subsys_show_str_function(firmware_rev); + +static struct attribute *nvme_subsys_attrs[] = { + &subsys_attr_model.attr, + &subsys_attr_serial.attr, + &subsys_attr_firmware_rev.attr, + &subsys_attr_subsysnqn.attr, + &subsys_attr_subsystype.attr, +#ifdef CONFIG_NVME_MULTIPATH + &subsys_attr_iopolicy.attr, +#endif + NULL, +}; + +static const struct attribute_group nvme_subsys_attrs_group = { + .attrs = nvme_subsys_attrs, +}; + +const struct attribute_group *nvme_subsys_attrs_groups[] = { + &nvme_subsys_attrs_group, + NULL, +}; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c new file mode 100644 index 0000000000..f1d62d7442 --- /dev/null +++ b/drivers/nvme/host/tcp.c @@ -0,0 +1,2673 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics TCP host. + * Copyright (c) 2018 Lightbits Labs. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/nvme-tcp.h> +#include <net/sock.h> +#include <net/tcp.h> +#include <linux/blk-mq.h> +#include <crypto/hash.h> +#include <net/busy_poll.h> +#include <trace/events/sock.h> + +#include "nvme.h" +#include "fabrics.h" + +struct nvme_tcp_queue; + +/* Define the socket priority to use for connections were it is desirable + * that the NIC consider performing optimized packet processing or filtering. + * A non-zero value being sufficient to indicate general consideration of any + * possible optimization. Making it a module param allows for alternative + * values that may be unique for some NIC implementations. + */ +static int so_priority; +module_param(so_priority, int, 0644); +MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* lockdep can detect a circular dependency of the form + * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock + * because dependencies are tracked for both nvme-tcp and user contexts. Using + * a separate class prevents lockdep from conflating nvme-tcp socket use with + * user-space socket API use. 
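+ * The reclassification below gives NVMe-TCP sockets their own lock
+ * class names ("sk_lock-AF_INET-NVME" and friends), so lockdep tracks
+ * these chains independently of ordinary user-space sockets.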
+ */ +static struct lock_class_key nvme_tcp_sk_key[2]; +static struct lock_class_key nvme_tcp_slock_key[2]; + +static void nvme_tcp_reclassify_socket(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) + return; + + switch (sk->sk_family) { + case AF_INET: + sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", + &nvme_tcp_slock_key[0], + "sk_lock-AF_INET-NVME", + &nvme_tcp_sk_key[0]); + break; + case AF_INET6: + sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", + &nvme_tcp_slock_key[1], + "sk_lock-AF_INET6-NVME", + &nvme_tcp_sk_key[1]); + break; + default: + WARN_ON_ONCE(1); + } +} +#else +static void nvme_tcp_reclassify_socket(struct socket *sock) { } +#endif + +enum nvme_tcp_send_state { + NVME_TCP_SEND_CMD_PDU = 0, + NVME_TCP_SEND_H2C_PDU, + NVME_TCP_SEND_DATA, + NVME_TCP_SEND_DDGST, +}; + +struct nvme_tcp_request { + struct nvme_request req; + void *pdu; + struct nvme_tcp_queue *queue; + u32 data_len; + u32 pdu_len; + u32 pdu_sent; + u32 h2cdata_left; + u32 h2cdata_offset; + u16 ttag; + __le16 status; + struct list_head entry; + struct llist_node lentry; + __le32 ddgst; + + struct bio *curr_bio; + struct iov_iter iter; + + /* send state */ + size_t offset; + size_t data_sent; + enum nvme_tcp_send_state state; +}; + +enum nvme_tcp_queue_flags { + NVME_TCP_Q_ALLOCATED = 0, + NVME_TCP_Q_LIVE = 1, + NVME_TCP_Q_POLLING = 2, +}; + +enum nvme_tcp_recv_state { + NVME_TCP_RECV_PDU = 0, + NVME_TCP_RECV_DATA, + NVME_TCP_RECV_DDGST, +}; + +struct nvme_tcp_ctrl; +struct nvme_tcp_queue { + struct socket *sock; + struct work_struct io_work; + int io_cpu; + + struct mutex queue_lock; + struct mutex send_mutex; + struct llist_head req_list; + struct list_head send_list; + + /* recv state */ + void *pdu; + int pdu_remaining; + int pdu_offset; + size_t data_remaining; + size_t ddgst_remaining; + unsigned int nr_cqe; + + /* send state */ + struct nvme_tcp_request *request; + + u32 maxh2cdata; + size_t cmnd_capsule_len; + struct nvme_tcp_ctrl *ctrl; + unsigned long flags; + bool rd_enabled; + + bool hdr_digest; + bool data_digest; + struct ahash_request *rcv_hash; + struct ahash_request *snd_hash; + __le32 exp_ddgst; + __le32 recv_ddgst; + + struct page_frag_cache pf_cache; + + void (*state_change)(struct sock *); + void (*data_ready)(struct sock *); + void (*write_space)(struct sock *); +}; + +struct nvme_tcp_ctrl { + /* read only in the hot path */ + struct nvme_tcp_queue *queues; + struct blk_mq_tag_set tag_set; + + /* other member variables */ + struct list_head list; + struct blk_mq_tag_set admin_tag_set; + struct sockaddr_storage addr; + struct sockaddr_storage src_addr; + struct nvme_ctrl ctrl; + + struct work_struct err_work; + struct delayed_work connect_work; + struct nvme_tcp_request async_req; + u32 io_queues[HCTX_MAX_TYPES]; +}; + +static LIST_HEAD(nvme_tcp_ctrl_list); +static DEFINE_MUTEX(nvme_tcp_ctrl_mutex); +static struct workqueue_struct *nvme_tcp_wq; +static const struct blk_mq_ops nvme_tcp_mq_ops; +static const struct blk_mq_ops nvme_tcp_admin_mq_ops; +static int nvme_tcp_try_send(struct nvme_tcp_queue *queue); + +static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); +} + +static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) +{ + return queue - queue->ctrl->queues; +} + +static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) +{ + u32 queue_idx = nvme_tcp_queue_id(queue); + + if (queue_idx == 0) + return 
queue->ctrl->admin_tag_set.tags[queue_idx]; + return queue->ctrl->tag_set.tags[queue_idx - 1]; +} + +static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) +{ + return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; +} + +static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) +{ + return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; +} + +static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req) +{ + return req->pdu; +} + +static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req) +{ + /* use the pdu space in the back for the data pdu */ + return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - + sizeof(struct nvme_tcp_data_pdu); +} + +static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req) +{ + if (nvme_is_fabrics(req->req.cmd)) + return NVME_TCP_ADMIN_CCSZ; + return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); +} + +static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req) +{ + return req == &req->queue->ctrl->async_req; +} + +static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req) +{ + struct request *rq; + + if (unlikely(nvme_tcp_async_req(req))) + return false; /* async events don't have a request */ + + rq = blk_mq_rq_from_pdu(req); + + return rq_data_dir(rq) == WRITE && req->data_len && + req->data_len <= nvme_tcp_inline_data_size(req); +} + +static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req) +{ + return req->iter.bvec->bv_page; +} + +static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req) +{ + return req->iter.bvec->bv_offset + req->iter.iov_offset; +} + +static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req) +{ + return min_t(size_t, iov_iter_single_seg_count(&req->iter), + req->pdu_len - req->pdu_sent); +} + +static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req) +{ + return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ? + req->pdu_len - req->pdu_sent : 0; +} + +static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req, + int len) +{ + return nvme_tcp_pdu_data_left(req) <= len; +} + +static void nvme_tcp_init_iter(struct nvme_tcp_request *req, + unsigned int dir) +{ + struct request *rq = blk_mq_rq_from_pdu(req); + struct bio_vec *vec; + unsigned int size; + int nr_bvec; + size_t offset; + + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { + vec = &rq->special_vec; + nr_bvec = 1; + size = blk_rq_payload_bytes(rq); + offset = 0; + } else { + struct bio *bio = req->curr_bio; + struct bvec_iter bi; + struct bio_vec bv; + + vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); + nr_bvec = 0; + bio_for_each_bvec(bv, bio, bi) { + nr_bvec++; + } + size = bio->bi_iter.bi_size; + offset = bio->bi_iter.bi_bvec_done; + } + + iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); + req->iter.iov_offset = offset; +} + +static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req, + int len) +{ + req->data_sent += len; + req->pdu_sent += len; + iov_iter_advance(&req->iter, len); + if (!iov_iter_count(&req->iter) && + req->data_sent < req->data_len) { + req->curr_bio = req->curr_bio->bi_next; + nvme_tcp_init_iter(req, ITER_SOURCE); + } +} + +static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) +{ + int ret; + + /* drain the send queue as much as we can... 
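+	 * A positive return from nvme_tcp_try_send() means a send was made
+	 * and more may be pending, zero means there is currently nothing
+	 * left to send, and a negative value means the request was failed,
+	 * so simply loop while progress is being made.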
*/ + do { + ret = nvme_tcp_try_send(queue); + } while (ret > 0); +} + +static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) +{ + return !list_empty(&queue->send_list) || + !llist_empty(&queue->req_list); +} + +static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, + bool sync, bool last) +{ + struct nvme_tcp_queue *queue = req->queue; + bool empty; + + empty = llist_add(&req->lentry, &queue->req_list) && + list_empty(&queue->send_list) && !queue->request; + + /* + * if we're the first on the send_list and we can try to send + * directly, otherwise queue io_work. Also, only do that if we + * are on the same cpu, so we don't introduce contention. + */ + if (queue->io_cpu == raw_smp_processor_id() && + sync && empty && mutex_trylock(&queue->send_mutex)) { + nvme_tcp_send_all(queue); + mutex_unlock(&queue->send_mutex); + } + + if (last && nvme_tcp_queue_more(queue)) + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); +} + +static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_request *req; + struct llist_node *node; + + for (node = llist_del_all(&queue->req_list); node; node = node->next) { + req = llist_entry(node, struct nvme_tcp_request, lentry); + list_add(&req->entry, &queue->send_list); + } +} + +static inline struct nvme_tcp_request * +nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_request *req; + + req = list_first_entry_or_null(&queue->send_list, + struct nvme_tcp_request, entry); + if (!req) { + nvme_tcp_process_req_list(queue); + req = list_first_entry_or_null(&queue->send_list, + struct nvme_tcp_request, entry); + if (unlikely(!req)) + return NULL; + } + + list_del(&req->entry); + return req; +} + +static inline void nvme_tcp_ddgst_final(struct ahash_request *hash, + __le32 *dgst) +{ + ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0); + crypto_ahash_final(hash); +} + +static inline void nvme_tcp_ddgst_update(struct ahash_request *hash, + struct page *page, off_t off, size_t len) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, len, off); + ahash_request_set_crypt(hash, &sg, NULL, len); + crypto_ahash_update(hash); +} + +static inline void nvme_tcp_hdgst(struct ahash_request *hash, + void *pdu, size_t len) +{ + struct scatterlist sg; + + sg_init_one(&sg, pdu, len); + ahash_request_set_crypt(hash, &sg, pdu + len, len); + crypto_ahash_digest(hash); +} + +static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, + void *pdu, size_t pdu_len) +{ + struct nvme_tcp_hdr *hdr = pdu; + __le32 recv_digest; + __le32 exp_digest; + + if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { + dev_err(queue->ctrl->ctrl.device, + "queue %d: header digest flag is cleared\n", + nvme_tcp_queue_id(queue)); + return -EPROTO; + } + + recv_digest = *(__le32 *)(pdu + hdr->hlen); + nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); + exp_digest = *(__le32 *)(pdu + hdr->hlen); + if (recv_digest != exp_digest) { + dev_err(queue->ctrl->ctrl.device, + "header digest error: recv %#x expected %#x\n", + le32_to_cpu(recv_digest), le32_to_cpu(exp_digest)); + return -EIO; + } + + return 0; +} + +static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) +{ + struct nvme_tcp_hdr *hdr = pdu; + u8 digest_len = nvme_tcp_hdgst_len(queue); + u32 len; + + len = le32_to_cpu(hdr->plen) - hdr->hlen - + ((hdr->flags & NVME_TCP_F_HDGST) ? 
digest_len : 0); + + if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { + dev_err(queue->ctrl->ctrl.device, + "queue %d: data digest flag is cleared\n", + nvme_tcp_queue_id(queue)); + return -EPROTO; + } + crypto_ahash_init(queue->rcv_hash); + + return 0; +} + +static void nvme_tcp_exit_request(struct blk_mq_tag_set *set, + struct request *rq, unsigned int hctx_idx) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + + page_frag_free(req->pdu); +} + +static int nvme_tcp_init_request(struct blk_mq_tag_set *set, + struct request *rq, unsigned int hctx_idx, + unsigned int numa_node) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_tcp_cmd_pdu *pdu; + int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; + struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; + u8 hdgst = nvme_tcp_hdgst_len(queue); + + req->pdu = page_frag_alloc(&queue->pf_cache, + sizeof(struct nvme_tcp_cmd_pdu) + hdgst, + GFP_KERNEL | __GFP_ZERO); + if (!req->pdu) + return -ENOMEM; + + pdu = req->pdu; + req->queue = queue; + nvme_req(rq)->ctrl = &ctrl->ctrl; + nvme_req(rq)->cmd = &pdu->cmd; + + return 0; +} + +static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); + struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; + + hctx->driver_data = queue; + return 0; +} + +static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); + struct nvme_tcp_queue *queue = &ctrl->queues[0]; + + hctx->driver_data = queue; + return 0; +} + +static enum nvme_tcp_recv_state +nvme_tcp_recv_state(struct nvme_tcp_queue *queue) +{ + return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : + (queue->ddgst_remaining) ? 
NVME_TCP_RECV_DDGST : + NVME_TCP_RECV_DATA; +} + +static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) +{ + queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + + nvme_tcp_hdgst_len(queue); + queue->pdu_offset = 0; + queue->data_remaining = -1; + queue->ddgst_remaining = 0; +} + +static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) +{ + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) + return; + + dev_warn(ctrl->device, "starting error recovery\n"); + queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); +} + +static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, + struct nvme_completion *cqe) +{ + struct nvme_tcp_request *req; + struct request *rq; + + rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); + if (!rq) { + dev_err(queue->ctrl->ctrl.device, + "got bad cqe.command_id %#x on queue %d\n", + cqe->command_id, nvme_tcp_queue_id(queue)); + nvme_tcp_error_recovery(&queue->ctrl->ctrl); + return -EINVAL; + } + + req = blk_mq_rq_to_pdu(rq); + if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) + req->status = cqe->status; + + if (!nvme_try_complete_req(rq, req->status, cqe->result)) + nvme_complete_rq(rq); + queue->nr_cqe++; + + return 0; +} + +static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, + struct nvme_tcp_data_pdu *pdu) +{ + struct request *rq; + + rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); + if (!rq) { + dev_err(queue->ctrl->ctrl.device, + "got bad c2hdata.command_id %#x on queue %d\n", + pdu->command_id, nvme_tcp_queue_id(queue)); + return -ENOENT; + } + + if (!blk_rq_payload_bytes(rq)) { + dev_err(queue->ctrl->ctrl.device, + "queue %d tag %#x unexpected data\n", + nvme_tcp_queue_id(queue), rq->tag); + return -EIO; + } + + queue->data_remaining = le32_to_cpu(pdu->data_length); + + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && + unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { + dev_err(queue->ctrl->ctrl.device, + "queue %d tag %#x SUCCESS set but not last PDU\n", + nvme_tcp_queue_id(queue), rq->tag); + nvme_tcp_error_recovery(&queue->ctrl->ctrl); + return -EPROTO; + } + + return 0; +} + +static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, + struct nvme_tcp_rsp_pdu *pdu) +{ + struct nvme_completion *cqe = &pdu->cqe; + int ret = 0; + + /* + * AEN requests are special as they don't time out and can + * survive any kind of queue freeze and often don't respond to + * aborts. We don't even bother to allocate a struct request + * for them but rather special case them here. 
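+	 * nvme_is_aen_req() recognises them purely from the queue id and
+	 * the command id carried in the completion.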
+ */ + if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), + cqe->command_id))) + nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, + &cqe->result); + else + ret = nvme_tcp_process_nvme_cqe(queue, cqe); + + return ret; +} + +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) +{ + struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req); + struct nvme_tcp_queue *queue = req->queue; + struct request *rq = blk_mq_rq_from_pdu(req); + u32 h2cdata_sent = req->pdu_len; + u8 hdgst = nvme_tcp_hdgst_len(queue); + u8 ddgst = nvme_tcp_ddgst_len(queue); + + req->state = NVME_TCP_SEND_H2C_PDU; + req->offset = 0; + req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); + req->pdu_sent = 0; + req->h2cdata_left -= req->pdu_len; + req->h2cdata_offset += h2cdata_sent; + + memset(data, 0, sizeof(*data)); + data->hdr.type = nvme_tcp_h2c_data; + if (!req->h2cdata_left) + data->hdr.flags = NVME_TCP_F_DATA_LAST; + if (queue->hdr_digest) + data->hdr.flags |= NVME_TCP_F_HDGST; + if (queue->data_digest) + data->hdr.flags |= NVME_TCP_F_DDGST; + data->hdr.hlen = sizeof(*data); + data->hdr.pdo = data->hdr.hlen + hdgst; + data->hdr.plen = + cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); + data->ttag = req->ttag; + data->command_id = nvme_cid(rq); + data->data_offset = cpu_to_le32(req->h2cdata_offset); + data->data_length = cpu_to_le32(req->pdu_len); +} + +static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, + struct nvme_tcp_r2t_pdu *pdu) +{ + struct nvme_tcp_request *req; + struct request *rq; + u32 r2t_length = le32_to_cpu(pdu->r2t_length); + u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); + + rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); + if (!rq) { + dev_err(queue->ctrl->ctrl.device, + "got bad r2t.command_id %#x on queue %d\n", + pdu->command_id, nvme_tcp_queue_id(queue)); + return -ENOENT; + } + req = blk_mq_rq_to_pdu(rq); + + if (unlikely(!r2t_length)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len is %u, probably a bug...\n", + rq->tag, r2t_length); + return -EPROTO; + } + + if (unlikely(req->data_sent + r2t_length > req->data_len)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len %u exceeded data len %u (%zu sent)\n", + rq->tag, r2t_length, req->data_len, req->data_sent); + return -EPROTO; + } + + if (unlikely(r2t_offset < req->data_sent)) { + dev_err(queue->ctrl->ctrl.device, + "req %d unexpected r2t offset %u (expected %zu)\n", + rq->tag, r2t_offset, req->data_sent); + return -EPROTO; + } + + req->pdu_len = 0; + req->h2cdata_left = r2t_length; + req->h2cdata_offset = r2t_offset; + req->ttag = pdu->ttag; + + nvme_tcp_setup_h2c_data_pdu(req); + nvme_tcp_queue_request(req, false, true); + + return 0; +} + +static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, + unsigned int *offset, size_t *len) +{ + struct nvme_tcp_hdr *hdr; + char *pdu = queue->pdu; + size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); + int ret; + + ret = skb_copy_bits(skb, *offset, + &pdu[queue->pdu_offset], rcv_len); + if (unlikely(ret)) + return ret; + + queue->pdu_remaining -= rcv_len; + queue->pdu_offset += rcv_len; + *offset += rcv_len; + *len -= rcv_len; + if (queue->pdu_remaining) + return 0; + + hdr = queue->pdu; + if (queue->hdr_digest) { + ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); + if (unlikely(ret)) + return ret; + } + + + if (queue->data_digest) { + ret = nvme_tcp_check_ddgst(queue, queue->pdu); + if (unlikely(ret)) + return ret; + } + + switch (hdr->type) { + case nvme_tcp_c2h_data: + return 
nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); + case nvme_tcp_rsp: + nvme_tcp_init_recv_ctx(queue); + return nvme_tcp_handle_comp(queue, (void *)queue->pdu); + case nvme_tcp_r2t: + nvme_tcp_init_recv_ctx(queue); + return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); + default: + dev_err(queue->ctrl->ctrl.device, + "unsupported pdu type (%d)\n", hdr->type); + return -EINVAL; + } +} + +static inline void nvme_tcp_end_request(struct request *rq, u16 status) +{ + union nvme_result res = {}; + + if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res)) + nvme_complete_rq(rq); +} + +static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, + unsigned int *offset, size_t *len) +{ + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; + struct request *rq = + nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + + while (true) { + int recv_len, ret; + + recv_len = min_t(size_t, *len, queue->data_remaining); + if (!recv_len) + break; + + if (!iov_iter_count(&req->iter)) { + req->curr_bio = req->curr_bio->bi_next; + + /* + * If we don`t have any bios it means that controller + * sent more data than we requested, hence error + */ + if (!req->curr_bio) { + dev_err(queue->ctrl->ctrl.device, + "queue %d no space in request %#x", + nvme_tcp_queue_id(queue), rq->tag); + nvme_tcp_init_recv_ctx(queue); + return -EIO; + } + nvme_tcp_init_iter(req, ITER_DEST); + } + + /* we can read only from what is left in this bio */ + recv_len = min_t(size_t, recv_len, + iov_iter_count(&req->iter)); + + if (queue->data_digest) + ret = skb_copy_and_hash_datagram_iter(skb, *offset, + &req->iter, recv_len, queue->rcv_hash); + else + ret = skb_copy_datagram_iter(skb, *offset, + &req->iter, recv_len); + if (ret) { + dev_err(queue->ctrl->ctrl.device, + "queue %d failed to copy request %#x data", + nvme_tcp_queue_id(queue), rq->tag); + return ret; + } + + *len -= recv_len; + *offset += recv_len; + queue->data_remaining -= recv_len; + } + + if (!queue->data_remaining) { + if (queue->data_digest) { + nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); + queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; + } else { + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { + nvme_tcp_end_request(rq, + le16_to_cpu(req->status)); + queue->nr_cqe++; + } + nvme_tcp_init_recv_ctx(queue); + } + } + + return 0; +} + +static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, + struct sk_buff *skb, unsigned int *offset, size_t *len) +{ + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; + char *ddgst = (char *)&queue->recv_ddgst; + size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); + off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; + int ret; + + ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len); + if (unlikely(ret)) + return ret; + + queue->ddgst_remaining -= recv_len; + *offset += recv_len; + *len -= recv_len; + if (queue->ddgst_remaining) + return 0; + + if (queue->recv_ddgst != queue->exp_ddgst) { + struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), + pdu->command_id); + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + + req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR); + + dev_err(queue->ctrl->ctrl.device, + "data digest error: recv %#x expected %#x\n", + le32_to_cpu(queue->recv_ddgst), + le32_to_cpu(queue->exp_ddgst)); + } + + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { + struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), + pdu->command_id); + struct nvme_tcp_request *req = 
blk_mq_rq_to_pdu(rq); + + nvme_tcp_end_request(rq, le16_to_cpu(req->status)); + queue->nr_cqe++; + } + + nvme_tcp_init_recv_ctx(queue); + return 0; +} + +static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb, + unsigned int offset, size_t len) +{ + struct nvme_tcp_queue *queue = desc->arg.data; + size_t consumed = len; + int result; + + if (unlikely(!queue->rd_enabled)) + return -EFAULT; + + while (len) { + switch (nvme_tcp_recv_state(queue)) { + case NVME_TCP_RECV_PDU: + result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); + break; + case NVME_TCP_RECV_DATA: + result = nvme_tcp_recv_data(queue, skb, &offset, &len); + break; + case NVME_TCP_RECV_DDGST: + result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); + break; + default: + result = -EFAULT; + } + if (result) { + dev_err(queue->ctrl->ctrl.device, + "receive failed: %d\n", result); + queue->rd_enabled = false; + nvme_tcp_error_recovery(&queue->ctrl->ctrl); + return result; + } + } + + return consumed; +} + +static void nvme_tcp_data_ready(struct sock *sk) +{ + struct nvme_tcp_queue *queue; + + trace_sk_data_ready(sk); + + read_lock_bh(&sk->sk_callback_lock); + queue = sk->sk_user_data; + if (likely(queue && queue->rd_enabled) && + !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); + read_unlock_bh(&sk->sk_callback_lock); +} + +static void nvme_tcp_write_space(struct sock *sk) +{ + struct nvme_tcp_queue *queue; + + read_lock_bh(&sk->sk_callback_lock); + queue = sk->sk_user_data; + if (likely(queue && sk_stream_is_writeable(sk))) { + clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); + } + read_unlock_bh(&sk->sk_callback_lock); +} + +static void nvme_tcp_state_change(struct sock *sk) +{ + struct nvme_tcp_queue *queue; + + read_lock_bh(&sk->sk_callback_lock); + queue = sk->sk_user_data; + if (!queue) + goto done; + + switch (sk->sk_state) { + case TCP_CLOSE: + case TCP_CLOSE_WAIT: + case TCP_LAST_ACK: + case TCP_FIN_WAIT1: + case TCP_FIN_WAIT2: + nvme_tcp_error_recovery(&queue->ctrl->ctrl); + break; + default: + dev_info(queue->ctrl->ctrl.device, + "queue %d socket state %d\n", + nvme_tcp_queue_id(queue), sk->sk_state); + } + + queue->state_change(sk); +done: + read_unlock_bh(&sk->sk_callback_lock); +} + +static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) +{ + queue->request = NULL; +} + +static void nvme_tcp_fail_request(struct nvme_tcp_request *req) +{ + if (nvme_tcp_async_req(req)) { + union nvme_result res = {}; + + nvme_complete_async_event(&req->queue->ctrl->ctrl, + cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); + } else { + nvme_tcp_end_request(blk_mq_rq_from_pdu(req), + NVME_SC_HOST_PATH_ERROR); + } +} + +static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) +{ + struct nvme_tcp_queue *queue = req->queue; + int req_data_len = req->data_len; + u32 h2cdata_left = req->h2cdata_left; + + while (true) { + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, + }; + struct page *page = nvme_tcp_req_cur_page(req); + size_t offset = nvme_tcp_req_cur_offset(req); + size_t len = nvme_tcp_req_cur_length(req); + bool last = nvme_tcp_pdu_last_send(req, len); + int req_data_sent = req->data_sent; + int ret; + + if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) + msg.msg_flags |= MSG_EOR; + else + msg.msg_flags |= MSG_MORE; + + if (!sendpage_ok(page)) + msg.msg_flags &= ~MSG_SPLICE_PAGES; + + bvec_set_page(&bvec, page, len, 
offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); + ret = sock_sendmsg(queue->sock, &msg); + if (ret <= 0) + return ret; + + if (queue->data_digest) + nvme_tcp_ddgst_update(queue->snd_hash, page, + offset, ret); + + /* + * update the request iterator except for the last payload send + * in the request where we don't want to modify it as we may + * compete with the RX path completing the request. + */ + if (req_data_sent + ret < req_data_len) + nvme_tcp_advance_req(req, ret); + + /* fully successful last send in current PDU */ + if (last && ret == len) { + if (queue->data_digest) { + nvme_tcp_ddgst_final(queue->snd_hash, + &req->ddgst); + req->state = NVME_TCP_SEND_DDGST; + req->offset = 0; + } else { + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); + } + return 1; + } + } + return -EAGAIN; +} + +static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) +{ + struct nvme_tcp_queue *queue = req->queue; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; + bool inline_data = nvme_tcp_has_inline_data(req); + u8 hdgst = nvme_tcp_hdgst_len(queue); + int len = sizeof(*pdu) + hdgst - req->offset; + int ret; + + if (inline_data || nvme_tcp_queue_more(queue)) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags |= MSG_EOR; + + if (queue->hdr_digest && !req->offset) + nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + + bvec_set_virt(&bvec, (void *)pdu + req->offset, len); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); + ret = sock_sendmsg(queue->sock, &msg); + if (unlikely(ret <= 0)) + return ret; + + len -= ret; + if (!len) { + if (inline_data) { + req->state = NVME_TCP_SEND_DATA; + if (queue->data_digest) + crypto_ahash_init(queue->snd_hash); + } else { + nvme_tcp_done_send_req(queue); + } + return 1; + } + req->offset += ret; + + return -EAGAIN; +} + +static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) +{ + struct nvme_tcp_queue *queue = req->queue; + struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req); + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, }; + u8 hdgst = nvme_tcp_hdgst_len(queue); + int len = sizeof(*pdu) - req->offset + hdgst; + int ret; + + if (queue->hdr_digest && !req->offset) + nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + + if (!req->h2cdata_left) + msg.msg_flags |= MSG_SPLICE_PAGES; + + bvec_set_virt(&bvec, (void *)pdu + req->offset, len); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); + ret = sock_sendmsg(queue->sock, &msg); + if (unlikely(ret <= 0)) + return ret; + + len -= ret; + if (!len) { + req->state = NVME_TCP_SEND_DATA; + if (queue->data_digest) + crypto_ahash_init(queue->snd_hash); + return 1; + } + req->offset += ret; + + return -EAGAIN; +} + +static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) +{ + struct nvme_tcp_queue *queue = req->queue; + size_t offset = req->offset; + u32 h2cdata_left = req->h2cdata_left; + int ret; + struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; + struct kvec iov = { + .iov_base = (u8 *)&req->ddgst + req->offset, + .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset + }; + + if (nvme_tcp_queue_more(queue)) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags |= MSG_EOR; + + ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); + if (unlikely(ret <= 0)) + return ret; + + if (offset + ret == NVME_TCP_DIGEST_LENGTH) { + if (h2cdata_left) + 
nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); + return 1; + } + + req->offset += ret; + return -EAGAIN; +} + +static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_request *req; + unsigned int noreclaim_flag; + int ret = 1; + + if (!queue->request) { + queue->request = nvme_tcp_fetch_request(queue); + if (!queue->request) + return 0; + } + req = queue->request; + + noreclaim_flag = memalloc_noreclaim_save(); + if (req->state == NVME_TCP_SEND_CMD_PDU) { + ret = nvme_tcp_try_send_cmd_pdu(req); + if (ret <= 0) + goto done; + if (!nvme_tcp_has_inline_data(req)) + goto out; + } + + if (req->state == NVME_TCP_SEND_H2C_PDU) { + ret = nvme_tcp_try_send_data_pdu(req); + if (ret <= 0) + goto done; + } + + if (req->state == NVME_TCP_SEND_DATA) { + ret = nvme_tcp_try_send_data(req); + if (ret <= 0) + goto done; + } + + if (req->state == NVME_TCP_SEND_DDGST) + ret = nvme_tcp_try_send_ddgst(req); +done: + if (ret == -EAGAIN) { + ret = 0; + } else if (ret < 0) { + dev_err(queue->ctrl->ctrl.device, + "failed to send request %d\n", ret); + nvme_tcp_fail_request(queue->request); + nvme_tcp_done_send_req(queue); + } +out: + memalloc_noreclaim_restore(noreclaim_flag); + return ret; +} + +static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) +{ + struct socket *sock = queue->sock; + struct sock *sk = sock->sk; + read_descriptor_t rd_desc; + int consumed; + + rd_desc.arg.data = queue; + rd_desc.count = 1; + lock_sock(sk); + queue->nr_cqe = 0; + consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); + release_sock(sk); + return consumed; +} + +static void nvme_tcp_io_work(struct work_struct *w) +{ + struct nvme_tcp_queue *queue = + container_of(w, struct nvme_tcp_queue, io_work); + unsigned long deadline = jiffies + msecs_to_jiffies(1); + + do { + bool pending = false; + int result; + + if (mutex_trylock(&queue->send_mutex)) { + result = nvme_tcp_try_send(queue); + mutex_unlock(&queue->send_mutex); + if (result > 0) + pending = true; + else if (unlikely(result < 0)) + break; + } + + result = nvme_tcp_try_recv(queue); + if (result > 0) + pending = true; + else if (unlikely(result < 0)) + return; + + if (!pending || !queue->rd_enabled) + return; + + } while (!time_after(jiffies, deadline)); /* quota is exhausted */ + + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); +} + +static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); + + ahash_request_free(queue->rcv_hash); + ahash_request_free(queue->snd_hash); + crypto_free_ahash(tfm); +} + +static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) +{ + struct crypto_ahash *tfm; + + tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); + if (!queue->snd_hash) + goto free_tfm; + ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); + + queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); + if (!queue->rcv_hash) + goto free_snd_hash; + ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); + + return 0; +free_snd_hash: + ahash_request_free(queue->snd_hash); +free_tfm: + crypto_free_ahash(tfm); + return -ENOMEM; +} + +static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) +{ + struct nvme_tcp_request *async = &ctrl->async_req; + + page_frag_free(async->pdu); +} + +static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) +{ + struct nvme_tcp_queue *queue = &ctrl->queues[0]; + 
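+	/*
+	 * The AEN request has no struct request behind it and carries no
+	 * data; it only needs a command PDU, carved from the same per-queue
+	 * page fragment cache as regular requests.
+	 */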
struct nvme_tcp_request *async = &ctrl->async_req; + u8 hdgst = nvme_tcp_hdgst_len(queue); + + async->pdu = page_frag_alloc(&queue->pf_cache, + sizeof(struct nvme_tcp_cmd_pdu) + hdgst, + GFP_KERNEL | __GFP_ZERO); + if (!async->pdu) + return -ENOMEM; + + async->queue = &ctrl->queues[0]; + return 0; +} + +static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct page *page; + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + unsigned int noreclaim_flag; + + if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) + return; + + if (queue->hdr_digest || queue->data_digest) + nvme_tcp_free_crypto(queue); + + if (queue->pf_cache.va) { + page = virt_to_head_page(queue->pf_cache.va); + __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); + queue->pf_cache.va = NULL; + } + + noreclaim_flag = memalloc_noreclaim_save(); + sock_release(queue->sock); + memalloc_noreclaim_restore(noreclaim_flag); + + kfree(queue->pdu); + mutex_destroy(&queue->send_mutex); + mutex_destroy(&queue->queue_lock); +} + +static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_icreq_pdu *icreq; + struct nvme_tcp_icresp_pdu *icresp; + struct msghdr msg = {}; + struct kvec iov; + bool ctrl_hdgst, ctrl_ddgst; + u32 maxh2cdata; + int ret; + + icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); + if (!icreq) + return -ENOMEM; + + icresp = kzalloc(sizeof(*icresp), GFP_KERNEL); + if (!icresp) { + ret = -ENOMEM; + goto free_icreq; + } + + icreq->hdr.type = nvme_tcp_icreq; + icreq->hdr.hlen = sizeof(*icreq); + icreq->hdr.pdo = 0; + icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); + icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); + icreq->maxr2t = 0; /* single inflight r2t supported */ + icreq->hpda = 0; /* no alignment constraint */ + if (queue->hdr_digest) + icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; + if (queue->data_digest) + icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; + + iov.iov_base = icreq; + iov.iov_len = sizeof(*icreq); + ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); + if (ret < 0) + goto free_icresp; + + memset(&msg, 0, sizeof(msg)); + iov.iov_base = icresp; + iov.iov_len = sizeof(*icresp); + ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, + iov.iov_len, msg.msg_flags); + if (ret < 0) + goto free_icresp; + + ret = -EINVAL; + if (icresp->hdr.type != nvme_tcp_icresp) { + pr_err("queue %d: bad type returned %d\n", + nvme_tcp_queue_id(queue), icresp->hdr.type); + goto free_icresp; + } + + if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { + pr_err("queue %d: bad pdu length returned %d\n", + nvme_tcp_queue_id(queue), icresp->hdr.plen); + goto free_icresp; + } + + if (icresp->pfv != NVME_TCP_PFV_1_0) { + pr_err("queue %d: bad pfv returned %d\n", + nvme_tcp_queue_id(queue), icresp->pfv); + goto free_icresp; + } + + ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); + if ((queue->data_digest && !ctrl_ddgst) || + (!queue->data_digest && ctrl_ddgst)) { + pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", + nvme_tcp_queue_id(queue), + queue->data_digest ? "enabled" : "disabled", + ctrl_ddgst ? "enabled" : "disabled"); + goto free_icresp; + } + + ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); + if ((queue->hdr_digest && !ctrl_hdgst) || + (!queue->hdr_digest && ctrl_hdgst)) { + pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", + nvme_tcp_queue_id(queue), + queue->hdr_digest ? "enabled" : "disabled", + ctrl_hdgst ? 
"enabled" : "disabled"); + goto free_icresp; + } + + if (icresp->cpda != 0) { + pr_err("queue %d: unsupported cpda returned %d\n", + nvme_tcp_queue_id(queue), icresp->cpda); + goto free_icresp; + } + + maxh2cdata = le32_to_cpu(icresp->maxdata); + if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { + pr_err("queue %d: invalid maxh2cdata returned %u\n", + nvme_tcp_queue_id(queue), maxh2cdata); + goto free_icresp; + } + queue->maxh2cdata = maxh2cdata; + + ret = 0; +free_icresp: + kfree(icresp); +free_icreq: + kfree(icreq); + return ret; +} + +static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) +{ + return nvme_tcp_queue_id(queue) == 0; +} + +static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + + return !nvme_tcp_admin_queue(queue) && + qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; +} + +static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + + return !nvme_tcp_admin_queue(queue) && + !nvme_tcp_default_queue(queue) && + qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + + ctrl->io_queues[HCTX_TYPE_READ]; +} + +static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + + return !nvme_tcp_admin_queue(queue) && + !nvme_tcp_default_queue(queue) && + !nvme_tcp_read_queue(queue) && + qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + + ctrl->io_queues[HCTX_TYPE_READ] + + ctrl->io_queues[HCTX_TYPE_POLL]; +} + +static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) +{ + struct nvme_tcp_ctrl *ctrl = queue->ctrl; + int qid = nvme_tcp_queue_id(queue); + int n = 0; + + if (nvme_tcp_default_queue(queue)) + n = qid - 1; + else if (nvme_tcp_read_queue(queue)) + n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; + else if (nvme_tcp_poll_queue(queue)) + n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - + ctrl->io_queues[HCTX_TYPE_READ] - 1; + queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); +} + +static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + int ret, rcv_pdu_size; + + mutex_init(&queue->queue_lock); + queue->ctrl = ctrl; + init_llist_head(&queue->req_list); + INIT_LIST_HEAD(&queue->send_list); + mutex_init(&queue->send_mutex); + INIT_WORK(&queue->io_work, nvme_tcp_io_work); + + if (qid > 0) + queue->cmnd_capsule_len = nctrl->ioccsz * 16; + else + queue->cmnd_capsule_len = sizeof(struct nvme_command) + + NVME_TCP_ADMIN_CCSZ; + + ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, + IPPROTO_TCP, &queue->sock); + if (ret) { + dev_err(nctrl->device, + "failed to create socket: %d\n", ret); + goto err_destroy_mutex; + } + + nvme_tcp_reclassify_socket(queue->sock); + + /* Single syn retry */ + tcp_sock_set_syncnt(queue->sock->sk, 1); + + /* Set TCP no delay */ + tcp_sock_set_nodelay(queue->sock->sk); + + /* + * Cleanup whatever is sitting in the TCP transmit queue on socket + * close. This is done to prevent stale data from being sent should + * the network connection be restored before TCP times out. 
+ */ + sock_no_linger(queue->sock->sk); + + if (so_priority > 0) + sock_set_priority(queue->sock->sk, so_priority); + + /* Set socket type of service */ + if (nctrl->opts->tos >= 0) + ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); + + /* Set 10 seconds timeout for icresp recvmsg */ + queue->sock->sk->sk_rcvtimeo = 10 * HZ; + + queue->sock->sk->sk_allocation = GFP_ATOMIC; + queue->sock->sk->sk_use_task_frag = false; + nvme_tcp_set_queue_io_cpu(queue); + queue->request = NULL; + queue->data_remaining = 0; + queue->ddgst_remaining = 0; + queue->pdu_remaining = 0; + queue->pdu_offset = 0; + sk_set_memalloc(queue->sock->sk); + + if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { + ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, + sizeof(ctrl->src_addr)); + if (ret) { + dev_err(nctrl->device, + "failed to bind queue %d socket %d\n", + qid, ret); + goto err_sock; + } + } + + if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { + char *iface = nctrl->opts->host_iface; + sockptr_t optval = KERNEL_SOCKPTR(iface); + + ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, + optval, strlen(iface)); + if (ret) { + dev_err(nctrl->device, + "failed to bind to interface %s queue %d err %d\n", + iface, qid, ret); + goto err_sock; + } + } + + queue->hdr_digest = nctrl->opts->hdr_digest; + queue->data_digest = nctrl->opts->data_digest; + if (queue->hdr_digest || queue->data_digest) { + ret = nvme_tcp_alloc_crypto(queue); + if (ret) { + dev_err(nctrl->device, + "failed to allocate queue %d crypto\n", qid); + goto err_sock; + } + } + + rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + + nvme_tcp_hdgst_len(queue); + queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); + if (!queue->pdu) { + ret = -ENOMEM; + goto err_crypto; + } + + dev_dbg(nctrl->device, "connecting queue %d\n", + nvme_tcp_queue_id(queue)); + + ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, + sizeof(ctrl->addr), 0); + if (ret) { + dev_err(nctrl->device, + "failed to connect socket: %d\n", ret); + goto err_rcv_pdu; + } + + ret = nvme_tcp_init_connection(queue); + if (ret) + goto err_init_connect; + + set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); + + return 0; + +err_init_connect: + kernel_sock_shutdown(queue->sock, SHUT_RDWR); +err_rcv_pdu: + kfree(queue->pdu); +err_crypto: + if (queue->hdr_digest || queue->data_digest) + nvme_tcp_free_crypto(queue); +err_sock: + sock_release(queue->sock); + queue->sock = NULL; +err_destroy_mutex: + mutex_destroy(&queue->send_mutex); + mutex_destroy(&queue->queue_lock); + return ret; +} + +static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue) +{ + struct socket *sock = queue->sock; + + write_lock_bh(&sock->sk->sk_callback_lock); + sock->sk->sk_user_data = NULL; + sock->sk->sk_data_ready = queue->data_ready; + sock->sk->sk_state_change = queue->state_change; + sock->sk->sk_write_space = queue->write_space; + write_unlock_bh(&sock->sk->sk_callback_lock); +} + +static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) +{ + kernel_sock_shutdown(queue->sock, SHUT_RDWR); + nvme_tcp_restore_sock_ops(queue); + cancel_work_sync(&queue->io_work); +} + +static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + + if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) + return; + + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) + __nvme_tcp_stop_queue(queue); + mutex_unlock(&queue->queue_lock); +} + +static void 
nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) +{ + write_lock_bh(&queue->sock->sk->sk_callback_lock); + queue->sock->sk->sk_user_data = queue; + queue->state_change = queue->sock->sk->sk_state_change; + queue->data_ready = queue->sock->sk->sk_data_ready; + queue->write_space = queue->sock->sk->sk_write_space; + queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; + queue->sock->sk->sk_state_change = nvme_tcp_state_change; + queue->sock->sk->sk_write_space = nvme_tcp_write_space; +#ifdef CONFIG_NET_RX_BUSY_POLL + queue->sock->sk->sk_ll_usec = 1; +#endif + write_unlock_bh(&queue->sock->sk->sk_callback_lock); +} + +static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[idx]; + int ret; + + queue->rd_enabled = true; + nvme_tcp_init_recv_ctx(queue); + nvme_tcp_setup_sock_ops(queue); + + if (idx) + ret = nvmf_connect_io_queue(nctrl, idx); + else + ret = nvmf_connect_admin_queue(nctrl); + + if (!ret) { + set_bit(NVME_TCP_Q_LIVE, &queue->flags); + } else { + if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) + __nvme_tcp_stop_queue(queue); + dev_err(nctrl->device, + "failed to connect queue: %d ret=%d\n", idx, ret); + } + return ret; +} + +static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) +{ + if (to_tcp_ctrl(ctrl)->async_req.pdu) { + cancel_work_sync(&ctrl->async_event_work); + nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); + to_tcp_ctrl(ctrl)->async_req.pdu = NULL; + } + + nvme_tcp_free_queue(ctrl, 0); +} + +static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->queue_count; i++) + nvme_tcp_free_queue(ctrl, i); +} + +static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->queue_count; i++) + nvme_tcp_stop_queue(ctrl, i); +} + +static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, + int first, int last) +{ + int i, ret; + + for (i = first; i < last; i++) { + ret = nvme_tcp_start_queue(ctrl, i); + if (ret) + goto out_stop_queues; + } + + return 0; + +out_stop_queues: + for (i--; i >= first; i--) + nvme_tcp_stop_queue(ctrl, i); + return ret; +} + +static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) +{ + int ret; + + ret = nvme_tcp_alloc_queue(ctrl, 0); + if (ret) + return ret; + + ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); + if (ret) + goto out_free_queue; + + return 0; + +out_free_queue: + nvme_tcp_free_queue(ctrl, 0); + return ret; +} + +static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) +{ + int i, ret; + + for (i = 1; i < ctrl->queue_count; i++) { + ret = nvme_tcp_alloc_queue(ctrl, i); + if (ret) + goto out_free_queues; + } + + return 0; + +out_free_queues: + for (i--; i >= 1; i--) + nvme_tcp_free_queue(ctrl, i); + + return ret; +} + +static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) +{ + unsigned int nr_io_queues; + int ret; + + nr_io_queues = nvmf_nr_io_queues(ctrl->opts); + ret = nvme_set_queue_count(ctrl, &nr_io_queues); + if (ret) + return ret; + + if (nr_io_queues == 0) { + dev_err(ctrl->device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } + + ctrl->queue_count = nr_io_queues + 1; + dev_info(ctrl->device, + "creating %d I/O queues.\n", nr_io_queues); + + nvmf_set_io_queues(ctrl->opts, nr_io_queues, + to_tcp_ctrl(ctrl)->io_queues); + return __nvme_tcp_alloc_io_queues(ctrl); +} + +static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) +{ + nvme_tcp_stop_io_queues(ctrl); + if (remove) + 
nvme_remove_io_tag_set(ctrl); + nvme_tcp_free_io_queues(ctrl); +} + +static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) +{ + int ret, nr_queues; + + ret = nvme_tcp_alloc_io_queues(ctrl); + if (ret) + return ret; + + if (new) { + ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set, + &nvme_tcp_mq_ops, + ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, + sizeof(struct nvme_tcp_request)); + if (ret) + goto out_free_io_queues; + } + + /* + * Only start IO queues for which we have allocated the tagset + * and limitted it to the available queues. On reconnects, the + * queue number might have changed. + */ + nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); + ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues); + if (ret) + goto out_cleanup_connect_q; + + if (!new) { + nvme_start_freeze(ctrl); + nvme_unquiesce_io_queues(ctrl); + if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. + */ + ret = -ENODEV; + nvme_unfreeze(ctrl); + goto out_wait_freeze_timed_out; + } + blk_mq_update_nr_hw_queues(ctrl->tagset, + ctrl->queue_count - 1); + nvme_unfreeze(ctrl); + } + + /* + * If the number of queues has increased (reconnect case) + * start all new queues now. + */ + ret = nvme_tcp_start_io_queues(ctrl, nr_queues, + ctrl->tagset->nr_hw_queues + 1); + if (ret) + goto out_wait_freeze_timed_out; + + return 0; + +out_wait_freeze_timed_out: + nvme_quiesce_io_queues(ctrl); + nvme_sync_io_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); +out_cleanup_connect_q: + nvme_cancel_tagset(ctrl); + if (new) + nvme_remove_io_tag_set(ctrl); +out_free_io_queues: + nvme_tcp_free_io_queues(ctrl); + return ret; +} + +static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) +{ + nvme_tcp_stop_queue(ctrl, 0); + if (remove) + nvme_remove_admin_tag_set(ctrl); + nvme_tcp_free_admin_queue(ctrl); +} + +static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) +{ + int error; + + error = nvme_tcp_alloc_admin_queue(ctrl); + if (error) + return error; + + if (new) { + error = nvme_alloc_admin_tag_set(ctrl, + &to_tcp_ctrl(ctrl)->admin_tag_set, + &nvme_tcp_admin_mq_ops, + sizeof(struct nvme_tcp_request)); + if (error) + goto out_free_queue; + } + + error = nvme_tcp_start_queue(ctrl, 0); + if (error) + goto out_cleanup_tagset; + + error = nvme_enable_ctrl(ctrl); + if (error) + goto out_stop_queue; + + nvme_unquiesce_admin_queue(ctrl); + + error = nvme_init_ctrl_finish(ctrl, false); + if (error) + goto out_quiesce_queue; + + return 0; + +out_quiesce_queue: + nvme_quiesce_admin_queue(ctrl); + blk_sync_queue(ctrl->admin_q); +out_stop_queue: + nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); +out_cleanup_tagset: + if (new) + nvme_remove_admin_tag_set(ctrl); +out_free_queue: + nvme_tcp_free_admin_queue(ctrl); + return error; +} + +static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, + bool remove) +{ + nvme_quiesce_admin_queue(ctrl); + blk_sync_queue(ctrl->admin_q); + nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); + if (remove) + nvme_unquiesce_admin_queue(ctrl); + nvme_tcp_destroy_admin_queue(ctrl, remove); +} + +static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, + bool remove) +{ + if (ctrl->queue_count <= 1) + return; + nvme_quiesce_admin_queue(ctrl); + nvme_quiesce_io_queues(ctrl); + nvme_sync_io_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); + 
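+ /* The queue sockets are shut down at this point; cancel whatever is still outstanding in the tagset before the queues are torn down. */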
nvme_cancel_tagset(ctrl); + if (remove) + nvme_unquiesce_io_queues(ctrl); + nvme_tcp_destroy_io_queues(ctrl, remove); +} + +static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) +{ + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + /* If we are resetting/deleting then do nothing */ + if (state != NVME_CTRL_CONNECTING) { + WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE); + return; + } + + if (nvmf_should_reconnect(ctrl)) { + dev_info(ctrl->device, "Reconnecting in %d seconds...\n", + ctrl->opts->reconnect_delay); + queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, + ctrl->opts->reconnect_delay * HZ); + } else { + dev_info(ctrl->device, "Removing controller...\n"); + nvme_delete_ctrl(ctrl); + } +} + +static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) +{ + struct nvmf_ctrl_options *opts = ctrl->opts; + int ret; + + ret = nvme_tcp_configure_admin_queue(ctrl, new); + if (ret) + return ret; + + if (ctrl->icdoff) { + ret = -EOPNOTSUPP; + dev_err(ctrl->device, "icdoff is not supported!\n"); + goto destroy_admin; + } + + if (!nvme_ctrl_sgl_supported(ctrl)) { + ret = -EOPNOTSUPP; + dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); + goto destroy_admin; + } + + if (opts->queue_size > ctrl->sqsize + 1) + dev_warn(ctrl->device, + "queue_size %zu > ctrl sqsize %u, clamping down\n", + opts->queue_size, ctrl->sqsize + 1); + + if (ctrl->sqsize + 1 > ctrl->maxcmd) { + dev_warn(ctrl->device, + "sqsize %u > ctrl maxcmd %u, clamping down\n", + ctrl->sqsize + 1, ctrl->maxcmd); + ctrl->sqsize = ctrl->maxcmd - 1; + } + + if (ctrl->queue_count > 1) { + ret = nvme_tcp_configure_io_queues(ctrl, new); + if (ret) + goto destroy_admin; + } + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { + /* + * state change failure is ok if we started ctrl delete, + * unless we're during creation of a new controller to + * avoid races with teardown flow. 
+ */ + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + WARN_ON_ONCE(state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO); + WARN_ON_ONCE(new); + ret = -EINVAL; + goto destroy_io; + } + + nvme_start_ctrl(ctrl); + return 0; + +destroy_io: + if (ctrl->queue_count > 1) { + nvme_quiesce_io_queues(ctrl); + nvme_sync_io_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); + nvme_cancel_tagset(ctrl); + nvme_tcp_destroy_io_queues(ctrl, new); + } +destroy_admin: + nvme_quiesce_admin_queue(ctrl); + blk_sync_queue(ctrl->admin_q); + nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); + nvme_tcp_destroy_admin_queue(ctrl, new); + return ret; +} + +static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work) +{ + struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work), + struct nvme_tcp_ctrl, connect_work); + struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; + + ++ctrl->nr_reconnects; + + if (nvme_tcp_setup_ctrl(ctrl, false)) + goto requeue; + + dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", + ctrl->nr_reconnects); + + ctrl->nr_reconnects = 0; + + return; + +requeue: + dev_info(ctrl->device, "Failed reconnect attempt %d\n", + ctrl->nr_reconnects); + nvme_tcp_reconnect_or_remove(ctrl); +} + +static void nvme_tcp_error_recovery_work(struct work_struct *work) +{ + struct nvme_tcp_ctrl *tcp_ctrl = container_of(work, + struct nvme_tcp_ctrl, err_work); + struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; + + nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); + nvme_tcp_teardown_io_queues(ctrl, false); + /* unquiesce to fail fast pending requests */ + nvme_unquiesce_io_queues(ctrl); + nvme_tcp_teardown_admin_queue(ctrl, false); + nvme_unquiesce_admin_queue(ctrl); + nvme_auth_stop(ctrl); + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { + /* state change failure is ok if we started ctrl delete */ + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + WARN_ON_ONCE(state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO); + return; + } + + nvme_tcp_reconnect_or_remove(ctrl); +} + +static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) +{ + nvme_tcp_teardown_io_queues(ctrl, shutdown); + nvme_quiesce_admin_queue(ctrl); + nvme_disable_ctrl(ctrl, shutdown); + nvme_tcp_teardown_admin_queue(ctrl, shutdown); +} + +static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) +{ + nvme_tcp_teardown_ctrl(ctrl, true); +} + +static void nvme_reset_ctrl_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, reset_work); + + nvme_stop_ctrl(ctrl); + nvme_tcp_teardown_ctrl(ctrl, false); + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { + /* state change failure is ok if we started ctrl delete */ + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + WARN_ON_ONCE(state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO); + return; + } + + if (nvme_tcp_setup_ctrl(ctrl, false)) + goto out_fail; + + return; + +out_fail: + ++ctrl->nr_reconnects; + nvme_tcp_reconnect_or_remove(ctrl); +} + +static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) +{ + flush_work(&to_tcp_ctrl(ctrl)->err_work); + cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); +} + +static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + + if (list_empty(&ctrl->list)) + goto free_ctrl; + + mutex_lock(&nvme_tcp_ctrl_mutex); + list_del(&ctrl->list); + mutex_unlock(&nvme_tcp_ctrl_mutex); + + nvmf_free_options(nctrl->opts); +free_ctrl: + 
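+ /* A controller that was never added to nvme_tcp_ctrl_list skips the unlinking and options release above and only frees its memory here. */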
kfree(ctrl->queues); + kfree(ctrl); +} + +static void nvme_tcp_set_sg_null(struct nvme_command *c) +{ + struct nvme_sgl_desc *sg = &c->common.dptr.sgl; + + sg->addr = 0; + sg->length = 0; + sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | + NVME_SGL_FMT_TRANSPORT_A; +} + +static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, + struct nvme_command *c, u32 data_len) +{ + struct nvme_sgl_desc *sg = &c->common.dptr.sgl; + + sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); + sg->length = cpu_to_le32(data_len); + sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; +} + +static void nvme_tcp_set_sg_host_data(struct nvme_command *c, + u32 data_len) +{ + struct nvme_sgl_desc *sg = &c->common.dptr.sgl; + + sg->addr = 0; + sg->length = cpu_to_le32(data_len); + sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | + NVME_SGL_FMT_TRANSPORT_A; +} + +static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); + struct nvme_tcp_queue *queue = &ctrl->queues[0]; + struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; + struct nvme_command *cmd = &pdu->cmd; + u8 hdgst = nvme_tcp_hdgst_len(queue); + + memset(pdu, 0, sizeof(*pdu)); + pdu->hdr.type = nvme_tcp_cmd; + if (queue->hdr_digest) + pdu->hdr.flags |= NVME_TCP_F_HDGST; + pdu->hdr.hlen = sizeof(*pdu); + pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); + + cmd->common.opcode = nvme_admin_async_event; + cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; + cmd->common.flags |= NVME_CMD_SGL_METABUF; + nvme_tcp_set_sg_null(cmd); + + ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; + ctrl->async_req.offset = 0; + ctrl->async_req.curr_bio = NULL; + ctrl->async_req.data_len = 0; + + nvme_tcp_queue_request(&ctrl->async_req, true, true); +} + +static void nvme_tcp_complete_timed_out(struct request *rq) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; + + nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); + nvmf_complete_timed_out_request(rq); +} + +static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); + u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; + int qid = nvme_tcp_queue_id(req->queue); + + dev_warn(ctrl->device, + "queue %d: timeout cid %#x type %d opcode %#x (%s)\n", + nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type, + opc, nvme_opcode_str(qid, opc, fctype)); + + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) { + /* + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. + */ + nvme_tcp_complete_timed_out(rq); + return BLK_EH_DONE; + } + + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. 
+ */ + nvme_tcp_error_recovery(ctrl); + return BLK_EH_RESET_TIMER; +} + +static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, + struct request *rq) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); + struct nvme_command *c = &pdu->cmd; + + c->common.flags |= NVME_CMD_SGL_METABUF; + + if (!blk_rq_nr_phys_segments(rq)) + nvme_tcp_set_sg_null(c); + else if (rq_data_dir(rq) == WRITE && + req->data_len <= nvme_tcp_inline_data_size(req)) + nvme_tcp_set_sg_inline(queue, c, req->data_len); + else + nvme_tcp_set_sg_host_data(c, req->data_len); + + return 0; +} + +static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, + struct request *rq) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); + struct nvme_tcp_queue *queue = req->queue; + u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; + blk_status_t ret; + + ret = nvme_setup_cmd(ns, rq); + if (ret) + return ret; + + req->state = NVME_TCP_SEND_CMD_PDU; + req->status = cpu_to_le16(NVME_SC_SUCCESS); + req->offset = 0; + req->data_sent = 0; + req->pdu_len = 0; + req->pdu_sent = 0; + req->h2cdata_left = 0; + req->data_len = blk_rq_nr_phys_segments(rq) ? + blk_rq_payload_bytes(rq) : 0; + req->curr_bio = rq->bio; + if (req->curr_bio && req->data_len) + nvme_tcp_init_iter(req, rq_data_dir(rq)); + + if (rq_data_dir(rq) == WRITE && + req->data_len <= nvme_tcp_inline_data_size(req)) + req->pdu_len = req->data_len; + + pdu->hdr.type = nvme_tcp_cmd; + pdu->hdr.flags = 0; + if (queue->hdr_digest) + pdu->hdr.flags |= NVME_TCP_F_HDGST; + if (queue->data_digest && req->pdu_len) { + pdu->hdr.flags |= NVME_TCP_F_DDGST; + ddgst = nvme_tcp_ddgst_len(queue); + } + pdu->hdr.hlen = sizeof(*pdu); + pdu->hdr.pdo = req->pdu_len ? 
pdu->hdr.hlen + hdgst : 0; + pdu->hdr.plen = + cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); + + ret = nvme_tcp_map_data(queue, rq); + if (unlikely(ret)) { + nvme_cleanup_cmd(rq); + dev_err(queue->ctrl->ctrl.device, + "Failed to map data (%d)\n", ret); + return ret; + } + + return 0; +} + +static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx) +{ + struct nvme_tcp_queue *queue = hctx->driver_data; + + if (!llist_empty(&queue->req_list)) + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); +} + +static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct nvme_tcp_queue *queue = hctx->driver_data; + struct request *rq = bd->rq; + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); + blk_status_t ret; + + if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) + return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); + + ret = nvme_tcp_setup_cmd_pdu(ns, rq); + if (unlikely(ret)) + return ret; + + nvme_start_request(rq); + + nvme_tcp_queue_request(req, true, bd->last); + + return BLK_STS_OK; +} + +static void nvme_tcp_map_queues(struct blk_mq_tag_set *set) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); + + nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); +} + +static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) +{ + struct nvme_tcp_queue *queue = hctx->driver_data; + struct sock *sk = queue->sock->sk; + + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + return 0; + + set_bit(NVME_TCP_Q_POLLING, &queue->flags); + if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) + sk_busy_loop(sk, true); + nvme_tcp_try_recv(queue); + clear_bit(NVME_TCP_Q_POLLING, &queue->flags); + return queue->nr_cqe; +} + +static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; + struct sockaddr_storage src_addr; + int ret, len; + + len = nvmf_get_address(ctrl, buf, size); + + mutex_lock(&queue->queue_lock); + + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + goto done; + ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); + if (ret > 0) { + if (len > 0) + len--; /* strip trailing newline */ + len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", + (len) ? 
"," : "", &src_addr); + } +done: + mutex_unlock(&queue->queue_lock); + + return len; +} + +static const struct blk_mq_ops nvme_tcp_mq_ops = { + .queue_rq = nvme_tcp_queue_rq, + .commit_rqs = nvme_tcp_commit_rqs, + .complete = nvme_complete_rq, + .init_request = nvme_tcp_init_request, + .exit_request = nvme_tcp_exit_request, + .init_hctx = nvme_tcp_init_hctx, + .timeout = nvme_tcp_timeout, + .map_queues = nvme_tcp_map_queues, + .poll = nvme_tcp_poll, +}; + +static const struct blk_mq_ops nvme_tcp_admin_mq_ops = { + .queue_rq = nvme_tcp_queue_rq, + .complete = nvme_complete_rq, + .init_request = nvme_tcp_init_request, + .exit_request = nvme_tcp_exit_request, + .init_hctx = nvme_tcp_init_admin_hctx, + .timeout = nvme_tcp_timeout, +}; + +static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { + .name = "tcp", + .module = THIS_MODULE, + .flags = NVME_F_FABRICS | NVME_F_BLOCKING, + .reg_read32 = nvmf_reg_read32, + .reg_read64 = nvmf_reg_read64, + .reg_write32 = nvmf_reg_write32, + .free_ctrl = nvme_tcp_free_ctrl, + .submit_async_event = nvme_tcp_submit_async_event, + .delete_ctrl = nvme_tcp_delete_ctrl, + .get_address = nvme_tcp_get_address, + .stop_ctrl = nvme_tcp_stop_ctrl, +}; + +static bool +nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts) +{ + struct nvme_tcp_ctrl *ctrl; + bool found = false; + + mutex_lock(&nvme_tcp_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { + found = nvmf_ip_options_match(&ctrl->ctrl, opts); + if (found) + break; + } + mutex_unlock(&nvme_tcp_ctrl_mutex); + + return found; +} + +static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, + struct nvmf_ctrl_options *opts) +{ + struct nvme_tcp_ctrl *ctrl; + int ret; + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ctrl->list); + ctrl->ctrl.opts = opts; + ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + + opts->nr_poll_queues + 1; + ctrl->ctrl.sqsize = opts->queue_size - 1; + ctrl->ctrl.kato = opts->kato; + + INIT_DELAYED_WORK(&ctrl->connect_work, + nvme_tcp_reconnect_ctrl_work); + INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); + INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); + + if (!(opts->mask & NVMF_OPT_TRSVCID)) { + opts->trsvcid = + kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL); + if (!opts->trsvcid) { + ret = -ENOMEM; + goto out_free_ctrl; + } + opts->mask |= NVMF_OPT_TRSVCID; + } + + ret = inet_pton_with_scope(&init_net, AF_UNSPEC, + opts->traddr, opts->trsvcid, &ctrl->addr); + if (ret) { + pr_err("malformed address passed: %s:%s\n", + opts->traddr, opts->trsvcid); + goto out_free_ctrl; + } + + if (opts->mask & NVMF_OPT_HOST_TRADDR) { + ret = inet_pton_with_scope(&init_net, AF_UNSPEC, + opts->host_traddr, NULL, &ctrl->src_addr); + if (ret) { + pr_err("malformed src address passed: %s\n", + opts->host_traddr); + goto out_free_ctrl; + } + } + + if (opts->mask & NVMF_OPT_HOST_IFACE) { + if (!__dev_get_by_name(&init_net, opts->host_iface)) { + pr_err("invalid interface passed: %s\n", + opts->host_iface); + ret = -ENODEV; + goto out_free_ctrl; + } + } + + if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { + ret = -EALREADY; + goto out_free_ctrl; + } + + ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), + GFP_KERNEL); + if (!ctrl->queues) { + ret = -ENOMEM; + goto out_free_ctrl; + } + + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); + if (ret) + goto out_kfree_queues; + + if (!nvme_change_ctrl_state(&ctrl->ctrl, 
NVME_CTRL_CONNECTING)) { + WARN_ON_ONCE(1); + ret = -EINTR; + goto out_uninit_ctrl; + } + + ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); + if (ret) + goto out_uninit_ctrl; + + dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", + nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); + + mutex_lock(&nvme_tcp_ctrl_mutex); + list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); + mutex_unlock(&nvme_tcp_ctrl_mutex); + + return &ctrl->ctrl; + +out_uninit_ctrl: + nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); + if (ret > 0) + ret = -EIO; + return ERR_PTR(ret); +out_kfree_queues: + kfree(ctrl->queues); +out_free_ctrl: + kfree(ctrl); + return ERR_PTR(ret); +} + +static struct nvmf_transport_ops nvme_tcp_transport = { + .name = "tcp", + .module = THIS_MODULE, + .required_opts = NVMF_OPT_TRADDR, + .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY | + NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | + NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST | + NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | + NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE, + .create_ctrl = nvme_tcp_create_ctrl, +}; + +static int __init nvme_tcp_init_module(void) +{ + BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); + BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); + BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128); + BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128); + BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24); + + nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!nvme_tcp_wq) + return -ENOMEM; + + nvmf_register_transport(&nvme_tcp_transport); + return 0; +} + +static void __exit nvme_tcp_cleanup_module(void) +{ + struct nvme_tcp_ctrl *ctrl; + + nvmf_unregister_transport(&nvme_tcp_transport); + + mutex_lock(&nvme_tcp_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) + nvme_delete_ctrl(&ctrl->ctrl); + mutex_unlock(&nvme_tcp_ctrl_mutex); + flush_workqueue(nvme_delete_wq); + + destroy_workqueue(nvme_tcp_wq); +} + +module_init(nvme_tcp_init_module); +module_exit(nvme_tcp_cleanup_module); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c new file mode 100644 index 0000000000..1c36fcedea --- /dev/null +++ b/drivers/nvme/host/trace.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM Express device driver tracepoints + * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH + */ + +#include <asm/unaligned.h> +#include "trace.h" + +static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u16 sqid = get_unaligned_le16(cdw10); + + trace_seq_printf(p, "sqid=%u", sqid); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u16 sqid = get_unaligned_le16(cdw10); + u16 qsize = get_unaligned_le16(cdw10 + 2); + u16 sq_flags = get_unaligned_le16(cdw10 + 4); + u16 cqid = get_unaligned_le16(cdw10 + 6); + + + trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u", + sqid, qsize, sq_flags, cqid); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_delete_cq(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u16 cqid = get_unaligned_le16(cdw10); + + trace_seq_printf(p, "cqid=%u", cqid); + 
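+ /* NUL-terminate so the decoded string can be printed by the tracepoint. */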
trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u16 cqid = get_unaligned_le16(cdw10); + u16 qsize = get_unaligned_le16(cdw10 + 2); + u16 cq_flags = get_unaligned_le16(cdw10 + 4); + u16 irq_vector = get_unaligned_le16(cdw10 + 6); + + trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u", + cqid, qsize, cq_flags, irq_vector); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 cns = cdw10[0]; + u16 ctrlid = get_unaligned_le16(cdw10 + 2); + + trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_admin_set_features(struct trace_seq *p, + u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 fid = cdw10[0]; + u8 sv = cdw10[3] & 0x8; + u32 cdw11 = get_unaligned_le32(cdw10 + 4); + + trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_admin_get_features(struct trace_seq *p, + u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 fid = cdw10[0]; + u8 sel = cdw10[1] & 0x7; + u32 cdw11 = get_unaligned_le32(cdw10 + 4); + + trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_get_lba_status(struct trace_seq *p, + u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u32 mndw = get_unaligned_le32(cdw10 + 8); + u16 rl = get_unaligned_le16(cdw10 + 12); + u8 atype = cdw10[15]; + + trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u", + slba, mndw, rl, atype); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 lbaf = cdw10[0] & 0xF; + u8 mset = (cdw10[0] >> 4) & 0x1; + u8 pi = (cdw10[0] >> 5) & 0x7; + u8 pil = cdw10[1] & 0x1; + u8 ses = (cdw10[1] >> 1) & 0x7; + + trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u", + lbaf, mset, pi, pil, ses); + + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u16 length = get_unaligned_le16(cdw10 + 8); + u16 control = get_unaligned_le16(cdw10 + 10); + u32 dsmgmt = get_unaligned_le32(cdw10 + 12); + u32 reftag = get_unaligned_le32(cdw10 + 16); + + trace_seq_printf(p, + "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u", + slba, length, control, dsmgmt, reftag); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "nr=%u, attributes=%u", + get_unaligned_le32(cdw10), + get_unaligned_le32(cdw10 + 4)); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u8 zsa = cdw10[12]; + u8 all = cdw10[13]; + + trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = 
trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u32 numd = get_unaligned_le32(cdw10 + 8); + u8 zra = cdw10[12]; + u8 zrasf = cdw10[13]; + u8 pr = cdw10[14]; + + trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u", + slba, numd, zra, zrasf, pr); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "cdw10=%*ph", 24, cdw10); + trace_seq_putc(p, 0); + + return ret; +} + +const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, + u8 opcode, u8 *cdw10) +{ + switch (opcode) { + case nvme_admin_delete_sq: + return nvme_trace_delete_sq(p, cdw10); + case nvme_admin_create_sq: + return nvme_trace_create_sq(p, cdw10); + case nvme_admin_delete_cq: + return nvme_trace_delete_cq(p, cdw10); + case nvme_admin_create_cq: + return nvme_trace_create_cq(p, cdw10); + case nvme_admin_identify: + return nvme_trace_admin_identify(p, cdw10); + case nvme_admin_set_features: + return nvme_trace_admin_set_features(p, cdw10); + case nvme_admin_get_features: + return nvme_trace_admin_get_features(p, cdw10); + case nvme_admin_get_lba_status: + return nvme_trace_get_lba_status(p, cdw10); + case nvme_admin_format_nvm: + return nvme_trace_admin_format_nvm(p, cdw10); + default: + return nvme_trace_common(p, cdw10); + } +} + +const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, + u8 opcode, u8 *cdw10) +{ + switch (opcode) { + case nvme_cmd_read: + case nvme_cmd_write: + case nvme_cmd_write_zeroes: + case nvme_cmd_zone_append: + return nvme_trace_read_write(p, cdw10); + case nvme_cmd_dsm: + return nvme_trace_dsm(p, cdw10); + case nvme_cmd_zone_mgmt_send: + return nvme_trace_zone_mgmt_send(p, cdw10); + case nvme_cmd_zone_mgmt_recv: + return nvme_trace_zone_mgmt_recv(p, cdw10); + default: + return nvme_trace_common(p, cdw10); + } +} + +static const char *nvme_trace_fabrics_property_set(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 attrib = spc[0]; + u32 ofst = get_unaligned_le32(spc + 4); + u64 value = get_unaligned_le64(spc + 8); + + trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx", + attrib, ofst, value); + trace_seq_putc(p, 0); + return ret; +} + +static const char *nvme_trace_fabrics_connect(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u16 recfmt = get_unaligned_le16(spc); + u16 qid = get_unaligned_le16(spc + 2); + u16 sqsize = get_unaligned_le16(spc + 4); + u8 cattr = spc[6]; + u32 kato = get_unaligned_le32(spc + 8); + + trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u", + recfmt, qid, sqsize, cattr, kato); + trace_seq_putc(p, 0); + return ret; +} + +static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 attrib = spc[0]; + u32 ofst = get_unaligned_le32(spc + 4); + + trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst); + trace_seq_putc(p, 0); + return ret; +} + +static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 spsp0 = spc[1]; + u8 spsp1 = spc[2]; + u8 secp = spc[3]; + u32 tl = get_unaligned_le32(spc + 4); + + trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u", + spsp0, spsp1, secp, tl); + trace_seq_putc(p, 0); + return ret; +} + +static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 spsp0 = spc[1]; + 
u8 spsp1 = spc[2]; + u8 secp = spc[3]; + u32 al = get_unaligned_le32(spc + 4); + + trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u", + spsp0, spsp1, secp, al); + trace_seq_putc(p, 0); + return ret; +} + +static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "specific=%*ph", 24, spc); + trace_seq_putc(p, 0); + return ret; +} + +const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p, + u8 fctype, u8 *spc) +{ + switch (fctype) { + case nvme_fabrics_type_property_set: + return nvme_trace_fabrics_property_set(p, spc); + case nvme_fabrics_type_connect: + return nvme_trace_fabrics_connect(p, spc); + case nvme_fabrics_type_property_get: + return nvme_trace_fabrics_property_get(p, spc); + case nvme_fabrics_type_auth_send: + return nvme_trace_fabrics_auth_send(p, spc); + case nvme_fabrics_type_auth_receive: + return nvme_trace_fabrics_auth_receive(p, spc); + default: + return nvme_trace_fabrics_common(p, spc); + } +} + +const char *nvme_trace_disk_name(struct trace_seq *p, char *name) +{ + const char *ret = trace_seq_buffer_ptr(p); + + if (*name) + trace_seq_printf(p, "disk=%s, ", name); + trace_seq_putc(p, 0); + + return ret; +} + +EXPORT_TRACEPOINT_SYMBOL_GPL(nvme_sq); diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h new file mode 100644 index 0000000000..4fb5922ffd --- /dev/null +++ b/drivers/nvme/host/trace.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NVM Express device driver tracepoints + * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nvme + +#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NVME_H + +#include <linux/nvme.h> +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +#include "nvme.h" + +const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode, + u8 *cdw10); +const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode, + u8 *cdw10); +const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype, + u8 *spc); + +#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ + ((opcode) == nvme_fabrics_command ? \ + nvme_trace_parse_fabrics_cmd(p, fctype, cdw10) : \ + ((qid) ? 
\ + nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \ + nvme_trace_parse_admin_cmd(p, opcode, cdw10))) + +const char *nvme_trace_disk_name(struct trace_seq *p, char *name); +#define __print_disk_name(name) \ + nvme_trace_disk_name(p, name) + +#ifndef TRACE_HEADER_MULTI_READ +static inline void __assign_disk_name(char *name, struct gendisk *disk) +{ + if (disk) + memcpy(name, disk->disk_name, DISK_NAME_LEN); + else + memset(name, 0, DISK_NAME_LEN); +} +#endif + +TRACE_EVENT(nvme_setup_cmd, + TP_PROTO(struct request *req, struct nvme_command *cmd), + TP_ARGS(req, cmd), + TP_STRUCT__entry( + __array(char, disk, DISK_NAME_LEN) + __field(int, ctrl_id) + __field(int, qid) + __field(u8, opcode) + __field(u8, flags) + __field(u8, fctype) + __field(u16, cid) + __field(u32, nsid) + __field(bool, metadata) + __array(u8, cdw10, 24) + ), + TP_fast_assign( + __entry->ctrl_id = nvme_req(req)->ctrl->instance; + __entry->qid = nvme_req_qid(req); + __entry->opcode = cmd->common.opcode; + __entry->flags = cmd->common.flags; + __entry->cid = cmd->common.command_id; + __entry->nsid = le32_to_cpu(cmd->common.nsid); + __entry->metadata = !!blk_integrity_rq(req); + __entry->fctype = cmd->fabrics.fctype; + __assign_disk_name(__entry->disk, req->q->disk); + memcpy(__entry->cdw10, &cmd->common.cdws, + sizeof(__entry->cdw10)); + ), + TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)", + __entry->ctrl_id, __print_disk_name(__entry->disk), + __entry->qid, __entry->cid, __entry->nsid, + __entry->flags, __entry->metadata, + show_opcode_name(__entry->qid, __entry->opcode, + __entry->fctype), + parse_nvme_cmd(__entry->qid, __entry->opcode, + __entry->fctype, __entry->cdw10)) +); + +TRACE_EVENT(nvme_complete_rq, + TP_PROTO(struct request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __array(char, disk, DISK_NAME_LEN) + __field(int, ctrl_id) + __field(int, qid) + __field(int, cid) + __field(u64, result) + __field(u8, retries) + __field(u8, flags) + __field(u16, status) + ), + TP_fast_assign( + __entry->ctrl_id = nvme_req(req)->ctrl->instance; + __entry->qid = nvme_req_qid(req); + __entry->cid = nvme_req(req)->cmd->common.command_id; + __entry->result = le64_to_cpu(nvme_req(req)->result.u64); + __entry->retries = nvme_req(req)->retries; + __entry->flags = nvme_req(req)->flags; + __entry->status = nvme_req(req)->status; + __assign_disk_name(__entry->disk, req->q->disk); + ), + TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x", + __entry->ctrl_id, __print_disk_name(__entry->disk), + __entry->qid, __entry->cid, __entry->result, + __entry->retries, __entry->flags, __entry->status) + +); + +#define aer_name(aer) { aer, #aer } + +TRACE_EVENT(nvme_async_event, + TP_PROTO(struct nvme_ctrl *ctrl, u32 result), + TP_ARGS(ctrl, result), + TP_STRUCT__entry( + __field(int, ctrl_id) + __field(u32, result) + ), + TP_fast_assign( + __entry->ctrl_id = ctrl->instance; + __entry->result = result; + ), + TP_printk("nvme%d: NVME_AEN=%#08x [%s]", + __entry->ctrl_id, __entry->result, + __print_symbolic(__entry->result & 0x7, + aer_name(NVME_AER_ERROR), + aer_name(NVME_AER_SMART), + aer_name(NVME_AER_NOTICE), + aer_name(NVME_AER_CSS), + aer_name(NVME_AER_VS)) + ) +); + +#undef aer_name + +TRACE_EVENT(nvme_sq, + TP_PROTO(struct request *req, __le16 sq_head, int sq_tail), + TP_ARGS(req, sq_head, sq_tail), + TP_STRUCT__entry( + __field(int, ctrl_id) + __array(char, disk, DISK_NAME_LEN) + __field(int, qid) + __field(u16, sq_head) + __field(u16, sq_tail) + ), + TP_fast_assign( + 
__entry->ctrl_id = nvme_req(req)->ctrl->instance; + __assign_disk_name(__entry->disk, req->q->disk); + __entry->qid = nvme_req_qid(req); + __entry->sq_head = le16_to_cpu(sq_head); + __entry->sq_tail = sq_tail; + ), + TP_printk("nvme%d: %sqid=%d, head=%u, tail=%u", + __entry->ctrl_id, __print_disk_name(__entry->disk), + __entry->qid, __entry->sq_head, __entry->sq_tail + ) +); + +#endif /* _TRACE_NVME_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c new file mode 100644 index 0000000000..ec8557810c --- /dev/null +++ b/drivers/nvme/host/zns.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ + +#include <linux/blkdev.h> +#include <linux/vmalloc.h> +#include "nvme.h" + +int nvme_revalidate_zones(struct nvme_ns *ns) +{ + struct request_queue *q = ns->queue; + + blk_queue_chunk_sectors(q, ns->zsze); + blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); + + return blk_revalidate_disk_zones(ns->disk, NULL); +} + +static int nvme_set_max_append(struct nvme_ctrl *ctrl) +{ + struct nvme_command c = { }; + struct nvme_id_ctrl_zns *id; + int status; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return -ENOMEM; + + c.identify.opcode = nvme_admin_identify; + c.identify.cns = NVME_ID_CNS_CS_CTRL; + c.identify.csi = NVME_CSI_ZNS; + + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); + if (status) { + kfree(id); + return status; + } + + if (id->zasl) + ctrl->max_zone_append = 1 << (id->zasl + 3); + else + ctrl->max_zone_append = ctrl->max_hw_sectors; + kfree(id); + return 0; +} + +int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) +{ + struct nvme_effects_log *log = ns->head->effects; + struct request_queue *q = ns->queue; + struct nvme_command c = { }; + struct nvme_id_ns_zns *id; + int status; + + /* Driver requires zone append support */ + if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) & + NVME_CMD_EFFECTS_CSUPP)) { + if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags)) + dev_warn(ns->ctrl->device, + "Zone Append supported for zoned namespace:%d. Remove read-only mode\n", + ns->head->ns_id); + } else { + set_bit(NVME_NS_FORCE_RO, &ns->flags); + dev_warn(ns->ctrl->device, + "Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n", + ns->head->ns_id); + } + + /* Lazily query controller append limit for the first zoned namespace */ + if (!ns->ctrl->max_zone_append) { + status = nvme_set_max_append(ns->ctrl); + if (status) + return status; + } + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return -ENOMEM; + + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(ns->head->ns_id); + c.identify.cns = NVME_ID_CNS_CS_NS; + c.identify.csi = NVME_CSI_ZNS; + + status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id)); + if (status) + goto free_data; + + /* + * We currently do not handle devices requiring any of the zoned + * operation characteristics. 
+ */ + if (id->zoc) { + dev_warn(ns->ctrl->device, + "zone operations:%x not supported for namespace:%u\n", + le16_to_cpu(id->zoc), ns->head->ns_id); + status = -ENODEV; + goto free_data; + } + + ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze)); + if (!is_power_of_2(ns->zsze)) { + dev_warn(ns->ctrl->device, + "invalid zone size:%llu for namespace:%u\n", + ns->zsze, ns->head->ns_id); + status = -ENODEV; + goto free_data; + } + + disk_set_zoned(ns->disk, BLK_ZONED_HM); + blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); + disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1); + disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1); +free_data: + kfree(id); + return status; +} + +static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, + unsigned int nr_zones, size_t *buflen) +{ + struct request_queue *q = ns->disk->queue; + size_t bufsize; + void *buf; + + const size_t min_bufsize = sizeof(struct nvme_zone_report) + + sizeof(struct nvme_zone_descriptor); + + nr_zones = min_t(unsigned int, nr_zones, + get_capacity(ns->disk) >> ilog2(ns->zsze)); + + bufsize = sizeof(struct nvme_zone_report) + + nr_zones * sizeof(struct nvme_zone_descriptor); + bufsize = min_t(size_t, bufsize, + queue_max_hw_sectors(q) << SECTOR_SHIFT); + bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + + while (bufsize >= min_bufsize) { + buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); + if (buf) { + *buflen = bufsize; + return buf; + } + bufsize >>= 1; + } + return NULL; +} + +static int nvme_zone_parse_entry(struct nvme_ns *ns, + struct nvme_zone_descriptor *entry, + unsigned int idx, report_zones_cb cb, + void *data) +{ + struct blk_zone zone = { }; + + if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) { + dev_err(ns->ctrl->device, "invalid zone type %#x\n", + entry->zt); + return -EINVAL; + } + + zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; + zone.cond = entry->zs >> 4; + zone.len = ns->zsze; + zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap)); + zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba)); + if (zone.cond == BLK_ZONE_COND_FULL) + zone.wp = zone.start + zone.len; + else + zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp)); + + return cb(&zone, idx, data); +} + +int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct nvme_zone_report *report; + struct nvme_command c = { }; + int ret, zone_idx = 0; + unsigned int nz, i; + size_t buflen; + + if (ns->head->ids.csi != NVME_CSI_ZNS) + return -EINVAL; + + report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); + if (!report) + return -ENOMEM; + + c.zmr.opcode = nvme_cmd_zone_mgmt_recv; + c.zmr.nsid = cpu_to_le32(ns->head->ns_id); + c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen)); + c.zmr.zra = NVME_ZRA_ZONE_REPORT; + c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL; + c.zmr.pr = NVME_REPORT_ZONE_PARTIAL; + + sector &= ~(ns->zsze - 1); + while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { + memset(report, 0, buflen); + + c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector)); + ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen); + if (ret) { + if (ret > 0) + ret = -EIO; + goto out_free; + } + + nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones); + if (!nz) + break; + + for (i = 0; i < nz && zone_idx < nr_zones; i++) { + ret = nvme_zone_parse_entry(ns, &report->entries[i], + zone_idx, cb, data); + if (ret) + goto out_free; + zone_idx++; + } + + sector += ns->zsze * nz; + } + 
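+ /* Return the number of zones reported to the callback; an empty report is treated as an error. */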
+ if (zone_idx > 0) + ret = zone_idx; + else + ret = -EINVAL; +out_free: + kvfree(report); + return ret; +} + +blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, + struct nvme_command *c, enum nvme_zone_mgmt_action action) +{ + memset(c, 0, sizeof(*c)); + + c->zms.opcode = nvme_cmd_zone_mgmt_send; + c->zms.nsid = cpu_to_le32(ns->head->ns_id); + c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + c->zms.zsa = action; + + if (req_op(req) == REQ_OP_ZONE_RESET_ALL) + c->zms.select_all = 1; + + return BLK_STS_OK; +}
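
For reference, a minimal sketch of a report_zones_cb consumer that nvme_ns_report_zones() could drive via nvme_zone_parse_entry(), assuming the in-kernel blk-zoned callback interface; the names zone_open_count and count_open_zones_cb are illustrative and not part of the driver.

#include <linux/blkdev.h>

/* Tallies implicitly and explicitly open zones seen during a report walk. */
struct zone_open_count {
	unsigned int imp_open;
	unsigned int exp_open;
};

/*
 * Called once for each zone descriptor that nvme_zone_parse_entry()
 * translated into a struct blk_zone; a non-zero return stops the walk
 * and is propagated back to the caller.
 */
static int count_open_zones_cb(struct blk_zone *zone, unsigned int idx,
			       void *data)
{
	struct zone_open_count *cnt = data;

	if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
		cnt->imp_open++;
	else if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
		cnt->exp_open++;

	return 0;
}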