| | | |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:03 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:03 +0000 |
| commit | 01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch) | |
| tree | b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/nvme/host/nvme.h | |
| parent | Adding upstream version 6.7.12. (diff) | |
| download | linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz, linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip | |
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/nvme/host/nvme.h')
-rw-r--r-- | drivers/nvme/host/nvme.h | 122
1 file changed, 81 insertions(+), 41 deletions(-)
```diff
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e7411dac00..7b87763e2f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -16,6 +16,7 @@
 #include <linux/rcupdate.h>
 #include <linux/wait.h>
 #include <linux/t10-pi.h>
+#include <linux/ratelimit_types.h>
 
 #include <trace/events/block.h>
 
@@ -262,6 +263,7 @@ enum nvme_ctrl_flags {
 struct nvme_ctrl {
 	bool comp_seen;
 	bool identified;
+	bool passthru_err_log_enabled;
 	enum nvme_ctrl_state state;
 	spinlock_t lock;
 	struct mutex scan_lock;
@@ -302,14 +304,13 @@ struct nvme_ctrl {
 	u32 max_hw_sectors;
 	u32 max_segments;
 	u32 max_integrity_segments;
-	u32 max_discard_sectors;
-	u32 max_discard_segments;
 	u32 max_zeroes_sectors;
 #ifdef CONFIG_BLK_DEV_ZONED
 	u32 max_zone_append;
 #endif
 	u16 crdt[3];
 	u16 oncs;
+	u8 dmrl;
 	u32 dmrsl;
 	u16 oacs;
 	u16 sqsize;
@@ -450,13 +451,28 @@ struct nvme_ns_head {
 	struct list_head	list;
 	struct srcu_struct	srcu;
 	struct nvme_subsystem	*subsys;
-	unsigned		ns_id;
 	struct nvme_ns_ids	ids;
 	struct list_head	entry;
 	struct kref		ref;
 	bool			shared;
+	bool			passthru_err_log_enabled;
 	int			instance;
 	struct nvme_effects_log *effects;
+	u64			nuse;
+	unsigned		ns_id;
+	int			lba_shift;
+	u16			ms;
+	u16			pi_size;
+	u8			pi_type;
+	u8			guard_type;
+	u16			sgs;
+	u32			sws;
+#ifdef CONFIG_BLK_DEV_ZONED
+	u64			zsze;
+#endif
+	unsigned long		features;
+
+	struct ratelimit_state	rs_nuse;
 
 	struct cdev		cdev;
 	struct device		cdev_device;
@@ -498,17 +514,6 @@ struct nvme_ns {
 	struct kref kref;
 	struct nvme_ns_head *head;
 
-	int lba_shift;
-	u16 ms;
-	u16 pi_size;
-	u16 sgs;
-	u32 sws;
-	u8 pi_type;
-	u8 guard_type;
-#ifdef CONFIG_BLK_DEV_ZONED
-	u64 zsze;
-#endif
-	unsigned long features;
 	unsigned long flags;
 #define NVME_NS_REMOVING	0
 #define NVME_NS_ANA_PENDING	2
@@ -519,13 +524,12 @@ struct nvme_ns {
 	struct device		cdev_device;
 
 	struct nvme_fault_inject fault_inject;
-
 };
 
 /* NVMe ns supports metadata actions by the controller (generate/strip) */
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
 {
-	return ns->pi_type && ns->ms == ns->pi_size;
+	return head->pi_type && head->ms == head->pi_size;
 }
 
 struct nvme_ctrl_ops {
@@ -657,17 +661,17 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 /*
  * Convert a 512B sector number to a device logical block number.
  */
-static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
 {
-	return sector >> (ns->lba_shift - SECTOR_SHIFT);
+	return sector >> (head->lba_shift - SECTOR_SHIFT);
 }
 
 /*
  * Convert a device logical block number to a 512B sector number.
  */
-static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
+static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
 {
-	return lba << (ns->lba_shift - SECTOR_SHIFT);
+	return lba << (head->lba_shift - SECTOR_SHIFT);
 }
 
 /*
@@ -802,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *req);
 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-		bool queue_live);
+		bool queue_live, enum nvme_ctrl_state state);
 static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live)
 {
-	if (likely(ctrl->state == NVME_CTRL_LIVE))
+	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+	if (likely(state == NVME_CTRL_LIVE))
 		return true;
-	if (ctrl->ops->flags & NVME_F_FABRICS &&
-	    ctrl->state == NVME_CTRL_DELETING)
+	if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
 		return queue_live;
-	return __nvme_check_ready(ctrl, rq, queue_live);
+	return __nvme_check_ready(ctrl, rq, queue_live, state);
 }
 
 /*
@@ -833,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
 		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
 }
 
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+
+enum {
+	/* Insert request at the head of the queue */
+	NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
+	/* Set BLK_MQ_REQ_NOWAIT when allocating request */
+	NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+	/* Set BLK_MQ_REQ_RESERVED when allocating request */
+	NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+	/* Retry command when NVME_SC_DNR is not set in the result */
+	NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
+
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
-		int qid, int at_head,
-		blk_mq_req_flags_t flags);
+		int qid, nvme_submit_flags_t flags);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
 		unsigned int dword11, void *buffer, size_t buflen,
 		u32 *result);
@@ -873,10 +893,12 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 		unsigned int issue_flags);
 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 		unsigned int issue_flags);
+int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+		struct nvme_id_ns **id);
 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
 
-extern const struct attribute_group *nvme_ns_id_attr_groups[];
+extern const struct attribute_group *nvme_ns_attr_groups[];
 extern const struct pr_ops nvme_pr_ops;
 extern const struct block_device_operations nvme_ns_head_ops;
 extern const struct attribute_group nvme_dev_attrs_group;
@@ -926,6 +948,10 @@ extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
 
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+	return disk->fops == &nvme_ns_head_ops;
+}
 #else
 #define multipath false
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -1003,6 +1029,10 @@ static inline void nvme_mpath_start_request(struct request *rq)
 static inline void nvme_mpath_end_request(struct request *rq)
 {
 }
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+	return false;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_revalidate_zones(struct nvme_ns *ns);
@@ -1031,7 +1061,10 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
-	return dev_to_disk(dev)->private_data;
+	struct gendisk *disk = dev_to_disk(dev);
+
+	WARN_ON(nvme_disk_is_ns_head(disk));
+	return disk->private_data;
 }
 
 #ifdef CONFIG_NVME_HWMON
@@ -1108,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
 }
 
 #ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
 #else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
 {
 	return "I/O Error";
 }
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
 {
 	return "I/O Cmd";
 }
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
 {
 	return "Admin Cmd";
 }
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
 {
 	return "Fabrics Cmd";
 }
 #endif /* CONFIG_NVME_VERBOSE_ERRORS */
 
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
 {
-	if (opcode == nvme_fabrics_command)
-		return nvme_get_fabrics_opcode_str(fctype);
 	return qid ? nvme_get_opcode_str(opcode) :
 		nvme_get_admin_opcode_str(opcode);
 }
+
+static inline const char *nvme_fabrics_opcode_str(
+		int qid, const struct nvme_command *cmd)
+{
+	if (nvme_is_fabrics(cmd))
+		return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+	return nvme_opcode_str(qid, cmd->common.opcode);
+}
 #endif /* _NVME_H */
```
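For readers tracking the API change, here is a minimal, hypothetical sketch (not part of the commit) of how a call site migrates to the new `__nvme_submit_sync_cmd()` signature. Only the prototype and the `NVME_SUBMIT_*` flags come from the diff above; the wrapper function, its name, and its use of `ctrl->admin_q` with `NVME_QID_ANY` are illustrative assumptions.

```c
/*
 * Hypothetical example only: synchronously issue an admin command,
 * asking for head-of-queue insertion and a reserved tag.
 *
 * Old signature (6.7): ..., int qid, int at_head, blk_mq_req_flags_t flags
 * New signature (6.8): ..., int qid, nvme_submit_flags_t flags
 */
static int example_submit_admin_cmd(struct nvme_ctrl *ctrl,
				    struct nvme_command *cmd,
				    void *buf, unsigned int len)
{
	/*
	 * A 6.7 call site would have looked like:
	 *   __nvme_submit_sync_cmd(ctrl->admin_q, cmd, NULL, buf, len,
	 *                          NVME_QID_ANY, 1, BLK_MQ_REQ_RESERVED);
	 * The at_head argument and the blk-mq allocation flags are now
	 * folded into the single nvme_submit_flags_t bitmask.
	 */
	return __nvme_submit_sync_cmd(ctrl->admin_q, cmd, NULL, buf, len,
				      NVME_QID_ANY,
				      NVME_SUBMIT_AT_HEAD | NVME_SUBMIT_RESERVED);
}
```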