Diffstat (limited to 'plugins')
-rw-r--r--   plugins/wdc/wdc-nvme.c   445
-rw-r--r--   plugins/wdc/wdc-nvme.h     3
-rw-r--r--   plugins/zns/zns.c        116
3 files changed, 521 insertions, 43 deletions
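
Note: the bulk of the wdc-nvme.c additions retrieve the new 0xC3 Latency Monitor log page and parse it only after validating its version word and trailing GUID. The sketch below restates that validation step on a raw 0x200-byte log buffer, reusing the constants this diff introduces; the byte offsets (0x1EE for the version, 0x1F0 for the GUID) come from the struct comments in the diff, and validate_c3_page() itself is a hypothetical standalone helper, not part of the commit.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WDC_LATENCY_MON_LOG_BUF_LEN 0x200
#define WDC_LATENCY_MON_VERSION     0x0001
#define WDC_C3_GUID_LENGTH          16

static const uint8_t wdc_lat_mon_guid[WDC_C3_GUID_LENGTH] = {
    0x92, 0x7a, 0xc0, 0x8c, 0xd0, 0x84, 0x6c, 0x9c,
    0x70, 0x43, 0xe6, 0xd4, 0x58, 0x5e, 0xd4, 0x85
};

/* Return 0 when buf (WDC_LATENCY_MON_LOG_BUF_LEN bytes read from log 0xC3)
 * carries the expected version and GUID, -1 otherwise. */
static int validate_c3_page(const uint8_t *buf)
{
    /* log_page_version is a little-endian 16-bit field at offset 0x1EE */
    uint16_t version = (uint16_t)(buf[0x1EE] | (buf[0x1EF] << 8));

    if (version != WDC_LATENCY_MON_VERSION) {
        fprintf(stderr, "unexpected latency monitor log version 0x%x\n", version);
        return -1;
    }
    /* log_page_guid occupies the 16 bytes at offset 0x1F0 */
    if (memcmp(&buf[0x1F0], wdc_lat_mon_guid, WDC_C3_GUID_LENGTH) != 0) {
        fprintf(stderr, "unknown GUID in C3 log page data\n");
        return -1;
    }
    return 0;
}
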
diff --git a/plugins/wdc/wdc-nvme.c b/plugins/wdc/wdc-nvme.c index 4468344..d5e17b8 100644 --- a/plugins/wdc/wdc-nvme.c +++ b/plugins/wdc/wdc-nvme.c @@ -79,6 +79,7 @@ #define WDC_NVME_SN650_DEV_ID_1 0x2701 #define WDC_NVME_SN650_DEV_ID_2 0x2702 #define WDC_NVME_SN650_DEV_ID_3 0x2720 +#define WDC_NVME_SN650_DEV_ID_4 0x2721 #define WDC_NVME_SN450_DEV_ID_1 0x2712 #define WDC_NVME_SN450_DEV_ID_2 0x2713 #define WDC_NVME_SXSLCL_DEV_ID 0x2001 @@ -122,6 +123,7 @@ #define WDC_DRIVE_CAP_CLOUD_SSD_VERSION 0x0000000004000000 #define WDC_DRIVE_CAP_PCIE_STATS 0x0000000008000000 #define WDC_DRIVE_CAP_INFO_2 0x0000000010000000 +#define WDC_DRIVE_CAP_C3_LOG_PAGE 0x0000000020000000 #define WDC_DRIVE_CAP_DRIVE_ESSENTIALS 0x0000000100000000 #define WDC_DRIVE_CAP_DUI_DATA 0x0000000200000000 @@ -322,6 +324,15 @@ #define WDC_FW_ACT_HISTORY_C2_LOG_BUF_LEN 0x1000 #define WDC_MAX_NUM_ACT_HIST_ENTRIES 20 +/* C3 Latency Monitor Log Page */ +#define WDC_LATENCY_MON_LOG_BUF_LEN 0x200 +#define WDC_LATENCY_MON_OPCODE 0xC3 +#define WDC_LATENCY_MON_VERSION 0x0001 + +#define WDC_C3_GUID_LENGTH 16 +static __u8 wdc_lat_mon_guid[WDC_C3_GUID_LENGTH] = { 0x92, 0x7a, 0xc0, 0x8c, 0xd0, 0x84, 0x6c, 0x9c, + 0x70, 0x43, 0xe6, 0xd4, 0x58, 0x5e, 0xd4, 0x85 }; + /* D0 Smart Log Page */ #define WDC_NVME_GET_VU_SMART_LOG_OPCODE 0xD0 #define WDC_NVME_VU_SMART_LOG_LEN 0x200 @@ -523,7 +534,6 @@ typedef enum EOL_RRER = 108, /* Raw Read Error Rate */ } EOL_LOG_PAGE_C0_OFFSETS; - typedef struct __attribute__((__packed__)) _WDC_DE_VU_FILE_META_DATA { __u8 fileName[WDC_DE_FILE_NAME_SIZE]; @@ -800,6 +810,48 @@ struct wdc_bd_ca_log_format { __u8 raw_value[7]; }; +#define READ 0 +#define WRITE 1 +#define TRIM 2 +#define RESERVED 3 + +struct __attribute__((__packed__)) wdc_ssd_latency_monitor_log { + __u8 feature_status; /* 0x00 */ + __u8 rsvd1; /* 0x01 */ + __le16 active_bucket_timer; /* 0x02 */ + __le16 active_bucket_timer_threshold; /* 0x04 */ + __u8 active_threshold_a; /* 0x06 */ + __u8 active_threshold_b; /* 0x07 */ + __u8 active_threshold_c; /* 0x08 */ + __u8 active_threshold_d; /* 0x09 */ + __le16 active_latency_config; /* 0x0A */ + __u8 active_latency_min_window; /* 0x0C */ + __u8 rsvd2[0x13]; /* 0x0D */ + + __le32 active_bucket_counter[4][4] ; /* 0x20 - 0x5F */ + __le64 active_latency_timestamp[4][3]; /* 0x60 - 0xBF */ + __le16 active_measured_latency[4][3]; /* 0xC0 - 0xD7 */ + __le16 active_latency_stamp_units; /* 0xD8 */ + __u8 rsvd3[0x16]; /* 0xDA */ + + __le32 static_bucket_counter[4][4] ; /* 0xF0 - 0x12F */ + __le64 static_latency_timestamp[4][3]; /* 0x130 - 0x18F */ + __le16 static_measured_latency[4][3]; /* 0x190 - 0x1A7 */ + __le16 static_latency_stamp_units; /* 0x1A8 */ + __u8 rsvd4[0x16]; /* 0x1AA */ + + __le16 debug_log_trigger_enable; /* 0x1C0 */ + __le16 debug_log_measured_latency; /* 0x1C2 */ + __le64 debug_log_latency_stamp; /* 0x1C4 */ + __le16 debug_log_ptr; /* 0x1CC */ + __le16 debug_log_counter_trigger; /* 0x1CE */ + __u8 debug_log_stamp_units; /* 0x1D0 */ + __u8 rsvd5[0x1D]; /* 0x1D1 */ + + __le16 log_page_version; /* 0x1EE */ + __u8 log_page_guid[0x10]; /* 0x1F0 */ +}; + struct __attribute__((__packed__)) wdc_ssd_ca_perf_stats { __le64 nand_bytes_wr_lo; /* 0x00 - NAND Bytes Written lo */ __le64 nand_bytes_wr_hi; /* 0x08 - NAND Bytes Written hi */ @@ -1238,6 +1290,10 @@ static __u64 wdc_get_drive_capabilities(int fd) { WDC_DRVIE_CAP_DISABLE_CTLR_TELE_LOG | WDC_DRIVE_CAP_REASON_ID | WDC_DRIVE_CAP_LOG_PAGE_DIR); + /* verify the 0xC3 log page is supported */ + if (wdc_nvme_check_supported_log_page(fd, WDC_LATENCY_MON_OPCODE) 
== true) + capabilities |= WDC_DRIVE_CAP_C3_LOG_PAGE; + /* verify the 0xCA log page is supported */ if (wdc_nvme_check_supported_log_page(fd, WDC_NVME_GET_DEVICE_INFO_LOG_OPCODE) == true) capabilities |= WDC_DRIVE_CAP_CA_LOG_PAGE; @@ -1292,6 +1348,7 @@ static __u64 wdc_get_drive_capabilities(int fd) { case WDC_NVME_SN650_DEV_ID_1: case WDC_NVME_SN650_DEV_ID_2: case WDC_NVME_SN650_DEV_ID_3: + case WDC_NVME_SN650_DEV_ID_4: case WDC_NVME_SN450_DEV_ID_1: case WDC_NVME_SN450_DEV_ID_2: /* verify the 0xC0 log page is supported */ @@ -1386,6 +1443,10 @@ static __u64 wdc_get_enc_drive_capabilities(int fd) { WDC_DRIVE_CAP_DRIVE_STATUS | WDC_DRIVE_CAP_CLEAR_ASSERT | WDC_DRIVE_CAP_RESIZE); + /* verify the 0xC3 log page is supported */ + if (wdc_nvme_check_supported_log_page(fd, WDC_LATENCY_MON_OPCODE) == true) + capabilities |= WDC_DRIVE_CAP_C3_LOG_PAGE; + /* verify the 0xCB log page is supported */ if (wdc_nvme_check_supported_log_page(fd, WDC_NVME_GET_FW_ACT_HISTORY_LOG_ID) == true) capabilities |= WDC_DRIVE_CAP_FW_ACTIVATE_HISTORY; @@ -1972,6 +2033,7 @@ static int wdc_do_dump_e6(int fd, __u32 opcode,__u32 data_len, static int wdc_do_cap_telemetry_log(int fd, char *file, __u32 bs, int type, int data_area) { struct nvme_telemetry_log_page_hdr *hdr; + struct nvme_id_ctrl ctrl; size_t full_size, offset = WDC_TELEMETRY_HEADER_LENGTH; int err = 0, output; void *page_log; @@ -2060,6 +2122,46 @@ static int wdc_do_cap_telemetry_log(int fd, char *file, __u32 bs, int type, int case 3: full_size = (le16_to_cpu(hdr->dalb3) * WDC_TELEMETRY_BLOCK_SIZE) + WDC_TELEMETRY_HEADER_LENGTH; break; + case 4: + err = nvme_identify_ctrl(fd, &ctrl); + if (err) { + perror("identify-ctrl"); + goto close_output; + } + + if (posix_memalign(&buf, getpagesize(), get_feat_buf_len(NVME_FEAT_HOST_BEHAVIOR))) { + fprintf(stderr, "can not allocate feature payload\n"); + errno = ENOMEM; + err = -1; + goto close_output; + } + memset(buf, 0, get_feat_buf_len(NVME_FEAT_HOST_BEHAVIOR)); + + err = nvme_get_feature(fd, NVME_NSID_ALL, NVME_FEAT_HOST_BEHAVIOR, 0, 0, + 0, get_feat_buf_len(NVME_FEAT_HOST_BEHAVIOR), buf, &result); + if (err > 0) { + nvme_show_status(err); + } else if (err < 0) { + perror("get-feature"); + } else { + if ((ctrl.lpa & 0x40)) { + if (((unsigned char *)buf)[1] == 1) + full_size = (le32_to_cpu(hdr->dalb4) * WDC_TELEMETRY_BLOCK_SIZE) + WDC_TELEMETRY_HEADER_LENGTH; + else { + fprintf(stderr, "Data area 4 unsupported, Host Behavior Support ETDAS not set to 1\n"); + errno = EINVAL; + err = -1; + } + } else { + fprintf(stderr, "Data area 4 unsupported, bit 6 of Log Page Attributes not set\n"); + errno = EINVAL; + err = -1; + } + } + free(buf); + if (err) + goto close_output; + break; default: fprintf(stderr, "%s: Invalid data area requested, data area = %d\n", __func__, data_area); err = -EINVAL; @@ -3496,6 +3598,170 @@ static int wdc_print_log(struct wdc_ssd_perf_stats *perf, int fmt) return 0; } +static int wdc_convert_ts(time_t time, char *ts_buf) +{ + struct tm gmTimeInfo; + time_t time_Human, time_ms; + char buf[80]; + + time_Human = time/1000; + time_ms = time % 1000; + + gmtime_r((const time_t *)&time_Human, &gmTimeInfo); + + strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &gmTimeInfo); + sprintf(ts_buf, "%s.%03ld GMT", buf, time_ms); + + return 0; +} + +static int wdc_print_latency_monitor_log_normal(int fd, struct wdc_ssd_latency_monitor_log *log_data) +{ + printf("Latency Monitor/C3 Log Page Data \n"); + printf(" Controller : %s\n", devicename); + int err = -1, i, j; + struct nvme_id_ctrl ctrl; + char ts_buf[128]; 
+ + err = nvme_identify_ctrl(fd, &ctrl); + if (!err) + printf(" Serial Number: %-.*s\n", (int)sizeof(ctrl.sn), ctrl.sn); + else { + fprintf(stderr, "ERROR : WDC : latency monitor read id ctrl failure, err = %d\n", err); + return err; + } + + printf(" Feature Status 0x%x \n", log_data->feature_status); + printf(" Active Bucket Timer %d min \n", 5*le16_to_cpu(log_data->active_bucket_timer)); + printf(" Active Bucket Timer Threshold %d min \n", 5*le16_to_cpu(log_data->active_bucket_timer_threshold)); + printf(" Active Threshold A %d ms \n", 5*(le16_to_cpu(log_data->active_threshold_a+1))); + printf(" Active Threshold B %d ms \n", 5*(le16_to_cpu(log_data->active_threshold_b+1))); + printf(" Active Threshold C %d ms \n", 5*(le16_to_cpu(log_data->active_threshold_c+1))); + printf(" Active Threshold D %d ms \n", 5*(le16_to_cpu(log_data->active_threshold_d+1))); + printf(" Active Latency Config 0x%x \n", le16_to_cpu(log_data->active_latency_config)); + printf(" Active Latency Minimum Window %d ms \n", 100*log_data->active_latency_min_window); + printf(" Active Latency Stamp Units %d \n", le16_to_cpu(log_data->active_latency_stamp_units)); + printf(" Static Latency Stamp Units %d \n", le16_to_cpu(log_data->static_latency_stamp_units)); + printf(" Debug Log Trigger Enable %d \n", le16_to_cpu(log_data->debug_log_trigger_enable)); + + printf(" Read Write Deallocate/Trim \n"); + for (i = 0; i <= 3; i++) { + printf(" Active Bucket Counter: Bucket %d %27d %27d %27d \n", + i, le32_to_cpu(log_data->active_bucket_counter[i][READ]), le32_to_cpu(log_data->active_bucket_counter[i][WRITE]), + le32_to_cpu(log_data->active_bucket_counter[i][TRIM])); + } + + for (i = 0; i <= 3; i++) { + printf(" Active Measured Latency: Bucket %d %27d ms %27d ms %27d ms \n", + i, le16_to_cpu(log_data->active_measured_latency[i][READ]), le16_to_cpu(log_data->active_measured_latency[i][WRITE]), + le16_to_cpu(log_data->active_measured_latency[i][TRIM])); + } + + for (i = 0; i <= 3; i++) { + printf(" Active Latency Time Stamp: Bucket %d ", i); + for (j = 0; j <= 2; j++) { + if (le64_to_cpu(log_data->active_latency_timestamp[i][j]) == -1) + printf(" N/A "); + else { + wdc_convert_ts(le64_to_cpu(log_data->active_latency_timestamp[i][j]), ts_buf); + printf("%s ", ts_buf); + } + } + printf("\n"); + } + + for (i = 0; i <= 3; i++) { + printf(" Static Bucket Counter: Bucket %d %27d %27d %27d \n", + i, le32_to_cpu(log_data->static_bucket_counter[i][READ]), le32_to_cpu(log_data->static_bucket_counter[i][WRITE]), + le32_to_cpu(log_data->static_bucket_counter[i][TRIM])); + } + + for (i = 0; i <= 3; i++) { + printf(" Static Measured Latency: Bucket %d %27d ms %27d ms %27d ms \n", + i, le16_to_cpu(log_data->static_measured_latency[i][READ]), le16_to_cpu(log_data->static_measured_latency[i][WRITE]), + le16_to_cpu(log_data->static_measured_latency[i][TRIM])); + } + + for (i = 0; i <= 3; i++) { + printf(" Static Latency Time Stamp: Bucket %d ", i); + for (j = 0; j <= 2; j++) { + if (le64_to_cpu(log_data->static_latency_timestamp[i][j]) == -1) + printf(" N/A "); + else { + wdc_convert_ts(le64_to_cpu(log_data->static_latency_timestamp[i][j]), ts_buf); + printf("%s ", ts_buf); + } + } + printf("\n"); + } + + return 0; +} + +static void wdc_print_latency_monitor_log_json(struct wdc_ssd_latency_monitor_log *log_data) +{ + int i, j; + char buf[128]; + char *operation[3] = {"Read", "Write", "Trim"}; + struct json_object *root; + root = json_create_object(); + + json_object_add_value_int(root, "Feature Status", log_data->feature_status); + 
json_object_add_value_int(root, "Active Bucket Timer", 5*le16_to_cpu(log_data->active_bucket_timer)); + json_object_add_value_int(root, "Active Bucket Timer Threshold", 5*le16_to_cpu(log_data->active_bucket_timer_threshold)); + json_object_add_value_int(root, "Active Threshold A", 5*le16_to_cpu(log_data->active_threshold_a+1)); + json_object_add_value_int(root, "Active Threshold B", 5*le16_to_cpu(log_data->active_threshold_b+1)); + json_object_add_value_int(root, "Active Threshold C", 5*le16_to_cpu(log_data->active_threshold_c+1)); + json_object_add_value_int(root, "Active Threshold D", 5*le16_to_cpu(log_data->active_threshold_d+1)); + json_object_add_value_int(root, "Active Latency Config", le16_to_cpu(log_data->active_latency_config)); + json_object_add_value_int(root, "Active Lantency Minimum Window", 100*log_data->active_latency_min_window); + json_object_add_value_int(root, "Active Latency Stamp Units", le16_to_cpu(log_data->active_latency_stamp_units)); + json_object_add_value_int(root, "Static Latency Stamp Units", le16_to_cpu(log_data->static_latency_stamp_units)); + json_object_add_value_int(root, "Debug Log Trigger Enable", le16_to_cpu(log_data->debug_log_trigger_enable)); + + for (i = 0; i <= 3; i++) { + for (j = 0; j <= 2; j++) { + sprintf(buf, "Active Bucket Counter: Bucket %d %s", i, operation[j]); + json_object_add_value_int(root, buf, le32_to_cpu(log_data->active_bucket_counter[i][j])); + } + } + for (i = 0; i <= 3; i++) { + for (j = 0; j <= 2; j++) { + sprintf(buf, "Active Measured Latency: Bucket %d %s", i, operation[j]); + json_object_add_value_int(root, buf, le16_to_cpu(log_data->active_measured_latency[i][j])); + } + } + for (i = 0; i <= 3; i++) { + for (j = 0; j <= 2; j++) { + sprintf(buf, "Active Latency Time Stamp: Bucket %d %s", i, operation[j]); + json_object_add_value_int(root, buf, le64_to_cpu(log_data->active_latency_timestamp[i][j])); + } + } + for (i = 0; i <= 3; i++) { + for (j = 0; j <= 2; j++) { + sprintf(buf, "Static Bucket Counter: Bucket %d %s", i, operation[j]); + json_object_add_value_int(root, buf, le32_to_cpu(log_data->static_bucket_counter[i][j])); + } + } + for (i = 0; i <= 3; i++) { + for (j = 0; j <= 2; j++) { + sprintf(buf, "Static Measured Latency: Bucket %d %s", i, operation[j]); + json_object_add_value_int(root, buf, le16_to_cpu(log_data->static_measured_latency[i][j])); + } + } + for (i = 0; i <= 3; i++) { + for (j = 0; j <= 2; j++) { + sprintf(buf, "Static Latency Time Stamp: Bucket %d %s", i, operation[j]); + json_object_add_value_int(root, buf, le64_to_cpu(log_data->static_latency_timestamp[i][j])); + } + } + + json_print_object(root, NULL); + printf("\n"); + + json_free_object(root); +} + static void wdc_print_fb_ca_log_normal(struct wdc_ssd_ca_perf_stats *perf) { uint64_t converted = 0; @@ -4351,10 +4617,12 @@ static void wdc_print_smart_cloud_attr_C0_normal(void *data) printf(" SMART Cloud Attributes :- \n"); - printf(" Physical media units written %.0Lf\n", - int128_to_double(&log_data[SCAO_PMUW])); - printf(" Physical media units Read %.0Lf\n", - int128_to_double(&log_data[SCAO_PMUR])); + printf(" Physical media units written - %"PRIu64" %"PRIu64"\n", + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUW+8] & 0xFFFFFFFFFFFFFFFF), + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUW] & 0xFFFFFFFFFFFFFFFF)); + printf(" Physical media units read - %"PRIu64" %"PRIu64"\n", + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUR+8] & 0xFFFFFFFFFFFFFFFF), + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUR] & 
0xFFFFFFFFFFFFFFFF)); printf(" Bad user nand blocks - Raw %"PRIu64"\n", (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_BUNBR] & 0x0000FFFFFFFFFFFF)); printf(" Bad user nand blocks - Normalized %d\n", @@ -4432,10 +4700,14 @@ static void wdc_print_smart_cloud_attr_C0_json(void *data) uint16_t smart_log_ver = 0; root = json_create_object(); - json_object_add_value_float(root, "Physical media units written", - int128_to_double(&log_data[SCAO_PMUW])); - json_object_add_value_int(root, "Physical media units Read", - int128_to_double(&log_data[SCAO_PMUR])); + json_object_add_value_int(root, "Physical media units written hi", + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUW+8] & 0xFFFFFFFFFFFFFFFF)); + json_object_add_value_int(root, "Physical media units written lo", + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUW] & 0xFFFFFFFFFFFFFFFF)); + json_object_add_value_int(root, "Physical media units read hi", + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUR+8] & 0xFFFFFFFFFFFFFFFF)); + json_object_add_value_int(root, "Physical media units read lo", + (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_PMUR] & 0xFFFFFFFFFFFFFFFF)); json_object_add_value_uint(root, "Bad user nand blocks - Raw", (uint64_t)le64_to_cpu(*(uint64_t *)&log_data[SCAO_BUNBR] & 0x0000FFFFFFFFFFFF)); json_object_add_value_uint(root, "Bad user nand blocks - Normalized", @@ -4595,7 +4867,7 @@ static int wdc_print_c0_eol_log(void *data, int fmt) return 0; } -static int wdc_get_c0_log_page(int fd, char *format, int uuid_index) +static int wdc_get_c0_log_page(int fd, char *format, int uuid_index, __u32 namespace_id) { int ret = 0; int fmt = -1; @@ -4639,8 +4911,15 @@ static int wdc_get_c0_log_page(int fd, char *format, int uuid_index) return -1; } + if (namespace_id == NVME_NSID_ALL) { + ret = namespace_id = nvme_get_nsid(fd); + if (ret < 0) { + namespace_id = NVME_NSID_ALL; + } + } + /* Get the 0xC0 log data */ - ret = nvme_get_log14(fd, 0xFFFFFFFF, WDC_NVME_GET_EOL_STATUS_LOG_OPCODE, + ret = nvme_get_log14(fd, namespace_id, WDC_NVME_GET_EOL_STATUS_LOG_OPCODE, NVME_NO_LOG_LSP, 0, 0, false, uuid_index, 0, false, WDC_NVME_SMART_CLOUD_ATTR_LEN, data); if (strcmp(format, "json")) @@ -4687,7 +4966,7 @@ static int wdc_get_c0_log_page(int fd, char *format, int uuid_index) } /* Get the 0xC0 log data */ - ret = nvme_get_log14(fd, 0xFFFFFFFF, WDC_NVME_GET_EOL_STATUS_LOG_OPCODE, + ret = nvme_get_log14(fd, NVME_NSID_ALL, WDC_NVME_GET_EOL_STATUS_LOG_OPCODE, NVME_NO_LOG_LSP, 0, 0, false, uuid_index, 0, false, WDC_NVME_EOL_STATUS_LOG_LEN, data); if (strcmp(format, "json")) @@ -4714,7 +4993,7 @@ static int wdc_get_c0_log_page(int fd, char *format, int uuid_index) } /* Get the 0xC0 log data */ - ret = nvme_get_log(fd, 0xFFFFFFFF, WDC_NVME_GET_EOL_STATUS_LOG_OPCODE, + ret = nvme_get_log(fd, NVME_NSID_ALL, WDC_NVME_GET_EOL_STATUS_LOG_OPCODE, false, NVME_NO_LOG_LSP, WDC_NVME_EOL_STATUS_LOG_LEN, data); if (strcmp(format, "json")) @@ -4740,7 +5019,7 @@ static int wdc_get_c0_log_page(int fd, char *format, int uuid_index) } /* Get the 0xC0 log data */ - ret = nvme_get_log(fd, 0xFFFFFFFF, WDC_NVME_GET_SMART_CLOUD_ATTR_LOG_OPCODE, + ret = nvme_get_log(fd, NVME_NSID_ALL, WDC_NVME_GET_SMART_CLOUD_ATTR_LOG_OPCODE, false, NVME_NO_LOG_LSP, WDC_NVME_SMART_CLOUD_ATTR_LEN, data); if (strcmp(format, "json")) @@ -4768,6 +5047,23 @@ static int wdc_get_c0_log_page(int fd, char *format, int uuid_index) return ret; } +static int wdc_print_latency_monitor_log(int fd, struct wdc_ssd_latency_monitor_log *log_data, int fmt) +{ + if (!log_data) { + 
fprintf(stderr, "ERROR : WDC : Invalid C3 log data buffer\n"); + return -1; + } + switch (fmt) { + case NORMAL: + wdc_print_latency_monitor_log_normal(fd, log_data); + break; + case JSON: + wdc_print_latency_monitor_log_json(log_data); + break; + } + return 0; +} + static int wdc_print_fb_ca_log(struct wdc_ssd_ca_perf_stats *perf, int fmt) { if (!perf) { @@ -5030,6 +5326,77 @@ static int wdc_get_c1_log_page(int fd, char *format, uint8_t interval) return ret; } +static int wdc_get_c3_log_page(int fd, char *format) +{ + int ret = 0; + int fmt = -1; + __u8 *data; + int i; + struct wdc_ssd_latency_monitor_log *log_data; + + if (!wdc_check_device(fd)) + return -1; + fmt = validate_output_format(format); + if (fmt < 0) { + fprintf(stderr, "ERROR : WDC : invalid output format\n"); + return fmt; + } + + if ((data = (__u8 *) malloc(sizeof(__u8) * WDC_LATENCY_MON_LOG_BUF_LEN)) == NULL) { + fprintf(stderr, "ERROR : WDC : malloc : %s\n", strerror(errno)); + return -1; + } + memset(data, 0, sizeof (__u8) * WDC_LATENCY_MON_LOG_BUF_LEN); + + ret = nvme_get_log14(fd, NVME_NSID_ALL, WDC_LATENCY_MON_OPCODE, + NVME_NO_LOG_LSP, NVME_NO_LOG_LPO, 0, 0, + 0, 0, 0, WDC_LATENCY_MON_LOG_BUF_LEN, data); + + if (strcmp(format, "json")) + fprintf(stderr, "NVMe Status:%s(%x)\n", nvme_status_to_string(ret), ret); + + if (ret == 0) { + log_data = (struct wdc_ssd_latency_monitor_log*)data; + + /* check log page version */ + if (log_data->log_page_version != WDC_LATENCY_MON_VERSION) { + fprintf(stderr, "ERROR : WDC : invalid latency monitor version\n"); + ret = -1; + goto out; + } + + /* check log page guid */ + /* Verify GUID matches */ + for (i=0; i<16; i++) { + if (wdc_lat_mon_guid[i] != log_data->log_page_guid[i]) { + fprintf(stderr, "ERROR : WDC : Unknown GUID in C3 Log Page data\n"); + int j; + fprintf(stderr, "ERROR : WDC : Expected GUID: 0x"); + for (j = 0; j<16; j++) { + fprintf(stderr, "%x", wdc_lat_mon_guid[j]); + } + fprintf(stderr, "\nERROR : WDC : Actual GUID: 0x"); + for (j = 0; j<16; j++) { + fprintf(stderr, "%x", log_data->log_page_guid[j]); + } + fprintf(stderr, "\n"); + + ret = -1; + goto out; + } + } + + /* parse the data */ + wdc_print_latency_monitor_log(fd, log_data, fmt); + } else { + fprintf(stderr, "ERROR : WDC : Unable to read C3 data from buffer\n"); + } + +out: + free(data); + return ret; +} + static int wdc_get_d0_log_page(int fd, char *format) { int ret = 0; @@ -5083,6 +5450,7 @@ static int wdc_vs_smart_add_log(int argc, char **argv, struct command *command, int fd; const char *log_page_version = "Log Page Version: 0 = vendor, 1 = WDC"; const char *log_page_mask = "Log Page Mask, comma separated list: 0xC0, 0xC1, 0xCA, 0xD0"; + const char *namespace_id = "desired namespace id"; int ret = 0; int uuid_index = 0; int page_mask = 0, num, i; @@ -5094,6 +5462,7 @@ static int wdc_vs_smart_add_log(int argc, char **argv, struct command *command, char *output_format; __u8 log_page_version; char *log_page_mask; + __u32 namespace_id; }; struct config cfg = { @@ -5101,6 +5470,7 @@ static int wdc_vs_smart_add_log(int argc, char **argv, struct command *command, .output_format = "normal", .log_page_version = 0, .log_page_mask = "", + .namespace_id = NVME_NSID_ALL, }; OPT_ARGS(opts) = { @@ -5108,6 +5478,7 @@ static int wdc_vs_smart_add_log(int argc, char **argv, struct command *command, OPT_FMT("output-format", 'o', &cfg.output_format, "Output Format: normal|json"), OPT_BYTE("log-page-version", 'l', &cfg.log_page_version, log_page_version), OPT_LIST("log-page-mask", 'p', &cfg.log_page_mask, log_page_mask), + 
OPT_UINT("namespace-id", 'n', &cfg.namespace_id, namespace_id), OPT_END() }; @@ -5170,7 +5541,7 @@ static int wdc_vs_smart_add_log(int argc, char **argv, struct command *command, if (((capabilities & WDC_DRIVE_CAP_C0_LOG_PAGE) == WDC_DRIVE_CAP_C0_LOG_PAGE) && (page_mask & WDC_C0_PAGE_MASK)) { /* Get 0xC0 log page if possible. */ - ret = wdc_get_c0_log_page(fd, cfg.output_format, uuid_index); + ret = wdc_get_c0_log_page(fd, cfg.output_format, uuid_index, cfg.namespace_id); if (ret) fprintf(stderr, "ERROR : WDC : Failure reading the C0 Log Page, ret = %d\n", ret); } @@ -5201,6 +5572,48 @@ out: return ret; } +static int wdc_get_latency_monitor_log(int argc, char **argv, struct command *command, + struct plugin *plugin) +{ + const char *desc = "Retrieve latency monitor log data."; + int fd; + int ret = 0; + __u64 capabilities = 0; + + struct config { + char *output_format; + }; + + struct config cfg = { + .output_format = "normal", + }; + + OPT_ARGS(opts) = { + OPT_FMT("output-format", 'o', &cfg.output_format, "Output Format: normal|json"), + OPT_END() + }; + + fd = parse_and_open(argc, argv, desc, opts); + if (fd < 0) + return fd; + + capabilities = wdc_get_drive_capabilities(fd); + + if ((capabilities & WDC_DRIVE_CAP_C3_LOG_PAGE) == 0) { + fprintf(stderr, "ERROR : WDC: unsupported device for this command\n"); + ret = -1; + goto out; + } + + ret = wdc_get_c3_log_page(fd, cfg.output_format); + if (ret) + fprintf(stderr, "ERROR : WDC : Failure reading the C3 Log Page, ret = %d\n", ret); + +out: + + return ret; +} + static int wdc_do_clear_pcie_correctable_errors(int fd) { int ret; @@ -7849,6 +8262,8 @@ static int wdc_capabilities(int argc, char **argv, capabilities & WDC_DRIVE_CAP_C0_LOG_PAGE ? "Supported" : "Not Supported"); printf("--C1 Log Page : %s\n", capabilities & WDC_DRIVE_CAP_C1_LOG_PAGE ? "Supported" : "Not Supported"); + printf("--C3 Log Page : %s\n", + capabilities & WDC_DRIVE_CAP_C3_LOG_PAGE ? "Supported" : "Not Supported"); printf("--CA Log Page : %s\n", capabilities & WDC_DRIVE_CAP_CA_LOG_PAGE ? 
"Supported" : "Not Supported"); printf("--D0 Log Page : %s\n", diff --git a/plugins/wdc/wdc-nvme.h b/plugins/wdc/wdc-nvme.h index 29ff6a0..e046007 100644 --- a/plugins/wdc/wdc-nvme.h +++ b/plugins/wdc/wdc-nvme.h @@ -4,7 +4,7 @@ #if !defined(WDC_NVME) || defined(CMD_HEADER_MULTI_READ) #define WDC_NVME -#define WDC_PLUGIN_VERSION "1.14.1" +#define WDC_PLUGIN_VERSION "1.15.4" #include "cmd.h" PLUGIN(NAME("wdc", "Western Digital vendor specific extensions", WDC_PLUGIN_VERSION), @@ -36,6 +36,7 @@ PLUGIN(NAME("wdc", "Western Digital vendor specific extensions", WDC_PLUGIN_VERS ENTRY("capabilities", "WDC Device Capabilities", wdc_capabilities) ENTRY("cloud-SSD-plugin-version", "WDC Cloud SSD Plugin Version", wdc_cloud_ssd_plugin_version) ENTRY("vs-pcie-stats", "WDC VS PCIE Statistics", wdc_vs_pcie_stats) + ENTRY("get-latency-monitor-log", "WDC Get Latency Monitor Log Page", wdc_get_latency_monitor_log) ) ); diff --git a/plugins/zns/zns.c b/plugins/zns/zns.c index e558317..2f765b6 100644 --- a/plugins/zns/zns.c +++ b/plugins/zns/zns.c @@ -565,13 +565,25 @@ static int report_zones(int argc, char **argv, struct command *cmd, struct plugi const char *state = "state of zones to list"; const char *ext = "set to use the extended report zones"; const char *part = "set to use the partial report"; - const char *human_readable = "show report zones in readable format"; + const char *verbose = "show report zones verbosity"; enum nvme_print_flags flags; int fd, zdes = 0, err = -1; __u32 report_size; void *report; bool huge = false; + struct nvme_zone_report *buff; + + unsigned int nr_zones_chunks = 1024, /* 1024 entries * 64 bytes per entry = 64k byte transfer */ + nr_zones_retrieved = 0, + nr_zones, + offset, + log_len; + int total_nr_zones = 0; + struct nvme_zns_id_ns id_zns; + struct nvme_id_ns id_ns; + uint8_t lbaf; + __le64 zsze; struct config { char *output_format; @@ -579,7 +591,7 @@ static int report_zones(int argc, char **argv, struct command *cmd, struct plugi __u32 namespace_id; int num_descs; int state; - int human_readable; + int verbose; bool extended; bool partial; }; @@ -595,7 +607,7 @@ static int report_zones(int argc, char **argv, struct command *cmd, struct plugi OPT_UINT("descs", 'd', &cfg.num_descs, num_descs), OPT_UINT("state", 'S', &cfg.state, state), OPT_FMT("output-format", 'o', &cfg.output_format, output_format), - OPT_FLAG("human-readable",'H', &cfg.human_readable, human_readable), + OPT_FLAG("verbose", 'v', &cfg.verbose, verbose), OPT_FLAG("extended", 'e', &cfg.extended, ext), OPT_FLAG("partial", 'p', &cfg.partial, part), OPT_END() @@ -608,7 +620,7 @@ static int report_zones(int argc, char **argv, struct command *cmd, struct plugi flags = validate_output_format(cfg.output_format); if (flags < 0) goto close_fd; - if (cfg.human_readable) + if (cfg.verbose) flags |= VERBOSE; if (!cfg.namespace_id) { @@ -627,23 +639,53 @@ static int report_zones(int argc, char **argv, struct command *cmd, struct plugi } } - if (cfg.num_descs == -1) { - struct nvme_zone_report r; + err = nvme_identify_ns(fd, cfg.namespace_id, false, &id_ns); + if (err) { + nvme_show_status(err); + goto close_fd; + } - err = nvme_zns_report_zones(fd, cfg.namespace_id, 0, - 0, cfg.state, 0, sizeof(r), &r); - if (err > 0) { - nvme_show_status(err); - goto close_fd; - } else if (err < 0) { - perror("zns report-zones"); - goto close_fd; - } - cfg.num_descs = le64_to_cpu(r.nr_zones); + err = nvme_zns_identify_ns(fd, cfg.namespace_id, &id_zns); + if (!err) { + /* get zsze field from zns id ns data - needed for offset 
calculation */ + lbaf = id_ns.flbas & NVME_NS_FLBAS_LBA_MASK; + zsze = le64_to_cpu(id_zns.lbafe[lbaf].zsze); + } + else { + nvme_show_status(err); + goto close_fd; + } + + log_len = sizeof(struct nvme_zone_report); + buff = calloc(1, log_len); + if (!buff) { + err = -ENOMEM; + goto close_fd; + } + + err = nvme_zns_report_zones(fd, cfg.namespace_id, 0, + 0, cfg.state, 0, log_len, buff); + if (err > 0) { + nvme_show_status(err); + goto free_buff; + } + else if (err < 0) { + perror("zns report-zones"); + goto free_buff; + } + + total_nr_zones = le64_to_cpu(buff->nr_zones); + + if (cfg.num_descs == -1) { + cfg.num_descs = total_nr_zones; } - report_size = sizeof(struct nvme_zone_report) + cfg.num_descs * - (sizeof(struct nvme_zns_desc) + cfg.num_descs * zdes); + nr_zones = cfg.num_descs; + if (nr_zones < nr_zones_chunks) + nr_zones_chunks = nr_zones; + + log_len = sizeof(struct nvme_zone_report) + ((sizeof(struct nvme_zns_desc) * nr_zones_chunks) + (nr_zones_chunks * zdes)); + report_size = log_len; report = nvme_alloc(report_size, &huge); if (!report) { @@ -652,17 +694,37 @@ static int report_zones(int argc, char **argv, struct command *cmd, struct plugi goto close_fd; } - err = nvme_zns_report_zones(fd, cfg.namespace_id, cfg.zslba, - cfg.extended, cfg.state, cfg.partial, report_size, report); - if (!err) - nvme_show_zns_report_zones(report, cfg.num_descs, zdes, - report_size, flags); - else if (err > 0) - nvme_show_status(err); - else - perror("zns report-zones"); + offset = cfg.zslba; + printf("nr_zones: %"PRIu64"\n", (uint64_t)le64_to_cpu(total_nr_zones)); + + while (nr_zones_retrieved < nr_zones) { + if (nr_zones_retrieved >= nr_zones) + break; + + if (nr_zones_retrieved + nr_zones_chunks > nr_zones) { + nr_zones_chunks = nr_zones - nr_zones_retrieved; + log_len = sizeof(struct nvme_zone_report) + ((sizeof(struct nvme_zns_desc) * nr_zones_chunks) + (nr_zones_chunks * zdes)); + } + + err = nvme_zns_report_zones(fd, cfg.namespace_id, offset, + cfg.extended, cfg.state, cfg.partial, log_len, report); + if (err > 0) { + nvme_show_status(err); + break; + } + + if (!err) + nvme_show_zns_report_zones(report, nr_zones_chunks, zdes, + log_len, flags); + + nr_zones_retrieved += nr_zones_chunks; + offset = (nr_zones_retrieved * zsze); + } nvme_free(report, huge); + +free_buff: + free(buff); close_fd: close(fd); return nvme_status_to_errno(err, false); |
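
Note on the zns.c change: report_zones() now pulls zone descriptors in chunks of up to 1024 entries (the in-tree comment puts this at 1024 * 64 bytes = 64 KiB per transfer) and advances the start LBA by the number of zones already retrieved times the zone size (zsze) read from the Identify ZNS namespace data. The loop below is a self-contained sketch of just that buffer-size and offset arithmetic; the 64-byte report-header size, example zone count, and example zsze are illustrative assumptions rather than values taken from this diff.

#include <stdint.h>
#include <stdio.h>

#define ZNS_DESC_SIZE    64u   /* per-entry size quoted in the diff comment */
#define REPORT_HDR_SIZE  64u   /* assumed size of the report-zones header */

int main(void)
{
    uint32_t total_zones = 3000;   /* example: zones reported by the device */
    uint64_t zsze = 0x8000;        /* example zone size in LBAs (ZSZE) */
    uint32_t chunk = 1024;         /* zones fetched per Report Zones call */
    uint32_t retrieved = 0;

    while (retrieved < total_zones) {
        if (retrieved + chunk > total_zones)
            chunk = total_zones - retrieved;   /* shrink the final transfer */

        uint32_t log_len = REPORT_HDR_SIZE + chunk * ZNS_DESC_SIZE;
        uint64_t slba = (uint64_t)retrieved * zsze;   /* next starting LBA */

        printf("report %u zones from slba 0x%llx (%u-byte buffer)\n",
               chunk, (unsigned long long)slba, log_len);

        retrieved += chunk;
    }
    return 0;
}
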