summaryrefslogtreecommitdiffstats
path: root/plugins
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--plugins/memblaze/memblaze-nvme.c882
-rw-r--r--plugins/memblaze/memblaze-nvme.h42
-rw-r--r--plugins/meson.build3
-rw-r--r--plugins/micron/micron-nvme.c36
-rw-r--r--plugins/nbft/nbft-plugin.c22
-rw-r--r--plugins/netapp/netapp-nvme.c43
-rw-r--r--plugins/ocp/ocp-clear-features.c2
-rw-r--r--plugins/ocp/ocp-fw-activation-history.c6
-rw-r--r--plugins/ocp/ocp-nvme.c1526
-rw-r--r--plugins/ocp/ocp-nvme.h3
-rw-r--r--plugins/ocp/ocp-utils.c27
-rw-r--r--plugins/ocp/ocp-utils.h22
-rw-r--r--plugins/sed/sedopal_cmd.c19
-rw-r--r--plugins/solidigm/solidigm-garbage-collection.c4
-rw-r--r--plugins/solidigm/solidigm-latency-tracking.c4
-rw-r--r--plugins/solidigm/solidigm-log-page-dir.c74
-rw-r--r--plugins/solidigm/solidigm-nvme.h2
-rw-r--r--plugins/solidigm/solidigm-smart.c4
-rw-r--r--plugins/solidigm/solidigm-temp-stats.c34
-rw-r--r--plugins/solidigm/solidigm-util.c33
-rw-r--r--plugins/solidigm/solidigm-util.h5
-rw-r--r--plugins/ssstc/ssstc-nvme.c430
-rw-r--r--plugins/ssstc/ssstc-nvme.h16
-rw-r--r--plugins/wdc/wdc-nvme.c327
-rw-r--r--plugins/wdc/wdc-nvme.h2
-rw-r--r--plugins/wdc/wdc-utils.c2
26 files changed, 2369 insertions, 1201 deletions
diff --git a/plugins/memblaze/memblaze-nvme.c b/plugins/memblaze/memblaze-nvme.c
index b215125..c189f1b 100644
--- a/plugins/memblaze/memblaze-nvme.c
+++ b/plugins/memblaze/memblaze-nvme.c
@@ -65,7 +65,7 @@ static int compare_fw_version(const char *fw1, const char *fw2)
* 0: old memblaze format
* *******************************************************/
#define MEMBLAZE_FORMAT (0)
-#define INTEL_FORMAT (1)
+#define INTEL_FORMAT (1)
/* 2.13 = papaya */
#define IS_PAPAYA(str) (!strcmp(str, "2.13"))
@@ -107,14 +107,15 @@ static __u64 raw_2_u64(const __u8 *buf, size_t len)
return le64_to_cpu(val);
}
-static void get_memblaze_new_smart_info(struct nvme_p4_smart_log *smart, int index, __u8 *nm_val, __u8 *raw_val)
+static void get_memblaze_new_smart_info(struct nvme_p4_smart_log *smart, int index, __u8 *nm_val,
+ __u8 *raw_val)
{
memcpy(nm_val, smart->itemArr[index].nmVal, NM_SIZE);
memcpy(raw_val, smart->itemArr[index].rawVal, RAW_SIZE);
}
-static void show_memblaze_smart_log_new(struct nvme_memblaze_smart_log *s,
- unsigned int nsid, const char *devname)
+static void show_memblaze_smart_log_new(struct nvme_memblaze_smart_log *s, unsigned int nsid,
+ const char *devname)
{
struct nvme_p4_smart_log *smart = (struct nvme_p4_smart_log *)s;
__u8 *nm = malloc(NM_SIZE * sizeof(__u8));
@@ -130,7 +131,8 @@ static void show_memblaze_smart_log_new(struct nvme_memblaze_smart_log *s,
return;
}
- printf("%s:%s %s:%x\n", "Additional Smart Log for NVME device", devname, "namespace-id", nsid);
+ printf("%s:%s %s:%x\n", "Additional Smart Log for NVME device", devname, "namespace-id",
+ nsid);
printf("%-34s%-11s%s\n", "key", "normalized", "raw");
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_PROGRAM_FAIL, nm, raw);
@@ -140,42 +142,51 @@ static void show_memblaze_smart_log_new(struct nvme_memblaze_smart_log *s,
printf("%-32s: %3d%% %"PRIu64"\n", "erase_fail_count", *nm, int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_WEARLEVELING_COUNT, nm, raw);
- printf("%-31s : %3d%% %s%u%s%u%s%u\n", "wear_leveling", *nm,
- "min: ", *(__u16 *)raw, ", max: ", *(__u16 *)(raw+2), ", avg: ", *(__u16 *)(raw+4));
+ printf("%-31s : %3d%% %s%u%s%u%s%u\n", "wear_leveling", *nm, "min: ", *(__u16 *)raw,
+ ", max: ", *(__u16 *)(raw+2), ", avg: ", *(__u16 *)(raw+4));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_E2E_DECTECTION_COUNT, nm, raw);
- printf("%-31s: %3d%% %"PRIu64"\n", "end_to_end_error_detection_count", *nm, int48_to_long(raw));
+ printf("%-31s: %3d%% %"PRIu64"\n", "end_to_end_error_detection_count", *nm,
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_PCIE_CRC_ERR_COUNT, nm, raw);
printf("%-32s: %3d%% %"PRIu64"\n", "crc_error_count", *nm, int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_TIMED_WORKLOAD_MEDIA_WEAR, nm, raw);
- printf("%-32s: %3d%% %.3f%%\n", "timed_workload_media_wear", *nm, ((float)int48_to_long(raw))/1000);
+ printf("%-32s: %3d%% %.3f%%\n", "timed_workload_media_wear", *nm,
+ ((float)int48_to_long(raw))/1000);
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_TIMED_WORKLOAD_HOST_READ, nm, raw);
- printf("%-32s: %3d%% %"PRIu64"%%\n", "timed_workload_host_reads", *nm, int48_to_long(raw));
+ printf("%-32s: %3d%% %"PRIu64"%%\n", "timed_workload_host_reads", *nm,
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_TIMED_WORKLOAD_TIMER, nm, raw);
- printf("%-32s: %3d%% %"PRIu64"%s\n", "timed_workload_timer", *nm, int48_to_long(raw), " min");
+ printf("%-32s: %3d%% %"PRIu64"%s\n", "timed_workload_timer", *nm,
+ int48_to_long(raw), " min");
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_THERMAL_THROTTLE_STATUS, nm, raw);
- printf("%-32s: %3d%% %u%%%s%"PRIu64"\n", "thermal_throttle_status", *nm,
- *raw, ", cnt: ", int48_to_long(raw+1));
+ printf("%-32s: %3d%% %u%%%s%"PRIu64"\n", "thermal_throttle_status", *nm, *raw,
+ ", cnt: ", int48_to_long(raw+1));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_RETRY_BUFF_OVERFLOW_COUNT, nm, raw);
- printf("%-32s: %3d%% %"PRIu64"\n", "retry_buffer_overflow_count", *nm, int48_to_long(raw));
+ printf("%-32s: %3d%% %"PRIu64"\n", "retry_buffer_overflow_count", *nm,
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_PLL_LOCK_LOSS_COUNT, nm, raw);
- printf("%-32s: %3d%% %"PRIu64"\n", "pll_lock_loss_count", *nm, int48_to_long(raw));
+ printf("%-32s: %3d%% %"PRIu64"\n", "pll_lock_loss_count", *nm,
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_TOTAL_WRITE, nm, raw);
- printf("%-32s: %3d%% %s%"PRIu64"\n", "nand_bytes_written", *nm, "sectors: ", int48_to_long(raw));
+ printf("%-32s: %3d%% %s%"PRIu64"\n", "nand_bytes_written", *nm, "sectors: ",
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_HOST_WRITE, nm, raw);
- printf("%-32s: %3d%% %s%"PRIu64"\n", "host_bytes_written", *nm, "sectors: ", int48_to_long(raw));
+ printf("%-32s: %3d%% %s%"PRIu64"\n", "host_bytes_written", *nm, "sectors: ",
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_SYSTEM_AREA_LIFE_LEFT, nm, raw);
- printf("%-32s: %3d%% %"PRIu64"\n", "system_area_life_left", *nm, int48_to_long(raw));
+ printf("%-32s: %3d%% %"PRIu64"\n", "system_area_life_left", *nm,
+ int48_to_long(raw));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_TOTAL_READ, nm, raw);
printf("%-32s: %3d%% %"PRIu64"\n", "total_read", *nm, int48_to_long(raw));
@@ -189,8 +200,8 @@ static void show_memblaze_smart_log_new(struct nvme_memblaze_smart_log *s,
"max: ", *(__u16 *)raw, ", min: ", *(__u16 *)(raw+2), ", curr: ", *(__u16 *)(raw+4));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_TEMPT_SINCE_BOOTUP, nm, raw);
- printf("%-32s: %3d%% %s%u%s%u%s%u\n", "tempt_since_bootup", *nm, "max: ", *(__u16 *)raw,
- ", min: ", *(__u16 *)(raw+2), ", curr: ", *(__u16 *)(raw+4));
+ printf("%-32s: %3d%% %s%u%s%u%s%u\n", "tempt_since_bootup", *nm, "max: ",
+ *(__u16 *)raw, ", min: ", *(__u16 *)(raw+2), ", curr: ", *(__u16 *)(raw+4));
get_memblaze_new_smart_info(smart, RAISIN_SI_VD_READ_FAIL, nm, raw);
printf("%-32s: %3d%% %"PRIu64"\n", "read_fail_count", *nm, int48_to_long(raw));
@@ -319,23 +330,24 @@ static void show_memblaze_smart_log_old(struct nvme_memblaze_smart_log *smart,
}
get_memblaze_new_smart_info(s, PROGRAM_FAIL, nm, raw);
printf("%-32s : %3d%% %"PRIu64"\n",
- "program_fail_count", *nm, int48_to_long(raw));
+ "program_fail_count", *nm, int48_to_long(raw));
get_memblaze_new_smart_info(s, ERASE_FAIL, nm, raw);
printf("%-32s : %3d%% %"PRIu64"\n",
- "erase_fail_count", *nm, int48_to_long(raw));
+ "erase_fail_count", *nm, int48_to_long(raw));
get_memblaze_new_smart_info(s, WEARLEVELING_COUNT, nm, raw);
printf("%-31s : %3d%% %s%u%s%u%s%u\n",
- "wear_leveling", *nm, "min: ", *(__u16 *)raw, ", max: ", *(__u16 *)(raw+2), ", avg: ", *(__u16 *)(raw+4));
+ "wear_leveling", *nm, "min: ", *(__u16 *)raw, ", max: ", *(__u16 *)(raw+2),
+ ", avg: ", *(__u16 *)(raw+4));
get_memblaze_new_smart_info(s, TOTAL_WRITE, nm, raw);
printf("%-32s : %3d%% %"PRIu64"\n",
- "nand_bytes_written", *nm, 32*int48_to_long(raw));
+ "nand_bytes_written", *nm, 32*int48_to_long(raw));
get_memblaze_new_smart_info(s, HOST_WRITE, nm, raw);
printf("%-32s : %3d%% %"PRIu64"\n",
- "host_bytes_written", *nm, 32*int48_to_long(raw));
+ "host_bytes_written", *nm, 32*int48_to_long(raw));
free(nm);
free(raw);
@@ -402,14 +414,15 @@ int parse_params(char *str, int number, ...)
return 0;
}
-static int mb_get_additional_smart_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+static int mb_get_additional_smart_log(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
{
struct nvme_memblaze_smart_log smart_log;
char *desc =
- "Get Memblaze vendor specific additional smart log (optionally, for the specified namespace), and show it.";
+ "Get Memblaze vendor specific additional smart log, and show it.";
const char *namespace = "(optional) desired namespace";
const char *raw = "dump output in binary format";
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
struct config {
__u32 namespace_id;
bool raw_binary;
@@ -442,7 +455,6 @@ static int mb_get_additional_smart_log(int argc, char **argv, struct command *cm
if (err > 0)
nvme_show_status(err);
- dev_close(dev);
return err;
}
@@ -460,12 +472,13 @@ static char *mb_feature_to_string(int feature)
}
}
-static int mb_get_powermanager_status(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+static int mb_get_powermanager_status(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
{
const char *desc = "Get Memblaze power management ststus\n (value 0 - 25w, 1 - 20w, 2 - 15w)";
__u32 result;
__u32 feature_id = MB_FEAT_POWER_MGMT;
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
int err;
OPT_ARGS(opts) = {
@@ -477,16 +490,16 @@ static int mb_get_powermanager_status(int argc, char **argv, struct command *cmd
return err;
struct nvme_get_features_args args = {
- .args_size = sizeof(args),
- .fd = dev_fd(dev),
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
.fid = feature_id,
.nsid = 0,
.sel = 0,
.cdw11 = 0,
.uuidx = 0,
- .data_len = 0,
+ .data_len = 0,
.data = NULL,
- .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
.result = &result,
};
err = nvme_get_features(&args);
@@ -497,16 +510,16 @@ static int mb_get_powermanager_status(int argc, char **argv, struct command *cmd
mb_feature_to_string(feature_id), nvme_select_to_string(0), result);
else if (err > 0)
nvme_show_status(err);
- dev_close(dev);
return err;
}
-static int mb_set_powermanager_status(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+static int mb_set_powermanager_status(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
{
const char *desc = "Set Memblaze power management status\n (value 0 - 25w, 1 - 20w, 2 - 15w)";
const char *value = "new value of feature (required)";
const char *save = "specifies that the controller shall save the attribute";
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
__u32 result;
int err;
@@ -533,8 +546,8 @@ static int mb_set_powermanager_status(int argc, char **argv, struct command *cmd
return err;
struct nvme_set_features_args args = {
- .args_size = sizeof(args),
- .fd = dev_fd(dev),
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
.fid = cfg.feature_id,
.nsid = 0,
.cdw11 = cfg.value,
@@ -542,9 +555,9 @@ static int mb_set_powermanager_status(int argc, char **argv, struct command *cmd
.save = cfg.save,
.uuidx = 0,
.cdw15 = 0,
- .data_len = 0,
+ .data_len = 0,
.data = NULL,
- .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
.result = &result,
};
err = nvme_set_features(&args);
@@ -556,14 +569,14 @@ static int mb_set_powermanager_status(int argc, char **argv, struct command *cmd
else if (err > 0)
nvme_show_status(err);
- dev_close(dev);
return err;
}
-#define P2MIN (1)
-#define P2MAX (5000)
-#define MB_FEAT_HIGH_LATENCY_VALUE_SHIFT (15)
-static int mb_set_high_latency_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+#define P2MIN (1)
+#define P2MAX (5000)
+#define MB_FEAT_HIGH_LATENCY_VALUE_SHIFT (15)
+static int mb_set_high_latency_log(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
{
const char *desc = "Set Memblaze high latency log\n"
" input parameter p1,p2\n"
@@ -571,7 +584,7 @@ static int mb_set_high_latency_log(int argc, char **argv, struct command *cmd, s
" p2 value: 1 .. 5000 ms";
const char *param = "input parameters";
int param1 = 0, param2 = 0;
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
__u32 result;
int err;
@@ -598,12 +611,10 @@ static int mb_set_high_latency_log(int argc, char **argv, struct command *cmd, s
if (parse_params(cfg.param, 2, &param1, &param2)) {
printf("setfeature: invalid formats %s\n", cfg.param);
- dev_close(dev);
return -EINVAL;
}
if ((param1 == 1) && (param2 < P2MIN || param2 > P2MAX)) {
printf("setfeature: invalid high io latency threshold %d\n", param2);
- dev_close(dev);
return -EINVAL;
}
cfg.value = (param1 << MB_FEAT_HIGH_LATENCY_VALUE_SHIFT) | param2;
@@ -632,7 +643,6 @@ static int mb_set_high_latency_log(int argc, char **argv, struct command *cmd, s
else if (err > 0)
nvme_show_status(err);
- dev_close(dev);
return err;
}
@@ -672,7 +682,7 @@ static int find_deadbeef(char *buf)
return 0;
}
-#define TIME_STR_SIZE (44)
+#define TIME_STR_SIZE (44)
static int glp_high_latency(FILE *fdi, char *buf, int buflen, int print)
{
struct log_page_high_latency *logEntry;
@@ -723,11 +733,12 @@ static int glp_high_latency(FILE *fdi, char *buf, int buflen, int print)
return 1;
}
-static int mb_high_latency_log_print(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+static int mb_high_latency_log_print(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
{
const char *desc = "Get Memblaze high latency log";
char buf[LOG_PAGE_SIZE];
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
FILE *fdi = NULL;
int err;
@@ -742,14 +753,13 @@ static int mb_high_latency_log_print(int argc, char **argv, struct command *cmd,
fdi = fopen(FID_C3_LOG_FILENAME, "w+");
glp_high_latency_show_bar(fdi, DO_PRINT_FLAG);
- err = nvme_get_log_simple(dev_fd(dev), GLP_ID_VU_GET_HIGH_LATENCY_LOG,
- sizeof(buf), &buf);
+ err = nvme_get_log_simple(dev_fd(dev), GLP_ID_VU_GET_HIGH_LATENCY_LOG, sizeof(buf), &buf);
while (1) {
if (!glp_high_latency(fdi, buf, LOG_PAGE_SIZE, DO_PRINT_FLAG))
break;
- err = nvme_get_log_simple(dev_fd(dev), GLP_ID_VU_GET_HIGH_LATENCY_LOG,
- sizeof(buf), &buf);
+ err = nvme_get_log_simple(dev_fd(dev), GLP_ID_VU_GET_HIGH_LATENCY_LOG, sizeof(buf),
+ &buf);
if (err) {
nvme_show_status(err);
break;
@@ -758,7 +768,6 @@ static int mb_high_latency_log_print(int argc, char **argv, struct command *cmd,
if (fdi)
fclose(fdi);
- dev_close(dev);
return err;
}
@@ -787,7 +796,7 @@ static int mb_selective_download(int argc, char **argv, struct command *cmd, str
int xfer = 4096;
void *fw_buf;
int selectNo, fw_fd, fw_size, err, offset = 0;
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
struct stat sb;
int i;
@@ -901,12 +910,11 @@ out_free:
out_close:
close(fw_fd);
out:
- dev_close(dev);
return err;
}
static void ioLatencyHistogramOutput(FILE *fd, int index, int start, int end, char *unit0,
- char *unit1, unsigned int *pHistogram, int print)
+ char *unit1, unsigned int *pHistogram, int print)
{
int len;
char string[64], subString0[12], subString1[12];
@@ -917,8 +925,7 @@ static void ioLatencyHistogramOutput(FILE *fd, int index, int start, int end, ch
else
snprintf(subString1, sizeof(subString1), "%s", "+INF");
len = snprintf(string, sizeof(string), "%-11d %-11s %-11s %-11u\n",
- index, subString0, subString1,
- pHistogram[index]);
+ index, subString0, subString1, pHistogram[index]);
fwrite(string, 1, len, fd);
if (print)
printf("%s", string);
@@ -960,7 +967,8 @@ int io_latency_histogram(char *file, char *buf, int print, int logid)
strcpy(unit[0], "ms");
strcpy(unit[1], "ms");
for (i = 1; i < 32; i++, index++)
- ioLatencyHistogramOutput(fdi, index, i, i + 1, unit[0], unit[1], (unsigned int *)buf, print);
+ ioLatencyHistogramOutput(fdi, index, i, i + 1, unit[0], unit[1],
+ (unsigned int *)buf, print);
for (i = 1; i < 32; i++, index++) {
if (i == 31) {
@@ -976,9 +984,11 @@ int io_latency_histogram(char *file, char *buf, int print, int logid)
strcpy(unit[0], "s");
strcpy(unit[1], "s");
for (i = 1; i < 4; i++, index++)
- ioLatencyHistogramOutput(fdi, index, i, i + 1, unit[0], unit[1], (unsigned int *)buf, print);
+ ioLatencyHistogramOutput(fdi, index, i, i + 1, unit[0], unit[1],
+ (unsigned int *)buf, print);
- ioLatencyHistogramOutput(fdi, index, i, 0x7FFFFFFF, unit[0], unit[1], (unsigned int *)buf, print);
+ ioLatencyHistogramOutput(fdi, index, i, 0x7FFFFFFF, unit[0], unit[1],
+ (unsigned int *)buf, print);
} else {
fPRINT_PARAM1("Unsupported io latency histogram revision\n");
}
@@ -993,7 +1003,7 @@ static int mb_lat_stats_log_print(int argc, char **argv, struct command *cmd, st
char stats[LOG_PAGE_SIZE];
char f1[] = FID_C1_LOG_FILENAME;
char f2[] = FID_C2_LOG_FILENAME;
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
int err;
const char *desc = "Get Latency Statistics log and show it.";
@@ -1015,8 +1025,7 @@ static int mb_lat_stats_log_print(int argc, char **argv, struct command *cmd, st
if (err)
return err;
- err = nvme_get_log_simple(dev_fd(dev), cfg.write ? 0xc2 : 0xc1,
- sizeof(stats), &stats);
+ err = nvme_get_log_simple(dev_fd(dev), cfg.write ? 0xc2 : 0xc1, sizeof(stats), &stats);
if (!err)
io_latency_histogram(cfg.write ? f2 : f1, stats, DO_PRINT_FLAG,
cfg.write ? GLP_ID_VU_GET_WRITE_LATENCY_HISTOGRAM :
@@ -1024,14 +1033,14 @@ static int mb_lat_stats_log_print(int argc, char **argv, struct command *cmd, st
else
nvme_show_status(err);
- dev_close(dev);
return err;
}
-static int memblaze_clear_error_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+static int memblaze_clear_error_log(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
{
char *desc = "Clear Memblaze devices error log.";
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
int err;
__u32 result;
@@ -1043,9 +1052,9 @@ static int memblaze_clear_error_log(int argc, char **argv, struct command *cmd,
};
struct config cfg = {
- .feature_id = 0xf7,
- .value = 0x534d0001,
- .save = 0,
+ .feature_id = 0xf7,
+ .value = 0x534d0001,
+ .save = 0,
};
OPT_ARGS(opts) = {
@@ -1075,16 +1084,15 @@ static int memblaze_clear_error_log(int argc, char **argv, struct command *cmd,
if (err < 0)
perror("set-feature");
if (!err)
- printf("set-feature:%02x (%s), value:%#08x\n", cfg.feature_id, mb_feature_to_string(cfg.feature_id), cfg.value);
+ printf("set-feature:%02x (%s), value:%#08x\n", cfg.feature_id,
+ mb_feature_to_string(cfg.feature_id), cfg.value);
else if (err > 0)
nvme_show_status(err);
- dev_close(dev);
return err;
}
-static int mb_set_lat_stats(int argc, char **argv,
- struct command *command, struct plugin *plugin)
+static int mb_set_lat_stats(int argc, char **argv, struct command *command, struct plugin *plugin)
{
const char *desc = (
"Enable/Disable Latency Statistics Tracking.\n"
@@ -1098,7 +1106,7 @@ static int mb_set_lat_stats(int argc, char **argv,
const __u32 cdw12 = 0x0;
const __u32 data_len = 32;
const __u32 save = 0;
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
void *buf = NULL;
__u32 result;
int err;
@@ -1168,12 +1176,10 @@ static int mb_set_lat_stats(int argc, char **argv,
case None:
err = nvme_get_features(&args_get);
if (!err) {
- printf(
- "Latency Statistics Tracking (FID 0x%X) is currently (%i).\n",
- fid, result);
+ printf("Latency Statistics Tracking (FID 0x%X) is currently (%i).\n", fid,
+ result);
} else {
printf("Could not read feature id 0xE2.\n");
- dev_close(dev);
return err;
}
break;
@@ -1186,15 +1192,13 @@ static int mb_set_lat_stats(int argc, char **argv,
perror("Enable latency tracking");
fprintf(stderr, "Command failed while parsing.\n");
} else {
- printf("Successfully set enable bit for FID (0x%X) to %i.\n",
- 0xe2, option);
+ printf("Successfully set enable bit for FID (0x%X) to %i.\n", 0xe2, option);
}
break;
default:
printf("%d not supported.\n", option);
err = EINVAL;
}
- dev_close(dev);
return err;
}
@@ -1238,17 +1242,17 @@ struct __packed smart_log_add_item_12 {
uint8_t rsvd1;
union {
struct wear_level wear_level; // 0xad
- struct temp_since_born { // 0xe7
+ struct __packed temp_since_born { // 0xe7
__le16 max;
__le16 min;
__le16 curr;
} temp_since_born;
- struct power_consumption { // 0xe8
+ struct __packed power_consumption { // 0xe8
__le16 max;
__le16 min;
__le16 curr;
} power_consumption;
- struct temp_since_power_on { // 0xaf
+ struct __packed temp_since_power_on { // 0xaf
__le16 max;
__le16 min;
__le16 curr;
@@ -1268,10 +1272,10 @@ struct __packed smart_log_add_item_10 {
uint8_t rsvd[2];
};
-struct smart_log_add {
+struct __packed smart_log_add {
union {
union {
- struct smart_log_add_v0 {
+ struct __packed smart_log_add_v0 {
struct smart_log_add_item_12 program_fail_count;
struct smart_log_add_item_12 erase_fail_count;
struct smart_log_add_item_12 wear_leveling_count;
@@ -1300,7 +1304,7 @@ struct smart_log_add {
};
union {
- struct smart_log_add_v2 {
+ struct __packed smart_log_add_v2 {
struct smart_log_add_item_12 program_fail_count;
struct smart_log_add_item_12 erase_fail_count;
struct smart_log_add_item_12 wear_leveling_count;
@@ -1322,7 +1326,7 @@ struct smart_log_add {
struct smart_log_add_item_12 xor_fail_count;
struct smart_log_add_item_12 xor_invoked_count;
struct smart_log_add_item_12 inflight_read_io_cmd;
- struct smart_log_add_item_12 flash_error_media_count;
+ struct smart_log_add_item_12 inflight_write_io_cmd;
struct smart_log_add_item_12 nand_bytes_read;
struct smart_log_add_item_12 temp_since_born;
struct smart_log_add_item_12 power_consumption;
@@ -1334,7 +1338,7 @@ struct smart_log_add {
};
union {
- struct smart_log_add_v3 {
+ struct __packed smart_log_add_v3 {
struct smart_log_add_item_10 program_fail_count;
struct smart_log_add_item_10 erase_fail_count;
struct smart_log_add_item_10 wear_leveling_count;
@@ -1401,33 +1405,33 @@ static void smart_log_add_v0_print(struct smart_log_add_item_12 *item, int item_
switch (item->id) {
case 0xad:
printf("min: %d, max: %d, avg: %d\n",
- le16_to_cpu(item->wear_level.min),
- le16_to_cpu(item->wear_level.max),
- le16_to_cpu(item->wear_level.avg));
+ le16_to_cpu(item->wear_level.min),
+ le16_to_cpu(item->wear_level.max),
+ le16_to_cpu(item->wear_level.avg));
break;
case 0xe7:
printf("max: %d °C (%d K), min: %d °C (%d K), curr: %d °C (%d K)\n",
- K2C(le16_to_cpu(item->temp_since_born.max)),
- le16_to_cpu(item->temp_since_born.max),
- K2C(le16_to_cpu(item->temp_since_born.min)),
- le16_to_cpu(item->temp_since_born.min),
- K2C(le16_to_cpu(item->temp_since_born.curr)),
- le16_to_cpu(item->temp_since_born.curr));
+ K2C(le16_to_cpu(item->temp_since_born.max)),
+ le16_to_cpu(item->temp_since_born.max),
+ K2C(le16_to_cpu(item->temp_since_born.min)),
+ le16_to_cpu(item->temp_since_born.min),
+ K2C(le16_to_cpu(item->temp_since_born.curr)),
+ le16_to_cpu(item->temp_since_born.curr));
break;
case 0xe8:
printf("max: %d, min: %d, curr: %d\n",
- le16_to_cpu(item->power_consumption.max),
- le16_to_cpu(item->power_consumption.min),
- le16_to_cpu(item->power_consumption.curr));
+ le16_to_cpu(item->power_consumption.max),
+ le16_to_cpu(item->power_consumption.min),
+ le16_to_cpu(item->power_consumption.curr));
break;
case 0xaf:
printf("max: %d °C (%d K), min: %d °C (%d K), curr: %d °C (%d K)\n",
- K2C(le16_to_cpu(item->temp_since_power_on.max)),
- le16_to_cpu(item->temp_since_power_on.max),
- K2C(le16_to_cpu(item->temp_since_power_on.min)),
- le16_to_cpu(item->temp_since_power_on.min),
- K2C(le16_to_cpu(item->temp_since_power_on.curr)),
- le16_to_cpu(item->temp_since_power_on.curr));
+ K2C(le16_to_cpu(item->temp_since_power_on.max)),
+ le16_to_cpu(item->temp_since_power_on.max),
+ K2C(le16_to_cpu(item->temp_since_power_on.min)),
+ le16_to_cpu(item->temp_since_power_on.min),
+ K2C(le16_to_cpu(item->temp_since_power_on.curr)),
+ le16_to_cpu(item->temp_since_power_on.curr));
break;
default:
printf("%" PRIu64 "\n", int48_to_long(item->raw));
@@ -1460,7 +1464,7 @@ static void smart_log_add_v2_print(struct smart_log_add_item_12 *item, int item_
[0xfd] = {18, "xor_fail_count" },
[0xfe] = {19, "xor_invoked_count" },
[0xe5] = {20, "inflight_read_io_cmd" },
- [0xe6] = {21, "flash_error_media_count" },
+ [0xe6] = {21, "inflight_write_io_cmd" },
[0xf8] = {22, "nand_bytes_read" },
[0xe7] = {23, "temp_since_born" },
[0xe8] = {24, "power_consumption" },
@@ -1476,33 +1480,33 @@ static void smart_log_add_v2_print(struct smart_log_add_item_12 *item, int item_
switch (item->id) {
case 0xad:
printf("min: %d, max: %d, avg: %d\n",
- le16_to_cpu(item->wear_level.min),
- le16_to_cpu(item->wear_level.max),
- le16_to_cpu(item->wear_level.avg));
+ le16_to_cpu(item->wear_level.min),
+ le16_to_cpu(item->wear_level.max),
+ le16_to_cpu(item->wear_level.avg));
break;
case 0xe7:
printf("max: %d °C (%d K), min: %d °C (%d K), curr: %d °C (%d K)\n",
- K2C(le16_to_cpu(item->temp_since_born.max)),
- le16_to_cpu(item->temp_since_born.max),
- K2C(le16_to_cpu(item->temp_since_born.min)),
- le16_to_cpu(item->temp_since_born.min),
- K2C(le16_to_cpu(item->temp_since_born.curr)),
- le16_to_cpu(item->temp_since_born.curr));
+ K2C(le16_to_cpu(item->temp_since_born.max)),
+ le16_to_cpu(item->temp_since_born.max),
+ K2C(le16_to_cpu(item->temp_since_born.min)),
+ le16_to_cpu(item->temp_since_born.min),
+ K2C(le16_to_cpu(item->temp_since_born.curr)),
+ le16_to_cpu(item->temp_since_born.curr));
break;
case 0xe8:
printf("max: %d, min: %d, curr: %d\n",
- le16_to_cpu(item->power_consumption.max),
- le16_to_cpu(item->power_consumption.min),
- le16_to_cpu(item->power_consumption.curr));
+ le16_to_cpu(item->power_consumption.max),
+ le16_to_cpu(item->power_consumption.min),
+ le16_to_cpu(item->power_consumption.curr));
break;
case 0xaf:
printf("max: %d °C (%d K), min: %d °C (%d K), curr: %d °C (%d K)\n",
- K2C(le16_to_cpu(item->temp_since_power_on.max)),
- le16_to_cpu(item->temp_since_power_on.max),
- K2C(le16_to_cpu(item->temp_since_power_on.min)),
- le16_to_cpu(item->temp_since_power_on.min),
- K2C(le16_to_cpu(item->temp_since_power_on.curr)),
- le16_to_cpu(item->temp_since_power_on.curr));
+ K2C(le16_to_cpu(item->temp_since_power_on.max)),
+ le16_to_cpu(item->temp_since_power_on.max),
+ K2C(le16_to_cpu(item->temp_since_power_on.min)),
+ le16_to_cpu(item->temp_since_power_on.min),
+ K2C(le16_to_cpu(item->temp_since_power_on.curr)),
+ le16_to_cpu(item->temp_since_power_on.curr));
break;
default:
printf("%" PRIu64 "\n", int48_to_long(item->raw));
@@ -1546,9 +1550,9 @@ static void smart_log_add_v3_print(struct smart_log_add_item_10 *item, int item_
switch (item->id) {
case 0xad:
printf("min: %d, max: %d, avg: %d\n",
- le16_to_cpu(item->wear_level.min),
- le16_to_cpu(item->wear_level.max),
- le16_to_cpu(item->wear_level.avg));
+ le16_to_cpu(item->wear_level.min),
+ le16_to_cpu(item->wear_level.max),
+ le16_to_cpu(item->wear_level.avg));
break;
default:
printf("%" PRIu64 "\n", int48_to_long(item->raw));
@@ -1571,13 +1575,13 @@ static void smart_log_add_print(struct smart_log_add *log, const char *devname)
switch (version) {
case 0:
return smart_log_add_v0_print(&log->v0_raw[0],
- sizeof(struct smart_log_add_v0) / sizeof(struct smart_log_add_item_12));
+ sizeof(struct smart_log_add_v0) / sizeof(struct smart_log_add_item_12));
case 2:
return smart_log_add_v2_print(&log->v2_raw[0],
- sizeof(struct smart_log_add_v2) / sizeof(struct smart_log_add_item_12));
+ sizeof(struct smart_log_add_v2) / sizeof(struct smart_log_add_item_12));
case 3:
return smart_log_add_v3_print(&log->v3_raw[0],
- sizeof(struct smart_log_add_v3) / sizeof(struct smart_log_add_item_10));
+ sizeof(struct smart_log_add_v3) / sizeof(struct smart_log_add_item_10));
case 1:
fprintf(stderr, "Version %d: N/A\n", version);
@@ -1604,9 +1608,7 @@ static int mb_get_smart_log_add(int argc, char **argv, struct command *cmd, stru
OPT_FLAG("raw-binary", 'b', &cfg.raw_binary, "dump the whole log buffer in binary format"),
OPT_END()};
- // Open device
-
- struct nvme_dev *dev = NULL;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
err = parse_and_open(&dev, argc, argv, cmd->help, opts);
if (err)
@@ -1616,7 +1618,8 @@ static int mb_get_smart_log_add(int argc, char **argv, struct command *cmd, stru
struct smart_log_add log = {0};
- err = nvme_get_log_simple(dev_fd(dev), LID_SMART_LOG_ADD, sizeof(struct smart_log_add), &log);
+ err = nvme_get_log_simple(dev_fd(dev), LID_SMART_LOG_ADD, sizeof(struct smart_log_add),
+ &log);
if (!err) {
if (!cfg.raw_binary)
smart_log_add_print(&log, dev->name);
@@ -1628,9 +1631,6 @@ static int mb_get_smart_log_add(int argc, char **argv, struct command *cmd, stru
nvme_show_error("%s: %s", cmd->name, nvme_strerror(errno));
}
- // Close device
-
- dev_close(dev);
return err;
}
@@ -1643,7 +1643,7 @@ struct latency_stats_bucket {
struct __packed latency_stats {
union {
- struct latency_stats_v2_0 {
+ struct __packed latency_stats_v2_0 {
uint32_t minor_version;
uint32_t major_version;
uint32_t bucket_read_data[32];
@@ -1660,9 +1660,9 @@ struct __packed latency_stats {
struct __packed high_latency_log {
union {
- struct high_latency_log_v1 {
+ struct __packed high_latency_log_v1 {
uint32_t version;
- struct high_latency_log_entry {
+ struct __packed high_latency_log_entry {
uint64_t timestamp; // ms
uint32_t latency;
uint32_t qid;
@@ -1688,12 +1688,12 @@ struct __packed high_latency_log {
struct __packed performance_stats {
union {
- struct performance_stats_v1 {
+ struct __packed performance_stats_v1 {
uint8_t version;
uint8_t rsvd[3];
- struct performance_stats_timestamp {
+ struct __packed performance_stats_timestamp {
uint8_t timestamp[6];
- struct performance_stats_entry {
+ struct __packed performance_stats_entry {
uint16_t read_iops; // K IOPS
uint16_t read_bandwidth; // MiB
uint32_t read_latency; // us
@@ -1705,12 +1705,36 @@ struct __packed performance_stats {
} entries[3600];
} timestamps[24];
} v1;
+ struct __packed performance_stats_v2 {
+ uint8_t version;
+ uint8_t rsvd[3];
+ struct __packed performance_stats_timestamp_v2 {
+ uint8_t timestamp[6];
+ struct __packed performance_stats_entry_v2 {
+ uint16_t read_iops;
+ uint16_t read_bandwidth;
+ uint16_t read_latency_avg;
+ uint16_t read_latency_max;
+ uint8_t scale_of_read_iops;
+ uint8_t scale_of_read_bandwidth;
+ uint8_t scale_of_read_latency_avg;
+ uint8_t scale_of_read_latency_max;
+ uint16_t write_iops;
+ uint16_t write_bandwidth;
+ uint16_t write_latency_avg;
+ uint16_t write_latency_max;
+ uint8_t scale_of_write_iops;
+ uint8_t scale_of_write_bandwidth;
+ uint8_t scale_of_write_latency_avg;
+ uint8_t scale_of_write_latency_max;
+ } entries[3600];
+ } timestamps[24];
+ } v2;
uint8_t raw[4 + 24 * (6 + 3600 * 24)];
};
};
-static int mb_set_latency_feature(int argc, char **argv, struct command *cmd,
- struct plugin *plugin)
+static int mb_set_latency_feature(int argc, char **argv, struct command *cmd, struct plugin *plugin)
{
int err = 0;
@@ -1728,26 +1752,24 @@ static int mb_set_latency_feature(int argc, char **argv, struct command *cmd,
OPT_ARGS(opts) = {
OPT_UINT("sel-perf-log", 's', &cfg.perf_monitor,
- "Select features to turn on, default: Disable\n"
- " bit 0: latency statistics\n"
- " bit 1: high latency log\n"
- " bit 2: Performance stat"),
+ "Select features to turn on, default: Disable\n"
+ " bit 0: latency statistics\n"
+ " bit 1: high latency log\n"
+ " bit 2: Performance stat"),
OPT_UINT("set-commands-mask", 'm', &cfg.cmd_mask,
- "Set Enable, default: Disable\n"
- " bit 0: Read commands\n"
- " bit 1: high Write commands\n"
- " bit 2: De-allocate/TRIM (this bit is not worked for Performance stat.)"),
+ "Set Enable, default: Disable\n"
+ " bit 0: Read commands\n"
+ " bit 1: high Write commands\n"
+ " bit 2: De-allocate/TRIM (this bit is not worked for Performance stat.)"),
OPT_UINT("set-read-threshold", 'r', &cfg.read_threshold,
- "set read high latency log threshold, it's a 0-based value and unit is 10ms"),
+ "set read high latency log threshold, it's a 0-based value and unit is 10ms"),
OPT_UINT("set-write-threshold", 'w', &cfg.write_threshold,
- "set write high latency log threshold, it's a 0-based value and unit is 10ms"),
+ "set write high latency log threshold, it's a 0-based value and unit is 10ms"),
OPT_UINT("set-trim-threshold", 't', &cfg.de_allocate_trim_threshold,
- "set trim high latency log threshold, it's a 0-based value and unit is 10ms"),
+ "set trim high latency log threshold, it's a 0-based value and unit is 10ms"),
OPT_END()};
- // Open device
-
- struct nvme_dev *dev = NULL;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
err = parse_and_open(&dev, argc, argv, cmd->help, opts);
if (err)
@@ -1766,9 +1788,9 @@ static int mb_set_latency_feature(int argc, char **argv, struct command *cmd,
.cdw11 = 0 | cfg.perf_monitor,
.cdw12 = 0 | cfg.cmd_mask,
.cdw13 = 0 |
- (cfg.read_threshold & 0xff) |
- ((cfg.write_threshold & 0xff) << 8) |
- ((cfg.de_allocate_trim_threshold & 0xff) << 16),
+ (cfg.read_threshold & 0xff) |
+ ((cfg.write_threshold & 0xff) << 8) |
+ ((cfg.de_allocate_trim_threshold & 0xff) << 16),
.cdw15 = 0,
.save = 0,
.uuidx = 0,
@@ -1786,14 +1808,10 @@ static int mb_set_latency_feature(int argc, char **argv, struct command *cmd,
else
nvme_show_error("%s: %s", cmd->name, nvme_strerror(errno));
- // Close device
-
- dev_close(dev);
return err;
}
-static int mb_get_latency_feature(int argc, char **argv, struct command *cmd,
- struct plugin *plugin)
+static int mb_get_latency_feature(int argc, char **argv, struct command *cmd, struct plugin *plugin)
{
int err = 0;
@@ -1802,9 +1820,7 @@ static int mb_get_latency_feature(int argc, char **argv, struct command *cmd,
OPT_ARGS(opts) = {
OPT_END()};
- // Open device
-
- struct nvme_dev *dev = NULL;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
err = parse_and_open(&dev, argc, argv, cmd->help, opts);
if (err)
@@ -1835,8 +1851,512 @@ static int mb_get_latency_feature(int argc, char **argv, struct command *cmd,
nvme_show_error("%s: %s", cmd->name, nvme_strerror(errno));
}
- // Close device
+ return err;
+}
+
+static void latency_stats_v2_0_print(struct latency_stats *log, int size)
+{
+ static const struct latency_stats_bucket buckets[0xff] = {
+ [1] = {"0us", "50us" },
+ [2] = {"50us", "100us"},
+ [3] = {"100us", "150us"},
+ [4] = {"150us", "200us"},
+ [5] = {"200us", "300us"},
+ [6] = {"300us", "400us"},
+ [7] = {"400us", "500us"},
+ [8] = {"500us", "600us"},
+ [9] = {"600us", "700us"},
+ [10] = {"700us", "800us"},
+ [11] = {"800us", "900us"},
+ [12] = {"900us", "1ms" },
+ [13] = {"1ms", "5ms" },
+ [14] = {"5ms", "10ms" },
+ [15] = {"10ms", "20ms" },
+ [16] = {"20ms", "50ms" },
+ [17] = {"50ms", "100ms"},
+ [18] = {"100ms", "200ms"},
+ [19] = {"200ms", "300ms"},
+ [20] = {"300ms", "400ms"},
+ [21] = {"400ms", "500ms"},
+ [22] = {"500ms", "600ms"},
+ [23] = {"600ms", "700ms"},
+ [24] = {"700ms", "800ms"},
+ [25] = {"800ms", "900ms"},
+ [26] = {"900ms", "1s" },
+ [27] = {"1s", "2s" },
+ [28] = {"2s", "3s" },
+ [29] = {"3s", "4s" },
+ [30] = {"4s", "5s" },
+ [31] = {"5s", "8s" },
+ [32] = {"8s", "INF" },
+ };
+
+ printf("Bucket 1-32 IO Read Command Data\n");
+ printf("-------------------------------------------\n");
+ printf("%-12s%-12s%-12s%-12s\n", "Bucket", "Start(>=)", "End(<)", "Value");
+ int bucket_count = sizeof(log->v2_0.bucket_read_data) / sizeof(uint32_t);
+
+ for (int i = 0; i < bucket_count; i++) {
+ printf("%-12u%-12s%-12s%-12u\n", i + 1, buckets[i + 1].start_threshold,
+ buckets[i + 1].end_threshold, log->v2_0.bucket_read_data[i]);
+ }
+ printf("\n");
+
+ printf("Bucket 1-32 IO Write Command Data\n");
+ printf("-------------------------------------------\n");
+ printf("%-12s%-12s%-12s%-12s\n", "Bucket", "Start(>=)", "End(<)", "Value");
+ bucket_count = sizeof(log->v2_0.bucket_write_data) / sizeof(uint32_t);
+
+ for (int i = 0; i < bucket_count; i++) {
+ printf("%-12u%-12s%-12s%-12u\n", i + 1, buckets[i + 1].start_threshold,
+ buckets[i + 1].end_threshold, log->v2_0.bucket_write_data[i]);
+ }
+ printf("\n");
+
+ printf("Bucket 1-32 IO Trim Command Data\n");
+ printf("-------------------------------------------\n");
+ printf("%-12s%-12s%-12s%-12s\n", "Bucket", "Start(>=)", "End(<)", "Value");
+ bucket_count = sizeof(log->v2_0.bucket_trim_data) / sizeof(uint32_t);
+
+ for (int i = 0; i < bucket_count; i++) {
+ printf("%-12u%-12s%-12s%-12u\n", i + 1, buckets[i + 1].start_threshold,
+ buckets[i + 1].end_threshold, log->v2_0.bucket_trim_data[i]);
+ }
+ printf("\n");
+}
+
+static void latency_stats_print(struct latency_stats *log, const char *devname)
+{
+ uint32_t minor_version = *(uint32_t *)&log->raw[0];
+ uint32_t major_version = *(uint32_t *)&log->raw[4];
+
+ printf("Major Version: %u, Minor Version: %u\n", major_version, minor_version);
+ printf("\n");
+ printf("Latency Statistics Log for NVMe device: %s\n", devname);
+ printf("\n");
+
+ switch (major_version) {
+ case 2:
+ switch (minor_version) {
+ case 0:
+ latency_stats_v2_0_print(log, sizeof(struct latency_stats));
+ break;
+ default:
+ fprintf(stderr, "Major Version %u, Minor Version %u: Not supported yet\n",
+ major_version, minor_version);
+ break;
+ }
+ break;
+
+ default:
+ fprintf(stderr, "Major Version %u: Not supported yet\n", major_version);
+ break;
+ }
+}
+
+static int mb_get_latency_stats(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+{
+ // Get the configuration
+
+ struct config {
+ bool raw_binary;
+ };
+
+ struct config cfg = {0};
+
+ OPT_ARGS(opts) = {
+ OPT_FLAG("raw-binary",
+ 'b',
+ &cfg.raw_binary,
+ "dump the whole log buffer in binary format"),
+ OPT_END()};
+
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
+
+ int err = parse_and_open(&dev, argc, argv, cmd->help, opts);
+
+ if (err)
+ return err;
+
+ // Get log
+
+ struct latency_stats log = {0};
+
+ err = nvme_get_log_simple(dev_fd(dev), LID_LATENCY_STATISTICS, sizeof(struct latency_stats),
+ &log);
+ if (!err) {
+ if (!cfg.raw_binary)
+ latency_stats_print(&log, dev->name);
+ else
+ d_raw((unsigned char *)&log, sizeof(struct latency_stats));
+ } else if (err > 0) {
+ nvme_show_status(err);
+ } else {
+ nvme_show_error("%s: %s", cmd->name, nvme_strerror(errno));
+ }
+
+ return err;
+}
+
+static void high_latency_log_v1_print(struct high_latency_log *log, int size)
+{
+ printf("%-24s%-12s%-12s%-6s%-6s%-6s%-6s%-12s%-24s%-6s%-6s%-6s%-6s%-6s\n",
+ "Timestamp", "Latency(us)", "QID", "OpC", "Fuse", "PSDT", "CID", "NSID", "SLBA",
+ "NLB", "DType", "PInfo", "FUA", "LR");
+
+ for (int i = 0; i < 1024; i++) {
+ if (log->v1.entries[i].timestamp == 0)
+ break;
+
+ // Get the timestamp
+
+ time_t timestamp_ms = log->v1.entries[i].timestamp;
+ time_t timestamp_s = timestamp_ms / 1000;
+ int time_ms = timestamp_ms % 1000;
+ char str_time_s[20] = {0};
+ char str_time_ms[32] = {0};
+
+ strftime(str_time_s, sizeof(str_time_s), "%Y-%m-%d %H:%M:%S",
+ localtime(&timestamp_s));
+ snprintf(str_time_ms, sizeof(str_time_ms), "%s.%03d", str_time_s, time_ms);
+ printf("%-24s", str_time_ms);
+
+ //
+ printf("%-12" PRIu32, log->v1.entries[i].latency);
+ printf("%-12" PRIu32, log->v1.entries[i].qid);
+ printf("%#-6" PRIx32, log->v1.entries[i].opcode);
+ printf("%-6" PRIu32, log->v1.entries[i].fuse);
+ printf("%-6" PRIu32, log->v1.entries[i].psdt);
+ printf("%-6" PRIu32, log->v1.entries[i].cid);
+ printf("%-12" PRIu32, log->v1.entries[i].nsid);
+ printf("%-24" PRIu64, log->v1.entries[i].slba);
+ printf("%-6" PRIu32, log->v1.entries[i].nlb);
+ printf("%-6" PRIu32, log->v1.entries[i].dtype);
+ printf("%-6" PRIu32, log->v1.entries[i].pinfo);
+ printf("%-6" PRIu32, log->v1.entries[i].fua);
+ printf("%-6" PRIu32, log->v1.entries[i].lr);
+ printf("\n");
+ }
+}
+
+static void high_latency_log_print(struct high_latency_log *log, const char *devname)
+{
+ uint32_t version = *(uint32_t *)&log->raw[0];
+
+ printf("Version: %u\n", version);
+ printf("\n");
+ printf("High Latency Log for NVMe device: %s\n", devname);
+ printf("\n");
+
+ switch (version) {
+ case 1:
+ high_latency_log_v1_print(log, sizeof(struct high_latency_log));
+ break;
+
+ default:
+ fprintf(stderr, "Version %u: Not supported yet\n", version);
+ break;
+ }
+}
+
+static int mb_get_high_latency_log(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
+{
+ // Get the configuration
+
+ struct config {
+ bool raw_binary;
+ };
+
+ struct config cfg = {0};
+
+ OPT_ARGS(opts) = {
+ OPT_FLAG("raw-binary",
+ 'b',
+ &cfg.raw_binary,
+ "dump the whole log buffer in binary format"),
+ OPT_END()};
+
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
+
+ int err = parse_and_open(&dev, argc, argv, cmd->help, opts);
+
+ if (err)
+ return err;
+
+ // Get log
+
+ struct high_latency_log log = {0};
+
+ err = nvme_get_log_simple(dev_fd(dev), LID_HIGH_LATENCY_LOG,
+ sizeof(struct high_latency_log), &log);
+ if (!err) {
+ if (!cfg.raw_binary)
+ high_latency_log_print(&log, dev->name);
+ else
+ d_raw((unsigned char *)&log, sizeof(struct high_latency_log));
+ } else if (err > 0) {
+ nvme_show_status(err);
+ } else {
+ nvme_show_error("%s: %s", cmd->name, nvme_strerror(errno));
+ }
+
+ return err;
+}
+
+static void performance_stats_v1_print(struct performance_stats *log, int duration)
+{
+ for (int i = 0; i < duration; i++) {
+ // Print timestamp
+
+ time_t timestamp_ms = int48_to_long(log->v1.timestamps[i].timestamp);
+ time_t timestamp_s = timestamp_ms / 1000;
+ int time_ms = timestamp_ms % 1000;
+ char time_s[32] = {0};
+
+ strftime(time_s, sizeof(time_s), "%Y-%m-%d %H:%M:%S", localtime(&timestamp_s));
+ printf("Timestamp %2d: %s.%03d\n", i + 1, time_s, time_ms);
+
+ // Print entry title
+
+ printf("%-8s%-14s%-21s%-22s%-22s%-15s%-22s%-23s%-23s\n", "Entry", "Read-IOs(K)",
+ "Read-Bandwidth(MiB)", "Avg-Read-Latency(us)", "Max-Read-Latency(us)",
+ "Write-IOs(K)", "Write-Bandwidth(MiB)", "Avg-Write-Latency(us)",
+ "Max-Write-Latency(us)");
+
+ // Print all entries content
+
+ struct performance_stats_entry entry = {0};
+
+ for (int j = 0; j < 3600; j++) {
+ entry.read_iops =
+ log->v1.timestamps[i].entries[j].read_iops;
+ entry.read_bandwidth =
+ log->v1.timestamps[i].entries[j].read_bandwidth;
+ entry.read_latency =
+ log->v1.timestamps[i].entries[j].read_latency;
+ entry.read_latency_max =
+ log->v1.timestamps[i].entries[j].read_latency_max;
+ entry.write_iops =
+ log->v1.timestamps[i].entries[j].write_iops;
+ entry.write_bandwidth =
+ log->v1.timestamps[i].entries[j].write_bandwidth;
+ entry.write_latency =
+ log->v1.timestamps[i].entries[j].write_latency;
+ entry.write_latency_max =
+ log->v1.timestamps[i].entries[j].write_latency_max;
+
+ if (entry.read_iops == 0 && entry.write_iops == 0)
+ continue;
+
+ printf("%-8u%-14u%-21u%-22u%-22u%-15u%-22u%-23u%-23u\n",
+ j + 1,
+ entry.read_iops,
+ entry.read_bandwidth,
+ entry.read_iops == 0 ?
+ 0 : entry.read_latency / (1000 * entry.read_iops),
+ entry.read_latency_max,
+ entry.write_iops,
+ entry.write_bandwidth,
+ entry.write_iops == 0 ?
+ 0 : entry.write_latency / (1000 * entry.write_iops),
+ entry.write_latency_max);
+ usleep(100);
+ }
+ printf("\n");
+ }
+}
+
+static void performance_stats_v2_print(struct performance_stats *log, int duration)
+{
+ for (int i = 0; i < duration; i++) {
+ // Print timestamp
+
+ time_t timestamp_ms = int48_to_long(log->v2.timestamps[i].timestamp);
+ time_t timestamp_s = timestamp_ms / 1000;
+ int time_ms = timestamp_ms % 1000;
+ char time_s[32] = {0};
+
+ strftime(time_s, sizeof(time_s), "%Y-%m-%d %H:%M:%S", localtime(&timestamp_s));
+ printf("Timestamp %2d: %s.%03d\n", i + 1, time_s, time_ms);
+
+ // Print entry title
+
+ printf("%-8s%-23s%-23s%-23s%-23s%-23s%-23s%-23s%-23s\n",
+ "Entry",
+ "Read-IOs(IOPS)", "Read-Bandwidth(KiB)",
+ "Avg-Read-Latency(us)", "Max-Read-Latency(us)",
+ "Write-IOs(IOPS)", "Write-Bandwidth(KiB)",
+ "Avg-Write-Latency(us)", "Max-Write-Latency(us)");
+
+ // Print all entries content
+ for (int j = 0; j < 3600; j++) {
+ uint32_t read_iops =
+ log->v2.timestamps[i].entries[j].read_iops;
+ uint32_t read_bandwidth =
+ log->v2.timestamps[i].entries[j].read_bandwidth;
+ uint32_t read_latency_avg =
+ log->v2.timestamps[i].entries[j].read_latency_avg;
+ uint32_t read_latency_max =
+ log->v2.timestamps[i].entries[j].read_latency_max;
+ uint32_t scale_of_read_iops =
+ log->v2.timestamps[i].entries[j].scale_of_read_iops;
+ uint32_t scale_of_read_bandwidth =
+ log->v2.timestamps[i].entries[j].scale_of_read_bandwidth;
+ uint32_t scale_of_read_latency_avg =
+ log->v2.timestamps[i].entries[j].scale_of_read_latency_avg;
+ uint32_t scale_of_read_latency_max =
+ log->v2.timestamps[i].entries[j].scale_of_read_latency_max;
+
+ uint32_t write_iops =
+ log->v2.timestamps[i].entries[j].write_iops;
+ uint32_t write_bandwidth =
+ log->v2.timestamps[i].entries[j].write_bandwidth;
+ uint32_t write_latency_avg =
+ log->v2.timestamps[i].entries[j].write_latency_avg;
+ uint32_t write_latency_max =
+ log->v2.timestamps[i].entries[j].write_latency_max;
+ uint32_t scale_of_write_iops =
+ log->v2.timestamps[i].entries[j].scale_of_write_iops;
+ uint32_t scale_of_write_bandwidth =
+ log->v2.timestamps[i].entries[j].scale_of_write_bandwidth;
+ uint32_t scale_of_write_latency_avg =
+ log->v2.timestamps[i].entries[j].scale_of_write_latency_avg;
+ uint32_t scale_of_write_latency_max =
+ log->v2.timestamps[i].entries[j].scale_of_write_latency_max;
+
+ if (read_iops == 0 && write_iops == 0)
+ continue;
+
+ while (scale_of_read_iops < 4 && scale_of_read_iops) {
+ read_iops *= 10;
+ scale_of_read_iops--;
+ }
+ while (scale_of_read_bandwidth < 3 && scale_of_read_bandwidth) {
+ read_bandwidth *= 1024;
+ scale_of_read_bandwidth--;
+ }
+ while (scale_of_read_latency_avg < 3 && scale_of_read_latency_avg) {
+ read_latency_avg *= 1000;
+ scale_of_read_latency_avg--;
+ }
+ while (scale_of_read_latency_max < 3 && scale_of_read_latency_max) {
+ read_latency_max *= 1000;
+ scale_of_read_latency_max--;
+ }
+
+ while (scale_of_write_iops < 4 && scale_of_write_iops) {
+ write_iops *= 10;
+ scale_of_write_iops--;
+ }
+ while (scale_of_write_bandwidth < 3 && scale_of_write_bandwidth) {
+ write_bandwidth *= 1024;
+ scale_of_write_bandwidth--;
+ }
+ while (scale_of_write_latency_avg < 3 && scale_of_write_latency_avg) {
+ write_latency_avg *= 1000;
+ scale_of_write_latency_avg--;
+ }
+ while (scale_of_write_latency_max < 3 && scale_of_write_latency_max) {
+ write_latency_max *= 1000;
+ scale_of_write_latency_max--;
+ }
+
+ printf("%-8u%-23u%-23u%-23u%-23u%-23u%-23u%-23u%-23u\n",
+ j + 1,
+ read_iops,
+ read_bandwidth,
+ read_latency_avg,
+ read_latency_max,
+ write_iops,
+ write_bandwidth,
+ write_latency_avg,
+ write_latency_max);
+ usleep(100);
+ }
+ printf("\n");
+ }
+}
+
+static void performance_stats_print(struct performance_stats *log, const char *devname,
+ int duration)
+{
+ uint8_t version = *(uint8_t *)&log->raw[0];
+
+ printf("Version: %u\n", version);
+ printf("\n");
+ printf("Performance Stat log for NVMe device: %s\n", devname);
+ printf("\n");
+
+ switch (version) {
+ case 1:
+ performance_stats_v1_print(log, duration);
+ break;
+ case 2:
+ performance_stats_v2_print(log, duration);
+ break;
+ default:
+ fprintf(stderr, "Version %u: Not supported yet\n", version);
+ break;
+ }
+}
+
+static int mb_get_performance_stats(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
+{
+ // Get the configuration
+
+ struct config {
+ int duration;
+ bool raw_binary;
+ };
+
+ struct config cfg = {.duration = 1, .raw_binary = false};
+
+ OPT_ARGS(opts) = {
+ OPT_UINT("duration",
+ 'd',
+ &cfg.duration,
+ "[1-24] hours: duration of the log to be printed, default is 1 hour"),
+ OPT_FLAG("raw-binary",
+ 'b',
+ &cfg.raw_binary,
+ "dump the whole log buffer in binary format"),
+ OPT_END()};
+
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
+
+ int err = parse_and_open(&dev, argc, argv, cmd->help, opts);
+
+ if (err)
+ return err;
+
+ // Check parameters
+ if (cfg.duration < 1 || cfg.duration > 24) {
+ fprintf(stderr, "duration must be between 1 and 24.\n");
+ exit(1);
+ }
+
+ // Get log
+
+ struct performance_stats log = {0};
+
+ int log_size = 4 + cfg.duration * sizeof(struct performance_stats_timestamp);
+ // Get one more timestamp if duration is odd number to avoid non-dw alignment issues
+ int xfer_size = (cfg.duration % 2) > 0 ?
+ (4 + (cfg.duration + 1) * sizeof(struct performance_stats_timestamp)) : log_size;
+
+ err = nvme_get_log_simple(dev_fd(dev), LID_PERFORMANCE_STATISTICS, xfer_size, &log);
+ if (!err) {
+ if (!cfg.raw_binary)
+ performance_stats_print(&log, dev->name, cfg.duration);
+ else
+ d_raw((unsigned char *)&log, log_size);
+ } else if (err > 0) {
+ nvme_show_status(err);
+ } else {
+ nvme_show_error("%s: %s", cmd->name, nvme_strerror(errno));
+ }
- dev_close(dev);
return err;
}
diff --git a/plugins/memblaze/memblaze-nvme.h b/plugins/memblaze/memblaze-nvme.h
index e25267b..6c7462f 100644
--- a/plugins/memblaze/memblaze-nvme.h
+++ b/plugins/memblaze/memblaze-nvme.h
@@ -9,18 +9,36 @@
PLUGIN(NAME("memblaze", "Memblaze vendor specific extensions", NVME_VERSION),
COMMAND_LIST(
- ENTRY("smart-log-add", "Retrieve Memblaze SMART Log, show it", mb_get_additional_smart_log)
- ENTRY("get-pm-status", "Get Memblaze Power Manager Status", mb_get_powermanager_status)
- ENTRY("set-pm-status", "Set Memblaze Power Manager Status", mb_set_powermanager_status)
- ENTRY("select-download", "Selective Firmware Download", mb_selective_download)
- ENTRY("lat-stats", "Enable and disable Latency Statistics logging", mb_set_lat_stats)
- ENTRY("lat-stats-print", "Retrieve IO Latency Statistics log, show it", mb_lat_stats_log_print)
- ENTRY("lat-log", "Set Memblaze High Latency Log", mb_set_high_latency_log)
- ENTRY("lat-log-print", "Output Memblaze High Latency Log", mb_high_latency_log_print)
- ENTRY("clear-error-log", "Clear error log", memblaze_clear_error_log)
- ENTRY("smart-log-add-x", "Retrieve Memblaze SMART Log, show it", mb_get_smart_log_add)
- ENTRY("lat-set-feature-x", "Set Enable/Disable for Latency Monitor feature", mb_set_latency_feature)
- ENTRY("lat-get-feature-x", "Get Enabled/Disabled of Latency Monitor feature", mb_get_latency_feature)
+ ENTRY("smart-log-add", "Retrieve Memblaze SMART Log, show it",
+ mb_get_additional_smart_log)
+ ENTRY("get-pm-status", "Get Memblaze Power Manager Status",
+ mb_get_powermanager_status)
+ ENTRY("set-pm-status", "Set Memblaze Power Manager Status",
+ mb_set_powermanager_status)
+ ENTRY("select-download", "Selective Firmware Download",
+ mb_selective_download)
+ ENTRY("lat-stats", "Enable and disable Latency Statistics logging",
+ mb_set_lat_stats)
+ ENTRY("lat-stats-print", "Retrieve IO Latency Statistics log, show it",
+ mb_lat_stats_log_print)
+ ENTRY("lat-log", "Set Memblaze High Latency Log",
+ mb_set_high_latency_log)
+ ENTRY("lat-log-print", "Output Memblaze High Latency Log",
+ mb_high_latency_log_print)
+ ENTRY("clear-error-log", "Clear error log",
+ memblaze_clear_error_log)
+ ENTRY("smart-log-add-x", "Retrieve Memblaze SMART Log, show it",
+ mb_get_smart_log_add)
+ ENTRY("lat-set-feature-x", "Set Enable/Disable for Latency Monitor feature",
+ mb_set_latency_feature)
+ ENTRY("lat-get-feature-x", "Get Enabled/Disabled of Latency Monitor feature",
+ mb_get_latency_feature)
+ ENTRY("lat-stats-print-x", "Get Latency Statistics log and show it.",
+ mb_get_latency_stats)
+ ENTRY("lat-log-print-x", "Get High Latency log and show it.",
+ mb_get_high_latency_log)
+ ENTRY("perf-stats-print-x", "Get Performance Stat log and show it.",
+ mb_get_performance_stats)
)
);
diff --git a/plugins/meson.build b/plugins/meson.build
index bb4c9ad..146fa2a 100644
--- a/plugins/meson.build
+++ b/plugins/meson.build
@@ -26,10 +26,11 @@ if json_c_dep.found()
'plugins/wdc/wdc-utils.c',
'plugins/ymtc/ymtc-nvme.c',
'plugins/zns/zns.c',
+ 'plugins/ssstc/ssstc-nvme.c',
]
subdir('solidigm')
subdir('ocp')
- if conf.has('HAVE_SED_OPAL')
+ if conf.get('HAVE_SED_OPAL') != 0
subdir('sed')
endif
endif
diff --git a/plugins/micron/micron-nvme.c b/plugins/micron/micron-nvme.c
index 63a7a79..ddecc97 100644
--- a/plugins/micron/micron-nvme.c
+++ b/plugins/micron/micron-nvme.c
@@ -132,34 +132,24 @@ static enum eDriveModel GetDriveModel(int idx)
if (vendor_id == MICRON_VENDOR_ID) {
switch (device_id) {
case 0x5196:
- fallthrough;
case 0x51A0:
- fallthrough;
case 0x51A1:
- fallthrough;
case 0x51A2:
eModel = M51AX;
break;
case 0x51B0:
- fallthrough;
case 0x51B1:
- fallthrough;
case 0x51B2:
eModel = M51BX;
break;
case 0x51C0:
- fallthrough;
case 0x51C1:
- fallthrough;
case 0x51C2:
- fallthrough;
case 0x51C3:
eModel = M51CX;
break;
case 0x5405:
- fallthrough;
case 0x5406:
- fallthrough;
case 0x5407:
eModel = M5407;
break;
@@ -226,7 +216,6 @@ static int SetupDebugDataDirectories(char *strSN, char *strFilePath,
int length = 0;
int nIndex = 0;
char *strTemp = NULL;
- struct stat dirStat;
int j;
int k = 0;
int i = 0;
@@ -304,18 +293,17 @@ static int SetupDebugDataDirectories(char *strSN, char *strFilePath,
strMainDirName[nIndex] = '\0';
j = 1;
- while (!stat(strMainDirName, &dirStat)) {
+ while (mkdir(strMainDirName, 0777) < 0) {
+ if (errno != EEXIST) {
+ err = -1;
+ goto exit_status;
+ }
strMainDirName[nIndex] = '\0';
sprintf(strAppend, "-%d", j);
strcat(strMainDirName, strAppend);
j++;
}
- if (mkdir(strMainDirName, 0777) < 0) {
- err = -1;
- goto exit_status;
- }
-
if (strOSDirName) {
sprintf(strOSDirName, "%s/%s", strMainDirName, "OS");
if (mkdir(strOSDirName, 0777) < 0) {
@@ -331,7 +319,7 @@ static int SetupDebugDataDirectories(char *strSN, char *strFilePath,
rmdir(strOSDirName);
rmdir(strMainDirName);
err = -1;
- }
+ }
}
exit_status:
@@ -3217,28 +3205,20 @@ static int micron_internal_logs(int argc, char **argv, struct command *cmd,
err = -1;
switch (aVendorLogs[i].ucLogPage) {
case 0xE1:
- fallthrough;
case 0xE5:
- fallthrough;
case 0xE9:
err = 1;
break;
case 0xE2:
- fallthrough;
case 0xE3:
- fallthrough;
case 0xE4:
- fallthrough;
case 0xE8:
- fallthrough;
case 0xEA:
err = get_common_log(dev_fd(dev), aVendorLogs[i].ucLogPage,
&dataBuffer, &bSize);
break;
case 0xC1:
- fallthrough;
case 0xC2:
- fallthrough;
case 0xC4:
err = GetLogPageSize(dev_fd(dev), aVendorLogs[i].ucLogPage,
&bSize);
@@ -3247,7 +3227,6 @@ static int micron_internal_logs(int argc, char **argv, struct command *cmd,
&dataBuffer, bSize);
break;
case 0xE6:
- fallthrough;
case 0xE7:
puiIDDBuf = (unsigned int *)&ctrl;
uiMask = puiIDDBuf[1015];
@@ -3273,11 +3252,8 @@ static int micron_internal_logs(int argc, char **argv, struct command *cmd,
}
break;
case 0xF7:
- fallthrough;
case 0xF9:
- fallthrough;
case 0xFC:
- fallthrough;
case 0xFD:
if (eModel == M51BX)
(void)NVMEResetLog(dev_fd(dev), aVendorLogs[i].ucLogPage,
diff --git a/plugins/nbft/nbft-plugin.c b/plugins/nbft/nbft-plugin.c
index 2193ffb..1bb60be 100644
--- a/plugins/nbft/nbft-plugin.c
+++ b/plugins/nbft/nbft-plugin.c
@@ -4,11 +4,12 @@
#include <stdio.h>
#include <fnmatch.h>
+#include <libnvme.h>
#include "nvme-print.h"
#include "nvme.h"
#include "nbft.h"
-#include "libnvme.h"
#include "fabrics.h"
+#include "util/logging.h"
#define CREATE_CMD
#include "nbft-plugin.h"
@@ -168,7 +169,11 @@ static json_object *ssns_to_json(struct nbft_info_subsystem_ns *ss)
|| json_object_add_value_int(ss_json, "pdu_header_digest_required",
ss->pdu_header_digest_required)
|| json_object_add_value_int(ss_json, "data_digest_required",
- ss->data_digest_required))
+ ss->data_digest_required)
+ || json_object_add_value_int(ss_json, "discovered",
+ ss->discovered)
+ || json_object_add_value_int(ss_json, "unavailable",
+ ss->unavailable))
goto fail;
return ss_json;
@@ -319,7 +324,7 @@ static int json_show_nbfts(struct list_head *nbft_list, bool show_subsys,
bool show_hfi, bool show_discovery)
{
struct json_object *nbft_json_array, *nbft_json;
- struct nbft_file_entry *entry;
+ struct nbft_file_entry *entry = NULL;
nbft_json_array = json_create_array();
if (!nbft_json_array)
@@ -510,7 +515,7 @@ static void normal_show_nbfts(struct list_head *nbft_list, bool show_subsys,
bool show_hfi, bool show_discovery)
{
bool not_first = false;
- struct nbft_file_entry *entry;
+ struct nbft_file_entry *entry = NULL;
list_for_each(nbft_list, entry, node) {
if (not_first)
@@ -529,6 +534,8 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
enum nvme_print_flags flags;
int ret;
bool show_subsys = false, show_hfi = false, show_discovery = false;
+ unsigned int verbose = 0;
+ nvme_root_t r;
OPT_ARGS(opts) = {
OPT_FMT("output-format", 'o', &format, "Output format: normal|json"),
@@ -536,6 +543,7 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
OPT_FLAG("hfi", 'H', &show_hfi, "show NBFT HFIs"),
OPT_FLAG("discovery", 'd', &show_discovery, "show NBFT discovery controllers"),
OPT_STRING("nbft-path", 0, "STR", &nbft_path, "user-defined path for NBFT tables"),
+ OPT_INCR("verbose", 'v', &verbose, "Increase logging verbosity"),
OPT_END()
};
@@ -543,10 +551,15 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
if (ret)
return ret;
+ log_level = map_log_level(verbose, false /* quiet */);
+
ret = validate_output_format(format, &flags);
if (ret < 0)
return ret;
+ /* initialize libnvme logging */
+ r = nvme_create_root(stderr, log_level);
+
if (!(show_subsys || show_hfi || show_discovery))
show_subsys = show_hfi = show_discovery = true;
@@ -559,5 +572,6 @@ int show_nbft(int argc, char **argv, struct command *cmd, struct plugin *plugin)
ret = json_show_nbfts(&nbft_list, show_subsys, show_hfi, show_discovery);
free_nbfts(&nbft_list);
}
+ nvme_free_tree(r);
return ret;
}
diff --git a/plugins/netapp/netapp-nvme.c b/plugins/netapp/netapp-nvme.c
index 2ecdcc5..99f0a20 100644
--- a/plugins/netapp/netapp-nvme.c
+++ b/plugins/netapp/netapp-nvme.c
@@ -46,12 +46,14 @@ enum {
enum {
ONTAP_C2_LOG_SUPPORTED_LSP = 0x0,
ONTAP_C2_LOG_NSINFO_LSP = 0x1,
+ ONTAP_C2_LOG_PLATFORM_LSP = 0x2,
};
enum {
- ONTAP_VSERVER_TLV = 0x11,
- ONTAP_VOLUME_TLV = 0x12,
- ONTAP_NS_TLV = 0x13,
+ ONTAP_VSERVER_NAME_TLV = 0x11,
+ ONTAP_VOLUME_NAME_TLV = 0x12,
+ ONTAP_NS_NAME_TLV = 0x13,
+ ONTAP_NS_PATH_TLV = 0x14,
};
static const char *dev_path = "/dev/";
@@ -134,8 +136,10 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
unsigned char *log_data)
{
int lsp, tlv, label_len;
- char *vserver_name, *volume_name, *namespace_name;
+ char *vserver_name, *volume_name, *namespace_name, *namespace_path;
char vol_name[ONTAP_LABEL_LEN], ns_name[ONTAP_LABEL_LEN];
+ char ns_path[ONTAP_LABEL_LEN];
+ bool nspath_tlv_available = false;
const char *ontap_vol = "/vol/";
int i, j;
@@ -145,9 +149,9 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
/* lsp not related to nsinfo */
return;
- /* get the vserver tlv and name */
+ /* get the vserver name tlv */
tlv = *(__u8 *)&log_data[32];
- if (tlv == ONTAP_VSERVER_TLV) {
+ if (tlv == ONTAP_VSERVER_NAME_TLV) {
label_len = (*(__u16 *)&log_data[34]) * 4;
vserver_name = (char *)&log_data[36];
ontap_labels_to_str(vsname, vserver_name, label_len);
@@ -159,9 +163,9 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
i = 36 + label_len;
j = i + 2;
- /* get the volume tlv and name */
+ /* get the volume name tlv */
tlv = *(__u8 *)&log_data[i];
- if (tlv == ONTAP_VOLUME_TLV) {
+ if (tlv == ONTAP_VOLUME_NAME_TLV) {
label_len = (*(__u16 *)&log_data[j]) * 4;
volume_name = (char *)&log_data[j + 2];
ontap_labels_to_str(vol_name, volume_name, label_len);
@@ -173,9 +177,9 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
i += 4 + label_len;
j += 4 + label_len;
- /* get the namespace tlv and name */
+ /* get the namespace name tlv */
tlv = *(__u8 *)&log_data[i];
- if (tlv == ONTAP_NS_TLV) {
+ if (tlv == ONTAP_NS_NAME_TLV) {
label_len = (*(__u16 *)&log_data[j]) * 4;
namespace_name = (char *)&log_data[j + 2];
ontap_labels_to_str(ns_name, namespace_name, label_len);
@@ -185,8 +189,25 @@ static void netapp_get_ontap_labels(char *vsname, char *nspath,
return;
}
- snprintf(nspath, ONTAP_NS_PATHLEN, "%s%s%s%s", ontap_vol,
+ i += 4 + label_len;
+ j += 4 + label_len;
+ /* get the namespace path tlv if available */
+ tlv = *(__u8 *)&log_data[i];
+ if (tlv == ONTAP_NS_PATH_TLV) {
+ nspath_tlv_available = true;
+ label_len = (*(__u16 *)&log_data[j]) * 4;
+ namespace_path = (char *)&log_data[j + 2];
+ ontap_labels_to_str(ns_path, namespace_path, label_len);
+ }
+
+ if (nspath_tlv_available) {
+ /* set nspath from the corresponding ns_path string */
+ snprintf(nspath, ONTAP_NS_PATHLEN, "%s", ns_path);
+ } else {
+ /* set nspath by concatenating ontap_vol with ns_name */
+ snprintf(nspath, ONTAP_NS_PATHLEN, "%s%s%s%s", ontap_vol,
vol_name, "/", ns_name);
+ }
}
static void netapp_smdevice_json(struct json_object *devices, char *devname,
diff --git a/plugins/ocp/ocp-clear-features.c b/plugins/ocp/ocp-clear-features.c
index 0f49584..731dfea 100644
--- a/plugins/ocp/ocp-clear-features.c
+++ b/plugins/ocp/ocp-clear-features.c
@@ -18,7 +18,7 @@ static int ocp_clear_feature(int argc, char **argv, const char *desc, const __u8
__u32 result = 0;
__u32 clear = 1 << 31;
struct nvme_dev *dev;
- int uuid_index = 0;
+ __u8 uuid_index = 0;
bool uuid = true;
int err;
diff --git a/plugins/ocp/ocp-fw-activation-history.c b/plugins/ocp/ocp-fw-activation-history.c
index ad96c6b..16598a0 100644
--- a/plugins/ocp/ocp-fw-activation-history.c
+++ b/plugins/ocp/ocp-fw-activation-history.c
@@ -66,7 +66,7 @@ static void ocp_fw_activation_history_normal(const struct fw_activation_history
printf(" %-22s%d\n", "activation count:",
le16_to_cpu(entry->activation_count));
printf(" %-22s%"PRIu64"\n", "timestamp:",
- le64_to_cpu(entry->timestamp));
+ (0x0000FFFFFFFFFFFF & le64_to_cpu(entry->timestamp)));
printf(" %-22s%"PRIu64"\n", "power cycle count:",
le64_to_cpu(entry->power_cycle_count));
printf(" %-22s%.*s\n", "previous firmware:", (int)sizeof(entry->previous_fw),
@@ -106,7 +106,7 @@ static void ocp_fw_activation_history_json(const struct fw_activation_history *f
json_object_add_value_uint(entry_obj, "activation count",
le16_to_cpu(entry->activation_count));
json_object_add_value_uint64(entry_obj, "timestamp",
- le64_to_cpu(entry->timestamp));
+ (0x0000FFFFFFFFFFFF & le64_to_cpu(entry->timestamp)));
json_object_add_value_uint(entry_obj, "power cycle count",
le64_to_cpu(entry->power_cycle_count));
@@ -162,7 +162,7 @@ int ocp_fw_activation_history_log(int argc, char **argv, struct command *cmd,
if (err)
return err;
- int uuid_index = 0;
+ __u8 uuid_index = 0;
/*
* Best effort attempt at uuid. Otherwise, assume no index (i.e. 0)
diff --git a/plugins/ocp/ocp-nvme.c b/plugins/ocp/ocp-nvme.c
index 53ae0f4..6eaa773 100644
--- a/plugins/ocp/ocp-nvme.c
+++ b/plugins/ocp/ocp-nvme.c
@@ -705,7 +705,7 @@ static int eol_plp_failure_mode_set(struct nvme_dev *dev, const __u32 nsid,
{
__u32 result;
int err;
- int uuid_index = 0;
+ __u8 uuid_index = 0;
if (uuid) {
/* OCP 2.0 requires UUID index support */
@@ -2060,6 +2060,90 @@ static int ocp_device_capabilities_log(int argc, char **argv, struct command *cm
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
+/// Set Telemetry Profile (Feature Identifier C8h) Set Feature
+
+static int ocp_set_telemetry_profile(struct nvme_dev *dev, __u8 tps)
+{
+ __u32 result;
+ int err;
+ __u8 uuid_index = 0;
+
+ /* OCP 2.0 requires UUID index support */
+ err = ocp_get_uuid_index(dev, &uuid_index);
+ if (err || !uuid_index) {
+ nvme_show_error("ERROR: No OCP UUID index found");
+ return err;
+ }
+
+ struct nvme_set_features_args args = {
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
+ .fid = 0xC8,
+ .nsid = 0xFFFFFFFF,
+ .cdw11 = tps,
+ .cdw12 = 0,
+ .save = true,
+ .uuidx = uuid_index,
+ .cdw15 = 0,
+ .data_len = 0,
+ .data = NULL,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .result = &result,
+ };
+
+ err = nvme_set_features(&args);
+ if (err > 0) {
+ nvme_show_status(err);
+ } else if (err < 0) {
+ nvme_show_perror("Set Telemetry Profile");
+ fprintf(stderr, "Command failed while parsing.\n");
+ } else {
+ printf("Successfully Set Telemetry Profile (feature: 0xC8) to below values\n");
+ printf("Telemetry Profile Select: 0x%x\n", tps);
+ }
+
+ return err;
+}
+
+static int ocp_set_telemetry_profile_feature(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
+{
+ const char *desc = "Set Telemetry Profile (Feature Identifier C8h) Set Feature.";
+ const char *tps = "Telemetry Profile Select for device debug data collection";
+ struct nvme_dev *dev;
+ int err;
+
+ struct config {
+ __u8 tps;
+ };
+
+ struct config cfg = {
+ .tps = 0,
+ };
+
+ OPT_ARGS(opts) = {
+ OPT_BYTE("telemetry-profile-select", 't', &cfg.tps, tps),
+ OPT_END()
+ };
+
+ err = parse_and_open(&dev, argc, argv, desc, opts);
+ if (err)
+ return err;
+
+ if (argconfig_parse_seen(opts, "telemetry-profile-select"))
+ err = ocp_set_telemetry_profile(dev, cfg.tps);
+ else
+ nvme_show_error("Telemetry Profile Select is a required argument");
+
+ dev_close(dev);
+
+ return err;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
/// DSSD Power State (Feature Identifier C7h) Set Feature
static int set_dssd_power_state(struct nvme_dev *dev, const __u32 nsid,
@@ -2068,7 +2152,7 @@ static int set_dssd_power_state(struct nvme_dev *dev, const __u32 nsid,
{
__u32 result;
int err;
- int uuid_index = 0;
+ __u8 uuid_index = 0;
if (uuid) {
/* OCP 2.0 requires UUID index support */
@@ -2143,7 +2227,7 @@ static int set_dssd_power_state_feature(int argc, char **argv, struct command *c
if (err)
return err;
- if (argconfig_parse_seen(opts, "power state"))
+ if (argconfig_parse_seen(opts, "power-state"))
err = set_dssd_power_state(dev, nsid, fid, cfg.power_state,
cfg.save,
!argconfig_parse_seen(opts, "no-uuid"));
@@ -2160,137 +2244,271 @@ static int set_dssd_power_state_feature(int argc, char **argv, struct command *c
/// plp_health_check_interval
static int set_plp_health_check_interval(int argc, char **argv, struct command *cmd,
- struct plugin *plugin)
+ struct plugin *plugin)
{
- const char *desc = "Define Issue Set Feature command (FID : 0xC6) PLP Health Check Interval";
- const char *plp_health_interval = "[31:16]:PLP Health Check Interval";
- const char *save = "Specifies that the controller shall save the attribute";
- const __u32 nsid = 0;
- const __u8 fid = 0xc6;
- struct nvme_dev *dev;
- int err;
- __u32 result;
- int uuid_index = 0;
-
- struct config {
- __le16 plp_health_interval;
- bool save;
- };
-
- struct config cfg = {
- .plp_health_interval = 0,
- .save = false,
- };
-
- OPT_ARGS(opts) = {
- OPT_BYTE("plp_health_interval", 'p', &cfg.plp_health_interval, plp_health_interval),
- OPT_FLAG("save", 's', &cfg.save, save),
- OPT_FLAG("no-uuid", 'n', NULL,
- "Skip UUID index search (UUID index not required for OCP 1.0)"),
- OPT_END()
- };
-
- err = parse_and_open(&dev, argc, argv, desc, opts);
- if (err)
- return err;
-
-
- if (!argconfig_parse_seen(opts, "no-uuid")) {
- /* OCP 2.0 requires UUID index support */
- err = ocp_get_uuid_index(dev, &uuid_index);
- if (err || !uuid_index) {
- printf("ERROR: No OCP UUID index found");
- return err;
- }
- }
-
-
- struct nvme_set_features_args args = {
- .args_size = sizeof(args),
- .fd = dev_fd(dev),
- .fid = fid,
- .nsid = nsid,
- .cdw11 = cfg.plp_health_interval << 16,
- .cdw12 = 0,
- .save = cfg.save,
- .uuidx = uuid_index,
- .cdw15 = 0,
- .data_len = 0,
- .data = NULL,
- .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
- .result = &result,
- };
-
- err = nvme_set_features(&args);
- if (err > 0) {
- nvme_show_status(err);
- } else if (err < 0) {
- nvme_show_perror("Define PLP Health Check Interval");
- fprintf(stderr, "Command failed while parsing.\n");
- } else {
- printf("Successfully set the PLP Health Check Interval");
- printf("PLP Health Check Interval: 0x%x\n", cfg.plp_health_interval);
- printf("Save bit Value: 0x%x\n", cfg.save);
- }
- return err;
+ const char *desc = "Define Issue Set Feature command (FID : 0xC6) PLP Health Check Interval";
+ const char *plp_health_interval = "[31:16]:PLP Health Check Interval";
+ const char *save = "Specifies that the controller shall save the attribute";
+ const __u32 nsid = 0;
+ const __u8 fid = 0xc6;
+ struct nvme_dev *dev;
+ int err;
+ __u32 result;
+ __u8 uuid_index = 0;
+
+ struct config {
+ __le16 plp_health_interval;
+ bool save;
+ };
+
+ struct config cfg = {
+ .plp_health_interval = 0,
+ .save = false,
+ };
+
+ OPT_ARGS(opts) = {
+ OPT_BYTE("plp_health_interval", 'p', &cfg.plp_health_interval, plp_health_interval),
+ OPT_FLAG("save", 's', &cfg.save, save),
+ OPT_FLAG("no-uuid", 'n', NULL,
+ "Skip UUID index search (UUID index not required for OCP 1.0)"),
+ OPT_END()
+ };
+
+ err = parse_and_open(&dev, argc, argv, desc, opts);
+ if (err)
+ return err;
+
+
+ if (!argconfig_parse_seen(opts, "no-uuid")) {
+ /* OCP 2.0 requires UUID index support */
+ err = ocp_get_uuid_index(dev, &uuid_index);
+ if (err || !uuid_index) {
+ printf("ERROR: No OCP UUID index found");
+ return err;
+ }
+ }
+
+
+ struct nvme_set_features_args args = {
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
+ .fid = fid,
+ .nsid = nsid,
+ .cdw11 = cfg.plp_health_interval << 16,
+ .cdw12 = 0,
+ .save = cfg.save,
+ .uuidx = uuid_index,
+ .cdw15 = 0,
+ .data_len = 0,
+ .data = NULL,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .result = &result,
+ };
+
+ err = nvme_set_features(&args);
+ if (err > 0) {
+ nvme_show_status(err);
+ } else if (err < 0) {
+ nvme_show_perror("Define PLP Health Check Interval");
+ fprintf(stderr, "Command failed while parsing.\n");
+ } else {
+ printf("Successfully set the PLP Health Check Interval\n");
+ printf("PLP Health Check Interval: 0x%x\n", cfg.plp_health_interval);
+ printf("Save bit Value: 0x%x\n", cfg.save);
+ }
+ return err;
}
static int get_plp_health_check_interval(int argc, char **argv, struct command *cmd,
- struct plugin *plugin)
+ struct plugin *plugin)
{
- const char *desc = "Define Issue Get Feature command (FID : 0xC6) PLP Health Check Interval";
- const char *sel = "[0-3,8]: current/default/saved/supported/changed";
- const __u32 nsid = 0;
- const __u8 fid = 0xc6;
- struct nvme_dev *dev;
- __u32 result;
- int err;
-
- struct config {
- __u8 sel;
- };
-
- struct config cfg = {
- .sel = 0,
- };
-
- OPT_ARGS(opts) = {
- OPT_BYTE("sel", 'S', &cfg.sel, sel),
- OPT_END()
- };
-
- err = parse_and_open(&dev, argc, argv, desc, opts);
- if (err)
- return err;
-
-
- struct nvme_get_features_args args = {
- .args_size = sizeof(args),
- .fd = dev_fd(dev),
- .fid = fid,
- .nsid = nsid,
- .sel = cfg.sel,
- .cdw11 = 0,
- .uuidx = 0,
- .data_len = 0,
- .data = NULL,
- .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
- .result = &result,
- };
-
- err = nvme_get_features(&args);
- if (!err) {
- printf("get-feature:0xC6 %s value: %#08x\n", nvme_select_to_string(cfg.sel), result);
-
- if (cfg.sel == NVME_GET_FEATURES_SEL_SUPPORTED)
- nvme_show_select_result(fid, result);
- } else {
- nvme_show_error("Could not get feature: 0xC6");
- }
-
- return err;
+ const char *desc = "Define Issue Get Feature command (FID : 0xC6) PLP Health Check Interval";
+ const char *sel = "[0-3,8]: current/default/saved/supported/changed";
+ const __u32 nsid = 0;
+ const __u8 fid = 0xc6;
+ struct nvme_dev *dev;
+ __u32 result;
+ int err;
+
+ struct config {
+ __u8 sel;
+ };
+
+ struct config cfg = {
+ .sel = 0,
+ };
+
+ OPT_ARGS(opts) = {
+ OPT_BYTE("sel", 'S', &cfg.sel, sel),
+ OPT_END()
+ };
+
+ err = parse_and_open(&dev, argc, argv, desc, opts);
+ if (err)
+ return err;
+
+
+ struct nvme_get_features_args args = {
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
+ .fid = fid,
+ .nsid = nsid,
+ .sel = cfg.sel,
+ .cdw11 = 0,
+ .uuidx = 0,
+ .data_len = 0,
+ .data = NULL,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .result = &result,
+ };
+
+ err = nvme_get_features(&args);
+ if (!err) {
+ printf("get-feature:0xC6 %s value: %#08x\n", nvme_select_to_string(cfg.sel), result);
+
+ if (cfg.sel == NVME_GET_FEATURES_SEL_SUPPORTED)
+ nvme_show_select_result(fid, result);
+ } else {
+ nvme_show_error("Could not get feature: 0xC6");
+ }
+
+ return err;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+/// dssd_async_event_config
+
+static int set_dssd_async_event_config(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
+{
+
+ const char *desc = "Issue Set Feature command (FID : 0xC9) DSSD Async Event Config";
+ const char *epn = "[0]:Enable Panic Notices";
+ const char *save = "Specifies that the controller shall save the attribute";
+ const __u32 nsid = 0;
+ const __u8 fid = 0xc9;
+ struct nvme_dev *dev;
+ int err;
+ __u32 result;
+ __u8 uuid_index = 0;
+
+ struct config {
+ bool epn;
+ bool save;
+ };
+
+ struct config cfg = {
+ .epn = false,
+ .save = false,
+ };
+
+ OPT_ARGS(opts) = {
+ OPT_FLAG("enable-panic-notices", 'e', &cfg.epn, epn),
+ OPT_FLAG("save", 's', &cfg.save, save),
+ OPT_END()
+ };
+
+ err = parse_and_open(&dev, argc, argv, desc, opts);
+ if (err)
+ return err;
+
+ /* OCP 2.0 requires UUID index support */
+ err = ocp_get_uuid_index(dev, &uuid_index);
+ if (err || !uuid_index) {
+ printf("ERROR: No OCP UUID index found\n");
+ return err;
+ }
+
+ struct nvme_set_features_args args = {
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
+ .fid = fid,
+ .nsid = nsid,
+ .cdw11 = cfg.epn ? 1 : 0,
+ .cdw12 = 0,
+ .save = cfg.save,
+ .uuidx = uuid_index,
+ .cdw15 = 0,
+ .data_len = 0,
+ .data = NULL,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .result = &result,
+ };
+
+ err = nvme_set_features(&args);
+ if (err > 0) {
+ nvme_show_status(err);
+ } else if (err < 0) {
+ nvme_show_perror("Set DSSD Asynchronous Event Configuration");
+ fprintf(stderr, "Command failed while parsing.\n");
+ } else {
+ printf("Successfully set the DSSD Asynchronous Event Configuration\n");
+ printf("Enable Panic Notices bit Value: 0x%x\n", cfg.epn);
+ printf("Save bit Value: 0x%x\n", cfg.save);
+ }
+ return err;
+}
+
+static int get_dssd_async_event_config(int argc, char **argv, struct command *cmd,
+ struct plugin *plugin)
+{
+
+ const char *desc = "Issue Get Feature command (FID : 0xC9) DSSD Async Event Config";
+ const char *sel = "[0-3]: current/default/saved/supported";
+ const __u32 nsid = 0;
+ const __u8 fid = 0xc9;
+ struct nvme_dev *dev;
+ __u32 result;
+ int err;
+
+ struct config {
+ __u8 sel;
+ };
+
+ struct config cfg = {
+ .sel = 0,
+ };
+
+ OPT_ARGS(opts) = {
+ OPT_BYTE("sel", 'S', &cfg.sel, sel),
+ OPT_END()
+ };
+
+ err = parse_and_open(&dev, argc, argv, desc, opts);
+ if (err)
+ return err;
+
+
+ struct nvme_get_features_args args = {
+ .args_size = sizeof(args),
+ .fd = dev_fd(dev),
+ .fid = fid,
+ .nsid = nsid,
+ .sel = cfg.sel,
+ .cdw11 = 0,
+ .uuidx = 0,
+ .data_len = 0,
+ .data = NULL,
+ .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
+ .result = &result,
+ };
+
+ err = nvme_get_features(&args);
+ if (!err) {
+ printf("get-feature:0xC9 %s value: %#08x\n", nvme_select_to_string(cfg.sel), result);
+
+ if (cfg.sel == NVME_GET_FEATURES_SEL_SUPPORTED)
+ nvme_show_select_result(fid, result);
+ } else {
+ nvme_show_error("Could not get feature: 0xC9");
+ }
+
+ return err;
}
///////////////////////////////////////////////////////////////////////////////
@@ -2342,36 +2560,36 @@ static int get_plp_health_check_interval(int argc, char **argv, struct command *
* @reserved3: reserved
*/
struct __attribute__((__packed__)) telemetry_str_log_format {
- __u8 log_page_version;
- __u8 reserved1[15];
- __u8 log_page_guid[C9_GUID_LENGTH];
- __le64 sls;
- __u8 reserved2[24];
- __le64 sits;
- __le64 sitsz;
- __le64 ests;
- __le64 estsz;
- __le64 vu_eve_sts;
- __le64 vu_eve_st_sz;
- __le64 ascts;
- __le64 asctsz;
- __u8 fifo1[16];
- __u8 fifo2[16];
- __u8 fifo3[16];
- __u8 fifo4[16];
- __u8 fifo5[16];
- __u8 fifo6[16];
- __u8 fifo7[16];
- __u8 fifo8[16];
- __u8 fifo9[16];
- __u8 fifo10[16];
- __u8 fifo11[16];
- __u8 fifo12[16];
- __u8 fifo13[16];
- __u8 fifo14[16];
- __u8 fifo15[16];
- __u8 fifo16[16];
- __u8 reserved3[48];
+ __u8 log_page_version;
+ __u8 reserved1[15];
+ __u8 log_page_guid[C9_GUID_LENGTH];
+ __le64 sls;
+ __u8 reserved2[24];
+ __le64 sits;
+ __le64 sitsz;
+ __le64 ests;
+ __le64 estsz;
+ __le64 vu_eve_sts;
+ __le64 vu_eve_st_sz;
+ __le64 ascts;
+ __le64 asctsz;
+ __u8 fifo1[16];
+ __u8 fifo2[16];
+ __u8 fifo3[16];
+ __u8 fifo4[16];
+ __u8 fifo5[16];
+ __u8 fifo6[16];
+ __u8 fifo7[16];
+ __u8 fifo8[16];
+ __u8 fifo9[16];
+ __u8 fifo10[16];
+ __u8 fifo11[16];
+ __u8 fifo12[16];
+ __u8 fifo13[16];
+ __u8 fifo14[16];
+ __u8 fifo15[16];
+ __u8 fifo16[16];
+ __u8 reserved3[48];
};
/*
@@ -2385,11 +2603,11 @@ struct __attribute__((__packed__)) telemetry_str_log_format {
* @reserved2 reserved
*/
struct __attribute__((__packed__)) statistics_id_str_table_entry {
- __le16 vs_si;
- __u8 reserved1;
- __u8 ascii_id_len;
- __le64 ascii_id_ofst;
- __le32 reserved2;
+ __le16 vs_si;
+ __u8 reserved1;
+ __u8 ascii_id_len;
+ __le64 ascii_id_ofst;
+ __le32 reserved2;
};
/*
@@ -2402,11 +2620,11 @@ struct __attribute__((__packed__)) statistics_id_str_table_entry {
* @reserved2 reserved
*/
struct __attribute__((__packed__)) event_id_str_table_entry {
- __u8 deb_eve_class;
- __le16 ei;
- __u8 ascii_id_len;
- __le64 ascii_id_ofst;
- __le32 reserved2;
+ __u8 deb_eve_class;
+ __le16 ei;
+ __u8 ascii_id_len;
+ __le64 ascii_id_ofst;
+ __le32 reserved2;
};
/*
@@ -2419,525 +2637,525 @@ struct __attribute__((__packed__)) event_id_str_table_entry {
* @reserved reserved
*/
struct __attribute__((__packed__)) vu_event_id_str_table_entry {
- __u8 deb_eve_class;
- __le16 vu_ei;
- __u8 ascii_id_len;
- __le64 ascii_id_ofst;
- __le32 reserved;
+ __u8 deb_eve_class;
+ __le16 vu_ei;
+ __u8 ascii_id_len;
+ __le64 ascii_id_ofst;
+ __le32 reserved;
};
/* Function declaration for Telemetry String Log Format (LID:C9h) */
static int ocp_telemetry_str_log_format(int argc, char **argv, struct command *cmd,
- struct plugin *plugin);
+ struct plugin *plugin);
static int ocp_print_C9_log_normal(struct telemetry_str_log_format *log_data,__u8 *log_data_buf)
{
- //calculating the index value for array
- __le64 stat_id_index = (log_data->sitsz * 4) / 16;
- __le64 eve_id_index = (log_data->estsz * 4) / 16;
- __le64 vu_eve_index = (log_data->vu_eve_st_sz * 4) / 16;
- __le64 ascii_table_index = (log_data->asctsz * 4);
- //Calculating the offset for dynamic fields.
- __le64 stat_id_str_table_ofst = C9_TELEMETRY_STR_LOG_SIST_OFST + (log_data->sitsz * 4);
- __le64 event_str_table_ofst = stat_id_str_table_ofst + (log_data->estsz * 4);
- __le64 vu_event_str_table_ofst = event_str_table_ofst + (log_data->vu_eve_st_sz * 4);
- __le64 ascii_table_ofst = vu_event_str_table_ofst + (log_data->asctsz * 4);
- struct statistics_id_str_table_entry stat_id_str_table_arr[stat_id_index];
- struct event_id_str_table_entry event_id_str_table_arr[eve_id_index];
- struct vu_event_id_str_table_entry vu_event_id_str_table_arr[vu_eve_index];
- __u8 ascii_table_info_arr[ascii_table_index];
- int j;
-
- printf(" Log Page Version : 0x%x\n", log_data->log_page_version);
-
- printf(" Reserved : ");
- for (j = 0; j < 15; j++)
- printf("%d", log_data->reserved1[j]);
- printf("\n");
-
- printf(" Log page GUID : 0x");
- for (j = C9_GUID_LENGTH - 1; j >= 0; j--)
- printf("%x", log_data->log_page_guid[j]);
- printf("\n");
-
- printf(" Telemetry String Log Size : 0x%lx\n", le64_to_cpu(log_data->sls));
-
- printf(" Reserved : ");
- for (j = 0; j < 24; j++)
- printf("%d", log_data->reserved2[j]);
- printf("\n");
-
- printf(" Statistics Identifier String Table Start : 0x%lx\n", le64_to_cpu(log_data->sits));
- printf(" Statistics Identifier String Table Size : 0x%lx\n", le64_to_cpu(log_data->sitsz));
- printf(" Event String Table Start : 0x%lx\n", le64_to_cpu(log_data->ests));
- printf(" Event String Table Size : 0x%lx\n", le64_to_cpu(log_data->estsz));
- printf(" VU Event String Table Start : 0x%lx\n", le64_to_cpu(log_data->vu_eve_sts));
- printf(" VU Event String Table Size : 0x%lx\n", le64_to_cpu(log_data->vu_eve_st_sz));
- printf(" ASCII Table Start : 0x%lx\n", le64_to_cpu(log_data->ascts));
- printf(" ASCII Table Size : 0x%lx\n", le64_to_cpu(log_data->asctsz));
-
- printf(" FIFO 1 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo1[j], log_data->fifo1[j]);
- }
-
- printf(" FIFO 2 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo2[j], log_data->fifo2[j]);
- }
-
- printf(" FIFO 3 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo3[j], log_data->fifo3[j]);
- }
-
- printf(" FIFO 4 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
-
- printf(" %d %d %c \n", j, log_data->fifo4[j], log_data->fifo4[j]);
- }
-
- printf(" FIFO 5 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo5[j], log_data->fifo5[j]);
- }
-
- printf(" FIFO 6 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo6[j], log_data->fifo6[j]);
- }
-
- printf(" FIFO 7 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo7[j], log_data->fifo7[j]);
- }
-
- printf(" FIFO 8 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf("index value ascii_val");
- printf(" %d %d %c \n", j, log_data->fifo8[j], log_data->fifo8[j]);
- }
-
- printf(" FIFO 9 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo9[j], log_data->fifo9[j]);
- }
-
- printf(" FIFO 10 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo10[j], log_data->fifo10[j]);
- }
-
- printf(" FIFO 11 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo11[j], log_data->fifo11[j]);
- }
-
- printf(" FIFO 12 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo12[j], log_data->fifo12[j]);
- }
-
- printf(" FIFO 13 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo13[j], log_data->fifo13[j]);
- }
-
- printf(" FIFO 14 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo14[j], log_data->fifo14[j]);
- }
-
- printf(" FIFO 15 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo15[j], log_data->fifo16[j]);
- }
-
- printf(" FIFO 16 ASCII String\n");
- printf(" index value ascii_val\n");
- for (j = 0; j < 16; j++){
- printf(" %d %d %c \n", j, log_data->fifo16[j], log_data->fifo16[j]);
- }
-
- printf(" Reserved : ");
- for (j = 0; j < 48; j++)
- printf("%d", log_data->reserved3[j]);
- printf("\n");
-
- memcpy(stat_id_str_table_arr, (__u8*)log_data_buf + stat_id_str_table_ofst, (log_data->sitsz * 4));
- memcpy(event_id_str_table_arr, (__u8*)log_data_buf + event_str_table_ofst, (log_data->estsz * 4));
- memcpy(vu_event_id_str_table_arr, (__u8*)log_data_buf + vu_event_str_table_ofst, (log_data->vu_eve_st_sz * 4));
- memcpy(ascii_table_info_arr, (__u8*)log_data_buf + ascii_table_ofst, (log_data->asctsz * 4));
-
- printf(" Statistics Identifier String Table\n");
- for (j = 0; j < stat_id_index; j++){
- printf(" Vendor Specific Statistic Identifier : 0x%x\n",le16_to_cpu(stat_id_str_table_arr[j].vs_si));
- printf(" Reserved : 0x%d",stat_id_str_table_arr[j].reserved1);
- printf(" ASCII ID Length : 0x%x\n",stat_id_str_table_arr[j].ascii_id_len);
- printf(" ASCII ID offset : 0x%lx\n",le64_to_cpu(stat_id_str_table_arr[j].ascii_id_ofst));
- printf(" Reserved : 0x%d\n",stat_id_str_table_arr[j].reserved2);
- }
-
- printf(" Event Identifier String Table Entry\n");
- for (j = 0; j < eve_id_index; j++){
- printf(" Debug Event Class : 0x%x\n",event_id_str_table_arr[j].deb_eve_class);
- printf(" Event Identifier : 0x%x\n",le16_to_cpu(event_id_str_table_arr[j].ei));
- printf(" ASCII ID Length : 0x%x\n",event_id_str_table_arr[j].ascii_id_len);
- printf(" ASCII ID offset : 0x%lx\n",le64_to_cpu(event_id_str_table_arr[j].ascii_id_ofst));
- printf(" Reserved : 0x%d\n",event_id_str_table_arr[j].reserved2);
-
- }
-
- printf(" VU Event Identifier String Table Entry\n");
- for (j = 0; j < vu_eve_index; j++){
- printf(" Debug Event Class : 0x%x\n",vu_event_id_str_table_arr[j].deb_eve_class);
- printf(" VU Event Identifier : 0x%x\n",le16_to_cpu(vu_event_id_str_table_arr[j].vu_ei));
- printf(" ASCII ID Length : 0x%x\n",vu_event_id_str_table_arr[j].ascii_id_len);
- printf(" ASCII ID offset : 0x%lx\n",le64_to_cpu(vu_event_id_str_table_arr[j].ascii_id_ofst));
- printf(" Reserved : 0x%d\n",vu_event_id_str_table_arr[j].reserved);
-
- }
-
- printf(" ASCII Table\n");
- printf(" Byte Data_Byte ASCII_Character\n");
- for (j = 0; j < ascii_table_index; j++){
- printf(" %lld 0x%x %c \n",ascii_table_ofst+j,ascii_table_info_arr[j],ascii_table_info_arr[j]);
- }
- return 0;
+ //calculating the index value for array
+ __le64 stat_id_index = (log_data->sitsz * 4) / 16;
+ __le64 eve_id_index = (log_data->estsz * 4) / 16;
+ __le64 vu_eve_index = (log_data->vu_eve_st_sz * 4) / 16;
+ __le64 ascii_table_index = (log_data->asctsz * 4);
+ //Calculating the offset for dynamic fields.
+ __le64 stat_id_str_table_ofst = C9_TELEMETRY_STR_LOG_SIST_OFST + (log_data->sitsz * 4);
+ __le64 event_str_table_ofst = stat_id_str_table_ofst + (log_data->estsz * 4);
+ __le64 vu_event_str_table_ofst = event_str_table_ofst + (log_data->vu_eve_st_sz * 4);
+ __le64 ascii_table_ofst = vu_event_str_table_ofst + (log_data->asctsz * 4);
+ struct statistics_id_str_table_entry stat_id_str_table_arr[stat_id_index];
+ struct event_id_str_table_entry event_id_str_table_arr[eve_id_index];
+ struct vu_event_id_str_table_entry vu_event_id_str_table_arr[vu_eve_index];
+ __u8 ascii_table_info_arr[ascii_table_index];
+ int j;
+
+ printf(" Log Page Version : 0x%x\n", log_data->log_page_version);
+
+ printf(" Reserved : ");
+ for (j = 0; j < 15; j++)
+ printf("%d", log_data->reserved1[j]);
+ printf("\n");
+
+ printf(" Log page GUID : 0x");
+ for (j = C9_GUID_LENGTH - 1; j >= 0; j--)
+ printf("%x", log_data->log_page_guid[j]);
+ printf("\n");
+
+ printf(" Telemetry String Log Size : 0x%lx\n", le64_to_cpu(log_data->sls));
+
+ printf(" Reserved : ");
+ for (j = 0; j < 24; j++)
+ printf("%d", log_data->reserved2[j]);
+ printf("\n");
+
+ printf(" Statistics Identifier String Table Start : 0x%lx\n", le64_to_cpu(log_data->sits));
+ printf(" Statistics Identifier String Table Size : 0x%lx\n", le64_to_cpu(log_data->sitsz));
+ printf(" Event String Table Start : 0x%lx\n", le64_to_cpu(log_data->ests));
+ printf(" Event String Table Size : 0x%lx\n", le64_to_cpu(log_data->estsz));
+ printf(" VU Event String Table Start : 0x%lx\n", le64_to_cpu(log_data->vu_eve_sts));
+ printf(" VU Event String Table Size : 0x%lx\n", le64_to_cpu(log_data->vu_eve_st_sz));
+ printf(" ASCII Table Start : 0x%lx\n", le64_to_cpu(log_data->ascts));
+ printf(" ASCII Table Size : 0x%lx\n", le64_to_cpu(log_data->asctsz));
+
+ printf(" FIFO 1 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo1[j], log_data->fifo1[j]);
+ }
+
+ printf(" FIFO 2 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo2[j], log_data->fifo2[j]);
+ }
+
+ printf(" FIFO 3 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo3[j], log_data->fifo3[j]);
+ }
+
+ printf(" FIFO 4 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+
+ printf(" %d %d %c \n", j, log_data->fifo4[j], log_data->fifo4[j]);
+ }
+
+ printf(" FIFO 5 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo5[j], log_data->fifo5[j]);
+ }
+
+ printf(" FIFO 6 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo6[j], log_data->fifo6[j]);
+ }
+
+ printf(" FIFO 7 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo7[j], log_data->fifo7[j]);
+ }
+
+ printf(" FIFO 8 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf("index value ascii_val");
+ printf(" %d %d %c \n", j, log_data->fifo8[j], log_data->fifo8[j]);
+ }
+
+ printf(" FIFO 9 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo9[j], log_data->fifo9[j]);
+ }
+
+ printf(" FIFO 10 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo10[j], log_data->fifo10[j]);
+ }
+
+ printf(" FIFO 11 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo11[j], log_data->fifo11[j]);
+ }
+
+ printf(" FIFO 12 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo12[j], log_data->fifo12[j]);
+ }
+
+ printf(" FIFO 13 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo13[j], log_data->fifo13[j]);
+ }
+
+ printf(" FIFO 14 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo14[j], log_data->fifo14[j]);
+ }
+
+ printf(" FIFO 15 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo15[j], log_data->fifo15[j]);
+ }
+
+ printf(" FIFO 16 ASCII String\n");
+ printf(" index value ascii_val\n");
+ for (j = 0; j < 16; j++){
+ printf(" %d %d %c \n", j, log_data->fifo16[j], log_data->fifo16[j]);
+ }
+
+ printf(" Reserved : ");
+ for (j = 0; j < 48; j++)
+ printf("%d", log_data->reserved3[j]);
+ printf("\n");
+
+ memcpy(stat_id_str_table_arr, (__u8*)log_data_buf + stat_id_str_table_ofst, (log_data->sitsz * 4));
+ memcpy(event_id_str_table_arr, (__u8*)log_data_buf + event_str_table_ofst, (log_data->estsz * 4));
+ memcpy(vu_event_id_str_table_arr, (__u8*)log_data_buf + vu_event_str_table_ofst, (log_data->vu_eve_st_sz * 4));
+ memcpy(ascii_table_info_arr, (__u8*)log_data_buf + ascii_table_ofst, (log_data->asctsz * 4));
+
+ printf(" Statistics Identifier String Table\n");
+ for (j = 0; j < stat_id_index; j++){
+ printf(" Vendor Specific Statistic Identifier : 0x%x\n",le16_to_cpu(stat_id_str_table_arr[j].vs_si));
+ printf(" Reserved : 0x%x\n",stat_id_str_table_arr[j].reserved1);
+ printf(" ASCII ID Length : 0x%x\n",stat_id_str_table_arr[j].ascii_id_len);
+ printf(" ASCII ID offset : 0x%lx\n",le64_to_cpu(stat_id_str_table_arr[j].ascii_id_ofst));
+ printf(" Reserved : 0x%x\n",stat_id_str_table_arr[j].reserved2);
+ }
+
+ printf(" Event Identifier String Table Entry\n");
+ for (j = 0; j < eve_id_index; j++){
+ printf(" Debug Event Class : 0x%x\n",event_id_str_table_arr[j].deb_eve_class);
+ printf(" Event Identifier : 0x%x\n",le16_to_cpu(event_id_str_table_arr[j].ei));
+ printf(" ASCII ID Length : 0x%x\n",event_id_str_table_arr[j].ascii_id_len);
+ printf(" ASCII ID offset : 0x%lx\n",le64_to_cpu(event_id_str_table_arr[j].ascii_id_ofst));
+ printf(" Reserved : 0x%x\n",event_id_str_table_arr[j].reserved2);
+
+ }
+
+ printf(" VU Event Identifier String Table Entry\n");
+ for (j = 0; j < vu_eve_index; j++){
+ printf(" Debug Event Class : 0x%x\n",vu_event_id_str_table_arr[j].deb_eve_class);
+ printf(" VU Event Identifier : 0x%x\n",le16_to_cpu(vu_event_id_str_table_arr[j].vu_ei));
+ printf(" ASCII ID Length : 0x%x\n",vu_event_id_str_table_arr[j].ascii_id_len);
+ printf(" ASCII ID offset : 0x%lx\n",le64_to_cpu(vu_event_id_str_table_arr[j].ascii_id_ofst));
+ printf(" Reserved : 0x%x\n",vu_event_id_str_table_arr[j].reserved);
+
+ }
+
+ printf(" ASCII Table\n");
+ printf(" Byte Data_Byte ASCII_Character\n");
+ for (j = 0; j < ascii_table_index; j++){
+ printf(" %lld 0x%x %c \n",ascii_table_ofst+j,ascii_table_info_arr[j],ascii_table_info_arr[j]);
+ }
+ return 0;
}
static int ocp_print_C9_log_json(struct telemetry_str_log_format *log_data,__u8 *log_data_buf)
{
- struct json_object *root = json_create_object();
- struct json_object *stat_table = json_create_object();
- struct json_object *eve_table = json_create_object();
- struct json_object *vu_eve_table = json_create_object();
- struct json_object *entry = json_create_object();
- char res_arr[48];
- char *res = res_arr;
- char guid_buf[C9_GUID_LENGTH];
- char *guid = guid_buf;
- char fifo_arr[16];
- char *fifo = fifo_arr;
- //calculating the index value for array
- __le64 stat_id_index = (log_data->sitsz * 4) / 16;
- __le64 eve_id_index = (log_data->estsz * 4) / 16;
- __le64 vu_eve_index = (log_data->vu_eve_st_sz * 4) / 16;
- __le64 ascii_table_index = (log_data->asctsz * 4);
- //Calculating the offset for dynamic fields.
- __le64 stat_id_str_table_ofst = C9_TELEMETRY_STR_LOG_SIST_OFST + (log_data->sitsz * 4);
- __le64 event_str_table_ofst = stat_id_str_table_ofst + (log_data->estsz * 4);
- __le64 vu_event_str_table_ofst = event_str_table_ofst + (log_data->vu_eve_st_sz * 4);
- __le64 ascii_table_ofst = vu_event_str_table_ofst + (log_data->asctsz * 4);
- struct statistics_id_str_table_entry stat_id_str_table_arr[stat_id_index];
- struct event_id_str_table_entry event_id_str_table_arr[eve_id_index];
- struct vu_event_id_str_table_entry vu_event_id_str_table_arr[vu_eve_index];
- __u8 ascii_table_info_arr[ascii_table_index];
- char ascii_buf[ascii_table_index];
- char *ascii = ascii_buf;
- int j;
-
- json_object_add_value_int(root, "Log Page Version", le16_to_cpu(log_data->log_page_version));
-
- memset((__u8 *)res, 0, 15);
- for (j = 0; j < 15; j++)
- res += sprintf(res, "%d", log_data->reserved1[j]);
- json_object_add_value_string(root, "Reserved", res_arr);
-
- memset((void *)guid, 0, C9_GUID_LENGTH);
- for (j = C9_GUID_LENGTH - 1; j >= 0; j--)
- guid += sprintf(guid, "%02x", log_data->log_page_guid[j]);
- json_object_add_value_string(root, "Log page GUID", guid_buf);
-
- json_object_add_value_int(root, "Telemetry String Log Size", le64_to_cpu(log_data->sls));
-
- memset((__u8 *)res, 0, 24);
- for (j = 0; j < 24; j++)
- res += sprintf(res, "%d", log_data->reserved2[j]);
- json_object_add_value_string(root, "Reserved", res_arr);
-
- json_object_add_value_int(root, "Statistics Identifier String Table Start", le64_to_cpu(log_data->sits));
- json_object_add_value_int(root, "Event String Table Start", le64_to_cpu(log_data->ests));
- json_object_add_value_int(root, "Event String Table Size", le64_to_cpu(log_data->estsz));
- json_object_add_value_int(root, "VU Event String Table Start", le64_to_cpu(log_data->vu_eve_sts));
- json_object_add_value_int(root, "VU Event String Table Size", le64_to_cpu(log_data->vu_eve_st_sz));
- json_object_add_value_int(root, "ASCII Table Start", le64_to_cpu(log_data->ascts));
- json_object_add_value_int(root, "ASCII Table Size", le64_to_cpu(log_data->asctsz));
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo1[j]);
- json_object_add_value_string(root, "FIFO 1 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo2[j]);
- json_object_add_value_string(root, "FIFO 2 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo3[j]);
- json_object_add_value_string(root, "FIFO 3 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo4[j]);
- json_object_add_value_string(root, "FIFO 4 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo5[j]);
- json_object_add_value_string(root, "FIFO 5 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo6[j]);
- json_object_add_value_string(root, "FIFO 6 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo7[j]);
- json_object_add_value_string(root, "FIFO 7 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo8[j]);
- json_object_add_value_string(root, "FIFO 8 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo9[j]);
- json_object_add_value_string(root, "FIFO 9 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo10[j]);
- json_object_add_value_string(root, "FIFO 10 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo11[j]);
- json_object_add_value_string(root, "FIFO 11 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo12[j]);
- json_object_add_value_string(root, "FIFO 12 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo13[j]);
- json_object_add_value_string(root, "FIFO 13 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo14[j]);
- json_object_add_value_string(root, "FIFO 14 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo15[j]);
- json_object_add_value_string(root, "FIFO 15 ASCII String", fifo_arr);
-
- memset((void *)fifo, 0, 16);
- for (j = 0; j < 16; j++)
- fifo += sprintf(fifo, "%c", log_data->fifo16[j]);
- json_object_add_value_string(root, "FIFO 16 ASCII String", fifo_arr);
-
- memset((__u8 *)res, 0, 48);
- for (j = 0; j < 48; j++)
- res += sprintf(res, "%d", log_data->reserved3[j]);
- json_object_add_value_string(root, "Reserved", res_arr);
-
- memcpy(stat_id_str_table_arr, (__u8*)log_data_buf + stat_id_str_table_ofst, (log_data->sitsz * 4));
- memcpy(event_id_str_table_arr, (__u8*)log_data_buf + event_str_table_ofst, (log_data->estsz * 4));
- memcpy(vu_event_id_str_table_arr, (__u8*)log_data_buf + vu_event_str_table_ofst, (log_data->vu_eve_st_sz * 4));
- memcpy(ascii_table_info_arr, (__u8*)log_data_buf + ascii_table_ofst, (log_data->asctsz * 4));
-
- for (j = 0; j < stat_id_index; j++){
- json_object_add_value_int(entry, "Vendor Specific Statistic Identifier", le16_to_cpu(stat_id_str_table_arr[j].vs_si));
- json_object_add_value_int(entry, "Reserved", le64_to_cpu(stat_id_str_table_arr[j].reserved1));
- json_object_add_value_int(entry, "ASCII ID Length", le64_to_cpu(stat_id_str_table_arr[j].ascii_id_len));
- json_object_add_value_int(entry, "ASCII ID offset", le64_to_cpu(stat_id_str_table_arr[j].ascii_id_ofst));
- json_object_add_value_int(entry, "Reserved", le64_to_cpu(stat_id_str_table_arr[j].reserved2));
- json_array_add_value_object(stat_table, entry);
- }
- json_object_add_value_array(root, "Statistics Identifier String Table", stat_table);
-
- for (j = 0; j < eve_id_index; j++){
- json_object_add_value_int(entry, "Debug Event Class", le16_to_cpu(event_id_str_table_arr[j].deb_eve_class));
- json_object_add_value_int(entry, "Event Identifier", le16_to_cpu(event_id_str_table_arr[j].ei));
- json_object_add_value_int(entry, "ASCII ID Length", le64_to_cpu(event_id_str_table_arr[j].ascii_id_len));
- json_object_add_value_int(entry, "ASCII ID offset", le64_to_cpu(event_id_str_table_arr[j].ascii_id_ofst));
- json_object_add_value_int(entry, "Reserved", le64_to_cpu(event_id_str_table_arr[j].reserved2));
- json_array_add_value_object(eve_table, entry);
- }
- json_object_add_value_array(root, "Event Identifier String Table Entry", eve_table);
-
- for (j = 0; j < vu_eve_index; j++){
- json_object_add_value_int(entry, "Debug Event Class", le16_to_cpu(vu_event_id_str_table_arr[j].deb_eve_class));
- json_object_add_value_int(entry, "VU Event Identifier", le16_to_cpu(vu_event_id_str_table_arr[j].vu_ei));
- json_object_add_value_int(entry, "ASCII ID Length", le64_to_cpu(vu_event_id_str_table_arr[j].ascii_id_len));
- json_object_add_value_int(entry, "ASCII ID offset", le64_to_cpu(vu_event_id_str_table_arr[j].ascii_id_ofst));
- json_object_add_value_int(entry, "Reserved", le64_to_cpu(vu_event_id_str_table_arr[j].reserved));
- json_array_add_value_object(vu_eve_table, entry);
- }
- json_object_add_value_array(root, "VU Event Identifier String Table Entry", vu_eve_table);
-
- memset((void *)ascii, 0, ascii_table_index);
- for (j = 0; j < ascii_table_index; j++)
- ascii += sprintf(ascii, "%c", ascii_table_info_arr[j]);
- json_object_add_value_string(root, "ASCII Table", ascii_buf);
-
- json_print_object(root, NULL);
- printf("\n");
- json_free_object(root);
- json_free_object(stat_table);
- json_free_object(eve_table);
- json_free_object(vu_eve_table);
-
- return 0;
+ struct json_object *root = json_create_object();
+ struct json_object *stat_table = json_create_object();
+ struct json_object *eve_table = json_create_object();
+ struct json_object *vu_eve_table = json_create_object();
+ struct json_object *entry = json_create_object();
+ char res_arr[48];
+ char *res = res_arr;
+ char guid_buf[C9_GUID_LENGTH];
+ char *guid = guid_buf;
+ char fifo_arr[16];
+ char *fifo = fifo_arr;
+ //calculating the index value for array
+ __le64 stat_id_index = (log_data->sitsz * 4) / 16;
+ __le64 eve_id_index = (log_data->estsz * 4) / 16;
+ __le64 vu_eve_index = (log_data->vu_eve_st_sz * 4) / 16;
+ __le64 ascii_table_index = (log_data->asctsz * 4);
+ //Calculating the offset for dynamic fields.
+ __le64 stat_id_str_table_ofst = C9_TELEMETRY_STR_LOG_SIST_OFST + (log_data->sitsz * 4);
+ __le64 event_str_table_ofst = stat_id_str_table_ofst + (log_data->estsz * 4);
+ __le64 vu_event_str_table_ofst = event_str_table_ofst + (log_data->vu_eve_st_sz * 4);
+ __le64 ascii_table_ofst = vu_event_str_table_ofst + (log_data->asctsz * 4);
+ struct statistics_id_str_table_entry stat_id_str_table_arr[stat_id_index];
+ struct event_id_str_table_entry event_id_str_table_arr[eve_id_index];
+ struct vu_event_id_str_table_entry vu_event_id_str_table_arr[vu_eve_index];
+ __u8 ascii_table_info_arr[ascii_table_index];
+ char ascii_buf[ascii_table_index];
+ char *ascii = ascii_buf;
+ int j;
+
+ json_object_add_value_int(root, "Log Page Version", le16_to_cpu(log_data->log_page_version));
+
+ memset((__u8 *)res, 0, 15);
+ for (j = 0; j < 15; j++)
+ res += sprintf(res, "%d", log_data->reserved1[j]);
+ json_object_add_value_string(root, "Reserved", res_arr);
+
+ memset((void *)guid, 0, C9_GUID_LENGTH);
+ for (j = C9_GUID_LENGTH - 1; j >= 0; j--)
+ guid += sprintf(guid, "%02x", log_data->log_page_guid[j]);
+ json_object_add_value_string(root, "Log page GUID", guid_buf);
+
+ json_object_add_value_int(root, "Telemetry String Log Size", le64_to_cpu(log_data->sls));
+
+ memset((__u8 *)res, 0, 24);
+ for (j = 0; j < 24; j++)
+ res += sprintf(res, "%d", log_data->reserved2[j]);
+ json_object_add_value_string(root, "Reserved", res_arr);
+
+ json_object_add_value_int(root, "Statistics Identifier String Table Start", le64_to_cpu(log_data->sits));
+ json_object_add_value_int(root, "Event String Table Start", le64_to_cpu(log_data->ests));
+ json_object_add_value_int(root, "Event String Table Size", le64_to_cpu(log_data->estsz));
+ json_object_add_value_int(root, "VU Event String Table Start", le64_to_cpu(log_data->vu_eve_sts));
+ json_object_add_value_int(root, "VU Event String Table Size", le64_to_cpu(log_data->vu_eve_st_sz));
+ json_object_add_value_int(root, "ASCII Table Start", le64_to_cpu(log_data->ascts));
+ json_object_add_value_int(root, "ASCII Table Size", le64_to_cpu(log_data->asctsz));
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo1[j]);
+ json_object_add_value_string(root, "FIFO 1 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo2[j]);
+ json_object_add_value_string(root, "FIFO 2 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo3[j]);
+ json_object_add_value_string(root, "FIFO 3 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo4[j]);
+ json_object_add_value_string(root, "FIFO 4 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo5[j]);
+ json_object_add_value_string(root, "FIFO 5 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo6[j]);
+ json_object_add_value_string(root, "FIFO 6 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo7[j]);
+ json_object_add_value_string(root, "FIFO 7 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo8[j]);
+ json_object_add_value_string(root, "FIFO 8 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo9[j]);
+ json_object_add_value_string(root, "FIFO 9 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo10[j]);
+ json_object_add_value_string(root, "FIFO 10 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo11[j]);
+ json_object_add_value_string(root, "FIFO 11 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo12[j]);
+ json_object_add_value_string(root, "FIFO 12 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo13[j]);
+ json_object_add_value_string(root, "FIFO 13 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo14[j]);
+ json_object_add_value_string(root, "FIFO 14 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo15[j]);
+ json_object_add_value_string(root, "FIFO 15 ASCII String", fifo_arr);
+
+ memset((void *)fifo, 0, 16);
+ for (j = 0; j < 16; j++)
+ fifo += sprintf(fifo, "%c", log_data->fifo16[j]);
+ json_object_add_value_string(root, "FIFO 16 ASCII String", fifo_arr);
+
+ memset((__u8 *)res, 0, 48);
+ for (j = 0; j < 48; j++)
+ res += sprintf(res, "%d", log_data->reserved3[j]);
+ json_object_add_value_string(root, "Reserved", res_arr);
+
+ memcpy(stat_id_str_table_arr, (__u8*)log_data_buf + stat_id_str_table_ofst, (log_data->sitsz * 4));
+ memcpy(event_id_str_table_arr, (__u8*)log_data_buf + event_str_table_ofst, (log_data->estsz * 4));
+ memcpy(vu_event_id_str_table_arr, (__u8*)log_data_buf + vu_event_str_table_ofst, (log_data->vu_eve_st_sz * 4));
+ memcpy(ascii_table_info_arr, (__u8*)log_data_buf + ascii_table_ofst, (log_data->asctsz * 4));
+
+ for (j = 0; j < stat_id_index; j++){
+ json_object_add_value_int(entry, "Vendor Specific Statistic Identifier", le16_to_cpu(stat_id_str_table_arr[j].vs_si));
+ json_object_add_value_int(entry, "Reserved", le64_to_cpu(stat_id_str_table_arr[j].reserved1));
+ json_object_add_value_int(entry, "ASCII ID Length", le64_to_cpu(stat_id_str_table_arr[j].ascii_id_len));
+ json_object_add_value_int(entry, "ASCII ID offset", le64_to_cpu(stat_id_str_table_arr[j].ascii_id_ofst));
+ json_object_add_value_int(entry, "Reserved", le64_to_cpu(stat_id_str_table_arr[j].reserved2));
+ json_array_add_value_object(stat_table, entry);
+ }
+ json_object_add_value_array(root, "Statistics Identifier String Table", stat_table);
+
+ for (j = 0; j < eve_id_index; j++){
+ json_object_add_value_int(entry, "Debug Event Class", le16_to_cpu(event_id_str_table_arr[j].deb_eve_class));
+ json_object_add_value_int(entry, "Event Identifier", le16_to_cpu(event_id_str_table_arr[j].ei));
+ json_object_add_value_int(entry, "ASCII ID Length", le64_to_cpu(event_id_str_table_arr[j].ascii_id_len));
+ json_object_add_value_int(entry, "ASCII ID offset", le64_to_cpu(event_id_str_table_arr[j].ascii_id_ofst));
+ json_object_add_value_int(entry, "Reserved", le64_to_cpu(event_id_str_table_arr[j].reserved2));
+ json_array_add_value_object(eve_table, entry);
+ }
+ json_object_add_value_array(root, "Event Identifier String Table Entry", eve_table);
+
+ for (j = 0; j < vu_eve_index; j++){
+ json_object_add_value_int(entry, "Debug Event Class", le16_to_cpu(vu_event_id_str_table_arr[j].deb_eve_class));
+ json_object_add_value_int(entry, "VU Event Identifier", le16_to_cpu(vu_event_id_str_table_arr[j].vu_ei));
+ json_object_add_value_int(entry, "ASCII ID Length", le64_to_cpu(vu_event_id_str_table_arr[j].ascii_id_len));
+ json_object_add_value_int(entry, "ASCII ID offset", le64_to_cpu(vu_event_id_str_table_arr[j].ascii_id_ofst));
+ json_object_add_value_int(entry, "Reserved", le64_to_cpu(vu_event_id_str_table_arr[j].reserved));
+ json_array_add_value_object(vu_eve_table, entry);
+ }
+ json_object_add_value_array(root, "VU Event Identifier String Table Entry", vu_eve_table);
+
+ memset((void *)ascii, 0, ascii_table_index);
+ for (j = 0; j < ascii_table_index; j++)
+ ascii += sprintf(ascii, "%c", ascii_table_info_arr[j]);
+ json_object_add_value_string(root, "ASCII Table", ascii_buf);
+
+ json_print_object(root, NULL);
+ printf("\n");
+ json_free_object(root);
+ json_free_object(stat_table);
+ json_free_object(eve_table);
+ json_free_object(vu_eve_table);
+
+ return 0;
}
static void ocp_print_c9_log_binary(__u8 *log_data_buf,int total_log_page_size)
{
- return d_raw((unsigned char *)log_data_buf, total_log_page_size);
+ return d_raw((unsigned char *)log_data_buf, total_log_page_size);
}
static int get_c9_log_page(struct nvme_dev *dev, char *format)
{
- int ret = 0;
- __u8 *header_data;
- struct telemetry_str_log_format *log_data;
+ int ret = 0;
+ __u8 *header_data;
+ struct telemetry_str_log_format *log_data;
enum nvme_print_flags fmt;
- __u8 *full_log_buf_data = NULL;
- __le64 stat_id_str_table_ofst = 0;
- __le64 event_str_table_ofst = 0;
- __le64 vu_event_str_table_ofst = 0;
- __le64 ascii_table_ofst = 0;
- __le64 total_log_page_sz = 0;
-
- ret = validate_output_format(format, &fmt);
- if (ret < 0) {
- fprintf(stderr, "ERROR : OCP : invalid output format\n");
- return ret;
- }
-
- header_data = (__u8 *)malloc(sizeof(__u8) * C9_TELEMETRY_STR_LOG_LEN);
- if (!header_data) {
- fprintf(stderr, "ERROR : OCP : malloc : %s\n", strerror(errno));
- return -1;
- }
- memset(header_data, 0, sizeof(__u8) * C9_TELEMETRY_STR_LOG_LEN);
-
- ret = nvme_get_log_simple(dev_fd(dev), C9_TELEMETRY_STRING_LOG_ENABLE_OPCODE,
- C9_TELEMETRY_STR_LOG_LEN, header_data);
-
- if (!ret) {
- log_data = (struct telemetry_str_log_format *)header_data;
- printf("Statistics Identifier String Table Size = %lld\n",log_data->sitsz);
- printf("Event String Table Size = %lld\n",log_data->estsz);
- printf("VU Event String Table Size = %lld\n",log_data->vu_eve_st_sz);
- printf("ASCII Table Size = %lld\n",log_data->asctsz);
-
- //Calculating the offset for dynamic fields.
- stat_id_str_table_ofst = C9_TELEMETRY_STR_LOG_SIST_OFST + (log_data->sitsz * 4);
- event_str_table_ofst = stat_id_str_table_ofst + (log_data->estsz * 4);
- vu_event_str_table_ofst = event_str_table_ofst + (log_data->vu_eve_st_sz * 4);
- ascii_table_ofst = vu_event_str_table_ofst + (log_data->asctsz * 4);
- total_log_page_sz = stat_id_str_table_ofst + event_str_table_ofst + vu_event_str_table_ofst + ascii_table_ofst;
-
- printf("stat_id_str_table_ofst = %lld\n",stat_id_str_table_ofst);
- printf("event_str_table_ofst = %lld\n",event_str_table_ofst);
- printf("vu_event_str_table_ofst = %lld\n",vu_event_str_table_ofst);
- printf("ascii_table_ofst = %lld\n",ascii_table_ofst);
- printf("total_log_page_sz = %lld\n",total_log_page_sz);
-
- full_log_buf_data = (__u8 *)malloc(sizeof(__u8) * total_log_page_sz);
- if (!full_log_buf_data) {
- fprintf(stderr, "ERROR : OCP : malloc : %s\n", strerror(errno));
- return -1;
- }
- memset(full_log_buf_data, 0, sizeof(__u8) * total_log_page_sz);
-
- ret = nvme_get_log_simple(dev_fd(dev), C9_TELEMETRY_STRING_LOG_ENABLE_OPCODE,
- total_log_page_sz, full_log_buf_data);
-
- if (!ret) {
- switch (fmt) {
- case NORMAL:
- ocp_print_C9_log_normal(log_data,full_log_buf_data);
- break;
- case JSON:
- ocp_print_C9_log_json(log_data,full_log_buf_data);
- break;
- case BINARY:
- ocp_print_c9_log_binary(full_log_buf_data,total_log_page_sz);
- break;
- default:
- fprintf(stderr, "unhandled output format\n");
- break;
- }
- } else{
- fprintf(stderr, "ERROR : OCP : Unable to read C9 data from buffer\n");
- }
- } else {
- fprintf(stderr, "ERROR : OCP : Unable to read C9 data from buffer\n");
- }
-
- free(header_data);
- free(full_log_buf_data);
-
- return ret;
+ __u8 *full_log_buf_data = NULL;
+ __le64 stat_id_str_table_ofst = 0;
+ __le64 event_str_table_ofst = 0;
+ __le64 vu_event_str_table_ofst = 0;
+ __le64 ascii_table_ofst = 0;
+ __le64 total_log_page_sz = 0;
+
+ ret = validate_output_format(format, &fmt);
+ if (ret < 0) {
+ fprintf(stderr, "ERROR : OCP : invalid output format\n");
+ return ret;
+ }
+
+ header_data = (__u8 *)malloc(sizeof(__u8) * C9_TELEMETRY_STR_LOG_LEN);
+ if (!header_data) {
+ fprintf(stderr, "ERROR : OCP : malloc : %s\n", strerror(errno));
+ return -1;
+ }
+ memset(header_data, 0, sizeof(__u8) * C9_TELEMETRY_STR_LOG_LEN);
+
+ ret = nvme_get_log_simple(dev_fd(dev), C9_TELEMETRY_STRING_LOG_ENABLE_OPCODE,
+ C9_TELEMETRY_STR_LOG_LEN, header_data);
+
+ if (!ret) {
+ log_data = (struct telemetry_str_log_format *)header_data;
+ printf("Statistics Identifier String Table Size = %lld\n",log_data->sitsz);
+ printf("Event String Table Size = %lld\n",log_data->estsz);
+ printf("VU Event String Table Size = %lld\n",log_data->vu_eve_st_sz);
+ printf("ASCII Table Size = %lld\n",log_data->asctsz);
+
+ //Calculating the offset for dynamic fields.
+ stat_id_str_table_ofst = C9_TELEMETRY_STR_LOG_SIST_OFST + (log_data->sitsz * 4);
+ event_str_table_ofst = stat_id_str_table_ofst + (log_data->estsz * 4);
+ vu_event_str_table_ofst = event_str_table_ofst + (log_data->vu_eve_st_sz * 4);
+ ascii_table_ofst = vu_event_str_table_ofst + (log_data->asctsz * 4);
+ total_log_page_sz = stat_id_str_table_ofst + event_str_table_ofst + vu_event_str_table_ofst + ascii_table_ofst;
+
+ printf("stat_id_str_table_ofst = %lld\n",stat_id_str_table_ofst);
+ printf("event_str_table_ofst = %lld\n",event_str_table_ofst);
+ printf("vu_event_str_table_ofst = %lld\n",vu_event_str_table_ofst);
+ printf("ascii_table_ofst = %lld\n",ascii_table_ofst);
+ printf("total_log_page_sz = %lld\n",total_log_page_sz);
+
+ full_log_buf_data = (__u8 *)malloc(sizeof(__u8) * total_log_page_sz);
+ if (!full_log_buf_data) {
+ fprintf(stderr, "ERROR : OCP : malloc : %s\n", strerror(errno));
+ return -1;
+ }
+ memset(full_log_buf_data, 0, sizeof(__u8) * total_log_page_sz);
+
+ ret = nvme_get_log_simple(dev_fd(dev), C9_TELEMETRY_STRING_LOG_ENABLE_OPCODE,
+ total_log_page_sz, full_log_buf_data);
+
+ if (!ret) {
+ switch (fmt) {
+ case NORMAL:
+ ocp_print_C9_log_normal(log_data,full_log_buf_data);
+ break;
+ case JSON:
+ ocp_print_C9_log_json(log_data,full_log_buf_data);
+ break;
+ case BINARY:
+ ocp_print_c9_log_binary(full_log_buf_data,total_log_page_sz);
+ break;
+ default:
+ fprintf(stderr, "unhandled output format\n");
+ break;
+ }
+ } else{
+ fprintf(stderr, "ERROR : OCP : Unable to read C9 data from buffer\n");
+ }
+ } else {
+ fprintf(stderr, "ERROR : OCP : Unable to read C9 data from buffer\n");
+ }
+
+ free(header_data);
+ free(full_log_buf_data);
+
+ return ret;
}
static int ocp_telemetry_str_log_format(int argc, char **argv, struct command *cmd,
- struct plugin *plugin)
+ struct plugin *plugin)
{
- struct nvme_dev *dev;
- int ret = 0;
- const char *desc = "Retrieve telemetry string log format";
+ struct nvme_dev *dev;
+ int ret = 0;
+ const char *desc = "Retrieve telemetry string log format";
- struct config {
- char *output_format;
- };
+ struct config {
+ char *output_format;
+ };
- struct config cfg = {
- .output_format = "normal",
- };
+ struct config cfg = {
+ .output_format = "normal",
+ };
- OPT_ARGS(opts) = {
- OPT_FMT("output-format", 'o', &cfg.output_format, "output Format: normal|json"),
- OPT_END()
- };
+ OPT_ARGS(opts) = {
+ OPT_FMT("output-format", 'o', &cfg.output_format, "output Format: normal|json"),
+ OPT_END()
+ };
- ret = parse_and_open(&dev, argc, argv, desc, opts);
- if (ret)
- return ret;
+ ret = parse_and_open(&dev, argc, argv, desc, opts);
+ if (ret)
+ return ret;
- ret = get_c9_log_page(dev, cfg.output_format);
- if (ret)
- fprintf(stderr, "ERROR : OCP : Failure reading the C9 Log Page, ret = %d\n", ret);
+ ret = get_c9_log_page(dev, cfg.output_format);
+ if (ret)
+ fprintf(stderr, "ERROR : OCP : Failure reading the C9 Log Page, ret = %d\n", ret);
- dev_close(dev);
+ dev_close(dev);
- return ret;
+ return ret;
}
///////////////////////////////////////////////////////////////////////////////
diff --git a/plugins/ocp/ocp-nvme.h b/plugins/ocp/ocp-nvme.h
index 95539b0..0317ea7 100644
--- a/plugins/ocp/ocp-nvme.h
+++ b/plugins/ocp/ocp-nvme.h
@@ -30,6 +30,9 @@ PLUGIN(NAME("ocp", "OCP cloud SSD extensions", NVME_VERSION),
ENTRY("set-plp-health-check-interval", "Set PLP Health Check Interval", set_plp_health_check_interval)
ENTRY("get-plp-health-check-interval", "Get PLP Health Check Interval", get_plp_health_check_interval)
ENTRY("telemetry-string-log", "Retrieve Telemetry string Log Page", ocp_telemetry_str_log_format)
+ ENTRY("set-telemetry-profile", "Set Telemetry Profile Feature", ocp_set_telemetry_profile_feature)
+ ENTRY("set-dssd-async-event-config", "Set DSSD Async Event Config", set_dssd_async_event_config)
+ ENTRY("get-dssd-async-event-config", "Get DSSD Async Event Config", get_dssd_async_event_config)
)
);
diff --git a/plugins/ocp/ocp-utils.c b/plugins/ocp/ocp-utils.c
index 1257b30..8a1462e 100644
--- a/plugins/ocp/ocp-utils.c
+++ b/plugins/ocp/ocp-utils.c
@@ -1,19 +1,32 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2022 Solidigm.
+ * Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
#include <unistd.h>
+#include <errno.h>
#include "ocp-utils.h"
-#include "nvme-print.h"
const unsigned char ocp_uuid[NVME_UUID_LEN] = {
0xc1, 0x94, 0xd5, 0x5b, 0xe0, 0x94, 0x47, 0x94, 0xa2, 0x1d,
0x29, 0x99, 0x8f, 0x56, 0xbe, 0x6f };
-int ocp_get_uuid_index(struct nvme_dev *dev, int *index)
+int ocp_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index)
+{
+ int i = nvme_uuid_find(uuid_list, ocp_uuid);
+
+ *index = 0;
+ if (i > 0)
+ *index = i;
+ else
+ return -errno;
+
+ return 0;
+}
+
+int ocp_get_uuid_index(struct nvme_dev *dev, __u8 *index)
{
struct nvme_id_uuid_list uuid_list;
int err = nvme_identify_uuid(dev_fd(dev), &uuid_list);
@@ -22,11 +35,5 @@ int ocp_get_uuid_index(struct nvme_dev *dev, int *index)
if (err)
return err;
- for (int i = 0; i < NVME_ID_UUID_LIST_MAX; i++) {
- if (memcmp(ocp_uuid, &uuid_list.entry[i].uuid, NVME_UUID_LEN) == 0) {
- *index = i + 1;
- break;
- }
- }
- return err;
+ return ocp_find_uuid_index(&uuid_list, index);
}
diff --git a/plugins/ocp/ocp-utils.h b/plugins/ocp/ocp-utils.h
index d02bea9..1512db8 100644
--- a/plugins/ocp/ocp-utils.h
+++ b/plugins/ocp/ocp-utils.h
@@ -1,18 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (c) 2022 Solidigm.
+ * Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
#include "nvme.h"
+/*
+ * UUID assigned for OCP.
+ */
+extern const unsigned char ocp_uuid[NVME_UUID_LEN];
+
/**
* ocp_get_uuid_index() - Get OCP UUID index
* @dev: nvme device
* @index: integer pointer to here to save the index
- * @result: The command completion result from CQE dword0
*
- * Return: Zero if nvme device has UUID list log page, or result of get uuid list otherwise.
+ * Return: Zero if nvme device has UUID list identify page, or positive result of get uuid list
+ * or negative POSIX error code otherwise.
+ */
+int ocp_get_uuid_index(struct nvme_dev *dev, __u8 *index);
+
+/**
+ * ocp_find_uuid_index() - Find OCP UUID index in UUID list
+ * @uuid_list: uuid_list retrieved from Identify UUID List (CNS 0x17)
+ * @index: integer pointer to here to save the index
+ *
+ * Return: Zero if nvme device has UUID list log page, Negative POSIX error code otherwise.
*/
-int ocp_get_uuid_index(struct nvme_dev *dev, int *index);
+int ocp_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index);
diff --git a/plugins/sed/sedopal_cmd.c b/plugins/sed/sedopal_cmd.c
index 649e0b2..21ebd36 100644
--- a/plugins/sed/sedopal_cmd.c
+++ b/plugins/sed/sedopal_cmd.c
@@ -169,8 +169,10 @@ int sedopal_cmd_initialize(int fd)
struct opal_key key;
struct opal_lr_act lr_act = {};
struct opal_user_lr_setup lr_setup = {};
+ struct opal_new_pw new_pw = {};
sedopal_ask_key = true;
+ sedopal_ask_new_key = true;
rc = sedopal_set_key(&key);
if (rc != 0)
return rc;
@@ -217,6 +219,21 @@ int sedopal_cmd_initialize(int fd)
return rc;
}
+ /*
+ * set password
+ */
+ new_pw.new_user_pw.who = OPAL_ADMIN1;
+ new_pw.new_user_pw.opal_key.lr = 0;
+ new_pw.session.who = OPAL_ADMIN1;
+ new_pw.session.sum = 0;
+ new_pw.session.opal_key.lr = 0;
+ new_pw.session.opal_key = key;
+ new_pw.new_user_pw.opal_key = key;
+
+ rc = ioctl(fd, IOC_OPAL_SET_PW, &new_pw);
+ if (rc != 0)
+ fprintf(stderr, "Error: failed setting password - %d\n", rc);
+
return rc;
}
@@ -455,7 +472,7 @@ int sedopal_cmd_discover(int fd)
struct level_0_discovery_features *feat;
struct level_0_discovery_features *feat_end;
uint16_t code;
- uint8_t locking_flags;
+ uint8_t locking_flags = 0;
char buf[4096];
discover.data = (__u64)buf;
diff --git a/plugins/solidigm/solidigm-garbage-collection.c b/plugins/solidigm/solidigm-garbage-collection.c
index a37e9c5..002b187 100644
--- a/plugins/solidigm/solidigm-garbage-collection.c
+++ b/plugins/solidigm/solidigm-garbage-collection.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2022 Solidigm.
+ * Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@@ -97,7 +97,7 @@ int solidigm_get_garbage_collection_log(int argc, char **argv, struct command *c
return -EINVAL;
}
- uuid_index = solidigm_get_vu_uuid_index(dev);
+ sldgm_get_uuid_index(dev, &uuid_index);
struct garbage_control_collection_log gc_log;
const int solidigm_vu_gc_log_id = 0xfd;
diff --git a/plugins/solidigm/solidigm-latency-tracking.c b/plugins/solidigm/solidigm-latency-tracking.c
index 66f3c56..c6c3315 100644
--- a/plugins/solidigm/solidigm-latency-tracking.c
+++ b/plugins/solidigm/solidigm-latency-tracking.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2022 Solidigm.
+ * Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@@ -430,7 +430,7 @@ int solidigm_get_latency_tracking_log(int argc, char **argv, struct command *cmd
return -EINVAL;
}
- lt.uuid_index = solidigm_get_vu_uuid_index(dev);
+ sldgm_get_uuid_index(dev, &lt.uuid_index);
err = latency_tracking_enable(&lt);
if (err) {
diff --git a/plugins/solidigm/solidigm-log-page-dir.c b/plugins/solidigm/solidigm-log-page-dir.c
index bf272f8..7d7c027 100644
--- a/plugins/solidigm/solidigm-log-page-dir.c
+++ b/plugins/solidigm/solidigm-log-page-dir.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2023 Solidigm.
+ * Copyright (c) 2023-2024 Solidigm.
*
* Author: karl.dedow@solidigm.com
*/
@@ -15,6 +15,7 @@
#include "nvme-print.h"
#include "plugins/ocp/ocp-utils.h"
+#include "solidigm-util.h"
#define MIN_VENDOR_LID 0xC0
#define SOLIDIGM_MAX_UUID 2
@@ -38,41 +39,9 @@ static void init_lid_dir(struct lid_dir *lid_dir)
}
}
-static bool is_invalid_uuid(const struct nvme_id_uuid_list_entry entry)
-{
- static const unsigned char ALL_ZERO_UUID[NVME_UUID_LEN] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
-
- return memcmp(ALL_ZERO_UUID, entry.uuid, NVME_UUID_LEN) == 0;
-}
-
-static bool is_solidigm_uuid(const struct nvme_id_uuid_list_entry entry)
-{
- static const unsigned char SOLIDIGM_UUID[NVME_UUID_LEN] = {
- 0x96, 0x19, 0x58, 0x6e, 0xc1, 0x1b, 0x43, 0xad,
- 0xaa, 0xaa, 0x65, 0x41, 0x87, 0xf6, 0xbb, 0xb2
- };
-
- return memcmp(SOLIDIGM_UUID, entry.uuid, NVME_UUID_LEN) == 0;
-}
-
-static bool is_ocp_uuid(const struct nvme_id_uuid_list_entry entry)
-{
- static const unsigned char OCP_UUID[NVME_UUID_LEN] = {
- 0xc1, 0x94, 0xd5, 0x5b, 0xe0, 0x94, 0x47, 0x94,
- 0xa2, 0x1d, 0x29, 0x99, 0x8f, 0x56, 0xbe, 0x6f
- };
-
- return memcmp(OCP_UUID, entry.uuid, NVME_UUID_LEN) == 0;
-}
-
static int get_supported_log_pages_log(struct nvme_dev *dev, int uuid_index,
struct nvme_supported_log_pages *supported)
{
- static const __u8 LID;
-
memset(supported, 0, sizeof(*supported));
struct nvme_get_log_args args = {
.lpo = 0,
@@ -81,7 +50,7 @@ static int get_supported_log_pages_log(struct nvme_dev *dev, int uuid_index,
.args_size = sizeof(args),
.fd = dev_fd(dev),
.timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
- .lid = LID,
+ .lid = NVME_LOG_LID_SUPPORTED_LOG_PAGES,
.len = sizeof(*supported),
.nsid = NVME_NSID_ALL,
.csi = NVME_CSI_NVM,
@@ -101,8 +70,8 @@ static struct lid_dir *get_standard_lids(struct nvme_supported_log_pages *suppor
init_lid_dir(&standard_dir);
- for (int lid = 0; lid < NVME_LOG_SUPPORTED_LOG_PAGES_MAX; lid++) {
- if (!supported->lid_support[lid] || lid >= MIN_VENDOR_LID)
+ for (int lid = 0; lid < MIN_VENDOR_LID; lid++) {
+ if (!supported->lid_support[lid])
continue;
standard_dir.lid[lid].supported = true;
@@ -128,12 +97,15 @@ static struct lid_dir *get_solidigm_lids(struct nvme_supported_log_pages *suppor
static struct lid_dir solidigm_dir = { 0 };
init_lid_dir(&solidigm_dir);
+ solidigm_dir.lid[0xC0].str = "OCP SMART / Health Information Extended";
solidigm_dir.lid[0xC1].str = "Read Commands Latency Statistics";
solidigm_dir.lid[0xC2].str = "Write Commands Latency Statistics";
+ solidigm_dir.lid[0xC3].str = "OCP Latency Monitor";
solidigm_dir.lid[0xC4].str = "Endurance Manager Statistics";
solidigm_dir.lid[0xC5].str = "Temperature Statistics";
solidigm_dir.lid[0xCA].str = "SMART Attributes";
solidigm_dir.lid[0xCB].str = "VU NVMe IO Queue Metrics Log Page";
+ solidigm_dir.lid[0xD5].str = solidigm_dir.lid[0xC5].str;
solidigm_dir.lid[0xDD].str = "VU Marketing Description Log Page";
solidigm_dir.lid[0xEF].str = "Performance Rating and LBA Access Histogram";
solidigm_dir.lid[0xF2].str = "Get Power Usage Log Page";
@@ -222,7 +194,7 @@ int solidigm_get_log_page_directory_log(int argc, char **argv, struct command *c
OPT_END()
};
- struct nvme_dev *dev = NULL;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
int err = parse_and_open(&dev, argc, argv, description, options);
if (err)
@@ -247,16 +219,21 @@ int solidigm_get_log_page_directory_log(int argc, char **argv, struct command *c
lid_dirs[NO_UUID_INDEX]->lid[lid] = solidigm_lid_dir->lid[lid];
}
} else {
- for (int uuid_index = 1; uuid_index <= SOLIDIGM_MAX_UUID; uuid_index++) {
- if (is_invalid_uuid(uuid_list.entry[uuid_index - 1]))
- break;
- else if (get_supported_log_pages_log(dev, uuid_index, &supported))
- continue;
-
- if (is_solidigm_uuid(uuid_list.entry[uuid_index - 1]))
- lid_dirs[uuid_index] = get_solidigm_lids(&supported);
- else if (is_ocp_uuid(uuid_list.entry[uuid_index - 1]))
- lid_dirs[uuid_index] = get_ocp_lids(&supported);
+ __u8 sldgm_idx;
+ __u8 ocp_idx;
+
+ sldgm_find_uuid_index(&uuid_list, &sldgm_idx);
+ ocp_find_uuid_index(&uuid_list, &ocp_idx);
+
+ if (sldgm_idx && (sldgm_idx <= SOLIDIGM_MAX_UUID)) {
+ err = get_supported_log_pages_log(dev, sldgm_idx, &supported);
+ if (!err)
+ lid_dirs[sldgm_idx] = get_solidigm_lids(&supported);
+ }
+ if (ocp_idx && (ocp_idx <= SOLIDIGM_MAX_UUID)) {
+ err = get_supported_log_pages_log(dev, ocp_idx, &supported);
+ if (!err)
+ lid_dirs[ocp_idx] = get_ocp_lids(&supported);
}
}
} else {
@@ -279,8 +256,5 @@ int solidigm_get_log_page_directory_log(int argc, char **argv, struct command *c
}
}
- /* Redundant close() to make static code analysis happy */
- close(dev->direct.fd);
- dev_close(dev);
return err;
}
diff --git a/plugins/solidigm/solidigm-nvme.h b/plugins/solidigm/solidigm-nvme.h
index bee8266..a639fd2 100644
--- a/plugins/solidigm/solidigm-nvme.h
+++ b/plugins/solidigm/solidigm-nvme.h
@@ -13,7 +13,7 @@
#include "cmd.h"
-#define SOLIDIGM_PLUGIN_VERSION "1.1"
+#define SOLIDIGM_PLUGIN_VERSION "1.2"
PLUGIN(NAME("solidigm", "Solidigm vendor specific extensions", SOLIDIGM_PLUGIN_VERSION),
COMMAND_LIST(
diff --git a/plugins/solidigm/solidigm-smart.c b/plugins/solidigm/solidigm-smart.c
index 62245fa..a97abe2 100644
--- a/plugins/solidigm/solidigm-smart.c
+++ b/plugins/solidigm/solidigm-smart.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2022 Solidigm.
+ * Copyright (c) 2022-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@@ -229,7 +229,7 @@ int solidigm_get_additional_smart_log(int argc, char **argv, struct command *cmd
return err;
}
- uuid_index = solidigm_get_vu_uuid_index(dev);
+ sldgm_get_uuid_index(dev, &uuid_index);
struct nvme_get_log_args args = {
.lpo = 0,
diff --git a/plugins/solidigm/solidigm-temp-stats.c b/plugins/solidigm/solidigm-temp-stats.c
index 85a3c37..7f385db 100644
--- a/plugins/solidigm/solidigm-temp-stats.c
+++ b/plugins/solidigm/solidigm-temp-stats.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2023 Solidigm.
+ * Copyright (c) 2023-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@@ -11,7 +11,8 @@
#include "nvme-print.h"
#include "solidigm-util.h"
-#define SLDGM_TEMP_STATS_LID 0xC5
+#define SLDGM_LEGACY_TEMP_STATS_LID 0xC5
+#define SLDGM_TEMP_STATS_LID 0xD5
struct temp_stats {
__le64 curr;
@@ -40,7 +41,7 @@ static void show_temp_stats(struct temp_stats *stats)
int sldgm_get_temp_stats_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
{
unsigned char buffer[4096] = {0};
- struct nvme_dev *dev;
+ _cleanup_nvme_dev_ struct nvme_dev *dev = NULL;
__u8 uuid_idx;
int err;
@@ -63,7 +64,7 @@ int sldgm_get_temp_stats_log(int argc, char **argv, struct command *cmd, struct
if (err)
return err;
- uuid_idx = solidigm_get_vu_uuid_index(dev);
+ sldgm_get_uuid_index(dev, &uuid_idx);
struct nvme_get_log_args args = {
.lpo = 0,
@@ -84,25 +85,26 @@ int sldgm_get_temp_stats_log(int argc, char **argv, struct command *cmd, struct
};
err = nvme_get_log(&args);
- if (!err) {
- uint64_t *guid = (uint64_t *)&buffer[4080];
+ if (err > 0) {
+ args.lid = SLDGM_LEGACY_TEMP_STATS_LID;
+ err = nvme_get_log(&args);
+ if (!err) {
+ uint64_t *guid = (uint64_t *)&buffer[4080];
- if (guid[1] == 0xC7BB98B7D0324863 && guid[0] == 0xBB2C23990E9C722F) {
- fprintf(stderr, "Error: Log page has 'OCP unsupported Requirements' GUID\n");
- err = -EBADMSG;
- goto closefd;
+ if (guid[1] == 0xC7BB98B7D0324863 && guid[0] == 0xBB2C23990E9C722F) {
+ fprintf(stderr,
+ "Error: Log page has OCP unsupported Requirements GUID\n");
+ return -EBADMSG;
+ }
}
+ }
+ if (!err) {
if (!cfg.raw_binary)
show_temp_stats((struct temp_stats *) buffer);
else
d_raw(buffer, sizeof(struct temp_stats));
- } else if (err > 0) {
+ } else if (err > 0)
nvme_show_status(err);
- }
-closefd:
- /* Redundant close() to make static code analysis happy */
- close(dev->direct.fd);
- dev_close(dev);
return err;
}
diff --git a/plugins/solidigm/solidigm-util.c b/plugins/solidigm/solidigm-util.c
index 0171a49..05d1537 100644
--- a/plugins/solidigm/solidigm-util.c
+++ b/plugins/solidigm/solidigm-util.c
@@ -1,20 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2023 Solidigm.
+ * Copyright (c) 2023-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
-#include "plugins/ocp/ocp-utils.h"
+#include <errno.h>
#include "solidigm-util.h"
-__u8 solidigm_get_vu_uuid_index(struct nvme_dev *dev)
+const unsigned char solidigm_uuid[NVME_UUID_LEN] = {
+ 0x96, 0x19, 0x58, 0x6e, 0xc1, 0x1b, 0x43, 0xad,
+ 0xaa, 0xaa, 0x65, 0x41, 0x87, 0xf6, 0xbb, 0xb2
+};
+
+int sldgm_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index)
{
- int ocp_uuid_index = 0;
+ int i = nvme_uuid_find(uuid_list, solidigm_uuid);
- if (ocp_get_uuid_index(dev, &ocp_uuid_index) == 0)
- if (ocp_uuid_index == 2)
- return 1;
+ *index = 0;
+ if (i > 0)
+ *index = i;
+ else
+ return -errno;
return 0;
}
+
+int sldgm_get_uuid_index(struct nvme_dev *dev, __u8 *index)
+{
+ struct nvme_id_uuid_list uuid_list;
+ int err = nvme_identify_uuid(dev_fd(dev), &uuid_list);
+
+ *index = 0;
+ if (err)
+ return err;
+
+ return sldgm_find_uuid_index(&uuid_list, index);
+}
diff --git a/plugins/solidigm/solidigm-util.h b/plugins/solidigm/solidigm-util.h
index fa5032f..ed7bf0f 100644
--- a/plugins/solidigm/solidigm-util.h
+++ b/plugins/solidigm/solidigm-util.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (c) 2023 Solidigm.
+ * Copyright (c) 2023-2024 Solidigm.
*
* Author: leonardo.da.cunha@solidigm.com
*/
@@ -9,4 +9,5 @@
#define DRIVER_MAX_TX_256K (256 * 1024)
-__u8 solidigm_get_vu_uuid_index(struct nvme_dev *dev);
+int sldgm_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index);
+int sldgm_get_uuid_index(struct nvme_dev *dev, __u8 *index);
diff --git a/plugins/ssstc/ssstc-nvme.c b/plugins/ssstc/ssstc-nvme.c
new file mode 100644
index 0000000..03e4fe3
--- /dev/null
+++ b/plugins/ssstc/ssstc-nvme.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include "common.h"
+#include "nvme.h"
+#include "libnvme.h"
+#include "plugin.h"
+#include "linux/types.h"
+#include "nvme-print.h"
+
+#define CREATE_CMD
+#include "ssstc-nvme.h"
+
+struct __packed nvme_additional_smart_log_item
+{
+ __u8 key;
+ __u8 norm;
+ union __packed {
+ __u8 raw[6];
+ struct __packed wear_level
+ {
+ __le16 min;
+ __le16 max;
+ __le16 avg;
+ } wear_level;
+ };
+ __u8 _rp[2];
+};
+
+struct nvme_additional_smart_log {
+ struct nvme_additional_smart_log_item program_fail_cnt;
+ struct nvme_additional_smart_log_item erase_fail_cnt;
+ struct nvme_additional_smart_log_item wear_leveling_cnt;
+ struct nvme_additional_smart_log_item e2e_err_cnt;
+ struct nvme_additional_smart_log_item crc_err_cnt;
+ struct nvme_additional_smart_log_item nand_bytes_written;
+ struct nvme_additional_smart_log_item host_bytes_written;
+ struct nvme_additional_smart_log_item reallocated_sector_count;
+ struct nvme_additional_smart_log_item uncorrectable_sector_count;
+ struct nvme_additional_smart_log_item NAND_ECC_Detection_Count;
+ struct nvme_additional_smart_log_item NAND_ECC_Correction_Count;
+ struct nvme_additional_smart_log_item Bad_Block_Failure_Rate;
+ struct nvme_additional_smart_log_item GC_Count;
+ struct nvme_additional_smart_log_item DRAM_UECC_Detection_Count;
+ struct nvme_additional_smart_log_item SRAM_UECC_Detection_Count;
+ struct nvme_additional_smart_log_item Raid_Recovery_Fail_Count;
+ struct nvme_additional_smart_log_item Inflight_Command;
+ struct nvme_additional_smart_log_item Internal_End_to_End_Dect_Count;
+ struct nvme_additional_smart_log_item PCIe_Correctable_Error_Count;
+ struct nvme_additional_smart_log_item die_fail_count;
+ struct nvme_additional_smart_log_item wear_leveling_exec_count;
+ struct nvme_additional_smart_log_item read_disturb_count;
+ struct nvme_additional_smart_log_item data_retention_count;
+};
+
+
+static
+void show_ssstc_add_smart_log_jsn(struct nvme_additional_smart_log *smart,
+ unsigned int nsid, const char *devname)
+{
+ struct json_object *root, *entry_stats, *dev_stats, *multi;
+ __uint16_t wear_level_min = 0;
+ __uint16_t wear_level_max = 0;
+ __uint16_t wear_level_avg = 0;
+ uint64_t raw_val = 0;
+
+ root = json_create_object();
+ json_object_add_value_string(root, "SSSTC Smart log", devname);
+
+ dev_stats = json_create_object();
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->program_fail_cnt.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->program_fail_cnt.norm);
+ raw_val = int48_to_long(smart->program_fail_cnt.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "program_fail_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->erase_fail_cnt.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->erase_fail_cnt.norm);
+ raw_val = int48_to_long(smart->erase_fail_cnt.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "erase_fail_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->wear_leveling_cnt.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->wear_leveling_cnt.norm);
+ multi = json_create_object();
+ wear_level_min = le16_to_cpu(smart->wear_leveling_cnt.wear_level.min);
+ wear_level_max = le16_to_cpu(smart->wear_leveling_cnt.wear_level.max);
+ wear_level_avg = le16_to_cpu(smart->wear_leveling_cnt.wear_level.avg);
+ json_object_add_value_int(multi, "min", wear_level_min);
+ json_object_add_value_int(multi, "max", wear_level_max);
+ json_object_add_value_int(multi, "avg", wear_level_avg);
+ json_object_add_value_object(entry_stats, "raw", multi);
+ json_object_add_value_object(dev_stats, "wear_leveling", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->e2e_err_cnt.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->e2e_err_cnt.norm);
+ multi = json_create_object();
+ wear_level_min = le16_to_cpu(smart->e2e_err_cnt.wear_level.min);
+ wear_level_max = le16_to_cpu(smart->e2e_err_cnt.wear_level.max);
+ wear_level_avg = le16_to_cpu(smart->e2e_err_cnt.wear_level.avg);
+ json_object_add_value_int(multi, "guard check error", wear_level_min);
+ json_object_add_value_int(multi, "application tag check error", wear_level_max);
+ json_object_add_value_int(multi, "reference tag check error", wear_level_avg);
+ json_object_add_value_object(entry_stats, "raw", multi);
+ json_object_add_value_object(dev_stats, "end_to_end_error_dect_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->crc_err_cnt.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->crc_err_cnt.norm);
+ raw_val = int48_to_long(smart->crc_err_cnt.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "crc_error_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->nand_bytes_written.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->nand_bytes_written.norm);
+ raw_val = int48_to_long(smart->nand_bytes_written.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "nand_bytes_written", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->host_bytes_written.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->host_bytes_written.norm);
+ raw_val = int48_to_long(smart->host_bytes_written.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "host_bytes_written", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->reallocated_sector_count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->reallocated_sector_count.norm);
+ raw_val = int48_to_long(smart->reallocated_sector_count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "reallocated_sector_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->uncorrectable_sector_count.key);
+ json_object_add_value_int(entry_stats, "normalized",
+ smart->uncorrectable_sector_count.norm);
+ raw_val = int48_to_long(smart->uncorrectable_sector_count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "uncorrectable_sector_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->NAND_ECC_Detection_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->NAND_ECC_Detection_Count.norm);
+ raw_val = int48_to_long(smart->NAND_ECC_Detection_Count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "NAND_ECC_detection_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->NAND_ECC_Correction_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->NAND_ECC_Correction_Count.norm);
+ raw_val = int48_to_long(smart->NAND_ECC_Correction_Count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "NAND_ECC_correction_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->GC_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->GC_Count.norm);
+ raw_val = int48_to_long(smart->GC_Count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "GC_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->DRAM_UECC_Detection_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->DRAM_UECC_Detection_Count.norm);
+ multi = json_create_object();
+ wear_level_max = le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.max);
+ wear_level_avg = le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.avg);
+ json_object_add_value_int(multi, "1-Bit Err", wear_level_max);
+ json_object_add_value_int(multi, "2-Bit Err", wear_level_avg);
+ json_object_add_value_object(entry_stats, "raw", multi);
+ json_object_add_value_object(dev_stats, "DRAM_UECC_detection_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->SRAM_UECC_Detection_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->SRAM_UECC_Detection_Count.norm);
+ multi = json_create_object();
+ wear_level_min = le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.min);
+ wear_level_max = le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.max);
+ wear_level_avg = le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.avg);
+ json_object_add_value_int(multi, "parity error detected", wear_level_min);
+ json_object_add_value_int(multi, "ecc error detection", wear_level_max);
+ json_object_add_value_int(multi, "axi data parity errors", wear_level_avg);
+ json_object_add_value_object(entry_stats, "raw", multi);
+ json_object_add_value_object(dev_stats, "SRAM_UECC_Detection_Count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->Raid_Recovery_Fail_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->Raid_Recovery_Fail_Count.norm);
+ raw_val = int48_to_long(smart->Raid_Recovery_Fail_Count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "raid_Recovery_fail_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->Inflight_Command.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->Inflight_Command.norm);
+ multi = json_create_object();
+ wear_level_min = le16_to_cpu(smart->Inflight_Command.wear_level.min);
+ wear_level_max = le16_to_cpu(smart->Inflight_Command.wear_level.max);
+ wear_level_avg = le16_to_cpu(smart->Inflight_Command.wear_level.avg);
+ json_object_add_value_int(multi, "Read Cmd", wear_level_min);
+ json_object_add_value_int(multi, "Write Cmd", wear_level_max);
+ json_object_add_value_int(multi, "Admin Cmd", wear_level_avg);
+ json_object_add_value_object(entry_stats, "raw", multi);
+ json_object_add_value_object(dev_stats, "Inflight_Command", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->Internal_End_to_End_Dect_Count.key);
+ json_object_add_value_int(entry_stats, "normalized", 100);
+ multi = json_create_object();
+ wear_level_min = le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.min);
+ wear_level_max = le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.max);
+ wear_level_avg = le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.avg);
+ json_object_add_value_int(multi, "read hcrc", wear_level_min);
+ json_object_add_value_int(multi, "write hcrc", wear_level_max);
+ json_object_add_value_int(multi, "reserved", wear_level_avg);
+ json_object_add_value_object(entry_stats, "raw", multi);
+ json_object_add_value_object(dev_stats, "internal_end_to_end_dect_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->die_fail_count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->die_fail_count.norm);
+ raw_val = int48_to_long(smart->die_fail_count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "die_fail_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->wear_leveling_exec_count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->wear_leveling_exec_count.norm);
+ raw_val = int48_to_long(smart->wear_leveling_exec_count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "wear_leveling_exec_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->read_disturb_count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->read_disturb_count.norm);
+ raw_val = int48_to_long(smart->read_disturb_count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "read_disturb_count", entry_stats);
+
+ entry_stats = json_create_object();
+ json_object_add_value_int(entry_stats, "#id", smart->data_retention_count.key);
+ json_object_add_value_int(entry_stats, "normalized", smart->data_retention_count.norm);
+ raw_val = int48_to_long(smart->data_retention_count.raw);
+ json_object_add_value_int(entry_stats, "raw", raw_val);
+ json_object_add_value_object(dev_stats, "data_retention_count", entry_stats);
+
+ json_object_add_value_object(root, "Device stats", dev_stats);
+
+ json_print_object(root, NULL);
+ json_free_object(root);
+}
+
+static
+void show_ssstc_add_smart_log(struct nvme_additional_smart_log *smart,
+ unsigned int nsid, const char *devname)
+{
+ printf("Additional Smart Log for NVME device:%s namespace-id:%x\n",
+ devname, nsid);
+ printf("key #id normalized raw\n");
+ printf("program_fail_count : %03d %3d%% %"PRIu64"\n",
+ smart->program_fail_cnt.key,
+ smart->program_fail_cnt.norm,
+ int48_to_long(smart->program_fail_cnt.raw));
+ printf("erase_fail_count : %03d %3d%% %"PRIu64"\n",
+ smart->erase_fail_cnt.key,
+ smart->erase_fail_cnt.norm,
+ int48_to_long(smart->erase_fail_cnt.raw));
+ printf("wear_leveling : %03d %3d%% min: %u, max: %u, avg: %u\n",
+ smart->wear_leveling_cnt.key,
+ smart->wear_leveling_cnt.norm,
+ le16_to_cpu(smart->wear_leveling_cnt.wear_level.min),
+ le16_to_cpu(smart->wear_leveling_cnt.wear_level.max),
+ le16_to_cpu(smart->wear_leveling_cnt.wear_level.avg));
+ printf("end_to_end_error_dect_count : %03d %3d%% "
+ "guard check error: %u, "
+ "application tag check error: %u, "
+ "reference tag check error: %u\n",
+ smart->e2e_err_cnt.key,
+ smart->e2e_err_cnt.norm,
+ le16_to_cpu(smart->e2e_err_cnt.wear_level.min),
+ le16_to_cpu(smart->e2e_err_cnt.wear_level.max),
+ le16_to_cpu(smart->e2e_err_cnt.wear_level.avg));
+ printf("crc_error_count : %03d %3d%% %"PRIu64"\n",
+ smart->crc_err_cnt.key,
+ smart->crc_err_cnt.norm,
+ int48_to_long(smart->crc_err_cnt.raw));
+ printf("nand_bytes_written : %03d %3d%% sectors: %"PRIu64"\n",
+ smart->nand_bytes_written.key,
+ smart->nand_bytes_written.norm,
+ int48_to_long(smart->nand_bytes_written.raw));
+ printf("host_bytes_written : %3d %3d%% sectors: %"PRIu64"\n",
+ smart->host_bytes_written.key,
+ smart->host_bytes_written.norm,
+ int48_to_long(smart->host_bytes_written.raw));
+ printf("reallocated_sector_count : %03d %3d%% %"PRIu64"\n",
+ smart->reallocated_sector_count.key,
+ smart->reallocated_sector_count.norm,
+ int48_to_long(smart->reallocated_sector_count.raw));
+ printf("uncorrectable_sector_count : %03d %3d%% %"PRIu64"\n",
+ smart->uncorrectable_sector_count.key,
+ smart->uncorrectable_sector_count.norm,
+ int48_to_long(smart->uncorrectable_sector_count.raw));
+ printf("NAND_ECC_detection_count : %03d %3d%% %"PRIu64"\n",
+ smart->NAND_ECC_Detection_Count.key,
+ smart->NAND_ECC_Detection_Count.norm,
+ int48_to_long(smart->NAND_ECC_Detection_Count.raw));
+ printf("NAND_ECC_correction_count : %03d %3d%% %"PRIu64"\n",
+ smart->NAND_ECC_Correction_Count.key,
+ smart->NAND_ECC_Correction_Count.norm,
+ int48_to_long(smart->NAND_ECC_Correction_Count.raw));
+ printf("GC_count : %03d %3d%% %"PRIu64"\n",
+ smart->GC_Count.key,
+ smart->GC_Count.norm,
+ int48_to_long(smart->GC_Count.raw));
+ printf("DRAM_UECC_detection_count : %03d %3d%% 1-Bit Err: %u, 2-Bit Err: %u\n",
+ smart->DRAM_UECC_Detection_Count.key,
+ smart->DRAM_UECC_Detection_Count.norm,
+ le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.max),
+ le16_to_cpu(smart->DRAM_UECC_Detection_Count.wear_level.avg));
+ printf("SRAM_UECC_Detection_Count : %03d %3d%% "
+ "parity error detected: %u, "
+ "ecc error detection: %u, "
+ "axi data parity errors: %u\n",
+ smart->SRAM_UECC_Detection_Count.key,
+ smart->SRAM_UECC_Detection_Count.norm,
+ le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.min),
+ le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.max),
+ le16_to_cpu(smart->SRAM_UECC_Detection_Count.wear_level.avg));
+ printf("raid_recovery_fail_count : %03d %3d%% %"PRIu64"\n",
+ smart->Raid_Recovery_Fail_Count.key,
+ smart->Raid_Recovery_Fail_Count.norm,
+ int48_to_long(smart->Raid_Recovery_Fail_Count.raw));
+ printf("Inflight_Command : %03d %3d%% "
+ "Read Cmd: %u, Write Cmd: %u, Admin Cmd: %u\n",
+ smart->Inflight_Command.key,
+ smart->Inflight_Command.norm,
+ le16_to_cpu(smart->Inflight_Command.wear_level.min),
+ le16_to_cpu(smart->Inflight_Command.wear_level.max),
+ le16_to_cpu(smart->Inflight_Command.wear_level.avg));
+ printf("internal_end_to_end_dect_count : %03d %3d%% "
+ "read hcrc: %u, write hcrc: %u, reserved: %u\n",
+ smart->Internal_End_to_End_Dect_Count.key,
+ 100,
+ le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.min),
+ le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.max),
+ le16_to_cpu(smart->Internal_End_to_End_Dect_Count.wear_level.avg));
+ printf("die_fail_count : %03d %3d%% %"PRIu64"\n",
+ smart->die_fail_count.key,
+ smart->die_fail_count.norm,
+ int48_to_long(smart->die_fail_count.raw));
+ printf("wear_leveling_exec_count : %03d %3d%% %"PRIu64"\n",
+ smart->wear_leveling_exec_count.key,
+ smart->wear_leveling_exec_count.norm,
+ int48_to_long(smart->wear_leveling_exec_count.raw));
+ printf("read_disturb_count : %03d %3d%% %"PRIu64"\n",
+ smart->read_disturb_count.key,
+ smart->read_disturb_count.norm,
+ int48_to_long(smart->read_disturb_count.raw));
+ printf("data_retention_count : %03d %3d%% %"PRIu64"\n",
+ smart->data_retention_count.key,
+ smart->data_retention_count.norm,
+ int48_to_long(smart->data_retention_count.raw));
+}
+
+static
+int ssstc_get_add_smart_log(int argc, char **argv, struct command *cmd, struct plugin *plugin)
+{
+
+ const char *desc =
+ "Get SSSTC vendor specific additional smart log\n"
+ "(optionally, for the specified namespace), and show it.";
+ const char *namespace = "(optional) desired namespace";
+ const char *raw = "Dump output in binary format";
+ const char *json = "Dump output in json format";
+
+ struct nvme_additional_smart_log smart_log_add;
+ struct nvme_dev *dev;
+ int err;
+
+ struct config {
+ __u32 namespace_id;
+ bool raw_binary;
+ bool json;
+ };
+
+ struct config cfg = {
+ .namespace_id = NVME_NSID_ALL,
+ };
+
+ OPT_ARGS(opts) = {
+ OPT_UINT("namespace-id", 'n', &cfg.namespace_id, namespace),
+ OPT_FLAG("raw-binary", 'b', &cfg.raw_binary, raw),
+ OPT_FLAG("json", 'j', &cfg.json, json),
+ OPT_END()
+ };
+
+ err = parse_and_open(&dev, argc, argv, desc, opts);
+ if (err)
+ return err;
+
+ err = nvme_get_log_simple(dev_fd(dev), 0xca, sizeof(smart_log_add),
+ &smart_log_add);
+ if (!err) {
+ if (cfg.json)
+ show_ssstc_add_smart_log_jsn(&smart_log_add, cfg.namespace_id,
+ dev->name);
+ else if (!cfg.raw_binary)
+ show_ssstc_add_smart_log(&smart_log_add, cfg.namespace_id,
+ dev->name);
+ else
+ d_raw((unsigned char *)&smart_log_add, sizeof(smart_log_add));
+ } else if (err > 0) {
+ nvme_show_status(err);
+ }
+ dev_close(dev);
+ return err;
+
+}
diff --git a/plugins/ssstc/ssstc-nvme.h b/plugins/ssstc/ssstc-nvme.h
new file mode 100644
index 0000000..e34fa50
--- /dev/null
+++ b/plugins/ssstc/ssstc-nvme.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#undef CMD_INC_FILE
+#define CMD_INC_FILE plugins/ssstc/ssstc-nvme
+
+#if !defined(SSSTC_NVME) || defined(CMD_HEADER_MULTI_READ)
+#define SSSTC_NVME
+
+#include "cmd.h"
+PLUGIN(NAME("ssstc", "SSSTC vendor specific extensions", NVME_VERSION),
+ COMMAND_LIST(
+ ENTRY("smart-log-add", "Retrieve ssstc SMART Log, show it", ssstc_get_add_smart_log)
+ )
+);
+#endif
+
+#include "define_cmd.h"
diff --git a/plugins/wdc/wdc-nvme.c b/plugins/wdc/wdc-nvme.c
index 8cbcf2e..7525055 100644
--- a/plugins/wdc/wdc-nvme.c
+++ b/plugins/wdc/wdc-nvme.c
@@ -1383,6 +1383,11 @@ struct __packed wdc_fw_act_history_log_format_c2 {
__u8 log_page_guid[WDC_C2_GUID_LENGTH];
};
+static __u8 ocp_C2_guid[WDC_C2_GUID_LENGTH] = {
+ 0x6D, 0x79, 0x9A, 0x76, 0xB4, 0xDA, 0xF6, 0xA3,
+ 0xE2, 0x4D, 0xB2, 0x8A, 0xAC, 0xF3, 0x1C, 0xD1
+};
+
#define WDC_OCP_C4_GUID_LENGTH 16
#define WDC_DEV_CAP_LOG_BUF_LEN 4096
#define WDC_DEV_CAP_LOG_ID 0xC4
@@ -1726,7 +1731,6 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
case WDC_NVME_VID_2:
switch (read_device_id) {
case WDC_NVME_SN630_DEV_ID:
- fallthrough;
case WDC_NVME_SN630_DEV_ID_1:
capabilities = (WDC_DRIVE_CAP_CAP_DIAG | WDC_DRIVE_CAP_INTERNAL_LOG |
WDC_DRIVE_CAP_DRIVE_STATUS | WDC_DRIVE_CAP_CLEAR_ASSERT |
@@ -1743,19 +1747,12 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN640_DEV_ID:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN560_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN560_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN560_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN660_DEV_ID:
/* verify the 0xC0 log page is supported */
if (wdc_nvme_check_supported_log_page(r, dev,
@@ -1816,9 +1813,7 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN840_DEV_ID:
- fallthrough;
case WDC_NVME_SN840_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN860_DEV_ID:
/* verify the 0xC0 log page is supported */
if (wdc_nvme_check_supported_log_page(r, dev,
@@ -1826,7 +1821,6 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
capabilities |= WDC_DRIVE_CAP_C0_LOG_PAGE;
fallthrough;
case WDC_NVME_ZN540_DEV_ID:
- fallthrough;
case WDC_NVME_SN540_DEV_ID:
capabilities |= (WDC_DRIVE_CAP_CAP_DIAG | WDC_DRIVE_CAP_INTERNAL_LOG |
WDC_DRIVE_CAP_DRIVE_STATUS | WDC_DRIVE_CAP_CLEAR_ASSERT |
@@ -1847,17 +1841,11 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN650_DEV_ID:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_4:
- fallthrough;
case WDC_NVME_SN655_DEV_ID:
- fallthrough;
case WDC_NVME_SN550_DEV_ID:
/* verify the 0xC0 log page is supported */
if (wdc_nvme_check_supported_log_page(r, dev,
@@ -1907,7 +1895,6 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN861_DEV_ID:
- fallthrough;
case WDC_NVME_SN861_DEV_ID_1:
capabilities |= (WDC_DRIVE_CAP_C0_LOG_PAGE |
WDC_DRIVE_CAP_C3_LOG_PAGE |
@@ -1921,6 +1908,7 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
WDC_DRIVE_CAP_INFO |
WDC_DRIVE_CAP_CLOUD_SSD_VERSION |
WDC_DRIVE_CAP_LOG_PAGE_DIR |
+ WDC_DRIVE_CAP_DRIVE_STATUS |
WDC_DRIVE_CAP_SET_LATENCY_MONITOR);
break;
@@ -1936,11 +1924,8 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
break;
case WDC_NVME_SN520_DEV_ID:
- fallthrough;
case WDC_NVME_SN520_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN520_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN810_DEV_ID:
capabilities = WDC_DRIVE_CAP_DUI_DATA;
break;
@@ -2010,19 +1995,14 @@ static __u64 wdc_get_drive_capabilities(nvme_root_t r, struct nvme_dev *dev)
case WDC_NVME_SN8000S_DEV_ID:
fallthrough;
case WDC_NVME_SN740_DEV_ID:
- fallthrough;
case WDC_NVME_SN740_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN740_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN740_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN340_DEV_ID:
capabilities = WDC_DRIVE_CAP_DUI;
break;
case WDC_NVME_ZN350_DEV_ID:
- fallthrough;
case WDC_NVME_ZN350_DEV_ID_1:
capabilities = WDC_DRIVE_CAP_DUI_DATA | WDC_DRIVE_CAP_VU_FID_CLEAR_PCIE |
WDC_DRIVE_CAP_C0_LOG_PAGE |
@@ -2450,23 +2430,32 @@ static bool get_dev_mgment_cbs_data(nvme_root_t r, struct nvme_dev *dev,
uuid_index = index + 1;
}
- if (!uuid_index && needs_c2_log_page_check(device_id)) {
- /* In certain devices that don't support UUID lists, there are multiple
- * definitions of the C2 logpage. In those cases, the code
- * needs to try two UUID indexes and use an identification algorithm
- * to determine which is returning the correct log page data.
- */
- uuid_ix = 1;
- }
+ if (uuid_present) {
+ /* use the uuid index found above */
+ found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_index);
+ } else if (device_id == WDC_NVME_ZN350_DEV_ID || device_id == WDC_NVME_ZN350_DEV_ID_1) {
+ uuid_index = 0;
+ found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_index);
+ } else {
+ if (!uuid_index && needs_c2_log_page_check(device_id)) {
+ /* In certain devices that don't support UUID lists, there are multiple
+ * definitions of the C2 logpage. In those cases, the code
+ * needs to try two UUID indexes and use an identification algorithm
+ * to determine which is returning the correct log page data.
+ */
+ uuid_ix = 1;
+ }
- found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
+ found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
- if (!found) {
- /* not found with uuid = 1 try with uuid = 0 */
- uuid_ix = 0;
- fprintf(stderr, "Not found, requesting log page with uuid_index %d\n", uuid_index);
+ if (!found) {
+ /* not found with uuid = 1 try with uuid = 0 */
+ uuid_ix = 0;
+ fprintf(stderr, "Not found, requesting log page with uuid_index %d\n",
+ uuid_index);
- found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
+ found = get_dev_mgmt_log_page_lid_data(dev, cbs_data, lid, log_id, uuid_ix);
+ }
}
return found;
@@ -5538,17 +5527,18 @@ static void wdc_print_fw_act_history_log_normal(__u8 *data, int num_entries,
char previous_fw[9];
char new_fw[9];
char commit_action_bin[8];
- char time_str[11];
+ char time_str[100];
__u16 oldestEntryIdx = 0, entryIdx = 0;
+ uint64_t timestamp;
+ __u64 timestamp_sec;
char *null_fw = "--------";
- memset((void *)time_str, 0, 11);
+ memset((void *)time_str, '\0', 100);
if (data[0] == WDC_NVME_GET_FW_ACT_HISTORY_C2_LOG_ID) {
printf(" Firmware Activate History Log\n");
if (cust_id == WDC_CUSTOMER_ID_0x1005 ||
- vendor_id == WDC_NVME_SNDK_VID ||
- wdc_is_sn861(device_id)) {
+ vendor_id == WDC_NVME_SNDK_VID) {
printf(" Power on Hour Power Cycle Previous New\n");
printf(" Entry hh:mm:ss Count Firmware Firmware Slot Action Result\n");
printf(" ----- ----------------- ----------------- --------- --------- ----- ------ -------\n");
@@ -5589,48 +5579,33 @@ static void wdc_print_fw_act_history_log_normal(__u8 *data, int num_entries,
memcpy(new_fw, null_fw, 8);
printf("%5"PRIu16"", (uint16_t)le16_to_cpu(fw_act_history_entry->entry[entryIdx].fw_act_hist_entries));
+
+ timestamp = (0x0000FFFFFFFFFFFF &
+ le64_to_cpu(
+ fw_act_history_entry->entry[entryIdx].timestamp));
+ timestamp_sec = timestamp / 1000;
if (cust_id == WDC_CUSTOMER_ID_0x1005) {
printf(" ");
memset((void *)time_str, 0, 9);
- sprintf((char *)time_str, "%04d:%02d:%02d", (int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)/3600),
- (int)((le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%3600)/60)),
- (int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%60)));
+ sprintf((char *)time_str, "%"PRIu32":%u:%u",
+ (__u32)(timestamp_sec/3600),
+ (__u8)(timestamp_sec%3600/60),
+ (__u8)(timestamp_sec%60));
printf("%s", time_str);
printf(" ");
} else if (vendor_id == WDC_NVME_SNDK_VID) {
printf(" ");
- uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
memset((void *)time_str, 0, 9);
- sprintf((char *)time_str, "%04d:%02d:%02d", (int)((timestamp/(3600*1000))%24), (int)((timestamp/(1000*60))%60),
- (int)((timestamp/1000)%60));
+ sprintf((char *)time_str, "%"PRIu32":%u:%u",
+ (__u32)((timestamp_sec/3600)%24),
+ (__u8)((timestamp_sec/60)%60),
+ (__u8)(timestamp_sec%60));
printf("%s", time_str);
printf(" ");
- } else if (wdc_is_sn861(device_id)) {
- printf(" ");
- char timestamp[20];
- __u64 hour;
- __u8 min;
- __u8 sec;
- __u64 timestamp_sec;
-
- timestamp_sec =
- le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)
- / 1000;
- hour = timestamp_sec / 3600;
- min = (timestamp_sec % 3600) / 60;
- sec = timestamp_sec % 60;
-
- sprintf(timestamp,
- "%"PRIu64":%02"PRIu8":%02"PRIu8,
- (uint64_t)hour, min, sec);
- printf("%-11s", timestamp);
- printf(" ");
} else {
printf(" ");
- uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
-
printf("%16"PRIu64"", timestamp);
printf(" ");
}
@@ -5735,13 +5710,15 @@ static void wdc_print_fw_act_history_log_json(__u8 *data, int num_entries,
char new_fw[9];
char commit_action_bin[8];
char fail_str[32];
- char time_str[11];
+ char time_str[100];
char ext_time_str[20];
+ uint64_t timestamp;
+ __u64 timestamp_sec;
memset((void *)previous_fw, 0, 9);
memset((void *)new_fw, 0, 9);
memset((void *)commit_action_bin, 0, 8);
- memset((void *)time_str, 0, 11);
+ memset((void *)time_str, '\0', 100);
memset((void *)ext_time_str, 0, 20);
memset((void *)fail_str, 0, 11);
char *null_fw = "--------";
@@ -5781,33 +5758,25 @@ static void wdc_print_fw_act_history_log_json(__u8 *data, int num_entries,
json_object_add_value_int(root, "Entry",
le16_to_cpu(fw_act_history_entry->entry[entryIdx].fw_act_hist_entries));
+ timestamp = (0x0000FFFFFFFFFFFF &
+ le64_to_cpu(
+ fw_act_history_entry->entry[entryIdx].timestamp));
+ timestamp_sec = timestamp / 1000;
if (cust_id == WDC_CUSTOMER_ID_0x1005) {
- sprintf((char *)time_str, "%04d:%02d:%02d", (int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)/3600),
- (int)((le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%3600)/60)),
- (int)(le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp%60)));
+ sprintf((char *)time_str, "%"PRIu32":%u:%u",
+ (__u32)(timestamp_sec/3600),
+ (__u8)(timestamp_sec%3600/60),
+ (__u8)(timestamp_sec%60));
json_object_add_value_string(root, "Power on Hour", time_str);
} else if (vendor_id == WDC_NVME_SNDK_VID) {
- uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
-
- sprintf((char *)time_str, "%04d:%02d:%02d", (int)((timestamp/(3600*1000))%24), (int)((timestamp/(1000*60))%60),
- (int)((timestamp/1000)%60));
+ sprintf((char *)time_str, "%"PRIu32":%u:%u",
+ (__u32)((timestamp_sec/3600)%24),
+ (__u8)((timestamp_sec/60)%60),
+ (__u8)(timestamp_sec%60));
json_object_add_value_string(root, "Power on Hour", time_str);
- } else if (wdc_is_sn861(device_id)) {
- __u64 timestamp_sec =
- le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp)
- / 1000;
-
- sprintf((char *)ext_time_str,
- "%"PRIu64":%02"PRIu8":%02"PRIu8,
- (uint64_t)(__u64)(timestamp_sec/3600),
- (__u8)((timestamp_sec%3600)/60),
- (__u8)(timestamp_sec%60));
- json_object_add_value_string(root, "Power on Hour", ext_time_str);
} else {
- uint64_t timestamp = (0x0000FFFFFFFFFFFF & le64_to_cpu(fw_act_history_entry->entry[entryIdx].timestamp));
-
json_object_add_value_uint64(root, "Timestamp", timestamp);
}
@@ -7047,39 +7016,23 @@ static int wdc_get_c0_log_page(nvme_root_t r, struct nvme_dev *dev, char *format
switch (device_id) {
case WDC_NVME_SN640_DEV_ID:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN840_DEV_ID:
- fallthrough;
case WDC_NVME_SN840_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN860_DEV_ID:
- fallthrough;
case WDC_NVME_SN560_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN560_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN560_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN550_DEV_ID:
ret = wdc_get_c0_log_page_sn(r, dev, uuid_index, format, namespace_id, fmt);
break;
-
case WDC_NVME_SN650_DEV_ID:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN650_DEV_ID_4:
- fallthrough;
case WDC_NVME_SN655_DEV_ID:
if (uuid_index == 0) {
log_id = WDC_NVME_GET_SMART_CLOUD_ATTR_LOG_ID;
@@ -7137,9 +7090,7 @@ static int wdc_get_c0_log_page(nvme_root_t r, struct nvme_dev *dev, char *format
}
free(data);
break;
-
case WDC_NVME_ZN350_DEV_ID:
- fallthrough;
case WDC_NVME_ZN350_DEV_ID_1:
data = (__u8 *)malloc(sizeof(__u8) * WDC_NVME_SMART_CLOUD_ATTR_LEN);
if (!data) {
@@ -7405,17 +7356,11 @@ static int wdc_get_ca_log_page(nvme_root_t r, struct nvme_dev *dev, char *format
}
break;
case WDC_NVME_SN640_DEV_ID:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_2:
- fallthrough;
case WDC_NVME_SN640_DEV_ID_3:
- fallthrough;
case WDC_NVME_SN840_DEV_ID:
- fallthrough;
case WDC_NVME_SN840_DEV_ID_1:
- fallthrough;
case WDC_NVME_SN860_DEV_ID:
if (cust_id == WDC_CUSTOMER_ID_0x1005) {
data = (__u8 *)malloc(sizeof(__u8) * WDC_FB_CA_LOG_BUF_LEN);
@@ -9074,6 +9019,7 @@ static int wdc_get_fw_act_history_C2(nvme_root_t r, struct nvme_dev *dev,
enum nvme_print_flags fmt;
__u8 *data;
int ret;
+ bool c2GuidMatch = false;
if (!wdc_check_device(r, dev))
return -1;
@@ -9102,29 +9048,40 @@ static int wdc_get_fw_act_history_C2(nvme_root_t r, struct nvme_dev *dev,
nvme_show_status(ret);
if (!ret) {
- /* parse the data */
+ /* Get the log page data and verify the GUID */
fw_act_history_log = (struct wdc_fw_act_history_log_format_c2 *)(data);
- tot_entries = le32_to_cpu(fw_act_history_log->num_entries);
- if (tot_entries > 0) {
- /* get the FW customer id */
- if (!wdc_is_sn861(device_id)) {
- cust_id = wdc_get_fw_cust_id(r, dev);
- if (cust_id == WDC_INVALID_CUSTOMER_ID) {
- fprintf(stderr,
- "%s: ERROR: WDC: invalid customer id\n",
- __func__);
- ret = -1;
- goto freeData;
+ c2GuidMatch = !memcmp(ocp_C2_guid,
+ fw_act_history_log->log_page_guid,
+ WDC_C2_GUID_LENGTH);
+
+ if (c2GuidMatch) {
+ /* parse the data */
+ tot_entries = le32_to_cpu(fw_act_history_log->num_entries);
+
+ if (tot_entries > 0) {
+ /* get the FW customer id */
+ if (!wdc_is_sn861(device_id)) {
+ cust_id = wdc_get_fw_cust_id(r, dev);
+ if (cust_id == WDC_INVALID_CUSTOMER_ID) {
+ fprintf(stderr,
+ "%s: ERROR: WDC: invalid customer id\n",
+ __func__);
+ ret = -1;
+ goto freeData;
+ }
}
+ num_entries = (tot_entries < WDC_MAX_NUM_ACT_HIST_ENTRIES) ?
+ tot_entries : WDC_MAX_NUM_ACT_HIST_ENTRIES;
+ ret = wdc_print_fw_act_history_log(data, num_entries,
+ fmt, cust_id, vendor_id, device_id);
+ } else {
+ fprintf(stderr, "INFO: WDC: No entries found.\n");
+ ret = 0;
}
- num_entries = (tot_entries < WDC_MAX_NUM_ACT_HIST_ENTRIES) ? tot_entries :
- WDC_MAX_NUM_ACT_HIST_ENTRIES;
- ret = wdc_print_fw_act_history_log(data, num_entries,
- fmt, cust_id, vendor_id, device_id);
- } else {
- fprintf(stderr, "INFO: WDC: No FW Activate History entries found.\n");
- ret = 0;
+ } else {
+ fprintf(stderr, "ERROR: WDC: Invalid C2 log page GUID\n");
+ ret = -1;
}
} else {
fprintf(stderr, "ERROR: WDC: Unable to read FW Activate History Log Page data\n");
@@ -9143,7 +9100,7 @@ static int wdc_vs_fw_activate_history(int argc, char **argv, struct command *com
__u64 capabilities = 0;
struct nvme_dev *dev;
nvme_root_t r;
- int ret;
+ int ret = -1;
struct config {
char *output_format;
@@ -9171,61 +9128,23 @@ static int wdc_vs_fw_activate_history(int argc, char **argv, struct command *com
}
if (capabilities & WDC_DRIVE_CAP_FW_ACTIVATE_HISTORY) {
- int uuid_index = 0;
- bool c0GuidMatch = false;
- __u8 *data;
- int i;
-
- /*
- * check for the GUID in the 0xC0 log page to determine which log page to use to
- * retrieve fw activate history data
- */
- data = (__u8 *)malloc(sizeof(__u8) * WDC_NVME_SMART_CLOUD_ATTR_LEN);
- if (!data) {
- fprintf(stderr, "ERROR: WDC: malloc: %s\n", strerror(errno));
+ __u32 cust_fw_id = 0;
+ /* get the FW customer id */
+ cust_fw_id = wdc_get_fw_cust_id(r, dev);
+ if (cust_fw_id == WDC_INVALID_CUSTOMER_ID) {
+ fprintf(stderr, "%s: ERROR: WDC: invalid customer id\n", __func__);
ret = -1;
goto out;
}
- /* Get the 0xC0 log data */
- struct nvme_get_log_args args = {
- .args_size = sizeof(args),
- .fd = dev_fd(dev),
- .lid = WDC_NVME_GET_SMART_CLOUD_ATTR_LOG_ID,
- .nsid = 0xFFFFFFFF,
- .lpo = 0,
- .lsp = NVME_LOG_LSP_NONE,
- .lsi = 0,
- .rae = false,
- .uuidx = uuid_index,
- .csi = NVME_CSI_NVM,
- .ot = false,
- .len = WDC_NVME_SMART_CLOUD_ATTR_LEN,
- .log = data,
- .timeout = NVME_DEFAULT_IOCTL_TIMEOUT,
- .result = NULL,
- };
- ret = nvme_get_log(&args);
-
- if (!ret) {
- /* Verify GUID matches */
- for (i = 0; i < 16; i++) {
- if (scao_guid[i] != data[SCAO_LPG + i]) {
- c0GuidMatch = false;
- break;
- }
- }
-
- if (i == 16)
- c0GuidMatch = true;
- }
-
- free(data);
- if (c0GuidMatch)
+ if ((cust_fw_id == WDC_CUSTOMER_ID_0x1004) ||
+ (cust_fw_id == WDC_CUSTOMER_ID_0x1008) ||
+ (cust_fw_id == WDC_CUSTOMER_ID_0x1005) ||
+ (cust_fw_id == WDC_CUSTOMER_ID_0x1304))
ret = wdc_get_fw_act_history_C2(r, dev, cfg.output_format);
else
ret = wdc_get_fw_act_history(r, dev, cfg.output_format);
- } else {
+ } else if (capabilities & WDC_DRIVE_CAP_FW_ACTIVATE_HISTORY_C2) {
ret = wdc_get_fw_act_history_C2(r, dev, cfg.output_format);
}
@@ -9648,10 +9567,10 @@ static int wdc_fetch_log_file_from_device(struct nvme_dev *dev, __u32 fileId,
__u16 spiDestn, __u64 fileSize, __u8 *dataBuffer)
{
int ret = WDC_STATUS_FAILURE;
- __u32 chunckSize = WDC_DE_VU_READ_BUFFER_STANDARD_OFFSET;
- __u32 maximumTransferLength = 0;
- __u32 buffSize = 0;
- __u64 offsetIdx = 0;
+ __u32 chunckSize = WDC_DE_VU_READ_BUFFER_STANDARD_OFFSET;
+ __u32 maximumTransferLength = 0;
+ __u32 buffSize = 0;
+ __u64 offsetIdx = 0;
if (!dev || !dataBuffer || !fileSize) {
ret = WDC_STATUS_INVALID_PARAMETER;
@@ -9699,18 +9618,17 @@ end:
static int wdc_de_get_dump_trace(struct nvme_dev *dev, char *filePath, __u16 binFileNameLen, char *binFileName)
{
- int ret = WDC_STATUS_FAILURE;
- __u8 *readBuffer = NULL;
- __u32 readBufferLen = 0;
- __u32 lastPktReadBufferLen = 0;
- __u32 maxTransferLen = 0;
- __u32 dumptraceSize = 0;
- __u32 chunkSize = 0;
- __u32 chunks = 0;
- __u32 offset = 0;
- __u8 loop = 0;
- __u16 i = 0;
- __u32 maximumTransferLength = 0;
+ int ret = WDC_STATUS_FAILURE;
+ __u8 *readBuffer = NULL;
+ __u32 readBufferLen = 0;
+ __u32 lastPktReadBufferLen = 0;
+ __u32 maxTransferLen = 0;
+ __u32 dumptraceSize = 0;
+ __u32 chunkSize;
+ __u32 chunks;
+ __u32 offset;
+ __u32 i;
+ __u32 maximumTransferLength = 0;
if (!dev || !binFileName || !filePath) {
ret = WDC_STATUS_INVALID_PARAMETER;
@@ -9759,7 +9677,7 @@ static int wdc_de_get_dump_trace(struct nvme_dev *dev, char *filePath, __u16 bin
}
for (i = 0; i < chunks; i++) {
- offset = ((i*chunkSize) / 4);
+ offset = (i * chunkSize) / 4;
/* Last loop call, Assign readBufferLen to read only left over bytes */
if (i == (chunks - 1))
@@ -9774,7 +9692,7 @@ static int wdc_de_get_dump_trace(struct nvme_dev *dev, char *filePath, __u16 bin
break;
}
}
- } while (loop);
+ } while (0);
if (ret == WDC_STATUS_SUCCESS) {
ret = wdc_WriteToFile(binFileName, (char *)readBuffer, dumptraceSize);
@@ -11678,7 +11596,6 @@ static int wdc_vs_drive_info(int argc, char **argv,
break;
case WDC_NVME_SN861_DEV_ID:
- fallthrough;
case WDC_NVME_SN861_DEV_ID_1:
data_len = sizeof(info);
num_dwords = data_len / 4;
diff --git a/plugins/wdc/wdc-nvme.h b/plugins/wdc/wdc-nvme.h
index d3692bc..65d2de3 100644
--- a/plugins/wdc/wdc-nvme.h
+++ b/plugins/wdc/wdc-nvme.h
@@ -5,7 +5,7 @@
#if !defined(WDC_NVME) || defined(CMD_HEADER_MULTI_READ)
#define WDC_NVME
-#define WDC_PLUGIN_VERSION "2.7.0"
+#define WDC_PLUGIN_VERSION "2.8.1"
#include "cmd.h"
PLUGIN(NAME("wdc", "Western Digital vendor specific extensions", WDC_PLUGIN_VERSION),
diff --git a/plugins/wdc/wdc-utils.c b/plugins/wdc/wdc-utils.c
index 414a06a..1b52e7c 100644
--- a/plugins/wdc/wdc-utils.c
+++ b/plugins/wdc/wdc-utils.c
@@ -192,5 +192,5 @@ bool wdc_CheckUuidListSupport(struct nvme_dev *dev, struct nvme_id_uuid_list *uu
bool wdc_UuidEqual(struct nvme_id_uuid_list_entry *entry1, struct nvme_id_uuid_list_entry *entry2)
{
- return !memcmp(entry1, entry2, NVME_UUID_LEN);
+ return !memcmp(entry1->uuid, entry2->uuid, NVME_UUID_LEN);
}