Diffstat (limited to 'src/tools/cephfs/top/cephfs-top')
-rwxr-xr-x  src/tools/cephfs/top/cephfs-top  888
1 file changed, 888 insertions, 0 deletions
diff --git a/src/tools/cephfs/top/cephfs-top b/src/tools/cephfs/top/cephfs-top
new file mode 100755
index 000000000..d57c3ab83
--- /dev/null
+++ b/src/tools/cephfs/top/cephfs-top
@@ -0,0 +1,888 @@
+#!/usr/bin/python3
+
+import argparse
+import sys
+import curses
+import errno
+import json
+import signal
+import time
+import math
+import threading
+
+from collections import OrderedDict
+from datetime import datetime
+from enum import Enum, unique
+
+import rados
+
+
+class FSTopException(Exception):
+ def __init__(self, msg=''):
+ self.error_msg = msg
+
+ def get_error_msg(self):
+ return self.error_msg
+
+
+@unique
+class MetricType(Enum):
+ METRIC_TYPE_NONE = 0
+ METRIC_TYPE_PERCENTAGE = 1
+ METRIC_TYPE_LATENCY = 2
+ METRIC_TYPE_SIZE = 3
+ METRIC_TYPE_STDEV = 4
+
+
+FS_TOP_PROG_STR = 'cephfs-top'
+FS_TOP_ALL_FS_APP = 'ALL_FS_APP'
+FS_TOP_FS_SELECTED_APP = 'SELECTED_FS_APP'
+
+# version match between cephfs-top and the stats emitted by mgr/stats
+FS_TOP_SUPPORTED_VER = 2
+
+ITEMS_PAD_LEN = 3
+ITEMS_PAD = " " * ITEMS_PAD_LEN
+DEFAULT_REFRESH_INTERVAL = 1
+# min refresh interval allowed
+MIN_REFRESH_INTERVAL = 0.5
+
+# metadata provided by mgr/stats
+FS_TOP_MAIN_WINDOW_COL_CLIENT_ID = "client_id"
+FS_TOP_MAIN_WINDOW_COL_MNT_ROOT = "mount_root"
+FS_TOP_MAIN_WINDOW_COL_MNTPT_HOST_ADDR = "mount_point@host/addr"
+
+MAIN_WINDOW_TOP_LINE_ITEMS_START = [ITEMS_PAD,
+ FS_TOP_MAIN_WINDOW_COL_CLIENT_ID,
+ FS_TOP_MAIN_WINDOW_COL_MNT_ROOT]
+MAIN_WINDOW_TOP_LINE_ITEMS_END = [FS_TOP_MAIN_WINDOW_COL_MNTPT_HOST_ADDR]
+
+MAIN_WINDOW_TOP_LINE_METRICS_LEGACY = ["READ_LATENCY",
+ "WRITE_LATENCY",
+ "METADATA_LATENCY"
+ ]
+
+# adjust this map according to the stats version and maintain the order
+# as emitted by mgr/stats
+MAIN_WINDOW_TOP_LINE_METRICS = OrderedDict([
+ ("CAP_HIT", MetricType.METRIC_TYPE_PERCENTAGE),
+ ("READ_LATENCY", MetricType.METRIC_TYPE_LATENCY),
+ ("WRITE_LATENCY", MetricType.METRIC_TYPE_LATENCY),
+ ("METADATA_LATENCY", MetricType.METRIC_TYPE_LATENCY),
+ ("DENTRY_LEASE", MetricType.METRIC_TYPE_PERCENTAGE),
+ ("OPENED_FILES", MetricType.METRIC_TYPE_NONE),
+ ("PINNED_ICAPS", MetricType.METRIC_TYPE_NONE),
+ ("OPENED_INODES", MetricType.METRIC_TYPE_NONE),
+ ("READ_IO_SIZES", MetricType.METRIC_TYPE_SIZE),
+ ("WRITE_IO_SIZES", MetricType.METRIC_TYPE_SIZE),
+ ("AVG_READ_LATENCY", MetricType.METRIC_TYPE_LATENCY),
+ ("STDEV_READ_LATENCY", MetricType.METRIC_TYPE_STDEV),
+ ("AVG_WRITE_LATENCY", MetricType.METRIC_TYPE_LATENCY),
+ ("STDEV_WRITE_LATENCY", MetricType.METRIC_TYPE_STDEV),
+ ("AVG_METADATA_LATENCY", MetricType.METRIC_TYPE_LATENCY),
+ ("STDEV_METADATA_LATENCY", MetricType.METRIC_TYPE_STDEV),
+])
+MGR_STATS_COUNTERS = list(MAIN_WINDOW_TOP_LINE_METRICS.keys())
+
+FS_TOP_VERSION_HEADER_FMT = '{prog_name} - {now}'
+FS_TOP_CLIENT_HEADER_FMT = 'Total Client(s): {num_clients} - '\
+ '{num_mounts} FUSE, {num_kclients} kclient, {num_libs} libcephfs'
+FS_TOP_NAME_TOPL_FMT = 'Filesystem: {fs_name} - {client_count} client(s)'
+
+CLIENT_METADATA_KEY = "client_metadata"
+CLIENT_METADATA_MOUNT_POINT_KEY = "mount_point"
+CLIENT_METADATA_MOUNT_ROOT_KEY = "root"
+CLIENT_METADATA_IP_KEY = "IP"
+CLIENT_METADATA_HOSTNAME_KEY = "hostname"
+CLIENT_METADATA_VALID_METRICS_KEY = "valid_metrics"
+
+GLOBAL_METRICS_KEY = "global_metrics"
+GLOBAL_COUNTERS_KEY = "global_counters"
+
+last_time = time.time()
+last_read_size = {}
+last_write_size = {}
+
+fs_list = []
+
+
+def calc_perc(c):
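+    # e.g. calc_perc((3, 1)) == 75.0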
+ if c[0] == 0 and c[1] == 0:
+ return 0.0
+ return round((c[0] / (c[0] + c[1])) * 100, 2)
+
+
+def calc_lat(c):
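+    # the pair is treated as (seconds, nanoseconds) and converted to ms,
+    # e.g. calc_lat((0, 2500000)) == 2.5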
+ return round(c[0] * 1000 + c[1] / 1000000, 2)
+
+
+def calc_stdev(c):
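+    # c[0] is read as a sum of squared deviations (ns^2) and c[1] as the
+    # sample count, e.g. calc_stdev((4 * 10**12, 2)) == 2.0 (ms)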
+ stdev = 0.0
+ if c[1] > 1:
+ stdev = math.sqrt(c[0] / (c[1] - 1)) / 1000000
+ return round(stdev, 2)
+
+
+# in MB
+def calc_size(c):
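+    # c[1] is a byte count, e.g. calc_size((0, 2097152)) == 2.0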
+ return round(c[1] / (1024 * 1024), 2)
+
+
+# in MB
+def calc_avg_size(c):
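+    # c is read as (op count, total bytes),
+    # e.g. calc_avg_size((4, 8388608)) == 2.0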
+ if c[0] == 0:
+ return 0.0
+ return round(c[1] / (c[0] * 1024 * 1024), 2)
+
+
+# in MB/s
+def calc_speed(size, duration):
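+    # size in bytes, duration in seconds, e.g. calc_speed(5242880, 1) == 5.0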
+ if duration == 0:
+ return 0.0
+ return round(size / (duration * 1024 * 1024), 2)
+
+
+def wrap(s, sl):
+ """return a '+' suffixed wrapped string"""
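+    # e.g. wrap('/mnt/cephfs/volumes', 6) == '/mnt/+'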
+ if len(s) < sl:
+ return s
+ return f'{s[0:sl-1]}+'
+
+
+class FSTop(object):
+ def __init__(self, args):
+ self.rados = None
+ self.stdscr = None # curses instance
+ self.current_screen = ""
+ self.client_name = args.id
+ self.cluster_name = args.cluster
+ self.conffile = args.conffile
+ self.refresh_interval_secs = args.delay
+ self.PAD_HEIGHT = 10000 # height of the fstop_pad
+ self.PAD_WIDTH = 300 # width of the fstop_pad
+ self.exit_ev = threading.Event()
+
+ def handle_signal(self, signum, _):
+ self.exit_ev.set()
+
+ def init(self):
+ try:
+ if self.conffile:
+ r_rados = rados.Rados(rados_id=self.client_name,
+ clustername=self.cluster_name,
+ conffile=self.conffile)
+ else:
+ r_rados = rados.Rados(rados_id=self.client_name,
+ clustername=self.cluster_name)
+ r_rados.conf_read_file()
+ r_rados.connect()
+ self.rados = r_rados
+ except rados.Error as e:
+ if e.errno == errno.ENOENT:
+ raise FSTopException(f'cluster {self.cluster_name}'
+ ' does not exist')
+ else:
+ raise FSTopException(f'error connecting to cluster: {e}')
+ self.verify_perf_stats_support()
+ signal.signal(signal.SIGTERM, self.handle_signal)
+ signal.signal(signal.SIGINT, self.handle_signal)
+
+ def fini(self):
+ if self.rados:
+ self.rados.shutdown()
+ self.rados = None
+
+ def selftest(self):
+ stats_json = self.perf_stats_query()
+ if not stats_json['version'] == FS_TOP_SUPPORTED_VER:
+ raise FSTopException('perf stats version mismatch!')
+ missing = [m for m in stats_json["global_counters"]
+ if m.upper() not in MGR_STATS_COUNTERS]
+ if missing:
+            raise FSTopException('Cannot handle unknown metrics from '
+                                 f'\'ceph fs perf stats\': {missing}')
+
+ def get_fs_names(self):
+ mon_cmd = {'prefix': 'fs ls', 'format': 'json'}
+ try:
+ ret, buf, out = self.rados.mon_command(json.dumps(mon_cmd), b'')
+ except Exception as e:
+ raise FSTopException(f'Error in fs ls: {e}')
+ fs_map = json.loads(buf.decode('utf-8'))
+ global fs_list
+ fs_list.clear()
+ for filesystem in fs_map:
+ fs = filesystem['name']
+ fs_list.append(fs)
+ return fs_list
+
+ def setup_curses(self, win):
+ self.stdscr = win
+ self.stdscr.keypad(True)
+ curses.use_default_colors()
+ curses.start_color()
+ try:
+ curses.curs_set(0)
+ except curses.error:
+            # If the terminal does not support the requested visibility,
+            # curses will raise an exception here
+ pass
+ self.fstop_pad = curses.newpad(self.PAD_HEIGHT, self.PAD_WIDTH)
+ self.run_all_display()
+
+ def display_fs_menu(self, stdscr, selected_row_idx):
+ stdscr.clear()
+ h, w = stdscr.getmaxyx()
+ global fs_list
+ if not fs_list:
+ title = ['No filesystem available',
+ 'Press "q" to go back to home (All Filesystem Info) screen']
+ pos_x1 = w // 2 - len(title[0]) // 2
+ pos_x2 = w // 2 - len(title[1]) // 2
+ stdscr.addstr(1, pos_x1, title[0], curses.A_STANDOUT | curses.A_BOLD)
+ stdscr.addstr(3, pos_x2, title[1])
+ else:
+ title = ['Filesystems', 'Press "q" to go back to home (All Filesystem Info) screen']
+ pos_x1 = w // 2 - len(title[0]) // 2
+ pos_x2 = w // 2 - len(title[1]) // 2
+ stdscr.addstr(1, pos_x1, title[0], curses.A_STANDOUT | curses.A_BOLD)
+ stdscr.addstr(3, pos_x2, title[1])
+ for index, name in enumerate(fs_list):
+ x = w // 2 - len(name) // 2
+ y = h // 2 - len(fs_list) // 2 + index
+ if index == selected_row_idx:
+ stdscr.attron(curses.color_pair(1))
+ stdscr.addstr(y, x, name)
+ stdscr.attroff(curses.color_pair(1))
+ else:
+ stdscr.addstr(y, x, name)
+ stdscr.refresh()
+
+ def set_key(self, stdscr):
+ curses.curs_set(0)
+ curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
+ curr_row = 0
+ key = 0
+ endmenu = False
+ while not endmenu:
+ global fs_list
+ fs_list = self.get_fs_names()
+
+ if key == curses.KEY_UP and curr_row > 0:
+ curr_row -= 1
+ elif key == curses.KEY_DOWN and curr_row < len(fs_list) - 1:
+ curr_row += 1
+ elif (key in [curses.KEY_ENTER, 10, 13]) and fs_list:
+ self.stdscr.erase()
+ self.run_display(fs_list[curr_row])
+ endmenu = True
+ elif key == ord('q'):
+ self.stdscr.erase()
+ self.run_all_display()
+ endmenu = True
+
+ try:
+ self.display_fs_menu(stdscr, curr_row)
+ except curses.error:
+ pass
+            curses.halfdelay(int(self.refresh_interval_secs * 10))
+ key = stdscr.getch()
+
+ def set_option(self, opt):
+ if opt == ord('m'):
+ curses.wrapper(self.set_key)
+ elif opt == ord('q'):
+ if self.current_screen == FS_TOP_ALL_FS_APP:
+ quit()
+ else:
+ self.run_all_display()
+
+ def verify_perf_stats_support(self):
+ mon_cmd = {'prefix': 'mgr module ls', 'format': 'json'}
+ try:
+ ret, buf, out = self.rados.mon_command(json.dumps(mon_cmd), b'')
+ except Exception as e:
+ raise FSTopException(f'error checking \'stats\' module: {e}')
+ if ret != 0:
+ raise FSTopException(f'error checking \'stats\' module: {out}')
+ if 'stats' not in json.loads(buf.decode('utf-8'))['enabled_modules']:
+            raise FSTopException('\'stats\' module not enabled. Use '
+                                 '\'ceph mgr module enable stats\' to enable')
+
+ def perf_stats_query(self):
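+        # the returned JSON is consumed below roughly as:
+        #   {'version': 2,
+        #    'global_counters': ['cap_hit', 'read_latency', ...],
+        #    'client_metadata': {<fs>: {<client>: {'mount_point': ..., 'root': ...,
+        #                                          'IP': ..., 'hostname': ...,
+        #                                          'valid_metrics': [...]}}},
+        #    'global_metrics': {<fs>: {<client>: [[v0, v1], ...]}}}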
+ mgr_cmd = {'prefix': 'fs perf stats', 'format': 'json'}
+ try:
+ ret, buf, out = self.rados.mgr_command(json.dumps(mgr_cmd), b'')
+ except Exception as e:
+ raise FSTopException(f'error in \'perf stats\' query: {e}')
+ if ret != 0:
+ raise FSTopException(f'error in \'perf stats\' query: {out}')
+ return json.loads(buf.decode('utf-8'))
+
+ def items(self, item):
+ if item == "CAP_HIT":
+ return "chit"
+ if item == "READ_LATENCY":
+ return "rlat"
+ if item == "WRITE_LATENCY":
+ return "wlat"
+ if item == "METADATA_LATENCY":
+ return "mlat"
+ if item == "DENTRY_LEASE":
+ return "dlease"
+ if item == "OPENED_FILES":
+ return "ofiles"
+ if item == "PINNED_ICAPS":
+ return "oicaps"
+ if item == "OPENED_INODES":
+ return "oinodes"
+ if item == "READ_IO_SIZES":
+ return "rtio"
+ if item == "WRITE_IO_SIZES":
+ return "wtio"
+ if item == 'AVG_READ_LATENCY':
+ return 'rlatavg'
+ if item == 'STDEV_READ_LATENCY':
+ return 'rlatsd'
+ if item == 'AVG_WRITE_LATENCY':
+ return 'wlatavg'
+ if item == 'STDEV_WRITE_LATENCY':
+ return 'wlatsd'
+ if item == 'AVG_METADATA_LATENCY':
+ return 'mlatavg'
+ if item == 'STDEV_METADATA_LATENCY':
+ return 'mlatsd'
+        else:
+            # return an empty string for unknown items
+            return ''
+
+ def mtype(self, typ):
+ if typ == MetricType.METRIC_TYPE_PERCENTAGE:
+ return "(%)"
+ elif typ == MetricType.METRIC_TYPE_LATENCY:
+ return "(ms)"
+ elif typ == MetricType.METRIC_TYPE_SIZE:
+ return "(MB)"
+ elif typ == MetricType.METRIC_TYPE_STDEV:
+ return "(ms)"
+ else:
+ # return empty string for none type
+ return ''
+
+ def avg_items(self, item):
+ if item == "READ_IO_SIZES":
+ return "raio"
+ if item == "WRITE_IO_SIZES":
+ return "waio"
+ else:
+ # return empty string for none type
+ return ''
+
+ def speed_items(self, item):
+ if item == "READ_IO_SIZES":
+ return "rsp"
+ if item == "WRITE_IO_SIZES":
+ return "wsp"
+ else:
+ # return empty string for none type
+ return ''
+
+ def speed_mtype(self, typ):
+ if typ == MetricType.METRIC_TYPE_SIZE:
+ return "(MB/s)"
+ else:
+ # return empty string for none type
+ return ''
+
+ @staticmethod
+ def has_metric(metadata, metrics_key):
+ return metrics_key in metadata
+
+ @staticmethod
+ def has_metrics(metadata, metrics_keys):
+ for key in metrics_keys:
+ if not FSTop.has_metric(metadata, key):
+ return False
+ return True
+
+ def create_top_line_and_build_coord(self):
+ xp = 0
+ x_coord_map = {}
+
+ heading = []
+ for item in MAIN_WINDOW_TOP_LINE_ITEMS_START:
+ heading.append(item)
+ nlen = len(item) + len(ITEMS_PAD)
+ x_coord_map[item] = (xp, nlen)
+ xp += nlen
+
+ for item, typ in MAIN_WINDOW_TOP_LINE_METRICS.items():
+ if item in MAIN_WINDOW_TOP_LINE_METRICS_LEGACY:
+ continue
+ it = f'{self.items(item)}{self.mtype(typ)}'
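+            # e.g. CAP_HIT renders as 'chit(%)' and READ_IO_SIZES as 'rtio(MB)'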
+ heading.append(it)
+ nlen = len(it) + len(ITEMS_PAD)
+ x_coord_map[item] = (xp, nlen)
+ xp += nlen
+
+ if item == "READ_IO_SIZES" or item == "WRITE_IO_SIZES":
+ # average io sizes
+ it = f'{self.avg_items(item)}{self.mtype(typ)}'
+ heading.append(it)
+ nlen = len(it) + len(ITEMS_PAD)
+ if item == "READ_IO_SIZES":
+ x_coord_map["READ_IO_AVG"] = (xp, nlen)
+ if item == "WRITE_IO_SIZES":
+ x_coord_map["WRITE_IO_AVG"] = (xp, nlen)
+ xp += nlen
+
+ # io speeds
+ it = f'{self.speed_items(item)}{self.speed_mtype(typ)}'
+ heading.append(it)
+ nlen = len(it) + len(ITEMS_PAD)
+ if item == "READ_IO_SIZES":
+ x_coord_map["READ_IO_SPEED"] = (xp, nlen)
+ if item == "WRITE_IO_SIZES":
+ x_coord_map["WRITE_IO_SPEED"] = (xp, nlen)
+ xp += nlen
+
+ for item in MAIN_WINDOW_TOP_LINE_ITEMS_END:
+ heading.append(item)
+ nlen = len(item) + len(ITEMS_PAD)
+ x_coord_map[item] = (xp, nlen)
+ xp += nlen
+ title = ITEMS_PAD.join(heading)
+ self.fsstats.addstr(self.tablehead_y, 0, title, curses.A_STANDOUT | curses.A_BOLD)
+ return x_coord_map
+
+ def create_client(self, client_id, metrics, counters,
+ client_meta, x_coord_map, y_coord):
+ global last_time
+ size = 0
+ cur_time = time.time()
+ duration = cur_time - last_time
+ last_time = cur_time
+ for item in MAIN_WINDOW_TOP_LINE_ITEMS_START:
+ coord = x_coord_map[item]
+ hlen = coord[1] - 1
+ if item == FS_TOP_MAIN_WINDOW_COL_CLIENT_ID:
+ self.fsstats.addstr(y_coord, coord[0],
+ wrap(client_id.split('.')[1], hlen), curses.A_DIM)
+ elif item == FS_TOP_MAIN_WINDOW_COL_MNT_ROOT:
+ if FSTop.has_metric(client_meta,
+ CLIENT_METADATA_MOUNT_ROOT_KEY):
+ self.fsstats.addstr(
+ y_coord, coord[0],
+ wrap(client_meta[CLIENT_METADATA_MOUNT_ROOT_KEY], hlen), curses.A_DIM)
+ else:
+ self.fsstats.addstr(y_coord, coord[0], "N/A", curses.A_DIM)
+
+ cidx = 0
+ for item in counters:
+ if item in MAIN_WINDOW_TOP_LINE_METRICS_LEGACY:
+ cidx += 1
+ continue
+ coord = x_coord_map[item]
+ m = metrics[cidx]
+ key = MGR_STATS_COUNTERS[cidx]
+ typ = MAIN_WINDOW_TOP_LINE_METRICS[key]
+ if item.lower() in client_meta.get(
+ CLIENT_METADATA_VALID_METRICS_KEY, []):
+ if typ == MetricType.METRIC_TYPE_PERCENTAGE:
+ self.fsstats.addstr(y_coord, coord[0],
+ f'{calc_perc(m)}', curses.A_DIM)
+ elif typ == MetricType.METRIC_TYPE_LATENCY:
+ self.fsstats.addstr(y_coord, coord[0],
+ f'{calc_lat(m)}', curses.A_DIM)
+ elif typ == MetricType.METRIC_TYPE_STDEV:
+ self.fsstats.addstr(y_coord, coord[0],
+ f'{calc_stdev(m)}', curses.A_DIM)
+ elif typ == MetricType.METRIC_TYPE_SIZE:
+ self.fsstats.addstr(y_coord, coord[0],
+ f'{calc_size(m)}', curses.A_DIM)
+
+ # average io sizes
+ if key == "READ_IO_SIZES":
+ coord = x_coord_map["READ_IO_AVG"]
+ elif key == "WRITE_IO_SIZES":
+ coord = x_coord_map["WRITE_IO_AVG"]
+ self.fsstats.addstr(y_coord, coord[0],
+ f'{calc_avg_size(m)}', curses.A_DIM)
+
+ # io speeds
+ if key == "READ_IO_SIZES":
+ coord = x_coord_map["READ_IO_SPEED"]
+ elif key == "WRITE_IO_SIZES":
+ coord = x_coord_map["WRITE_IO_SPEED"]
+ size = 0
+ if key == "READ_IO_SIZES":
+ if m[1] > 0:
+ global last_read_size
+ last_size = last_read_size.get(client_id, 0)
+ size = m[1] - last_size
+ last_read_size[client_id] = m[1]
+ if key == "WRITE_IO_SIZES":
+ if m[1] > 0:
+ global last_write_size
+ last_size = last_write_size.get(client_id, 0)
+ size = m[1] - last_size
+ last_write_size[client_id] = m[1]
+ self.fsstats.addstr(y_coord, coord[0],
+ f'{calc_speed(abs(size), duration)}', curses.A_DIM)
+ else:
+ # display 0th element from metric tuple
+ self.fsstats.addstr(y_coord, coord[0], f'{m[0]}', curses.A_DIM)
+ else:
+ self.fsstats.addstr(y_coord, coord[0], "N/A", curses.A_DIM)
+ cidx += 1
+
+ for item in MAIN_WINDOW_TOP_LINE_ITEMS_END:
+ coord = x_coord_map[item]
+ wrapLen = self.PAD_WIDTH - coord[0]
+            # always place FS_TOP_MAIN_WINDOW_COL_MNTPT_HOST_ADDR last,
+            # since it can be a very long string to display
+ if item == FS_TOP_MAIN_WINDOW_COL_MNTPT_HOST_ADDR:
+ if FSTop.has_metrics(client_meta,
+ [CLIENT_METADATA_MOUNT_POINT_KEY,
+ CLIENT_METADATA_HOSTNAME_KEY,
+ CLIENT_METADATA_IP_KEY]):
+ mount_point = f'{client_meta[CLIENT_METADATA_MOUNT_POINT_KEY]}@'\
+ f'{client_meta[CLIENT_METADATA_HOSTNAME_KEY]}/'\
+ f'{client_meta[CLIENT_METADATA_IP_KEY]}'
+ self.fsstats.addstr(
+ y_coord, coord[0],
+ wrap(mount_point, wrapLen), curses.A_DIM)
+ else:
+ self.fsstats.addstr(y_coord, coord[0], "N/A", curses.A_DIM)
+
+ def create_clients(self, x_coord_map, stats_json, fs_name):
+ counters = [m.upper() for m in stats_json[GLOBAL_COUNTERS_KEY]]
+ self.tablehead_y += 2
+ res = stats_json[CLIENT_METADATA_KEY].get(fs_name, {})
+ client_cnt = len(res)
+ self.fsstats.addstr(self.tablehead_y, 0, FS_TOP_NAME_TOPL_FMT.format(
+ fs_name=fs_name, client_count=client_cnt), curses.A_BOLD | curses.A_ITALIC)
+ self.tablehead_y += 2
+ if client_cnt:
+ for client_id, metrics in \
+ stats_json[GLOBAL_METRICS_KEY][fs_name].items():
+ self.create_client(
+ client_id, metrics, counters, res[client_id],
+ x_coord_map, self.tablehead_y)
+ self.tablehead_y += 1
+
+ def create_header(self, stats_json, help, screen_title="", color_id=0):
+ num_clients, num_mounts, num_kclients, num_libs = 0, 0, 0, 0
+ if not stats_json['version'] == FS_TOP_SUPPORTED_VER:
+ self.header.addstr(0, 0, 'perf stats version mismatch!', curses.A_BOLD)
+ return False
+ global fs_list
+ for fs_name in fs_list:
+ client_metadata = stats_json[CLIENT_METADATA_KEY].get(fs_name, {})
+ client_cnt = len(client_metadata)
+ if client_cnt:
+ num_clients = num_clients + client_cnt
+ num_mounts = num_mounts + len(
+ [client for client, metadata in client_metadata.items() if
+ CLIENT_METADATA_MOUNT_POINT_KEY in metadata
+ and metadata[CLIENT_METADATA_MOUNT_POINT_KEY] != 'N/A'])
+ num_kclients = num_kclients + len(
+ [client for client, metadata in client_metadata.items() if
+ "kernel_version" in metadata])
+ num_libs = num_clients - (num_mounts + num_kclients)
+ now = datetime.now().ctime()
+ self.header.addstr(0, 0, FS_TOP_VERSION_HEADER_FMT.format(prog_name=FS_TOP_PROG_STR,
+ now=now), curses.A_BOLD)
+ self.header.addstr(2, 0, screen_title, curses.color_pair(color_id) | curses.A_BOLD)
+ self.header.addstr(3, 0, FS_TOP_CLIENT_HEADER_FMT.format(num_clients=num_clients,
+ num_mounts=num_mounts,
+ num_kclients=num_kclients,
+ num_libs=num_libs), curses.A_DIM)
+ self.header.addstr(4, 0, help, curses.A_DIM)
+ return True
+
+ def run_display(self, fs):
+ # clear the pads to have a smooth refresh
+ self.header.erase()
+ self.fsstats.erase()
+
+ self.current_screen = FS_TOP_FS_SELECTED_APP
+ screen_title = "Selected Filesystem Info"
+ help_commands = "Press 'q' to go back to home (All Filesystem Info) screen"\
+ " | Press 'm' to select another filesystem"
+ curses.init_pair(3, curses.COLOR_MAGENTA, -1)
+
+ top, left = 0, 0 # where to place pad
+ vscrollOffset, hscrollOffset = 0, 0 # scroll offsets
+
+ # calculate the initial viewport height and width
+ windowsize = self.stdscr.getmaxyx()
+ self.viewportHeight, self.viewportWidth = windowsize[0] - 1, windowsize[1] - 1
+
+ # create header subpad
+ self.header_height = 7
+ self.header = self.fstop_pad.subwin(self.header_height, self.viewportWidth, 0, 0)
+
+ # create fsstats subpad
+ fsstats_begin_y = self.header_height
+ fsstats_height = self.PAD_HEIGHT - self.header_height
+ self.fsstats = self.fstop_pad.subwin(fsstats_height, self.PAD_WIDTH, fsstats_begin_y, 0)
+
+ curses.halfdelay(1)
+ cmd = self.stdscr.getch()
+ while not self.exit_ev.is_set():
+ if cmd in [ord('m'), ord('q')]:
+ self.set_option(cmd)
+ self.exit_ev.set()
+
+ # header display
+ global fs_list
+ fs_list = self.get_fs_names()
+ stats_json = self.perf_stats_query()
+ vscrollEnd = 0
+ if fs not in fs_list:
+ help = "Error: The selected filesystem is not available now. " + help_commands
+ self.header.erase() # erase previous text
+ self.create_header(stats_json, help, screen_title, 3)
+ else:
+ self.tablehead_y = 0
+ help = "HELP: " + help_commands
+ self.fsstats.erase() # erase previous text
+
+ vscrollEnd = len(stats_json[CLIENT_METADATA_KEY].get(fs, {}))
+ if self.create_header(stats_json, help, screen_title, 3):
+ x_coord_map = self.create_top_line_and_build_coord()
+ self.create_clients(x_coord_map, stats_json, fs)
+
+ # scroll and refresh
+ if cmd == curses.KEY_DOWN:
+ if (vscrollEnd - vscrollOffset) > 1:
+ vscrollOffset += 1
+ else:
+ vscrollOffset = vscrollEnd
+ elif cmd == curses.KEY_UP:
+ if vscrollOffset > 0:
+ vscrollOffset -= 1
+ elif cmd == curses.KEY_NPAGE:
+ if (vscrollEnd - vscrollOffset) / 20 > 1:
+ vscrollOffset += 20
+ else:
+ vscrollOffset = vscrollEnd
+ elif cmd == curses.KEY_PPAGE:
+ if vscrollOffset / 20 >= 1:
+ vscrollOffset -= 20
+ else:
+ vscrollOffset = 0
+ elif cmd == curses.KEY_RIGHT:
+ if hscrollOffset < self.PAD_WIDTH - self.viewportWidth - 1:
+ hscrollOffset += 1
+ elif cmd == curses.KEY_LEFT:
+ if hscrollOffset > 0:
+ hscrollOffset -= 1
+ elif cmd == curses.KEY_HOME:
+ hscrollOffset = 0
+ elif cmd == curses.KEY_END:
+ hscrollOffset = self.PAD_WIDTH - self.viewportWidth - 1
+ elif cmd == curses.KEY_RESIZE:
+ # terminal resize event. Update the viewport dimensions
+ windowsize = self.stdscr.getmaxyx()
+ self.viewportHeight, self.viewportWidth = windowsize[0] - 1, windowsize[1] - 1
+
+ if cmd:
+ try:
+ # refresh the viewport for the header portion
+ if cmd not in [curses.KEY_DOWN,
+ curses.KEY_UP,
+ curses.KEY_NPAGE,
+ curses.KEY_PPAGE,
+ curses.KEY_RIGHT,
+ curses.KEY_LEFT]:
+ self.fstop_pad.refresh(0, 0,
+ top, left,
+ top + self.header_height, left + self.viewportWidth)
+ # refresh the viewport for the current table header portion in the fsstats pad
+ if cmd not in [curses.KEY_DOWN,
+ curses.KEY_UP,
+ curses.KEY_NPAGE,
+ curses.KEY_PPAGE]:
+ self.fstop_pad.refresh(fsstats_begin_y, hscrollOffset,
+ top + fsstats_begin_y, left,
+ 7, left + self.viewportWidth)
+ # refresh the viewport for the current client records portion in the fsstats pad
+ self.fstop_pad.refresh(fsstats_begin_y + 1 + vscrollOffset, hscrollOffset,
+ top + fsstats_begin_y + 2, left,
+ top + self.viewportHeight, left + self.viewportWidth)
+ except curses.error:
+                    # this can happen when the terminal is resized or its
+                    # zoom level changes; just retry on the next iteration
+ pass
+ # End scroll and refresh
+
+            curses.halfdelay(int(self.refresh_interval_secs * 10))
+ cmd = self.stdscr.getch()
+
+ def run_all_display(self):
+ # clear text from the previous screen
+ if self.current_screen == FS_TOP_FS_SELECTED_APP:
+ self.header.erase()
+
+ self.current_screen = FS_TOP_ALL_FS_APP
+ screen_title = "All Filesystem Info"
+ curses.init_pair(2, curses.COLOR_CYAN, -1)
+
+ top, left = 0, 0 # where to place pad
+ vscrollOffset, hscrollOffset = 0, 0 # scroll offsets
+
+ # calculate the initial viewport height and width
+ windowsize = self.stdscr.getmaxyx()
+ self.viewportHeight, self.viewportWidth = windowsize[0] - 1, windowsize[1] - 1
+
+ # create header subpad
+ self.header_height = 7
+ self.header = self.fstop_pad.subwin(self.header_height, self.viewportWidth, 0, 0)
+
+ # create fsstats subpad
+ fsstats_begin_y = self.header_height
+ fsstats_height = self.PAD_HEIGHT - self.header_height
+ self.fsstats = self.fstop_pad.subwin(fsstats_height, self.PAD_WIDTH, fsstats_begin_y, 0)
+
+ curses.halfdelay(1)
+ cmd = self.stdscr.getch()
+ while not self.exit_ev.is_set():
+ if cmd in [ord('m'), ord('q')]:
+ self.set_option(cmd)
+ self.exit_ev.set()
+
+ # header display
+ global fs_list
+ fs_list = self.get_fs_names()
+ stats_json = self.perf_stats_query()
+ vscrollEnd = 0
+ if not fs_list:
+ help = "INFO: No filesystem is available [Press 'q' to quit]"
+ self.header.erase() # erase previous text
+ self.fsstats.erase()
+ self.create_header(stats_json, help, screen_title, 2)
+ else:
+ self.tablehead_y = 0
+ help = "HELP: Press 'm' to select a filesystem | Press 'q' to quit"
+ self.fsstats.erase() # erase previous text
+ for index, fs in enumerate(fs_list):
+ # Get the vscrollEnd in advance
+ vscrollEnd += len(stats_json[CLIENT_METADATA_KEY].get(fs, {}))
+ if self.create_header(stats_json, help, screen_title, 2):
+ if not index: # do it only for the first fs
+ x_coord_map = self.create_top_line_and_build_coord()
+ self.create_clients(x_coord_map, stats_json, fs)
+
+ # scroll and refresh
+ if cmd == curses.KEY_DOWN:
+ if (vscrollEnd - vscrollOffset) > 1:
+ vscrollOffset += 1
+ else:
+ vscrollOffset = vscrollEnd
+ elif cmd == curses.KEY_UP:
+ if vscrollOffset > 0:
+ vscrollOffset -= 1
+ elif cmd == curses.KEY_NPAGE:
+ if (vscrollEnd - vscrollOffset) / 20 > 1:
+ vscrollOffset += 20
+ else:
+ vscrollOffset = vscrollEnd
+ elif cmd == curses.KEY_PPAGE:
+ if vscrollOffset / 20 >= 1:
+ vscrollOffset -= 20
+ else:
+ vscrollOffset = 0
+ elif cmd == curses.KEY_RIGHT:
+ if hscrollOffset < self.PAD_WIDTH - self.viewportWidth - 1:
+ hscrollOffset += 1
+ elif cmd == curses.KEY_LEFT:
+ if hscrollOffset > 0:
+ hscrollOffset -= 1
+ elif cmd == curses.KEY_HOME:
+ hscrollOffset = 0
+ elif cmd == curses.KEY_END:
+ hscrollOffset = self.PAD_WIDTH - self.viewportWidth - 1
+ elif cmd == curses.KEY_RESIZE:
+ # terminal resize event. Update the viewport dimensions
+ windowsize = self.stdscr.getmaxyx()
+ self.viewportHeight, self.viewportWidth = windowsize[0] - 1, windowsize[1] - 1
+ if cmd:
+ try:
+ # refresh the viewport for the header portion
+ if cmd not in [curses.KEY_DOWN,
+ curses.KEY_UP,
+ curses.KEY_NPAGE,
+ curses.KEY_PPAGE,
+ curses.KEY_RIGHT,
+ curses.KEY_LEFT]:
+ self.fstop_pad.refresh(0, 0,
+ top, left,
+ top + self.header_height, left + self.viewportWidth)
+ # refresh the viewport for the current table header portion in the fsstats pad
+ if cmd not in [curses.KEY_DOWN,
+ curses.KEY_UP,
+ curses.KEY_NPAGE,
+ curses.KEY_PPAGE]:
+ self.fstop_pad.refresh(fsstats_begin_y, hscrollOffset,
+ top + fsstats_begin_y, left,
+ 7, left + self.viewportWidth)
+ # refresh the viewport for the current client records portion in the fsstats pad
+ self.fstop_pad.refresh(fsstats_begin_y + 1 + vscrollOffset, hscrollOffset,
+ top + fsstats_begin_y + 2, left,
+ top + self.viewportHeight, left + self.viewportWidth)
+ except curses.error:
+                    # this can happen when the terminal is resized or its
+                    # zoom level changes; just retry on the next iteration
+ pass
+ # End scroll and refresh
+
+            curses.halfdelay(int(self.refresh_interval_secs * 10))
+ cmd = self.stdscr.getch()
+# End class FSTop
+
+
+if __name__ == '__main__':
+ def float_greater_than(x):
+ value = float(x)
+ if value < MIN_REFRESH_INTERVAL:
+ raise argparse.ArgumentTypeError(
+ 'Refresh interval should be greater than or equal to'
+ f' {MIN_REFRESH_INTERVAL}')
+ return value
+
+ parser = argparse.ArgumentParser(description='Ceph Filesystem top utility')
+ parser.add_argument('--cluster', nargs='?', const='ceph', default='ceph',
+                        help='Ceph cluster to connect to (default: ceph)')
+ parser.add_argument('--id', nargs='?', const='fstop', default='fstop',
+                        help='Ceph user to use for connection (default: fstop)')
+ parser.add_argument('--conffile', nargs='?', default=None,
+ help='Path to cluster configuration file')
+ parser.add_argument('--selftest', dest='selftest', action='store_true',
+ help='Run in selftest mode')
+ parser.add_argument('-d', '--delay', nargs='?',
+ default=DEFAULT_REFRESH_INTERVAL,
+ type=float_greater_than,
+ help='Refresh interval in seconds '
+ f'(default: {DEFAULT_REFRESH_INTERVAL})')
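+    # typical invocations (illustrative):
+    #   cephfs-top                    # defaults: --cluster ceph --id fstop -d 1
+    #   cephfs-top -d 2               # refresh every 2 seconds
+    #   cephfs-top --selftest         # verify mgr/stats support and exit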
+
+ args = parser.parse_args()
+ err = False
+ ft = FSTop(args)
+ try:
+ ft.init()
+ if args.selftest:
+ ft.selftest()
+ sys.stdout.write("selftest ok\n")
+ else:
+ curses.wrapper(ft.setup_curses)
+ except FSTopException as fst:
+ err = True
+ sys.stderr.write(f'{fst.get_error_msg()}\n')
+ except Exception as e:
+ err = True
+ sys.stderr.write(f'exception: {e}\n')
+ finally:
+ ft.fini()
+ sys.exit(0 if not err else -1)