author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /tools/workqueue
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/workqueue')
-rw-r--r--  tools/workqueue/wq_dump.py     177
-rw-r--r--  tools/workqueue/wq_monitor.py  175
2 files changed, 352 insertions, 0 deletions
diff --git a/tools/workqueue/wq_dump.py b/tools/workqueue/wq_dump.py
new file mode 100644
index 0000000000..d0df5833f2
--- /dev/null
+++ b/tools/workqueue/wq_dump.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2023 Tejun Heo <tj@kernel.org>
+# Copyright (C) 2023 Meta Platforms, Inc. and affiliates.
+
+desc = """
+This is a drgn script to show the current workqueue configuration. For more
+info on drgn, visit https://github.com/osandov/drgn.
+
+Affinity Scopes
+===============
+
+Shows the CPUs that can be used for unbound workqueues and how they will be
+grouped by each available affinity type. For each type:
+
+  nr_pods   number of CPU pods in the affinity type
+  pod_cpus  CPUs in each pod
+  pod_node  NUMA node for memory allocation for each pod
+  cpu_pod   pod that each CPU is associated to
+
+Worker Pools
+============
+
+Lists all worker pools indexed by their ID. For each pool:
+
+  ref       number of pool_workqueue's associated with this pool
+  nice      nice value of the worker threads in the pool
+  idle      number of idle workers
+  workers   number of all workers
+  cpu       CPU the pool is associated with (per-cpu pool)
+  cpus      CPUs the workers in the pool can run on (unbound pool)
+
+Workqueue CPU -> pool
+=====================
+
+Lists all workqueues along with their type and worker pool association. For
+each workqueue:
+
+  NAME TYPE[,FLAGS] POOL_ID...
+
+  NAME      name of the workqueue
+  TYPE      percpu, unbound or ordered
+  FLAGS     S: strict affinity scope
+  POOL_ID   worker pool ID associated with each possible CPU
+"""
+
+import sys
+
+import drgn
+from drgn.helpers.linux.list import list_for_each_entry,list_empty
+from drgn.helpers.linux.percpu import per_cpu_ptr
+from drgn.helpers.linux.cpumask import for_each_cpu,for_each_possible_cpu
+from drgn.helpers.linux.idr import idr_for_each
+
+import argparse
+parser = argparse.ArgumentParser(description=desc,
+                                 formatter_class=argparse.RawTextHelpFormatter)
+args = parser.parse_args()
+
+def err(s):
+    print(s, file=sys.stderr, flush=True)
+    sys.exit(1)
+
+def cpumask_str(cpumask):
+ output = ""
+ base = 0
+ v = 0
+ for cpu in for_each_cpu(cpumask[0]):
+ while cpu - base >= 32:
+ output += f'{hex(v)} '
+ base += 32
+ v = 0
+ v |= 1 << (cpu - base)
+ if v > 0:
+ output += f'{v:08x}'
+ return output.strip()
+
+worker_pool_idr = prog['worker_pool_idr']
+workqueues = prog['workqueues']
+wq_unbound_cpumask = prog['wq_unbound_cpumask']
+wq_pod_types = prog['wq_pod_types']
+wq_affn_dfl = prog['wq_affn_dfl']
+wq_affn_names = prog['wq_affn_names']
+
+WQ_UNBOUND = prog['WQ_UNBOUND']
+WQ_ORDERED = prog['__WQ_ORDERED']
+WQ_MEM_RECLAIM = prog['WQ_MEM_RECLAIM']
+
+WQ_AFFN_CPU = prog['WQ_AFFN_CPU']
+WQ_AFFN_SMT = prog['WQ_AFFN_SMT']
+WQ_AFFN_CACHE = prog['WQ_AFFN_CACHE']
+WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA']
+WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM']
+
+print('Affinity Scopes')
+print('===============')
+
+print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}')
+
+def print_pod_type(pt):
+    print(f' nr_pods {pt.nr_pods.value_()}')
+
+    print(' pod_cpus', end='')
+    for pod in range(pt.nr_pods):
+        print(f' [{pod}]={cpumask_str(pt.pod_cpus[pod])}', end='')
+    print('')
+
+    print(' pod_node', end='')
+    for pod in range(pt.nr_pods):
+        print(f' [{pod}]={pt.pod_node[pod].value_()}', end='')
+    print('')
+
+    print(f' cpu_pod ', end='')
+    for cpu in for_each_possible_cpu(prog):
+        print(f' [{cpu}]={pt.cpu_pod[cpu].value_()}', end='')
+    print('')
+
+for affn in [WQ_AFFN_CPU, WQ_AFFN_SMT, WQ_AFFN_CACHE, WQ_AFFN_NUMA, WQ_AFFN_SYSTEM]:
+    print('')
+    print(f'{wq_affn_names[affn].string_().decode().upper()}{" (default)" if affn == wq_affn_dfl else ""}')
+    print_pod_type(wq_pod_types[affn])
+
+print('')
+print('Worker Pools')
+print('============')
+
+max_pool_id_len = 0
+max_ref_len = 0
+for pi, pool in idr_for_each(worker_pool_idr):
+    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
+    max_pool_id_len = max(max_pool_id_len, len(f'{pi}'))
+    max_ref_len = max(max_ref_len, len(f'{pool.refcnt.value_()}'))
+
+for pi, pool in idr_for_each(worker_pool_idr):
+    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
+    print(f'pool[{pi:0{max_pool_id_len}}] ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='')
+    print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
+    if pool.cpu >= 0:
+        print(f'cpu={pool.cpu.value_():3}', end='')
+    else:
+        print(f'cpus={cpumask_str(pool.attrs.cpumask)}', end='')
+        print(f' pod_cpus={cpumask_str(pool.attrs.__pod_cpumask)}', end='')
+        if pool.attrs.affn_strict:
+            print(' strict', end='')
+    print('')
+
+print('')
+print('Workqueue CPU -> pool')
+print('=====================')
+
+print('[ workqueue \ type CPU', end='')
+for cpu in for_each_possible_cpu(prog):
+    print(f' {cpu:{max_pool_id_len}}', end='')
+print(' dfl]')
+
+for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
+    print(f'{wq.name.string_().decode()[-24:]:24}', end='')
+    if wq.flags & WQ_UNBOUND:
+        if wq.flags & WQ_ORDERED:
+            print(' ordered ', end='')
+        else:
+            print(' unbound', end='')
+            if wq.unbound_attrs.affn_strict:
+                print(',S ', end='')
+            else:
+                print(' ', end='')
+    else:
+        print(' percpu ', end='')
+
+    for cpu in for_each_possible_cpu(prog):
+        pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()
+        field_len = max(len(str(cpu)), max_pool_id_len)
+        print(f' {pool_id:{field_len}}', end='')
+
+    if wq.flags & WQ_UNBOUND:
+        print(f' {wq.dfl_pwq.pool.id.value_():{max_pool_id_len}}', end='')
+    print('')
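
The cpumask_str() helper above packs the CPUs of a cpumask into space-separated 32-bit hex words, lowest word first. A minimal standalone sketch of the same packing, not part of this patch (the helper name and the CPU list are made up for illustration), applied to a plain list of CPU numbers:

def cpus_to_hex_words(cpus):
    # Mirror cpumask_str(): accumulate bits into 32-bit words, emitting a
    # word each time a CPU number falls past the current 32-CPU window.
    output = ""
    base = 0
    v = 0
    for cpu in sorted(cpus):
        while cpu - base >= 32:
            output += f'{hex(v)} '
            base += 32
            v = 0
        v |= 1 << (cpu - base)
    if v > 0:
        output += f'{v:08x}'
    return output.strip()

# CPUs 0-3 set bits in the first word, CPU 33 lands in the second word.
print(cpus_to_hex_words([0, 1, 2, 3, 33]))   # -> "0xf 00000002"
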
diff --git a/tools/workqueue/wq_monitor.py b/tools/workqueue/wq_monitor.py
new file mode 100644
index 0000000000..a8856a9c45
--- /dev/null
+++ b/tools/workqueue/wq_monitor.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2023 Tejun Heo <tj@kernel.org>
+# Copyright (C) 2023 Meta Platforms, Inc. and affiliates.
+
+desc = """
+This is a drgn script to monitor workqueues. For more info on drgn, visit
+https://github.com/osandov/drgn.
+
+  total     Total number of work items executed by the workqueue.
+
+  infl      The number of currently in-flight work items.
+
+  CPUtime   Total CPU time consumed by the workqueue in seconds. This is
+            sampled from scheduler ticks and only provides ballpark
+            measurement. "nohz_full=" CPUs are excluded from measurement.
+
+  CPUitsv   The number of times a concurrency-managed work item hogged CPU
+            longer than the threshold (workqueue.cpu_intensive_thresh_us)
+            and got excluded from concurrency management to avoid stalling
+            other work items.
+
+  CMW/RPR   For per-cpu workqueues, the number of concurrency-management
+            wake-ups while executing a work item of the workqueue. For
+            unbound workqueues, the number of times a worker was repatriated
+            to its affinity scope after being migrated to an off-scope CPU by
+            the scheduler.
+
+  mayday    The number of times the rescuer was requested while waiting for
+            new worker creation.
+
+  rescued   The number of work items executed by the rescuer.
+"""
+
+import sys
+import signal
+import os
+import re
+import time
+import json
+
+import drgn
+from drgn.helpers.linux.list import list_for_each_entry,list_empty
+from drgn.helpers.linux.cpumask import for_each_possible_cpu
+
+import argparse
+parser = argparse.ArgumentParser(description=desc,
+                                 formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument('workqueue', metavar='REGEX', nargs='*',
+                    help='Target workqueue name patterns (all if empty)')
+parser.add_argument('-i', '--interval', metavar='SECS', type=float, default=1,
+                    help='Monitoring interval (0 to print once and exit)')
+parser.add_argument('-j', '--json', action='store_true',
+                    help='Output in json')
+args = parser.parse_args()
+
+def err(s):
+    print(s, file=sys.stderr, flush=True)
+    sys.exit(1)
+
+workqueues = prog['workqueues']
+
+WQ_UNBOUND = prog['WQ_UNBOUND']
+WQ_MEM_RECLAIM = prog['WQ_MEM_RECLAIM']
+
+PWQ_STAT_STARTED = prog['PWQ_STAT_STARTED'] # work items started execution
+PWQ_STAT_COMPLETED = prog['PWQ_STAT_COMPLETED'] # work items completed execution
+PWQ_STAT_CPU_TIME = prog['PWQ_STAT_CPU_TIME'] # total CPU time consumed
+PWQ_STAT_CPU_INTENSIVE = prog['PWQ_STAT_CPU_INTENSIVE'] # wq_cpu_intensive_thresh_us violations
+PWQ_STAT_CM_WAKEUP = prog['PWQ_STAT_CM_WAKEUP'] # concurrency-management worker wakeups
+PWQ_STAT_REPATRIATED = prog['PWQ_STAT_REPATRIATED'] # unbound workers brought back into scope
+PWQ_STAT_MAYDAY = prog['PWQ_STAT_MAYDAY'] # maydays to rescuer
+PWQ_STAT_RESCUED = prog['PWQ_STAT_RESCUED'] # linked work items executed by rescuer
+PWQ_NR_STATS = prog['PWQ_NR_STATS']
+
+class WqStats:
+    def __init__(self, wq):
+        self.name = wq.name.string_().decode()
+        self.unbound = wq.flags & WQ_UNBOUND != 0
+        self.mem_reclaim = wq.flags & WQ_MEM_RECLAIM != 0
+        self.stats = [0] * PWQ_NR_STATS
+        for pwq in list_for_each_entry('struct pool_workqueue', wq.pwqs.address_of_(), 'pwqs_node'):
+            for i in range(PWQ_NR_STATS):
+                self.stats[i] += int(pwq.stats[i])
+
+    def dict(self, now):
+        return { 'timestamp'     : now,
+                 'name'          : self.name,
+                 'unbound'       : self.unbound,
+                 'mem_reclaim'   : self.mem_reclaim,
+                 'started'       : self.stats[PWQ_STAT_STARTED],
+                 'completed'     : self.stats[PWQ_STAT_COMPLETED],
+                 'cpu_time'      : self.stats[PWQ_STAT_CPU_TIME],
+                 'cpu_intensive' : self.stats[PWQ_STAT_CPU_INTENSIVE],
+                 'cm_wakeup'     : self.stats[PWQ_STAT_CM_WAKEUP],
+                 'repatriated'   : self.stats[PWQ_STAT_REPATRIATED],
+                 'mayday'        : self.stats[PWQ_STAT_MAYDAY],
+                 'rescued'       : self.stats[PWQ_STAT_RESCUED], }
+
+    def table_header_str():
+        return f'{"":>24} {"total":>8} {"infl":>5} {"CPUtime":>8} '\
+               f'{"CPUitsv":>7} {"CMW/RPR":>7} {"mayday":>7} {"rescued":>7}'
+
+    def table_row_str(self):
+        cpu_intensive = '-'
+        cmw_rpr = '-'
+        mayday = '-'
+        rescued = '-'
+
+        if self.unbound:
+            cmw_rpr = str(self.stats[PWQ_STAT_REPATRIATED]);
+        else:
+            cpu_intensive = str(self.stats[PWQ_STAT_CPU_INTENSIVE])
+            cmw_rpr = str(self.stats[PWQ_STAT_CM_WAKEUP])
+
+        if self.mem_reclaim:
+            mayday = str(self.stats[PWQ_STAT_MAYDAY])
+            rescued = str(self.stats[PWQ_STAT_RESCUED])
+
+        out = f'{self.name[-24:]:24} ' \
+              f'{self.stats[PWQ_STAT_STARTED]:8} ' \
+              f'{max(self.stats[PWQ_STAT_STARTED] - self.stats[PWQ_STAT_COMPLETED], 0):5} ' \
+              f'{self.stats[PWQ_STAT_CPU_TIME] / 1000000:8.1f} ' \
+              f'{cpu_intensive:>7} ' \
+              f'{cmw_rpr:>7} ' \
+              f'{mayday:>7} ' \
+              f'{rescued:>7} '
+        return out.rstrip(':')
+
+exit_req = False
+
+def sigint_handler(signr, frame):
+    global exit_req
+    exit_req = True
+
+def main():
+    # handle args
+    table_fmt = not args.json
+    interval = args.interval
+
+    re_str = None
+    if args.workqueue:
+        for r in args.workqueue:
+            if re_str is None:
+                re_str = r
+            else:
+                re_str += '|' + r
+
+    filter_re = re.compile(re_str) if re_str else None
+
+    # monitoring loop
+    signal.signal(signal.SIGINT, sigint_handler)
+
+    while not exit_req:
+        now = time.time()
+
+        if table_fmt:
+            print()
+            print(WqStats.table_header_str())
+
+        for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
+            stats = WqStats(wq)
+            if filter_re and not filter_re.search(stats.name):
+                continue
+            if table_fmt:
+                print(stats.table_row_str())
+            else:
+                print(stats.dict(now))
+
+        if interval == 0:
+            break
+        time.sleep(interval)
+
+if __name__ == "__main__":
+    main()
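
wq_monitor.py prints cumulative counters on every interval (cpu_time is kept in microseconds internally and only divided by 1000000 for the table view). A minimal post-processing sketch, assuming two samples produced by the script's dict() output have already been loaded as Python dicts; the function name and the numbers below are fabricated for illustration:

def rates(prev, cur):
    # Convert two cumulative samples into per-interval rates.
    dt = cur['timestamp'] - prev['timestamp']
    return {
        'name': cur['name'],
        'work_items_per_sec': (cur['started'] - prev['started']) / dt,
        # The cpu_time delta is in microseconds; dividing by wall-clock
        # microseconds gives "CPUs worth of time" consumed per second.
        'cpu_utilization': (cur['cpu_time'] - prev['cpu_time']) / (dt * 1e6),
    }

prev = {'timestamp': 100.0, 'name': 'events', 'started': 1000, 'cpu_time': 2000000}
cur = {'timestamp': 101.0, 'name': 'events', 'started': 1250, 'cpu_time': 2500000}
print(rates(prev, cur))   # ~250 work items/s, ~0.5 CPUs of CPU time
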