author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /tools/cgroup
parent     Initial commit.
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/cgroup'):
 -rw-r--r--  tools/cgroup/.gitignore                 2
 -rw-r--r--  tools/cgroup/Makefile                  11
 -rw-r--r--  tools/cgroup/cgroup_event_listener.c   83
 -rw-r--r--  tools/cgroup/iocost_coef_gen.py       178
 -rw-r--r--  tools/cgroup/iocost_monitor.py        270
 -rw-r--r--  tools/cgroup/memcg_shrinker.py         71
 -rw-r--r--  tools/cgroup/memcg_slabinfo.py        226
 7 files changed, 841 insertions, 0 deletions
diff --git a/tools/cgroup/.gitignore b/tools/cgroup/.gitignore
new file mode 100644
index 000000000..46a82775f
--- /dev/null
+++ b/tools/cgroup/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+cgroup_event_listener
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
new file mode 100644
index 000000000..ffca068e4
--- /dev/null
+++ b/tools/cgroup/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for cgroup tools
+
+CFLAGS = -Wall -Wextra
+
+all: cgroup_event_listener
+%: %.c
+	$(CC) $(CFLAGS) -o $@ $^
+
+clean:
+	$(RM) cgroup_event_listener
diff --git a/tools/cgroup/cgroup_event_listener.c b/tools/cgroup/cgroup_event_listener.c
new file mode 100644
index 000000000..3d70dc831
--- /dev/null
+++ b/tools/cgroup/cgroup_event_listener.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cgroup_event_listener.c - Simple listener of cgroup events
+ *
+ * Copyright (C) Kirill A. Shutemov <kirill@shutemov.name>
+ */
+
+#include <assert.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/eventfd.h>
+
+#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>"
+
+int main(int argc, char **argv)
+{
+	int efd = -1;
+	int cfd = -1;
+	int event_control = -1;
+	char event_control_path[PATH_MAX];
+	char line[LINE_MAX];
+	int ret;
+
+	if (argc != 3)
+		errx(1, "%s", USAGE_STR);
+
+	cfd = open(argv[1], O_RDONLY);
+	if (cfd == -1)
+		err(1, "Cannot open %s", argv[1]);
+
+	ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control",
+			dirname(argv[1]));
+	if (ret >= PATH_MAX)
+		errx(1, "Path to cgroup.event_control is too long");
+
+	event_control = open(event_control_path, O_WRONLY);
+	if (event_control == -1)
+		err(1, "Cannot open %s", event_control_path);
+
+	efd = eventfd(0, 0);
+	if (efd == -1)
+		err(1, "eventfd() failed");
+
+	ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]);
+	if (ret >= LINE_MAX)
+		errx(1, "Arguments string is too long");
+
+	ret = write(event_control, line, strlen(line) + 1);
+	if (ret == -1)
+		err(1, "Cannot write to cgroup.event_control");
+
+	while (1) {
+		uint64_t result;
+
+		ret = read(efd, &result, sizeof(result));
+		if (ret == -1) {
+			if (errno == EINTR)
+				continue;
+			err(1, "Cannot read from eventfd");
+		}
+		assert(ret == sizeof(result));
+
+		ret = access(event_control_path, W_OK);
+		if ((ret == -1) && (errno == ENOENT)) {
+			puts("The cgroup seems to have been removed.");
+			break;
+		}
+
+		if (ret == -1)
+			err(1, "cgroup.event_control is not accessible any more");
+
+		printf("%s %s: crossed\n", argv[1], argv[2]);
+	}
+
+	return 0;
+}
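
The listener above registers for a cgroup v1 event by writing "<event_fd> <control_fd> <args>" to cgroup.event_control and then blocking on the eventfd. A minimal Python sketch of the same registration protocol (not part of the patch), assuming Python >= 3.10 for os.eventfd() and a hypothetical cgroup v1 memory cgroup at /sys/fs/cgroup/memory/demo:

    import os
    import struct

    # Hypothetical cgroup-v1 control file; adjust to a cgroup that exists.
    control_file = '/sys/fs/cgroup/memory/demo/memory.usage_in_bytes'
    event_control = os.path.join(os.path.dirname(control_file),
                                 'cgroup.event_control')

    efd = os.eventfd(0)                       # fd the kernel signals
    cfd = os.open(control_file, os.O_RDONLY)  # control file being watched

    # Same registration line the C tool builds: "<efd> <cfd> <args>";
    # for memory.usage_in_bytes the argument is a threshold in bytes.
    with open(event_control, 'w') as f:
        f.write(f'{efd} {cfd} {100 * 1024 * 1024}')

    while True:
        # read() blocks until the event fires; the payload is a u64 count.
        count = struct.unpack('Q', os.read(efd, 8))[0]
        print(f'{control_file}: crossed ({count})')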
diff --git a/tools/cgroup/iocost_coef_gen.py b/tools/cgroup/iocost_coef_gen.py
new file mode 100644
index 000000000..df17a2ae8
--- /dev/null
+++ b/tools/cgroup/iocost_coef_gen.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2019 Tejun Heo <tj@kernel.org>
+# Copyright (C) 2019 Andy Newell <newella@fb.com>
+# Copyright (C) 2019 Facebook
+
+desc = """
+Generate linear IO cost model coefficients used by the blk-iocost
+controller. If the target raw testdev is specified, destructive tests
+are performed against the whole device; otherwise, on
+./iocost-coef-fio.testfile. The result can be written directly to
+/sys/fs/cgroup/io.cost.model.
+
+On high performance devices, --numjobs > 1 is needed to achieve
+saturation.
+
+See Documentation/admin-guide/cgroup-v2.rst and block/blk-iocost.c
+for more details.
+"""
+
+import argparse
+import re
+import json
+import glob
+import os
+import sys
+import atexit
+import shutil
+import tempfile
+import subprocess
+
+parser = argparse.ArgumentParser(description=desc,
+                                 formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument('--testdev', metavar='DEV',
+                    help='Raw block device to use for testing, ignores --testfile-size-gb')
+parser.add_argument('--testfile-size-gb', type=float, metavar='GIGABYTES', default=16,
+                    help='Testfile size in gigabytes (default: %(default)s)')
+parser.add_argument('--duration', type=int, metavar='SECONDS', default=120,
+                    help='Individual test run duration in seconds (default: %(default)s)')
+parser.add_argument('--seqio-block-mb', metavar='MEGABYTES', type=int, default=128,
+                    help='Sequential test block size in megabytes (default: %(default)s)')
+parser.add_argument('--seq-depth', type=int, metavar='DEPTH', default=64,
+                    help='Sequential test queue depth (default: %(default)s)')
+parser.add_argument('--rand-depth', type=int, metavar='DEPTH', default=64,
+                    help='Random test queue depth (default: %(default)s)')
+parser.add_argument('--numjobs', type=int, metavar='JOBS', default=1,
+                    help='Number of parallel fio jobs to run (default: %(default)s)')
+parser.add_argument('--quiet', action='store_true')
+parser.add_argument('--verbose', action='store_true')
+
+def info(msg):
+    if not args.quiet:
+        print(msg)
+
+def dbg(msg):
+    if args.verbose and not args.quiet:
+        print(msg)
+
+# determine ('DEVNAME', 'MAJ:MIN') for @path
+def dir_to_dev(path):
+    # find the block device the current directory is on
+    devname = subprocess.run(f'findmnt -nvo SOURCE -T{path}',
+                             stdout=subprocess.PIPE, shell=True).stdout
+    devname = os.path.basename(devname).decode('utf-8').strip()
+
+    # partition -> whole device
+    parents = glob.glob('/sys/block/*/' + devname)
+    if len(parents):
+        devname = os.path.basename(os.path.dirname(parents[0]))
+    rdev = os.stat(f'/dev/{devname}').st_rdev
+    return (devname, f'{os.major(rdev)}:{os.minor(rdev)}')
+
+def create_testfile(path, size):
+    global args
+
+    if os.path.isfile(path) and os.stat(path).st_size == size:
+        return
+
+    info(f'Creating testfile {path}')
+    subprocess.check_call(f'rm -f {path}', shell=True)
+    subprocess.check_call(f'touch {path}', shell=True)
+    subprocess.call(f'chattr +C {path}', shell=True)
+    subprocess.check_call(
+        f'pv -s {size} -pr /dev/urandom {"-q" if args.quiet else ""} | '
+        f'dd of={path} count={size} '
+        f'iflag=count_bytes,fullblock oflag=direct bs=16M status=none',
+        shell=True)
+
+def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):
+    global args
+
+    eta = 'never' if args.quiet else 'always'
+    outfile = tempfile.NamedTemporaryFile()
+    cmd = (f'fio --direct=1 --ioengine=libaio --name=coef '
+           f'--filename={testfile} --runtime={round(duration)} '
+           f'--readwrite={iotype} --iodepth={iodepth} --blocksize={blocksize} '
+           f'--eta={eta} --output-format json --output={outfile.name} '
+           f'--time_based --numjobs={jobs}')
+    if args.verbose:
+        dbg(f'Running {cmd}')
+    subprocess.check_call(cmd, shell=True)
+    with open(outfile.name, 'r') as f:
+        d = json.loads(f.read())
+    return sum(j['read']['bw_bytes'] + j['write']['bw_bytes'] for j in d['jobs'])
+
+def restore_elevator_nomerges():
+    global elevator_path, nomerges_path, elevator, nomerges
+
+    info(f'Restoring elevator to {elevator} and nomerges to {nomerges}')
+    with open(elevator_path, 'w') as f:
+        f.write(elevator)
+    with open(nomerges_path, 'w') as f:
+        f.write(nomerges)
+
+
+args = parser.parse_args()
+
+missing = False
+for cmd in [ 'findmnt', 'pv', 'dd', 'fio' ]:
+    if not shutil.which(cmd):
+        print(f'Required command "{cmd}" is missing', file=sys.stderr)
+        missing = True
+if missing:
+    sys.exit(1)
+
+if args.testdev:
+    devname = os.path.basename(args.testdev)
+    rdev = os.stat(f'/dev/{devname}').st_rdev
+    devno = f'{os.major(rdev)}:{os.minor(rdev)}'
+    testfile = f'/dev/{devname}'
+    info(f'Test target: {devname}({devno})')
+else:
+    devname, devno = dir_to_dev('.')
+    testfile = 'iocost-coef-fio.testfile'
+    testfile_size = int(args.testfile_size_gb * 2 ** 30)
+    create_testfile(testfile, testfile_size)
+    info(f'Test target: {testfile} on {devname}({devno})')
+
+elevator_path = f'/sys/block/{devname}/queue/scheduler'
+nomerges_path = f'/sys/block/{devname}/queue/nomerges'
+
+with open(elevator_path, 'r') as f:
+    elevator = re.sub(r'.*\[(.*)\].*', r'\1', f.read().strip())
+with open(nomerges_path, 'r') as f:
+    nomerges = f.read().strip()
+
+info(f'Temporarily disabling elevator and merges')
+atexit.register(restore_elevator_nomerges)
+with open(elevator_path, 'w') as f:
+    f.write('none')
+with open(nomerges_path, 'w') as f:
+    f.write('1')
+
+info('Determining rbps...')
+rbps = run_fio(testfile, args.duration, 'read',
+               1, args.seqio_block_mb * (2 ** 20), args.numjobs)
+info(f'\nrbps={rbps}, determining rseqiops...')
+rseqiops = round(run_fio(testfile, args.duration, 'read',
+                         args.seq_depth, 4096, args.numjobs) / 4096)
+info(f'\nrseqiops={rseqiops}, determining rrandiops...')
+rrandiops = round(run_fio(testfile, args.duration, 'randread',
+                          args.rand_depth, 4096, args.numjobs) / 4096)
+info(f'\nrrandiops={rrandiops}, determining wbps...')
+wbps = run_fio(testfile, args.duration, 'write',
+               1, args.seqio_block_mb * (2 ** 20), args.numjobs)
+info(f'\nwbps={wbps}, determining wseqiops...')
+wseqiops = round(run_fio(testfile, args.duration, 'write',
+                         args.seq_depth, 4096, args.numjobs) / 4096)
+info(f'\nwseqiops={wseqiops}, determining wrandiops...')
+wrandiops = round(run_fio(testfile, args.duration, 'randwrite',
+                          args.rand_depth, 4096, args.numjobs) / 4096)
+info(f'\nwrandiops={wrandiops}')
+restore_elevator_nomerges()
+atexit.unregister(restore_elevator_nomerges)
+info('')
+
+print(f'{devno} rbps={rbps} rseqiops={rseqiops} rrandiops={rrandiops} '
+      f'wbps={wbps} wseqiops={wseqiops} wrandiops={wrandiops}')
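
As the description above notes, the single line this script prints matches what /sys/fs/cgroup/io.cost.model accepts. A hedged sketch of applying it (not part of the patch), assuming root privileges, cgroup v2, and a scratch device; 'sdX' is a placeholder, and testing a raw device is destructive:

    import subprocess

    # Run the generator and keep its final line:
    # "MAJ:MIN rbps=... rseqiops=... rrandiops=... wbps=... wseqiops=... wrandiops=..."
    coefs = subprocess.run(
        ['./iocost_coef_gen.py', '--testdev', 'sdX', '--quiet'],
        check=True, stdout=subprocess.PIPE, text=True,
    ).stdout.strip().splitlines()[-1]

    # Feed the coefficients to blk-iocost (root cgroup only).
    with open('/sys/fs/cgroup/io.cost.model', 'w') as f:
        f.write(coefs + '\n')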
diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py
new file mode 100644
index 000000000..0dbbc6740
--- /dev/null
+++ b/tools/cgroup/iocost_monitor.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2019 Tejun Heo <tj@kernel.org>
+# Copyright (C) 2019 Facebook
+
+desc = """
+This is a drgn script to monitor the blk-iocost cgroup controller.
+See the comment at the top of block/blk-iocost.c for more details.
+For drgn, visit https://github.com/osandov/drgn.
+"""
+
+import sys
+import re
+import time
+import json
+import math
+
+import drgn
+from drgn import container_of
+from drgn.helpers.linux.list import list_for_each_entry,list_empty
+from drgn.helpers.linux.radixtree import radix_tree_for_each,radix_tree_lookup
+
+import argparse
+parser = argparse.ArgumentParser(description=desc,
+                                 formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument('devname', metavar='DEV',
+                    help='Target block device name (e.g. sda)')
+parser.add_argument('--cgroup', action='append', metavar='REGEX',
+                    help='Regex for target cgroups; may be given multiple times')
+parser.add_argument('--interval', '-i', metavar='SECONDS', type=float, default=1,
+                    help='Monitoring interval in seconds (0 exits immediately '
+                         'after checking requirements)')
+parser.add_argument('--json', action='store_true',
+                    help='Output in json')
+args = parser.parse_args()
+
+def err(s):
+    print(s, file=sys.stderr, flush=True)
+    sys.exit(1)
+
+try:
+    blkcg_root = prog['blkcg_root']
+    plid = prog['blkcg_policy_iocost'].plid.value_()
+except:
+    err('The kernel does not have iocost enabled')
+
+IOC_RUNNING = prog['IOC_RUNNING'].value_()
+WEIGHT_ONE = prog['WEIGHT_ONE'].value_()
+VTIME_PER_SEC = prog['VTIME_PER_SEC'].value_()
+VTIME_PER_USEC = prog['VTIME_PER_USEC'].value_()
+AUTOP_SSD_FAST = prog['AUTOP_SSD_FAST'].value_()
+AUTOP_SSD_DFL = prog['AUTOP_SSD_DFL'].value_()
+AUTOP_SSD_QD1 = prog['AUTOP_SSD_QD1'].value_()
+AUTOP_HDD = prog['AUTOP_HDD'].value_()
+
+autop_names = {
+    AUTOP_SSD_FAST: 'ssd_fast',
+    AUTOP_SSD_DFL: 'ssd_dfl',
+    AUTOP_SSD_QD1: 'ssd_qd1',
+    AUTOP_HDD: 'hdd',
+}
+
+class BlkgIterator:
+    def __init__(self, root_blkcg, q_id, include_dying=False):
+        self.include_dying = include_dying
+        self.blkgs = []
+        self.walk(root_blkcg, q_id, '')
+
+    def blkcg_name(blkcg):
+        return blkcg.css.cgroup.kn.name.string_().decode('utf-8')
+
+    def walk(self, blkcg, q_id, parent_path):
+        if not self.include_dying and \
+           not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()):
+            return
+
+        name = BlkgIterator.blkcg_name(blkcg)
+        path = parent_path + '/' + name if parent_path else name
+        blkg = drgn.Object(prog, 'struct blkcg_gq',
+                           address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
+        if not blkg.address_:
+            return
+
+        self.blkgs.append((path if path else '/', blkg))
+
+        for c in list_for_each_entry('struct blkcg',
+                                     blkcg.css.children.address_of_(), 'css.sibling'):
+            self.walk(c, q_id, path)
+
+    def __iter__(self):
+        return iter(self.blkgs)
+
+class IocStat:
+    def __init__(self, ioc):
+        global autop_names
+
+        self.enabled = ioc.enabled.value_()
+        self.running = ioc.running.value_() == IOC_RUNNING
+        self.period_ms = ioc.period_us.value_() / 1_000
+        self.period_at = ioc.period_at.value_() / 1_000_000
+        self.vperiod_at = ioc.period_at_vtime.value_() / VTIME_PER_SEC
+        self.vrate_pct = ioc.vtime_base_rate.value_() * 100 / VTIME_PER_USEC
+        self.busy_level = ioc.busy_level.value_()
+        self.autop_idx = ioc.autop_idx.value_()
+        self.user_cost_model = ioc.user_cost_model.value_()
+        self.user_qos_params = ioc.user_qos_params.value_()
+
+        if self.autop_idx in autop_names:
+            self.autop_name = autop_names[self.autop_idx]
+        else:
+            self.autop_name = '?'
+
+    def dict(self, now):
+        return { 'device' : devname,
+                 'timestamp' : now,
+                 'enabled' : self.enabled,
+                 'running' : self.running,
+                 'period_ms' : self.period_ms,
+                 'period_at' : self.period_at,
+                 'period_vtime_at' : self.vperiod_at,
+                 'busy_level' : self.busy_level,
+                 'vrate_pct' : self.vrate_pct, }
+
+    def table_preamble_str(self):
+        state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF'
+        output = f'{devname} {state:4} ' \
+                 f'per={self.period_ms}ms ' \
+                 f'cur_per={self.period_at:.3f}:v{self.vperiod_at:.3f} ' \
+                 f'busy={self.busy_level:+3} ' \
+                 f'vrate={self.vrate_pct:6.2f}% ' \
+                 f'params={self.autop_name}'
+        if self.user_cost_model or self.user_qos_params:
+            output += f'({"C" if self.user_cost_model else ""}{"Q" if self.user_qos_params else ""})'
+        return output
+
+    def table_header_str(self):
+        return f'{"":25} active {"weight":>9} {"hweight%":>13} {"inflt%":>6} ' \
+               f'{"debt":>7} {"delay":>7} {"usage%"}'
+
+class IocgStat:
+    def __init__(self, iocg):
+        ioc = iocg.ioc
+        blkg = iocg.pd.blkg
+
+        self.is_active = not list_empty(iocg.active_list.address_of_())
+        self.weight = iocg.weight.value_() / WEIGHT_ONE
+        self.active = iocg.active.value_() / WEIGHT_ONE
+        self.inuse = iocg.inuse.value_() / WEIGHT_ONE
+        self.hwa_pct = iocg.hweight_active.value_() * 100 / WEIGHT_ONE
+        self.hwi_pct = iocg.hweight_inuse.value_() * 100 / WEIGHT_ONE
+        self.address = iocg.value_()
+
+        vdone = iocg.done_vtime.counter.value_()
+        vtime = iocg.vtime.counter.value_()
+        vrate = ioc.vtime_rate.counter.value_()
+        period_vtime = ioc.period_us.value_() * vrate
+        if period_vtime:
+            self.inflight_pct = (vtime - vdone) * 100 / period_vtime
+        else:
+            self.inflight_pct = 0
+
+        self.usage = (100 * iocg.usage_delta_us.value_() /
+                      ioc.period_us.value_()) if self.active else 0
+        self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
+        if blkg.use_delay.counter.value_() != 0:
+            self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
+        else:
+            self.delay_ms = 0
+
+    def dict(self, now, path):
+        out = { 'cgroup' : path,
+                'timestamp' : now,
+                'is_active' : self.is_active,
+                'weight' : self.weight,
+                'weight_active' : self.active,
+                'weight_inuse' : self.inuse,
+                'hweight_active_pct' : self.hwa_pct,
+                'hweight_inuse_pct' : self.hwi_pct,
+                'inflight_pct' : self.inflight_pct,
+                'debt_ms' : self.debt_ms,
+                'delay_ms' : self.delay_ms,
+                'usage_pct' : self.usage,
+                'address' : self.address }
+        return out
+
+    def table_row_str(self, path):
+        out = f'{path[-28:]:28} ' \
+              f'{"*" if self.is_active else " "} ' \
+              f'{round(self.inuse):5}/{round(self.active):5} ' \
+              f'{self.hwi_pct:6.2f}/{self.hwa_pct:6.2f} ' \
+              f'{self.inflight_pct:6.2f} ' \
+              f'{self.debt_ms:7.2f} ' \
+              f'{self.delay_ms:7.2f} '\
+              f'{min(self.usage, 999):6.2f}'
+        out = out.rstrip(':')
+        return out
+
+# handle args
+table_fmt = not args.json
+interval = args.interval
+devname = args.devname
+
+if args.json:
+    table_fmt = False
+
+re_str = None
+if args.cgroup:
+    for r in args.cgroup:
+        if re_str is None:
+            re_str = r
+        else:
+            re_str += '|' + r
+
+filter_re = re.compile(re_str) if re_str else None
+
+# Locate the roots
+q_id = None
+root_iocg = None
+ioc = None
+
+for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()):
+    blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr)
+    try:
+        if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'):
+            q_id = blkg.q.id.value_()
+            if blkg.pd[plid]:
+                root_iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
+                ioc = root_iocg.ioc
+            break
+    except:
+        pass
+
+if ioc is None:
+    err(f'Could not find ioc for {devname}')
+
+if interval == 0:
+    sys.exit(0)
+
+# Keep printing
+while True:
+    now = time.time()
+    iocstat = IocStat(ioc)
+    output = ''
+
+    if table_fmt:
+        output += '\n' + iocstat.table_preamble_str()
+        output += '\n' + iocstat.table_header_str()
+    else:
+        output += json.dumps(iocstat.dict(now))
+
+    for path, blkg in BlkgIterator(blkcg_root, q_id):
+        if filter_re and not filter_re.match(path):
+            continue
+        if not blkg.pd[plid]:
+            continue
+
+        iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
+        iocg_stat = IocgStat(iocg)
+
+        if not filter_re and not iocg_stat.is_active:
+            continue
+
+        if table_fmt:
+            output += '\n' + iocg_stat.table_row_str(path)
+        else:
+            output += '\n' + json.dumps(iocg_stat.dict(now, path))
+
+    print(output)
+    sys.stdout.flush()
+    time.sleep(interval)
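
In --json mode the loop above emits one JSON object per line: a per-device record (IocStat.dict) followed by one record per reported cgroup (IocgStat.dict). A small consumer sketch (not part of the patch), assuming drgn is installed, root privileges, and 'sda' as a placeholder device:

    import json
    import subprocess

    proc = subprocess.Popen(['drgn', 'iocost_monitor.py', 'sda', '--json'],
                            stdout=subprocess.PIPE, text=True)

    for line in proc.stdout:
        rec = json.loads(line)
        if 'cgroup' in rec:   # per-cgroup record
            print(f"{rec['cgroup']}: usage={rec['usage_pct']:.1f}% "
                  f"debt={rec['debt_ms']:.2f}ms delay={rec['delay_ms']:.2f}ms")
        else:                 # per-device record
            print(f"{rec['device']}: vrate={rec['vrate_pct']:.1f}% "
                  f"busy={rec['busy_level']}")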
diff --git a/tools/cgroup/memcg_shrinker.py b/tools/cgroup/memcg_shrinker.py
new file mode 100644
index 000000000..706ab2766
--- /dev/null
+++ b/tools/cgroup/memcg_shrinker.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Roman Gushchin <roman.gushchin@linux.dev>
+# Copyright (C) 2022 Meta
+
+import os
+import argparse
+import sys
+
+
+def scan_cgroups(cgroup_root):
+    cgroups = {}
+
+    for root, subdirs, _ in os.walk(cgroup_root):
+        for cgroup in subdirs:
+            path = os.path.join(root, cgroup)
+            ino = os.stat(path).st_ino
+            cgroups[ino] = path
+
+    # (memcg ino, path)
+    return cgroups
+
+
+def scan_shrinkers(shrinker_debugfs):
+    shrinkers = []
+
+    for root, subdirs, _ in os.walk(shrinker_debugfs):
+        for shrinker in subdirs:
+            count_path = os.path.join(root, shrinker, "count")
+            with open(count_path) as f:
+                for line in f.readlines():
+                    items = line.split(' ')
+                    ino = int(items[0])
+                    # (count, shrinker, memcg ino)
+                    shrinkers.append((int(items[1]), shrinker, ino))
+    return shrinkers
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Display biggest shrinkers')
+    parser.add_argument('-n', '--lines', type=int, help='Number of lines to print')
+
+    args = parser.parse_args()
+
+    cgroups = scan_cgroups("/sys/fs/cgroup/")
+    shrinkers = scan_shrinkers("/sys/kernel/debug/shrinker/")
+    shrinkers = sorted(shrinkers, reverse = True, key = lambda x: x[0])
+
+    n = 0
+    for s in shrinkers:
+        count, name, ino = (s[0], s[1], s[2])
+        if count == 0:
+            break
+
+        if ino == 0 or ino == 1:
+            cg = "/"
+        else:
+            try:
+                cg = cgroups[ino]
+            except KeyError:
+                cg = "unknown (%d)" % ino
+
+        print("%-8s %-20s %s" % (count, name, cg))
+
+        n += 1
+        if args.lines and n >= args.lines:
+            break
+
+
+if __name__ == '__main__':
+    main()
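
The script keys shrinker counts to cgroups through the memcg inode reported in each debugfs count file. A hedged sketch of the reverse lookup (not part of the patch) that asks which shrinkers hold objects for one particular cgroup, reusing scan_shrinkers(); it assumes the working directory is tools/cgroup so the module is importable, that the shrinker debugfs is available, and '/sys/fs/cgroup/system.slice' is only a placeholder:

    import os
    from memcg_shrinker import scan_shrinkers

    target = '/sys/fs/cgroup/system.slice'   # placeholder cgroup path
    target_ino = os.stat(target).st_ino

    # scan_shrinkers() returns (count, shrinker_name, memcg_ino) tuples.
    for count, name, ino in sorted(scan_shrinkers('/sys/kernel/debug/shrinker/'),
                                   reverse=True):
        if ino == target_ino and count:
            print(f'{count:<8} {name}')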
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
new file mode 100644
index 000000000..1d3a90d93
--- /dev/null
+++ b/tools/cgroup/memcg_slabinfo.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2020 Roman Gushchin <guro@fb.com>
+# Copyright (C) 2020 Facebook
+
+from os import stat
+import argparse
+import sys
+
+from drgn.helpers.linux import list_for_each_entry, list_empty
+from drgn.helpers.linux import for_each_page
+from drgn.helpers.linux.cpumask import for_each_online_cpu
+from drgn.helpers.linux.percpu import per_cpu_ptr
+from drgn import container_of, FaultError, Object, cast
+
+
+DESC = """
+This is a drgn script to provide slab statistics for memory cgroups.
+It supports cgroup v2 and v1 and can emulate the memory.kmem.slabinfo
+interface of cgroup v1.
+For drgn, visit https://github.com/osandov/drgn.
+"""
+
+
+MEMCGS = {}
+
+OO_SHIFT = 16
+OO_MASK = ((1 << OO_SHIFT) - 1)
+
+
+def err(s):
+    print('slabinfo.py: error: %s' % s, file=sys.stderr, flush=True)
+    sys.exit(1)
+
+
+def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''):
+    if not list_empty(css.children.address_of_()):
+        for css in list_for_each_entry('struct cgroup_subsys_state',
+                                       css.children.address_of_(),
+                                       'sibling'):
+            name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8')
+            memcg = container_of(css, 'struct mem_cgroup', 'css')
+            MEMCGS[css.cgroup.kn.id.value_()] = memcg
+            find_memcg_ids(css, name)
+
+
+def is_root_cache(s):
+    try:
+        return False if s.memcg_params.root_cache else True
+    except AttributeError:
+        return True
+
+
+def cache_name(s):
+    if is_root_cache(s):
+        return s.name.string_().decode('utf-8')
+    else:
+        return s.memcg_params.root_cache.name.string_().decode('utf-8')
+
+
+# SLUB
+
+def oo_order(s):
+    return s.oo.x >> OO_SHIFT
+
+
+def oo_objects(s):
+    return s.oo.x & OO_MASK
+
+
+def count_partial(n, fn):
+    nr_objs = 0
+    for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
+                                    'slab_list'):
+        nr_objs += fn(slab)
+    return nr_objs
+
+
+def count_free(slab):
+    return slab.objects - slab.inuse
+
+
+def slub_get_slabinfo(s, cfg):
+    nr_slabs = 0
+    nr_objs = 0
+    nr_free = 0
+
+    for node in range(cfg['nr_nodes']):
+        n = s.node[node]
+        nr_slabs += n.nr_slabs.counter.value_()
+        nr_objs += n.total_objects.counter.value_()
+        nr_free += count_partial(n, count_free)
+
+    return {'active_objs': nr_objs - nr_free,
+            'num_objs': nr_objs,
+            'active_slabs': nr_slabs,
+            'num_slabs': nr_slabs,
+            'objects_per_slab': oo_objects(s),
+            'cache_order': oo_order(s),
+            'limit': 0,
+            'batchcount': 0,
+            'shared': 0,
+            'shared_avail': 0}
+
+
+def cache_show(s, cfg, objs):
+    if cfg['allocator'] == 'SLUB':
+        sinfo = slub_get_slabinfo(s, cfg)
+    else:
+        err('SLAB isn\'t supported yet')
+
+    if cfg['shared_slab_pages']:
+        sinfo['active_objs'] = objs
+        sinfo['num_objs'] = objs
+
+    print('%-17s %6lu %6lu %6u %4u %4d'
+          ' : tunables %4u %4u %4u'
+          ' : slabdata %6lu %6lu %6lu' % (
+          cache_name(s), sinfo['active_objs'], sinfo['num_objs'],
+          s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'],
+          sinfo['limit'], sinfo['batchcount'], sinfo['shared'],
+          sinfo['active_slabs'], sinfo['num_slabs'],
+          sinfo['shared_avail']))
+
+
+def detect_kernel_config():
+    cfg = {}
+
+    cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
+
+    if prog.type('struct kmem_cache').members[1].name == 'flags':
+        cfg['allocator'] = 'SLUB'
+    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
+        cfg['allocator'] = 'SLAB'
+    else:
+        err('Can\'t determine the slab allocator')
+
+    cfg['shared_slab_pages'] = False
+    try:
+        if prog.type('struct obj_cgroup'):
+            cfg['shared_slab_pages'] = True
+    except:
+        pass
+
+    return cfg
+
+
+def for_each_slab(prog):
+    PGSlab = 1 << prog.constant('PG_slab')
+    PGHead = 1 << prog.constant('PG_head')
+
+    for page in for_each_page(prog):
+        try:
+            if page.flags.value_() & PGSlab:
+                yield cast('struct slab *', page)
+        except FaultError:
+            pass
+
+
+def main():
+    parser = argparse.ArgumentParser(description=DESC,
+                                     formatter_class=
+                                     argparse.RawTextHelpFormatter)
+    parser.add_argument('cgroup', metavar='CGROUP',
+                        help='Target memory cgroup')
+    args = parser.parse_args()
+
+    try:
+        cgroup_id = stat(args.cgroup).st_ino
+        find_memcg_ids()
+        memcg = MEMCGS[cgroup_id]
+    except KeyError:
+        err('Can\'t find the memory cgroup')
+
+    cfg = detect_kernel_config()
+
+    print('# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>'
+          ' : tunables <limit> <batchcount> <sharedfactor>'
+          ' : slabdata <active_slabs> <num_slabs> <sharedavail>')
+
+    if cfg['shared_slab_pages']:
+        obj_cgroups = set()
+        stats = {}
+        caches = {}
+
+        # find memcg pointers belonging to the specified cgroup
+        obj_cgroups.add(memcg.objcg.value_())
+        for ptr in list_for_each_entry('struct obj_cgroup',
+                                       memcg.objcg_list.address_of_(),
+                                       'list'):
+            obj_cgroups.add(ptr.value_())
+
+        # look over all slab folios and look for objects belonging
+        # to the given memory cgroup
+        for slab in for_each_slab(prog):
+            objcg_vec_raw = slab.memcg_data.value_()
+            if objcg_vec_raw == 0:
+                continue
+            cache = slab.slab_cache
+            if not cache:
+                continue
+            addr = cache.value_()
+            caches[addr] = cache
+            # clear the lowest bit to get the true obj_cgroups
+            objcg_vec = Object(prog, 'struct obj_cgroup **',
+                               value=objcg_vec_raw & ~1)
+
+            if addr not in stats:
+                stats[addr] = 0
+
+            for i in range(oo_objects(cache)):
+                if objcg_vec[i].value_() in obj_cgroups:
+                    stats[addr] += 1
+
+        for addr in caches:
+            if stats[addr] > 0:
+                cache_show(caches[addr], cfg, stats[addr])
+
+    else:
+        for s in list_for_each_entry('struct kmem_cache',
+                                     memcg.kmem_caches.address_of_(),
+                                     'memcg_params.kmem_caches_node'):
+            cache_show(s, cfg, None)
+
+
+main()
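
Like iocost_monitor.py, this is a drgn script, so it needs drgn and root access to the running kernel image. A hedged usage sketch (not part of the patch) that captures the emulated slabinfo output for one cgroup and shows its largest caches; '/sys/fs/cgroup/system.slice' is a placeholder and the column positions assume the layout printed above:

    import subprocess

    out = subprocess.run(
        ['drgn', 'memcg_slabinfo.py', '/sys/fs/cgroup/system.slice'],
        check=True, stdout=subprocess.PIPE, text=True,
    ).stdout.splitlines()

    header, rows = out[0], out[1:]
    rows.sort(key=lambda r: int(r.split()[2]), reverse=True)  # sort by <num_objs>
    print(header)
    print('\n'.join(rows[:10]))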