Diffstat (limited to 'src/seastar/scripts')
-rwxr-xr-x  src/seastar/scripts/dpdk_nic_bind.py      539
-rwxr-xr-x  src/seastar/scripts/perftune.py           1157
-rw-r--r--  src/seastar/scripts/perftune.yaml         30
-rwxr-xr-x  src/seastar/scripts/posix_net_conf.sh     81
-rwxr-xr-x  src/seastar/scripts/run_with_dpdk.sh      98
-rwxr-xr-x  src/seastar/scripts/seastar-addr2line     252
-rwxr-xr-x  src/seastar/scripts/seastar-cpu-map.sh    54
-rwxr-xr-x  src/seastar/scripts/seastar-json2code.py  540
-rw-r--r--  src/seastar/scripts/tap.sh                31
9 files changed, 2782 insertions, 0 deletions
diff --git a/src/seastar/scripts/dpdk_nic_bind.py b/src/seastar/scripts/dpdk_nic_bind.py
new file mode 100755
index 00000000..029e98ba
--- /dev/null
+++ b/src/seastar/scripts/dpdk_nic_bind.py
@@ -0,0 +1,539 @@
+#! /usr/bin/python2
+#
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+import sys, os, getopt, subprocess
+from os.path import exists, abspath, dirname, basename
+
+
+# The PCI device class for ETHERNET devices
+ETHERNET_CLASS = "0200"
+
+# global dict of ethernet devices present, indexed by PCI address.
+# Each device within this is itself a dictionary of device properties
+devices = {}
+# list of supported DPDK drivers
+dpdk_drivers = [ "igb_uio", "vfio-pci", "uio_pci_generic" ]
+
+# command-line arg flags
+b_flag = None
+status_flag = False
+force_flag = False
+args = []
+
+def usage():
+ '''Print usage information for the program'''
+ argv0 = basename(sys.argv[0])
+ print """
+Usage:
+------
+
+ %(argv0)s [options] DEVICE1 DEVICE2 ....
+
+where DEVICE1, DEVICE2 etc., are specified via PCI "domain:bus:slot.func" syntax
+or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
+also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
+
+Options:
+ --help, --usage:
+ Display usage information and quit
+
+ --status:
+ Print the current status of all known network interfaces.
+ For each device, it displays the PCI domain, bus, slot and function,
+ along with a text description of the device. Depending upon whether the
+ device is being used by a kernel driver, the igb_uio driver, or no
+ driver, other relevant information will be displayed:
+ * the Linux interface name e.g. if=eth0
+ * the driver being used e.g. drv=igb_uio
+ * any suitable drivers not currently using that device
+ e.g. unused=igb_uio
+ NOTE: if this flag is passed along with a bind/unbind option, the status
+ display will always occur after the other operations have taken place.
+
+ -b driver, --bind=driver:
+ Select the driver to use or \"none\" to unbind the device
+
+ -u, --unbind:
+ Unbind a device (Equivalent to \"-b none\")
+
+ --force:
+ By default, devices which are used by Linux - as indicated by having
+ routes in the routing table - cannot be modified. Using the --force
+ flag overrides this behavior, allowing active links to be forcibly
+ unbound.
+ WARNING: This can lead to loss of network connection and should be used
+ with caution.
+
+Examples:
+---------
+
+To display current device status:
+ %(argv0)s --status
+
+To unbind eth1 from its current driver and bind it to igb_uio
+ %(argv0)s --bind=igb_uio eth1
+
+To unbind 0000:01:00.0 from using any driver
+ %(argv0)s -u 0000:01:00.0
+
+To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
+ %(argv0)s -b ixgbe 02:00.0 02:00.1
+
+ """ % locals() # replace items from local variables
+
+# This is roughly compatible with the check_output function in the subprocess
+# module, which is only available in python 2.7.
+def check_output(args, stderr=None):
+ '''Run a command and capture its output'''
+ return subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=stderr).communicate()[0]
+
+def find_module(mod):
+ '''find the .ko file for kernel module named mod.
+ Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
+ modules directory and finally under the parent directory of
+ the script '''
+ # check $RTE_SDK/$RTE_TARGET directory
+ if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
+ path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],\
+ os.environ['RTE_TARGET'], mod)
+ if exists(path):
+ return path
+
+ # check using depmod
+ try:
+ depmod_out = check_output(["modinfo", "-n", mod], \
+ stderr=subprocess.STDOUT).lower()
+ if "error" not in depmod_out:
+ path = depmod_out.strip()
+ if exists(path):
+ return path
+ except: # if modinfo can't find module, it fails, so continue
+ pass
+
+ # check for a copy based off current path
+ tools_dir = dirname(abspath(sys.argv[0]))
+ if (tools_dir.endswith("tools")):
+ base_dir = dirname(tools_dir)
+ find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
+ if len(find_out) > 0: #something matched
+ path = find_out.splitlines()[0]
+ if exists(path):
+ return path
+
+def check_modules():
+ '''Checks that igb_uio is loaded'''
+ global dpdk_drivers
+
+ fd = open("/proc/modules")
+ loaded_mods = fd.readlines()
+ fd.close()
+
+ # list of supported modules
+ mods = [{"Name" : driver, "Found" : False} for driver in dpdk_drivers]
+
+ # first check if module is loaded
+ for line in loaded_mods:
+ for mod in mods:
+ if line.startswith(mod["Name"]):
+ mod["Found"] = True
+ # special case for vfio_pci (module is named vfio-pci,
+ # but its .ko is named vfio_pci)
+ elif line.replace("_", "-").startswith(mod["Name"]):
+ mod["Found"] = True
+
+ # check if we have at least one loaded module
+ if True not in [mod["Found"] for mod in mods] and b_flag is not None:
+ if b_flag in dpdk_drivers:
+ print "Error - no supported modules(DPDK driver) are loaded"
+ sys.exit(1)
+ else:
+ print "Warning - no supported modules(DPDK driver) are loaded"
+
+ # change DPDK driver list to only contain drivers that are loaded
+ dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
+
+def has_driver(dev_id):
+ '''return true if a device is assigned to a driver. False otherwise'''
+ return "Driver_str" in devices[dev_id]
+
+def get_pci_device_details(dev_id):
+ '''This function gets additional details for a PCI device'''
+ device = {}
+
+ extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
+
+ # parse lspci details
+ for line in extra_info:
+ if len(line) == 0:
+ continue
+ name, value = line.split("\t", 1)
+ name = name.strip(":") + "_str"
+ device[name] = value
+ # check for a unix interface name
+ sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
+ if exists(sys_path):
+ device["Interface"] = ",".join(os.listdir(sys_path))
+ else:
+ device["Interface"] = ""
+ # check if a port is used for ssh connection
+ device["Ssh_if"] = False
+ device["Active"] = ""
+
+ return device
+
+def get_nic_details():
+ '''This function populates the "devices" dictionary. The keys used are
+ the pci addresses (domain:bus:slot.func). The values are themselves
+ dictionaries - one for each NIC.'''
+ global devices
+ global dpdk_drivers
+
+ # clear any old data
+ devices = {}
+ # first loop through and read details for all devices
+ # request machine readable format, with numeric IDs
+ dev = {}
+ dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
+ for dev_line in dev_lines:
+ if (len(dev_line) == 0):
+ if dev["Class"] == ETHERNET_CLASS:
+ #convert device and vendor ids to numbers, then add to global
+ dev["Vendor"] = int(dev["Vendor"],16)
+ dev["Device"] = int(dev["Device"],16)
+ devices[dev["Slot"]] = dict(dev) # use dict to make copy of dev
+ else:
+ name, value = dev_line.split("\t", 1)
+ dev[name.rstrip(":")] = value
+
+ # check which interface, if any, is used for an ssh connection to this
+ # host, so we can mark it later.
+ ssh_if = []
+ route = check_output(["ip", "-o", "route"])
+ # filter out all lines for 169.254 routes
+ route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
+ route.splitlines()))
+ rt_info = route.split()
+ for i in xrange(len(rt_info) - 1):
+ if rt_info[i] == "dev":
+ ssh_if.append(rt_info[i+1])
+
+ # based on the basic info, get extended text details
+ for d in devices.keys():
+ # get additional info and add it to existing data
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d).items())
+
+ for _if in ssh_if:
+ if _if in devices[d]["Interface"].split(","):
+ devices[d]["Ssh_if"] = True
+ devices[d]["Active"] = "*Active*"
+ break
+
+ # add igb_uio to list of supporting modules if needed
+ if "Module_str" in devices[d]:
+ for driver in dpdk_drivers:
+ if driver not in devices[d]["Module_str"]:
+ devices[d]["Module_str"] = devices[d]["Module_str"] + ",%s" % driver
+ else:
+ devices[d]["Module_str"] = ",".join(dpdk_drivers)
+
+ # make sure the driver and module strings do not have any duplicates
+ if has_driver(d):
+ modules = devices[d]["Module_str"].split(",")
+ if devices[d]["Driver_str"] in modules:
+ modules.remove(devices[d]["Driver_str"])
+ devices[d]["Module_str"] = ",".join(modules)
+
+def dev_id_from_dev_name(dev_name):
+ '''Take a device "name" - a string passed in by user to identify a NIC
+ device, and determine the device id - i.e. the domain:bus:slot.func - for
+ it, which can then be used to index into the devices array'''
+ dev = None
+ # check if it's already a suitable index
+ if dev_name in devices:
+ return dev_name
+ # check if it's an index just missing the domain part
+ elif "0000:" + dev_name in devices:
+ return "0000:" + dev_name
+ else:
+ # check if it's an interface name, e.g. eth1
+ for d in devices.keys():
+ if dev_name in devices[d]["Interface"].split(","):
+ return devices[d]["Slot"]
+ # if nothing else matches - error
+ print "Unknown device: %s. " \
+ "Please specify device in \"bus:slot.func\" format" % dev_name
+ sys.exit(1)
+
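+# Illustrative resolution examples for dev_id_from_dev_name() (hypothetical
+# devices; actual results depend on what lspci reports):
+# dev_id_from_dev_name("0000:02:00.0") -> "0000:02:00.0" (already a key)
+# dev_id_from_dev_name("02:00.0") -> "0000:02:00.0" (domain prepended)
+# dev_id_from_dev_name("eth1") -> the PCI slot of the NIC named eth1
+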
+def unbind_one(dev_id, force):
+ '''Unbind the device identified by "dev_id" from its current driver'''
+ dev = devices[dev_id]
+ if not has_driver(dev_id):
+ print "%s %s %s is not currently managed by any driver\n" % \
+ (dev["Slot"], dev["Device_str"], dev["Interface"])
+ return
+
+ # prevent us disconnecting ourselves
+ if dev["Ssh_if"] and not force:
+ print "Routing table indicates that interface %s is active" \
+ ". Skipping unbind" % (dev_id)
+ return
+
+ # write to /sys to unbind
+ filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
+ try:
+ f = open(filename, "a")
+ except:
+ print "Error: unbind failed for %s - Cannot open %s" % (dev_id, filename)
+ sys.exit(1)
+ f.write(dev_id)
+ f.close()
+
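+# The unbind above is the standard sysfs idiom; a shell equivalent would be
+# (illustrative, with a hypothetical device and driver):
+# echo -n 0000:02:00.0 > /sys/bus/pci/drivers/ixgbe/unbind
+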
+def bind_one(dev_id, driver, force):
+ '''Bind the device given by "dev_id" to the driver "driver". If the device
+ is already bound to a different driver, it will be unbound first'''
+ dev = devices[dev_id]
+ saved_driver = None # used to rollback any unbind in case of failure
+
+ # prevent disconnection of our ssh session
+ if dev["Ssh_if"] and not force:
+ print "Routing table indicates that interface %s is active" \
+ ". Not modifying" % (dev_id)
+ return
+
+ # unbind any existing drivers we don't want
+ if has_driver(dev_id):
+ if dev["Driver_str"] == driver:
+ print "%s already bound to driver %s, skipping\n" % (dev_id, driver)
+ return
+ else:
+ saved_driver = dev["Driver_str"]
+ unbind_one(dev_id, force)
+ dev["Driver_str"] = "" # clear driver string
+
+ # if we are binding to one of DPDK drivers, add PCI id's to that driver
+ if driver in dpdk_drivers:
+ filename = "/sys/bus/pci/drivers/%s/new_id" % driver
+ try:
+ f = open(filename, "w")
+ except:
+ print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
+ return
+ try:
+ f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
+ f.close()
+ except:
+ print "Error: bind failed for %s - Cannot write new PCI ID to " \
+ "driver %s" % (dev_id, driver)
+ return
+
+ # do the bind by writing to /sys
+ filename = "/sys/bus/pci/drivers/%s/bind" % driver
+ try:
+ f = open(filename, "a")
+ except:
+ print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
+ if saved_driver is not None: # restore any previous driver
+ bind_one(dev_id, saved_driver, force)
+ return
+ try:
+ f.write(dev_id)
+ f.close()
+ except:
+ # for some reason, writing dev_id to the bind file after adding a new
+ # PCI ID to new_id results in an IOError. however, if the device was
+ # successfully bound, we don't care about the error and can safely
+ # ignore it
+ tmp = get_pci_device_details(dev_id)
+ if "Driver_str" in tmp and tmp["Driver_str"] == driver:
+ return
+ print "Error: bind failed for %s - Cannot bind to driver %s" % (dev_id, driver)
+ if saved_driver is not None: # restore any previous driver
+ bind_one(dev_id, saved_driver, force)
+ return
+
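+# Sketch of the sysfs sequence bind_one() performs for a DPDK driver
+# (illustrative; the 8086:10fb vendor/device pair is just an example):
+# echo "8086 10fb" > /sys/bus/pci/drivers/igb_uio/new_id
+# echo -n 0000:02:00.0 > /sys/bus/pci/drivers/igb_uio/bind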
+
+def unbind_all(dev_list, force=False):
+ """Unbind method, takes a list of device locations"""
+ dev_list = map(dev_id_from_dev_name, dev_list)
+ for d in dev_list:
+ unbind_one(d, force)
+
+def bind_all(dev_list, driver, force=False):
+ """Bind method, takes a list of device locations"""
+ global devices
+
+ dev_list = map(dev_id_from_dev_name, dev_list)
+
+ for d in dev_list:
+ bind_one(d, driver, force)
+
+ # when binding devices to a generic driver (i.e. one that doesn't have a
+ # PCI ID table), some devices that are not bound to any other driver could
+ # be bound even if no one has asked them to. hence, we check the list of
+ # drivers again, and see if some of the previously-unbound devices were
+ # erroneously bound.
+ for d in devices.keys():
+ # skip devices that were already bound or that we know should be bound
+ if "Driver_str" in devices[d] or d in dev_list:
+ continue
+
+ # update information about this device
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d).items())
+
+ # check if updated information indicates that the device was bound
+ if "Driver_str" in devices[d]:
+ unbind_one(d, force)
+
+def display_devices(title, dev_list, extra_params = None):
+ '''Displays to the user the details of a list of devices given in "dev_list"
+ The "extra_params" parameter, if given, should contain a string with
+ %()s fields in it for replacement by the named fields in each device's
+ dictionary.'''
+ strings = [] # this holds the strings to print. We sort before printing
+ print "\n%s" % title
+ print "="*len(title)
+ if len(dev_list) == 0:
+ strings.append("<none>")
+ else:
+ for dev in dev_list:
+ if extra_params is not None:
+ strings.append("%s '%s' %s" % (dev["Slot"], \
+ dev["Device_str"], extra_params % dev))
+ else:
+ strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
+ # sort before printing, so that the entries appear in PCI order
+ strings.sort()
+ print "\n".join(strings) # print one per line
+
+def show_status():
+ '''Function called when the script is passed the "--status" option. Displays
+ to the user what devices are bound to the igb_uio driver, the kernel driver
+ or to no driver'''
+ global dpdk_drivers
+ kernel_drv = []
+ dpdk_drv = []
+ no_drv = []
+
+ # split our list of devices into the three categories above
+ for d in devices.keys():
+ if not has_driver(d):
+ no_drv.append(devices[d])
+ continue
+ if devices[d]["Driver_str"] in dpdk_drivers:
+ dpdk_drv.append(devices[d])
+ else:
+ kernel_drv.append(devices[d])
+
+ # print each category separately, so we can clearly see what's used by DPDK
+ display_devices("Network devices using DPDK-compatible driver", dpdk_drv, \
+ "drv=%(Driver_str)s unused=%(Module_str)s")
+ display_devices("Network devices using kernel driver", kernel_drv,
+ "if=%(Interface)s drv=%(Driver_str)s unused=%(Module_str)s %(Active)s")
+ display_devices("Other network devices", no_drv,\
+ "unused=%(Module_str)s")
+
+def parse_args():
+ '''Parses the command-line arguments given by the user and takes the
+ appropriate action for each'''
+ global b_flag
+ global status_flag
+ global force_flag
+ global args
+ if len(sys.argv) <= 1:
+ usage()
+ sys.exit(0)
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "b:u",
+ ["help", "usage", "status", "force",
+ "bind=", "unbind"])
+ except getopt.GetoptError, error:
+ print str(error)
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ for opt, arg in opts:
+ if opt == "--help" or opt == "--usage":
+ usage()
+ sys.exit(0)
+ if opt == "--status":
+ status_flag = True
+ if opt == "--force":
+ force_flag = True
+ if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
+ if b_flag is not None:
+ print "Error - Only one bind or unbind may be specified\n"
+ sys.exit(1)
+ if opt == "-u" or opt == "--unbind":
+ b_flag = "none"
+ else:
+ b_flag = arg
+
+def do_arg_actions():
+ '''do the actual action requested by the user'''
+ global b_flag
+ global status_flag
+ global force_flag
+ global args
+
+ if b_flag is None and not status_flag:
+ print "Error: No action specified for devices. Please give a -b or -u option"
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ if b_flag is not None and len(args) == 0:
+ print "Error: No devices specified."
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ if b_flag == "none" or b_flag == "None":
+ unbind_all(args, force_flag)
+ elif b_flag is not None:
+ bind_all(args, b_flag, force_flag)
+ if status_flag:
+ if b_flag is not None:
+ get_nic_details() # refresh if we have changed anything
+ show_status()
+
+def main():
+ '''program main function'''
+ parse_args()
+ check_modules()
+ get_nic_details()
+ do_arg_actions()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/seastar/scripts/perftune.py b/src/seastar/scripts/perftune.py
new file mode 100755
index 00000000..50016215
--- /dev/null
+++ b/src/seastar/scripts/perftune.py
@@ -0,0 +1,1157 @@
+#!/usr/bin/python3
+
+import abc
+import argparse
+import enum
+import functools
+import glob
+import itertools
+import multiprocessing
+import os
+import pathlib
+import pyudev
+import re
+import shutil
+import subprocess
+import sys
+import urllib.request
+import yaml
+
+def run_one_command(prog_args, my_stderr=None, check=True):
+ proc = subprocess.Popen(prog_args, stdout = subprocess.PIPE, stderr = my_stderr)
+ outs, errs = proc.communicate()
+ outs = str(outs, 'utf-8')
+
+ if check and proc.returncode != 0:
+ raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=" ".join(prog_args), output=outs, stderr=errs)
+
+ return outs
+
+def run_hwloc_distrib(prog_args):
+ """
+ Returns a list of strings - each representing a single line of hwloc-distrib output.
+ """
+ return run_one_command(['hwloc-distrib'] + prog_args).splitlines()
+
+def run_hwloc_calc(prog_args):
+ """
+ Returns a single string with the result of the execution.
+ """
+ return run_one_command(['hwloc-calc'] + prog_args).rstrip()
+
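+# Illustrative invocations (the exact masks depend on the machine):
+# run_hwloc_calc(['all']) -> e.g. '0x000000ff' on an 8-PU box
+# run_hwloc_calc(['0x000000ff', '~PU:0']) -> e.g. '0x000000fe' (drop PU0)
+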
+def fwriteln(fname, line):
+ try:
+ with open(fname, 'w') as f:
+ f.write(line)
+ except:
+ print("Failed to write into {}: {}".format(fname, sys.exc_info()))
+
+def readlines(fname):
+ try:
+ with open(fname, 'r') as f:
+ return f.readlines()
+ except:
+ print("Failed to read {}: {}".format(fname, sys.exc_info()))
+ return []
+
+def fwriteln_and_log(fname, line):
+ print("Writing '{}' to {}".format(line, fname))
+ fwriteln(fname, line)
+
+double_commas_pattern = re.compile(',,')
+
+def set_one_mask(conf_file, mask):
+ if not os.path.exists(conf_file):
+ raise Exception("Configure file to set mask doesn't exist: {}".format(conf_file))
+ mask = re.sub('0x', '', mask)
+
+ while double_commas_pattern.search(mask):
+ mask = double_commas_pattern.sub(',0,', mask)
+
+ print("Setting mask {} in {}".format(mask, conf_file))
+ fwriteln(conf_file, mask)
+
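+# For example (illustrative), set_one_mask('/proc/irq/42/smp_affinity',
+# '0xff,,0xff') strips the '0x' prefixes, rewrites the empty 32-bit group
+# as '0' and writes 'ff,0,ff' into the affinity file.
+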
+def distribute_irqs(irqs, cpu_mask):
+ # If IRQs' list is empty - do nothing
+ if not irqs:
+ return
+
+ for i, mask in enumerate(run_hwloc_distrib(["{}".format(len(irqs)), '--single', '--restrict', cpu_mask])):
+ set_one_mask("/proc/irq/{}/smp_affinity".format(irqs[i]), mask)
+
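+# For instance (illustrative), distribute_irqs(['32', '33'], '0x0c') asks
+# hwloc-distrib for two single-CPU masks restricted to CPUs 2-3 and writes
+# them to /proc/irq/32/smp_affinity and /proc/irq/33/smp_affinity.
+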
+def is_process_running(name):
+ return len(list(filter(lambda ps_line : not re.search('<defunct>', ps_line), run_one_command(['ps', '--no-headers', '-C', name], check=False).splitlines()))) > 0
+
+def restart_irqbalance(banned_irqs):
+ """
+ Restart irqbalance if it's running and ban it from moving the IRQs from the
+ given list.
+ """
+ config_file = '/etc/default/irqbalance'
+ options_key = 'OPTIONS'
+ systemd = False
+ banned_irqs_list = list(banned_irqs)
+
+ # If there is nothing to ban - quit
+ if not banned_irqs_list:
+ return
+
+ # return early if irqbalance is not running
+ if not is_process_running('irqbalance'):
+ print("irqbalance is not running")
+ return
+
+ if not os.path.exists(config_file):
+ if os.path.exists('/etc/sysconfig/irqbalance'):
+ config_file = '/etc/sysconfig/irqbalance'
+ options_key = 'IRQBALANCE_ARGS'
+ systemd = True
+ elif os.path.exists('/etc/conf.d/irqbalance'):
+ config_file = '/etc/conf.d/irqbalance'
+ options_key = 'IRQBALANCE_OPTS'
+ with open('/proc/1/comm', 'r') as comm:
+ systemd = 'systemd' in comm.read()
+ else:
+ print("Unknown system configuration - not restarting irqbalance!")
+ print("You have to prevent it from moving IRQs {} manually!".format(banned_irqs_list))
+ return
+
+ orig_file = "{}.scylla.orig".format(config_file)
+
+ # Save the original file
+ if not os.path.exists(orig_file):
+ print("Saving the original irqbalance configuration is in {}".format(orig_file))
+ shutil.copyfile(config_file, orig_file)
+ else:
+ print("File {} already exists - not overwriting.".format(orig_file))
+
+ # Read the config file lines
+ cfile_lines = open(config_file, 'r').readlines()
+
+ # Build the new config_file contents with the new options configuration
+ print("Restarting irqbalance: going to ban the following IRQ numbers: {} ...".format(", ".join(banned_irqs_list)))
+
+ # Search for the original options line
+ opt_lines = list(filter(lambda line : re.search("^\s*{}".format(options_key), line), cfile_lines))
+ if not opt_lines:
+ new_options = "{}=\"".format(options_key)
+ elif len(opt_lines) == 1:
+ # cut the last "
+ new_options = re.sub("\"\s*$", "", opt_lines[0].rstrip())
+ else:
+ raise Exception("Invalid format in {}: more than one lines with {} key".format(config_file, options_key))
+
+ for irq in banned_irqs_list:
+ # prevent duplicate "ban" entries for the same IRQ
+ patt_str = "\-\-banirq\={}\Z|\-\-banirq\={}\s".format(irq, irq)
+ if not re.search(patt_str, new_options):
+ new_options += " --banirq={}".format(irq)
+
+ new_options += "\""
+
+ with open(config_file, 'w') as cfile:
+ for line in cfile_lines:
+ if not re.search("^\s*{}".format(options_key), line):
+ cfile.write(line)
+
+ cfile.write(new_options + "\n")
+
+ if systemd:
+ print("Restarting irqbalance via systemctl...")
+ run_one_command(['systemctl', 'try-restart', 'irqbalance'])
+ else:
+ print("Restarting irqbalance directly (init.d)...")
+ run_one_command(['/etc/init.d/irqbalance', 'restart'])
+
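+# After the rewrite above, the irqbalance options line might read, e.g.
+# (illustrative): OPTIONS="--banirq=32 --banirq=33"
+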
+def learn_irqs_from_proc_interrupts(pattern, irq2procline):
+ return [ irq for irq, proc_line in filter(lambda irq_proc_line_pair : re.search(pattern, irq_proc_line_pair[1]), irq2procline.items()) ]
+
+def learn_all_irqs_one(irq_conf_dir, irq2procline, xen_dev_name):
+ """
+ Returns a list of IRQs of a single device.
+
+ irq_conf_dir: a /sys/... directory with the IRQ information for the given device
+ irq2procline: a map of IRQs to the corresponding lines in the /proc/interrupts
+ xen_dev_name: a device name pattern as it appears in the /proc/interrupts on Xen systems
+ """
+ msi_irqs_dir_name = os.path.join(irq_conf_dir, 'msi_irqs')
+ # Device uses MSI IRQs
+ if os.path.exists(msi_irqs_dir_name):
+ return os.listdir(msi_irqs_dir_name)
+
+ irq_file_name = os.path.join(irq_conf_dir, 'irq')
+ # Device uses INT#x
+ if os.path.exists(irq_file_name):
+ return [ line.lstrip().rstrip() for line in open(irq_file_name, 'r').readlines() ]
+
+ # No irq file detected
+ modalias = open(os.path.join(irq_conf_dir, 'modalias'), 'r').readline()
+
+ # virtio case
+ if re.search("^virtio", modalias):
+ return list(itertools.chain.from_iterable(
+ map(lambda dirname : learn_irqs_from_proc_interrupts(dirname, irq2procline),
+ filter(lambda dirname : re.search('virtio', dirname),
+ itertools.chain.from_iterable([ dirnames for dirpath, dirnames, filenames in os.walk(os.path.join(irq_conf_dir, 'driver')) ])))))
+
+ # xen case
+ if re.search("^xen:", modalias):
+ return learn_irqs_from_proc_interrupts(xen_dev_name, irq2procline)
+
+ return []
+
+def get_irqs2procline_map():
+ return { line.split(':')[0].lstrip().rstrip() : line for line in open('/proc/interrupts', 'r').readlines() }
+
+################################################################################
+class PerfTunerBase(metaclass=abc.ABCMeta):
+ def __init__(self, args):
+ self.__args = args
+ self.__args.cpu_mask = run_hwloc_calc(['--restrict', self.__args.cpu_mask, 'all'])
+ self.__mode = None
+ self.__compute_cpu_mask = None
+ self.__irq_cpu_mask = None
+
+#### Public methods ##########################
+ class SupportedModes(enum.IntEnum):
+ """
+ Modes are ordered from the one that cuts the biggest number of CPUs
+ from the compute CPUs' set to the one that takes the smallest ('mq' doesn't
+ cut any CPU from the compute set).
+
+ This fact is used when we calculate the 'common quotient' mode out of a
+ given set of modes (e.g. default modes of different Tuners) - this would
+ be the smallest among the given modes.
+ """
+ sq_split = 0
+ sq = 1
+ mq = 2
+
+ @staticmethod
+ def names():
+ return PerfTunerBase.SupportedModes.__members__.keys()
+
+ @staticmethod
+ def cpu_mask_is_zero(cpu_mask):
+ """
+ The cpu_mask is a comma-separated list of 32-bit hex values, e.g. 0xffff,0x0,0xffff.
+ We want to check whether the whole mask is all-zeros.
+ :param cpu_mask: hwloc-calc generated CPU mask
+ :return: True if mask is zero, False otherwise
+ """
+ for cur_irqs_cpu_mask in cpu_mask.split(','):
+ if int(cur_irqs_cpu_mask, 16) != 0:
+ return False
+
+ return True
+
+ @staticmethod
+ def compute_cpu_mask_for_mode(mq_mode, cpu_mask):
+ mq_mode = PerfTunerBase.SupportedModes(mq_mode)
+ irqs_cpu_mask = 0
+
+ if mq_mode == PerfTunerBase.SupportedModes.sq:
+ # all but CPU0
+ irqs_cpu_mask = run_hwloc_calc([cpu_mask, '~PU:0'])
+ elif mq_mode == PerfTunerBase.SupportedModes.sq_split:
+ # all but CPU0 and its HT siblings
+ irqs_cpu_mask = run_hwloc_calc([cpu_mask, '~core:0'])
+ elif mq_mode == PerfTunerBase.SupportedModes.mq:
+ # all available cores
+ irqs_cpu_mask = cpu_mask
+ else:
+ raise Exception("Unsupported mode: {}".format(mq_mode))
+
+ if PerfTunerBase.cpu_mask_is_zero(irqs_cpu_mask):
+ raise Exception("Bad configuration mode ({}) and cpu-mask value ({}): this results in a zero-mask for "
+ "compute".format(mq_mode.name, cpu_mask))
+
+ return irqs_cpu_mask
+
+ @staticmethod
+ def irqs_cpu_mask_for_mode(mq_mode, cpu_mask):
+ mq_mode = PerfTunerBase.SupportedModes(mq_mode)
+ irqs_cpu_mask = 0
+
+ if mq_mode != PerfTunerBase.SupportedModes.mq:
+ irqs_cpu_mask = run_hwloc_calc([cpu_mask, "~{}".format(PerfTunerBase.compute_cpu_mask_for_mode(mq_mode, cpu_mask))])
+ else: # mq_mode == PerfTunerBase.SupportedModes.mq
+ # distribute equally between all available cores
+ irqs_cpu_mask = cpu_mask
+
+ if PerfTunerBase.cpu_mask_is_zero(irqs_cpu_mask):
+ raise Exception("Bad configuration mode ({}) and cpu-mask value ({}): this results in a zero-mask for "
+ "IRQs".format(mq_mode.name, cpu_mask))
+
+ return irqs_cpu_mask
+
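+ # Illustrative mask arithmetic on a hypothetical 8-PU machine with
+ # cpu_mask == 0xff, 2 HT siblings per core and core 0 owning PUs 0 and 4:
+ # sq: compute = 0xfe (all but PU0), irqs = 0x01
+ # sq_split: compute = 0xee (all but core 0's PUs), irqs = 0x11
+ # mq: compute = 0xff, irqs = 0xff
+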
+ @property
+ def mode(self):
+ """
+ Return the configuration mode
+ """
+ # Make sure the configuration mode is set (see the __set_mode_and_masks() description).
+ if self.__mode is None:
+ self.__set_mode_and_masks()
+
+ return self.__mode
+
+ @mode.setter
+ def mode(self, new_mode):
+ """
+ Set the new configuration mode and recalculate the corresponding masks.
+ """
+ # Make sure the new_mode is of PerfTunerBase.SupportedModes type
+ self.__mode = PerfTunerBase.SupportedModes(new_mode)
+ self.__compute_cpu_mask = PerfTunerBase.compute_cpu_mask_for_mode(self.__mode, self.__args.cpu_mask)
+ self.__irq_cpu_mask = PerfTunerBase.irqs_cpu_mask_for_mode(self.__mode, self.__args.cpu_mask)
+
+ @property
+ def compute_cpu_mask(self):
+ """
+ Return the CPU mask to use for seastar application binding.
+ """
+ # see the __set_mode_and_masks() description
+ if self.__compute_cpu_mask is None:
+ self.__set_mode_and_masks()
+
+ return self.__compute_cpu_mask
+
+ @property
+ def irqs_cpu_mask(self):
+ """
+ Return the mask of CPUs used for IRQs distribution.
+ """
+ # see the __set_mode_and_masks() description
+ if self.__irq_cpu_mask is None:
+ self.__set_mode_and_masks()
+
+ return self.__irq_cpu_mask
+
+ @property
+ def args(self):
+ return self.__args
+
+ @property
+ def irqs(self):
+ return self._get_irqs()
+
+#### "Protected"/Public (pure virtual) methods ###########
+ @abc.abstractmethod
+ def tune(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_def_mode(self):
+ """
+ Return a default configuration mode.
+ """
+ pass
+
+ @abc.abstractmethod
+ def _get_irqs(self):
+ """
+ Return the iterable value with all IRQs to be configured.
+ """
+ pass
+
+#### Private methods ############################
+ def __set_mode_and_masks(self):
+ """
+ Sets the configuration mode and the corresponding CPU masks. We can't
+ initialize them in the constructor because the default mode may depend
+ on the child-specific values that are set in its constructor.
+
+ That's why we postpone the mode's and the corresponding masks'
+ initialization till after the child instance creation.
+ """
+ if self.__args.mode:
+ self.mode = PerfTunerBase.SupportedModes[self.__args.mode]
+ else:
+ self.mode = self._get_def_mode()
+
+#################################################
+class NetPerfTuner(PerfTunerBase):
+ def __init__(self, args):
+ super().__init__(args)
+
+ self.__nic_is_bond_iface = self.__check_dev_is_bond_iface()
+ self.__slaves = self.__learn_slaves()
+
+ # check that self.nic is either a HW device or a bonding interface
+ self.__check_nic()
+
+ self.__irqs2procline = get_irqs2procline_map()
+ self.__nic2irqs = self.__learn_irqs()
+
+#### Public methods ############################
+ def tune(self):
+ """
+ Tune the networking server configuration.
+ """
+ if self.nic_is_hw_iface:
+ print("Setting a physical interface {}...".format(self.nic))
+ self.__setup_one_hw_iface(self.nic)
+ else:
+ print("Setting {} bonding interface...".format(self.nic))
+ self.__setup_bonding_iface()
+
+ # Increase the socket listen() backlog
+ fwriteln_and_log('/proc/sys/net/core/somaxconn', '4096')
+
+ # Increase the maximum number of remembered connection requests that have
+ # not yet received an acknowledgment from the connecting client.
+ fwriteln_and_log('/proc/sys/net/ipv4/tcp_max_syn_backlog', '4096')
+
+ @property
+ def nic_is_bond_iface(self):
+ return self.__nic_is_bond_iface
+
+ @property
+ def nic(self):
+ return self.args.nic
+
+ @property
+ def nic_is_hw_iface(self):
+ return self.__dev_is_hw_iface(self.nic)
+
+ @property
+ def slaves(self):
+ """
+ Returns an iterator over all slaves of args.nic.
+ If args.nic is not a bonding interface an attempt to advance the returned iterator
+ will immediately raise a StopIteration exception - use __dev_is_bond_iface() check to avoid this.
+ """
+ return iter(self.__slaves)
+
+#### Protected methods ##########################
+ def _get_def_mode(self):
+ if self.nic_is_bond_iface:
+ return min(map(self.__get_hw_iface_def_mode, filter(self.__dev_is_hw_iface, self.slaves)))
+ else:
+ return self.__get_hw_iface_def_mode(self.nic)
+
+ def _get_irqs(self):
+ """
+ Returns the iterator for all IRQs that are going to be configured (according to args.nic parameter).
+ For instance, for a bonding interface that's going to include IRQs of all its slaves.
+ """
+ return itertools.chain.from_iterable(self.__nic2irqs.values())
+
+#### Private methods ############################
+ @property
+ def __rfs_table_size(self):
+ return 32768
+
+ def __check_nic(self):
+ """
+ Checks that self.nic is a supported interface
+ """
+ if not self.nic_is_hw_iface and not self.nic_is_bond_iface:
+ raise Exception("Not supported virtual device {}".format(self.nic))
+
+ def __get_irqs_one(self, iface):
+ """
+ Returns the list of IRQ numbers for the given interface.
+ """
+ return self.__nic2irqs[iface]
+
+ def __setup_rfs(self, iface):
+ rps_limits = glob.glob("/sys/class/net/{}/queues/*/rps_flow_cnt".format(iface))
+ one_q_limit = int(self.__rfs_table_size / len(rps_limits))
+
+ # If RFS feature is not present - get out
+ try:
+ run_one_command(['sysctl', 'net.core.rps_sock_flow_entries'])
+ except:
+ return
+
+ # Enable RFS
+ print("Setting net.core.rps_sock_flow_entries to {}".format(self.__rfs_table_size))
+ run_one_command(['sysctl', '-w', 'net.core.rps_sock_flow_entries={}'.format(self.__rfs_table_size)])
+
+ # Set each RPS queue limit
+ for rfs_limit_cnt in rps_limits:
+ print("Setting limit {} in {}".format(one_q_limit, rfs_limit_cnt))
+ fwriteln(rfs_limit_cnt, "{}".format(one_q_limit))
+
+ # Enable ntuple filtering HW offload on the NIC
+ print("Trying to enable ntuple filtering HW offload for {}...".format(iface), end='')
+ try:
+ run_one_command(['ethtool','-K', iface, 'ntuple', 'on'], stderr=subprocess.DEVNULL)
+ print("ok")
+ except:
+ print("not supported")
+
+ def __setup_rps(self, iface, mask):
+ for one_rps_cpus in self.__get_rps_cpus(iface):
+ set_one_mask(one_rps_cpus, mask)
+
+ self.__setup_rfs(iface)
+
+ def __setup_xps(self, iface):
+ xps_cpus_list = glob.glob("/sys/class/net/{}/queues/*/xps_cpus".format(iface))
+ masks = run_hwloc_distrib(["{}".format(len(xps_cpus_list))])
+
+ for i, mask in enumerate(masks):
+ set_one_mask(xps_cpus_list[i], mask)
+
+ def __dev_is_hw_iface(self, iface):
+ return os.path.exists("/sys/class/net/{}/device".format(iface))
+
+ def __check_dev_is_bond_iface(self):
+ if not os.path.exists('/sys/class/net/bonding_masters'):
+ return False
+
+ return any([re.search(self.nic, line) for line in open('/sys/class/net/bonding_masters', 'r').readlines()])
+
+ def __learn_slaves(self):
+ if self.nic_is_bond_iface:
+ return list(itertools.chain.from_iterable([ line.split() for line in open("/sys/class/net/{}/bonding/slaves".format(self.nic), 'r').readlines() ]))
+
+ return []
+
+ def __intel_irq_to_queue_idx(self, irq):
+ """
+ Return the HW queue index for a given IRQ for Intel NICs in order to sort the IRQs' list by this index.
+
+ Intel's fast path IRQs have the following name convention:
+ <bla-bla>-TxRx-<queue index>
+
+ Intel NICs also have an IRQ for the Flow Director (which is not a regular fast path IRQ) whose name looks like
+ this:
+ <bla-bla>:fdir-TxRx-<index>
+
+ We want to put the Flow Director's IRQ at the end of the sorted list of IRQs.
+
+ :param irq: IRQ number
+ :return: HW queue index for Intel NICs and 0 for all other NICs
+ """
+ intel_fp_irq_re = re.compile("\-TxRx\-(\d+)")
+ fdir_re = re.compile("fdir\-TxRx\-\d+")
+
+ m = intel_fp_irq_re.search(self.__irqs2procline[irq])
+ m1 = fdir_re.search(self.__irqs2procline[irq])
+ if m and not m1:
+ return int(m.group(1))
+ else:
+ return sys.maxsize
+
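+ # Example /proc/interrupts action names and the sort keys they produce
+ # (illustrative):
+ # 'eth2-TxRx-3' -> 3
+ # 'eth2:fdir-TxRx-0' -> sys.maxsize (the Flow Director IRQ goes last)
+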
+ def __learn_irqs_one(self, iface):
+ """
+ This is a slow method that is going to read from the system files. Never
+ use it outside the initialization code. Use __get_irqs_one() instead.
+
+ Filter the fast path queues IRQs from the __get_all_irqs_one() result according to the known
+ patterns.
+ Right now we know about the following naming convention of the fast path queues vectors:
+ - Intel: <bla-bla>-TxRx-<bla-bla>
+ - Broadcom: <bla-bla>-fp-<bla-bla>
+ - ena: <bla-bla>-Tx-Rx-<bla-bla>
+
+ So, we will try to filter the entries in /proc/interrupts for IRQs we've got from get_all_irqs_one()
+ according to the patterns above.
+
+ If as a result all IRQs are filtered out (if there are no IRQs with the names from the patterns above) then
+ this means that the given NIC uses a different IRQs naming pattern. In this case we won't filter any IRQ.
+
+ Otherwise, we will use only IRQs whose names fit one of the patterns above.
+
+ For NICs with a limited number of Rx queues the IRQs that handle Rx are going to be at the beginning of the
+ list.
+ """
+ # filter 'all_irqs' to only reference valid keys from 'irqs2procline' and avoid an IndexError on the 'irqs' search below
+ all_irqs = set(learn_all_irqs_one("/sys/class/net/{}/device".format(iface), self.__irqs2procline, iface)).intersection(self.__irqs2procline.keys())
+ fp_irqs_re = re.compile("\-TxRx\-|\-fp\-|\-Tx\-Rx\-")
+ irqs = list(filter(lambda irq : fp_irqs_re.search(self.__irqs2procline[irq]), all_irqs))
+ if irqs:
+ irqs.sort(key=self.__intel_irq_to_queue_idx)
+ return irqs
+ else:
+ return list(all_irqs)
+
+ def __learn_irqs(self):
+ """
+ This is a slow method that is going to read from the system files. Never
+ use it outside the initialization code.
+ """
+ if self.nic_is_bond_iface:
+ return { slave : self.__learn_irqs_one(slave) for slave in filter(self.__dev_is_hw_iface, self.slaves) }
+ else:
+ return { self.nic : self.__learn_irqs_one(self.nic) }
+
+ def __get_rps_cpus(self, iface):
+ """
+ Returns all rps_cpus file names for the given HW interface.
+
+ There is a single rps_cpus file for each RPS queue and there is a single RPS
+ queue for each HW Rx queue. Each HW Rx queue should have an IRQ.
+ Therefore the number of these files is equal to the number of fast path Rx IRQs for this interface.
+ """
+ return glob.glob("/sys/class/net/{}/queues/*/rps_cpus".format(iface))
+
+ def __setup_one_hw_iface(self, iface):
+ max_num_rx_queues = self.__max_rx_queue_count(iface)
+ all_irqs = self.__get_irqs_one(iface)
+
+ # Bind the NIC's IRQs according to the configuration mode
+ #
+ # If this NIC has a limited number of Rx queues then we want to distribute their IRQs separately.
+ # For such NICs we've sorted IRQs list so that IRQs that handle Rx are all at the head of the list.
+ if max_num_rx_queues < len(all_irqs):
+ num_rx_queues = self.__get_rx_queue_count(iface)
+ print("Distributing IRQs handling Rx:")
+ distribute_irqs(all_irqs[0:num_rx_queues], self.irqs_cpu_mask)
+ print("Distributing the rest of IRQs")
+ distribute_irqs(all_irqs[num_rx_queues:], self.irqs_cpu_mask)
+ else:
+ print("Distributing all IRQs")
+ distribute_irqs(all_irqs, self.irqs_cpu_mask)
+
+ self.__setup_rps(iface, self.compute_cpu_mask)
+ self.__setup_xps(iface)
+
+ def __setup_bonding_iface(self):
+ for slave in self.slaves:
+ if self.__dev_is_hw_iface(slave):
+ print("Setting up {}...".format(slave))
+ self.__setup_one_hw_iface(slave)
+ else:
+ print("Skipping {} (not a physical slave device?)".format(slave))
+
+ def __max_rx_queue_count(self, iface):
+ """
+ :param iface: Interface to check
+ :return: The maximum number of RSS queues for the given interface if there is a known limitation and
+ sys.maxsize otherwise.
+
+ Networking drivers serving HW with the known maximum RSS queue limitation (due to lack of RSS bits):
+
+ ixgbe: PF NICs support up to 16 RSS queues.
+ ixgbevf: VF NICs support up to 4 RSS queues.
+ i40e: PF NICs support up to 64 RSS queues.
+ i40evf: VF NICs support up to 16 RSS queues.
+
+ """
+ driver_to_max_rss = {'ixgbe': 16, 'ixgbevf': 4, 'i40e': 64, 'i40evf': 16}
+
+ driver_name = ''
+ ethtool_i_lines = run_one_command(['ethtool', '-i', iface]).splitlines()
+ driver_re = re.compile("driver:")
+ driver_lines = list(filter(lambda one_line: driver_re.search(one_line), ethtool_i_lines))
+
+ if driver_lines:
+ if len(driver_lines) > 1:
+ raise Exception("More than one 'driver:' entries in the 'ethtool -i {}' output. Unable to continue.".format(iface))
+
+ driver_name = driver_lines[0].split()[1].strip()
+
+ return driver_to_max_rss.get(driver_name, sys.maxsize)
+
+ def __get_rx_queue_count(self, iface):
+ """
+ :return: the RSS Rx queues count for the given interface.
+ """
+ num_irqs = len(self.__get_irqs_one(iface))
+ rx_queues_count = len(self.__get_rps_cpus(iface))
+
+ if rx_queues_count == 0:
+ rx_queues_count = num_irqs
+
+ return min(self.__max_rx_queue_count(iface), rx_queues_count)
+
+ def __get_hw_iface_def_mode(self, iface):
+ """
+ Returns the default configuration mode for the given interface.
+ """
+ rx_queues_count = self.__get_rx_queue_count(iface)
+
+ num_cores = int(run_hwloc_calc(['--number-of', 'core', 'machine:0', '--restrict', self.args.cpu_mask]))
+ num_PUs = int(run_hwloc_calc(['--number-of', 'PU', 'machine:0', '--restrict', self.args.cpu_mask]))
+
+ if num_PUs <= 4 or rx_queues_count == num_PUs:
+ return PerfTunerBase.SupportedModes.mq
+ elif num_cores <= 4:
+ return PerfTunerBase.SupportedModes.sq
+ else:
+ return PerfTunerBase.SupportedModes.sq_split
+
+#################################################
+class DiskPerfTuner(PerfTunerBase):
+ class SupportedDiskTypes(enum.IntEnum):
+ nvme = 0
+ non_nvme = 1
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ if not (self.args.dirs or self.args.devs):
+ raise Exception("'disks' tuning was requested but neither directories nor storage devices were given")
+
+ self.__pyudev_ctx = pyudev.Context()
+ self.__dir2disks = self.__learn_directories()
+ self.__irqs2procline = get_irqs2procline_map()
+ self.__disk2irqs = self.__learn_irqs()
+ self.__type2diskinfo = self.__group_disks_info_by_type()
+
+ # sets of devices that have already been tuned
+ self.__io_scheduler_tuned_devs = set()
+ self.__nomerges_tuned_devs = set()
+
+#### Public methods #############################
+ def tune(self):
+ """
+ Distribute IRQs according to the requested mode (args.mode):
+ - Distribute NVMe disks' IRQs equally among all available CPUs.
+ - Distribute non-NVMe disks' IRQs equally among designated CPUs or among
+ all available CPUs in the 'mq' mode.
+ """
+ mode_cpu_mask = PerfTunerBase.irqs_cpu_mask_for_mode(self.mode, self.args.cpu_mask)
+
+ non_nvme_disks, non_nvme_irqs = self.__disks_info_by_type(DiskPerfTuner.SupportedDiskTypes.non_nvme)
+ if non_nvme_disks:
+ print("Setting non-NVMe disks: {}...".format(", ".join(non_nvme_disks)))
+ distribute_irqs(non_nvme_irqs, mode_cpu_mask)
+ self.__tune_disks(non_nvme_disks)
+ else:
+ print("No non-NVMe disks to tune")
+
+ nvme_disks, nvme_irqs = self.__disks_info_by_type(DiskPerfTuner.SupportedDiskTypes.nvme)
+ if nvme_disks:
+ print("Setting NVMe disks: {}...".format(", ".join(nvme_disks)))
+ distribute_irqs(nvme_irqs, self.args.cpu_mask)
+ self.__tune_disks(nvme_disks)
+ else:
+ print("No NVMe disks to tune")
+
+#### Protected methods ##########################
+ def _get_def_mode(self):
+ """
+ Return a default configuration mode.
+ """
+ # if the only disks we are tuning are NVMe disks - return the MQ mode
+ non_nvme_disks, non_nvme_irqs = self.__disks_info_by_type(DiskPerfTuner.SupportedDiskTypes.non_nvme)
+ if not non_nvme_disks:
+ return PerfTunerBase.SupportedModes.mq
+
+ num_cores = int(run_hwloc_calc(['--number-of', 'core', 'machine:0', '--restrict', self.args.cpu_mask]))
+ num_PUs = int(run_hwloc_calc(['--number-of', 'PU', 'machine:0', '--restrict', self.args.cpu_mask]))
+ if num_PUs <= 4:
+ return PerfTunerBase.SupportedModes.mq
+ elif num_cores <= 4:
+ return PerfTunerBase.SupportedModes.sq
+ else:
+ return PerfTunerBase.SupportedModes.sq_split
+
+ def _get_irqs(self):
+ return itertools.chain.from_iterable(irqs for disks, irqs in self.__type2diskinfo.values())
+
+#### Private methods ############################
+ @property
+ def __io_schedulers(self):
+ """
+ :return: An ordered list of IO schedulers that we want to configure. Schedulers are ordered by their priority
+ from the highest (left most) to the lowest.
+ """
+ return ["none", "noop"]
+
+ @property
+ def __nomerges(self):
+ return '2'
+
+ def __disks_info_by_type(self, disks_type):
+ """
+ Returns a tuple ( [<disks>], [<irqs>] ) for the given disks type.
+ IRQs numbers in the second list are promised to be unique.
+ """
+ return self.__type2diskinfo[DiskPerfTuner.SupportedDiskTypes(disks_type)]
+
+ def __nvme_fast_path_irq_filter(self, irq):
+ """
+ Return True for fast path NVMe IRQs.
+ For an NVMe device only queues 1 to <number of CPUs> are going to do fast path work.
+
+ NVMe IRQs have the following name convention:
+ nvme<device index>q<queue index>, e.g. nvme0q7
+
+ :param irq: IRQ number
+ :return: True if this IRQ is an IRQ of a FP NVMe queue.
+ """
+ nvme_irq_re = re.compile(r'(\s|^)nvme\d+q(\d+)(\s|$)')
+
+ # There may be more than a single HW queue bound to the same IRQ. In this case queue names are going to be
+ # comma-separated
+ split_line = self.__irqs2procline[irq].split(",")
+
+ for line in split_line:
+ m = nvme_irq_re.search(line)
+ if m and 0 < int(m.group(2)) <= multiprocessing.cpu_count():
+ return True
+
+ return False
+
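+ # E.g. (illustrative) on an 8-CPU machine 'nvme0q3' passes the filter,
+ # while 'nvme0q0' (the admin queue) and 'nvme0q12' (above the CPU count)
+ # do not.
+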
+ def __group_disks_info_by_type(self):
+ """
+ Return a map of tuples ( [<disks>], [<irqs>] ), where "disks" are all disks of the specific type
+ and "irqs" are the corresponding IRQs.
+
+ It's promised that every element in "disks" and "irqs" is unique.
+
+ The disk types are 'nvme' and 'non-nvme'
+ """
+ disks_info_by_type = {}
+ nvme_disks = set()
+ nvme_irqs = set()
+ non_nvme_disks = set()
+ non_nvme_irqs = set()
+ nvme_disk_name_pattern = re.compile('^nvme')
+
+ for disk, irqs in self.__disk2irqs.items():
+ if nvme_disk_name_pattern.search(disk):
+ nvme_disks.add(disk)
+ for irq in irqs:
+ nvme_irqs.add(irq)
+ else:
+ non_nvme_disks.add(disk)
+ for irq in irqs:
+ non_nvme_irqs.add(irq)
+
+ if not (nvme_disks or non_nvme_disks):
+ raise Exception("'disks' tuning was requested but no disks were found")
+
+ nvme_irqs = list(nvme_irqs)
+
+ # There is a known issue with Xen hypervisor that exposes itself on AWS i3 instances where nvme module
+ # over-allocates HW queues and uses only queues 1,2,3,..., <up to number of CPUs> for data transfer.
+ # On these instances we will distribute only these queues.
+ try:
+ aws_instance_type = urllib.request.urlopen("http://169.254.169.254/latest/meta-data/instance-type", timeout=0.1).read().decode()
+ if re.match(r'i3\.\w+', aws_instance_type):
+ nvme_irqs = list(filter(self.__nvme_fast_path_irq_filter, nvme_irqs))
+ except urllib.error.URLError:
+ # Non-AWS case
+ pass
+
+ # Sort IRQs for easier verification
+ nvme_irqs.sort(key=lambda irq_num_str: int(irq_num_str))
+
+ disks_info_by_type[DiskPerfTuner.SupportedDiskTypes.nvme] = (list(nvme_disks), nvme_irqs)
+ disks_info_by_type[DiskPerfTuner.SupportedDiskTypes.non_nvme] = ( list(non_nvme_disks), list(non_nvme_irqs) )
+
+ return disks_info_by_type
+
+ def __learn_directories(self):
+ return { directory : self.__learn_directory(directory) for directory in self.args.dirs }
+
+ def __learn_directory(self, directory, recur=False):
+ """
+ Returns a list of disks the given directory is mounted on (there will be more than one if
+ the mount point is on a RAID volume)
+ """
+ if not os.path.exists(directory):
+ if not recur:
+ print("{} doesn't exist - skipping".format(directory))
+
+ return []
+
+ try:
+ udev_obj = pyudev.Device.from_device_number(self.__pyudev_ctx, 'block', os.stat(directory).st_dev)
+ return self.__get_phys_devices(udev_obj)
+ except:
+ # handle cases like ecryptfs where the directory is mounted to another directory and not to some block device
+ filesystem = run_one_command(['df', '-P', directory]).splitlines()[-1].split()[0].strip()
+ if not re.search(r'^/dev/', filesystem):
+ devs = self.__learn_directory(filesystem, True)
+ else:
+ raise Exception("Logic error: failed to create a udev device while 'df -P' {} returns a {}".format(directory, filesystem))
+
+ # log error only for the original directory
+ if not recur and not devs:
+ print("Can't get a block device for {} - skipping".format(directory))
+
+ return devs
+
+ def __get_phys_devices(self, udev_obj):
+ # if device is a virtual device - the underlying physical devices are going to be its slaves
+ if re.search(r'virtual', udev_obj.sys_path):
+ return list(itertools.chain.from_iterable([ self.__get_phys_devices(pyudev.Device.from_device_file(self.__pyudev_ctx, "/dev/{}".format(slave))) for slave in os.listdir(os.path.join(udev_obj.sys_path, 'slaves')) ]))
+ else:
+ # device node is something like /dev/sda1 - we need only the part without /dev/
+ return [ re.match(r'/dev/(\S+\d*)', udev_obj.device_node).group(1) ]
+
+ def __learn_irqs(self):
+ disk2irqs = {}
+
+ for devices in list(self.__dir2disks.values()) + [ self.args.devs ]:
+ for device in devices:
+ # There could be that some of the given directories are on the same disk.
+ # There is no need to rediscover IRQs of the disk we've already handled.
+ if device in disk2irqs.keys():
+ continue
+
+ udev_obj = pyudev.Device.from_device_file(self.__pyudev_ctx, "/dev/{}".format(device))
+ dev_sys_path = udev_obj.sys_path
+ split_sys_path = list(pathlib.PurePath(dev_sys_path).parts)
+
+ # first part is always /sys/devices/pciXXX ...
+ controller_path_parts = split_sys_path[0:4]
+
+ # ...then there is a chain of one or more "domain:bus:device.function" followed by the storage device enumeration crap
+ # e.g. /sys/devices/pci0000:00/0000:00:1f.2/ata2/host1/target1:0:0/1:0:0:0/block/sda/sda3 or
+ # /sys/devices/pci0000:00/0000:00:02.0/0000:02:00.0/host6/target6:2:0/6:2:0:0/block/sda/sda1
+ # We want only the path up to and including the last BDF - it contains the IRQ information.
+
+ patt = re.compile("^[0-9ABCDEFabcdef]{4}\:[0-9ABCDEFabcdef]{2}\:[0-9ABCDEFabcdef]{2}\.[0-9ABCDEFabcdef]$")
+ for split_sys_path_branch in split_sys_path[4:]:
+ if patt.search(split_sys_path_branch):
+ controller_path_parts.append(split_sys_path_branch)
+ else:
+ break
+
+ controller_path_str = functools.reduce(lambda x, y : os.path.join(x, y), controller_path_parts)
+ disk2irqs[device] = learn_all_irqs_one(controller_path_str, self.__irqs2procline, 'blkif')
+
+ return disk2irqs
+
+ def __get_feature_file(self, dev_node, path_creator):
+ """
+ Find the closest ancestor with the given feature and return its ('feature file', 'device node') tuple.
+
+ If there isn't such an ancestor - return (None, None) tuple.
+
+ :param dev_node Device node file name, e.g. /dev/sda1
+ :param path_creator A functor that creates a feature file name given a device system file name
+ """
+ udev = pyudev.Device.from_device_file(pyudev.Context(), dev_node)
+ feature_file = path_creator(udev.sys_path)
+
+ if os.path.exists(feature_file):
+ return feature_file, dev_node
+ elif udev.parent is not None:
+ return self.__get_feature_file(udev.parent.device_node, path_creator)
+ else:
+ return None, None
+
+ def __tune_one_feature(self, dev_node, path_creator, value, tuned_devs_set):
+ """
+ Find the closest ancestor that has the given feature, configure it and
+ return True.
+
+ If there isn't such ancestor - return False.
+
+ :param dev_node Device node file name, e.g. /dev/sda1
+ :param path_creator A functor that creates a feature file name given a device system file name
+ """
+ feature_file, feature_node = self.__get_feature_file(dev_node, path_creator)
+
+ if feature_file is None:
+ return False
+
+ if feature_node not in tuned_devs_set:
+ fwriteln_and_log(feature_file, value)
+ tuned_devs_set.add(feature_node)
+
+ return True
+
+ def __tune_io_scheduler(self, dev_node, io_scheduler):
+ return self.__tune_one_feature(dev_node, lambda p : os.path.join(p, 'queue', 'scheduler'), io_scheduler, self.__io_scheduler_tuned_devs)
+
+ def __tune_nomerges(self, dev_node):
+ return self.__tune_one_feature(dev_node, lambda p : os.path.join(p, 'queue', 'nomerges'), self.__nomerges, self.__nomerges_tuned_devs)
+
+ def __get_io_scheduler(self, dev_node):
+ """
+ Return a supported scheduler that is also present in the required schedulers list (__io_schedulers).
+
+ If there isn't such a supported scheduler - return None.
+ """
+ feature_file, feature_node = self.__get_feature_file(dev_node, lambda p : os.path.join(p, 'queue', 'scheduler'))
+
+ lines = readlines(feature_file)
+ if not lines:
+ return None
+
+ # Supported schedulers appear in the config file as a single line as follows:
+ #
+ # sched1 [sched2] sched3
+ #
+ # ...with one or more schedulers where the currently selected scheduler is the one in brackets.
+ #
+ # Return the scheduler with the highest priority among those that are supported for the current device.
+ supported_schedulers = frozenset([scheduler.lstrip("[").rstrip("]") for scheduler in lines[0].split(" ")])
+ return next((scheduler for scheduler in self.__io_schedulers if scheduler in supported_schedulers), None)
+
+ def __tune_disk(self, device):
+ dev_node = "/dev/{}".format(device)
+ io_scheduler = self.__get_io_scheduler(dev_node)
+
+ if not io_scheduler:
+ print("Not setting I/O Scheduler for {} - required schedulers ({}) are not supported".format(device, list(self.__io_schedulers)))
+ elif not self.__tune_io_scheduler(dev_node, io_scheduler):
+ print("Not setting I/O Scheduler for {} - feature not present".format(device))
+
+ if not self.__tune_nomerges(dev_node):
+ print("Not setting 'nomerges' for {} - feature not present".format(device))
+
+ def __tune_disks(self, disks):
+ for disk in disks:
+ self.__tune_disk(disk)
+
+################################################################################
+class TuneModes(enum.Enum):
+ disks = 0
+ net = 1
+
+ @staticmethod
+ def names():
+ return list(TuneModes.__members__.keys())
+
+argp = argparse.ArgumentParser(description = 'Configure various system parameters in order to improve the seastar application performance.', formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog=
+'''
+This script will:
+
+ - Ban relevant IRQs from being moved by irqbalance.
+ - Configure various system parameters in /proc/sys.
+ - Distribute the IRQs (using SMP affinity configuration) among CPUs according to the configuration mode (see below).
+
+As a result some of the CPUs may be destined to only handle the IRQs and taken out of the CPU set
+that should be used to run the seastar application ("compute CPU set").
+
+Modes description:
+
+ sq - set all IRQs of a given NIC to CPU0 and configure RPS
+ to spread NAPIs' handling between other CPUs.
+
+ sq_split - divide all IRQs of a given NIC between CPU0 and its HT siblings and configure RPS
+ to spread NAPIs' handling between other CPUs.
+
+ mq - distribute NIC's IRQs among all CPUs instead of binding
+ them all to CPU0. In this mode RPS is always enabled to
+ spread NAPIs' handling between all CPUs.
+
+ If no mode is given, the script will use a default mode:
+ - If the number of physical CPU cores per Rx HW queue is greater than 4 - use the 'sq_split' mode.
+ - Otherwise, if the number of hyperthreads per Rx HW queue is greater than 4 - use the 'sq' mode.
+ - Otherwise use the 'mq' mode.
+
+Default values:
+
+ --nic NIC - default: eth0
+ --cpu-mask MASK - default: all available cores mask
+''')
+argp.add_argument('--mode', choices=PerfTunerBase.SupportedModes.names(), help='configuration mode')
+argp.add_argument('--nic', help='network interface name, by default uses \'eth0\'')
+argp.add_argument('--get-cpu-mask', action='store_true', help="print the CPU mask to be used for compute")
+argp.add_argument('--tune', choices=TuneModes.names(), help="components to configure (may be given more than once)", action='append', default=[])
+argp.add_argument('--cpu-mask', help="mask of cores to use, by default use all available cores", metavar='MASK')
+argp.add_argument('--dir', help="directory to optimize (may appear more than once)", action='append', dest='dirs', default=[])
+argp.add_argument('--dev', help="device to optimize (may appear more than once), e.g. sda1", action='append', dest='devs', default=[])
+argp.add_argument('--options-file', help="configuration YAML file")
+argp.add_argument('--dump-options-file', action='store_true', help="Print the configuration YAML file containing the current configuration")
+
+def parse_options_file(prog_args):
+ if not prog_args.options_file:
+ return
+
+    y = yaml.safe_load(open(prog_args.options_file))
+ if y is None:
+ return
+
+ if 'mode' in y and not prog_args.mode:
+ if not y['mode'] in PerfTunerBase.SupportedModes.names():
+ raise Exception("Bad 'mode' value in {}: {}".format(prog_args.options_file, y['mode']))
+ prog_args.mode = y['mode']
+
+ if 'nic' in y and not prog_args.nic:
+ prog_args.nic = y['nic']
+
+ if 'tune' in y:
+ if set(y['tune']) <= set(TuneModes.names()):
+ prog_args.tune.extend(y['tune'])
+ else:
+ raise Exception("Bad 'tune' value in {}: {}".format(prog_args.options_file, y['tune']))
+
+ if 'cpu_mask' in y and not prog_args.cpu_mask:
+ hex_32bit_pattern='0x[0-9a-fA-F]{1,8}'
+ mask_pattern = re.compile('^{}((,({})?)*,{})*$'.format(hex_32bit_pattern, hex_32bit_pattern, hex_32bit_pattern))
+ if mask_pattern.match(str(y['cpu_mask'])):
+ prog_args.cpu_mask = y['cpu_mask']
+ else:
+ raise Exception("Bad 'cpu_mask' value in {}: {}".format(prog_args.options_file, str(y['cpu_mask'])))
+
+ if 'dir' in y:
+ prog_args.dirs.extend(y['dir'])
+
+ if 'dev' in y:
+ prog_args.devs.extend(y['dev'])
+
+def dump_config(prog_args):
+ prog_options = {}
+
+ if prog_args.mode:
+ prog_options['mode'] = prog_args.mode
+
+ if prog_args.nic:
+ prog_options['nic'] = prog_args.nic
+
+ if prog_args.tune:
+ prog_options['tune'] = prog_args.tune
+
+ if prog_args.cpu_mask:
+ prog_options['cpu_mask'] = prog_args.cpu_mask
+
+ if prog_args.dirs:
+ prog_options['dir'] = prog_args.dirs
+
+ if prog_args.devs:
+ prog_options['dev'] = prog_args.devs
+
+ print(yaml.dump(prog_options, default_flow_style=False))
+################################################################################
+
+args = argp.parse_args()
+parse_options_file(args)
+
+# if nothing to tune was requested - quit with an error
+if not args.tune:
+    sys.exit("ERROR: At least one tune mode MUST be given.")
+
+# set default values #####################
+if not args.nic:
+ args.nic = 'eth0'
+
+if not args.cpu_mask:
+ args.cpu_mask = run_hwloc_calc(['all'])
+##########################################
+
+if args.dump_options_file:
+ dump_config(args)
+ sys.exit(0)
+
+try:
+ tuners = []
+
+ if TuneModes.disks.name in args.tune:
+ tuners.append(DiskPerfTuner(args))
+
+ if TuneModes.net.name in args.tune:
+ tuners.append(NetPerfTuner(args))
+
+ # Set the minimum mode among all tuners
+ mode = min([ tuner.mode for tuner in tuners ])
+ for tuner in tuners:
+ tuner.mode = mode
+
+ if args.get_cpu_mask:
+ # Print the compute mask from the first tuner - it's going to be the same in all of them
+ print(tuners[0].compute_cpu_mask)
+ else:
+ # Tune the system
+ restart_irqbalance(itertools.chain.from_iterable([ tuner.irqs for tuner in tuners ]))
+
+ for tuner in tuners:
+ tuner.tune()
+except Exception as e:
+ sys.exit("ERROR: {}. Your system can't be tuned until the issue is fixed.".format(e))
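For reference, the default-mode selection described in the epilog boils down to two threshold checks. A minimal sketch, with the per-queue counts passed in as parameters in place of the hwloc/ethtool queries the script actually performs:

    # Sketch of the default-mode heuristic from the epilog above.
    # The two counts are assumed inputs, not real perftune.py helpers.
    def default_mode(cores_per_rx_queue, hts_per_rx_queue):
        if cores_per_rx_queue > 4:
            return 'sq_split'
        if hts_per_rx_queue > 4:
            return 'sq'
        return 'mq'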
+
diff --git a/src/seastar/scripts/perftune.yaml b/src/seastar/scripts/perftune.yaml
new file mode 100644
index 00000000..e8237339
--- /dev/null
+++ b/src/seastar/scripts/perftune.yaml
@@ -0,0 +1,30 @@
+# Mode is one of the following values:
+# - 'mq'
+# - 'sq'
+# - 'sq_split'
+#mode: 'sq_split'
+
+# Name of the NIC to tune, e.g. eth7.
+# By default 'eth0' is used.
+#nic: eth7
+
+# If 'true' the script will print the CPU mask to be used for compute.
+#get_cpu_mask: true
+
+# Define what to tune: may be any combination of values from the {'net', 'disks'} set.
+#tune:
+# - net
+# - disks
+
+# Mask of cores to use, by default use all available cores.
+#cpu_mask: '0x00f,,,0x0,,0x00f'
+
+# Set of directories to optimize.
+#dir:
+# - /root
+# - /home
+
+# Set of disk devices to optimize
+#dev:
+# - /dev/sda2
+# - /dev/md0
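The commented cpu_mask above shows the multi-word form the script accepts: comma-separated 32-bit hex words (empty words are allowed, as in the hwloc mask syntax). A minimal sketch of the validation parse_options_file applies to this value, reusing the regular expression the script builds:

    import re

    # The same pattern parse_options_file() compiles for 'cpu_mask'.
    hex_32bit = '0x[0-9a-fA-F]{1,8}'
    mask_pattern = re.compile('^{0}((,({0})?)*,{0})*$'.format(hex_32bit))

    assert mask_pattern.match('0x00f,,,0x0,,0x00f')  # the commented example above
    assert mask_pattern.match('0xff')                # a single 32-bit word
    assert not mask_pattern.match('00f')             # missing '0x' prefix -> rejected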
diff --git a/src/seastar/scripts/posix_net_conf.sh b/src/seastar/scripts/posix_net_conf.sh
new file mode 100755
index 00000000..d3fa52d3
--- /dev/null
+++ b/src/seastar/scripts/posix_net_conf.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+# !
+# ! Usage: posix_net_conf.sh [iface name, eth0 by default] [-mq|-sq] [--cpu-mask] [-h|--help] [--use-cpu-mask <mask>] [--options-file <YAML file>]
+# !
+# ! Ban NIC IRQs from being moved by irqbalance.
+# !
+# !    -sq - set all IRQs of a given NIC to CPU0 and configure RPS
+# !         to spread NAPIs' handling between other CPUs.
+# !
+# !    -mq - distribute NIC's IRQs among all CPUs instead of binding
+# !          them all to CPU0. In this mode RPS is always enabled to
+# !          spread NAPIs' handling between all CPUs.
+# !
+# ! --options-file <YAML file> - YAML file with perftune.py options
+# !
+# ! If no mode is given, the script will use a default mode:
+# !    - If the number of physical CPU cores per Rx HW queue is greater than 4 - use the '-sq' mode.
+# ! - Otherwise use the '-mq' mode.
+# !
+# ! --use-cpu-mask <mask> - mask of cores to use, by default use all available cores
+# !
+# ! --cpu-mask - Print out RPS CPU assignments. On MQ NIC, just print all cpus.
+# !
+# ! -h|--help - print this help information
+# !
+# ! Enable XPS, increase the default values of somaxconn and tcp_max_syn_backlog.
+# !
+
+usage()
+{
+ cat $0 | grep ^"# !" | cut -d"!" -f2-
+}
+
+parse_args()
+{
+ local i
+ local arg
+
+ until [ -z "$1" ]
+ do
+ arg=$1
+ case "$arg" in
+ "-mq")
+ MQ_MODE="--mode mq"
+ ;;
+ "-sq")
+ MQ_MODE="--mode sq"
+ ;;
+ "--cpu-mask")
+ CPU_MASK="--get-cpu-mask"
+ ;;
+ "--use-cpu-mask")
+ CPU_FILTER_MASK="--cpu-mask $2"
+ shift
+ ;;
+ "--options-file")
+ OPTIONS_FILE="--options-file $2"
+ shift
+ ;;
+ "-h"|"--help")
+ usage
+ exit 0
+ ;;
+ *)
+ IFACE=$arg
+ ;;
+ esac
+ shift
+ done
+}
+
+IFACE="eth0"
+MQ_MODE=""
+CPU_FILTER_MASK=""
+CPU_MASK=""
+MY_DIR=`dirname $0`
+OPTIONS_FILE=""
+
+parse_args "$@"
+
+$MY_DIR/perftune.py --nic $IFACE $MQ_MODE $CPU_FILTER_MASK $CPU_MASK $OPTIONS_FILE --tune net
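The script is a thin wrapper that translates its flags into a perftune.py invocation. For example (interface name and mask are illustrative):

    ./posix_net_conf.sh eth5 -sq --use-cpu-mask 0xff

expands to:

    perftune.py --nic eth5 --mode sq --cpu-mask 0xff --tune net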
diff --git a/src/seastar/scripts/run_with_dpdk.sh b/src/seastar/scripts/run_with_dpdk.sh
new file mode 100755
index 00000000..e00fbc29
--- /dev/null
+++ b/src/seastar/scripts/run_with_dpdk.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+# !
+# ! Usage: ./run_with_dpdk.sh <NIC to use> <number of huge pages per NUMA Node> <command to execute> [command parameters]
+# !
+# ! Prepares the DPDK environment (binds a given NIC to UIO, allocates the required
+# ! number of huge pages) and executes the given command in it.
+# ! After the command terminates, the original environment is restored, apart from
+# ! the huge pages, which remain allocated.
+# !
+
+usage()
+{
+ cat $0 | grep ^"# !" | cut -d"!" -f2-
+}
+
+#
+# check_stat_and_exit <error message>
+#
+check_stat_and_exit()
+{
+ if [[ $? -ne 0 ]]; then
+ echo $@
+ exit 1
+ fi
+}
+
+rollback()
+{
+ echo "Binding $NIC($BDF) back to $DRIVER..."
+ $SCRIPTS_DIR/dpdk_nic_bind.py -u $BDF
+ $SCRIPTS_DIR/dpdk_nic_bind.py -b $DRIVER $BDF
+}
+
+check_stat_and_rollback()
+{
+ if [[ $? -ne 0 ]]; then
+ echo $@
+ rollback
+ exit 1
+ fi
+}
+
+# Check number of parameters
+if [[ $# -lt 3 ]]; then
+ usage
+ exit 1
+fi
+
+NIC=$1
+shift
+NUM_HUGE_PAGES_PER_NODE=$1
+shift
+SCRIPTS_DIR=`dirname $0`
+
+
+ifconfig $NIC down
+check_stat_and_exit "Failed to shut down $NIC. Is $NIC present? Are your permissions sufficient?"
+
+DRIVER=`ethtool -i $NIC | grep driver | cut -d":" -f2- | tr -d ' '`
+BDF=`ethtool -i $NIC | grep bus-info | cut -d":" -f2- | tr -d ' '`
+
+# command to execute
+CMD=$@
+
+echo "Binding $NIC($BDF) to uio_pci_generic..."
+$SCRIPTS_DIR/dpdk_nic_bind.py -u $BDF
+check_stat_and_exit
+$SCRIPTS_DIR/dpdk_nic_bind.py -b uio_pci_generic $BDF
+check_stat_and_rollback
+
+echo "Allocating $NUM_HUGE_PAGES_PER_NODE 2MB huge pages on each NUMA Node:"
+for d in /sys/devices/system/node/node? ; do
+ echo $NUM_HUGE_PAGES_PER_NODE > $d/hugepages/hugepages-2048kB/nr_hugepages
+ check_stat_and_rollback
+ cur_node=`basename $d`
+ echo "...$cur_node done..."
+done
+
+mkdir -p /mnt/huge
+check_stat_and_rollback
+
+grep -s '/mnt/huge' /proc/mounts > /dev/null
+if [[ $? -ne 0 ]] ; then
+ echo "Mounting hugetlbfs at /mnt/huge..."
+ mount -t hugetlbfs nodev /mnt/huge
+ check_stat_and_rollback
+fi
+
+# Run the given command
+echo "Running: $CMD"
+$CMD
+ret=$?
+
+# Revert the NIC binding
+rollback
+
+exit $ret
+
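As an illustration of the calling convention (the NIC, page count, and application are placeholders):

    ./run_with_dpdk.sh eth2 512 ./my_seastar_app --smp 4

binds eth2 to uio_pci_generic, allocates 512 2MB huge pages on each NUMA node, mounts hugetlbfs at /mnt/huge if needed, runs the application, and rebinds eth2 to its original driver on exit.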
diff --git a/src/seastar/scripts/seastar-addr2line b/src/seastar/scripts/seastar-addr2line
new file mode 100755
index 00000000..050b449e
--- /dev/null
+++ b/src/seastar/scripts/seastar-addr2line
@@ -0,0 +1,252 @@
+#!/usr/bin/env python3
+#
+# This file is open source software, licensed to you under the terms
+# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
+# distributed with this work for additional information regarding copyright
+# ownership. You may not use this file except in compliance with the License.
+#
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Copyright (C) 2017 ScyllaDB
+
+import argparse
+import collections
+import re
+import sys
+import subprocess
+
+class Addr2Line:
+ def __init__(self, binary):
+ self._binary = binary
+
+ # Print warning if binary has no debug info according to `file`.
+ # Note: no message is printed for system errors as they will be
+ # printed also by addr2line later on.
+ output = subprocess.check_output(["file", self._binary])
+ s = output.decode("utf-8")
+ if s.find('ELF') >= 0 and s.find('debug_info', len(self._binary)) < 0:
+ print('{}'.format(s))
+
+ self._addr2line = subprocess.Popen(["addr2line", "-Cfpia", "-e", self._binary], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
+
+ # If a library doesn't exist in a particular path, addr2line
+ # will just exit. We need to be robust against that. We
+ # can't just wait on self._addr2line since there is no
+ # guarantee on what timeout is sufficient.
+ self._addr2line.stdin.write('\n')
+ self._addr2line.stdin.flush()
+ res = self._addr2line.stdout.readline()
+ self._missing = res == ''
+
+ def _read_resolved_address(self):
+ res = self._addr2line.stdout.readline()
+ # remove the address
+ res = res.split(': ', 1)[1]
+ dummy = '0x0000000000000000: ?? ??:0\n'
+ line = ''
+ while line != dummy:
+ res += line
+ line = self._addr2line.stdout.readline()
+ return res
+
+ def __call__(self, address):
+ if self._missing:
+ return " ".join([self._binary, address, '\n'])
+ # print two lines to force addr2line to output a dummy
+        # line which we can look for in _read_resolved_address
+ self._addr2line.stdin.write(address + '\n\n')
+ self._addr2line.stdin.flush()
+ return self._read_resolved_address()
+
+class BacktraceResolver(object):
+ object_address_re = re.compile('^.*?(((/[^/]+)+)\+)?(0x[0-9a-f]+)\W*$')
+
+ def __init__(self, executable, before_lines, verbose):
+ self._executable = executable
+ self._current_backtrace = []
+ self._before_lines = before_lines
+ self._before_lines_queue = collections.deque(maxlen=before_lines)
+ self._i = 0
+ self._known_backtraces = {}
+ self._verbose = verbose
+ self._known_modules = {self._executable: Addr2Line(self._executable)}
+
+ def _get_resolver_for_module(self, module):
+ if not module in self._known_modules:
+ self._known_modules[module] = Addr2Line(module)
+ return self._known_modules[module]
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self._print_current_backtrace()
+
+ def _print_resolved_address(self, module, address):
+ resolved_address = self._get_resolver_for_module(module)(address)
+ if self._verbose:
+ resolved_address = '{{{}}} {}: {}'.format(module, address, resolved_address)
+ sys.stdout.write(resolved_address)
+
+ def _print_current_backtrace(self):
+ if len(self._current_backtrace) == 0:
+ return
+
+ for line in self._before_lines_queue:
+ sys.stdout.write(line)
+
+ backtrace = "".join(map(str, self._current_backtrace))
+ if backtrace in self._known_backtraces:
+ print("[Backtrace #{}] Already seen, not resolving again.".format(self._known_backtraces[backtrace]))
+ print("") # To separate traces with an empty line
+ self._current_backtrace = []
+ return
+
+ self._known_backtraces[backtrace] = self._i
+
+ print("[Backtrace #{}]".format(self._i))
+
+ for module, addr in self._current_backtrace:
+ self._print_resolved_address(module, addr)
+
+ print("") # To separate traces with an empty line
+
+ self._current_backtrace = []
+ self._i += 1
+
+ def __call__(self, line):
+ match = re.match(self.object_address_re, line)
+
+ if match:
+ _, object_path, _, addr = match.groups()
+
+ if object_path:
+ self._current_backtrace.append((object_path, addr))
+ else:
+ self._current_backtrace.append((self._executable, addr))
+ else:
+ self._print_current_backtrace()
+ if self._before_lines > 0:
+ self._before_lines_queue.append(line)
+ elif self._before_lines < 0:
+ sys.stdout.write(line) # line already has a trailing newline
+ else:
+ pass # when == 0 no non-backtrace lines are printed
+
+
+class StdinBacktraceIterator(object):
+ """
+    Read stdin char-by-char and stop when the user presses Ctrl+D or hits
+    Enter twice. Although reading char-by-char is slow, this won't be a
+ problem here as backtraces shouldn't be huge.
+ """
+ def __iter__(self):
+ linefeeds = 0
+ lines = []
+ line = []
+
+ while True:
+ char = sys.stdin.read(1)
+
+ if char == '\n':
+ linefeeds += 1
+
+ if len(line) > 0:
+ lines.append(''.join(line))
+ line = []
+ else:
+ line.append(char)
+ linefeeds = 0
+
+ if char == '' or linefeeds > 1:
+ break
+
+ return iter(lines)
+
+
+description='Massage and pass addresses to the real addr2line for symbol lookup.'
+epilog='''
+There are three operational modes:
+ 1) If -f is specified input will be read from FILE
+ 2) If -f is omitted and there are ADDRESS args they will be read as input
+ 3) If -f is omitted and there are no ADDRESS args input will be read from stdin
+'''
+
+cmdline_parser = argparse.ArgumentParser(
+ description=description,
+ epilog=epilog,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+)
+
+cmdline_parser.add_argument(
+ '-e',
+ '--executable',
+ type=str,
+ required=True,
+ metavar='EXECUTABLE',
+ dest='executable',
+ help='The executable where the addresses originate from')
+
+cmdline_parser.add_argument(
+ '-f',
+ '--file',
+ type=str,
+ required=False,
+ metavar='FILE',
+ dest='file',
+ help='The file containing the addresses (one per line)')
+
+cmdline_parser.add_argument(
+ '-b',
+ '--before',
+ type=int,
+ metavar='BEFORE',
+ default=1,
+    help='Non-backtrace lines to print before resolved backtraces for context. '
+         'Set to 0 to print only resolved backtraces. '
+         'Set to -1 to print all non-backtrace lines. Default is 1.')
+
+cmdline_parser.add_argument(
+ '-v',
+ '--verbose',
+ action='store_true',
+ default=False,
+ help='Make resolved backtraces verbose, prepend to each line the module'
+ ' it originates from, as well as the address being resolved')
+
+cmdline_parser.add_argument(
+ 'addresses',
+ type=str,
+ metavar='ADDRESS',
+ nargs='*',
+ help='Addresses to parse')
+
+args = cmdline_parser.parse_args()
+
+if args.addresses and args.file:
+    print("Cannot use both -f and ADDRESS")
+    cmdline_parser.print_help()
+    sys.exit(1)
+
+
+if args.file:
+ lines = open(args.file, 'r')
+elif args.addresses:
+ lines = args.addresses
+else:
+ if sys.stdin.isatty():
+ lines = StdinBacktraceIterator()
+ else:
+ lines = sys.stdin
+
+with BacktraceResolver(args.executable, args.before, args.verbose) as resolve:
+ for line in lines:
+ resolve(line)
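The object_address_re pattern in BacktraceResolver accepts both bare addresses (resolved against the -e executable) and module+offset pairs. A small sketch of what it extracts, using illustrative log lines:

    import re

    # Same pattern BacktraceResolver uses to pick addresses out of log lines.
    object_address_re = re.compile(r'^.*?(((/[^/]+)+)\+)?(0x[0-9a-f]+)\W*$')

    m = object_address_re.match('  0x00000000004007d9\n')
    assert m.group(4) == '0x00000000004007d9'      # bare address -> main executable

    m = object_address_re.match('  /lib64/libc.so.6+0x3f6a0\n')
    assert m.group(2) == '/lib64/libc.so.6'        # shared object + offset
    assert m.group(4) == '0x3f6a0'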
diff --git a/src/seastar/scripts/seastar-cpu-map.sh b/src/seastar/scripts/seastar-cpu-map.sh
new file mode 100755
index 00000000..ebc00009
--- /dev/null
+++ b/src/seastar/scripts/seastar-cpu-map.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# !
+# ! Usage: ./seastar-cpu-map.sh -p <process_PID> | -n <process_Name> [-s <shard>]
+# !
+# ! List the CPU affinity for a particular running process,
+# ! providing a map of threads -> shard for any seastar app.
+# ! Ex.: ./seastar-cpu-map.sh -n scylla
+# ! ./seastar-cpu-map.sh -n scylla -s 0
+# ! ./seastar-cpu-map.sh -p 34
+# ! ./seastar-cpu-map.sh -p 32 -s 12
+
+usage() {
+ cat $0 | grep ^"# !" | cut -d"!" -f2-
+}
+
+while getopts 'p:n:s:' option; do
+ case "$option" in
+ p) PID=$OPTARG
+ ;;
+ n) PID=`pidof $OPTARG`
+ ;;
+ s) SHARD=$OPTARG
+ ;;
+ :) printf "missing argument for -%s\n" "$OPTARG" >&2
+ usage >&2
+ exit 1
+ ;;
+ \?) printf "illegal option: -%s\n" "$OPTARG" >&2
+ usage >&2
+ exit 1
+ ;;
+ esac
+done
+
+if [ $# -eq 0 ]; then usage >&2; exit 1; fi
+
+if [ -e "/proc/$PID/task" ]; then
+ # get list of threads for given PID
+ THREADS=`ls /proc/$PID/task`
+ for i in $THREADS; do
+ # get shards from threads
+ # there were three options here to get the shard number:
+ # reactor-xx, syscall-xx and timer-xx
+    # syscall was preferred because reactor has a special case (reactor-0 is called scylla)
+ SYSCALL=`grep syscall /proc/$i/comm | cut -d"-" -f2`
+    if [ -n "$SYSCALL" ] && { [ -z "$SHARD" ] || [ "$SYSCALL" = "$SHARD" ]; }; then
+      echo -e "shard: $SYSCALL, cpu:$(taskset -c -p $i | cut -d":" -f2)"
+    fi
+ done
+else
+ echo "Process does not exist"
+fi
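Sample output for a running seastar app pairs each syscall-NN thread's shard with the CPU it is pinned to, e.g. (shard and CPU numbers are illustrative):

    shard: 0, cpu: 1
    shard: 1, cpu: 2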
diff --git a/src/seastar/scripts/seastar-json2code.py b/src/seastar/scripts/seastar-json2code.py
new file mode 100755
index 00000000..6d74c801
--- /dev/null
+++ b/src/seastar/scripts/seastar-json2code.py
@@ -0,0 +1,540 @@
+#!/usr/bin/env python3
+
+# C++ code generation utility from Swagger definitions.
+# This utility supports both the swagger 1.2 format
+#    https://github.com/OAI/OpenAPI-Specification/blob/master/versions/1.2.md
+# and the 2.0 format
+#    https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md
+#
+# Swagger 2.0 is not only different in its structure (apis have moved, and
+# models are now under definitions), it also moved from a multiple-file structure
+# to a single file.
+# To keep the multiple-file support, each group of APIs is placed in its own file.
+# Each group can have a .def.json file with its definitions (what used to be models).
+# Because the APIs in definitions are snippets, they are not legal json objects
+# and need to be formatted as such so that a json parser will work.
+
+import json
+import sys
+import re
+import glob
+import argparse
+import os
+from string import Template
+
+parser = argparse.ArgumentParser(description="""Generate C++ class for json
+handling from swagger definition""")
+
+parser.add_argument('--outdir', help='the output directory', default='autogen')
+parser.add_argument('-o', help='Output file', default='')
+parser.add_argument('-f', help='input file', default='api-java.json')
+parser.add_argument('-ns', help="""namespace; when set, the struct will be
+created under this namespace""", default='')
+parser.add_argument('-jsoninc', help='relative path to the json include',
+ default='json/')
+parser.add_argument('-jsonns', help='set the json namespace', default='json')
+parser.add_argument('-indir', help="""when set, all json files in the given
+directory will be parsed; do not use with -f""", default='')
+parser.add_argument('-debug', help='debug level: 0 - quiet, 1 - error, 2 - verbose',
+ default='1', type=int)
+parser.add_argument('-combined', help='set the name of the combined file',
+ default='autogen/pathautogen.ee')
+config = parser.parse_args()
+
+
+valid_vars = {'string': 'sstring', 'int': 'int', 'double': 'double',
+ 'float': 'float', 'long': 'long', 'boolean': 'bool', 'char': 'char',
+ 'datetime': 'json::date_time'}
+
+current_file = ''
+
+spacing = " "
+def getitem(d, key, name):
+ if key in d:
+ return d[key]
+ else:
+ raise Exception("'" + key + "' not found in " + name)
+
+def fprint(f, *args):
+ for arg in args:
+ f.write(arg)
+
+def fprintln(f, *args):
+ for arg in args:
+ f.write(arg)
+ f.write('\n')
+
+
+def open_namespace(f, ns=config.ns):
+ fprintln(f, "namespace ", ns , ' {\n')
+
+
+def close_namespace(f):
+ fprintln(f, '}')
+
+
+def add_include(f, includes):
+ for include in includes:
+ fprintln(f, '#include ', include)
+ fprintln(f, "")
+
+def trace_verbose(*params):
+ if config.debug > 1:
+ print(''.join(params))
+
+
+def trace_err(*params):
+ if config.debug > 0:
+ print(current_file + ':' + ''.join(params))
+
+
+def valid_type(param):
+ if param in valid_vars:
+ return valid_vars[param]
+ trace_err("Type [", param, "] not defined")
+ return param
+
+
+def type_change(param, member):
+ if param == "array":
+ if "items" not in member:
+ trace_err("array without item declaration in ", param)
+ return ""
+ item = member["items"]
+ if "type" in item:
+ t = item["type"]
+ elif "$ref" in item:
+ t = item["$ref"]
+ else:
+ trace_err("array items with no type or ref declaration ", param)
+ return ""
+ return "json_list< " + valid_type(t) + " >"
+ return "json_element< " + valid_type(param) + " >"
+
+
+
+def print_ind_comment(f, ind, *params):
+ fprintln(f, ind, "/**")
+ for s in params:
+ fprintln(f, ind, " * ", s)
+ fprintln(f, ind, " */")
+
+def print_comment(f, *params):
+ print_ind_comment(f, spacing, *params)
+
+def print_copyrights(f):
+ fprintln(f, "/*")
+ fprintln(f, "* Copyright (C) 2014 Cloudius Systems, Ltd.")
+ fprintln(f, "*")
+ fprintln(f, "* This work is open source software, licensed under the",
+ " terms of the")
+ fprintln(f, "* BSD license as described in the LICENSE f in the top-",
+ "level directory.")
+ fprintln(f, "*")
+ fprintln(f, "* This is an Auto-Generated-code ")
+ fprintln(f, "* Changes you do in this file will be erased on next",
+ " code generation")
+ fprintln(f, "*/\n")
+
+
+def print_h_file_headers(f, name):
+ print_copyrights(f)
+ fprintln(f, "#ifndef __JSON_AUTO_GENERATED_" + name)
+ fprintln(f, "#define __JSON_AUTO_GENERATED_" + name + "\n")
+
+
+def clean_param(param):
+ match = re.match(r"^\{\s*([^\}]+)\s*}", param)
+ if match:
+ return [match.group(1), False]
+ return [param, True]
+
+
+def get_parameter_by_name(obj, name):
+ for p in obj["parameters"]:
+ if p["name"] == name:
+ return p
+ trace_err ("No Parameter declaration found for ", name)
+
+
+def clear_path_ending(path):
+ if not path or path[-1] != '/':
+ return path
+ return path[0:-1]
+
+# Check if a parameter is a required query parameter.
+# Returns true if the required flag is set and the parameter is a query
+# parameter; both the swagger 1.2 'paramType' and swagger 2.0 'in'
+# attributes are supported.
+def is_required_query_param(param):
+ return "required" in param and param["required"] and ("paramType" in param and param["paramType"] == "query" or "in" in param and param["in"] == "query")
+
+def add_path(f, path, details):
+ if "summary" in details:
+ print_comment(f, details["summary"])
+ param_starts = path.find("{")
+ if param_starts >= 0:
+ path_reminder = path[param_starts:]
+ vals = path.split("/")
+ vals.reverse()
+ fprintln(f, spacing, 'path_description::add_path("', clear_path_ending(vals.pop()),
+ '",', details["method"], ',"', details["nickname"], '")')
+ while vals:
+ param, is_url = clean_param(vals.pop())
+ if is_url:
+ fprintln(f, spacing, ' ->pushurl("', param, '")')
+ else:
+ param_type = get_parameter_by_name(details, param)
+ if ("allowMultiple" in param_type and
+ param_type["allowMultiple"] == True):
+ fprintln(f, spacing, ' ->pushparam("', param, '",true)')
+ else:
+ fprintln(f, spacing, ' ->pushparam("', param, '")')
+ else:
+ fprintln(f, spacing, 'path_description::add_path("', clear_path_ending(path), '",',
+ details["method"], ',"', details["nickname"], '")')
+ if "parameters" in details:
+ for param in details["parameters"]:
+ if is_required_query_param(param):
+ fprintln(f, spacing, ' ->pushmandatory_param("', param["name"], '")')
+ fprintln(f, spacing, ";")
+
+
+def get_base_name(param):
+ return os.path.basename(param)
+
+
+def is_model_valid(name, model):
+ if name in valid_vars:
+ return ""
+ properties = getitem(model[name], "properties", name)
+ for var in properties:
+ type = getitem(properties[var], "type", name + ":" + var)
+ if type == "array":
+ type = getitem(getitem(properties[var], "items", name + ":" + var), "type", name + ":" + var + ":items")
+ if type not in valid_vars:
+ if type not in model:
+ raise Exception("Unknown type '" + type + "' in Model '" + name + "'")
+ return type
+ valid_vars[name] = name
+ return ""
+
+def resolve_model_order(data):
+ res = []
+ models = set()
+ for model_name in data:
+        visited = {model_name}
+ missing = is_model_valid(model_name, data)
+ resolved = missing == ''
+ if not resolved:
+ stack = [model_name]
+ while not resolved:
+ if missing in visited:
+ raise Exception("Cyclic dependency found: " + missing)
+ missing_depends = is_model_valid(missing, data)
+ if missing_depends == '':
+ if missing not in models:
+ res.append(missing)
+ models.add(missing)
+ resolved = len(stack) == 0
+ if not resolved:
+ missing = stack.pop()
+                else:
+                    stack.append(missing)
+                    visited.add(missing)
+                    missing = missing_depends
+ elif model_name not in models:
+ res.append(model_name)
+ models.add(model_name)
+ return res
+
+def create_enum_wrapper(model_name, name, values):
+ enum_name = model_name + "_" + name
+ res = " enum class " + enum_name + " {"
+ for enum_entry in values:
+ res = res + " " + enum_entry + ", "
+ res = res + "NUM_ITEMS};\n"
+ wrapper = name + "_wrapper"
+ res = res + Template(""" struct $wrapper : public json::jsonable {
+ $wrapper() = default;
+ virtual std::string to_json() const {
+ switch(v) {
+ """).substitute({'wrapper' : wrapper})
+ for enum_entry in values:
+ res = res + " case " + enum_name + "::" + enum_entry + ": return \"\\\"" + enum_entry + "\\\"\";\n"
+ res = res + Template(""" default: return \"\\\"Unknown\\\"\";
+ }
+ }
+ template<class T>
+ $wrapper (const T& _v) {
+ switch(_v) {
+ """).substitute({'wrapper' : wrapper})
+ for enum_entry in values:
+ res = res + " case T::" + enum_entry + ": v = " + enum_name + "::" + enum_entry + "; break;\n"
+ res = res + Template(""" default: v = $enum_name::NUM_ITEMS;
+ }
+ }
+ template<class T>
+ operator T() const {
+ switch(v) {
+ """).substitute({'enum_name': enum_name})
+ for enum_entry in values:
+ res = res + " case " + enum_name + "::" + enum_entry + ": return T::" + enum_entry + ";\n"
+ return res + Template(""" default: return T::$value;
+ }
+ }
+ typedef typename std::underlying_type<$enum_name>::type pos_type;
+ $wrapper& operator++() {
+ v = static_cast<$enum_name>(static_cast<pos_type>(v) + 1);
+ return *this;
+ }
+ $wrapper & operator++(int) {
+ return ++(*this);
+ }
+ bool operator==(const $wrapper& c) const {
+ return v == c.v;
+ }
+ bool operator!=(const $wrapper& c) const {
+ return v != c.v;
+ }
+ bool operator<=(const $wrapper& c) const {
+ return static_cast<pos_type>(v) <= static_cast<pos_type>(c.v);
+ }
+ static $wrapper begin() {
+ return $wrapper ($enum_name::$value);
+ }
+ static $wrapper end() {
+ return $wrapper ($enum_name::NUM_ITEMS);
+ }
+ static boost::integer_range<$wrapper> all_items() {
+ return boost::irange(begin(), end());
+ }
+ $enum_name v;
+ };
+ """).substitute({'enum_name': enum_name, 'wrapper' : wrapper, 'value':values[0]})
+
+def to_operation(opr, data):
+ data["method"] = opr.upper()
+ data["nickname"] = data["operationId"]
+ return data
+
+def to_path(path, data):
+ data["operations"] = [to_operation(k, data[k]) for k in data]
+ data["path"] = path
+
+ return data
+
+def create_h_file(data, hfile_name, api_name, init_method, base_api):
+ if config.o != '':
+ hfile = open(config.o, "w")
+ else:
+ hfile = open(config.outdir + "/" + hfile_name, "w")
+ print_h_file_headers(hfile, api_name)
+ add_include(hfile, ['<seastar/core/sstring.hh>',
+ '<seastar/json/json_elements.hh>',
+ '<seastar/http/json_path.hh>'])
+
+ add_include(hfile, ['<iostream>', '<boost/range/irange.hpp>'])
+ open_namespace(hfile, "seastar")
+ open_namespace(hfile, "httpd")
+ open_namespace(hfile, api_name)
+
+ if "models" in data:
+ models_order = resolve_model_order(data["models"])
+ for model_name in models_order:
+ model = data["models"][model_name]
+ if 'description' in model:
+ print_ind_comment(hfile, "", model["description"])
+ fprintln(hfile, "struct ", model_name, " : public json::json_base {")
+ member_init = ''
+ member_assignment = ''
+ member_copy = ''
+ for member_name in model["properties"]:
+ member = model["properties"][member_name]
+ if "description" in member:
+ print_comment(hfile, member["description"])
+ if "enum" in member:
+ enum_name = model_name + "_" + member_name
+ fprintln(hfile, create_enum_wrapper(model_name, member_name, member["enum"]))
+ fprintln(hfile, " ", config.jsonns, "::json_element<",
+ member_name, "_wrapper> ",
+ member_name, ";\n")
+ else:
+ fprintln(hfile, " ", config.jsonns, "::",
+ type_change(member["type"], member), " ",
+ member_name, ";\n")
+ member_init += " add(&" + member_name + ',"'
+ member_init += member_name + '");\n'
+ member_assignment += " " + member_name + " = " + "e." + member_name + ";\n"
+ member_copy += " e." + member_name + " = " + member_name + ";\n"
+ fprintln(hfile, "void register_params() {")
+ fprintln(hfile, member_init)
+ fprintln(hfile, '}')
+
+ fprintln(hfile, model_name, '() {')
+ fprintln(hfile, ' register_params();')
+ fprintln(hfile, '}')
+ fprintln(hfile, model_name, '(const ' + model_name + ' & e) {')
+ fprintln(hfile, ' register_params();')
+ fprintln(hfile, member_assignment)
+ fprintln(hfile, '}')
+ fprintln(hfile, "template<class T>")
+ fprintln(hfile, model_name, "& operator=(const ", "T& e) {")
+ fprintln(hfile, member_assignment)
+ fprintln(hfile, " return *this;")
+ fprintln(hfile, "}")
+ fprintln(hfile, model_name, "& operator=(const ", model_name, "& e) {")
+ fprintln(hfile, member_assignment)
+ fprintln(hfile, " return *this;")
+ fprintln(hfile, "}")
+ fprintln(hfile, "template<class T>")
+ fprintln(hfile, model_name, "& update(T& e) {")
+ fprintln(hfile, member_copy)
+ fprintln(hfile, " return *this;")
+ fprintln(hfile, "}")
+ fprintln(hfile, "};\n\n")
+
+ # print_ind_comment(hfile, "", "Initialize the path")
+# fprintln(hfile, init_method + "(const std::string& description);")
+ fprintln(hfile, 'static const sstring name = "', base_api, '";')
+ for item in data["apis"]:
+ path = item["path"]
+ if "operations" in item:
+ for oper in item["operations"]:
+ if "summary" in oper:
+ print_comment(hfile, oper["summary"])
+
+ param_starts = path.find("{")
+ base_url = path
+ vals = []
+ if param_starts >= 0:
+ vals = path[param_starts:].split("/")
+ vals.reverse()
+ base_url = path[:param_starts]
+
+ fprintln(hfile, 'static const path_description ', getitem(oper, "nickname", oper), '("', clear_path_ending(base_url),
+ '",', oper["method"], ',"', oper["nickname"], '",')
+ fprint(hfile, '{')
+ first = True
+ while vals:
+ path_param, is_url = clean_param(vals.pop())
+ if path_param == "":
+ continue
+ if first == True:
+ first = False
+ else:
+ fprint(hfile, "\n,")
+ if is_url:
+ fprint(hfile, '{', '"/', path_param , '", path_description::url_component_type::FIXED_STRING', '}')
+ else:
+ path_param_type = get_parameter_by_name(oper, path_param)
+ if ("allowMultiple" in path_param_type and
+ path_param_type["allowMultiple"] == True):
+ fprint(hfile, '{', '"', path_param , '", path_description::url_component_type::PARAM_UNTIL_END_OF_PATH', '}')
+ else:
+ fprint(hfile, '{', '"', path_param , '", path_description::url_component_type::PARAM', '}')
+ fprint(hfile, '}')
+ fprint(hfile, ',{')
+ first = True
+ enum_definitions = ""
+ if "enum" in oper:
+ enum_definitions = ("namespace ns_" + oper["nickname"] + " {\n" +
+ create_enum_wrapper(oper["nickname"], "return_type", oper["enum"]) +
+ "}\n")
+ if "parameters" in oper:
+ for param in oper["parameters"]:
+ if is_required_query_param(param):
+ if first == True:
+ first = False
+ else:
+ fprint(hfile, "\n,")
+ fprint(hfile, '"', param["name"], '"')
+ if "enum" in param:
+ enum_definitions = enum_definitions + 'namespace ns_' + oper["nickname"] + '{\n'
+ enm = param["name"]
+ enum_definitions = enum_definitions + 'enum class ' + enm + ' {'
+ for val in param["enum"]:
+ enum_definitions = enum_definitions + val + ", "
+ enum_definitions = enum_definitions + 'NUM_ITEMS};\n'
+ enum_definitions = enum_definitions + enm + ' str2' + enm + '(const sstring& str) {\n'
+ enum_definitions = enum_definitions + ' static const sstring arr[] = {"' + '","'.join(param["enum"]) + '"};\n'
+ enum_definitions = enum_definitions + ' int i;\n'
+ enum_definitions = enum_definitions + ' for (i=0; i < ' + str(len(param["enum"])) + '; i++) {\n'
+ enum_definitions = enum_definitions + ' if (arr[i] == str) {return (' + enm + ')i;}\n}\n'
+ enum_definitions = enum_definitions + ' return (' + enm + ')i;\n'
+ enum_definitions = enum_definitions + '}\n}\n'
+
+ fprintln(hfile, '});')
+ fprintln(hfile, enum_definitions)
+
+ close_namespace(hfile)
+ close_namespace(hfile)
+ close_namespace(hfile)
+ hfile.write("#endif //__JSON_AUTO_GENERATED_HEADERS\n")
+ hfile.close()
+
+def remove_leading_comma(data):
+ return re.sub(r'^\s*,','', data)
+
+def format_as_json_object(data):
+ return "{" + remove_leading_comma(data) + "}"
+
+def check_for_models(data, param):
+ model_name = param.replace(".json", ".def.json")
+ if not os.path.isfile(model_name):
+ return
+ try:
+ with open(model_name) as myfile:
+ json_data = myfile.read()
+ def_data = json.loads(format_as_json_object(json_data))
+ data["models"] = def_data
+ except Exception as e:
+ type, value, tb = sys.exc_info()
+        print("Badly formatted JSON definition file '" + model_name + "' error ", value)
+ sys.exit(-1)
+
+def set_apis(data):
+ return {"apis": [to_path(p, data[p]) for p in data]}
+
+def parse_file(param, combined):
+ global current_file
+ trace_verbose("parsing ", param, " file")
+ with open(param) as myfile:
+ json_data = myfile.read()
+ try:
+ data = json.loads(json_data)
+ except Exception as e:
+ try:
+            # the passed data is not valid json, so maybe it's a swagger 2.0
+            # snippet; format it as json and try again
+            # set_apis and check_for_models will create an object with a similar format
+            # to swagger 1.2 so the code generation will work
+ data = set_apis(json.loads(format_as_json_object(json_data)))
+ check_for_models(data, param)
+ except:
+ # The problem is with the file,
+ # just report the error and exit.
+ type, value, tb = sys.exc_info()
+            print("Badly formatted JSON file '" + param + "' error ", value)
+ sys.exit(-1)
+ try:
+ base_file_name = get_base_name(param)
+ current_file = base_file_name
+ hfile_name = base_file_name + ".hh"
+ api_name = base_file_name.replace('.', '_')
+ base_api = base_file_name.replace('.json', '')
+ init_method = "void " + api_name + "_init_path"
+ trace_verbose("creating ", hfile_name)
+ if (combined):
+ fprintln(combined, '#include "', base_file_name, ".cc", '"')
+ create_h_file(data, hfile_name, api_name, init_method, base_api)
+ except:
+ type, value, tb = sys.exc_info()
+        print("Error while parsing JSON file '" + param + "' error ", value)
+ sys.exit(-1)
+
+if "indir" in config and config.indir != '':
+ combined = open(config.combined, "w")
+ for f in glob.glob(os.path.join(config.indir, "*.json")):
+ parse_file(f, combined)
+else:
+ parse_file(config.f, None)
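As the header comment notes, swagger 2.0 .def.json files contain bare definition snippets rather than complete JSON documents, so the script wraps them before parsing. A minimal sketch with a made-up fragment, using the same two helpers defined above:

    import json, re

    def remove_leading_comma(data):
        return re.sub(r'^\s*,', '', data)

    def format_as_json_object(data):
        return "{" + remove_leading_comma(data) + "}"

    # Hypothetical .def.json fragment: a comma-led group of members.
    snippet = ',"my_model": {"properties": {"id": {"type": "int"}}}'
    print(json.loads(format_as_json_object(snippet)))
    # -> {'my_model': {'properties': {'id': {'type': 'int'}}}}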
diff --git a/src/seastar/scripts/tap.sh b/src/seastar/scripts/tap.sh
new file mode 100644
index 00000000..a8cc6d3a
--- /dev/null
+++ b/src/seastar/scripts/tap.sh
@@ -0,0 +1,31 @@
+#
+# This file is open source software, licensed to you under the terms
+# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
+# distributed with this work for additional information regarding copyright
+# ownership. You may not use this file except in compliance with the License.
+#
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+### Set up a tap device for seastar
+tap=tap0
+bridge=virbr0
+user=`whoami`
+sudo ip link del $tap
+sudo ip tuntap add mode tap dev $tap user $user one_queue vnet_hdr
+sudo ifconfig $tap up
+sudo brctl addif $bridge $tap
+sudo brctl stp $bridge off
+sudo modprobe vhost-net
+sudo chown $user:$user /dev/vhost-net
+sudo brctl show $bridge
+sudo ifconfig $bridge