Diffstat (limited to 'arch/um/drivers')
72 files changed, 17986 insertions, 0 deletions
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
new file mode 100644
index 0000000000..b94b2618e7
--- /dev/null
+++ b/arch/um/drivers/Kconfig
@@ -0,0 +1,385 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "UML Character Devices"
+
+config STDERR_CONSOLE
+        bool "stderr console"
+        default y
+        help
+          Console driver which dumps all printk messages to stderr.
+
+config SSL
+        bool "Virtual serial line"
+        help
+          The User-Mode Linux environment allows you to create virtual serial
+          lines on the UML that are usually made to show up on the host as
+          ttys or ptys.
+
+          See <http://user-mode-linux.sourceforge.net/old/input.html> for more
+          information and command line examples of how to use this facility.
+
+          Unless you have a specific reason for disabling this, say Y.
+
+config NULL_CHAN
+        bool "null channel support"
+        help
+          This option enables support for attaching UML consoles and serial
+          lines to a device similar to /dev/null. Data written to it disappears
+          and there is never any data to be read.
+
+config PORT_CHAN
+        bool "port channel support"
+        help
+          This option enables support for attaching UML consoles and serial
+          lines to host portals. They may be accessed with 'telnet <host>
+          <port number>'. Any number of consoles and serial lines may be
+          attached to a single portal, although what UML device you get when
+          you telnet to that portal will be unpredictable.
+          It is safe to say 'Y' here.
+
+config PTY_CHAN
+        bool "pty channel support"
+        help
+          This option enables support for attaching UML consoles and serial
+          lines to host pseudo-terminals. Access to both traditional
+          pseudo-terminals (/dev/pty*) and pts pseudo-terminals is controlled
+          with this option. The assignment of UML devices to host devices
+          will be announced in the kernel message log.
+          It is safe to say 'Y' here.
+
+config TTY_CHAN
+        bool "tty channel support"
+        help
+          This option enables support for attaching UML consoles and serial
+          lines to host terminals. Access to both virtual consoles
+          (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
+          /dev/pts/*) is controlled by this option.
+          It is safe to say 'Y' here.
+
+config XTERM_CHAN
+        bool "xterm channel support"
+        help
+          This option enables support for attaching UML consoles and serial
+          lines to xterms. Each UML device so assigned will be brought up in
+          its own xterm.
+          It is safe to say 'Y' here.
+
+config XTERM_CHAN_DEFAULT_EMULATOR
+        string "xterm channel default terminal emulator"
+        depends on XTERM_CHAN
+        default "xterm"
+        help
+          This option allows changing the default terminal emulator.
+
+config NOCONFIG_CHAN
+        bool
+        default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
+
+config CON_ZERO_CHAN
+        string "Default main console channel initialization"
+        default "fd:0,fd:1"
+        help
+          This is the string describing the channel to which the main console
+          will be attached by default. This value can be overridden from the
+          command line. The default value is "fd:0,fd:1", which attaches the
+          main console to stdin and stdout.
+          It is safe to leave this unchanged.
+
+config CON_CHAN
+        string "Default console channel initialization"
+        default "xterm"
+        help
+          This is the string describing the channel to which all consoles
+          except the main console will be attached by default. This value can
+          be overridden from the command line. The default value is "xterm",
+          which brings them up in xterms.
+          It is safe to leave this unchanged, although you may wish to change
+          this if you expect the UML that you build to be run in environments
+          which don't have X or xterm available.
+
+config SSL_CHAN
+        string "Default serial line channel initialization"
+        default "pty"
+        help
+          This is the string describing the channel to which the serial lines
+          will be attached by default. This value can be overridden from the
+          command line. The default value is "pty", which attaches them to
+          traditional pseudo-terminals.
+          It is safe to leave this unchanged, although you may wish to change
+          this if you expect the UML that you build to be run in environments
+          which don't have a set of /dev/pty* devices.
+
+config UML_SOUND
+        tristate "Sound support"
+        depends on SOUND
+        select SOUND_OSS_CORE
+        help
+          This option enables UML sound support. If enabled, it will pull in
+          the UML hostaudio relay, which acts as an intermediary
+          between the host's dsp and mixer devices and the UML sound system.
+          It is safe to say 'Y' here.
+
+endmenu
+
+menu "UML Network Devices"
+        depends on NET
+
+# UML virtual driver
+config UML_NET
+        bool "Virtual network device"
+        help
+          While the User-Mode port cannot directly talk to any physical
+          hardware devices, this choice and the following transport options
+          provide one or more virtual network devices through which the UML
+          kernels can talk to each other, the host, and with the host's help,
+          machines on the outside world.
+
+          For more information, including explanations of the networking and
+          sample configurations, see
+          <http://user-mode-linux.sourceforge.net/old/networking.html>.
+
+          If you'd like to be able to enable networking in the User-Mode
+          Linux environment, say Y; otherwise say N. Note that you must
+          enable at least one of the following transport options to actually
+          make use of UML networking.
+
+config UML_NET_ETHERTAP
+        bool "Ethertap transport (obsolete)"
+        depends on UML_NET
+        help
+          The Ethertap User-Mode Linux network transport allows a single
+          running UML to exchange packets with its host over one of the
+          host's Ethertap devices, such as /dev/tap0. Additional running
+          UMLs can use additional Ethertap devices, one per running UML.
+          While the UML believes it's on a (multi-device, broadcast) virtual
+          Ethernet network, it's in fact communicating over a point-to-point
+          link with the host.
+
+          To use this, your host kernel must have support for Ethertap
+          devices. Also, if your host kernel is 2.4.x, it must have
+          CONFIG_NETLINK_DEV configured as Y or M.
+
+          For more information, see
+          <http://user-mode-linux.sourceforge.net/old/networking.html>. That
+          site has examples of the UML command line to use to enable Ethertap
+          networking.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+config UML_NET_TUNTAP
+        bool "TUN/TAP transport (obsolete)"
+        depends on UML_NET
+        help
+          The UML TUN/TAP network transport allows a UML instance to exchange
+          packets with the host over a TUN/TAP device. This option will only
+          work with a 2.4 host, unless you've applied the TUN/TAP patch to
+          your 2.2 host kernel.
+
+          To use this transport, your host kernel must have support for TUN/TAP
+          devices, either built-in or as a module.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
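All of the transports in this menu plug into the UML network core through the same registration interface: a driver fills in a struct transport with its command-line setup parser and its kernel-side and host-side operation tables, then registers it from an initcall. The daemon transport later in this diff (daemon_kern.c) is a complete example; below is a minimal sketch of the same pattern, in which the my_* names and the empty operation tables are illustrative placeholders, not code from this diff:

#include <linux/init.h>
#include <linux/list.h>
#include <net_kern.h>

/* Parse the "eth<n>=my,..." command-line spec into *data; return 1 to
 * accept the spec, as the setup routines in this diff do. (Sketch:
 * accepts everything without parsing.) */
static int my_setup(char *str, char **mac_out, void *data)
{
        return 1;
}

/* A real transport fills these with its host-side and kernel-side
 * operations (.init, .open, .read, .write, ...); left empty here. */
static const struct net_user_info my_user_info;
static const struct net_kern_info my_kern_info;

static struct transport my_transport = {
        .list         = LIST_HEAD_INIT(my_transport.list),
        .name         = "my",
        .setup        = my_setup,
        .user         = &my_user_info,
        .kern         = &my_kern_info,
        .private_size = 0,      /* size of the per-device private state */
        .setup_size   = 0,      /* size of the parsed setup data */
};

static int register_my(void)
{
        register_transport(&my_transport);
        return 0;
}

late_initcall(register_my);

Once registered, the net core hands an "eth0=my,..." option string to my_setup(), just as it does for daemon_setup() in the daemon transport shown further down.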
+
+config UML_NET_SLIP
+        bool "SLIP transport (obsolete)"
+        depends on UML_NET
+        help
+          The slip User-Mode Linux network transport allows a running UML to
+          network with its host over a point-to-point link. Unlike Ethertap,
+          which can carry any Ethernet frame (and hence even non-IP packets),
+          the slip transport can only carry IP packets.
+
+          To use this, your host must support slip devices.
+
+          For more information, see
+          <http://user-mode-linux.sourceforge.net/old/networking.html>. That
+          site has examples of the UML command line to use to enable slip
+          networking, and details of a few quirks with it.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+config UML_NET_DAEMON
+        bool "Daemon transport (obsolete)"
+        depends on UML_NET
+        help
+          This User-Mode Linux network transport allows one or more running
+          UMLs on a single host to communicate with each other, but not to
+          the host.
+
+          To use this form of networking, you'll need to run the UML
+          networking daemon on the host.
+
+          For more information, see
+          <http://user-mode-linux.sourceforge.net/old/networking.html>. That
+          site has examples of the UML command line to use to enable Daemon
+          networking.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+config UML_NET_DAEMON_DEFAULT_SOCK
+        string "Default socket for daemon transport"
+        default "/tmp/uml.ctl"
+        depends on UML_NET_DAEMON
+        help
+          This option allows setting the default socket for the daemon
+          transport; normally it defaults to /tmp/uml.ctl.
+
+config UML_NET_VECTOR
+        bool "Vector I/O high performance network devices"
+        depends on UML_NET
+        select MAY_HAVE_RUNTIME_DEPS
+        help
+          This User-Mode Linux network driver uses multi-message send
+          and receive functions. The host running the UML guest must have
+          a Linux kernel version above 3.0 and a libc version > 2.13.
+          This driver provides tap, raw, gre and l2tpv3 network transports
+          with up to 4 times higher network throughput than the UML network
+          drivers.
+
+config UML_NET_VDE
+        bool "VDE transport (obsolete)"
+        depends on UML_NET
+        depends on !MODVERSIONS
+        select MAY_HAVE_RUNTIME_DEPS
+        help
+          This User-Mode Linux network transport allows one or more running
+          UMLs on a single host to communicate with each other and also
+          with the rest of the world using Virtual Distributed Ethernet,
+          an improved fork of uml_switch.
+
+          You must have libvdeplug installed in order to build the vde
+          transport into UML.
+
+          To use this form of networking, you will need to run vde_switch
+          on the host.
+
+          For more information, see <http://wiki.virtualsquare.org/>.
+          That site has a good overview of what VDE is and also examples
+          of the UML command line to use to enable VDE networking.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+config UML_NET_MCAST
+        bool "Multicast transport (obsolete)"
+        depends on UML_NET
+        help
+          This Multicast User-Mode Linux network transport allows multiple
+          UMLs (even ones running on different host machines!) to talk to
+          each other over a virtual ethernet network. However, it requires
+          at least one UML with one of the other transports to act as a
+          bridge if any of them need to be able to talk to their hosts or any
+          other IP machines.
+
+          To use this, your host kernel(s) must support IP Multicasting.
+
+          For more information, see
+          <http://user-mode-linux.sourceforge.net/old/networking.html>. That
+          site has examples of the UML command line to use to enable Multicast
+          networking, and notes about the security of this approach.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+config UML_NET_PCAP
+        bool "pcap transport (obsolete)"
+        depends on UML_NET
+        depends on !MODVERSIONS
+        select MAY_HAVE_RUNTIME_DEPS
+        help
+          The pcap transport makes a pcap packet stream on the host look
+          like an ethernet device inside UML. This is useful for making
+          UML act as a network monitor for the host. You must have libpcap
+          installed in order to build the pcap transport into UML.
+
+          For more information, see
+          <http://user-mode-linux.sourceforge.net/old/networking.html>. That
+          site has examples of the UML command line to use to enable this option.
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+config UML_NET_SLIRP
+        bool "SLiRP transport (obsolete)"
+        depends on UML_NET
+        help
+          The SLiRP User-Mode Linux network transport allows a running UML
+          to network by invoking a program that can handle SLIP encapsulated
+          packets. This is commonly (but not limited to) the application
+          known as SLiRP, a program that can re-socket IP packets back onto
+          the host on which it is run. Only IP packets are supported,
+          unlike other network transports that can handle all Ethernet
+          frames. In general, slirp allows the UML the same IP connectivity
+          to the outside world that the host user is permitted, and unlike
+          other transports, SLiRP works without the need of root level
+          privileges, setuid binaries, or SLIP devices on the host. This
+          also means not every type of connection is possible, but most
+          situations can be accommodated with carefully crafted slirp
+          commands that can be passed along as part of the network device's
+          setup string. The effect of this transport on the UML is similar
+          to that of a host behind a firewall that masquerades all network
+          connections passing through it (but is less secure).
+
+          NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+          migrate to UML_NET_VECTOR.
+
+          If unsure, say N.
+
+          Startup example: "eth0=slirp,FE:FD:01:02:03:04,/usr/local/bin/slirp"
+
+endmenu
+
+config VIRTIO_UML
+        bool "UML driver for virtio devices"
+        select VIRTIO
+        help
+          This driver provides support for virtio based paravirtual device
+          drivers over vhost-user sockets.
+
+config UML_RTC
+        bool "UML RTC driver"
+        depends on RTC_CLASS
+        # there's no use in this if PM_SLEEP isn't enabled ...
+        depends on PM_SLEEP
+        help
+          When PM_SLEEP is configured, it may be desirable to wake up using
+          rtcwake, especially in time-travel mode. This driver enables that
+          by providing a fake RTC clock that causes a wakeup at the right
+          time.
+
+config UML_PCI_OVER_VIRTIO
+        bool "Enable PCI over VIRTIO device simulation"
+        # in theory, just VIRTIO is enough, but that causes recursion
+        depends on VIRTIO_UML
+        select FORCE_PCI
+        select UML_IOMEM_EMULATION
+        select UML_DMA_EMULATION
+        select PCI_MSI
+        select PCI_LOCKLESS_CONFIG
+
+config UML_PCI_OVER_VIRTIO_DEVICE_ID
+        int "set the virtio device ID for PCI emulation"
+        default -1
+        depends on UML_PCI_OVER_VIRTIO
+        help
+          There's no official device ID assigned (yet); set the one you
+          wish to use for experimentation here. The default of -1 is
+          not valid and will cause the driver to fail at probe.
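The help text above says the default ID of -1 "will cause the driver to fail at probe". Since a Kconfig int symbol is exported to C as a CONFIG_* macro, a probe routine can implement that check directly. A hedged sketch follows; the function name and its simplified signature are hypothetical, not taken from the virt-pci.c added by this commit:

#include <linux/errno.h>
#include <linux/printk.h>

/* Sketch only: my_virt_pci_probe() is a placeholder, not the in-tree
 * probe function. Kconfig "int" symbols are visible to C as CONFIG_*
 * macros, so the unconfigured default can be rejected at compile-time
 * constant cost. */
static int my_virt_pci_probe(void)
{
        if (CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0) {
                pr_err("virt-pci: no virtio device ID configured\n");
                return -ENODEV; /* the default of -1 is not valid */
        }
        /* ... continue with virtio/PCI setup using the configured ID ... */
        return 0;
}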
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
new file mode 100644
index 0000000000..0e6af81096
--- /dev/null
+++ b/arch/um/drivers/Makefile
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2000, 2002, 2003 Jeff Dike (jdike@karaya.com)
+#
+
+# pcap is broken in 2.5 because kbuild doesn't allow pcap.a to be linked
+# in to pcap.o
+
+slip-objs := slip_kern.o slip_user.o
+slirp-objs := slirp_kern.o slirp_user.o
+daemon-objs := daemon_kern.o daemon_user.o
+vector-objs := vector_kern.o vector_user.o vector_transports.o
+umcast-objs := umcast_kern.o umcast_user.o
+net-objs := net_kern.o net_user.o
+mconsole-objs := mconsole_kern.o mconsole_user.o
+hostaudio-objs := hostaudio_kern.o
+ubd-objs := ubd_kern.o ubd_user.o
+port-objs := port_kern.o port_user.o
+harddog-objs := harddog_kern.o
+harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o
+rtc-objs := rtc_kern.o rtc_user.o
+
+LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
+
+LDFLAGS_vde.o = $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
+
+targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o
+
+$(obj)/pcap.o: $(obj)/pcap_kern.o $(obj)/pcap_user.o
+        $(LD) -r -dp -o $@ $^ $(ld_flags)
+
+$(obj)/vde.o: $(obj)/vde_kern.o $(obj)/vde_user.o
+        $(LD) -r -dp -o $@ $^ $(ld_flags)
+
+#XXX: The call below does not work because the flags are added before the
+# object name, so nothing from the library gets linked.
+#$(call if_changed,ld)
+
+# When the above is fixed, don't forget to add this too!
+#targets += $(obj)/pcap.o
+
+obj-y := stdio_console.o fd.o chan_kern.o chan_user.o line.o
+obj-$(CONFIG_SSL) += ssl.o
+obj-$(CONFIG_STDERR_CONSOLE) += stderr_console.o
+
+obj-$(CONFIG_UML_NET_SLIP) += slip.o slip_common.o
+obj-$(CONFIG_UML_NET_SLIRP) += slirp.o slip_common.o
+obj-$(CONFIG_UML_NET_DAEMON) += daemon.o
+obj-$(CONFIG_UML_NET_VECTOR) += vector.o
+obj-$(CONFIG_UML_NET_VDE) += vde.o
+obj-$(CONFIG_UML_NET_MCAST) += umcast.o
+obj-$(CONFIG_UML_NET_PCAP) += pcap.o
+obj-$(CONFIG_UML_NET) += net.o
+obj-$(CONFIG_MCONSOLE) += mconsole.o
+obj-$(CONFIG_MMAPPER) += mmapper_kern.o
+obj-$(CONFIG_BLK_DEV_UBD) += ubd.o
+obj-$(CONFIG_UML_SOUND) += hostaudio.o
+obj-$(CONFIG_NULL_CHAN) += null.o
+obj-$(CONFIG_PORT_CHAN) += port.o
+obj-$(CONFIG_PTY_CHAN) += pty.o
+obj-$(CONFIG_TTY_CHAN) += tty.o
+obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o
+obj-$(CONFIG_UML_WATCHDOG) += harddog.o
+obj-y += $(harddog-builtin-y) $(harddog-builtin-m)
+obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
+obj-$(CONFIG_UML_RANDOM) += random.o
+obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
+obj-$(CONFIG_UML_RTC) += rtc.o
+obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
+
+# pcap_user.o must be added explicitly.
+USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o +CFLAGS_null.o = -DDEV_NULL=$(DEV_NULL_PATH) + +CFLAGS_xterm.o += '-DCONFIG_XTERM_CHAN_DEFAULT_EMULATOR="$(CONFIG_XTERM_CHAN_DEFAULT_EMULATOR)"' + +include $(srctree)/arch/um/scripts/Makefile.rules diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h new file mode 100644 index 0000000000..3fec3b8406 --- /dev/null +++ b/arch/um/drivers/chan.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com) + */ + +#ifndef __CHAN_KERN_H__ +#define __CHAN_KERN_H__ + +#include <linux/tty.h> +#include <linux/list.h> +#include <linux/console.h> +#include "chan_user.h" +#include "line.h" + +struct chan { + struct list_head list; + struct list_head free_list; + struct line *line; + char *dev; + unsigned int primary:1; + unsigned int input:1; + unsigned int output:1; + unsigned int opened:1; + unsigned int enabled:1; + int fd; + const struct chan_ops *ops; + void *data; +}; + +extern void chan_interrupt(struct line *line, int irq); +extern int parse_chan_pair(char *str, struct line *line, int device, + const struct chan_opts *opts, char **error_out); +extern int write_chan(struct chan *chan, const char *buf, int len, + int write_irq); +extern int console_write_chan(struct chan *chan, const char *buf, + int len); +extern int console_open_chan(struct line *line, struct console *co); +extern void deactivate_chan(struct chan *chan, int irq); +extern void chan_enable_winch(struct chan *chan, struct tty_port *port); +extern int enable_chan(struct line *line); +extern void close_chan(struct line *line); +extern int chan_window_size(struct line *line, + unsigned short *rows_out, + unsigned short *cols_out); +extern int chan_config_string(struct line *line, char *str, int size, + char **error_out); + +#endif diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c new file mode 100644 index 0000000000..26a702a065 --- /dev/null +++ b/arch/um/drivers/chan_kern.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <linux/slab.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include "chan.h" +#include <os.h> +#include <irq_kern.h> + +#ifdef CONFIG_NOCONFIG_CHAN +static void *not_configged_init(char *str, int device, + const struct chan_opts *opts) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); + return NULL; +} + +static int not_configged_open(int input, int output, int primary, void *data, + char **dev_out) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); + return -ENODEV; +} + +static void not_configged_close(int fd, void *data) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); +} + +static int not_configged_read(int fd, char *c_out, void *data) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); + return -EIO; +} + +static int not_configged_write(int fd, const char *buf, int len, void *data) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); + return -EIO; +} + +static int not_configged_console_write(int fd, const char *buf, int len) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); + return -EIO; +} + +static int not_configged_window_size(int fd, void *data, unsigned short *rows, + unsigned short *cols) +{ + printk(KERN_ERR "Using 
a channel type which is configured out of " + "UML\n"); + return -ENODEV; +} + +static void not_configged_free(void *data) +{ + printk(KERN_ERR "Using a channel type which is configured out of " + "UML\n"); +} + +static const struct chan_ops not_configged_ops = { + .init = not_configged_init, + .open = not_configged_open, + .close = not_configged_close, + .read = not_configged_read, + .write = not_configged_write, + .console_write = not_configged_console_write, + .window_size = not_configged_window_size, + .free = not_configged_free, + .winch = 0, +}; +#endif /* CONFIG_NOCONFIG_CHAN */ + +static int open_one_chan(struct chan *chan) +{ + int fd, err; + + if (chan->opened) + return 0; + + if (chan->ops->open == NULL) + fd = 0; + else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary, + chan->data, &chan->dev); + if (fd < 0) + return fd; + + err = os_set_fd_block(fd, 0); + if (err) { + (*chan->ops->close)(fd, chan->data); + return err; + } + + chan->fd = fd; + + chan->opened = 1; + return 0; +} + +static int open_chan(struct list_head *chans) +{ + struct list_head *ele; + struct chan *chan; + int ret, err = 0; + + list_for_each(ele, chans) { + chan = list_entry(ele, struct chan, list); + ret = open_one_chan(chan); + if (chan->primary) + err = ret; + } + return err; +} + +void chan_enable_winch(struct chan *chan, struct tty_port *port) +{ + if (chan && chan->primary && chan->ops->winch) + register_winch(chan->fd, port); +} + +static void line_timer_cb(struct work_struct *work) +{ + struct line *line = container_of(work, struct line, task.work); + + if (!line->throttled) + chan_interrupt(line, line->read_irq); +} + +int enable_chan(struct line *line) +{ + struct list_head *ele; + struct chan *chan; + int err; + + INIT_DELAYED_WORK(&line->task, line_timer_cb); + + list_for_each(ele, &line->chan_list) { + chan = list_entry(ele, struct chan, list); + err = open_one_chan(chan); + if (err) { + if (chan->primary) + goto out_close; + + continue; + } + + if (chan->enabled) + continue; + err = line_setup_irq(chan->fd, chan->input, chan->output, line, + chan); + if (err) + goto out_close; + + chan->enabled = 1; + } + + return 0; + + out_close: + close_chan(line); + return err; +} + +/* Items are added in IRQ context, when free_irq can't be called, and + * removed in process context, when it can. + * This handles interrupt sources which disappear, and which need to + * be permanently disabled. This is discovered in IRQ context, but + * the freeing of the IRQ must be done later. 
+ */ +static DEFINE_SPINLOCK(irqs_to_free_lock); +static LIST_HEAD(irqs_to_free); + +void free_irqs(void) +{ + struct chan *chan; + LIST_HEAD(list); + struct list_head *ele; + unsigned long flags; + + spin_lock_irqsave(&irqs_to_free_lock, flags); + list_splice_init(&irqs_to_free, &list); + spin_unlock_irqrestore(&irqs_to_free_lock, flags); + + list_for_each(ele, &list) { + chan = list_entry(ele, struct chan, free_list); + + if (chan->input && chan->enabled) + um_free_irq(chan->line->read_irq, chan); + if (chan->output && chan->enabled) + um_free_irq(chan->line->write_irq, chan); + chan->enabled = 0; + } +} + +static void close_one_chan(struct chan *chan, int delay_free_irq) +{ + unsigned long flags; + + if (!chan->opened) + return; + + if (delay_free_irq) { + spin_lock_irqsave(&irqs_to_free_lock, flags); + list_add(&chan->free_list, &irqs_to_free); + spin_unlock_irqrestore(&irqs_to_free_lock, flags); + } else { + if (chan->input && chan->enabled) + um_free_irq(chan->line->read_irq, chan); + if (chan->output && chan->enabled) + um_free_irq(chan->line->write_irq, chan); + chan->enabled = 0; + } + if (chan->ops->close != NULL) + (*chan->ops->close)(chan->fd, chan->data); + + chan->opened = 0; + chan->fd = -1; +} + +void close_chan(struct line *line) +{ + struct chan *chan; + + /* Close in reverse order as open in case more than one of them + * refers to the same device and they save and restore that device's + * state. Then, the first one opened will have the original state, + * so it must be the last closed. + */ + list_for_each_entry_reverse(chan, &line->chan_list, list) { + close_one_chan(chan, 0); + } +} + +void deactivate_chan(struct chan *chan, int irq) +{ + if (chan && chan->enabled) + deactivate_fd(chan->fd, irq); +} + +int write_chan(struct chan *chan, const char *buf, int len, + int write_irq) +{ + int n, ret = 0; + + if (len == 0 || !chan || !chan->ops->write) + return 0; + + n = chan->ops->write(chan->fd, buf, len, chan->data); + if (chan->primary) { + ret = n; + } + return ret; +} + +int console_write_chan(struct chan *chan, const char *buf, int len) +{ + int n, ret = 0; + + if (!chan || !chan->ops->console_write) + return 0; + + n = chan->ops->console_write(chan->fd, buf, len); + if (chan->primary) + ret = n; + return ret; +} + +int console_open_chan(struct line *line, struct console *co) +{ + int err; + + err = open_chan(&line->chan_list); + if (err) + return err; + + printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name, + co->index); + return 0; +} + +int chan_window_size(struct line *line, unsigned short *rows_out, + unsigned short *cols_out) +{ + struct chan *chan; + + chan = line->chan_in; + if (chan && chan->primary) { + if (chan->ops->window_size == NULL) + return 0; + return chan->ops->window_size(chan->fd, chan->data, + rows_out, cols_out); + } + chan = line->chan_out; + if (chan && chan->primary) { + if (chan->ops->window_size == NULL) + return 0; + return chan->ops->window_size(chan->fd, chan->data, + rows_out, cols_out); + } + return 0; +} + +static void free_one_chan(struct chan *chan) +{ + list_del(&chan->list); + + close_one_chan(chan, 0); + + if (chan->ops->free != NULL) + (*chan->ops->free)(chan->data); + + if (chan->primary && chan->output) + ignore_sigio_fd(chan->fd); + kfree(chan); +} + +static void free_chan(struct list_head *chans) +{ + struct list_head *ele, *next; + struct chan *chan; + + list_for_each_safe(ele, next, chans) { + chan = list_entry(ele, struct chan, list); + free_one_chan(chan); + } +} + +static int one_chan_config_string(struct 
chan *chan, char *str, int size, + char **error_out) +{ + int n = 0; + + if (chan == NULL) { + CONFIG_CHUNK(str, size, n, "none", 1); + return n; + } + + CONFIG_CHUNK(str, size, n, chan->ops->type, 0); + + if (chan->dev == NULL) { + CONFIG_CHUNK(str, size, n, "", 1); + return n; + } + + CONFIG_CHUNK(str, size, n, ":", 0); + CONFIG_CHUNK(str, size, n, chan->dev, 0); + + return n; +} + +static int chan_pair_config_string(struct chan *in, struct chan *out, + char *str, int size, char **error_out) +{ + int n; + + n = one_chan_config_string(in, str, size, error_out); + str += n; + size -= n; + + if (in == out) { + CONFIG_CHUNK(str, size, n, "", 1); + return n; + } + + CONFIG_CHUNK(str, size, n, ",", 1); + n = one_chan_config_string(out, str, size, error_out); + str += n; + size -= n; + CONFIG_CHUNK(str, size, n, "", 1); + + return n; +} + +int chan_config_string(struct line *line, char *str, int size, + char **error_out) +{ + struct chan *in = line->chan_in, *out = line->chan_out; + + if (in && !in->primary) + in = NULL; + if (out && !out->primary) + out = NULL; + + return chan_pair_config_string(in, out, str, size, error_out); +} + +struct chan_type { + char *key; + const struct chan_ops *ops; +}; + +static const struct chan_type chan_table[] = { + { "fd", &fd_ops }, + +#ifdef CONFIG_NULL_CHAN + { "null", &null_ops }, +#else + { "null", ¬_configged_ops }, +#endif + +#ifdef CONFIG_PORT_CHAN + { "port", &port_ops }, +#else + { "port", ¬_configged_ops }, +#endif + +#ifdef CONFIG_PTY_CHAN + { "pty", &pty_ops }, + { "pts", &pts_ops }, +#else + { "pty", ¬_configged_ops }, + { "pts", ¬_configged_ops }, +#endif + +#ifdef CONFIG_TTY_CHAN + { "tty", &tty_ops }, +#else + { "tty", ¬_configged_ops }, +#endif + +#ifdef CONFIG_XTERM_CHAN + { "xterm", &xterm_ops }, +#else + { "xterm", ¬_configged_ops }, +#endif +}; + +static struct chan *parse_chan(struct line *line, char *str, int device, + const struct chan_opts *opts, char **error_out) +{ + const struct chan_type *entry; + const struct chan_ops *ops; + struct chan *chan; + void *data; + int i; + + ops = NULL; + data = NULL; + for(i = 0; i < ARRAY_SIZE(chan_table); i++) { + entry = &chan_table[i]; + if (!strncmp(str, entry->key, strlen(entry->key))) { + ops = entry->ops; + str += strlen(entry->key); + break; + } + } + if (ops == NULL) { + *error_out = "No match for configured backends"; + return NULL; + } + + data = (*ops->init)(str, device, opts); + if (data == NULL) { + *error_out = "Configuration failed"; + return NULL; + } + + chan = kmalloc(sizeof(*chan), GFP_ATOMIC); + if (chan == NULL) { + *error_out = "Memory allocation failed"; + return NULL; + } + *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list), + .free_list = + LIST_HEAD_INIT(chan->free_list), + .line = line, + .primary = 1, + .input = 0, + .output = 0, + .opened = 0, + .enabled = 0, + .fd = -1, + .ops = ops, + .data = data }); + return chan; +} + +int parse_chan_pair(char *str, struct line *line, int device, + const struct chan_opts *opts, char **error_out) +{ + struct list_head *chans = &line->chan_list; + struct chan *new; + char *in, *out; + + if (!list_empty(chans)) { + line->chan_in = line->chan_out = NULL; + free_chan(chans); + INIT_LIST_HEAD(chans); + } + + if (!str) + return 0; + + out = strchr(str, ','); + if (out != NULL) { + in = str; + *out = '\0'; + out++; + new = parse_chan(line, in, device, opts, error_out); + if (new == NULL) + return -1; + + new->input = 1; + list_add(&new->list, chans); + line->chan_in = new; + + new = parse_chan(line, out, device, opts, error_out); 
+ if (new == NULL) + return -1; + + list_add(&new->list, chans); + new->output = 1; + line->chan_out = new; + } + else { + new = parse_chan(line, str, device, opts, error_out); + if (new == NULL) + return -1; + + list_add(&new->list, chans); + new->input = 1; + new->output = 1; + line->chan_in = line->chan_out = new; + } + return 0; +} + +void chan_interrupt(struct line *line, int irq) +{ + struct tty_port *port = &line->port; + struct chan *chan = line->chan_in; + int err; + char c; + + if (!chan || !chan->ops->read) + goto out; + + do { + if (!tty_buffer_request_room(port, 1)) { + schedule_delayed_work(&line->task, 1); + goto out; + } + err = chan->ops->read(chan->fd, &c, chan->data); + if (err > 0) + tty_insert_flip_char(port, c, TTY_NORMAL); + } while (err > 0); + + if (err == -EIO) { + if (chan->primary) { + tty_port_tty_hangup(&line->port, false); + if (line->chan_out != chan) + close_one_chan(line->chan_out, 1); + } + close_one_chan(chan, 1); + if (chan->primary) + return; + } + out: + tty_flip_buffer_push(port); +} diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c new file mode 100644 index 0000000000..25727ed648 --- /dev/null +++ b/arch/um/drivers/chan_user.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> +#include <sched.h> +#include <signal.h> +#include <termios.h> +#include <sys/ioctl.h> +#include "chan_user.h" +#include <os.h> +#include <um_malloc.h> + +void generic_close(int fd, void *unused) +{ + close(fd); +} + +int generic_read(int fd, char *c_out, void *unused) +{ + int n; + + n = read(fd, c_out, sizeof(*c_out)); + if (n > 0) + return n; + else if (n == 0) + return -EIO; + else if (errno == EAGAIN) + return 0; + return -errno; +} + +/* XXX Trivial wrapper around write */ + +int generic_write(int fd, const char *buf, int n, void *unused) +{ + int err; + + err = write(fd, buf, n); + if (err > 0) + return err; + else if (errno == EAGAIN) + return 0; + else if (err == 0) + return -EIO; + return -errno; +} + +int generic_window_size(int fd, void *unused, unsigned short *rows_out, + unsigned short *cols_out) +{ + struct winsize size; + int ret; + + if (ioctl(fd, TIOCGWINSZ, &size) < 0) + return -errno; + + ret = ((*rows_out != size.ws_row) || (*cols_out != size.ws_col)); + + *rows_out = size.ws_row; + *cols_out = size.ws_col; + + return ret; +} + +void generic_free(void *data) +{ + kfree(data); +} + +int generic_console_write(int fd, const char *buf, int n) +{ + sigset_t old, no_sigio; + struct termios save, new; + int err; + + if (isatty(fd)) { + sigemptyset(&no_sigio); + sigaddset(&no_sigio, SIGIO); + if (sigprocmask(SIG_BLOCK, &no_sigio, &old)) + goto error; + + CATCH_EINTR(err = tcgetattr(fd, &save)); + if (err) + goto error; + new = save; + /* + * The terminal becomes a bit less raw, to handle \n also as + * "Carriage Return", not only as "New Line". Otherwise, the new + * line won't start at the first column. + */ + new.c_oflag |= OPOST; + CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &new)); + if (err) + goto error; + } + err = generic_write(fd, buf, n, NULL); + /* + * Restore raw mode, in any case; we *must* ignore any error apart + * EINTR, except for debug. 
+ */ + if (isatty(fd)) { + CATCH_EINTR(tcsetattr(fd, TCSAFLUSH, &save)); + sigprocmask(SIG_SETMASK, &old, NULL); + } + + return err; +error: + return -errno; +} + +/* + * UML SIGWINCH handling + * + * The point of this is to handle SIGWINCH on consoles which have host + * ttys and relay them inside UML to whatever might be running on the + * console and cares about the window size (since SIGWINCH notifies + * about terminal size changes). + * + * So, we have a separate thread for each host tty attached to a UML + * device (side-issue - I'm annoyed that one thread can't have + * multiple controlling ttys for the purpose of handling SIGWINCH, but + * I imagine there are other reasons that doesn't make any sense). + * + * SIGWINCH can't be received synchronously, so you have to set up to + * receive it as a signal. That being the case, if you are going to + * wait for it, it is convenient to sit in sigsuspend() and wait for + * the signal to bounce you out of it (see below for how we make sure + * to exit only on SIGWINCH). + */ + +static void winch_handler(int sig) +{ +} + +struct winch_data { + int pty_fd; + int pipe_fd; +}; + +static int winch_thread(void *arg) +{ + struct winch_data *data = arg; + sigset_t sigs; + int pty_fd, pipe_fd; + int count; + char c = 1; + + pty_fd = data->pty_fd; + pipe_fd = data->pipe_fd; + count = write(pipe_fd, &c, sizeof(c)); + if (count != sizeof(c)) + printk(UM_KERN_ERR "winch_thread : failed to write " + "synchronization byte, err = %d\n", -count); + + /* + * We are not using SIG_IGN on purpose, so don't fix it as I thought to + * do! If using SIG_IGN, the sigsuspend() call below would not stop on + * SIGWINCH. + */ + + signal(SIGWINCH, winch_handler); + sigfillset(&sigs); + /* Block all signals possible. */ + if (sigprocmask(SIG_SETMASK, &sigs, NULL) < 0) { + printk(UM_KERN_ERR "winch_thread : sigprocmask failed, " + "errno = %d\n", errno); + exit(1); + } + /* In sigsuspend(), block anything else than SIGWINCH. */ + sigdelset(&sigs, SIGWINCH); + + if (setsid() < 0) { + printk(UM_KERN_ERR "winch_thread : setsid failed, errno = %d\n", + errno); + exit(1); + } + + if (ioctl(pty_fd, TIOCSCTTY, 0) < 0) { + printk(UM_KERN_ERR "winch_thread : TIOCSCTTY failed on " + "fd %d err = %d\n", pty_fd, errno); + exit(1); + } + + if (tcsetpgrp(pty_fd, os_getpid()) < 0) { + printk(UM_KERN_ERR "winch_thread : tcsetpgrp failed on " + "fd %d err = %d\n", pty_fd, errno); + exit(1); + } + + /* + * These are synchronization calls between various UML threads on the + * host - since they are not different kernel threads, we cannot use + * kernel semaphores. We don't use SysV semaphores because they are + * persistent. + */ + count = read(pipe_fd, &c, sizeof(c)); + if (count != sizeof(c)) + printk(UM_KERN_ERR "winch_thread : failed to read " + "synchronization byte, err = %d\n", errno); + + while(1) { + /* + * This will be interrupted by SIGWINCH only, since + * other signals are blocked. 
+ */ + sigsuspend(&sigs); + + count = write(pipe_fd, &c, sizeof(c)); + if (count != sizeof(c)) + printk(UM_KERN_ERR "winch_thread : write failed, " + "err = %d\n", errno); + } +} + +static int winch_tramp(int fd, struct tty_port *port, int *fd_out, + unsigned long *stack_out) +{ + struct winch_data data; + int fds[2], n, err, pid; + char c; + + err = os_pipe(fds, 1, 1); + if (err < 0) { + printk(UM_KERN_ERR "winch_tramp : os_pipe failed, err = %d\n", + -err); + goto out; + } + + data = ((struct winch_data) { .pty_fd = fd, + .pipe_fd = fds[1] } ); + /* + * CLONE_FILES so this thread doesn't hold open files which are open + * now, but later closed in a different thread. This is a + * problem with /dev/net/tun, which if held open by this + * thread, prevents the TUN/TAP device from being reused. + */ + pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out); + if (pid < 0) { + err = pid; + printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n", + -err); + goto out_close; + } + + *fd_out = fds[0]; + n = read(fds[0], &c, sizeof(c)); + if (n != sizeof(c)) { + printk(UM_KERN_ERR "winch_tramp : failed to read " + "synchronization byte\n"); + printk(UM_KERN_ERR "read failed, err = %d\n", errno); + printk(UM_KERN_ERR "fd %d will not support SIGWINCH\n", fd); + err = -EINVAL; + goto out_close; + } + + err = os_set_fd_block(*fd_out, 0); + if (err) { + printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd " + "non-blocking.\n"); + goto out_close; + } + + return pid; + + out_close: + close(fds[1]); + close(fds[0]); + out: + return err; +} + +void register_winch(int fd, struct tty_port *port) +{ + unsigned long stack; + int pid, thread, count, thread_fd = -1; + char c = 1; + + if (!isatty(fd)) + return; + + pid = tcgetpgrp(fd); + if (is_skas_winch(pid, fd, port)) { + register_winch_irq(-1, fd, -1, port, 0); + return; + } + + if (pid == -1) { + thread = winch_tramp(fd, port, &thread_fd, &stack); + if (thread < 0) + return; + + register_winch_irq(thread_fd, fd, thread, port, stack); + + count = write(thread_fd, &c, sizeof(c)); + if (count != sizeof(c)) + printk(UM_KERN_ERR "register_winch : failed to write " + "synchronization byte, err = %d\n", errno); + } +} diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h new file mode 100644 index 0000000000..4e51b85e2a --- /dev/null +++ b/arch/um/drivers/chan_user.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com) + */ + +#ifndef __CHAN_USER_H__ +#define __CHAN_USER_H__ + +#include <init.h> + +struct chan_opts { + void (*const announce)(char *dev_name, int dev); + char *xterm_title; + int raw; +}; + +struct chan_ops { + char *type; + void *(*init)(char *, int, const struct chan_opts *); + int (*open)(int, int, int, void *, char **); + void (*close)(int, void *); + int (*read)(int, char *, void *); + int (*write)(int, const char *, int, void *); + int (*console_write)(int, const char *, int); + int (*window_size)(int, void *, unsigned short *, unsigned short *); + void (*free)(void *); + int winch; +}; + +extern const struct chan_ops fd_ops, null_ops, port_ops, pts_ops, pty_ops, + tty_ops, xterm_ops; + +extern void generic_close(int fd, void *unused); +extern int generic_read(int fd, char *c_out, void *unused); +extern int generic_write(int fd, const char *buf, int n, void *unused); +extern int generic_console_write(int fd, const char *buf, int n); +extern int generic_window_size(int fd, void *unused, unsigned short *rows_out, + unsigned short 
*cols_out); +extern void generic_free(void *data); + +struct tty_port; +extern void register_winch(int fd, struct tty_port *port); +extern void register_winch_irq(int fd, int tty_fd, int pid, + struct tty_port *port, unsigned long stack); + +#define __channel_help(fn, prefix) \ +__uml_help(fn, prefix "[0-9]*=<channel description>\n" \ +" Attach a console or serial line to a host channel. See\n" \ +" http://user-mode-linux.sourceforge.net/old/input.html for a complete\n" \ +" description of this switch.\n\n" \ +); + +#endif diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h new file mode 100644 index 0000000000..9a67c01700 --- /dev/null +++ b/arch/um/drivers/cow.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __COW_H__ +#define __COW_H__ + +#include <asm/types.h> + +extern int init_cow_file(int fd, char *cow_file, char *backing_file, + int sectorsize, int alignment, int *bitmap_offset_out, + unsigned long *bitmap_len_out, int *data_offset_out); + +extern int file_reader(__u64 offset, char *buf, int len, void *arg); +extern int read_cow_header(int (*reader)(__u64, char *, int, void *), + void *arg, __u32 *version_out, + char **backing_file_out, long long *mtime_out, + unsigned long long *size_out, int *sectorsize_out, + __u32 *align_out, int *bitmap_offset_out); + +extern int write_cow_header(char *cow_file, int fd, char *backing_file, + int sectorsize, int alignment, + unsigned long long *size); + +extern void cow_sizes(int version, __u64 size, int sectorsize, int align, + int bitmap_offset, unsigned long *bitmap_len_out, + int *data_offset_out); + +#endif diff --git a/arch/um/drivers/cow_sys.h b/arch/um/drivers/cow_sys.h new file mode 100644 index 0000000000..916811ef53 --- /dev/null +++ b/arch/um/drivers/cow_sys.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __COW_SYS_H__ +#define __COW_SYS_H__ + +#include <kern_util.h> +#include <os.h> +#include <um_malloc.h> + +static inline void *cow_malloc(int size) +{ + return uml_kmalloc(size, UM_GFP_KERNEL); +} + +static inline void cow_free(void *ptr) +{ + kfree(ptr); +} + +#define cow_printf printk + +static inline char *cow_strdup(char *str) +{ + return uml_strdup(str); +} + +static inline int cow_seek_file(int fd, __u64 offset) +{ + return os_seek_file(fd, offset); +} + +static inline int cow_file_size(char *file, unsigned long long *size_out) +{ + return os_file_size(file, size_out); +} + +static inline int cow_write_file(int fd, void *buf, int size) +{ + return os_write_file(fd, buf, size); +} + +#endif diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c new file mode 100644 index 0000000000..29b46581dd --- /dev/null +++ b/arch/um/drivers/cow_user.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +/* + * _XOPEN_SOURCE is needed for pread, but we define _GNU_SOURCE, which defines + * that. + */ +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <arpa/inet.h> +#include <endian.h> +#include "cow.h" +#include "cow_sys.h" + +#define PATH_LEN_V1 256 + +/* unsigned time_t works until year 2106 */ +typedef __u32 time32_t; + +struct cow_header_v1 { + __s32 magic; + __s32 version; + char backing_file[PATH_LEN_V1]; + time32_t mtime; + __u64 size; + __s32 sectorsize; +} __attribute__((packed)); + +/* + * Define PATH_LEN_V3 as the usual value of MAXPATHLEN, just hard-code it in + * case other systems have different values for MAXPATHLEN. 
+ * + * The same must hold for V2 - we want file format compatibility, not anything + * else. + */ +#define PATH_LEN_V3 4096 +#define PATH_LEN_V2 PATH_LEN_V3 + +struct cow_header_v2 { + __u32 magic; + __u32 version; + char backing_file[PATH_LEN_V2]; + time32_t mtime; + __u64 size; + __s32 sectorsize; +} __attribute__((packed)); + +/* + * Changes from V2 - + * PATH_LEN_V3 as described above + * Explicitly specify field bit lengths for systems with different + * lengths for the usual C types. Not sure whether char or + * time_t should be changed, this can be changed later without + * breaking compatibility + * Add alignment field so that different alignments can be used for the + * bitmap and data + * Add cow_format field to allow for the possibility of different ways + * of specifying the COW blocks. For now, the only value is 0, + * for the traditional COW bitmap. + * Move the backing_file field to the end of the header. This allows + * for the possibility of expanding it into the padding required + * by the bitmap alignment. + * The bitmap and data portions of the file will be aligned as specified + * by the alignment field. This is to allow COW files to be + * put on devices with restrictions on access alignments, such as + * /dev/raw, with a 512 byte alignment restriction. This also + * allows the data to be more aligned more strictly than on + * sector boundaries. This is needed for ubd-mmap, which needs + * the data to be page aligned. + * Fixed (finally!) the rounding bug + */ + +/* + * Until Dec2005, __attribute__((packed)) was left out from the below + * definition, leading on 64-bit systems to 4 bytes of padding after mtime, to + * align size to 8-byte alignment. This shifted all fields above (no padding + * was present on 32-bit, no other padding was added). + * + * However, this _can be detected_: it means that cow_format (always 0 until + * now) is shifted onto the first 4 bytes of backing_file, where it is otherwise + * impossible to find 4 zeros. -bb */ + +struct cow_header_v3 { + __u32 magic; + __u32 version; + __u32 mtime; + __u64 size; + __u32 sectorsize; + __u32 alignment; + __u32 cow_format; + char backing_file[PATH_LEN_V3]; +} __attribute__((packed)); + +/* This is the broken layout used by some 64-bit binaries. 
*/ +struct cow_header_v3_broken { + __u32 magic; + __u32 version; + __s64 mtime; + __u64 size; + __u32 sectorsize; + __u32 alignment; + __u32 cow_format; + char backing_file[PATH_LEN_V3]; +}; + +/* COW format definitions - for now, we have only the usual COW bitmap */ +#define COW_BITMAP 0 + +union cow_header { + struct cow_header_v1 v1; + struct cow_header_v2 v2; + struct cow_header_v3 v3; + struct cow_header_v3_broken v3_b; +}; + +#define COW_MAGIC 0x4f4f4f4d /* MOOO */ +#define COW_VERSION 3 + +#define DIV_ROUND(x, len) (((x) + (len) - 1) / (len)) +#define ROUND_UP(x, align) DIV_ROUND(x, align) * (align) + +void cow_sizes(int version, __u64 size, int sectorsize, int align, + int bitmap_offset, unsigned long *bitmap_len_out, + int *data_offset_out) +{ + if (version < 3) { + *bitmap_len_out = (size + sectorsize - 1) / (8 * sectorsize); + + *data_offset_out = bitmap_offset + *bitmap_len_out; + *data_offset_out = (*data_offset_out + sectorsize - 1) / + sectorsize; + *data_offset_out *= sectorsize; + } + else { + *bitmap_len_out = DIV_ROUND(size, sectorsize); + *bitmap_len_out = DIV_ROUND(*bitmap_len_out, 8); + + *data_offset_out = bitmap_offset + *bitmap_len_out; + *data_offset_out = ROUND_UP(*data_offset_out, align); + } +} + +static int absolutize(char *to, int size, char *from) +{ + char save_cwd[256], *slash; + int remaining; + + if (getcwd(save_cwd, sizeof(save_cwd)) == NULL) { + cow_printf("absolutize : unable to get cwd - errno = %d\n", + errno); + return -1; + } + slash = strrchr(from, '/'); + if (slash != NULL) { + *slash = '\0'; + if (chdir(from)) { + *slash = '/'; + cow_printf("absolutize : Can't cd to '%s' - " + "errno = %d\n", from, errno); + return -1; + } + *slash = '/'; + if (getcwd(to, size) == NULL) { + cow_printf("absolutize : unable to get cwd of '%s' - " + "errno = %d\n", from, errno); + return -1; + } + remaining = size - strlen(to); + if (strlen(slash) + 1 > remaining) { + cow_printf("absolutize : unable to fit '%s' into %d " + "chars\n", from, size); + return -1; + } + strcat(to, slash); + } + else { + if (strlen(save_cwd) + 1 + strlen(from) + 1 > size) { + cow_printf("absolutize : unable to fit '%s' into %d " + "chars\n", from, size); + return -1; + } + strcpy(to, save_cwd); + strcat(to, "/"); + strcat(to, from); + } + if (chdir(save_cwd)) { + cow_printf("absolutize : Can't cd to '%s' - " + "errno = %d\n", save_cwd, errno); + return -1; + } + return 0; +} + +int write_cow_header(char *cow_file, int fd, char *backing_file, + int sectorsize, int alignment, unsigned long long *size) +{ + struct cow_header_v3 *header; + long long modtime; + int err; + + err = cow_seek_file(fd, 0); + if (err < 0) { + cow_printf("write_cow_header - lseek failed, err = %d\n", -err); + goto out; + } + + err = -ENOMEM; + header = cow_malloc(sizeof(*header)); + if (header == NULL) { + cow_printf("write_cow_header - failed to allocate COW V3 " + "header\n"); + goto out; + } + header->magic = htobe32(COW_MAGIC); + header->version = htobe32(COW_VERSION); + + err = -EINVAL; + if (strlen(backing_file) > sizeof(header->backing_file) - 1) { + /* Below, %zd is for a size_t value */ + cow_printf("Backing file name \"%s\" is too long - names are " + "limited to %zd characters\n", backing_file, + sizeof(header->backing_file) - 1); + goto out_free; + } + + if (absolutize(header->backing_file, sizeof(header->backing_file), + backing_file)) + goto out_free; + + err = os_file_modtime(header->backing_file, &modtime); + if (err < 0) { + cow_printf("write_cow_header - backing file '%s' mtime " + "request 
failed, err = %d\n", header->backing_file, + -err); + goto out_free; + } + + err = cow_file_size(header->backing_file, size); + if (err < 0) { + cow_printf("write_cow_header - couldn't get size of " + "backing file '%s', err = %d\n", + header->backing_file, -err); + goto out_free; + } + + header->mtime = htobe32(modtime); + header->size = htobe64(*size); + header->sectorsize = htobe32(sectorsize); + header->alignment = htobe32(alignment); + header->cow_format = COW_BITMAP; + + err = cow_write_file(fd, header, sizeof(*header)); + if (err != sizeof(*header)) { + cow_printf("write_cow_header - write of header to " + "new COW file '%s' failed, err = %d\n", cow_file, + -err); + goto out_free; + } + err = 0; + out_free: + cow_free(header); + out: + return err; +} + +int file_reader(__u64 offset, char *buf, int len, void *arg) +{ + int fd = *((int *) arg); + + return pread(fd, buf, len, offset); +} + +/* XXX Need to sanity-check the values read from the header */ + +int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, + __u32 *version_out, char **backing_file_out, + long long *mtime_out, unsigned long long *size_out, + int *sectorsize_out, __u32 *align_out, + int *bitmap_offset_out) +{ + union cow_header *header; + char *file; + int err, n; + unsigned long version, magic; + + header = cow_malloc(sizeof(*header)); + if (header == NULL) { + cow_printf("read_cow_header - Failed to allocate header\n"); + return -ENOMEM; + } + err = -EINVAL; + n = (*reader)(0, (char *) header, sizeof(*header), arg); + if (n < offsetof(typeof(header->v1), backing_file)) { + cow_printf("read_cow_header - short header\n"); + goto out; + } + + magic = header->v1.magic; + if (magic == COW_MAGIC) + version = header->v1.version; + else if (magic == be32toh(COW_MAGIC)) + version = be32toh(header->v1.version); + /* No error printed because the non-COW case comes through here */ + else goto out; + + *version_out = version; + + if (version == 1) { + if (n < sizeof(header->v1)) { + cow_printf("read_cow_header - failed to read V1 " + "header\n"); + goto out; + } + *mtime_out = header->v1.mtime; + *size_out = header->v1.size; + *sectorsize_out = header->v1.sectorsize; + *bitmap_offset_out = sizeof(header->v1); + *align_out = *sectorsize_out; + file = header->v1.backing_file; + } + else if (version == 2) { + if (n < sizeof(header->v2)) { + cow_printf("read_cow_header - failed to read V2 " + "header\n"); + goto out; + } + *mtime_out = be32toh(header->v2.mtime); + *size_out = be64toh(header->v2.size); + *sectorsize_out = be32toh(header->v2.sectorsize); + *bitmap_offset_out = sizeof(header->v2); + *align_out = *sectorsize_out; + file = header->v2.backing_file; + } + /* This is very subtle - see above at union cow_header definition */ + else if (version == 3 && (*((int*)header->v3.backing_file) != 0)) { + if (n < sizeof(header->v3)) { + cow_printf("read_cow_header - failed to read V3 " + "header\n"); + goto out; + } + *mtime_out = be32toh(header->v3.mtime); + *size_out = be64toh(header->v3.size); + *sectorsize_out = be32toh(header->v3.sectorsize); + *align_out = be32toh(header->v3.alignment); + if (*align_out == 0) { + cow_printf("read_cow_header - invalid COW header, " + "align == 0\n"); + } + *bitmap_offset_out = ROUND_UP(sizeof(header->v3), *align_out); + file = header->v3.backing_file; + } + else if (version == 3) { + cow_printf("read_cow_header - broken V3 file with" + " 64-bit layout - recovering content.\n"); + + if (n < sizeof(header->v3_b)) { + cow_printf("read_cow_header - failed to read V3 " + 
"header\n"); + goto out; + } + + /* + * this was used until Dec2005 - 64bits are needed to represent + * 2106+. I.e. we can safely do this truncating cast. + * + * Additionally, we must use be32toh() instead of be64toh(), since + * the program used to use the former (tested - I got mtime + * mismatch "0 vs whatever"). + * + * Ever heard about bug-to-bug-compatibility ? ;-) */ + *mtime_out = (time32_t) be32toh(header->v3_b.mtime); + + *size_out = be64toh(header->v3_b.size); + *sectorsize_out = be32toh(header->v3_b.sectorsize); + *align_out = be32toh(header->v3_b.alignment); + if (*align_out == 0) { + cow_printf("read_cow_header - invalid COW header, " + "align == 0\n"); + } + *bitmap_offset_out = ROUND_UP(sizeof(header->v3_b), *align_out); + file = header->v3_b.backing_file; + } + else { + cow_printf("read_cow_header - invalid COW version\n"); + goto out; + } + err = -ENOMEM; + *backing_file_out = cow_strdup(file); + if (*backing_file_out == NULL) { + cow_printf("read_cow_header - failed to allocate backing " + "file\n"); + goto out; + } + err = 0; + out: + cow_free(header); + return err; +} + +int init_cow_file(int fd, char *cow_file, char *backing_file, int sectorsize, + int alignment, int *bitmap_offset_out, + unsigned long *bitmap_len_out, int *data_offset_out) +{ + unsigned long long size, offset; + char zero = 0; + int err; + + err = write_cow_header(cow_file, fd, backing_file, sectorsize, + alignment, &size); + if (err) + goto out; + + *bitmap_offset_out = ROUND_UP(sizeof(struct cow_header_v3), alignment); + cow_sizes(COW_VERSION, size, sectorsize, alignment, *bitmap_offset_out, + bitmap_len_out, data_offset_out); + + offset = *data_offset_out + size - sizeof(zero); + err = cow_seek_file(fd, offset); + if (err < 0) { + cow_printf("cow bitmap lseek failed : err = %d\n", -err); + goto out; + } + + /* + * does not really matter how much we write it is just to set EOF + * this also sets the entire COW bitmap + * to zero without having to allocate it + */ + err = cow_write_file(fd, &zero, sizeof(zero)); + if (err != sizeof(zero)) { + cow_printf("Write of bitmap to new COW file '%s' failed, " + "err = %d\n", cow_file, -err); + if (err >= 0) + err = -EINVAL; + goto out; + } + + return 0; + out: + return err; +} diff --git a/arch/um/drivers/daemon.h b/arch/um/drivers/daemon.h new file mode 100644 index 0000000000..1509cc7eb9 --- /dev/null +++ b/arch/um/drivers/daemon.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __DAEMON_H__ +#define __DAEMON_H__ + +#include <net_user.h> + +#define SWITCH_VERSION 3 + +struct daemon_data { + char *sock_type; + char *ctl_sock; + void *ctl_addr; + void *data_addr; + void *local_addr; + int fd; + int control; + void *dev; +}; + +extern const struct net_user_info daemon_user_info; + +extern int daemon_user_write(int fd, void *buf, int len, + struct daemon_data *pri); + +#endif diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c new file mode 100644 index 0000000000..afde1e82c0 --- /dev/null +++ b/arch/um/drivers/daemon_kern.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright (C) 2001 by various other people who didn't put their name here. 
+ */ + +#include <linux/init.h> +#include <linux/netdevice.h> +#include <net_kern.h> +#include "daemon.h" + +struct daemon_init { + char *sock_type; + char *ctl_sock; +}; + +static void daemon_init(struct net_device *dev, void *data) +{ + struct uml_net_private *pri; + struct daemon_data *dpri; + struct daemon_init *init = data; + + pri = netdev_priv(dev); + dpri = (struct daemon_data *) pri->user; + dpri->sock_type = init->sock_type; + dpri->ctl_sock = init->ctl_sock; + dpri->fd = -1; + dpri->control = -1; + dpri->dev = dev; + /* We will free this pointer. If it contains crap we're burned. */ + dpri->ctl_addr = NULL; + dpri->data_addr = NULL; + dpri->local_addr = NULL; + + printk("daemon backend (uml_switch version %d) - %s:%s", + SWITCH_VERSION, dpri->sock_type, dpri->ctl_sock); + printk("\n"); +} + +static int daemon_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return net_recvfrom(fd, skb_mac_header(skb), + skb->dev->mtu + ETH_HEADER_OTHER); +} + +static int daemon_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return daemon_user_write(fd, skb->data, skb->len, + (struct daemon_data *) &lp->user); +} + +static const struct net_kern_info daemon_kern_info = { + .init = daemon_init, + .protocol = eth_protocol, + .read = daemon_read, + .write = daemon_write, +}; + +static int daemon_setup(char *str, char **mac_out, void *data) +{ + struct daemon_init *init = data; + char *remain; + + *init = ((struct daemon_init) + { .sock_type = "unix", + .ctl_sock = CONFIG_UML_NET_DAEMON_DEFAULT_SOCK }); + + remain = split_if_spec(str, mac_out, &init->sock_type, &init->ctl_sock, + NULL); + if (remain != NULL) + printk(KERN_WARNING "daemon_setup : Ignoring data socket " + "specification\n"); + + return 1; +} + +static struct transport daemon_transport = { + .list = LIST_HEAD_INIT(daemon_transport.list), + .name = "daemon", + .setup = daemon_setup, + .user = &daemon_user_info, + .kern = &daemon_kern_info, + .private_size = sizeof(struct daemon_data), + .setup_size = sizeof(struct daemon_init), +}; + +static int register_daemon(void) +{ + register_transport(&daemon_transport); + return 0; +} + +late_initcall(register_daemon); diff --git a/arch/um/drivers/daemon_user.c b/arch/um/drivers/daemon_user.c new file mode 100644 index 0000000000..785baedc35 --- /dev/null +++ b/arch/um/drivers/daemon_user.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 by various other people who didn't put their name here. 
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/un.h>
+#include "daemon.h"
+#include <net_user.h>
+#include <os.h>
+#include <um_malloc.h>
+
+enum request_type { REQ_NEW_CONTROL };
+
+#define SWITCH_MAGIC 0xfeedface
+
+struct request_v3 {
+	uint32_t magic;
+	uint32_t version;
+	enum request_type type;
+	struct sockaddr_un sock;
+};
+
+static struct sockaddr_un *new_addr(void *name, int len)
+{
+	struct sockaddr_un *sun;
+
+	sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
+	if (sun == NULL) {
+		printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un "
+		       "failed\n");
+		return NULL;
+	}
+	sun->sun_family = AF_UNIX;
+	memcpy(sun->sun_path, name, len);
+	return sun;
+}
+
+static int connect_to_switch(struct daemon_data *pri)
+{
+	struct sockaddr_un *ctl_addr = pri->ctl_addr;
+	struct sockaddr_un *local_addr = pri->local_addr;
+	struct sockaddr_un *sun;
+	struct request_v3 req;
+	int fd, n, err;
+
+	pri->control = socket(AF_UNIX, SOCK_STREAM, 0);
+	if (pri->control < 0) {
+		err = -errno;
+		printk(UM_KERN_ERR "daemon_open : control socket failed, "
+		       "errno = %d\n", -err);
+		return err;
+	}
+
+	if (connect(pri->control, (struct sockaddr *) ctl_addr,
+		   sizeof(*ctl_addr)) < 0) {
+		err = -errno;
+		printk(UM_KERN_ERR "daemon_open : control connect failed, "
+		       "errno = %d\n", -err);
+		goto out;
+	}
+
+	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+	if (fd < 0) {
+		err = -errno;
+		printk(UM_KERN_ERR "daemon_open : data socket failed, "
+		       "errno = %d\n", -err);
+		goto out;
+	}
+	if (bind(fd, (struct sockaddr *) local_addr, sizeof(*local_addr)) < 0) {
+		err = -errno;
+		printk(UM_KERN_ERR "daemon_open : data bind failed, "
+		       "errno = %d\n", -err);
+		goto out_close;
+	}
+
+	sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
+	if (sun == NULL) {
+		printk(UM_KERN_ERR "connect_to_switch: allocation of "
+		       "sockaddr_un failed\n");
+		err = -ENOMEM;
+		goto out_close;
+	}
+
+	req.magic = SWITCH_MAGIC;
+	req.version = SWITCH_VERSION;
+	req.type = REQ_NEW_CONTROL;
+	req.sock = *local_addr;
+	n = write(pri->control, &req, sizeof(req));
+	if (n != sizeof(req)) {
+		printk(UM_KERN_ERR "daemon_open : control setup request "
+		       "failed, err = %d\n", -errno);
+		err = -ENOTCONN;
+		goto out_free;
+	}
+
+	n = read(pri->control, sun, sizeof(*sun));
+	if (n != sizeof(*sun)) {
+		printk(UM_KERN_ERR "daemon_open : read of data socket failed, "
+		       "err = %d\n", -errno);
+		err = -ENOTCONN;
+		goto out_free;
+	}
+
+	pri->data_addr = sun;
+	return fd;
+
+ out_free:
+	kfree(sun);
+ out_close:
+	close(fd);
+ out:
+	close(pri->control);
+	return err;
+}
+
+static int daemon_user_init(void *data, void *dev)
+{
+	struct daemon_data *pri = data;
+	struct timeval tv;
+	struct {
+		char zero;
+		int pid;
+		int usecs;
+	} name;
+
+	if (!strcmp(pri->sock_type, "unix"))
+		pri->ctl_addr = new_addr(pri->ctl_sock,
+					 strlen(pri->ctl_sock) + 1);
+	name.zero = 0;
+	name.pid = os_getpid();
+	gettimeofday(&tv, NULL);
+	name.usecs = tv.tv_usec;
+	pri->local_addr = new_addr(&name, sizeof(name));
+	pri->dev = dev;
+	pri->fd = connect_to_switch(pri);
+	if (pri->fd < 0) {
+		kfree(pri->local_addr);
+		pri->local_addr = NULL;
+		return pri->fd;
+	}
+
+	return 0;
+}
+
+static int daemon_open(void *data)
+{
+	struct daemon_data *pri = data;
+	return pri->fd;
+}
+
+static void daemon_remove(void *data)
+{
+	struct daemon_data *pri = data;
+
+	close(pri->fd);
+	pri->fd = -1;
+	close(pri->control);
+	pri->control = -1;
+
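+	/* Free the addresses allocated in daemon_user_init() and
+	 * connect_to_switch(). */
+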
kfree(pri->data_addr); + pri->data_addr = NULL; + kfree(pri->ctl_addr); + pri->ctl_addr = NULL; + kfree(pri->local_addr); + pri->local_addr = NULL; +} + +int daemon_user_write(int fd, void *buf, int len, struct daemon_data *pri) +{ + struct sockaddr_un *data_addr = pri->data_addr; + + return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr)); +} + +const struct net_user_info daemon_user_info = { + .init = daemon_user_init, + .open = daemon_open, + .close = NULL, + .remove = daemon_remove, + .add_address = NULL, + .delete_address = NULL, + .mtu = ETH_MAX_PACKET, + .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER, +}; diff --git a/arch/um/drivers/fd.c b/arch/um/drivers/fd.c new file mode 100644 index 0000000000..082d739dc0 --- /dev/null +++ b/arch/um/drivers/fd.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> +#include <termios.h> +#include "chan_user.h" +#include <os.h> +#include <um_malloc.h> + +struct fd_chan { + int fd; + int raw; + struct termios tt; + char str[sizeof("1234567890\0")]; +}; + +static void *fd_init(char *str, int device, const struct chan_opts *opts) +{ + struct fd_chan *data; + char *end; + int n; + + if (*str != ':') { + printk(UM_KERN_ERR "fd_init : channel type 'fd' must specify a " + "file descriptor\n"); + return NULL; + } + str++; + n = strtoul(str, &end, 0); + if ((*end != '\0') || (end == str)) { + printk(UM_KERN_ERR "fd_init : couldn't parse file descriptor " + "'%s'\n", str); + return NULL; + } + + data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL); + if (data == NULL) + return NULL; + + *data = ((struct fd_chan) { .fd = n, + .raw = opts->raw }); + return data; +} + +static int fd_open(int input, int output, int primary, void *d, char **dev_out) +{ + struct fd_chan *data = d; + int err; + + if (data->raw && isatty(data->fd)) { + CATCH_EINTR(err = tcgetattr(data->fd, &data->tt)); + if (err) + return err; + + err = raw(data->fd); + if (err) + return err; + } + sprintf(data->str, "%d", data->fd); + *dev_out = data->str; + return data->fd; +} + +static void fd_close(int fd, void *d) +{ + struct fd_chan *data = d; + int err; + + if (!data->raw || !isatty(fd)) + return; + + CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &data->tt)); + if (err) + printk(UM_KERN_ERR "Failed to restore terminal state - " + "errno = %d\n", -err); + data->raw = 0; +} + +const struct chan_ops fd_ops = { + .type = "fd", + .init = fd_init, + .open = fd_open, + .close = fd_close, + .read = generic_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = generic_free, + .winch = 1, +}; diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h new file mode 100644 index 0000000000..6d9ea60e71 --- /dev/null +++ b/arch/um/drivers/harddog.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef UM_WATCHDOG_H +#define UM_WATCHDOG_H + +int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); +void stop_watchdog(int in_fd, int out_fd); +int ping_watchdog(int fd); + +#endif /* UM_WATCHDOG_H */ diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c new file mode 100644 index 0000000000..60d1c6cab8 --- /dev/null +++ b/arch/um/drivers/harddog_kern.c @@ -0,0 +1,174 @@ +/* UML hardware watchdog, shamelessly stolen from: + * + * SoftDog 0.05: A Software Watchdog Device + * + * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All 
Rights Reserved.
+ * http://www.redhat.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
+ * warranty for any of this software. This material is provided
+ * "AS-IS" and at no charge.
+ *
+ * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Software only watchdog driver. Unlike its big brother the WDT501P
+ * driver this won't always recover a failed machine.
+ *
+ * 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> :
+ * Modularised.
+ * Added soft_margin; use upon insmod to change the timer delay.
+ * NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate
+ * minors.
+ *
+ * 19980911 Alan Cox
+ * Made SMP safe for 2.3.x
+ *
+ * 20011127 Joel Becker <jlbec@evilplan.org>
+ * Added soft_noboot; Allows testing the softdog trigger without
+ * requiring a recompile.
+ * Added WDIOC_GETTIMEOUT and WDIOC_SETTIMEOUT.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "mconsole.h"
+#include "harddog.h"
+
+MODULE_LICENSE("GPL");
+
+static DEFINE_MUTEX(harddog_mutex);
+static DEFINE_SPINLOCK(lock);
+static int timer_alive;
+static int harddog_in_fd = -1;
+static int harddog_out_fd = -1;
+
+/*
+ * Allow only one person to hold it open
+ */
+
+static int harddog_open(struct inode *inode, struct file *file)
+{
+	int err = -EBUSY;
+	char *sock = NULL;
+
+	mutex_lock(&harddog_mutex);
+	spin_lock(&lock);
+	if(timer_alive)
+		goto err;
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+	__module_get(THIS_MODULE);
+#endif
+
+#ifdef CONFIG_MCONSOLE
+	sock = mconsole_notify_socket();
+#endif
+	err = start_watchdog(&harddog_in_fd, &harddog_out_fd, sock);
+	if(err)
+		goto err;
+
+	timer_alive = 1;
+	spin_unlock(&lock);
+	mutex_unlock(&harddog_mutex);
+	return stream_open(inode, file);
+err:
+	spin_unlock(&lock);
+	mutex_unlock(&harddog_mutex);
+	return err;
+}
+
+static int harddog_release(struct inode *inode, struct file *file)
+{
+	/*
+	 * Shut off the timer.
+	 */
+
+	spin_lock(&lock);
+
+	stop_watchdog(harddog_in_fd, harddog_out_fd);
+	harddog_in_fd = -1;
+	harddog_out_fd = -1;
+
+	timer_alive=0;
+	spin_unlock(&lock);
+
+	return 0;
+}
+
+static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
+			     loff_t *ppos)
+{
+	/*
+	 * Refresh the timer.
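+	 * Any non-empty write counts as a single ping. A minimal user-space
+	 * pinger (an illustrative sketch, not part of this driver) could be:
+	 *
+	 *	int fd = open("/dev/watchdog", O_WRONLY);
+	 *	for (;;) { write(fd, "\0", 1); sleep(10); }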
+ */ + if(len) + return ping_watchdog(harddog_out_fd); + return 0; +} + +static int harddog_ioctl_unlocked(struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp= (void __user *)arg; + static struct watchdog_info ident = { + WDIOC_SETTIMEOUT, + 0, + "UML Hardware Watchdog" + }; + switch (cmd) { + default: + return -ENOTTY; + case WDIOC_GETSUPPORT: + if(copy_to_user(argp, &ident, sizeof(ident))) + return -EFAULT; + return 0; + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0,(int __user *)argp); + case WDIOC_KEEPALIVE: + return ping_watchdog(harddog_out_fd); + } +} + +static long harddog_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + long ret; + + mutex_lock(&harddog_mutex); + ret = harddog_ioctl_unlocked(file, cmd, arg); + mutex_unlock(&harddog_mutex); + + return ret; +} + +static const struct file_operations harddog_fops = { + .owner = THIS_MODULE, + .write = harddog_write, + .unlocked_ioctl = harddog_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .open = harddog_open, + .release = harddog_release, + .llseek = no_llseek, +}; + +static struct miscdevice harddog_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &harddog_fops, +}; +module_misc_device(harddog_miscdev); diff --git a/arch/um/drivers/harddog_user.c b/arch/um/drivers/harddog_user.c new file mode 100644 index 0000000000..9ed8930497 --- /dev/null +++ b/arch/um/drivers/harddog_user.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <stdio.h> +#include <unistd.h> +#include <errno.h> +#include <os.h> +#include "harddog.h" + +struct dog_data { + int stdin_fd; + int stdout_fd; + int close_me[2]; +}; + +static void pre_exec(void *d) +{ + struct dog_data *data = d; + + dup2(data->stdin_fd, 0); + dup2(data->stdout_fd, 1); + dup2(data->stdout_fd, 2); + close(data->stdin_fd); + close(data->stdout_fd); + close(data->close_me[0]); + close(data->close_me[1]); +} + +int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock) +{ + struct dog_data data; + int in_fds[2], out_fds[2], pid, n, err; + char pid_buf[sizeof("nnnnnnn\0")], c; + char *pid_args[] = { "/usr/bin/uml_watchdog", "-pid", pid_buf, NULL }; + char *mconsole_args[] = { "/usr/bin/uml_watchdog", "-mconsole", NULL, + NULL }; + char **args = NULL; + + err = os_pipe(in_fds, 1, 0); + if (err < 0) { + printk("harddog_open - os_pipe failed, err = %d\n", -err); + goto out; + } + + err = os_pipe(out_fds, 1, 0); + if (err < 0) { + printk("harddog_open - os_pipe failed, err = %d\n", -err); + goto out_close_in; + } + + data.stdin_fd = out_fds[0]; + data.stdout_fd = in_fds[1]; + data.close_me[0] = out_fds[1]; + data.close_me[1] = in_fds[0]; + + if (sock != NULL) { + mconsole_args[2] = sock; + args = mconsole_args; + } + else { + /* XXX The os_getpid() is not SMP correct */ + sprintf(pid_buf, "%d", os_getpid()); + args = pid_args; + } + + pid = run_helper(pre_exec, &data, args); + + close(out_fds[0]); + close(in_fds[1]); + + if (pid < 0) { + err = -pid; + printk("harddog_open - run_helper failed, errno = %d\n", -err); + goto out_close_out; + } + + n = read(in_fds[0], &c, sizeof(c)); + if (n == 0) { + printk("harddog_open - EOF on watchdog pipe\n"); + helper_wait(pid); + err = -EIO; + goto out_close_out; + } + else if (n < 0) { + printk("harddog_open - read of watchdog pipe failed, " + "err = %d\n", errno); + helper_wait(pid); + err = n; + goto out_close_out; + } + *in_fd_ret = in_fds[0]; + *out_fd_ret = 
out_fds[1]; + return 0; + + out_close_in: + close(in_fds[0]); + close(in_fds[1]); + out_close_out: + close(out_fds[0]); + close(out_fds[1]); + out: + return err; +} + +void stop_watchdog(int in_fd, int out_fd) +{ + close(in_fd); + close(out_fd); +} + +int ping_watchdog(int fd) +{ + int n; + char c = '\n'; + + n = write(fd, &c, sizeof(c)); + if (n != sizeof(c)) { + printk("ping_watchdog - write failed, ret = %d, err = %d\n", + n, errno); + if (n < 0) + return n; + return -EIO; + } + return 1; + +} diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c new file mode 100644 index 0000000000..c74d4b815d --- /dev/null +++ b/arch/um/drivers/harddog_user_exp.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> +#include "harddog.h" + +#if IS_MODULE(CONFIG_UML_WATCHDOG) +EXPORT_SYMBOL(start_watchdog); +EXPORT_SYMBOL(stop_watchdog); +EXPORT_SYMBOL(ping_watchdog); +#endif diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c new file mode 100644 index 0000000000..c42b793bce --- /dev/null +++ b/arch/um/drivers/hostaudio_kern.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2002 Steve Schmidtke + */ + +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/sound.h> +#include <linux/soundcard.h> +#include <linux/mutex.h> +#include <linux/uaccess.h> +#include <init.h> +#include <os.h> + +struct hostaudio_state { + int fd; +}; + +struct hostmixer_state { + int fd; +}; + +#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp" +#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer" + +/* + * Changed either at boot time or module load time. At boot, this is + * single-threaded; at module load, multiple modules would each have + * their own copy of these variables. 
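+ *
+ * For example (illustrative command lines, assuming the module is built as
+ * "hostaudio"): booting UML with "dsp=/dev/dsp1", or loading the module with
+ * "modprobe hostaudio dsp=/dev/dsp1", selects a different host dsp device.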
+ */ +static char *dsp = HOSTAUDIO_DEV_DSP; +static char *mixer = HOSTAUDIO_DEV_MIXER; + +#define DSP_HELP \ +" This is used to specify the host dsp device to the hostaudio driver.\n" \ +" The default is \"" HOSTAUDIO_DEV_DSP "\".\n\n" + +#define MIXER_HELP \ +" This is used to specify the host mixer device to the hostaudio driver.\n"\ +" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n" + +module_param(dsp, charp, 0644); +MODULE_PARM_DESC(dsp, DSP_HELP); +module_param(mixer, charp, 0644); +MODULE_PARM_DESC(mixer, MIXER_HELP); + +#ifndef MODULE +static int set_dsp(char *name, int *add) +{ + dsp = name; + return 0; +} + +__uml_setup("dsp=", set_dsp, "dsp=<dsp device>\n" DSP_HELP); + +static int set_mixer(char *name, int *add) +{ + mixer = name; + return 0; +} + +__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP); +#endif + +static DEFINE_MUTEX(hostaudio_mutex); + +/* /dev/dsp file operations */ + +static ssize_t hostaudio_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct hostaudio_state *state = file->private_data; + void *kbuf; + int err; + +#ifdef DEBUG + printk(KERN_DEBUG "hostaudio: read called, count = %d\n", count); +#endif + + kbuf = kmalloc(count, GFP_KERNEL); + if (kbuf == NULL) + return -ENOMEM; + + err = os_read_file(state->fd, kbuf, count); + if (err < 0) + goto out; + + if (copy_to_user(buffer, kbuf, err)) + err = -EFAULT; + +out: + kfree(kbuf); + return err; +} + +static ssize_t hostaudio_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct hostaudio_state *state = file->private_data; + void *kbuf; + int err; + +#ifdef DEBUG + printk(KERN_DEBUG "hostaudio: write called, count = %d\n", count); +#endif + + kbuf = memdup_user(buffer, count); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); + + err = os_write_file(state->fd, kbuf, count); + if (err < 0) + goto out; + *ppos += err; + + out: + kfree(kbuf); + return err; +} + +static __poll_t hostaudio_poll(struct file *file, + struct poll_table_struct *wait) +{ +#ifdef DEBUG + printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n"); +#endif + + return 0; +} + +static long hostaudio_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct hostaudio_state *state = file->private_data; + unsigned long data = 0; + int err; + +#ifdef DEBUG + printk(KERN_DEBUG "hostaudio: ioctl called, cmd = %u\n", cmd); +#endif + switch(cmd){ + case SNDCTL_DSP_SPEED: + case SNDCTL_DSP_STEREO: + case SNDCTL_DSP_GETBLKSIZE: + case SNDCTL_DSP_CHANNELS: + case SNDCTL_DSP_SUBDIVIDE: + case SNDCTL_DSP_SETFRAGMENT: + if (get_user(data, (int __user *) arg)) + return -EFAULT; + break; + default: + break; + } + + err = os_ioctl_generic(state->fd, cmd, (unsigned long) &data); + + switch(cmd){ + case SNDCTL_DSP_SPEED: + case SNDCTL_DSP_STEREO: + case SNDCTL_DSP_GETBLKSIZE: + case SNDCTL_DSP_CHANNELS: + case SNDCTL_DSP_SUBDIVIDE: + case SNDCTL_DSP_SETFRAGMENT: + if (put_user(data, (int __user *) arg)) + return -EFAULT; + break; + default: + break; + } + + return err; +} + +static int hostaudio_open(struct inode *inode, struct file *file) +{ + struct hostaudio_state *state; + int r = 0, w = 0; + int ret; + +#ifdef DEBUG + kernel_param_lock(THIS_MODULE); + printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp); + kernel_param_unlock(THIS_MODULE); +#endif + + state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL); + if (state == NULL) + return -ENOMEM; + + if (file->f_mode & FMODE_READ) + r = 1; + if (file->f_mode & FMODE_WRITE) + w = 1; + + 
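+	/* Hold the module param lock so the "dsp" string cannot be changed
+	 * (e.g. via sysfs) while it is being opened. */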
+	kernel_param_lock(THIS_MODULE);
+	mutex_lock(&hostaudio_mutex);
+	ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
+	mutex_unlock(&hostaudio_mutex);
+	kernel_param_unlock(THIS_MODULE);
+
+	if (ret < 0) {
+		kfree(state);
+		return ret;
+	}
+	state->fd = ret;
+	file->private_data = state;
+	return 0;
+}
+
+static int hostaudio_release(struct inode *inode, struct file *file)
+{
+	struct hostaudio_state *state = file->private_data;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "hostaudio: release called\n");
+#endif
+	os_close_file(state->fd);
+	kfree(state);
+
+	return 0;
+}
+
+/* /dev/mixer file operations */
+
+static long hostmixer_ioctl_mixdev(struct file *file,
+				   unsigned int cmd, unsigned long arg)
+{
+	struct hostmixer_state *state = file->private_data;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "hostmixer: ioctl called\n");
+#endif
+
+	return os_ioctl_generic(state->fd, cmd, arg);
+}
+
+static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
+{
+	struct hostmixer_state *state;
+	int r = 0, w = 0;
+	int ret;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "hostmixer: open called (host: %s)\n", mixer);
+#endif
+
+	state = kmalloc(sizeof(struct hostmixer_state), GFP_KERNEL);
+	if (state == NULL)
+		return -ENOMEM;
+
+	if (file->f_mode & FMODE_READ)
+		r = 1;
+	if (file->f_mode & FMODE_WRITE)
+		w = 1;
+
+	kernel_param_lock(THIS_MODULE);
+	mutex_lock(&hostaudio_mutex);
+	ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
+	mutex_unlock(&hostaudio_mutex);
+	kernel_param_unlock(THIS_MODULE);
+
+	if (ret < 0) {
+		kernel_param_lock(THIS_MODULE);
+		printk(KERN_ERR "hostmixer_open_mixdev failed to open '%s', "
+		       "err = %d\n", mixer, -ret);
+		kernel_param_unlock(THIS_MODULE);
+		kfree(state);
+		return ret;
+	}
+
+	file->private_data = state;
+	return 0;
+}
+
+static int hostmixer_release(struct inode *inode, struct file *file)
+{
+	struct hostmixer_state *state = file->private_data;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "hostmixer: release called\n");
+#endif
+
+	os_close_file(state->fd);
+	kfree(state);
+
+	return 0;
+}
+
+/* kernel module operations */
+
+static const struct file_operations hostaudio_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read = hostaudio_read,
+	.write = hostaudio_write,
+	.poll = hostaudio_poll,
+	.unlocked_ioctl = hostaudio_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
+	.mmap = NULL,
+	.open = hostaudio_open,
+	.release = hostaudio_release,
+};
+
+static const struct file_operations hostmixer_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.unlocked_ioctl = hostmixer_ioctl_mixdev,
+	.open = hostmixer_open_mixdev,
+	.release = hostmixer_release,
+};
+
+static struct {
+	int dev_audio;
+	int dev_mixer;
+} module_data;
+
+MODULE_AUTHOR("Steve Schmidtke");
+MODULE_DESCRIPTION("UML Audio Relay");
+MODULE_LICENSE("GPL");
+
+static int __init hostaudio_init_module(void)
+{
+	kernel_param_lock(THIS_MODULE);
+	printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
+	       dsp, mixer);
+	kernel_param_unlock(THIS_MODULE);
+
+	module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
+	if (module_data.dev_audio < 0) {
+		printk(KERN_ERR "hostaudio: couldn't register DSP device!\n");
+		return -ENODEV;
+	}
+
+	module_data.dev_mixer = register_sound_mixer(&hostmixer_fops, -1);
+	if (module_data.dev_mixer < 0) {
+		printk(KERN_ERR "hostmixer: couldn't register mixer "
+		       "device!\n");
+		unregister_sound_dsp(module_data.dev_audio);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit hostaudio_cleanup_module(void)
+{
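+	/* Unregister in the reverse order of hostaudio_init_module(). */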
+	unregister_sound_mixer(module_data.dev_mixer);
+	unregister_sound_dsp(module_data.dev_audio);
+}
+
+module_init(hostaudio_init_module);
+module_exit(hostaudio_cleanup_module); diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c new file mode 100644 index 0000000000..b98545f3ed --- /dev/null +++ b/arch/um/drivers/line.c @@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#include <linux/irqreturn.h>
+#include <linux/kd.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "chan.h"
+#include <irq_kern.h>
+#include <irq_user.h>
+#include <kern_util.h>
+#include <os.h>
+
+#define LINE_BUFSIZE 4096
+
+static irqreturn_t line_interrupt(int irq, void *data)
+{
+	struct chan *chan = data;
+	struct line *line = chan->line;
+
+	if (line)
+		chan_interrupt(line, irq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Returns the free space inside the ring buffer of this line.
+ *
+ * Should be called while holding line->lock (this does not modify data).
+ */
+static unsigned int write_room(struct line *line)
+{
+	int n;
+
+	if (line->buffer == NULL)
+		return LINE_BUFSIZE - 1;
+
+	/* This is for the case where the buffer is wrapped! */
+	n = line->head - line->tail;
+
+	if (n <= 0)
+		n += LINE_BUFSIZE; /* The other case */
+	return n - 1;
+}
+
+unsigned int line_write_room(struct tty_struct *tty)
+{
+	struct line *line = tty->driver_data;
+	unsigned long flags;
+	unsigned int room;
+
+	spin_lock_irqsave(&line->lock, flags);
+	room = write_room(line);
+	spin_unlock_irqrestore(&line->lock, flags);
+
+	return room;
+}
+
+unsigned int line_chars_in_buffer(struct tty_struct *tty)
+{
+	struct line *line = tty->driver_data;
+	unsigned long flags;
+	unsigned int ret;
+
+	spin_lock_irqsave(&line->lock, flags);
+	/* write_room() subtracts 1 for the byte it always keeps free, so we
+	 * re-add it. */
+	ret = LINE_BUFSIZE - (write_room(line) + 1);
+	spin_unlock_irqrestore(&line->lock, flags);
+
+	return ret;
+}
+
+/*
+ * This copies the content of buf into the circular buffer associated with
+ * this line.
+ * The return value is the number of characters actually copied, i.e. the ones
+ * for which there was space: this function is not supposed to ever flush out
+ * the circular buffer.
+ *
+ * Must be called while holding line->lock!
+ */
+static int buffer_data(struct line *line, const char *buf, int len)
+{
+	int end, room;
+
+	if (line->buffer == NULL) {
+		line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
+		if (line->buffer == NULL) {
+			printk(KERN_ERR "buffer_data - atomic allocation "
+			       "failed\n");
+			return 0;
+		}
+		line->head = line->buffer;
+		line->tail = line->buffer;
+	}
+
+	room = write_room(line);
+	len = (len > room) ? room : len;
+
+	end = line->buffer + LINE_BUFSIZE - line->tail;
+
+	if (len < end) {
+		memcpy(line->tail, buf, len);
+		line->tail += len;
+	}
+	else {
+		/* The circular buffer is wrapping */
+		memcpy(line->tail, buf, end);
+		buf += end;
+		memcpy(line->buffer, buf, len - end);
+		line->tail = line->buffer + len - end;
+	}
+
+	return len;
+}
+
+/*
+ * Flushes the ring buffer to the output channels. That is, write_chan is
+ * called, passing it line->head as buffer, and an appropriate count.
+ *
+ * On exit, returns 1 when the buffer is empty,
+ * 0 when the buffer is not empty on exit,
+ * and -errno when an error occurred.
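+ * For example, if the buffer has wrapped (tail < head), the data is flushed
+ * in two steps: from head to the end of the buffer, then from the start of
+ * the buffer to tail.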
+ *
+ * Must be called while holding line->lock! */
+static int flush_buffer(struct line *line)
+{
+	int n, count;
+
+	if ((line->buffer == NULL) || (line->head == line->tail))
+		return 1;
+
+	if (line->tail < line->head) {
+		/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
+		count = line->buffer + LINE_BUFSIZE - line->head;
+
+		n = write_chan(line->chan_out, line->head, count,
+			       line->write_irq);
+		if (n < 0)
+			return n;
+		if (n == count) {
+			/*
+			 * We have flushed from ->head to buffer end, now we
+			 * must flush only from the beginning to ->tail.
+			 */
+			line->head = line->buffer;
+		} else {
+			line->head += n;
+			return 0;
+		}
+	}
+
+	count = line->tail - line->head;
+	n = write_chan(line->chan_out, line->head, count,
+		       line->write_irq);
+
+	if (n < 0)
+		return n;
+
+	line->head += n;
+	return line->head == line->tail;
+}
+
+void line_flush_buffer(struct tty_struct *tty)
+{
+	struct line *line = tty->driver_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&line->lock, flags);
+	flush_buffer(line);
+	spin_unlock_irqrestore(&line->lock, flags);
+}
+
+/*
+ * We map both ->flush_chars and ->put_char (which come as a pair) onto
+ * ->flush_buffer and ->write. Hope it's not that bad.
+ */
+void line_flush_chars(struct tty_struct *tty)
+{
+	line_flush_buffer(tty);
+}
+
+ssize_t line_write(struct tty_struct *tty, const u8 *buf, size_t len)
+{
+	struct line *line = tty->driver_data;
+	unsigned long flags;
+	int n, ret = 0;
+
+	spin_lock_irqsave(&line->lock, flags);
+	if (line->head != line->tail)
+		ret = buffer_data(line, buf, len);
+	else {
+		n = write_chan(line->chan_out, buf, len,
+			       line->write_irq);
+		if (n < 0) {
+			ret = n;
+			goto out_up;
+		}
+
+		len -= n;
+		ret += n;
+		if (len > 0)
+			ret += buffer_data(line, buf + n, len);
+	}
+out_up:
+	spin_unlock_irqrestore(&line->lock, flags);
+	return ret;
+}
+
+void line_throttle(struct tty_struct *tty)
+{
+	struct line *line = tty->driver_data;
+
+	deactivate_chan(line->chan_in, line->read_irq);
+	line->throttled = 1;
+}
+
+void line_unthrottle(struct tty_struct *tty)
+{
+	struct line *line = tty->driver_data;
+
+	line->throttled = 0;
+	chan_interrupt(line, line->read_irq);
+}
+
+static irqreturn_t line_write_interrupt(int irq, void *data)
+{
+	struct chan *chan = data;
+	struct line *line = chan->line;
+	int err;
+
+	/*
+	 * Interrupts are disabled here because genirq keeps irqs disabled when
+	 * calling the action handler.
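+	 * That is also why a plain spin_lock()/spin_unlock() pair, rather
+	 * than the irqsave variants, is sufficient here.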
+ */ + + spin_lock(&line->lock); + err = flush_buffer(line); + if (err == 0) { + spin_unlock(&line->lock); + return IRQ_NONE; + } else if ((err < 0) && (err != -EAGAIN)) { + line->head = line->buffer; + line->tail = line->buffer; + } + spin_unlock(&line->lock); + + tty_port_tty_wakeup(&line->port); + + return IRQ_HANDLED; +} + +int line_setup_irq(int fd, int input, int output, struct line *line, void *data) +{ + const struct line_driver *driver = line->driver; + int err; + + if (input) { + err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, + line_interrupt, 0, + driver->read_irq_name, data); + if (err < 0) + return err; + + line->read_irq = err; + } + + if (output) { + err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE, + line_write_interrupt, 0, + driver->write_irq_name, data); + if (err < 0) + return err; + + line->write_irq = err; + } + + return 0; +} + +static int line_activate(struct tty_port *port, struct tty_struct *tty) +{ + int ret; + struct line *line = tty->driver_data; + + ret = enable_chan(line); + if (ret) + return ret; + + if (!line->sigio) { + chan_enable_winch(line->chan_out, port); + line->sigio = 1; + } + + chan_window_size(line, &tty->winsize.ws_row, + &tty->winsize.ws_col); + + return 0; +} + +static void unregister_winch(struct tty_struct *tty); + +static void line_destruct(struct tty_port *port) +{ + struct tty_struct *tty = tty_port_tty_get(port); + struct line *line = tty->driver_data; + + if (line->sigio) { + unregister_winch(tty); + line->sigio = 0; + } +} + +static const struct tty_port_operations line_port_ops = { + .activate = line_activate, + .destruct = line_destruct, +}; + +int line_open(struct tty_struct *tty, struct file *filp) +{ + struct line *line = tty->driver_data; + + return tty_port_open(&line->port, tty, filp); +} + +int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line) +{ + int ret; + + ret = tty_standard_install(driver, tty); + if (ret) + return ret; + + tty->driver_data = line; + + return 0; +} + +void line_close(struct tty_struct *tty, struct file * filp) +{ + struct line *line = tty->driver_data; + + tty_port_close(&line->port, tty, filp); +} + +void line_hangup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; + + tty_port_hangup(&line->port); +} + +void close_lines(struct line *lines, int nlines) +{ + int i; + + for(i = 0; i < nlines; i++) + close_chan(&lines[i]); +} + +int setup_one_line(struct line *lines, int n, char *init, + const struct chan_opts *opts, char **error_out) +{ + struct line *line = &lines[n]; + struct tty_driver *driver = line->driver->driver; + int err = -EINVAL; + + if (line->port.count) { + *error_out = "Device is already open"; + goto out; + } + + if (!strcmp(init, "none")) { + if (line->valid) { + line->valid = 0; + kfree(line->init_str); + tty_unregister_device(driver, n); + parse_chan_pair(NULL, line, n, opts, error_out); + err = 0; + } + } else { + char *new = kstrdup(init, GFP_KERNEL); + if (!new) { + *error_out = "Failed to allocate memory"; + return -ENOMEM; + } + if (line->valid) { + tty_unregister_device(driver, n); + kfree(line->init_str); + } + line->init_str = new; + line->valid = 1; + err = parse_chan_pair(new, line, n, opts, error_out); + if (!err) { + struct device *d = tty_port_register_device(&line->port, + driver, n, NULL); + if (IS_ERR(d)) { + *error_out = "Failed to register device"; + err = PTR_ERR(d); + parse_chan_pair(NULL, line, n, opts, error_out); + } + } + if (err) { + line->init_str = NULL; + line->valid = 0; + kfree(new); + } + } +out: + 
return err; +} + +/* + * Common setup code for both startup command line and mconsole initialization. + * @lines contains the array (of size @num) to modify; + * @init is the setup string; + * @error_out is an error string in the case of failure; + */ + +int line_setup(char **conf, unsigned int num, char **def, + char *init, char *name) +{ + char *error; + + if (*init == '=') { + /* + * We said con=/ssl= instead of con#=, so we are configuring all + * consoles at once. + */ + *def = init + 1; + } else { + char *end; + unsigned n = simple_strtoul(init, &end, 0); + + if (*end != '=') { + error = "Couldn't parse device number"; + goto out; + } + if (n >= num) { + error = "Device number out of range"; + goto out; + } + conf[n] = end + 1; + } + return 0; + +out: + printk(KERN_ERR "Failed to set up %s with " + "configuration string \"%s\" : %s\n", name, init, error); + return -EINVAL; +} + +int line_config(struct line *lines, unsigned int num, char *str, + const struct chan_opts *opts, char **error_out) +{ + char *end; + int n; + + if (*str == '=') { + *error_out = "Can't configure all devices from mconsole"; + return -EINVAL; + } + + n = simple_strtoul(str, &end, 0); + if (*end++ != '=') { + *error_out = "Couldn't parse device number"; + return -EINVAL; + } + if (n >= num) { + *error_out = "Device number out of range"; + return -EINVAL; + } + + return setup_one_line(lines, n, end, opts, error_out); +} + +int line_get_config(char *name, struct line *lines, unsigned int num, char *str, + int size, char **error_out) +{ + struct line *line; + char *end; + int dev, n = 0; + + dev = simple_strtoul(name, &end, 0); + if ((*end != '\0') || (end == name)) { + *error_out = "line_get_config failed to parse device number"; + return 0; + } + + if ((dev < 0) || (dev >= num)) { + *error_out = "device number out of range"; + return 0; + } + + line = &lines[dev]; + + if (!line->valid) + CONFIG_CHUNK(str, size, n, "none", 1); + else { + struct tty_struct *tty = tty_port_tty_get(&line->port); + if (tty == NULL) { + CONFIG_CHUNK(str, size, n, line->init_str, 1); + } else { + n = chan_config_string(line, str, size, error_out); + tty_kref_put(tty); + } + } + + return n; +} + +int line_id(char **str, int *start_out, int *end_out) +{ + char *end; + int n; + + n = simple_strtoul(*str, &end, 0); + if ((*end != '\0') || (end == *str)) + return -1; + + *str = end; + *start_out = n; + *end_out = n; + return n; +} + +int line_remove(struct line *lines, unsigned int num, int n, char **error_out) +{ + if (n >= num) { + *error_out = "Device number out of range"; + return -EINVAL; + } + return setup_one_line(lines, n, "none", NULL, error_out); +} + +int register_lines(struct line_driver *line_driver, + const struct tty_operations *ops, + struct line *lines, int nlines) +{ + struct tty_driver *driver; + int err; + int i; + + driver = tty_alloc_driver(nlines, TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV); + if (IS_ERR(driver)) + return PTR_ERR(driver); + + driver->driver_name = line_driver->name; + driver->name = line_driver->device_name; + driver->major = line_driver->major; + driver->minor_start = line_driver->minor_start; + driver->type = line_driver->type; + driver->subtype = line_driver->subtype; + driver->init_termios = tty_std_termios; + + for (i = 0; i < nlines; i++) { + tty_port_init(&lines[i].port); + lines[i].port.ops = &line_port_ops; + spin_lock_init(&lines[i].lock); + lines[i].driver = line_driver; + INIT_LIST_HEAD(&lines[i].chan_list); + } + tty_set_operations(driver, ops); + + err = tty_register_driver(driver); + 
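+	/* On failure, undo the port initialisation above and drop the
+	 * driver reference. */
+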
if (err) { + printk(KERN_ERR "register_lines : can't register %s driver\n", + line_driver->name); + tty_driver_kref_put(driver); + for (i = 0; i < nlines; i++) + tty_port_destroy(&lines[i].port); + return err; + } + + line_driver->driver = driver; + mconsole_register_dev(&line_driver->mc); + return 0; +} + +static DEFINE_SPINLOCK(winch_handler_lock); +static LIST_HEAD(winch_handlers); + +struct winch { + struct list_head list; + int fd; + int tty_fd; + int pid; + struct tty_port *port; + unsigned long stack; + struct work_struct work; +}; + +static void __free_winch(struct work_struct *work) +{ + struct winch *winch = container_of(work, struct winch, work); + um_free_irq(WINCH_IRQ, winch); + + if (winch->pid != -1) + os_kill_process(winch->pid, 1); + if (winch->stack != 0) + free_stack(winch->stack, 0); + kfree(winch); +} + +static void free_winch(struct winch *winch) +{ + int fd = winch->fd; + winch->fd = -1; + if (fd != -1) + os_close_file(fd); + __free_winch(&winch->work); +} + +static irqreturn_t winch_interrupt(int irq, void *data) +{ + struct winch *winch = data; + struct tty_struct *tty; + struct line *line; + int fd = winch->fd; + int err; + char c; + struct pid *pgrp; + + if (fd != -1) { + err = generic_read(fd, &c, NULL); + if (err < 0) { + if (err != -EAGAIN) { + winch->fd = -1; + list_del(&winch->list); + os_close_file(fd); + printk(KERN_ERR "winch_interrupt : " + "read failed, errno = %d\n", -err); + printk(KERN_ERR "fd %d is losing SIGWINCH " + "support\n", winch->tty_fd); + INIT_WORK(&winch->work, __free_winch); + schedule_work(&winch->work); + return IRQ_HANDLED; + } + goto out; + } + } + tty = tty_port_tty_get(winch->port); + if (tty != NULL) { + line = tty->driver_data; + if (line != NULL) { + chan_window_size(line, &tty->winsize.ws_row, + &tty->winsize.ws_col); + pgrp = tty_get_pgrp(tty); + if (pgrp) + kill_pgrp(pgrp, SIGWINCH, 1); + put_pid(pgrp); + } + tty_kref_put(tty); + } + out: + return IRQ_HANDLED; +} + +void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port, + unsigned long stack) +{ + struct winch *winch; + + winch = kmalloc(sizeof(*winch), GFP_KERNEL); + if (winch == NULL) { + printk(KERN_ERR "register_winch_irq - kmalloc failed\n"); + goto cleanup; + } + + *winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list), + .fd = fd, + .tty_fd = tty_fd, + .pid = pid, + .port = port, + .stack = stack }); + + if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, + IRQF_SHARED, "winch", winch) < 0) { + printk(KERN_ERR "register_winch_irq - failed to register " + "IRQ\n"); + goto out_free; + } + + spin_lock(&winch_handler_lock); + list_add(&winch->list, &winch_handlers); + spin_unlock(&winch_handler_lock); + + return; + + out_free: + kfree(winch); + cleanup: + os_kill_process(pid, 1); + os_close_file(fd); + if (stack != 0) + free_stack(stack, 0); +} + +static void unregister_winch(struct tty_struct *tty) +{ + struct list_head *ele, *next; + struct winch *winch; + struct tty_struct *wtty; + + spin_lock(&winch_handler_lock); + + list_for_each_safe(ele, next, &winch_handlers) { + winch = list_entry(ele, struct winch, list); + wtty = tty_port_tty_get(winch->port); + if (wtty == tty) { + list_del(&winch->list); + spin_unlock(&winch_handler_lock); + free_winch(winch); + break; + } + tty_kref_put(wtty); + } + spin_unlock(&winch_handler_lock); +} + +static void winch_cleanup(void) +{ + struct winch *winch; + + spin_lock(&winch_handler_lock); + while ((winch = list_first_entry_or_null(&winch_handlers, + struct winch, list))) { + 
list_del(&winch->list);
+		spin_unlock(&winch_handler_lock);
+
+		free_winch(winch);
+
+		spin_lock(&winch_handler_lock);
+	}
+
+	spin_unlock(&winch_handler_lock);
+}
+__uml_exitcall(winch_cleanup);
+
+char *add_xterm_umid(char *base)
+{
+	char *umid, *title;
+	int len;
+
+	umid = get_umid();
+	if (*umid == '\0')
+		return base;
+
+	len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
+	title = kmalloc(len, GFP_KERNEL);
+	if (title == NULL) {
+		printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
+		return base;
+	}
+
+	snprintf(title, len, "%s (%s)", base, umid);
+	return title;
+} diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h new file mode 100644 index 0000000000..e84fb9b416 --- /dev/null +++ b/arch/um/drivers/line.h @@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+ */
+
+#ifndef __LINE_H__
+#define __LINE_H__
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/tty.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "chan_user.h"
+#include "mconsole_kern.h"
+
+/* There are only two modifiable fields in this - .mc.list and .driver */
+struct line_driver {
+	const char *name;
+	const char *device_name;
+	const short major;
+	const short minor_start;
+	const short type;
+	const short subtype;
+	const char *read_irq_name;
+	const char *write_irq_name;
+	struct mc_device mc;
+	struct tty_driver *driver;
+};
+
+struct line {
+	struct tty_port port;
+	int valid;
+
+	int read_irq, write_irq;
+
+	char *init_str;
+	struct list_head chan_list;
+	struct chan *chan_in, *chan_out;
+
+	/* This lock is actually, mostly, local to the ring buffer below. */
+	spinlock_t lock;
+	int throttled;
+	/* Yes, this is a real circular buffer.
+	 * XXX: And this should become a struct kfifo!
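+	 *
+	 * Example: head == tail means the ring is empty; write_room() always
+	 * keeps one byte free, so at most LINE_BUFSIZE - 1 bytes are buffered,
+	 * which is how a full ring is distinguished from an empty one.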
+ * + * buffer points to a buffer allocated on demand, of length + * LINE_BUFSIZE, head to the start of the ring, tail to the end.*/ + char *buffer; + char *head; + char *tail; + + int sigio; + struct delayed_work task; + const struct line_driver *driver; +}; + +extern void line_close(struct tty_struct *tty, struct file * filp); +extern int line_open(struct tty_struct *tty, struct file *filp); +extern int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line); +extern void line_cleanup(struct tty_struct *tty); +extern void line_hangup(struct tty_struct *tty); +extern int line_setup(char **conf, unsigned nlines, char **def, + char *init, char *name); +extern ssize_t line_write(struct tty_struct *tty, const u8 *buf, size_t len); +extern unsigned int line_chars_in_buffer(struct tty_struct *tty); +extern void line_flush_buffer(struct tty_struct *tty); +extern void line_flush_chars(struct tty_struct *tty); +extern unsigned int line_write_room(struct tty_struct *tty); +extern void line_throttle(struct tty_struct *tty); +extern void line_unthrottle(struct tty_struct *tty); + +extern char *add_xterm_umid(char *base); +extern int line_setup_irq(int fd, int input, int output, struct line *line, + void *data); +extern void line_close_chan(struct line *line); +extern int register_lines(struct line_driver *line_driver, + const struct tty_operations *driver, + struct line *lines, int nlines); +extern int setup_one_line(struct line *lines, int n, char *init, + const struct chan_opts *opts, char **error_out); +extern void close_lines(struct line *lines, int nlines); + +extern int line_config(struct line *lines, unsigned int sizeof_lines, + char *str, const struct chan_opts *opts, + char **error_out); +extern int line_id(char **str, int *start_out, int *end_out); +extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n, + char **error_out); +extern int line_get_config(char *dev, struct line *lines, + unsigned int sizeof_lines, char *str, + int size, char **error_out); + +#endif diff --git a/arch/um/drivers/mconsole.h b/arch/um/drivers/mconsole.h new file mode 100644 index 0000000000..6356378304 --- /dev/null +++ b/arch/um/drivers/mconsole.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __MCONSOLE_H__ +#define __MCONSOLE_H__ + +#ifdef __UM_HOST__ +#include <stdint.h> +#define u32 uint32_t +#endif + +#include <sysdep/ptrace.h> + +#define MCONSOLE_MAGIC (0xcafebabe) +#define MCONSOLE_MAX_DATA (512) +#define MCONSOLE_VERSION 2 + +struct mconsole_request { + u32 magic; + u32 version; + u32 len; + char data[MCONSOLE_MAX_DATA]; +}; + +struct mconsole_reply { + u32 err; + u32 more; + u32 len; + char data[MCONSOLE_MAX_DATA]; +}; + +struct mconsole_notify { + u32 magic; + u32 version; + enum { MCONSOLE_SOCKET, MCONSOLE_PANIC, MCONSOLE_HANG, + MCONSOLE_USER_NOTIFY } type; + u32 len; + char data[MCONSOLE_MAX_DATA]; +}; + +struct mc_request; + +enum mc_context { MCONSOLE_INTR, MCONSOLE_PROC }; + +struct mconsole_command +{ + char *command; + void (*handler)(struct mc_request *req); + enum mc_context context; +}; + +struct mc_request +{ + int len; + int as_interrupt; + + int originating_fd; + unsigned int originlen; + unsigned char origin[128]; /* sockaddr_un */ + + struct mconsole_request request; + struct mconsole_command *cmd; + struct uml_pt_regs regs; +}; + +extern char mconsole_socket_name[]; + +extern 
int mconsole_unlink_socket(void); +extern int mconsole_reply_len(struct mc_request *req, const char *reply, + int len, int err, int more); +extern int mconsole_reply(struct mc_request *req, const char *str, int err, + int more); + +extern void mconsole_version(struct mc_request *req); +extern void mconsole_help(struct mc_request *req); +extern void mconsole_halt(struct mc_request *req); +extern void mconsole_reboot(struct mc_request *req); +extern void mconsole_config(struct mc_request *req); +extern void mconsole_remove(struct mc_request *req); +extern void mconsole_sysrq(struct mc_request *req); +extern void mconsole_cad(struct mc_request *req); +extern void mconsole_stop(struct mc_request *req); +extern void mconsole_go(struct mc_request *req); +extern void mconsole_log(struct mc_request *req); +extern void mconsole_proc(struct mc_request *req); +extern void mconsole_stack(struct mc_request *req); + +extern int mconsole_get_request(int fd, struct mc_request *req); +extern int mconsole_notify(char *sock_name, int type, const void *data, + int len); +extern char *mconsole_notify_socket(void); +extern void lock_notify(void); +extern void unlock_notify(void); + +#endif diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c new file mode 100644 index 0000000000..ff4bda95b9 --- /dev/null +++ b/arch/um/drivers/mconsole_kern.c @@ -0,0 +1,871 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) + * Copyright (C) 2001 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <linux/console.h> +#include <linux/ctype.h> +#include <linux/string.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/panic_notifier.h> +#include <linux/reboot.h> +#include <linux/sched/debug.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/syscalls.h> +#include <linux/utsname.h> +#include <linux/socket.h> +#include <linux/un.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/file.h> +#include <linux/uaccess.h> +#include <asm/switch_to.h> + +#include <init.h> +#include <irq_kern.h> +#include <irq_user.h> +#include <kern_util.h> +#include "mconsole.h" +#include "mconsole_kern.h" +#include <os.h> + +static struct vfsmount *proc_mnt = NULL; + +static int do_unlink_socket(struct notifier_block *notifier, + unsigned long what, void *data) +{ + return mconsole_unlink_socket(); +} + + +static struct notifier_block reboot_notifier = { + .notifier_call = do_unlink_socket, + .priority = 0, +}; + +/* Safe without explicit locking for now. Tasklets provide their own + * locking, and the interrupt handler is safe because it can't interrupt + * itself and it can only happen on CPU 0. 
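+ * (mc_requests is filled below from interrupt context and drained by
+ * mc_work_proc() in process context via schedule_work().)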
+ */
+
+static LIST_HEAD(mc_requests);
+
+static void mc_work_proc(struct work_struct *unused)
+{
+	struct mconsole_entry *req;
+	unsigned long flags;
+
+	while (!list_empty(&mc_requests)) {
+		local_irq_save(flags);
+		req = list_entry(mc_requests.next, struct mconsole_entry, list);
+		list_del(&req->list);
+		local_irq_restore(flags);
+		req->request.cmd->handler(&req->request);
+		kfree(req);
+	}
+}
+
+static DECLARE_WORK(mconsole_work, mc_work_proc);
+
+static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
+{
+	/* long to avoid size mismatch warnings from gcc */
+	long fd;
+	struct mconsole_entry *new;
+	static struct mc_request req;	/* that's OK */
+
+	fd = (long) dev_id;
+	while (mconsole_get_request(fd, &req)) {
+		if (req.cmd->context == MCONSOLE_INTR)
+			(*req.cmd->handler)(&req);
+		else {
+			new = kmalloc(sizeof(*new), GFP_NOWAIT);
+			if (new == NULL)
+				mconsole_reply(&req, "Out of memory", 1, 0);
+			else {
+				new->request = req;
+				new->request.regs = get_irq_regs()->regs;
+				list_add(&new->list, &mc_requests);
+			}
+		}
+	}
+	if (!list_empty(&mc_requests))
+		schedule_work(&mconsole_work);
+	return IRQ_HANDLED;
+}
+
+void mconsole_version(struct mc_request *req)
+{
+	char version[256];
+
+	sprintf(version, "%s %s %s %s %s", utsname()->sysname,
+		utsname()->nodename, utsname()->release, utsname()->version,
+		utsname()->machine);
+	mconsole_reply(req, version, 0, 0);
+}
+
+void mconsole_log(struct mc_request *req)
+{
+	int len;
+	char *ptr = req->request.data;
+
+	ptr += strlen("log ");
+
+	len = req->len - (ptr - req->request.data);
+	printk(KERN_WARNING "%.*s", len, ptr);
+	mconsole_reply(req, "", 0, 0);
+}
+
+void mconsole_proc(struct mc_request *req)
+{
+	struct vfsmount *mnt = proc_mnt;
+	char *buf;
+	int len;
+	struct file *file;
+	int first_chunk = 1;
+	char *ptr = req->request.data;
+	loff_t pos = 0;
+
+	ptr += strlen("proc");
+	ptr = skip_spaces(ptr);
+
+	if (!mnt) {
+		mconsole_reply(req, "Proc not available", 1, 0);
+		goto out;
+	}
+	file = file_open_root_mnt(mnt, ptr, O_RDONLY, 0);
+	if (IS_ERR(file)) {
+		mconsole_reply(req, "Failed to open file", 1, 0);
+		printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
+		goto out;
+	}
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		mconsole_reply(req, "Failed to allocate buffer", 1, 0);
+		goto out_fput;
+	}
+
+	do {
+		len = kernel_read(file, buf, PAGE_SIZE - 1, &pos);
+		if (len < 0) {
+			mconsole_reply(req, "Read of file failed", 1, 0);
+			goto out_free;
+		}
+		/* Begin the file content on its own line.
*/ + if (first_chunk) { + mconsole_reply(req, "\n", 0, 1); + first_chunk = 0; + } + buf[len] = '\0'; + mconsole_reply(req, buf, 0, (len > 0)); + } while (len > 0); + out_free: + kfree(buf); + out_fput: + fput(file); + out: ; +} + +#define UML_MCONSOLE_HELPTEXT \ +"Commands: \n\ + version - Get kernel version \n\ + help - Print this message \n\ + halt - Halt UML \n\ + reboot - Reboot UML \n\ + config <dev>=<config> - Add a new device to UML; \n\ + same syntax as command line \n\ + config <dev> - Query the configuration of a device \n\ + remove <dev> - Remove a device from UML \n\ + sysrq <letter> - Performs the SysRq action controlled by the letter \n\ + cad - invoke the Ctrl-Alt-Del handler \n\ + stop - pause the UML; it will do nothing until it receives a 'go' \n\ + go - continue the UML after a 'stop' \n\ + log <string> - make UML enter <string> into the kernel log\n\ + proc <file> - returns the contents of the UML's /proc/<file>\n\ + stack <pid> - returns the stack of the specified pid\n\ +" + +void mconsole_help(struct mc_request *req) +{ + mconsole_reply(req, UML_MCONSOLE_HELPTEXT, 0, 0); +} + +void mconsole_halt(struct mc_request *req) +{ + mconsole_reply(req, "", 0, 0); + machine_halt(); +} + +void mconsole_reboot(struct mc_request *req) +{ + mconsole_reply(req, "", 0, 0); + machine_restart(NULL); +} + +void mconsole_cad(struct mc_request *req) +{ + mconsole_reply(req, "", 0, 0); + ctrl_alt_del(); +} + +void mconsole_go(struct mc_request *req) +{ + mconsole_reply(req, "Not stopped", 1, 0); +} + +void mconsole_stop(struct mc_request *req) +{ + block_signals(); + os_set_fd_block(req->originating_fd, 1); + mconsole_reply(req, "stopped", 0, 0); + for (;;) { + if (!mconsole_get_request(req->originating_fd, req)) + continue; + if (req->cmd->handler == mconsole_go) + break; + if (req->cmd->handler == mconsole_stop) { + mconsole_reply(req, "Already stopped", 1, 0); + continue; + } + if (req->cmd->handler == mconsole_sysrq) { + struct pt_regs *old_regs; + old_regs = set_irq_regs((struct pt_regs *)&req->regs); + mconsole_sysrq(req); + set_irq_regs(old_regs); + continue; + } + (*req->cmd->handler)(req); + } + os_set_fd_block(req->originating_fd, 0); + mconsole_reply(req, "", 0, 0); + unblock_signals(); +} + +static DEFINE_SPINLOCK(mc_devices_lock); +static LIST_HEAD(mconsole_devices); + +void mconsole_register_dev(struct mc_device *new) +{ + spin_lock(&mc_devices_lock); + BUG_ON(!list_empty(&new->list)); + list_add(&new->list, &mconsole_devices); + spin_unlock(&mc_devices_lock); +} + +static struct mc_device *mconsole_find_dev(char *name) +{ + struct list_head *ele; + struct mc_device *dev; + + list_for_each(ele, &mconsole_devices) { + dev = list_entry(ele, struct mc_device, list); + if (!strncmp(name, dev->name, strlen(dev->name))) + return dev; + } + return NULL; +} + +#define UNPLUGGED_PER_PAGE \ + ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long)) + +struct unplugged_pages { + struct list_head list; + void *pages[UNPLUGGED_PER_PAGE]; +}; + +static DEFINE_MUTEX(plug_mem_mutex); +static unsigned long long unplugged_pages_count; +static LIST_HEAD(unplugged_pages); +static int unplug_index = UNPLUGGED_PER_PAGE; + +static int mem_config(char *str, char **error_out) +{ + unsigned long long diff; + int err = -EINVAL, i, add; + char *ret; + + if (str[0] != '=') { + *error_out = "Expected '=' after 'mem'"; + goto out; + } + + str++; + if (str[0] == '-') + add = 0; + else if (str[0] == '+') { + add = 1; + } + else { + *error_out = "Expected increment to start with '-' or '+'"; + goto 
out; + } + + str++; + diff = memparse(str, &ret); + if (*ret != '\0') { + *error_out = "Failed to parse memory increment"; + goto out; + } + + diff /= PAGE_SIZE; + + mutex_lock(&plug_mem_mutex); + for (i = 0; i < diff; i++) { + struct unplugged_pages *unplugged; + void *addr; + + if (add) { + if (list_empty(&unplugged_pages)) + break; + + unplugged = list_entry(unplugged_pages.next, + struct unplugged_pages, list); + if (unplug_index > 0) + addr = unplugged->pages[--unplug_index]; + else { + list_del(&unplugged->list); + addr = unplugged; + unplug_index = UNPLUGGED_PER_PAGE; + } + + free_page((unsigned long) addr); + unplugged_pages_count--; + } + else { + struct page *page; + + page = alloc_page(GFP_ATOMIC); + if (page == NULL) + break; + + unplugged = page_address(page); + if (unplug_index == UNPLUGGED_PER_PAGE) { + list_add(&unplugged->list, &unplugged_pages); + unplug_index = 0; + } + else { + struct list_head *entry = unplugged_pages.next; + addr = unplugged; + + unplugged = list_entry(entry, + struct unplugged_pages, + list); + err = os_drop_memory(addr, PAGE_SIZE); + if (err) { + printk(KERN_ERR "Failed to release " + "memory - errno = %d\n", err); + *error_out = "Failed to release memory"; + goto out_unlock; + } + unplugged->pages[unplug_index++] = addr; + } + + unplugged_pages_count++; + } + } + + err = 0; +out_unlock: + mutex_unlock(&plug_mem_mutex); +out: + return err; +} + +static int mem_get_config(char *name, char *str, int size, char **error_out) +{ + char buf[sizeof("18446744073709551615")]; + int len = 0; + + sprintf(buf, "%ld", uml_physmem); + CONFIG_CHUNK(str, size, len, buf, 1); + + return len; +} + +static int mem_id(char **str, int *start_out, int *end_out) +{ + *start_out = 0; + *end_out = 0; + + return 0; +} + +static int mem_remove(int n, char **error_out) +{ + *error_out = "Memory doesn't support the remove operation"; + return -EBUSY; +} + +static struct mc_device mem_mc = { + .list = LIST_HEAD_INIT(mem_mc.list), + .name = "mem", + .config = mem_config, + .get_config = mem_get_config, + .id = mem_id, + .remove = mem_remove, +}; + +static int __init mem_mc_init(void) +{ + if (can_drop_memory()) + mconsole_register_dev(&mem_mc); + else printk(KERN_ERR "Can't release memory to the host - memory " + "hotplug won't be supported\n"); + return 0; +} + +__initcall(mem_mc_init); + +#define CONFIG_BUF_SIZE 64 + +static void mconsole_get_config(int (*get_config)(char *, char *, int, + char **), + struct mc_request *req, char *name) +{ + char default_buf[CONFIG_BUF_SIZE], *error, *buf; + int n, size; + + if (get_config == NULL) { + mconsole_reply(req, "No get_config routine defined", 1, 0); + return; + } + + error = NULL; + size = ARRAY_SIZE(default_buf); + buf = default_buf; + + while (1) { + n = (*get_config)(name, buf, size, &error); + if (error != NULL) { + mconsole_reply(req, error, 1, 0); + goto out; + } + + if (n <= size) { + mconsole_reply(req, buf, 0, 0); + goto out; + } + + if (buf != default_buf) + kfree(buf); + + size = n; + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) { + mconsole_reply(req, "Failed to allocate buffer", 1, 0); + return; + } + } + out: + if (buf != default_buf) + kfree(buf); +} + +void mconsole_config(struct mc_request *req) +{ + struct mc_device *dev; + char *ptr = req->request.data, *name, *error_string = ""; + int err; + + ptr += strlen("config"); + ptr = skip_spaces(ptr); + dev = mconsole_find_dev(ptr); + if (dev == NULL) { + mconsole_reply(req, "Bad configuration option", 1, 0); + return; + } + + name = &ptr[strlen(dev->name)]; + ptr 
= name;
+	while ((*ptr != '=') && (*ptr != '\0'))
+		ptr++;
+
+	if (*ptr == '=') {
+		err = (*dev->config)(name, &error_string);
+		mconsole_reply(req, error_string, err, 0);
+	}
+	else mconsole_get_config(dev->get_config, req, name);
+}
+
+void mconsole_remove(struct mc_request *req)
+{
+	struct mc_device *dev;
+	char *ptr = req->request.data, *err_msg = "";
+	char error[256];
+	int err, start, end, n;
+
+	ptr += strlen("remove");
+	ptr = skip_spaces(ptr);
+	dev = mconsole_find_dev(ptr);
+	if (dev == NULL) {
+		mconsole_reply(req, "Bad remove option", 1, 0);
+		return;
+	}
+
+	ptr = &ptr[strlen(dev->name)];
+
+	err = 1;
+	n = (*dev->id)(&ptr, &start, &end);
+	if (n < 0) {
+		err_msg = "Couldn't parse device number";
+		goto out;
+	}
+	else if ((n < start) || (n > end)) {
+		sprintf(error, "Invalid device number - must be between "
+			"%d and %d", start, end);
+		err_msg = error;
+		goto out;
+	}
+
+	err_msg = NULL;
+	err = (*dev->remove)(n, &err_msg);
+	switch(err) {
+	case 0:
+		err_msg = "";
+		break;
+	case -ENODEV:
+		if (err_msg == NULL)
+			err_msg = "Device doesn't exist";
+		break;
+	case -EBUSY:
+		if (err_msg == NULL)
+			err_msg = "Device is currently open";
+		break;
+	default:
+		break;
+	}
+out:
+	mconsole_reply(req, err_msg, err, 0);
+}
+
+struct mconsole_output {
+	struct list_head list;
+	struct mc_request *req;
+};
+
+static DEFINE_SPINLOCK(client_lock);
+static LIST_HEAD(clients);
+static char console_buf[MCONSOLE_MAX_DATA] __nonstring;
+
+static void console_write(struct console *console, const char *string,
+			  unsigned int len)
+{
+	struct list_head *ele;
+	int n;
+
+	if (list_empty(&clients))
+		return;
+
+	while (len > 0) {
+		n = min((size_t) len, ARRAY_SIZE(console_buf));
+		memcpy(console_buf, string, n);
+		string += n;
+		len -= n;
+
+		list_for_each(ele, &clients) {
+			struct mconsole_output *entry;
+
+			entry = list_entry(ele, struct mconsole_output, list);
+			mconsole_reply_len(entry->req, console_buf, n, 0, 1);
+		}
+	}
+}
+
+static struct console mc_console = { .name = "mc",
+				     .write = console_write,
+				     .flags = CON_ENABLED,
+				     .index = -1 };
+
+static int mc_add_console(void)
+{
+	register_console(&mc_console);
+	return 0;
+}
+
+late_initcall(mc_add_console);
+
+static void with_console(struct mc_request *req, void (*proc)(void *),
+			 void *arg)
+{
+	struct mconsole_output entry;
+	unsigned long flags;
+
+	entry.req = req;
+	spin_lock_irqsave(&client_lock, flags);
+	list_add(&entry.list, &clients);
+	spin_unlock_irqrestore(&client_lock, flags);
+
+	(*proc)(arg);
+
+	mconsole_reply_len(req, "", 0, 0, 0);
+
+	spin_lock_irqsave(&client_lock, flags);
+	list_del(&entry.list);
+	spin_unlock_irqrestore(&client_lock, flags);
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+#include <linux/sysrq.h>
+
+static void sysrq_proc(void *arg)
+{
+	char *op = arg;
+	handle_sysrq(*op);
+}
+
+void mconsole_sysrq(struct mc_request *req)
+{
+	char *ptr = req->request.data;
+
+	ptr += strlen("sysrq");
+	ptr = skip_spaces(ptr);
+
+	/*
+	 * With 'b', the system will shut down without a chance to reply,
+	 * so in this case, we reply first.
+	 */
+	if (*ptr == 'b')
+		mconsole_reply(req, "", 0, 0);
+
+	with_console(req, sysrq_proc, ptr);
+}
+#else
+void mconsole_sysrq(struct mc_request *req)
+{
+	mconsole_reply(req, "Sysrq not compiled in", 1, 0);
+}
+#endif
+
+static void stack_proc(void *arg)
+{
+	struct task_struct *task = arg;
+
+	show_stack(task, NULL, KERN_INFO);
+}
+
+/*
+ * Mconsole stack trace
+ * Added by Allan Graves, Jeff Dike
+ * Dumps a stack's registers to the Linux console.
+ * Usage: stack <pid>.
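+ * For example, from the host (assuming the standard uml_mconsole client):
+ * "uml_mconsole <umid> stack 1" prints the stack of PID 1.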
+ */ +void mconsole_stack(struct mc_request *req) +{ + char *ptr = req->request.data; + int pid_requested= -1; + struct task_struct *to = NULL; + + /* + * Would be nice: + * 1) Send showregs output to mconsole. + * 2) Add a way to stack dump all pids. + */ + + ptr += strlen("stack"); + ptr = skip_spaces(ptr); + + /* + * Should really check for multiple pids or reject bad args here + */ + /* What do the arguments in mconsole_reply mean? */ + if (sscanf(ptr, "%d", &pid_requested) == 0) { + mconsole_reply(req, "Please specify a pid", 1, 0); + return; + } + + to = find_task_by_pid_ns(pid_requested, &init_pid_ns); + if ((to == NULL) || (pid_requested == 0)) { + mconsole_reply(req, "Couldn't find that pid", 1, 0); + return; + } + with_console(req, stack_proc, to); +} + +static int __init mount_proc(void) +{ + struct file_system_type *proc_fs_type; + struct vfsmount *mnt; + + proc_fs_type = get_fs_type("proc"); + if (!proc_fs_type) + return -ENODEV; + + mnt = kern_mount(proc_fs_type); + put_filesystem(proc_fs_type); + if (IS_ERR(mnt)) + return PTR_ERR(mnt); + + proc_mnt = mnt; + return 0; +} + +/* + * Changed by mconsole_setup, which is __setup, and called before SMP is + * active. + */ +static char *notify_socket = NULL; + +static int __init mconsole_init(void) +{ + /* long to avoid size mismatch warnings from gcc */ + long sock; + int err; + char file[UNIX_PATH_MAX]; + + mount_proc(); + + if (umid_file_name("mconsole", file, sizeof(file))) + return -1; + snprintf(mconsole_socket_name, sizeof(file), "%s", file); + + sock = os_create_unix_socket(file, sizeof(file), 1); + if (sock < 0) { + printk(KERN_ERR "Failed to initialize management console\n"); + return 1; + } + if (os_set_fd_block(sock, 0)) + goto out; + + register_reboot_notifier(&reboot_notifier); + + err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt, + IRQF_SHARED, "mconsole", (void *)sock); + if (err < 0) { + printk(KERN_ERR "Failed to get IRQ for management console\n"); + goto out; + } + + if (notify_socket != NULL) { + notify_socket = kstrdup(notify_socket, GFP_KERNEL); + if (notify_socket != NULL) + mconsole_notify(notify_socket, MCONSOLE_SOCKET, + mconsole_socket_name, + strlen(mconsole_socket_name) + 1); + else printk(KERN_ERR "mconsole_setup failed to strdup " + "string\n"); + } + + printk(KERN_INFO "mconsole (version %d) initialized on %s\n", + MCONSOLE_VERSION, mconsole_socket_name); + return 0; + + out: + os_close_file(sock); + return 1; +} + +__initcall(mconsole_init); + +static ssize_t mconsole_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) +{ + char *buf; + + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + mconsole_notify(notify_socket, MCONSOLE_USER_NOTIFY, buf, count); + kfree(buf); + return count; +} + +static const struct proc_ops mconsole_proc_ops = { + .proc_write = mconsole_proc_write, + .proc_lseek = noop_llseek, +}; + +static int create_proc_mconsole(void) +{ + struct proc_dir_entry *ent; + + if (notify_socket == NULL) + return 0; + + ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_ops); + if (ent == NULL) { + printk(KERN_INFO "create_proc_mconsole : proc_create failed\n"); + return 0; + } + return 0; +} + +static DEFINE_SPINLOCK(notify_spinlock); + +void lock_notify(void) +{ + spin_lock(¬ify_spinlock); +} + +void unlock_notify(void) +{ + spin_unlock(¬ify_spinlock); +} + +__initcall(create_proc_mconsole); + +#define NOTIFY "notify:" + +static int mconsole_setup(char *str) +{ + if (!strncmp(str, NOTIFY, 
strlen(NOTIFY))) { + str += strlen(NOTIFY); + notify_socket = str; + } + else printk(KERN_ERR "mconsole_setup : Unknown option - '%s'\n", str); + return 1; +} + +__setup("mconsole=", mconsole_setup); + +__uml_help(mconsole_setup, +"mconsole=notify:<socket>\n" +" Requests that the mconsole driver send a message to the named Unix\n" +" socket containing the name of the mconsole socket. This also serves\n" +" to notify outside processes when UML has booted far enough to respond\n" +" to mconsole requests.\n\n" +); + +static int notify_panic(struct notifier_block *self, unsigned long unused1, + void *ptr) +{ + char *message = ptr; + + if (notify_socket == NULL) + return 0; + + mconsole_notify(notify_socket, MCONSOLE_PANIC, message, + strlen(message) + 1); + return NOTIFY_DONE; +} + +static struct notifier_block panic_exit_notifier = { + .notifier_call = notify_panic, + .priority = INT_MAX, /* run as soon as possible */ +}; + +static int add_notifier(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &panic_exit_notifier); + return 0; +} + +__initcall(add_notifier); + +char *mconsole_notify_socket(void) +{ + return notify_socket; +} + +EXPORT_SYMBOL(mconsole_notify_socket); diff --git a/arch/um/drivers/mconsole_kern.h b/arch/um/drivers/mconsole_kern.h new file mode 100644 index 0000000000..56d8d6a3ff --- /dev/null +++ b/arch/um/drivers/mconsole_kern.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) + */ + +#ifndef __MCONSOLE_KERN_H__ +#define __MCONSOLE_KERN_H__ + +#include <linux/list.h> +#include "mconsole.h" + +struct mconsole_entry { + struct list_head list; + struct mc_request request; +}; + +/* All these methods are called in process context. */ +struct mc_device { + struct list_head list; + char *name; + int (*config)(char *, char **); + int (*get_config)(char *, char *, int, char **); + int (*id)(char **, int *, int *); + int (*remove)(int, char **); +}; + +#define CONFIG_CHUNK(str, size, current, chunk, end) \ +do { \ + current += strlen(chunk); \ + if(current >= size) \ + str = NULL; \ + if(str != NULL){ \ + strcpy(str, chunk); \ + str += strlen(chunk); \ + } \ + if(end) \ + current++; \ +} while(0) + +#ifdef CONFIG_MCONSOLE + +extern void mconsole_register_dev(struct mc_device *new); + +#else + +static inline void mconsole_register_dev(struct mc_device *new) +{ +} + +#endif + +#endif diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c new file mode 100644 index 0000000000..e24298a734 --- /dev/null +++ b/arch/um/drivers/mconsole_user.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <errno.h> +#include <string.h> +#include <unistd.h> +#include <sys/socket.h> +#include <sys/uio.h> +#include <sys/un.h> +#include "mconsole.h" + +static struct mconsole_command commands[] = { + /* + * With uts namespaces, uts information becomes process-specific, so + * we need a process context. If we try handling this in interrupt + * context, we may hit an exiting process without a valid uts + * namespace. 
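+ * Commands marked MCONSOLE_PROC below are therefore queued by the + * interrupt handler in mconsole_kern.c and run later from process + * context; MCONSOLE_INTR commands are handled directly in the + * interrupt handler.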
+ */ + { "version", mconsole_version, MCONSOLE_PROC }, + { "halt", mconsole_halt, MCONSOLE_PROC }, + { "reboot", mconsole_reboot, MCONSOLE_PROC }, + { "config", mconsole_config, MCONSOLE_PROC }, + { "remove", mconsole_remove, MCONSOLE_PROC }, + { "sysrq", mconsole_sysrq, MCONSOLE_INTR }, + { "help", mconsole_help, MCONSOLE_INTR }, + { "cad", mconsole_cad, MCONSOLE_INTR }, + { "stop", mconsole_stop, MCONSOLE_PROC }, + { "go", mconsole_go, MCONSOLE_INTR }, + { "log", mconsole_log, MCONSOLE_INTR }, + { "proc", mconsole_proc, MCONSOLE_PROC }, + { "stack", mconsole_stack, MCONSOLE_INTR }, +}; + +/* Initialized in mconsole_init, which is an initcall */ +char mconsole_socket_name[256]; + +static int mconsole_reply_v0(struct mc_request *req, char *reply) +{ + struct iovec iov; + struct msghdr msg; + + iov.iov_base = reply; + iov.iov_len = strlen(reply); + + msg.msg_name = &(req->origin); + msg.msg_namelen = req->originlen; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + return sendmsg(req->originating_fd, &msg, 0); +} + +static struct mconsole_command *mconsole_parse(struct mc_request *req) +{ + struct mconsole_command *cmd; + int i; + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + cmd = &commands[i]; + if (!strncmp(req->request.data, cmd->command, + strlen(cmd->command))) { + return cmd; + } + } + return NULL; +} + +#define MIN(a,b) ((a)<(b) ? (a):(b)) + +#define STRINGX(x) #x +#define STRING(x) STRINGX(x) + +int mconsole_get_request(int fd, struct mc_request *req) +{ + int len; + + req->originlen = sizeof(req->origin); + req->len = recvfrom(fd, &req->request, sizeof(req->request), 0, + (struct sockaddr *) req->origin, &req->originlen); + if (req->len < 0) + return 0; + + req->originating_fd = fd; + + if (req->request.magic != MCONSOLE_MAGIC) { + /* Unversioned request */ + len = MIN(sizeof(req->request.data) - 1, + strlen((char *) &req->request)); + memmove(req->request.data, &req->request, len); + req->request.data[len] = '\0'; + + req->request.magic = MCONSOLE_MAGIC; + req->request.version = 0; + req->request.len = len; + + mconsole_reply_v0(req, "ERR Version 0 mconsole clients are " + "not supported by this driver"); + return 0; + } + + if (req->request.len >= MCONSOLE_MAX_DATA) { + mconsole_reply(req, "Request too large", 1, 0); + return 0; + } + if (req->request.version != MCONSOLE_VERSION) { + mconsole_reply(req, "This driver only supports version " + STRING(MCONSOLE_VERSION) " clients", 1, 0); + } + + req->request.data[req->request.len] = '\0'; + req->cmd = mconsole_parse(req); + if (req->cmd == NULL) { + mconsole_reply(req, "Unknown command", 1, 0); + return 0; + } + + return 1; +} + +int mconsole_reply_len(struct mc_request *req, const char *str, int total, + int err, int more) +{ + /* + * XXX This is a stack consumption problem. It'd be nice to + * make it global and serialize access to it, but there are a + * ton of callers to this function. 
+ */ + struct mconsole_reply reply; + int len, n; + + do { + reply.err = err; + + /* err can only be true on the first packet */ + err = 0; + + len = MIN(total, MCONSOLE_MAX_DATA - 1); + + if (len == total) reply.more = more; + else reply.more = 1; + + memcpy(reply.data, str, len); + reply.data[len] = '\0'; + total -= len; + str += len; + reply.len = len + 1; + + len = sizeof(reply) + reply.len - sizeof(reply.data); + + n = sendto(req->originating_fd, &reply, len, 0, + (struct sockaddr *) req->origin, req->originlen); + + if (n < 0) + return -errno; + } while (total > 0); + return 0; +} + +int mconsole_reply(struct mc_request *req, const char *str, int err, int more) +{ + return mconsole_reply_len(req, str, strlen(str), err, more); +} + + +int mconsole_unlink_socket(void) +{ + unlink(mconsole_socket_name); + return 0; +} + +static int notify_sock = -1; + +int mconsole_notify(char *sock_name, int type, const void *data, int len) +{ + struct sockaddr_un target; + struct mconsole_notify packet; + int n, err = 0; + + lock_notify(); + if (notify_sock < 0) { + notify_sock = socket(PF_UNIX, SOCK_DGRAM, 0); + if (notify_sock < 0) { + err = -errno; + printk(UM_KERN_ERR "mconsole_notify - socket failed, " + "errno = %d\n", errno); + } + } + unlock_notify(); + + if (err) + return err; + + target.sun_family = AF_UNIX; + strcpy(target.sun_path, sock_name); + + packet.magic = MCONSOLE_MAGIC; + packet.version = MCONSOLE_VERSION; + packet.type = type; + len = (len > sizeof(packet.data)) ? sizeof(packet.data) : len; + packet.len = len; + memcpy(packet.data, data, len); + + err = 0; + len = sizeof(packet) + packet.len - sizeof(packet.data); + n = sendto(notify_sock, &packet, len, 0, (struct sockaddr *) &target, + sizeof(target)); + if (n < 0) { + err = -errno; + printk(UM_KERN_ERR "mconsole_notify - sendto failed, " + "errno = %d\n", errno); + } + return err; +} diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c new file mode 100644 index 0000000000..807cd33587 --- /dev/null +++ b/arch/um/drivers/mmapper_kern.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/um/drivers/mmapper_kern.c + * + * BRIEF MODULE DESCRIPTION + * + * Copyright (C) 2000 RidgeRun, Inc. + * Author: RidgeRun, Inc. 
+ * Greg Lonnon glonnon@ridgerun.com or info@ridgerun.com + * + */ + +#include <linux/stddef.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mm.h> + +#include <linux/uaccess.h> +#include <mem_user.h> + +/* These are set in mmapper_init, which is called at boot time */ +static unsigned long mmapper_size; +static unsigned long p_buf; +static char *v_buf; + +static ssize_t mmapper_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size); +} + +static ssize_t mmapper_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (*ppos > mmapper_size) + return -EINVAL; + + return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count); +} + +static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + +static int mmapper_mmap(struct file *file, struct vm_area_struct *vma) +{ + int ret = -EINVAL; + int size; + + if (vma->vm_pgoff != 0) + goto out; + + size = vma->vm_end - vma->vm_start; + if (size > mmapper_size) + return -EFAULT; + + /* + * XXX A comment above remap_pfn_range says it should only be + * called when the mm semaphore is held + */ + if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size, + vma->vm_page_prot)) + goto out; + ret = 0; +out: + return ret; +} + +static int mmapper_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int mmapper_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations mmapper_fops = { + .owner = THIS_MODULE, + .read = mmapper_read, + .write = mmapper_write, + .unlocked_ioctl = mmapper_ioctl, + .mmap = mmapper_mmap, + .open = mmapper_open, + .release = mmapper_release, + .llseek = default_llseek, +}; + +/* + * No locking needed - only used (and modified) by below initcall and exitcall. + */ +static struct miscdevice mmapper_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "mmapper", + .fops = &mmapper_fops +}; + +static int __init mmapper_init(void) +{ + int err; + + printk(KERN_INFO "Mapper v0.1\n"); + + v_buf = (char *) find_iomem("mmapper", &mmapper_size); + if (mmapper_size == 0) { + printk(KERN_ERR "mmapper_init - find_iomem failed\n"); + return -ENODEV; + } + p_buf = __pa(v_buf); + + err = misc_register(&mmapper_dev); + if (err) { + printk(KERN_ERR "mmapper - misc_register failed, err = %d\n", + err); + return err; + } + return 0; +} + +static void __exit mmapper_exit(void) +{ + misc_deregister(&mmapper_dev); +} + +module_init(mmapper_init); +module_exit(mmapper_exit); + +MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>"); +MODULE_DESCRIPTION("DSPLinux simulator mmapper driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c new file mode 100644 index 0000000000..3d7836c465 --- /dev/null +++ b/arch/um/drivers/net_kern.c @@ -0,0 +1,892 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 by various other people who didn't put their name here. 
+ */ + +#include <linux/memblock.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <init.h> +#include <irq_kern.h> +#include <irq_user.h> +#include "mconsole_kern.h" +#include <net_kern.h> +#include <net_user.h> + +#define DRIVER_NAME "uml-netdev" + +static DEFINE_SPINLOCK(opened_lock); +static LIST_HEAD(opened); + +/* + * The drop_skb is used when we can't allocate an skb. The + * packet is read into drop_skb in order to get the data off the + * connection to the host. + * It is reallocated whenever a maximum packet size is seen which is + * larger than any seen before. update_drop_skb is called from + * eth_configure when a new interface is added. + */ +static DEFINE_SPINLOCK(drop_lock); +static struct sk_buff *drop_skb; +static int drop_max; + +static int update_drop_skb(int max) +{ + struct sk_buff *new; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&drop_lock, flags); + + if (max <= drop_max) + goto out; + + err = -ENOMEM; + new = dev_alloc_skb(max); + if (new == NULL) + goto out; + + skb_put(new, max); + + kfree_skb(drop_skb); + drop_skb = new; + drop_max = max; + err = 0; +out: + spin_unlock_irqrestore(&drop_lock, flags); + + return err; +} + +static int uml_net_rx(struct net_device *dev) +{ + struct uml_net_private *lp = netdev_priv(dev); + int pkt_len; + struct sk_buff *skb; + + /* If we can't allocate memory, try again next round. */ + skb = dev_alloc_skb(lp->max_packet); + if (skb == NULL) { + drop_skb->dev = dev; + /* Read a packet into drop_skb and don't do anything with it. */ + (*lp->read)(lp->fd, drop_skb, lp); + dev->stats.rx_dropped++; + return 0; + } + + skb->dev = dev; + skb_put(skb, lp->max_packet); + skb_reset_mac_header(skb); + pkt_len = (*lp->read)(lp->fd, skb, lp); + + if (pkt_len > 0) { + skb_trim(skb, pkt_len); + skb->protocol = (*lp->protocol)(skb); + + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + netif_rx(skb); + return pkt_len; + } + + kfree_skb(skb); + return pkt_len; +} + +static void uml_dev_close(struct work_struct *work) +{ + struct uml_net_private *lp = + container_of(work, struct uml_net_private, work); + dev_close(lp->dev); +} + +static irqreturn_t uml_net_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct uml_net_private *lp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return IRQ_NONE; + + spin_lock(&lp->lock); + while ((err = uml_net_rx(dev)) > 0) ; + if (err < 0) { + printk(KERN_ERR + "Device '%s' read returned %d, shutting it down\n", + dev->name, err); + /* dev_close can't be called in interrupt context, and takes + * again lp->lock. + * And dev_close() can be safely called multiple times on the + * same device, since it tests for (dev->flags & IFF_UP). So + * there's no harm in delaying the device shutdown. + * Furthermore, the workqueue will not re-enqueue an already + * enqueued work item. 
*/ + schedule_work(&lp->work); + goto out; + } +out: + spin_unlock(&lp->lock); + return IRQ_HANDLED; +} + +static int uml_net_open(struct net_device *dev) +{ + struct uml_net_private *lp = netdev_priv(dev); + int err; + + if (lp->fd >= 0) { + err = -ENXIO; + goto out; + } + + lp->fd = (*lp->open)(&lp->user); + if (lp->fd < 0) { + err = lp->fd; + goto out; + } + + err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt, + IRQF_SHARED, dev->name, dev); + if (err < 0) { + printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err); + err = -ENETUNREACH; + goto out_close; + } + + netif_start_queue(dev); + + /* clear buffer - it can happen that the host side of the interface + * is full when we get here. In this case, new data is never queued, + * SIGIOs never arrive, and the net never works. + */ + while ((err = uml_net_rx(dev)) > 0) ; + + spin_lock(&opened_lock); + list_add(&lp->list, &opened); + spin_unlock(&opened_lock); + + return 0; +out_close: + if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user); + lp->fd = -1; +out: + return err; +} + +static int uml_net_close(struct net_device *dev) +{ + struct uml_net_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + + um_free_irq(dev->irq, dev); + if (lp->close != NULL) + (*lp->close)(lp->fd, &lp->user); + lp->fd = -1; + + spin_lock(&opened_lock); + list_del(&lp->list); + spin_unlock(&opened_lock); + + return 0; +} + +static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct uml_net_private *lp = netdev_priv(dev); + unsigned long flags; + int len; + + netif_stop_queue(dev); + + spin_lock_irqsave(&lp->lock, flags); + + len = (*lp->write)(lp->fd, skb, lp); + skb_tx_timestamp(skb); + + if (len == skb->len) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + netif_trans_update(dev); + netif_start_queue(dev); + + /* this is normally done in the interrupt when tx finishes */ + netif_wake_queue(dev); + } + else if (len == 0) { + netif_start_queue(dev); + dev->stats.tx_dropped++; + } + else { + netif_start_queue(dev); + printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len); + } + + spin_unlock_irqrestore(&lp->lock, flags); + + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + +static void uml_net_set_multicast_list(struct net_device *dev) +{ + return; +} + +static void uml_net_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + netif_trans_update(dev); + netif_wake_queue(dev); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void uml_net_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + uml_net_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static void uml_net_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); +} + +static const struct ethtool_ops uml_net_ethtool_ops = { + .get_drvinfo = uml_net_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void uml_net_setup_etheraddr(struct net_device *dev, char *str) +{ + u8 addr[ETH_ALEN]; + char *end; + int i; + + if (str == NULL) + goto random; + + for (i = 0; i < 6; i++) { + addr[i] = simple_strtoul(str, &end, 16); + if ((end == str) || + ((*end != ':') && (*end != ',') && (*end != '\0'))) { + printk(KERN_ERR + "setup_etheraddr: failed to parse '%s' " + "as an ethernet address\n", str); + goto random; + } + str = end + 1; + } + if (is_multicast_ether_addr(addr)) { + printk(KERN_ERR + "Attempt to assign a multicast ethernet address to a " + "device disallowed\n"); + 
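/* Fall back to a randomly generated, locally administered address. */ +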
goto random; + } + if (!is_valid_ether_addr(addr)) { + printk(KERN_ERR + "Attempt to assign an invalid ethernet address to a " + "device disallowed\n"); + goto random; + } + if (!is_local_ether_addr(addr)) { + printk(KERN_WARNING + "Warning: Assigning a globally valid ethernet " + "address to a device\n"); + printk(KERN_WARNING "You should set the 2nd rightmost bit in " + "the first byte of the MAC,\n"); + printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n", + addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], + addr[5]); + } + eth_hw_addr_set(dev, addr); + return; + +random: + printk(KERN_INFO + "Choosing a random ethernet address for device %s\n", dev->name); + eth_hw_addr_random(dev); +} + +static DEFINE_SPINLOCK(devices_lock); +static LIST_HEAD(devices); + +static struct platform_driver uml_net_driver = { + .driver = { + .name = DRIVER_NAME, + }, +}; + +static void net_device_release(struct device *dev) +{ + struct uml_net *device = dev_get_drvdata(dev); + struct net_device *netdev = device->dev; + struct uml_net_private *lp = netdev_priv(netdev); + + if (lp->remove != NULL) + (*lp->remove)(&lp->user); + list_del(&device->list); + kfree(device); + free_netdev(netdev); +} + +static const struct net_device_ops uml_netdev_ops = { + .ndo_open = uml_net_open, + .ndo_stop = uml_net_close, + .ndo_start_xmit = uml_net_start_xmit, + .ndo_set_rx_mode = uml_net_set_multicast_list, + .ndo_tx_timeout = uml_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = uml_net_poll_controller, +#endif +}; + +/* + * Ensures that platform_driver_register is called only once by + * eth_configure. Will be set in an initcall. + */ +static int driver_registered; + +static void eth_configure(int n, void *init, char *mac, + struct transport *transport, gfp_t gfp_mask) +{ + struct uml_net *device; + struct net_device *dev; + struct uml_net_private *lp; + int err, size; + + size = transport->private_size + sizeof(struct uml_net_private); + + device = kzalloc(sizeof(*device), gfp_mask); + if (device == NULL) { + printk(KERN_ERR "eth_configure failed to allocate struct " + "uml_net\n"); + return; + } + + dev = alloc_etherdev(size); + if (dev == NULL) { + printk(KERN_ERR "eth_configure: failed to allocate struct " + "net_device for eth%d\n", n); + goto out_free_device; + } + + INIT_LIST_HEAD(&device->list); + device->index = n; + + /* If this name ends up conflicting with an existing registered + * netdevice, that is OK, register_netdev{,ice}() will notice this + * and fail. + */ + snprintf(dev->name, sizeof(dev->name), "eth%d", n); + + uml_net_setup_etheraddr(dev, mac); + + printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr); + + lp = netdev_priv(dev); + /* This points to the transport private data. It's still clear, but we + * must memset it to 0 *now*. Let's help the drivers. */ + memset(lp, 0, size); + INIT_WORK(&lp->work, uml_dev_close); + + /* sysfs register */ + if (!driver_registered) { + platform_driver_register(¨_net_driver); + driver_registered = 1; + } + device->pdev.id = n; + device->pdev.name = DRIVER_NAME; + device->pdev.dev.release = net_device_release; + dev_set_drvdata(&device->pdev.dev, device); + if (platform_device_register(&device->pdev)) + goto out_free_netdev; + SET_NETDEV_DEV(dev,&device->pdev.dev); + + device->dev = dev; + + /* + * These just fill in a data structure, so there's no failure + * to be worried about. 
+ */ + (*transport->kern->init)(dev, init); + + *lp = ((struct uml_net_private) + { .list = LIST_HEAD_INIT(lp->list), + .dev = dev, + .fd = -1, + .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0}, + .max_packet = transport->user->max_packet, + .protocol = transport->kern->protocol, + .open = transport->user->open, + .close = transport->user->close, + .remove = transport->user->remove, + .read = transport->kern->read, + .write = transport->kern->write, + .add_address = transport->user->add_address, + .delete_address = transport->user->delete_address }); + + spin_lock_init(&lp->lock); + memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac)); + + if ((transport->user->init != NULL) && + ((*transport->user->init)(&lp->user, dev) != 0)) + goto out_unregister; + + dev->mtu = transport->user->mtu; + dev->netdev_ops = ¨_netdev_ops; + dev->ethtool_ops = ¨_net_ethtool_ops; + dev->watchdog_timeo = (HZ >> 1); + dev->irq = UM_ETH_IRQ; + + err = update_drop_skb(lp->max_packet); + if (err) + goto out_undo_user_init; + + rtnl_lock(); + err = register_netdevice(dev); + rtnl_unlock(); + if (err) + goto out_undo_user_init; + + spin_lock(&devices_lock); + list_add(&device->list, &devices); + spin_unlock(&devices_lock); + + return; + +out_undo_user_init: + if (transport->user->remove != NULL) + (*transport->user->remove)(&lp->user); +out_unregister: + platform_device_unregister(&device->pdev); + return; /* platform_device_unregister frees dev and device */ +out_free_netdev: + free_netdev(dev); +out_free_device: + kfree(device); +} + +static struct uml_net *find_device(int n) +{ + struct uml_net *device; + struct list_head *ele; + + spin_lock(&devices_lock); + list_for_each(ele, &devices) { + device = list_entry(ele, struct uml_net, list); + if (device->index == n) + goto out; + } + device = NULL; + out: + spin_unlock(&devices_lock); + return device; +} + +static int eth_parse(char *str, int *index_out, char **str_out, + char **error_out) +{ + char *end; + int n, err = -EINVAL; + + n = simple_strtoul(str, &end, 0); + if (end == str) { + *error_out = "Bad device number"; + return err; + } + + str = end; + if (*str != '=') { + *error_out = "Expected '=' after device number"; + return err; + } + + str++; + if (find_device(n)) { + *error_out = "Device already configured"; + return err; + } + + *index_out = n; + *str_out = str; + return 0; +} + +struct eth_init { + struct list_head list; + char *init; + int index; +}; + +static DEFINE_SPINLOCK(transports_lock); +static LIST_HEAD(transports); + +/* Filled in during early boot */ +static LIST_HEAD(eth_cmd_line); + +static int check_transport(struct transport *transport, char *eth, int n, + void **init_out, char **mac_out, gfp_t gfp_mask) +{ + int len; + + len = strlen(transport->name); + if (strncmp(eth, transport->name, len)) + return 0; + + eth += len; + if (*eth == ',') + eth++; + else if (*eth != '\0') + return 0; + + *init_out = kmalloc(transport->setup_size, gfp_mask); + if (*init_out == NULL) + return 1; + + if (!transport->setup(eth, mac_out, *init_out)) { + kfree(*init_out); + *init_out = NULL; + } + return 1; +} + +void register_transport(struct transport *new) +{ + struct list_head *ele, *next; + struct eth_init *eth; + void *init; + char *mac = NULL; + int match; + + spin_lock(&transports_lock); + BUG_ON(!list_empty(&new->list)); + list_add(&new->list, &transports); + spin_unlock(&transports_lock); + + list_for_each_safe(ele, next, ð_cmd_line) { + eth = list_entry(ele, struct eth_init, list); + match = check_transport(new, eth->init, eth->index, &init, + &mac, 
GFP_KERNEL); + if (!match) + continue; + else if (init != NULL) { + eth_configure(eth->index, init, mac, new, GFP_KERNEL); + kfree(init); + } + list_del(ð->list); + } +} + +static int eth_setup_common(char *str, int index) +{ + struct list_head *ele; + struct transport *transport; + void *init; + char *mac = NULL; + int found = 0; + + spin_lock(&transports_lock); + list_for_each(ele, &transports) { + transport = list_entry(ele, struct transport, list); + if (!check_transport(transport, str, index, &init, + &mac, GFP_ATOMIC)) + continue; + if (init != NULL) { + eth_configure(index, init, mac, transport, GFP_ATOMIC); + kfree(init); + } + found = 1; + break; + } + + spin_unlock(&transports_lock); + return found; +} + +static int __init eth_setup(char *str) +{ + struct eth_init *new; + char *error; + int n, err; + + err = eth_parse(str, &n, &str, &error); + if (err) { + printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n", + str, error); + return 1; + } + + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); + if (!new) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*new)); + + INIT_LIST_HEAD(&new->list); + new->index = n; + new->init = str; + + list_add_tail(&new->list, ð_cmd_line); + return 1; +} + +__setup("eth", eth_setup); +__uml_help(eth_setup, +"eth[0-9]+=<transport>,<options>\n" +" Configure a network device.\n\n" +); + +static int net_config(char *str, char **error_out) +{ + int n, err; + + err = eth_parse(str, &n, &str, error_out); + if (err) + return err; + + /* This string is broken up and the pieces used by the underlying + * driver. So, it is freed only if eth_setup_common fails. + */ + str = kstrdup(str, GFP_KERNEL); + if (str == NULL) { + *error_out = "net_config failed to strdup string"; + return -ENOMEM; + } + err = !eth_setup_common(str, n); + if (err) + kfree(str); + return err; +} + +static int net_id(char **str, int *start_out, int *end_out) +{ + char *end; + int n; + + n = simple_strtoul(*str, &end, 0); + if ((*end != '\0') || (end == *str)) + return -1; + + *start_out = n; + *end_out = n; + *str = end; + return n; +} + +static int net_remove(int n, char **error_out) +{ + struct uml_net *device; + struct net_device *dev; + struct uml_net_private *lp; + + device = find_device(n); + if (device == NULL) + return -ENODEV; + + dev = device->dev; + lp = netdev_priv(dev); + if (lp->fd > 0) + return -EBUSY; + unregister_netdev(dev); + platform_device_unregister(&device->pdev); + + return 0; +} + +static struct mc_device net_mc = { + .list = LIST_HEAD_INIT(net_mc.list), + .name = "eth", + .config = net_config, + .get_config = NULL, + .id = net_id, + .remove = net_remove, +}; + +#ifdef CONFIG_INET +static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct uml_net_private *lp; + void (*proc)(unsigned char *, unsigned char *, void *); + unsigned char addr_buf[4], netmask_buf[4]; + + if (dev->netdev_ops->ndo_open != uml_net_open) + return NOTIFY_DONE; + + lp = netdev_priv(dev); + + proc = NULL; + switch (event) { + case NETDEV_UP: + proc = lp->add_address; + break; + case NETDEV_DOWN: + proc = lp->delete_address; + break; + } + if (proc != NULL) { + memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf)); + memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf)); + (*proc)(addr_buf, netmask_buf, &lp->user); + } + return NOTIFY_DONE; +} + +/* uml_net_init shouldn't be called twice on two CPUs at the same time */ +static struct notifier_block 
uml_inetaddr_notifier = { + .notifier_call = uml_inetaddr_event, +}; + +static void inet_register(void) +{ + struct list_head *ele; + struct uml_net_private *lp; + struct in_device *ip; + struct in_ifaddr *in; + + register_inetaddr_notifier(¨_inetaddr_notifier); + + /* Devices may have been opened already, so the uml_inetaddr_notifier + * didn't get a chance to run for them. This fakes it so that + * addresses which have already been set up get handled properly. + */ + spin_lock(&opened_lock); + list_for_each(ele, &opened) { + lp = list_entry(ele, struct uml_net_private, list); + ip = lp->dev->ip_ptr; + if (ip == NULL) + continue; + in = ip->ifa_list; + while (in != NULL) { + uml_inetaddr_event(NULL, NETDEV_UP, in); + in = in->ifa_next; + } + } + spin_unlock(&opened_lock); +} +#else +static inline void inet_register(void) +{ +} +#endif + +static int uml_net_init(void) +{ + mconsole_register_dev(&net_mc); + inet_register(); + return 0; +} + +__initcall(uml_net_init); + +static void close_devices(void) +{ + struct list_head *ele; + struct uml_net_private *lp; + + spin_lock(&opened_lock); + list_for_each(ele, &opened) { + lp = list_entry(ele, struct uml_net_private, list); + um_free_irq(lp->dev->irq, lp->dev); + if ((lp->close != NULL) && (lp->fd >= 0)) + (*lp->close)(lp->fd, &lp->user); + if (lp->remove != NULL) + (*lp->remove)(&lp->user); + } + spin_unlock(&opened_lock); +} + +__uml_exitcall(close_devices); + +void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *, + void *), + void *arg) +{ + struct net_device *dev = d; + struct in_device *ip = dev->ip_ptr; + struct in_ifaddr *in; + unsigned char address[4], netmask[4]; + + if (ip == NULL) return; + in = ip->ifa_list; + while (in != NULL) { + memcpy(address, &in->ifa_address, sizeof(address)); + memcpy(netmask, &in->ifa_mask, sizeof(netmask)); + (*cb)(address, netmask, arg); + in = in->ifa_next; + } +} + +int dev_netmask(void *d, void *m) +{ + struct net_device *dev = d; + struct in_device *ip = dev->ip_ptr; + struct in_ifaddr *in; + __be32 *mask_out = m; + + if (ip == NULL) + return 1; + + in = ip->ifa_list; + if (in == NULL) + return 1; + + *mask_out = in->ifa_mask; + return 0; +} + +void *get_output_buffer(int *len_out) +{ + void *ret; + + ret = (void *) __get_free_pages(GFP_KERNEL, 0); + if (ret) *len_out = PAGE_SIZE; + else *len_out = 0; + return ret; +} + +void free_output_buffer(void *buffer) +{ + free_pages((unsigned long) buffer, 0); +} + +int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out, + char **gate_addr) +{ + char *remain; + + remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL); + if (remain != NULL) { + printk(KERN_ERR "tap_setup_common - Extra garbage on " + "specification : '%s'\n", remain); + return 1; + } + + return 0; +} + +unsigned short eth_protocol(struct sk_buff *skb) +{ + return eth_type_trans(skb, skb->dev); +} diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c new file mode 100644 index 0000000000..4c9576452a --- /dev/null +++ b/arch/um/drivers/net_user.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <stdio.h> +#include <unistd.h> +#include <stdarg.h> +#include <errno.h> +#include <stddef.h> +#include <string.h> +#include <sys/socket.h> +#include <sys/wait.h> +#include <net_user.h> +#include <os.h> +#include <um_malloc.h> + +int tap_open_common(void *dev, char *gate_addr) +{ + int tap_addr[4]; + + if (gate_addr == NULL) + return 0; + if 
(sscanf(gate_addr, "%d.%d.%d.%d", &tap_addr[0], + &tap_addr[1], &tap_addr[2], &tap_addr[3]) != 4) { + printk(UM_KERN_ERR "Invalid tap IP address - '%s'\n", + gate_addr); + return -EINVAL; + } + return 0; +} + +void tap_check_ips(char *gate_addr, unsigned char *eth_addr) +{ + int tap_addr[4]; + + if ((gate_addr != NULL) && + (sscanf(gate_addr, "%d.%d.%d.%d", &tap_addr[0], + &tap_addr[1], &tap_addr[2], &tap_addr[3]) == 4) && + (eth_addr[0] == tap_addr[0]) && + (eth_addr[1] == tap_addr[1]) && + (eth_addr[2] == tap_addr[2]) && + (eth_addr[3] == tap_addr[3])) { + printk(UM_KERN_ERR "The tap IP address and the UML eth IP " + "address must be different\n"); + } +} + +/* Do reliable error handling as this fails frequently enough. */ +void read_output(int fd, char *output, int len) +{ + int remain, ret, expected; + char c; + char *str; + + if (output == NULL) { + output = &c; + len = sizeof(c); + } + + *output = '\0'; + ret = read(fd, &remain, sizeof(remain)); + + if (ret != sizeof(remain)) { + if (ret < 0) + ret = -errno; + expected = sizeof(remain); + str = "length"; + goto err; + } + + while (remain != 0) { + expected = (remain < len) ? remain : len; + ret = read(fd, output, expected); + if (ret != expected) { + if (ret < 0) + ret = -errno; + str = "data"; + goto err; + } + remain -= ret; + } + + return; + +err: + if (ret < 0) + printk(UM_KERN_ERR "read_output - read of %s failed, " + "errno = %d\n", str, -ret); + else + printk(UM_KERN_ERR "read_output - read of %s failed, read only " + "%d of %d bytes\n", str, ret, expected); +} + +int net_read(int fd, void *buf, int len) +{ + int n; + + n = read(fd, buf, len); + + if ((n < 0) && (errno == EAGAIN)) + return 0; + else if (n == 0) + return -ENOTCONN; + return n; +} + +int net_recvfrom(int fd, void *buf, int len) +{ + int n; + + CATCH_EINTR(n = recvfrom(fd, buf, len, 0, NULL, NULL)); + if (n < 0) { + if (errno == EAGAIN) + return 0; + return -errno; + } + else if (n == 0) + return -ENOTCONN; + return n; +} + +int net_write(int fd, void *buf, int len) +{ + int n; + + n = write(fd, buf, len); + + if ((n < 0) && (errno == EAGAIN)) + return 0; + else if (n == 0) + return -ENOTCONN; + return n; +} + +int net_send(int fd, void *buf, int len) +{ + int n; + + CATCH_EINTR(n = send(fd, buf, len, 0)); + if (n < 0) { + if (errno == EAGAIN) + return 0; + return -errno; + } + else if (n == 0) + return -ENOTCONN; + return n; +} + +int net_sendto(int fd, void *buf, int len, void *to, int sock_len) +{ + int n; + + CATCH_EINTR(n = sendto(fd, buf, len, 0, (struct sockaddr *) to, + sock_len)); + if (n < 0) { + if (errno == EAGAIN) + return 0; + return -errno; + } + else if (n == 0) + return -ENOTCONN; + return n; +} + +struct change_pre_exec_data { + int close_me; + int stdout_fd; +}; + +static void change_pre_exec(void *arg) +{ + struct change_pre_exec_data *data = arg; + + close(data->close_me); + dup2(data->stdout_fd, 1); +} + +static int change_tramp(char **argv, char *output, int output_len) +{ + int pid, fds[2], err; + struct change_pre_exec_data pe_data; + + err = os_pipe(fds, 1, 0); + if (err < 0) { + printk(UM_KERN_ERR "change_tramp - pipe failed, err = %d\n", + -err); + return err; + } + pe_data.close_me = fds[0]; + pe_data.stdout_fd = fds[1]; + pid = run_helper(change_pre_exec, &pe_data, argv); + + if (pid > 0) /* Avoid hang as we won't get data in failure case. 
*/ + read_output(fds[0], output, output_len); + + close(fds[0]); + close(fds[1]); + + if (pid > 0) + helper_wait(pid); + return pid; +} + +static void change(char *dev, char *what, unsigned char *addr, + unsigned char *netmask) +{ + char addr_buf[sizeof("255.255.255.255\0")]; + char netmask_buf[sizeof("255.255.255.255\0")]; + char version[sizeof("nnnnn\0")]; + char *argv[] = { "uml_net", version, what, dev, addr_buf, + netmask_buf, NULL }; + char *output; + int output_len, pid; + + sprintf(version, "%d", UML_NET_VERSION); + sprintf(addr_buf, "%d.%d.%d.%d", addr[0], addr[1], addr[2], addr[3]); + sprintf(netmask_buf, "%d.%d.%d.%d", netmask[0], netmask[1], + netmask[2], netmask[3]); + + output_len = UM_KERN_PAGE_SIZE; + output = uml_kmalloc(output_len, UM_GFP_KERNEL); + if (output == NULL) + printk(UM_KERN_ERR "change : failed to allocate output " + "buffer\n"); + + pid = change_tramp(argv, output, output_len); + if (pid < 0) { + kfree(output); + return; + } + + if (output != NULL) { + printk("%s", output); + kfree(output); + } +} + +void open_addr(unsigned char *addr, unsigned char *netmask, void *arg) +{ + change(arg, "add", addr, netmask); +} + +void close_addr(unsigned char *addr, unsigned char *netmask, void *arg) +{ + change(arg, "del", addr, netmask); +} + +char *split_if_spec(char *str, ...) +{ + char **arg, *end, *ret = NULL; + va_list ap; + + va_start(ap, str); + while ((arg = va_arg(ap, char **)) != NULL) { + if (*str == '\0') + goto out; + end = strchr(str, ','); + if (end != str) + *arg = str; + if (end == NULL) + goto out; + *end++ = '\0'; + str = end; + } + ret = str; +out: + va_end(ap); + return ret; +} diff --git a/arch/um/drivers/null.c b/arch/um/drivers/null.c new file mode 100644 index 0000000000..87087763a4 --- /dev/null +++ b/arch/um/drivers/null.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <stddef.h> +#include <errno.h> +#include <fcntl.h> +#include "chan_user.h" +#include <os.h> + +/* This address is used only as a unique identifier */ +static int null_chan; + +static void *null_init(char *str, int device, const struct chan_opts *opts) +{ + return &null_chan; +} + +static int null_open(int input, int output, int primary, void *d, + char **dev_out) +{ + int fd; + + *dev_out = NULL; + + fd = open(DEV_NULL, O_RDWR); + return (fd < 0) ? 
-errno : fd; +} + +static int null_read(int fd, char *c_out, void *unused) +{ + return -ENODEV; +} + +static void null_free(void *data) +{ +} + +const struct chan_ops null_ops = { + .type = "null", + .init = null_init, + .open = null_open, + .close = generic_close, + .read = null_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = null_free, + .winch = 0, +}; diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c new file mode 100644 index 0000000000..25ee2c97ca --- /dev/null +++ b/arch/um/drivers/pcap_kern.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <linux/init.h> +#include <linux/netdevice.h> +#include <net_kern.h> +#include "pcap_user.h" + +struct pcap_init { + char *host_if; + int promisc; + int optimize; + char *filter; +}; + +void pcap_init_kern(struct net_device *dev, void *data) +{ + struct uml_net_private *pri; + struct pcap_data *ppri; + struct pcap_init *init = data; + + pri = netdev_priv(dev); + ppri = (struct pcap_data *) pri->user; + ppri->host_if = init->host_if; + ppri->promisc = init->promisc; + ppri->optimize = init->optimize; + ppri->filter = init->filter; + + printk("pcap backend, host interface %s\n", ppri->host_if); +} + +static int pcap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return pcap_user_read(fd, skb_mac_header(skb), + skb->dev->mtu + ETH_HEADER_OTHER, + (struct pcap_data *) &lp->user); +} + +static int pcap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return -EPERM; +} + +static const struct net_kern_info pcap_kern_info = { + .init = pcap_init_kern, + .protocol = eth_protocol, + .read = pcap_read, + .write = pcap_write, +}; + +int pcap_setup(char *str, char **mac_out, void *data) +{ + struct pcap_init *init = data; + char *remain, *host_if = NULL, *options[2] = { NULL, NULL }; + int i; + + *init = ((struct pcap_init) + { .host_if = "eth0", + .promisc = 1, + .optimize = 0, + .filter = NULL }); + + remain = split_if_spec(str, &host_if, &init->filter, + &options[0], &options[1], mac_out, NULL); + if (remain != NULL) { + printk(KERN_ERR "pcap_setup - Extra garbage on " + "specification : '%s'\n", remain); + return 0; + } + + if (host_if != NULL) + init->host_if = host_if; + + for (i = 0; i < ARRAY_SIZE(options); i++) { + if (options[i] == NULL) + continue; + if (!strcmp(options[i], "promisc")) + init->promisc = 1; + else if (!strcmp(options[i], "nopromisc")) + init->promisc = 0; + else if (!strcmp(options[i], "optimize")) + init->optimize = 1; + else if (!strcmp(options[i], "nooptimize")) + init->optimize = 0; + else { + printk(KERN_ERR "pcap_setup : bad option - '%s'\n", + options[i]); + return 0; + } + } + + return 1; +} + +static struct transport pcap_transport = { + .list = LIST_HEAD_INIT(pcap_transport.list), + .name = "pcap", + .setup = pcap_setup, + .user = &pcap_user_info, + .kern = &pcap_kern_info, + .private_size = sizeof(struct pcap_data), + .setup_size = sizeof(struct pcap_init), +}; + +static int register_pcap(void) +{ + register_transport(&pcap_transport); + return 0; +} + +late_initcall(register_pcap); diff --git a/arch/um/drivers/pcap_user.c b/arch/um/drivers/pcap_user.c new file mode 100644 index 0000000000..52ddda3e3b --- /dev/null +++ b/arch/um/drivers/pcap_user.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + 
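+/* + * The pcap transport makes a UML ethernet device a read-only tap on a + * host interface; pcap_write() in pcap_kern.c above returns -EPERM. + * An illustrative command line, matching the fields parsed by + * pcap_setup() (host interface, optional BPF filter expression, then + * up to two of promisc/nopromisc/optimize/nooptimize): + * + * eth0=pcap,eth1,tcp port 80,promisc,optimize + */ +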
+#include <errno.h> +#include <pcap.h> +#include <string.h> +#include <asm/types.h> +#include <net_user.h> +#include "pcap_user.h" +#include <um_malloc.h> + +#define PCAP_FD(p) (*(int *)(p)) + +static int pcap_user_init(void *data, void *dev) +{ + struct pcap_data *pri = data; + pcap_t *p; + char errors[PCAP_ERRBUF_SIZE]; + + p = pcap_open_live(pri->host_if, ETH_MAX_PACKET + ETH_HEADER_OTHER, + pri->promisc, 0, errors); + if (p == NULL) { + printk(UM_KERN_ERR "pcap_user_init : pcap_open_live failed - " + "'%s'\n", errors); + return -EINVAL; + } + + pri->dev = dev; + pri->pcap = p; + return 0; +} + +static int pcap_user_open(void *data) +{ + struct pcap_data *pri = data; + __u32 netmask; + int err; + + if (pri->pcap == NULL) + return -ENODEV; + + if (pri->filter != NULL) { + err = dev_netmask(pri->dev, &netmask); + if (err < 0) { + printk(UM_KERN_ERR "pcap_user_open : dev_netmask failed\n"); + return -EIO; + } + + pri->compiled = uml_kmalloc(sizeof(struct bpf_program), + UM_GFP_KERNEL); + if (pri->compiled == NULL) { + printk(UM_KERN_ERR "pcap_user_open : kmalloc failed\n"); + return -ENOMEM; + } + + err = pcap_compile(pri->pcap, + (struct bpf_program *) pri->compiled, + pri->filter, pri->optimize, netmask); + if (err < 0) { + printk(UM_KERN_ERR "pcap_user_open : pcap_compile failed - " + "'%s'\n", pcap_geterr(pri->pcap)); + goto out; + } + + err = pcap_setfilter(pri->pcap, pri->compiled); + if (err < 0) { + printk(UM_KERN_ERR "pcap_user_open : pcap_setfilter " + "failed - '%s'\n", pcap_geterr(pri->pcap)); + goto out; + } + } + + return PCAP_FD(pri->pcap); + + out: + kfree(pri->compiled); + return -EIO; +} + +static void pcap_remove(void *data) +{ + struct pcap_data *pri = data; + + if (pri->compiled != NULL) + pcap_freecode(pri->compiled); + + if (pri->pcap != NULL) + pcap_close(pri->pcap); +} + +struct pcap_handler_data { + char *buffer; + int len; +}; + +static void handler(u_char *data, const struct pcap_pkthdr *header, + const u_char *packet) +{ + int len; + + struct pcap_handler_data *hdata = (struct pcap_handler_data *) data; + + len = hdata->len < header->caplen ? 
hdata->len : header->caplen; + memcpy(hdata->buffer, packet, len); + hdata->len = len; +} + +int pcap_user_read(int fd, void *buffer, int len, struct pcap_data *pri) +{ + struct pcap_handler_data hdata = ((struct pcap_handler_data) + { .buffer = buffer, + .len = len }); + int n; + + n = pcap_dispatch(pri->pcap, 1, handler, (u_char *) &hdata); + if (n < 0) { + printk(UM_KERN_ERR "pcap_dispatch failed - %s\n", + pcap_geterr(pri->pcap)); + return -EIO; + } + else if (n == 0) + return 0; + return hdata.len; +} + +const struct net_user_info pcap_user_info = { + .init = pcap_user_init, + .open = pcap_user_open, + .close = NULL, + .remove = pcap_remove, + .add_address = NULL, + .delete_address = NULL, + .mtu = ETH_MAX_PACKET, + .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER, +}; diff --git a/arch/um/drivers/pcap_user.h b/arch/um/drivers/pcap_user.h new file mode 100644 index 0000000000..216246f5f0 --- /dev/null +++ b/arch/um/drivers/pcap_user.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + */ + +#include <net_user.h> + +struct pcap_data { + char *host_if; + int promisc; + int optimize; + char *filter; + void *compiled; + void *pcap; + void *dev; +}; + +extern const struct net_user_info pcap_user_info; + +extern int pcap_user_read(int fd, void *buf, int len, struct pcap_data *pri); + diff --git a/arch/um/drivers/port.h b/arch/um/drivers/port.h new file mode 100644 index 0000000000..9085b336e6 --- /dev/null +++ b/arch/um/drivers/port.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2001 Jeff Dike (jdike@karaya.com) + */ + +#ifndef __PORT_H__ +#define __PORT_H__ + +extern void *port_data(int port); +extern int port_wait(void *data); +extern void port_kern_close(void *d); +extern int port_connection(int fd, int *socket_out, int *pid_out); +extern int port_listen_fd(int port); +extern void port_read(int fd, void *data); +extern void port_kern_free(void *d); +extern int port_rcv_fd(int fd); +extern void port_remove_dev(void *d); + +#endif + diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c new file mode 100644 index 0000000000..c52b3ff3c0 --- /dev/null +++ b/arch/um/drivers/port_kern.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <asm/atomic.h> +#include <init.h> +#include <irq_kern.h> +#include <os.h> +#include "port.h" + +struct port_list { + struct list_head list; + atomic_t wait_count; + int has_connection; + struct completion done; + int port; + int fd; + spinlock_t lock; + struct list_head pending; + struct list_head connections; +}; + +struct port_dev { + struct port_list *port; + int helper_pid; + int telnetd_pid; +}; + +struct connection { + struct list_head list; + int fd; + int helper_pid; + int socket[2]; + int telnetd_pid; + struct port_list *port; +}; + +static irqreturn_t pipe_interrupt(int irq, void *data) +{ + struct connection *conn = data; + int fd; + + fd = os_rcv_fd(conn->socket[0], &conn->helper_pid); + if (fd < 0) { + if (fd == -EAGAIN) + return IRQ_NONE; + + printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n", + -fd); + os_close_file(conn->fd); + } + + list_del(&conn->list); + + conn->fd = fd; + list_add(&conn->list, &conn->port->connections); + + complete(&conn->port->done); + return 
IRQ_HANDLED; +} + +#define NO_WAITER_MSG \ + "****\n" \ + "There are currently no UML consoles waiting for port connections.\n" \ + "Either disconnect from one to make it available or activate some more\n" \ + "by enabling more consoles in the UML /etc/inittab.\n" \ + "****\n" + +static int port_accept(struct port_list *port) +{ + struct connection *conn; + int fd, socket[2], pid; + + fd = port_connection(port->fd, socket, &pid); + if (fd < 0) { + if (fd != -EAGAIN) + printk(KERN_ERR "port_accept : port_connection " + "returned %d\n", -fd); + goto out; + } + + conn = kmalloc(sizeof(*conn), GFP_ATOMIC); + if (conn == NULL) { + printk(KERN_ERR "port_accept : failed to allocate " + "connection\n"); + goto out_close; + } + *conn = ((struct connection) + { .list = LIST_HEAD_INIT(conn->list), + .fd = fd, + .socket = { socket[0], socket[1] }, + .telnetd_pid = pid, + .port = port }); + + if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt, + IRQF_SHARED, "telnetd", conn) < 0) { + printk(KERN_ERR "port_accept : failed to get IRQ for " + "telnetd\n"); + goto out_free; + } + + if (atomic_read(&port->wait_count) == 0) { + os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG)); + printk(KERN_ERR "No one waiting for port\n"); + } + list_add(&conn->list, &port->pending); + return 1; + + out_free: + kfree(conn); + out_close: + os_close_file(fd); + os_kill_process(pid, 1); + out: + return 0; +} + +static DEFINE_MUTEX(ports_mutex); +static LIST_HEAD(ports); + +static void port_work_proc(struct work_struct *unused) +{ + struct port_list *port; + struct list_head *ele; + unsigned long flags; + + local_irq_save(flags); + list_for_each(ele, &ports) { + port = list_entry(ele, struct port_list, list); + if (!port->has_connection) + continue; + + while (port_accept(port)) + ; + port->has_connection = 0; + } + local_irq_restore(flags); +} + +static DECLARE_WORK(port_work, port_work_proc); + +static irqreturn_t port_interrupt(int irq, void *data) +{ + struct port_list *port = data; + + port->has_connection = 1; + schedule_work(&port_work); + return IRQ_HANDLED; +} + +void *port_data(int port_num) +{ + struct list_head *ele; + struct port_list *port; + struct port_dev *dev = NULL; + int fd; + + mutex_lock(&ports_mutex); + list_for_each(ele, &ports) { + port = list_entry(ele, struct port_list, list); + if (port->port == port_num) + goto found; + } + port = kmalloc(sizeof(struct port_list), GFP_KERNEL); + if (port == NULL) { + printk(KERN_ERR "Allocation of port list failed\n"); + goto out; + } + + fd = port_listen_fd(port_num); + if (fd < 0) { + printk(KERN_ERR "binding to port %d failed, errno = %d\n", + port_num, -fd); + goto out_free; + } + + if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt, + IRQF_SHARED, "port", port) < 0) { + printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num); + goto out_close; + } + + *port = ((struct port_list) + { .list = LIST_HEAD_INIT(port->list), + .wait_count = ATOMIC_INIT(0), + .has_connection = 0, + .port = port_num, + .fd = fd, + .pending = LIST_HEAD_INIT(port->pending), + .connections = LIST_HEAD_INIT(port->connections) }); + spin_lock_init(&port->lock); + init_completion(&port->done); + list_add(&port->list, &ports); + + found: + dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "Allocation of port device entry failed\n"); + goto out; + } + + *dev = ((struct port_dev) { .port = port, + .helper_pid = -1, + .telnetd_pid = -1 }); + goto out; + + out_close: + os_close_file(fd); + out_free: + kfree(port); + 
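/* On the failure paths above, dev was never allocated and is still NULL. */ +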
out: + mutex_unlock(&ports_mutex); + return dev; +} + +int port_wait(void *data) +{ + struct port_dev *dev = data; + struct connection *conn; + struct port_list *port = dev->port; + int fd; + + atomic_inc(&port->wait_count); + while (1) { + fd = -ERESTARTSYS; + if (wait_for_completion_interruptible(&port->done)) + goto out; + + spin_lock(&port->lock); + + conn = list_entry(port->connections.next, struct connection, + list); + list_del(&conn->list); + spin_unlock(&port->lock); + + os_shutdown_socket(conn->socket[0], 1, 1); + os_close_file(conn->socket[0]); + os_shutdown_socket(conn->socket[1], 1, 1); + os_close_file(conn->socket[1]); + + /* This is done here because freeing an IRQ can't be done + * within the IRQ handler. So, pipe_interrupt always ups + * the semaphore regardless of whether it got a successful + * connection. Then we loop here throwing out failed + * connections until a good one is found. + */ + um_free_irq(TELNETD_IRQ, conn); + + if (conn->fd >= 0) + break; + os_close_file(conn->fd); + kfree(conn); + } + + fd = conn->fd; + dev->helper_pid = conn->helper_pid; + dev->telnetd_pid = conn->telnetd_pid; + kfree(conn); + out: + atomic_dec(&port->wait_count); + return fd; +} + +void port_remove_dev(void *d) +{ + struct port_dev *dev = d; + + if (dev->helper_pid != -1) + os_kill_process(dev->helper_pid, 0); + if (dev->telnetd_pid != -1) + os_kill_process(dev->telnetd_pid, 1); + dev->helper_pid = -1; + dev->telnetd_pid = -1; +} + +void port_kern_free(void *d) +{ + struct port_dev *dev = d; + + port_remove_dev(dev); + kfree(dev); +} + +static void free_port(void) +{ + struct list_head *ele; + struct port_list *port; + + list_for_each(ele, &ports) { + port = list_entry(ele, struct port_list, list); + free_irq_by_fd(port->fd); + os_close_file(port->fd); + } +} + +__uml_exitcall(free_port); diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c new file mode 100644 index 0000000000..3c62ae81df --- /dev/null +++ b/arch/um/drivers/port_user.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <termios.h> +#include <unistd.h> +#include <netinet/in.h> +#include "chan_user.h" +#include <os.h> +#include "port.h" +#include <um_malloc.h> + +struct port_chan { + int raw; + struct termios tt; + void *kernel_data; + char dev[sizeof("32768\0")]; +}; + +static void *port_init(char *str, int device, const struct chan_opts *opts) +{ + struct port_chan *data; + void *kern_data; + char *end; + int port; + + if (*str != ':') { + printk(UM_KERN_ERR "port_init : channel type 'port' must " + "specify a port number\n"); + return NULL; + } + str++; + port = strtoul(str, &end, 0); + if ((*end != '\0') || (end == str)) { + printk(UM_KERN_ERR "port_init : couldn't parse port '%s'\n", + str); + return NULL; + } + + kern_data = port_data(port); + if (kern_data == NULL) + return NULL; + + data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL); + if (data == NULL) + goto err; + + *data = ((struct port_chan) { .raw = opts->raw, + .kernel_data = kern_data }); + sprintf(data->dev, "%d", port); + + return data; + err: + port_kern_free(kern_data); + return NULL; +} + +static void port_free(void *d) +{ + struct port_chan *data = d; + + port_kern_free(data->kernel_data); + kfree(data); +} + +static int port_open(int input, int output, int primary, void *d, + char **dev_out) +{ + struct port_chan *data = d; + int fd, err; + + fd = 
port_wait(data->kernel_data); + if ((fd >= 0) && data->raw) { + CATCH_EINTR(err = tcgetattr(fd, &data->tt)); + if (err) + return err; + + err = raw(fd); + if (err) + return err; + } + *dev_out = data->dev; + return fd; +} + +static void port_close(int fd, void *d) +{ + struct port_chan *data = d; + + port_remove_dev(data->kernel_data); + os_close_file(fd); +} + +const struct chan_ops port_ops = { + .type = "port", + .init = port_init, + .open = port_open, + .close = port_close, + .read = generic_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = port_free, + .winch = 1, +}; + +int port_listen_fd(int port) +{ + struct sockaddr_in addr; + int fd, err, arg; + + fd = socket(PF_INET, SOCK_STREAM, 0); + if (fd == -1) + return -errno; + + arg = 1; + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &arg, sizeof(arg)) < 0) { + err = -errno; + goto out; + } + + addr.sin_family = AF_INET; + addr.sin_port = htons(port); + addr.sin_addr.s_addr = htonl(INADDR_ANY); + if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) { + err = -errno; + goto out; + } + + if (listen(fd, 1) < 0) { + err = -errno; + goto out; + } + + err = os_set_fd_block(fd, 0); + if (err < 0) + goto out; + + return fd; + out: + close(fd); + return err; +} + +struct port_pre_exec_data { + int sock_fd; + int pipe_fd; +}; + +static void port_pre_exec(void *arg) +{ + struct port_pre_exec_data *data = arg; + + dup2(data->sock_fd, 0); + dup2(data->sock_fd, 1); + dup2(data->sock_fd, 2); + close(data->sock_fd); + dup2(data->pipe_fd, 3); + shutdown(3, SHUT_RD); + close(data->pipe_fd); +} + +int port_connection(int fd, int *socket, int *pid_out) +{ + int new, err; + char *env; + char *argv[] = { "in.telnetd", "-L", + OS_LIB_PATH "/uml/port-helper", NULL }; + struct port_pre_exec_data data; + + if ((env = getenv("UML_PORT_HELPER"))) + argv[2] = env; + + new = accept(fd, NULL, 0); + if (new < 0) + return -errno; + + err = os_access(argv[2], X_OK); + if (err < 0) { + printk(UM_KERN_ERR "port_connection : error accessing port-helper " + "executable at %s: %s\n", argv[2], strerror(-err)); + if (env == NULL) + printk(UM_KERN_ERR "Set UML_PORT_HELPER environment " + "variable to path to uml-utilities port-helper " + "binary\n"); + goto out_close; + } + + err = os_pipe(socket, 0, 0); + if (err < 0) + goto out_close; + + data = ((struct port_pre_exec_data) + { .sock_fd = new, + .pipe_fd = socket[1] }); + + err = run_helper(port_pre_exec, &data, argv); + if (err < 0) + goto out_shutdown; + + *pid_out = err; + return new; + + out_shutdown: + shutdown(socket[0], SHUT_RDWR); + close(socket[0]); + shutdown(socket[1], SHUT_RDWR); + close(socket[1]); + out_close: + close(new); + return err; +} diff --git a/arch/um/drivers/pty.c b/arch/um/drivers/pty.c new file mode 100644 index 0000000000..39c60068cf --- /dev/null +++ b/arch/um/drivers/pty.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> +#include <fcntl.h> +#include <string.h> +#include <termios.h> +#include <sys/stat.h> +#include "chan_user.h" +#include <os.h> +#include <um_malloc.h> + +struct pty_chan { + void (*announce)(char *dev_name, int dev); + int dev; + int raw; + struct termios tt; + char dev_name[sizeof("/dev/pts/0123456\0")]; +}; + +static void *pty_chan_init(char *str, int device, const struct chan_opts *opts) +{ + struct pty_chan *data; + + data = 
uml_kmalloc(sizeof(*data), UM_GFP_KERNEL); + if (data == NULL) + return NULL; + + *data = ((struct pty_chan) { .announce = opts->announce, + .dev = device, + .raw = opts->raw }); + return data; +} + +static int pts_open(int input, int output, int primary, void *d, + char **dev_out) +{ + struct pty_chan *data = d; + char *dev; + int fd, err; + + fd = get_pty(); + if (fd < 0) { + err = -errno; + printk(UM_KERN_ERR "open_pts : Failed to open pts\n"); + return err; + } + + if (data->raw) { + CATCH_EINTR(err = tcgetattr(fd, &data->tt)); + if (err) + goto out_close; + + err = raw(fd); + if (err) + goto out_close; + } + + dev = ptsname(fd); + sprintf(data->dev_name, "%s", dev); + *dev_out = data->dev_name; + + if (data->announce) + (*data->announce)(dev, data->dev); + + return fd; + +out_close: + close(fd); + return err; +} + +static int getmaster(char *line) +{ + struct stat buf; + char *pty, *bank, *cp; + int master, err; + + pty = &line[strlen("/dev/ptyp")]; + for (bank = "pqrs"; *bank; bank++) { + line[strlen("/dev/pty")] = *bank; + *pty = '0'; + /* Did we hit the end ? */ + if ((stat(line, &buf) < 0) && (errno == ENOENT)) + break; + + for (cp = "0123456789abcdef"; *cp; cp++) { + *pty = *cp; + master = open(line, O_RDWR); + if (master >= 0) { + char *tp = &line[strlen("/dev/")]; + + /* verify slave side is usable */ + *tp = 't'; + err = access(line, R_OK | W_OK); + *tp = 'p'; + if (!err) + return master; + close(master); + } + } + } + + printk(UM_KERN_ERR "getmaster - no usable host pty devices\n"); + return -ENOENT; +} + +static int pty_open(int input, int output, int primary, void *d, + char **dev_out) +{ + struct pty_chan *data = d; + int fd, err; + char dev[sizeof("/dev/ptyxx\0")] = "/dev/ptyxx"; + + fd = getmaster(dev); + if (fd < 0) + return fd; + + if (data->raw) { + err = raw(fd); + if (err) { + close(fd); + return err; + } + } + + if (data->announce) + (*data->announce)(dev, data->dev); + + sprintf(data->dev_name, "%s", dev); + *dev_out = data->dev_name; + + return fd; +} + +const struct chan_ops pty_ops = { + .type = "pty", + .init = pty_chan_init, + .open = pty_open, + .close = generic_close, + .read = generic_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = generic_free, + .winch = 0, +}; + +const struct chan_ops pts_ops = { + .type = "pts", + .init = pty_chan_init, + .open = pts_open, + .close = generic_close, + .read = generic_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = generic_free, + .winch = 0, +}; diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c new file mode 100644 index 0000000000..da985e0dc6 --- /dev/null +++ b/arch/um/drivers/random.c @@ -0,0 +1,121 @@ +/* Copyright (C) 2005 - 2008 Jeff Dike <jdike@{linux.intel,addtoit}.com> */ + +/* Much of this ripped from drivers/char/hw_random.c, see there for other + * copyright. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
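+ *
+ * On the UML side this is a thin proxy for the host's /dev/random:
+ * os_read_file() returns -EAGAIN when the host pool is empty, and the
+ * SIGIO-driven interrupt below completes "have_data" so that
+ * rng_dev_read() can sleep and retry instead of spinning.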
+ */ +#include <linux/sched/signal.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> +#include <linux/hw_random.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <init.h> +#include <irq_kern.h> +#include <os.h> + +/* + * core module information + */ +#define RNG_MODULE_NAME "hw_random" + +/* Changed at init time, in the non-modular case, and at module load + * time, in the module case. Presumably, the module subsystem + * protects against a module being loaded twice at the same time. + */ +static int random_fd = -1; +static struct hwrng hwrng; +static DECLARE_COMPLETION(have_data); + +static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block) +{ + int ret; + + for (;;) { + ret = os_read_file(random_fd, buf, max); + if (block && ret == -EAGAIN) { + add_sigio_fd(random_fd); + + ret = wait_for_completion_killable(&have_data); + + ignore_sigio_fd(random_fd); + deactivate_fd(random_fd, RANDOM_IRQ); + + if (ret < 0) + break; + } else { + break; + } + } + + return ret != -EAGAIN ? ret : 0; +} + +static irqreturn_t random_interrupt(int irq, void *data) +{ + complete(&have_data); + + return IRQ_HANDLED; +} + +/* + * rng_init - initialize RNG module + */ +static int __init rng_init (void) +{ + int err; + + err = os_open_file("/dev/random", of_read(OPENFLAGS()), 0); + if (err < 0) + goto out; + + random_fd = err; + err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt, + 0, "random", NULL); + if (err < 0) + goto err_out_cleanup_hw; + + sigio_broken(random_fd); + hwrng.name = RNG_MODULE_NAME; + hwrng.read = rng_dev_read; + + err = hwrng_register(&hwrng); + if (err) { + pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err); + goto err_out_cleanup_hw; + } +out: + return err; + +err_out_cleanup_hw: + os_close_file(random_fd); + random_fd = -1; + goto out; +} + +/* + * rng_cleanup - shutdown RNG module + */ + +static void cleanup(void) +{ + free_irq_by_fd(random_fd); + os_close_file(random_fd); +} + +static void __exit rng_cleanup(void) +{ + hwrng_unregister(&hwrng); + os_close_file(random_fd); +} + +module_init (rng_init); +module_exit (rng_cleanup); +__uml_exitcall(cleanup); + +MODULE_DESCRIPTION("UML Host Random Number Generator (RNG) driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/um/drivers/rtc.h b/arch/um/drivers/rtc.h new file mode 100644 index 0000000000..95e41c7d35 --- /dev/null +++ b/arch/um/drivers/rtc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#ifndef __UM_RTC_H__ +#define __UM_RTC_H__ + +int uml_rtc_start(bool timetravel); +int uml_rtc_enable_alarm(unsigned long long delta_seconds); +void uml_rtc_disable_alarm(void); +void uml_rtc_stop(bool timetravel); +void uml_rtc_send_timetravel_alarm(void); + +#endif /* __UM_RTC_H__ */ diff --git a/arch/um/drivers/rtc_kern.c b/arch/um/drivers/rtc_kern.c new file mode 100644 index 0000000000..97ceb205cf --- /dev/null +++ b/arch/um/drivers/rtc_kern.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#include <linux/platform_device.h> +#include <linux/time-internal.h> +#include <linux/suspend.h> +#include <linux/err.h> +#include <linux/rtc.h> +#include <kern_util.h> +#include <irq_kern.h> +#include <os.h> +#include "rtc.h" + +static time64_t uml_rtc_alarm_time; +static bool uml_rtc_alarm_enabled; +static 
struct rtc_device *uml_rtc;
+static int uml_rtc_irq_fd, uml_rtc_irq;
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+
+static void uml_rtc_time_travel_alarm(struct time_travel_event *ev)
+{
+	uml_rtc_send_timetravel_alarm();
+}
+
+static struct time_travel_event uml_rtc_alarm_event = {
+	.fn = uml_rtc_time_travel_alarm,
+};
+#endif
+
+static int uml_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct timespec64 ts;
+
+	/* Use this to get correct time in time-travel mode */
+	read_persistent_clock64(&ts);
+	rtc_time64_to_tm(timespec64_to_ktime(ts) / NSEC_PER_SEC, tm);
+
+	return 0;
+}
+
+static int uml_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	rtc_time64_to_tm(uml_rtc_alarm_time, &alrm->time);
+	alrm->enabled = uml_rtc_alarm_enabled;
+
+	return 0;
+}
+
+static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+	unsigned long long secs;
+
+	if (!enable && !uml_rtc_alarm_enabled)
+		return 0;
+
+	uml_rtc_alarm_enabled = enable;
+
+	secs = uml_rtc_alarm_time - ktime_get_real_seconds();
+
+	if (time_travel_mode == TT_MODE_OFF) {
+		if (!enable) {
+			uml_rtc_disable_alarm();
+			return 0;
+		}
+
+		/* enable or update */
+		return uml_rtc_enable_alarm(secs);
+	} else {
+		time_travel_del_event(&uml_rtc_alarm_event);
+
+		if (enable)
+			time_travel_add_event_rel(&uml_rtc_alarm_event,
+						  secs * NSEC_PER_SEC);
+	}
+
+	return 0;
+}
+
+static int uml_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	uml_rtc_alarm_irq_enable(dev, 0);
+	uml_rtc_alarm_time = rtc_tm_to_time64(&alrm->time);
+	uml_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+	return 0;
+}
+
+static const struct rtc_class_ops uml_rtc_ops = {
+	.read_time = uml_rtc_read_time,
+	.read_alarm = uml_rtc_read_alarm,
+	.alarm_irq_enable = uml_rtc_alarm_irq_enable,
+	.set_alarm = uml_rtc_set_alarm,
+};
+
+static irqreturn_t uml_rtc_interrupt(int irq, void *data)
+{
+	unsigned long long c = 0;
+
+	/* alarm triggered, it's now off */
+	uml_rtc_alarm_enabled = false;
+
+	os_read_file(uml_rtc_irq_fd, &c, sizeof(c));
+	WARN_ON(c == 0);
+
+	pm_system_wakeup();
+	rtc_update_irq(uml_rtc, 1, RTC_IRQF | RTC_AF);
+
+	return IRQ_HANDLED;
+}
+
+static int uml_rtc_setup(void)
+{
+	int err;
+
+	err = uml_rtc_start(time_travel_mode != TT_MODE_OFF);
+	if (WARN(err < 0, "err = %d\n", err))
+		return err;
+
+	uml_rtc_irq_fd = err;
+
+	err = um_request_irq(UM_IRQ_ALLOC, uml_rtc_irq_fd, IRQ_READ,
+			     uml_rtc_interrupt, 0, "rtc", NULL);
+	if (err < 0) {
+		uml_rtc_stop(time_travel_mode != TT_MODE_OFF);
+		return err;
+	}
+
+	irq_set_irq_wake(err, 1);
+
+	uml_rtc_irq = err;
+	return 0;
+}
+
+static void uml_rtc_cleanup(void)
+{
+	um_free_irq(uml_rtc_irq, NULL);
+	uml_rtc_stop(time_travel_mode != TT_MODE_OFF);
+}
+
+static int uml_rtc_probe(struct platform_device *pdev)
+{
+	int err;
+
+	err = uml_rtc_setup();
+	if (err)
+		return err;
+
+	uml_rtc = devm_rtc_allocate_device(&pdev->dev);
+	if (IS_ERR(uml_rtc)) {
+		err = PTR_ERR(uml_rtc);
+		goto cleanup;
+	}
+
+	uml_rtc->ops = &uml_rtc_ops;
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	err = devm_rtc_register_device(uml_rtc);
+	if (err)
+		goto cleanup;
+
+	return 0;
+cleanup:
+	uml_rtc_cleanup();
+	return err;
+}
+
+static int uml_rtc_remove(struct platform_device *pdev)
+{
+	device_init_wakeup(&pdev->dev, 0);
+	uml_rtc_cleanup();
+	return 0;
+}
+
+static struct platform_driver uml_rtc_driver = {
+	.probe = uml_rtc_probe,
+	.remove = uml_rtc_remove,
+	.driver = {
+		.name = "uml-rtc",
+	},
+};
+
+static int __init uml_rtc_init(void)
+{
+	struct platform_device *pdev;
+	int err;
+
+	err = platform_driver_register(&uml_rtc_driver);
+	if (err)
+		return err;
+
+	pdev = platform_device_alloc("uml-rtc", 0);
+	if (!pdev) {
+		err = -ENOMEM;
+		goto unregister;
+	}
+
+	err = platform_device_add(pdev);
+	if (err)
+		goto unregister;
+	return 0;
+
+unregister:
+	platform_device_put(pdev);
+	platform_driver_unregister(&uml_rtc_driver);
+	return err;
+}
+device_initcall(uml_rtc_init);
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
new file mode 100644
index 0000000000..7c3cec4c68
--- /dev/null
+++ b/arch/um/drivers/rtc_user.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <stdbool.h>
+#include <os.h>
+#include <errno.h>
+#include <sched.h>
+#include <unistd.h>
+#include <kern_util.h>
+#include <sys/select.h>
+#include <stdio.h>
+#include <sys/timerfd.h>
+#include "rtc.h"
+
+static int uml_rtc_irq_fds[2];
+
+void uml_rtc_send_timetravel_alarm(void)
+{
+	unsigned long long c = 1;
+
+	CATCH_EINTR(write(uml_rtc_irq_fds[1], &c, sizeof(c)));
+}
+
+int uml_rtc_start(bool timetravel)
+{
+	int err;
+
+	if (timetravel) {
+		int err = os_pipe(uml_rtc_irq_fds, 1, 1);
+		if (err)
+			goto fail;
+	} else {
+		uml_rtc_irq_fds[0] = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);
+		if (uml_rtc_irq_fds[0] < 0) {
+			err = -errno;
+			goto fail;
+		}
+
+		/* apparently timerfd won't send SIGIO, use workaround */
+		sigio_broken(uml_rtc_irq_fds[0]);
+		err = add_sigio_fd(uml_rtc_irq_fds[0]);
+		if (err < 0) {
+			close(uml_rtc_irq_fds[0]);
+			goto fail;
+		}
+	}
+
+	return uml_rtc_irq_fds[0];
+fail:
+	uml_rtc_stop(timetravel);
+	return err;
+}
+
+int uml_rtc_enable_alarm(unsigned long long delta_seconds)
+{
+	struct itimerspec it = {
+		.it_value = {
+			.tv_sec = delta_seconds,
+		},
+	};
+
+	if (timerfd_settime(uml_rtc_irq_fds[0], 0, &it, NULL))
+		return -errno;
+	return 0;
+}
+
+void uml_rtc_disable_alarm(void)
+{
+	uml_rtc_enable_alarm(0);
+}
+
+void uml_rtc_stop(bool timetravel)
+{
+	if (timetravel)
+		os_close_file(uml_rtc_irq_fds[1]);
+	else
+		ignore_sigio_fd(uml_rtc_irq_fds[0]);
+	os_close_file(uml_rtc_irq_fds[0]);
+}
diff --git a/arch/um/drivers/slip.h b/arch/um/drivers/slip.h
new file mode 100644
index 0000000000..0f3b7ca994
--- /dev/null
+++ b/arch/um/drivers/slip.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_SLIP_H
+#define __UM_SLIP_H
+
+#include "slip_common.h"
+
+struct slip_data {
+	void *dev;
+	char name[sizeof("slnnnnn\0")];
+	char *addr;
+	char *gate_addr;
+	int slave;
+	struct slip_proto slip;
+};
+
+extern const struct net_user_info slip_user_info;
+
+extern int slip_user_read(int fd, void *buf, int len, struct slip_data *pri);
+extern int slip_user_write(int fd, void *buf, int len, struct slip_data *pri);
+
+#endif
diff --git a/arch/um/drivers/slip_common.c b/arch/um/drivers/slip_common.c
new file mode 100644
index 0000000000..20fe4f4274
--- /dev/null
+++ b/arch/um/drivers/slip_common.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include "slip_common.h"
+#include <net_user.h>
+
+int slip_proto_read(int fd, void *buf, int len, struct slip_proto *slip)
+{
+	int i, n, size, start;
+
+	if(slip->more > 0){
+		i = 0;
+		while(i < slip->more){
+			size = slip_unesc(slip->ibuf[i++], slip->ibuf,
+					  &slip->pos, &slip->esc);
+			if(size){
+				memcpy(buf, slip->ibuf, size);
+				memmove(slip->ibuf, &slip->ibuf[i],
+					slip->more - i);
+				slip->more = slip->more - i;
+				return size;
+			}
+		}
+		slip->more = 0;
+	}
+
+	n = net_read(fd,
&slip->ibuf[slip->pos], + sizeof(slip->ibuf) - slip->pos); + if(n <= 0) + return n; + + start = slip->pos; + for(i = 0; i < n; i++){ + size = slip_unesc(slip->ibuf[start + i], slip->ibuf,&slip->pos, + &slip->esc); + if(size){ + memcpy(buf, slip->ibuf, size); + memmove(slip->ibuf, &slip->ibuf[start+i+1], + n - (i + 1)); + slip->more = n - (i + 1); + return size; + } + } + return 0; +} + +int slip_proto_write(int fd, void *buf, int len, struct slip_proto *slip) +{ + int actual, n; + + actual = slip_esc(buf, slip->obuf, len); + n = net_write(fd, slip->obuf, actual); + if(n < 0) + return n; + else return len; +} diff --git a/arch/um/drivers/slip_common.h b/arch/um/drivers/slip_common.h new file mode 100644 index 0000000000..d3798b5caf --- /dev/null +++ b/arch/um/drivers/slip_common.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_SLIP_COMMON_H +#define __UM_SLIP_COMMON_H + +#define BUF_SIZE 1500 + /* two bytes each for a (pathological) max packet of escaped chars + * + * terminating END char + initial END char */ +#define ENC_BUF_SIZE (2 * BUF_SIZE + 2) + +/* SLIP protocol characters. */ +#define SLIP_END 0300 /* indicates end of frame */ +#define SLIP_ESC 0333 /* indicates byte stuffing */ +#define SLIP_ESC_END 0334 /* ESC ESC_END means END 'data' */ +#define SLIP_ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */ + +static inline int slip_unesc(unsigned char c, unsigned char *buf, int *pos, + int *esc) +{ + int ret; + + switch(c){ + case SLIP_END: + *esc = 0; + ret=*pos; + *pos=0; + return(ret); + case SLIP_ESC: + *esc = 1; + return(0); + case SLIP_ESC_ESC: + if(*esc){ + *esc = 0; + c = SLIP_ESC; + } + break; + case SLIP_ESC_END: + if(*esc){ + *esc = 0; + c = SLIP_END; + } + break; + } + buf[(*pos)++] = c; + return(0); +} + +static inline int slip_esc(unsigned char *s, unsigned char *d, int len) +{ + unsigned char *ptr = d; + unsigned char c; + + /* + * Send an initial END character to flush out any + * data that may have accumulated in the receiver + * due to line noise. + */ + + *ptr++ = SLIP_END; + + /* + * For each byte in the packet, send the appropriate + * character sequence, according to the SLIP protocol. 
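+ *
+ * Example (per RFC 1055): a payload byte equal to END (0300) is sent
+ * as the pair ESC ESC_END (0333 0334), a payload byte equal to ESC
+ * (0333) as the pair ESC ESC_ESC (0333 0335), and every frame is
+ * closed with a literal END.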
+ */ + + while (len-- > 0) { + switch(c = *s++) { + case SLIP_END: + *ptr++ = SLIP_ESC; + *ptr++ = SLIP_ESC_END; + break; + case SLIP_ESC: + *ptr++ = SLIP_ESC; + *ptr++ = SLIP_ESC_ESC; + break; + default: + *ptr++ = c; + break; + } + } + *ptr++ = SLIP_END; + return (ptr - d); +} + +struct slip_proto { + unsigned char ibuf[ENC_BUF_SIZE]; + unsigned char obuf[ENC_BUF_SIZE]; + int more; /* more data: do not read fd until ibuf has been drained */ + int pos; + int esc; +}; + +static inline void slip_proto_init(struct slip_proto * slip) +{ + memset(slip->ibuf, 0, sizeof(slip->ibuf)); + memset(slip->obuf, 0, sizeof(slip->obuf)); + slip->more = 0; + slip->pos = 0; + slip->esc = 0; +} + +extern int slip_proto_read(int fd, void *buf, int len, + struct slip_proto *slip); +extern int slip_proto_write(int fd, void *buf, int len, + struct slip_proto *slip); + +#endif diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c new file mode 100644 index 0000000000..c58ccdcc16 --- /dev/null +++ b/arch/um/drivers/slip_kern.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <linux/if_arp.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <net_kern.h> +#include "slip.h" + +struct slip_init { + char *gate_addr; +}; + +static void slip_init(struct net_device *dev, void *data) +{ + struct uml_net_private *private; + struct slip_data *spri; + struct slip_init *init = data; + + private = netdev_priv(dev); + spri = (struct slip_data *) private->user; + + memset(spri->name, 0, sizeof(spri->name)); + spri->addr = NULL; + spri->gate_addr = init->gate_addr; + spri->slave = -1; + spri->dev = dev; + + slip_proto_init(&spri->slip); + + dev->hard_header_len = 0; + dev->header_ops = NULL; + dev->addr_len = 0; + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = 256; + dev->flags = IFF_NOARP; + printk("SLIP backend - SLIP IP = %s\n", spri->gate_addr); +} + +static unsigned short slip_protocol(struct sk_buff *skbuff) +{ + return htons(ETH_P_IP); +} + +static int slip_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return slip_user_read(fd, skb_mac_header(skb), skb->dev->mtu, + (struct slip_data *) &lp->user); +} + +static int slip_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return slip_user_write(fd, skb->data, skb->len, + (struct slip_data *) &lp->user); +} + +static const struct net_kern_info slip_kern_info = { + .init = slip_init, + .protocol = slip_protocol, + .read = slip_read, + .write = slip_write, +}; + +static int slip_setup(char *str, char **mac_out, void *data) +{ + struct slip_init *init = data; + + *init = ((struct slip_init) { .gate_addr = NULL }); + + if (str[0] != '\0') + init->gate_addr = str; + return 1; +} + +static struct transport slip_transport = { + .list = LIST_HEAD_INIT(slip_transport.list), + .name = "slip", + .setup = slip_setup, + .user = &slip_user_info, + .kern = &slip_kern_info, + .private_size = sizeof(struct slip_data), + .setup_size = sizeof(struct slip_init), +}; + +static int register_slip(void) +{ + register_transport(&slip_transport); + return 0; +} + +late_initcall(register_slip); diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c new file mode 100644 index 0000000000..7334019c9e --- /dev/null +++ b/arch/um/drivers/slip_user.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <stdio.h> +#include <stdlib.h> +#include 
<unistd.h> +#include <errno.h> +#include <fcntl.h> +#include <string.h> +#include <termios.h> +#include <sys/wait.h> +#include <net_user.h> +#include <os.h> +#include "slip.h" +#include <um_malloc.h> + +static int slip_user_init(void *data, void *dev) +{ + struct slip_data *pri = data; + + pri->dev = dev; + return 0; +} + +static int set_up_tty(int fd) +{ + int i; + struct termios tios; + + if (tcgetattr(fd, &tios) < 0) { + printk(UM_KERN_ERR "could not get initial terminal " + "attributes\n"); + return -1; + } + + tios.c_cflag = CS8 | CREAD | HUPCL | CLOCAL; + tios.c_iflag = IGNBRK | IGNPAR; + tios.c_oflag = 0; + tios.c_lflag = 0; + for (i = 0; i < NCCS; i++) + tios.c_cc[i] = 0; + tios.c_cc[VMIN] = 1; + tios.c_cc[VTIME] = 0; + + cfsetospeed(&tios, B38400); + cfsetispeed(&tios, B38400); + + if (tcsetattr(fd, TCSAFLUSH, &tios) < 0) { + printk(UM_KERN_ERR "failed to set terminal attributes\n"); + return -1; + } + return 0; +} + +struct slip_pre_exec_data { + int stdin_fd; + int stdout_fd; + int close_me; +}; + +static void slip_pre_exec(void *arg) +{ + struct slip_pre_exec_data *data = arg; + + if (data->stdin_fd >= 0) + dup2(data->stdin_fd, 0); + dup2(data->stdout_fd, 1); + if (data->close_me >= 0) + close(data->close_me); +} + +static int slip_tramp(char **argv, int fd) +{ + struct slip_pre_exec_data pe_data; + char *output; + int pid, fds[2], err, output_len; + + err = os_pipe(fds, 1, 0); + if (err < 0) { + printk(UM_KERN_ERR "slip_tramp : pipe failed, err = %d\n", + -err); + goto out; + } + + err = 0; + pe_data.stdin_fd = fd; + pe_data.stdout_fd = fds[1]; + pe_data.close_me = fds[0]; + err = run_helper(slip_pre_exec, &pe_data, argv); + if (err < 0) + goto out_close; + pid = err; + + output_len = UM_KERN_PAGE_SIZE; + output = uml_kmalloc(output_len, UM_GFP_KERNEL); + if (output == NULL) { + printk(UM_KERN_ERR "slip_tramp : failed to allocate output " + "buffer\n"); + os_kill_process(pid, 1); + err = -ENOMEM; + goto out_close; + } + + close(fds[1]); + read_output(fds[0], output, output_len); + printk("%s", output); + + err = helper_wait(pid); + close(fds[0]); + + kfree(output); + return err; + +out_close: + close(fds[0]); + close(fds[1]); +out: + return err; +} + +static int slip_open(void *data) +{ + struct slip_data *pri = data; + char version_buf[sizeof("nnnnn\0")]; + char gate_buf[sizeof("nnn.nnn.nnn.nnn\0")]; + char *argv[] = { "uml_net", version_buf, "slip", "up", gate_buf, + NULL }; + int sfd, mfd, err; + + err = get_pty(); + if (err < 0) { + printk(UM_KERN_ERR "slip-open : Failed to open pty, err = %d\n", + -err); + goto out; + } + mfd = err; + + err = open(ptsname(mfd), O_RDWR, 0); + if (err < 0) { + printk(UM_KERN_ERR "Couldn't open tty for slip line, " + "err = %d\n", -err); + goto out_close; + } + sfd = err; + + err = set_up_tty(sfd); + if (err) + goto out_close2; + + pri->slave = sfd; + pri->slip.pos = 0; + pri->slip.esc = 0; + if (pri->gate_addr != NULL) { + sprintf(version_buf, "%d", UML_NET_VERSION); + strcpy(gate_buf, pri->gate_addr); + + err = slip_tramp(argv, sfd); + + if (err < 0) { + printk(UM_KERN_ERR "slip_tramp failed - err = %d\n", + -err); + goto out_close2; + } + err = os_get_ifname(pri->slave, pri->name); + if (err < 0) { + printk(UM_KERN_ERR "get_ifname failed, err = %d\n", + -err); + goto out_close2; + } + iter_addresses(pri->dev, open_addr, pri->name); + } + else { + err = os_set_slip(sfd); + if (err < 0) { + printk(UM_KERN_ERR "Failed to set slip discipline " + "encapsulation - err = %d\n", -err); + goto out_close2; + } + } + return mfd; +out_close2: + 
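+	/* error unwind: close the slave tty first, then the pty master */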
close(sfd); +out_close: + close(mfd); +out: + return err; +} + +static void slip_close(int fd, void *data) +{ + struct slip_data *pri = data; + char version_buf[sizeof("nnnnn\0")]; + char *argv[] = { "uml_net", version_buf, "slip", "down", pri->name, + NULL }; + int err; + + if (pri->gate_addr != NULL) + iter_addresses(pri->dev, close_addr, pri->name); + + sprintf(version_buf, "%d", UML_NET_VERSION); + + err = slip_tramp(argv, pri->slave); + + if (err != 0) + printk(UM_KERN_ERR "slip_tramp failed - errno = %d\n", -err); + close(fd); + close(pri->slave); + pri->slave = -1; +} + +int slip_user_read(int fd, void *buf, int len, struct slip_data *pri) +{ + return slip_proto_read(fd, buf, len, &pri->slip); +} + +int slip_user_write(int fd, void *buf, int len, struct slip_data *pri) +{ + return slip_proto_write(fd, buf, len, &pri->slip); +} + +static void slip_add_addr(unsigned char *addr, unsigned char *netmask, + void *data) +{ + struct slip_data *pri = data; + + if (pri->slave < 0) + return; + open_addr(addr, netmask, pri->name); +} + +static void slip_del_addr(unsigned char *addr, unsigned char *netmask, + void *data) +{ + struct slip_data *pri = data; + + if (pri->slave < 0) + return; + close_addr(addr, netmask, pri->name); +} + +const struct net_user_info slip_user_info = { + .init = slip_user_init, + .open = slip_open, + .close = slip_close, + .remove = NULL, + .add_address = slip_add_addr, + .delete_address = slip_del_addr, + .mtu = BUF_SIZE, + .max_packet = BUF_SIZE, +}; diff --git a/arch/um/drivers/slirp.h b/arch/um/drivers/slirp.h new file mode 100644 index 0000000000..4aef2b8824 --- /dev/null +++ b/arch/um/drivers/slirp.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_SLIRP_H +#define __UM_SLIRP_H + +#include "slip_common.h" + +#define SLIRP_MAX_ARGS 100 +/* + * XXX this next definition is here because I don't understand why this + * initializer doesn't work in slirp_kern.c: + * + * argv : { init->argv[ 0 ... SLIRP_MAX_ARGS-1 ] }, + * + * or why I can't typecast like this: + * + * argv : (char* [SLIRP_MAX_ARGS])(init->argv), + */ +struct arg_list_dummy_wrapper { char *argv[SLIRP_MAX_ARGS]; }; + +struct slirp_data { + void *dev; + struct arg_list_dummy_wrapper argw; + int pid; + int slave; + struct slip_proto slip; +}; + +extern const struct net_user_info slirp_user_info; + +extern int slirp_user_read(int fd, void *buf, int len, struct slirp_data *pri); +extern int slirp_user_write(int fd, void *buf, int len, + struct slirp_data *pri); + +#endif diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c new file mode 100644 index 0000000000..0a6151ee95 --- /dev/null +++ b/arch/um/drivers/slirp_kern.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <linux/if_arp.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/string.h> +#include <net_kern.h> +#include <net_user.h> +#include "slirp.h" + +struct slirp_init { + struct arg_list_dummy_wrapper argw; /* XXX should be simpler... 
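+	 * The wrapper exists because in C a bare array member cannot be
+	 * assigned or initialized from another array lvalue, while a
+	 * struct embedding the array copies with plain assignment, which
+	 * is what slirp_init() and slirp_setup() below rely on.
+	 *
+	 * Illustrative command line: "eth0=slirp,,/usr/bin/slirp" (the
+	 * path is an example) - everything after the empty MAC field
+	 * becomes the helper's argv, with '_' accepted as a stand-in for
+	 * spaces.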
*/ +}; + +static void slirp_init(struct net_device *dev, void *data) +{ + struct uml_net_private *private; + struct slirp_data *spri; + struct slirp_init *init = data; + int i; + + private = netdev_priv(dev); + spri = (struct slirp_data *) private->user; + + spri->argw = init->argw; + spri->pid = -1; + spri->slave = -1; + spri->dev = dev; + + slip_proto_init(&spri->slip); + + dev->hard_header_len = 0; + dev->header_ops = NULL; + dev->addr_len = 0; + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = 256; + dev->flags = IFF_NOARP; + printk("SLIRP backend - command line:"); + for (i = 0; spri->argw.argv[i] != NULL; i++) + printk(" '%s'",spri->argw.argv[i]); + printk("\n"); +} + +static unsigned short slirp_protocol(struct sk_buff *skbuff) +{ + return htons(ETH_P_IP); +} + +static int slirp_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return slirp_user_read(fd, skb_mac_header(skb), skb->dev->mtu, + (struct slirp_data *) &lp->user); +} + +static int slirp_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return slirp_user_write(fd, skb->data, skb->len, + (struct slirp_data *) &lp->user); +} + +const struct net_kern_info slirp_kern_info = { + .init = slirp_init, + .protocol = slirp_protocol, + .read = slirp_read, + .write = slirp_write, +}; + +static int slirp_setup(char *str, char **mac_out, void *data) +{ + struct slirp_init *init = data; + int i=0; + + *init = ((struct slirp_init) { .argw = { { "slirp", NULL } } }); + + str = split_if_spec(str, mac_out, NULL); + + if (str == NULL) /* no command line given after MAC addr */ + return 1; + + do { + if (i >= SLIRP_MAX_ARGS - 1) { + printk(KERN_WARNING "slirp_setup: truncating slirp " + "arguments\n"); + break; + } + init->argw.argv[i++] = str; + while(*str && *str!=',') { + if (*str == '_') + *str=' '; + str++; + } + if (*str != ',') + break; + *str++ = '\0'; + } while (1); + + init->argw.argv[i] = NULL; + return 1; +} + +static struct transport slirp_transport = { + .list = LIST_HEAD_INIT(slirp_transport.list), + .name = "slirp", + .setup = slirp_setup, + .user = &slirp_user_info, + .kern = &slirp_kern_info, + .private_size = sizeof(struct slirp_data), + .setup_size = sizeof(struct slirp_init), +}; + +static int register_slirp(void) +{ + register_transport(&slirp_transport); + return 0; +} + +late_initcall(register_slirp); diff --git a/arch/um/drivers/slirp_user.c b/arch/um/drivers/slirp_user.c new file mode 100644 index 0000000000..8f633e2e5f --- /dev/null +++ b/arch/um/drivers/slirp_user.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <sys/wait.h> +#include <net_user.h> +#include <os.h> +#include "slirp.h" + +static int slirp_user_init(void *data, void *dev) +{ + struct slirp_data *pri = data; + + pri->dev = dev; + return 0; +} + +struct slirp_pre_exec_data { + int stdin_fd; + int stdout_fd; +}; + +static void slirp_pre_exec(void *arg) +{ + struct slirp_pre_exec_data *data = arg; + + if (data->stdin_fd != -1) + dup2(data->stdin_fd, 0); + if (data->stdout_fd != -1) + dup2(data->stdout_fd, 1); +} + +static int slirp_tramp(char **argv, int fd) +{ + struct slirp_pre_exec_data pe_data; + int pid; + + pe_data.stdin_fd = fd; + pe_data.stdout_fd = fd; + pid = run_helper(slirp_pre_exec, &pe_data, argv); + + return pid; +} + +static int slirp_open(void *data) +{ + struct slirp_data *pri = data; + int fds[2], pid, err; + + err = os_pipe(fds, 1, 1); + if (err) + 
return err; + + err = slirp_tramp(pri->argw.argv, fds[1]); + if (err < 0) { + printk(UM_KERN_ERR "slirp_tramp failed - errno = %d\n", -err); + goto out; + } + pid = err; + + pri->slave = fds[1]; + pri->slip.pos = 0; + pri->slip.esc = 0; + pri->pid = err; + + return fds[0]; +out: + close(fds[0]); + close(fds[1]); + return err; +} + +static void slirp_close(int fd, void *data) +{ + struct slirp_data *pri = data; + int err; + + close(fd); + close(pri->slave); + + pri->slave = -1; + + if (pri->pid<1) { + printk(UM_KERN_ERR "slirp_close: no child process to shut " + "down\n"); + return; + } + +#if 0 + if (kill(pri->pid, SIGHUP)<0) { + printk(UM_KERN_ERR "slirp_close: sending hangup to %d failed " + "(%d)\n", pri->pid, errno); + } +#endif + err = helper_wait(pri->pid); + if (err < 0) + return; + + pri->pid = -1; +} + +int slirp_user_read(int fd, void *buf, int len, struct slirp_data *pri) +{ + return slip_proto_read(fd, buf, len, &pri->slip); +} + +int slirp_user_write(int fd, void *buf, int len, struct slirp_data *pri) +{ + return slip_proto_write(fd, buf, len, &pri->slip); +} + +const struct net_user_info slirp_user_info = { + .init = slirp_user_init, + .open = slirp_open, + .close = slirp_close, + .remove = NULL, + .add_address = NULL, + .delete_address = NULL, + .mtu = BUF_SIZE, + .max_packet = BUF_SIZE, +}; diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c new file mode 100644 index 0000000000..277cea3d30 --- /dev/null +++ b/arch/um/drivers/ssl.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com) + */ + +#include <linux/fs.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/major.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/console.h> +#include <asm/termbits.h> +#include <asm/irq.h> +#include "chan.h" +#include <init.h> +#include <irq_user.h> +#include "mconsole_kern.h" + +static const int ssl_version = 1; + +#define NR_PORTS 64 + +static void ssl_announce(char *dev_name, int dev) +{ + printk(KERN_INFO "Serial line %d assigned device '%s'\n", dev, + dev_name); +} + +/* Almost const, except that xterm_title may be changed in an initcall */ +static struct chan_opts opts = { + .announce = ssl_announce, + .xterm_title = "Serial Line #%d", + .raw = 1, +}; + +static int ssl_config(char *str, char **error_out); +static int ssl_get_config(char *dev, char *str, int size, char **error_out); +static int ssl_remove(int n, char **error_out); + + +/* Const, except for .mc.list */ +static struct line_driver driver = { + .name = "UML serial line", + .device_name = "ttyS", + .major = TTY_MAJOR, + .minor_start = 64, + .type = TTY_DRIVER_TYPE_SERIAL, + .subtype = 0, + .read_irq_name = "ssl", + .write_irq_name = "ssl-write", + .mc = { + .list = LIST_HEAD_INIT(driver.mc.list), + .name = "ssl", + .config = ssl_config, + .get_config = ssl_get_config, + .id = line_id, + .remove = ssl_remove, + }, +}; + +/* The array is initialized by line_init, at initcall time. The + * elements are locked individually as needed. 
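+ *
+ * Entries are filled from "ssl<n>=<channel>" (or "ssl=<channel>" for
+ * the default) on the kernel command line via ssl_chan_setup() below;
+ * "ssl1=pty" is a typical, purely illustrative, override of the
+ * CONFIG_SSL_CHAN default.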
+ */ +static char *conf[NR_PORTS]; +static char *def_conf = CONFIG_SSL_CHAN; +static struct line serial_lines[NR_PORTS]; + +static int ssl_config(char *str, char **error_out) +{ + return line_config(serial_lines, ARRAY_SIZE(serial_lines), str, &opts, + error_out); +} + +static int ssl_get_config(char *dev, char *str, int size, char **error_out) +{ + return line_get_config(dev, serial_lines, ARRAY_SIZE(serial_lines), str, + size, error_out); +} + +static int ssl_remove(int n, char **error_out) +{ + return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n, + error_out); +} + +static int ssl_install(struct tty_driver *driver, struct tty_struct *tty) +{ + return line_install(driver, tty, &serial_lines[tty->index]); +} + +static const struct tty_operations ssl_ops = { + .open = line_open, + .close = line_close, + .write = line_write, + .write_room = line_write_room, + .chars_in_buffer = line_chars_in_buffer, + .flush_buffer = line_flush_buffer, + .flush_chars = line_flush_chars, + .throttle = line_throttle, + .unthrottle = line_unthrottle, + .install = ssl_install, + .hangup = line_hangup, +}; + +/* Changed by ssl_init and referenced by ssl_exit, which are both serialized + * by being an initcall and exitcall, respectively. + */ +static int ssl_init_done; + +static void ssl_console_write(struct console *c, const char *string, + unsigned len) +{ + struct line *line = &serial_lines[c->index]; + unsigned long flags; + + spin_lock_irqsave(&line->lock, flags); + console_write_chan(line->chan_out, string, len); + spin_unlock_irqrestore(&line->lock, flags); +} + +static struct tty_driver *ssl_console_device(struct console *c, int *index) +{ + *index = c->index; + return driver.driver; +} + +static int ssl_console_setup(struct console *co, char *options) +{ + struct line *line = &serial_lines[co->index]; + + return console_open_chan(line, co); +} + +/* No locking for register_console call - relies on single-threaded initcalls */ +static struct console ssl_cons = { + .name = "ttyS", + .write = ssl_console_write, + .device = ssl_console_device, + .setup = ssl_console_setup, + .flags = CON_PRINTBUFFER|CON_ANYTIME, + .index = -1, +}; + +static int ssl_init(void) +{ + char *new_title; + int err; + int i; + + printk(KERN_INFO "Initializing software serial port version %d\n", + ssl_version); + + err = register_lines(&driver, &ssl_ops, serial_lines, + ARRAY_SIZE(serial_lines)); + if (err) + return err; + + new_title = add_xterm_umid(opts.xterm_title); + if (new_title != NULL) + opts.xterm_title = new_title; + + for (i = 0; i < NR_PORTS; i++) { + char *error; + char *s = conf[i]; + if (!s) + s = def_conf; + if (setup_one_line(serial_lines, i, s, &opts, &error)) + printk(KERN_ERR "setup_one_line failed for " + "device %d : %s\n", i, error); + } + + ssl_init_done = 1; + register_console(&ssl_cons); + return 0; +} +late_initcall(ssl_init); + +static void ssl_exit(void) +{ + if (!ssl_init_done) + return; + close_lines(serial_lines, ARRAY_SIZE(serial_lines)); +} +__uml_exitcall(ssl_exit); + +static int ssl_chan_setup(char *str) +{ + line_setup(conf, NR_PORTS, &def_conf, str, "serial line"); + return 1; +} + +__setup("ssl", ssl_chan_setup); +__channel_help(ssl_chan_setup, "ssl"); + +static int ssl_non_raw_setup(char *str) +{ + opts.raw = 0; + return 1; +} +__setup("ssl-non-raw", ssl_non_raw_setup); +__channel_help(ssl_non_raw_setup, "set serial lines to non-raw mode"); diff --git a/arch/um/drivers/stderr_console.c b/arch/um/drivers/stderr_console.c new file mode 100644 index 0000000000..ecc3a58149 --- /dev/null 
+++ b/arch/um/drivers/stderr_console.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/console.h> + +#include "chan_user.h" + +/* ----------------------------------------------------------------------------- */ +/* trivial console driver -- simply dump everything to stderr */ + +/* + * Don't register by default -- as this registers very early in the + * boot process it becomes the default console. + * + * Initialized at init time. + */ +static int use_stderr_console = 0; + +static void stderr_console_write(struct console *console, const char *string, + unsigned len) +{ + generic_write(2 /* stderr */, string, len, NULL); +} + +static struct console stderr_console = { + .name = "stderr", + .write = stderr_console_write, + .flags = CON_PRINTBUFFER, +}; + +static int __init stderr_console_init(void) +{ + if (use_stderr_console) + register_console(&stderr_console); + return 0; +} +console_initcall(stderr_console_init); + +static int stderr_setup(char *str) +{ + if (!str) + return 0; + use_stderr_console = simple_strtoul(str,&str,0); + return 1; +} +__setup("stderr=", stderr_setup); + +/* The previous behavior of not unregistering led to /dev/console being + * impossible to open. My FC5 filesystem started having init die, and the + * system panicing because of this. Unregistering causes the real + * console to become the default console, and /dev/console can then be + * opened. Making this an initcall makes this happen late enough that + * there is no added value in dumping everything to stderr, and the + * normal console is good enough to show you all available output. + */ +static int __init unregister_stderr(void) +{ + unregister_console(&stderr_console); + + return 0; +} + +__initcall(unregister_stderr); diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c new file mode 100644 index 0000000000..1c239737d8 --- /dev/null +++ b/arch/um/drivers/stdio_console.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com) + */ + +#include <linux/posix_types.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/types.h> +#include <linux/major.h> +#include <linux/kdev_t.h> +#include <linux/console.h> +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/hardirq.h> +#include <asm/current.h> +#include <asm/irq.h> +#include "stdio_console.h" +#include "chan.h" +#include <irq_user.h> +#include "mconsole_kern.h" +#include <init.h> + +#define MAX_TTYS (16) + +static void stdio_announce(char *dev_name, int dev) +{ + printk(KERN_INFO "Virtual console %d assigned device '%s'\n", dev, + dev_name); +} + +/* Almost const, except that xterm_title may be changed in an initcall */ +static struct chan_opts opts = { + .announce = stdio_announce, + .xterm_title = "Virtual Console #%d", + .raw = 1, +}; + +static int con_config(char *str, char **error_out); +static int con_get_config(char *dev, char *str, int size, char **error_out); +static int con_remove(int n, char **con_remove); + + +/* Const, except for .mc.list */ +static struct line_driver driver = { + .name = "UML console", + .device_name = "tty", + .major = TTY_MAJOR, + .minor_start = 0, + .type = TTY_DRIVER_TYPE_CONSOLE, + .subtype = SYSTEM_TYPE_CONSOLE, + .read_irq_name = "console", + .write_irq_name = "console-write", + .mc = { + .list = 
LIST_HEAD_INIT(driver.mc.list), + .name = "con", + .config = con_config, + .get_config = con_get_config, + .id = line_id, + .remove = con_remove, + }, +}; + +/* The array is initialized by line_init, at initcall time. The + * elements are locked individually as needed. + */ +static char *vt_conf[MAX_TTYS]; +static char *def_conf; +static struct line vts[MAX_TTYS]; + +static int con_config(char *str, char **error_out) +{ + return line_config(vts, ARRAY_SIZE(vts), str, &opts, error_out); +} + +static int con_get_config(char *dev, char *str, int size, char **error_out) +{ + return line_get_config(dev, vts, ARRAY_SIZE(vts), str, size, error_out); +} + +static int con_remove(int n, char **error_out) +{ + return line_remove(vts, ARRAY_SIZE(vts), n, error_out); +} + +/* Set in an initcall, checked in an exitcall */ +static int con_init_done; + +static int con_install(struct tty_driver *driver, struct tty_struct *tty) +{ + return line_install(driver, tty, &vts[tty->index]); +} + +static const struct tty_operations console_ops = { + .open = line_open, + .install = con_install, + .close = line_close, + .write = line_write, + .write_room = line_write_room, + .chars_in_buffer = line_chars_in_buffer, + .flush_buffer = line_flush_buffer, + .flush_chars = line_flush_chars, + .throttle = line_throttle, + .unthrottle = line_unthrottle, + .hangup = line_hangup, +}; + +static void uml_console_write(struct console *console, const char *string, + unsigned len) +{ + struct line *line = &vts[console->index]; + unsigned long flags; + + spin_lock_irqsave(&line->lock, flags); + console_write_chan(line->chan_out, string, len); + spin_unlock_irqrestore(&line->lock, flags); +} + +static struct tty_driver *uml_console_device(struct console *c, int *index) +{ + *index = c->index; + return driver.driver; +} + +static int uml_console_setup(struct console *co, char *options) +{ + struct line *line = &vts[co->index]; + + return console_open_chan(line, co); +} + +/* No locking for register_console call - relies on single-threaded initcalls */ +static struct console stdiocons = { + .name = "tty", + .write = uml_console_write, + .device = uml_console_device, + .setup = uml_console_setup, + .flags = CON_PRINTBUFFER|CON_ANYTIME, + .index = -1, +}; + +static int stdio_init(void) +{ + char *new_title; + int err; + int i; + + err = register_lines(&driver, &console_ops, vts, + ARRAY_SIZE(vts)); + if (err) + return err; + + printk(KERN_INFO "Initialized stdio console driver\n"); + + new_title = add_xterm_umid(opts.xterm_title); + if(new_title != NULL) + opts.xterm_title = new_title; + + for (i = 0; i < MAX_TTYS; i++) { + char *error; + char *s = vt_conf[i]; + if (!s) + s = def_conf; + if (!s) + s = i ? 
CONFIG_CON_CHAN : CONFIG_CON_ZERO_CHAN; + if (setup_one_line(vts, i, s, &opts, &error)) + printk(KERN_ERR "setup_one_line failed for " + "device %d : %s\n", i, error); + } + + con_init_done = 1; + register_console(&stdiocons); + return 0; +} +late_initcall(stdio_init); + +static void console_exit(void) +{ + if (!con_init_done) + return; + close_lines(vts, ARRAY_SIZE(vts)); +} +__uml_exitcall(console_exit); + +static int console_chan_setup(char *str) +{ + if (!strncmp(str, "sole=", 5)) /* console= option specifies tty */ + return 0; + + line_setup(vt_conf, MAX_TTYS, &def_conf, str, "console"); + return 1; +} +__setup("con", console_chan_setup); +__channel_help(console_chan_setup, "con"); diff --git a/arch/um/drivers/stdio_console.h b/arch/um/drivers/stdio_console.h new file mode 100644 index 0000000000..3a409ec23d --- /dev/null +++ b/arch/um/drivers/stdio_console.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) + */ + +#ifndef __STDIO_CONSOLE_H +#define __STDIO_CONSOLE_H + +extern void save_console_flags(void); +#endif + diff --git a/arch/um/drivers/tty.c b/arch/um/drivers/tty.c new file mode 100644 index 0000000000..884a762d21 --- /dev/null +++ b/arch/um/drivers/tty.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com) + */ + +#include <errno.h> +#include <fcntl.h> +#include <termios.h> +#include "chan_user.h" +#include <os.h> +#include <um_malloc.h> + +struct tty_chan { + char *dev; + int raw; + struct termios tt; +}; + +static void *tty_chan_init(char *str, int device, const struct chan_opts *opts) +{ + struct tty_chan *data; + + if (*str != ':') { + printk(UM_KERN_ERR "tty_init : channel type 'tty' must specify " + "a device\n"); + return NULL; + } + str++; + + data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL); + if (data == NULL) + return NULL; + *data = ((struct tty_chan) { .dev = str, + .raw = opts->raw }); + + return data; +} + +static int tty_open(int input, int output, int primary, void *d, + char **dev_out) +{ + struct tty_chan *data = d; + int fd, err, mode = 0; + + if (input && output) + mode = O_RDWR; + else if (input) + mode = O_RDONLY; + else if (output) + mode = O_WRONLY; + + fd = open(data->dev, mode); + if (fd < 0) + return -errno; + + if (data->raw) { + CATCH_EINTR(err = tcgetattr(fd, &data->tt)); + if (err) + return err; + + err = raw(fd); + if (err) + return err; + } + + *dev_out = data->dev; + return fd; +} + +const struct chan_ops tty_ops = { + .type = "tty", + .init = tty_chan_init, + .open = tty_open, + .close = generic_close, + .read = generic_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = generic_free, + .winch = 0, +}; diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h new file mode 100644 index 0000000000..f016fe1549 --- /dev/null +++ b/arch/um/drivers/ubd.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2001 RidgeRun, Inc (glonnon@ridgerun.com) + */ + +#ifndef __UM_UBD_USER_H +#define __UM_UBD_USER_H + +extern int start_io_thread(unsigned long sp, int *fds_out); +extern int io_thread(void *arg); +extern int kernel_fd; + +extern int ubd_read_poll(int timeout); +extern int ubd_write_poll(int timeout); + +#define UBD_REQ_BUFFER_SIZE 64 + +#endif + diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c new file mode 100644 index 
0000000000..50206feac5 --- /dev/null +++ b/arch/um/drivers/ubd_kern.c @@ -0,0 +1,1599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Cambridge Greys Ltd + * Copyright (C) 2015-2016 Anton Ivanov (aivanov@brocade.com) + * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) + */ + +/* 2001-09-28...2002-04-17 + * Partition stuff by James_McMechan@hotmail.com + * old style ubd by setting UBD_SHIFT to 0 + * 2002-09-27...2002-10-18 massive tinkering for 2.5 + * partitions have changed in 2.5 + * 2003-01-29 more tinkering for 2.5.59-1 + * This should now address the sysfs problems and has + * the symlink for devfs to allow for booting with + * the common /dev/ubd/discX/... names rather than + * only /dev/ubdN/discN this version also has lots of + * clean ups preparing for ubd-many. + * James McMechan + */ + +#define UBD_SHIFT 4 + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/blk-mq.h> +#include <linux/ata.h> +#include <linux/hdreg.h> +#include <linux/major.h> +#include <linux/cdrom.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/ctype.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <asm/tlbflush.h> +#include <kern_util.h> +#include "mconsole_kern.h" +#include <init.h> +#include <irq_kern.h> +#include "ubd.h" +#include <os.h> +#include "cow.h" + +/* Max request size is determined by sector mask - 32K */ +#define UBD_MAX_REQUEST (8 * sizeof(long)) + +struct io_desc { + char *buffer; + unsigned long length; + unsigned long sector_mask; + unsigned long long cow_offset; + unsigned long bitmap_words[2]; +}; + +struct io_thread_req { + struct request *req; + int fds[2]; + unsigned long offsets[2]; + unsigned long long offset; + int sectorsize; + int error; + + int desc_cnt; + /* io_desc has to be the last element of the struct */ + struct io_desc io_desc[]; +}; + + +static struct io_thread_req * (*irq_req_buffer)[]; +static struct io_thread_req *irq_remainder; +static int irq_remainder_size; + +static struct io_thread_req * (*io_req_buffer)[]; +static struct io_thread_req *io_remainder; +static int io_remainder_size; + + + +static inline int ubd_test_bit(__u64 bit, unsigned char *data) +{ + __u64 n; + int bits, off; + + bits = sizeof(data[0]) * 8; + n = bit / bits; + off = bit % bits; + return (data[n] & (1 << off)) != 0; +} + +static inline void ubd_set_bit(__u64 bit, unsigned char *data) +{ + __u64 n; + int bits, off; + + bits = sizeof(data[0]) * 8; + n = bit / bits; + off = bit % bits; + data[n] |= (1 << off); +} +/*End stuff from ubd_user.h*/ + +#define DRIVER_NAME "uml-blkdev" + +static DEFINE_MUTEX(ubd_lock); +static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */ + +static int ubd_open(struct gendisk *disk, blk_mode_t mode); +static void ubd_release(struct gendisk *disk); +static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg); +static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo); + +#define MAX_DEV (16) + +static const struct block_device_operations ubd_blops = { + .owner = THIS_MODULE, + .open = ubd_open, + .release = ubd_release, + .ioctl = ubd_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, + .getgeo = ubd_getgeo, +}; + +/* Protected by ubd_lock */ +static struct gendisk *ubd_gendisk[MAX_DEV]; + +#ifdef CONFIG_BLK_DEV_UBD_SYNC +#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \ + .cl = 1 }) +#else 
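+/* !CONFIG_BLK_DEV_UBD_SYNC: host-side writes may be cached (.s = 0) */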
+#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \ + .cl = 1 }) +#endif +static struct openflags global_openflags = OPEN_FLAGS; + +struct cow { + /* backing file name */ + char *file; + /* backing file fd */ + int fd; + unsigned long *bitmap; + unsigned long bitmap_len; + int bitmap_offset; + int data_offset; +}; + +#define MAX_SG 64 + +struct ubd { + /* name (and fd, below) of the file opened for writing, either the + * backing or the cow file. */ + char *file; + char *serial; + int count; + int fd; + __u64 size; + struct openflags boot_openflags; + struct openflags openflags; + unsigned shared:1; + unsigned no_cow:1; + unsigned no_trim:1; + struct cow cow; + struct platform_device pdev; + struct request_queue *queue; + struct blk_mq_tag_set tag_set; + spinlock_t lock; +}; + +#define DEFAULT_COW { \ + .file = NULL, \ + .fd = -1, \ + .bitmap = NULL, \ + .bitmap_offset = 0, \ + .data_offset = 0, \ +} + +#define DEFAULT_UBD { \ + .file = NULL, \ + .serial = NULL, \ + .count = 0, \ + .fd = -1, \ + .size = -1, \ + .boot_openflags = OPEN_FLAGS, \ + .openflags = OPEN_FLAGS, \ + .no_cow = 0, \ + .no_trim = 0, \ + .shared = 0, \ + .cow = DEFAULT_COW, \ + .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \ +} + +/* Protected by ubd_lock */ +static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD }; + +static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd); + +static int fake_ide_setup(char *str) +{ + pr_warn("The fake_ide option has been removed\n"); + return 1; +} +__setup("fake_ide", fake_ide_setup); + +__uml_help(fake_ide_setup, +"fake_ide\n" +" Obsolete stub.\n\n" +); + +static int parse_unit(char **ptr) +{ + char *str = *ptr, *end; + int n = -1; + + if(isdigit(*str)) { + n = simple_strtoul(str, &end, 0); + if(end == str) + return -1; + *ptr = end; + } + else if (('a' <= *str) && (*str <= 'z')) { + n = *str - 'a'; + str++; + *ptr = str; + } + return n; +} + +/* If *index_out == -1 at exit, the passed option was a general one; + * otherwise, the str pointer is used (and owned) inside ubd_devs array, so it + * should not be freed on exit. 
+ */ +static int ubd_setup_common(char *str, int *index_out, char **error_out) +{ + struct ubd *ubd_dev; + struct openflags flags = global_openflags; + char *file, *backing_file, *serial; + int n, err = 0, i; + + if(index_out) *index_out = -1; + n = *str; + if(n == '='){ + str++; + if(!strcmp(str, "sync")){ + global_openflags = of_sync(global_openflags); + return err; + } + + pr_warn("fake major not supported any more\n"); + return 0; + } + + n = parse_unit(&str); + if(n < 0){ + *error_out = "Couldn't parse device number"; + return -EINVAL; + } + if(n >= MAX_DEV){ + *error_out = "Device number out of range"; + return 1; + } + + err = -EBUSY; + mutex_lock(&ubd_lock); + + ubd_dev = &ubd_devs[n]; + if(ubd_dev->file != NULL){ + *error_out = "Device is already configured"; + goto out; + } + + if (index_out) + *index_out = n; + + err = -EINVAL; + for (i = 0; i < sizeof("rscdt="); i++) { + switch (*str) { + case 'r': + flags.w = 0; + break; + case 's': + flags.s = 1; + break; + case 'd': + ubd_dev->no_cow = 1; + break; + case 'c': + ubd_dev->shared = 1; + break; + case 't': + ubd_dev->no_trim = 1; + break; + case '=': + str++; + goto break_loop; + default: + *error_out = "Expected '=' or flag letter " + "(r, s, c, t or d)"; + goto out; + } + str++; + } + + if (*str == '=') + *error_out = "Too many flags specified"; + else + *error_out = "Missing '='"; + goto out; + +break_loop: + file = strsep(&str, ",:"); + if (*file == '\0') + file = NULL; + + backing_file = strsep(&str, ",:"); + if (backing_file && *backing_file == '\0') + backing_file = NULL; + + serial = strsep(&str, ",:"); + if (serial && *serial == '\0') + serial = NULL; + + if (backing_file && ubd_dev->no_cow) { + *error_out = "Can't specify both 'd' and a cow file"; + goto out; + } + + err = 0; + ubd_dev->file = file; + ubd_dev->cow.file = backing_file; + ubd_dev->serial = serial; + ubd_dev->boot_openflags = flags; +out: + mutex_unlock(&ubd_lock); + return err; +} + +static int ubd_setup(char *str) +{ + char *error; + int err; + + err = ubd_setup_common(str, NULL, &error); + if(err) + printk(KERN_ERR "Failed to initialize device with \"%s\" : " + "%s\n", str, error); + return 1; +} + +__setup("ubd", ubd_setup); +__uml_help(ubd_setup, +"ubd<n><flags>=<filename>[(:|,)<filename2>][(:|,)<serial>]\n" +" This is used to associate a device with a file in the underlying\n" +" filesystem. When specifying two filenames, the first one is the\n" +" COW name and the second is the backing file name. As separator you can\n" +" use either a ':' or a ',': the first one allows writing things like;\n" +" ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n" +" while with a ',' the shell would not expand the 2nd '~'.\n" +" When using only one filename, UML will detect whether to treat it like\n" +" a COW file or a backing file. To override this detection, add the 'd'\n" +" flag:\n" +" ubd0d=BackingFile\n" +" Usually, there is a filesystem in the file, but \n" +" that's not required. Swap devices containing swap files can be\n" +" specified like this. Also, a file which doesn't contain a\n" +" filesystem can have its contents read in the virtual \n" +" machine by running 'dd' on the device. <n> must be in the range\n" +" 0 to 7. Appending an 'r' to the number will cause that device\n" +" to be mounted read-only. For example ubd1r=./ext_fs. 
Appending\n" +" an 's' will cause data to be written to disk on the host immediately.\n" +" 'c' will cause the device to be treated as being shared between multiple\n" +" UMLs and file locking will be turned off - this is appropriate for a\n" +" cluster filesystem and inappropriate at almost all other times.\n\n" +" 't' will disable trim/discard support on the device (enabled by default).\n\n" +" An optional device serial number can be exposed using the serial parameter\n" +" on the cmdline which is exposed as a sysfs entry. This is particularly\n" +" useful when a unique number should be given to the device. Note when\n" +" specifying a label, the filename2 must be also presented. It can be\n" +" an empty string, in which case the backing file is not used:\n" +" ubd0=File,,Serial\n" +); + +static int udb_setup(char *str) +{ + printk("udb%s specified on command line is almost certainly a ubd -> " + "udb TYPO\n", str); + return 1; +} + +__setup("udb", udb_setup); +__uml_help(udb_setup, +"udb\n" +" This option is here solely to catch ubd -> udb typos, which can be\n" +" to impossible to catch visually unless you specifically look for\n" +" them. The only result of any option starting with 'udb' is an error\n" +" in the boot output.\n\n" +); + +/* Only changed by ubd_init, which is an initcall. */ +static int thread_fd = -1; + +/* Function to read several request pointers at a time +* handling fractional reads if (and as) needed +*/ + +static int bulk_req_safe_read( + int fd, + struct io_thread_req * (*request_buffer)[], + struct io_thread_req **remainder, + int *remainder_size, + int max_recs + ) +{ + int n = 0; + int res = 0; + + if (*remainder_size > 0) { + memmove( + (char *) request_buffer, + (char *) remainder, *remainder_size + ); + n = *remainder_size; + } + + res = os_read_file( + fd, + ((char *) request_buffer) + *remainder_size, + sizeof(struct io_thread_req *)*max_recs + - *remainder_size + ); + if (res > 0) { + n += res; + if ((n % sizeof(struct io_thread_req *)) > 0) { + /* + * Read somehow returned not a multiple of dword + * theoretically possible, but never observed in the + * wild, so read routine must be able to handle it + */ + *remainder_size = n % sizeof(struct io_thread_req *); + WARN(*remainder_size > 0, "UBD IPC read returned a partial result"); + memmove( + remainder, + ((char *) request_buffer) + + (n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *), + *remainder_size + ); + n = n - *remainder_size; + } + } else { + n = res; + } + return n; +} + +/* Called without dev->lock held, and only in interrupt context. */ +static void ubd_handler(void) +{ + int n; + int count; + + while(1){ + n = bulk_req_safe_read( + thread_fd, + irq_req_buffer, + &irq_remainder, + &irq_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if (n < 0) { + if(n == -EAGAIN) + break; + printk(KERN_ERR "spurious interrupt in ubd_handler, " + "err = %d\n", -n); + return; + } + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + struct io_thread_req *io_req = (*irq_req_buffer)[count]; + + if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { + blk_queue_max_discard_sectors(io_req->req->q, 0); + blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); + } + blk_mq_end_request(io_req->req, io_req->error); + kfree(io_req); + } + } +} + +static irqreturn_t ubd_intr(int irq, void *dev) +{ + ubd_handler(); + return IRQ_HANDLED; +} + +/* Only changed by ubd_init, which is an initcall. 
*/ +static int io_pid = -1; + +static void kill_io_thread(void) +{ + if(io_pid != -1) + os_kill_process(io_pid, 1); +} + +__uml_exitcall(kill_io_thread); + +static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) +{ + char *file; + int fd; + int err; + + __u32 version; + __u32 align; + char *backing_file; + time64_t mtime; + unsigned long long size; + int sector_size; + int bitmap_offset; + + if (ubd_dev->file && ubd_dev->cow.file) { + file = ubd_dev->cow.file; + + goto out; + } + + fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0); + if (fd < 0) + return fd; + + err = read_cow_header(file_reader, &fd, &version, &backing_file, \ + &mtime, &size, §or_size, &align, &bitmap_offset); + os_close_file(fd); + + if(err == -EINVAL) + file = ubd_dev->file; + else + file = backing_file; + +out: + return os_file_size(file, size_out); +} + +static int read_cow_bitmap(int fd, void *buf, int offset, int len) +{ + int err; + + err = os_pread_file(fd, buf, len, offset); + if (err < 0) + return err; + + return 0; +} + +static int backing_file_mismatch(char *file, __u64 size, time64_t mtime) +{ + time64_t modtime; + unsigned long long actual; + int err; + + err = os_file_modtime(file, &modtime); + if (err < 0) { + printk(KERN_ERR "Failed to get modification time of backing " + "file \"%s\", err = %d\n", file, -err); + return err; + } + + err = os_file_size(file, &actual); + if (err < 0) { + printk(KERN_ERR "Failed to get size of backing file \"%s\", " + "err = %d\n", file, -err); + return err; + } + + if (actual != size) { + /*__u64 can be a long on AMD64 and with %lu GCC complains; so + * the typecast.*/ + printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header " + "vs backing file\n", (unsigned long long) size, actual); + return -EINVAL; + } + if (modtime != mtime) { + printk(KERN_ERR "mtime mismatch (%lld vs %lld) of COW header vs " + "backing file\n", mtime, modtime); + return -EINVAL; + } + return 0; +} + +static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow) +{ + struct uml_stat buf1, buf2; + int err; + + if (from_cmdline == NULL) + return 0; + if (!strcmp(from_cmdline, from_cow)) + return 0; + + err = os_stat_file(from_cmdline, &buf1); + if (err < 0) { + printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline, + -err); + return 0; + } + err = os_stat_file(from_cow, &buf2); + if (err < 0) { + printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow, + -err); + return 1; + } + if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino)) + return 0; + + printk(KERN_ERR "Backing file mismatch - \"%s\" requested, " + "\"%s\" specified in COW header of \"%s\"\n", + from_cmdline, from_cow, cow); + return 1; +} + +static int open_ubd_file(char *file, struct openflags *openflags, int shared, + char **backing_file_out, int *bitmap_offset_out, + unsigned long *bitmap_len_out, int *data_offset_out, + int *create_cow_out) +{ + time64_t mtime; + unsigned long long size; + __u32 version, align; + char *backing_file; + int fd, err, sectorsize, asked_switch, mode = 0644; + + fd = os_open_file(file, *openflags, mode); + if (fd < 0) { + if ((fd == -ENOENT) && (create_cow_out != NULL)) + *create_cow_out = 1; + if (!openflags->w || + ((fd != -EROFS) && (fd != -EACCES))) + return fd; + openflags->w = 0; + fd = os_open_file(file, *openflags, mode); + if (fd < 0) + return fd; + } + + if (shared) + printk(KERN_INFO "Not locking \"%s\" on the host\n", file); + else { + err = os_lock_file(fd, openflags->w); + if (err < 0) { + printk(KERN_ERR "Failed to lock 
'%s', err = %d\n", + file, -err); + goto out_close; + } + } + + /* Successful return case! */ + if (backing_file_out == NULL) + return fd; + + err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime, + &size, §orsize, &align, bitmap_offset_out); + if (err && (*backing_file_out != NULL)) { + printk(KERN_ERR "Failed to read COW header from COW file " + "\"%s\", errno = %d\n", file, -err); + goto out_close; + } + if (err) + return fd; + + asked_switch = path_requires_switch(*backing_file_out, backing_file, + file); + + /* Allow switching only if no mismatch. */ + if (asked_switch && !backing_file_mismatch(*backing_file_out, size, + mtime)) { + printk(KERN_ERR "Switching backing file to '%s'\n", + *backing_file_out); + err = write_cow_header(file, fd, *backing_file_out, + sectorsize, align, &size); + if (err) { + printk(KERN_ERR "Switch failed, errno = %d\n", -err); + goto out_close; + } + } else { + *backing_file_out = backing_file; + err = backing_file_mismatch(*backing_file_out, size, mtime); + if (err) + goto out_close; + } + + cow_sizes(version, size, sectorsize, align, *bitmap_offset_out, + bitmap_len_out, data_offset_out); + + return fd; + out_close: + os_close_file(fd); + return err; +} + +static int create_cow_file(char *cow_file, char *backing_file, + struct openflags flags, + int sectorsize, int alignment, int *bitmap_offset_out, + unsigned long *bitmap_len_out, int *data_offset_out) +{ + int err, fd; + + flags.c = 1; + fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL); + if (fd < 0) { + err = fd; + printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n", + cow_file, -err); + goto out; + } + + err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment, + bitmap_offset_out, bitmap_len_out, + data_offset_out); + if (!err) + return fd; + os_close_file(fd); + out: + return err; +} + +static void ubd_close_dev(struct ubd *ubd_dev) +{ + os_close_file(ubd_dev->fd); + if(ubd_dev->cow.file == NULL) + return; + + os_close_file(ubd_dev->cow.fd); + vfree(ubd_dev->cow.bitmap); + ubd_dev->cow.bitmap = NULL; +} + +static int ubd_open_dev(struct ubd *ubd_dev) +{ + struct openflags flags; + char **back_ptr; + int err, create_cow, *create_ptr; + int fd; + + ubd_dev->openflags = ubd_dev->boot_openflags; + create_cow = 0; + create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL; + back_ptr = ubd_dev->no_cow ? 
NULL : &ubd_dev->cow.file; + + fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared, + back_ptr, &ubd_dev->cow.bitmap_offset, + &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset, + create_ptr); + + if((fd == -ENOENT) && create_cow){ + fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file, + ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE, + &ubd_dev->cow.bitmap_offset, + &ubd_dev->cow.bitmap_len, + &ubd_dev->cow.data_offset); + if(fd >= 0){ + printk(KERN_INFO "Creating \"%s\" as COW file for " + "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file); + } + } + + if(fd < 0){ + printk("Failed to open '%s', errno = %d\n", ubd_dev->file, + -fd); + return fd; + } + ubd_dev->fd = fd; + + if(ubd_dev->cow.file != NULL){ + blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long)); + + err = -ENOMEM; + ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len); + if(ubd_dev->cow.bitmap == NULL){ + printk(KERN_ERR "Failed to vmalloc COW bitmap\n"); + goto error; + } + flush_tlb_kernel_vm(); + + err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap, + ubd_dev->cow.bitmap_offset, + ubd_dev->cow.bitmap_len); + if(err < 0) + goto error; + + flags = ubd_dev->openflags; + flags.w = 0; + err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL, + NULL, NULL, NULL, NULL); + if(err < 0) goto error; + ubd_dev->cow.fd = err; + } + if (ubd_dev->no_trim == 0) { + ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE; + blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST); + blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST); + } + blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue); + return 0; + error: + os_close_file(ubd_dev->fd); + return err; +} + +static void ubd_device_release(struct device *dev) +{ + struct ubd *ubd_dev = dev_get_drvdata(dev); + + blk_mq_free_tag_set(&ubd_dev->tag_set); + *ubd_dev = ((struct ubd) DEFAULT_UBD); +} + +static ssize_t serial_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct ubd *ubd_dev = disk->private_data; + + if (!ubd_dev) + return 0; + + return sprintf(buf, "%s", ubd_dev->serial); +} + +static DEVICE_ATTR_RO(serial); + +static struct attribute *ubd_attrs[] = { + &dev_attr_serial.attr, + NULL, +}; + +static umode_t ubd_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + return a->mode; +} + +static const struct attribute_group ubd_attr_group = { + .attrs = ubd_attrs, + .is_visible = ubd_attrs_are_visible, +}; + +static const struct attribute_group *ubd_attr_groups[] = { + &ubd_attr_group, + NULL, +}; + +static int ubd_disk_register(int major, u64 size, int unit, + struct gendisk *disk) +{ + disk->major = major; + disk->first_minor = unit << UBD_SHIFT; + disk->minors = 1 << UBD_SHIFT; + disk->fops = &ubd_blops; + set_capacity(disk, size / 512); + sprintf(disk->disk_name, "ubd%c", 'a' + unit); + + ubd_devs[unit].pdev.id = unit; + ubd_devs[unit].pdev.name = DRIVER_NAME; + ubd_devs[unit].pdev.dev.release = ubd_device_release; + dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]); + platform_device_register(&ubd_devs[unit].pdev); + + disk->private_data = &ubd_devs[unit]; + disk->queue = ubd_devs[unit].queue; + return device_add_disk(&ubd_devs[unit].pdev.dev, disk, ubd_attr_groups); +} + +#define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE)) + +static const struct blk_mq_ops ubd_mq_ops = { + .queue_rq = ubd_queue_rq, +}; + +static int ubd_add(int n, char **error_out) +{ + struct ubd *ubd_dev = &ubd_devs[n]; + struct 
gendisk *disk; + int err = 0; + + if(ubd_dev->file == NULL) + goto out; + + err = ubd_file_size(ubd_dev, &ubd_dev->size); + if(err < 0){ + *error_out = "Couldn't determine size of device's file"; + goto out; + } + + ubd_dev->size = ROUND_BLOCK(ubd_dev->size); + + ubd_dev->tag_set.ops = &ubd_mq_ops; + ubd_dev->tag_set.queue_depth = 64; + ubd_dev->tag_set.numa_node = NUMA_NO_NODE; + ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + ubd_dev->tag_set.driver_data = ubd_dev; + ubd_dev->tag_set.nr_hw_queues = 1; + + err = blk_mq_alloc_tag_set(&ubd_dev->tag_set); + if (err) + goto out; + + disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev); + if (IS_ERR(disk)) { + err = PTR_ERR(disk); + goto out_cleanup_tags; + } + ubd_dev->queue = disk->queue; + + blk_queue_write_cache(ubd_dev->queue, true, false); + blk_queue_max_segments(ubd_dev->queue, MAX_SG); + blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1); + err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk); + if (err) + goto out_cleanup_disk; + + ubd_gendisk[n] = disk; + return 0; + +out_cleanup_disk: + put_disk(disk); +out_cleanup_tags: + blk_mq_free_tag_set(&ubd_dev->tag_set); +out: + return err; +} + +static int ubd_config(char *str, char **error_out) +{ + int n, ret; + + /* This string is possibly broken up and stored, so it's only + * freed if ubd_setup_common fails, or if only general options + * were set. + */ + str = kstrdup(str, GFP_KERNEL); + if (str == NULL) { + *error_out = "Failed to allocate memory"; + return -ENOMEM; + } + + ret = ubd_setup_common(str, &n, error_out); + if (ret) + goto err_free; + + if (n == -1) { + ret = 0; + goto err_free; + } + + mutex_lock(&ubd_lock); + ret = ubd_add(n, error_out); + if (ret) + ubd_devs[n].file = NULL; + mutex_unlock(&ubd_lock); + +out: + return ret; + +err_free: + kfree(str); + goto out; +} + +static int ubd_get_config(char *name, char *str, int size, char **error_out) +{ + struct ubd *ubd_dev; + int n, len = 0; + + n = parse_unit(&name); + if((n >= MAX_DEV) || (n < 0)){ + *error_out = "ubd_get_config : device number out of range"; + return -1; + } + + ubd_dev = &ubd_devs[n]; + mutex_lock(&ubd_lock); + + if(ubd_dev->file == NULL){ + CONFIG_CHUNK(str, size, len, "", 1); + goto out; + } + + CONFIG_CHUNK(str, size, len, ubd_dev->file, 0); + + if(ubd_dev->cow.file != NULL){ + CONFIG_CHUNK(str, size, len, ",", 0); + CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1); + } + else CONFIG_CHUNK(str, size, len, "", 1); + + out: + mutex_unlock(&ubd_lock); + return len; +} + +static int ubd_id(char **str, int *start_out, int *end_out) +{ + int n; + + n = parse_unit(str); + *start_out = 0; + *end_out = MAX_DEV - 1; + return n; +} + +static int ubd_remove(int n, char **error_out) +{ + struct gendisk *disk = ubd_gendisk[n]; + struct ubd *ubd_dev; + int err = -ENODEV; + + mutex_lock(&ubd_lock); + + ubd_dev = &ubd_devs[n]; + + if(ubd_dev->file == NULL) + goto out; + + /* you cannot remove a open disk */ + err = -EBUSY; + if(ubd_dev->count > 0) + goto out; + + ubd_gendisk[n] = NULL; + if(disk != NULL){ + del_gendisk(disk); + put_disk(disk); + } + + err = 0; + platform_device_unregister(&ubd_dev->pdev); +out: + mutex_unlock(&ubd_lock); + return err; +} + +/* All these are called by mconsole in process context and without + * ubd-specific locks. The structure itself is const except for .list. 
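+ *
+ * A hedged usage example (client invocation assumed; umid and device
+ * strings are illustrative only):
+ *
+ *	uml_mconsole <umid> config ubd1=cow_file,backing_file
+ *	uml_mconsole <umid> config ubd1
+ *	uml_mconsole <umid> remove ubd1
+ *
+ * which land in .config, .get_config and .remove respectively.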
+ */ +static struct mc_device ubd_mc = { + .list = LIST_HEAD_INIT(ubd_mc.list), + .name = "ubd", + .config = ubd_config, + .get_config = ubd_get_config, + .id = ubd_id, + .remove = ubd_remove, +}; + +static int __init ubd_mc_init(void) +{ + mconsole_register_dev(&ubd_mc); + return 0; +} + +__initcall(ubd_mc_init); + +static int __init ubd0_init(void) +{ + struct ubd *ubd_dev = &ubd_devs[0]; + + mutex_lock(&ubd_lock); + if(ubd_dev->file == NULL) + ubd_dev->file = "root_fs"; + mutex_unlock(&ubd_lock); + + return 0; +} + +__initcall(ubd0_init); + +/* Used in ubd_init, which is an initcall */ +static struct platform_driver ubd_driver = { + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init ubd_init(void) +{ + char *error; + int i, err; + + if (register_blkdev(UBD_MAJOR, "ubd")) + return -1; + + irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE, + sizeof(struct io_thread_req *), + GFP_KERNEL + ); + irq_remainder = 0; + + if (irq_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } + io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE, + sizeof(struct io_thread_req *), + GFP_KERNEL + ); + + io_remainder = 0; + + if (io_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } + platform_driver_register(&ubd_driver); + mutex_lock(&ubd_lock); + for (i = 0; i < MAX_DEV; i++){ + err = ubd_add(i, &error); + if(err) + printk(KERN_ERR "Failed to initialize ubd device %d :" + "%s\n", i, error); + } + mutex_unlock(&ubd_lock); + return 0; +} + +late_initcall(ubd_init); + +static int __init ubd_driver_init(void){ + unsigned long stack; + int err; + + /* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/ + if(global_openflags.s){ + printk(KERN_INFO "ubd: Synchronous mode\n"); + /* Letting ubd=sync be like using ubd#s= instead of ubd#= is + * enough. So use anyway the io thread. 
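+ *
+ * A hedged restatement (inferred from ubd_setup_common() above, not a
+ * quote): .s only makes the per-device opens synchronous via of_sync(),
+ * roughly
+ *
+ *	struct openflags flags = of_sync(global_openflags);
+ *	fd = os_open_file(name, flags, 0644);
+ *
+ * so requests still flow through the io thread either way.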
*/ + } + stack = alloc_stack(0, 0); + io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd); + if(io_pid < 0){ + printk(KERN_ERR + "ubd : Failed to start I/O thread (errno = %d) - " + "falling back to synchronous I/O\n", -io_pid); + io_pid = -1; + return 0; + } + err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, + 0, "ubd", ubd_devs); + if(err < 0) + printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); + return 0; +} + +device_initcall(ubd_driver_init); + +static int ubd_open(struct gendisk *disk, blk_mode_t mode) +{ + struct ubd *ubd_dev = disk->private_data; + int err = 0; + + mutex_lock(&ubd_mutex); + if(ubd_dev->count == 0){ + err = ubd_open_dev(ubd_dev); + if(err){ + printk(KERN_ERR "%s: Can't open \"%s\": errno = %d\n", + disk->disk_name, ubd_dev->file, -err); + goto out; + } + } + ubd_dev->count++; + set_disk_ro(disk, !ubd_dev->openflags.w); +out: + mutex_unlock(&ubd_mutex); + return err; +} + +static void ubd_release(struct gendisk *disk) +{ + struct ubd *ubd_dev = disk->private_data; + + mutex_lock(&ubd_mutex); + if(--ubd_dev->count == 0) + ubd_close_dev(ubd_dev); + mutex_unlock(&ubd_mutex); +} + +static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask, + __u64 *cow_offset, unsigned long *bitmap, + __u64 bitmap_offset, unsigned long *bitmap_words, + __u64 bitmap_len) +{ + __u64 sector = io_offset >> SECTOR_SHIFT; + int i, update_bitmap = 0; + + for (i = 0; i < length >> SECTOR_SHIFT; i++) { + if(cow_mask != NULL) + ubd_set_bit(i, (unsigned char *) cow_mask); + if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) + continue; + + update_bitmap = 1; + ubd_set_bit(sector + i, (unsigned char *) bitmap); + } + + if(!update_bitmap) + return; + + *cow_offset = sector / (sizeof(unsigned long) * 8); + + /* This takes care of the case where we're exactly at the end of the + * device, and *cow_offset + 1 is off the end. So, just back it up + * by one word. Thanks to Lynn Kerby for the fix and James McMechan + * for the original diagnosis. 
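+ *
+ * A hedged worked example (sizes invented): with bitmap_len == 16 bytes
+ * on a 64-bit host there are DIV_ROUND_UP(16, 8) == 2 bitmap words, so
+ * a write near the end of the device that computes *cow_offset == 1
+ * would make bitmap[*cow_offset + 1] the nonexistent third word;
+ * backing the offset up to 0 keeps both bitmap_words[] loads inside the
+ * allocation.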
+ */ + if (*cow_offset == (DIV_ROUND_UP(bitmap_len, + sizeof(unsigned long)) - 1)) + (*cow_offset)--; + + bitmap_words[0] = bitmap[*cow_offset]; + bitmap_words[1] = bitmap[*cow_offset + 1]; + + *cow_offset *= sizeof(unsigned long); + *cow_offset += bitmap_offset; +} + +static void cowify_req(struct io_thread_req *req, struct io_desc *segment, + unsigned long offset, unsigned long *bitmap, + __u64 bitmap_offset, __u64 bitmap_len) +{ + __u64 sector = offset >> SECTOR_SHIFT; + int i; + + if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT) + panic("Operation too long"); + + if (req_op(req->req) == REQ_OP_READ) { + for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) { + if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) + ubd_set_bit(i, (unsigned char *) + &segment->sector_mask); + } + } else { + cowify_bitmap(offset, segment->length, &segment->sector_mask, + &segment->cow_offset, bitmap, bitmap_offset, + segment->bitmap_words, bitmap_len); + } +} + +static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req, + struct request *req) +{ + struct bio_vec bvec; + struct req_iterator iter; + int i = 0; + unsigned long byte_offset = io_req->offset; + enum req_op op = req_op(req); + + if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) { + io_req->io_desc[0].buffer = NULL; + io_req->io_desc[0].length = blk_rq_bytes(req); + } else { + rq_for_each_segment(bvec, req, iter) { + BUG_ON(i >= io_req->desc_cnt); + + io_req->io_desc[i].buffer = bvec_virt(&bvec); + io_req->io_desc[i].length = bvec.bv_len; + i++; + } + } + + if (dev->cow.file) { + for (i = 0; i < io_req->desc_cnt; i++) { + cowify_req(io_req, &io_req->io_desc[i], byte_offset, + dev->cow.bitmap, dev->cow.bitmap_offset, + dev->cow.bitmap_len); + byte_offset += io_req->io_desc[i].length; + } + + } +} + +static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req, + int desc_cnt) +{ + struct io_thread_req *io_req; + int i; + + io_req = kmalloc(sizeof(*io_req) + + (desc_cnt * sizeof(struct io_desc)), + GFP_ATOMIC); + if (!io_req) + return NULL; + + io_req->req = req; + if (dev->cow.file) + io_req->fds[0] = dev->cow.fd; + else + io_req->fds[0] = dev->fd; + io_req->error = 0; + io_req->sectorsize = SECTOR_SIZE; + io_req->fds[1] = dev->fd; + io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT; + io_req->offsets[0] = 0; + io_req->offsets[1] = dev->cow.data_offset; + + for (i = 0 ; i < desc_cnt; i++) { + io_req->io_desc[i].sector_mask = 0; + io_req->io_desc[i].cow_offset = -1; + } + + return io_req; +} + +static int ubd_submit_request(struct ubd *dev, struct request *req) +{ + int segs = 0; + struct io_thread_req *io_req; + int ret; + enum req_op op = req_op(req); + + if (op == REQ_OP_FLUSH) + segs = 0; + else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) + segs = 1; + else + segs = blk_rq_nr_phys_segments(req); + + io_req = ubd_alloc_req(dev, req, segs); + if (!io_req) + return -ENOMEM; + + io_req->desc_cnt = segs; + if (segs) + ubd_map_req(dev, io_req, req); + + ret = os_write_file(thread_fd, &io_req, sizeof(io_req)); + if (ret != sizeof(io_req)) { + if (ret != -EAGAIN) + pr_err("write to io thread failed: %d\n", -ret); + kfree(io_req); + } + return ret; +} + +static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct ubd *ubd_dev = hctx->queue->queuedata; + struct request *req = bd->rq; + int ret = 0, res = BLK_STS_OK; + + blk_mq_start_request(req); + + spin_lock_irq(&ubd_dev->lock); + + switch (req_op(req)) { + case 
REQ_OP_FLUSH: + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_DISCARD: + case REQ_OP_WRITE_ZEROES: + ret = ubd_submit_request(ubd_dev, req); + break; + default: + WARN_ON_ONCE(1); + res = BLK_STS_NOTSUPP; + } + + spin_unlock_irq(&ubd_dev->lock); + + if (ret < 0) { + if (ret == -ENOMEM) + res = BLK_STS_RESOURCE; + else + res = BLK_STS_DEV_RESOURCE; + } + + return res; +} + +static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct ubd *ubd_dev = bdev->bd_disk->private_data; + + geo->heads = 128; + geo->sectors = 32; + geo->cylinders = ubd_dev->size / (128 * 32 * 512); + return 0; +} + +static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct ubd *ubd_dev = bdev->bd_disk->private_data; + u16 ubd_id[ATA_ID_WORDS]; + + switch (cmd) { + struct cdrom_volctrl volume; + case HDIO_GET_IDENTITY: + memset(&ubd_id, 0, ATA_ID_WORDS * 2); + ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512); + ubd_id[ATA_ID_HEADS] = 128; + ubd_id[ATA_ID_SECTORS] = 32; + if(copy_to_user((char __user *) arg, (char *) &ubd_id, + sizeof(ubd_id))) + return -EFAULT; + return 0; + + case CDROMVOLREAD: + if(copy_from_user(&volume, (char __user *) arg, sizeof(volume))) + return -EFAULT; + volume.channel0 = 255; + volume.channel1 = 255; + volume.channel2 = 255; + volume.channel3 = 255; + if(copy_to_user((char __user *) arg, &volume, sizeof(volume))) + return -EFAULT; + return 0; + } + return -EINVAL; +} + +static int map_error(int error_code) +{ + switch (error_code) { + case 0: + return BLK_STS_OK; + case ENOSYS: + case EOPNOTSUPP: + return BLK_STS_NOTSUPP; + case ENOSPC: + return BLK_STS_NOSPC; + } + return BLK_STS_IOERR; +} + +/* + * Everything from here onwards *IS NOT PART OF THE KERNEL* + * + * The following functions are part of UML hypervisor code. + * All functions from here onwards are executed as a helper + * thread and are not allowed to execute any kernel functions. + * + * Any communication must occur strictly via shared memory and IPC. + * + * Do not add printks, locks, kernel memory operations, etc - it + * will result in unpredictable behaviour and/or crashes. 
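+ *
+ * Concretely (a hedged restatement of the contract implied above): the
+ * only shared state is the io_thread_req structures reachable via the
+ * pointers read from the pipe, and the helpers below restrict
+ * themselves to the os_* wrappers around plain host syscalls
+ * (os_pread_file, os_pwrite_file, os_sync_file, os_falloc_punch,
+ * os_falloc_zeroes).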
+ */ + +static int update_bitmap(struct io_thread_req *req, struct io_desc *segment) +{ + int n; + + if (segment->cow_offset == -1) + return map_error(0); + + n = os_pwrite_file(req->fds[1], &segment->bitmap_words, + sizeof(segment->bitmap_words), segment->cow_offset); + if (n != sizeof(segment->bitmap_words)) + return map_error(-n); + + return map_error(0); +} + +static void do_io(struct io_thread_req *req, struct io_desc *desc) +{ + char *buf = NULL; + unsigned long len; + int n, nsectors, start, end, bit; + __u64 off; + + /* FLUSH is really a special case, we cannot "case" it with others */ + + if (req_op(req->req) == REQ_OP_FLUSH) { + /* fds[0] is always either the rw image or our cow file */ + req->error = map_error(-os_sync_file(req->fds[0])); + return; + } + + nsectors = desc->length / req->sectorsize; + start = 0; + do { + bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask); + end = start; + while((end < nsectors) && + (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit)) + end++; + + off = req->offset + req->offsets[bit] + + start * req->sectorsize; + len = (end - start) * req->sectorsize; + if (desc->buffer != NULL) + buf = &desc->buffer[start * req->sectorsize]; + + switch (req_op(req->req)) { + case REQ_OP_READ: + n = 0; + do { + buf = &buf[n]; + len -= n; + n = os_pread_file(req->fds[bit], buf, len, off); + if (n < 0) { + req->error = map_error(-n); + return; + } + } while((n < len) && (n != 0)); + if (n < len) memset(&buf[n], 0, len - n); + break; + case REQ_OP_WRITE: + n = os_pwrite_file(req->fds[bit], buf, len, off); + if(n != len){ + req->error = map_error(-n); + return; + } + break; + case REQ_OP_DISCARD: + n = os_falloc_punch(req->fds[bit], off, len); + if (n) { + req->error = map_error(-n); + return; + } + break; + case REQ_OP_WRITE_ZEROES: + n = os_falloc_zeroes(req->fds[bit], off, len); + if (n) { + req->error = map_error(-n); + return; + } + break; + default: + WARN_ON_ONCE(1); + req->error = BLK_STS_NOTSUPP; + return; + } + + start = end; + } while(start < nsectors); + + req->offset += len; + req->error = update_bitmap(req, desc); +} + +/* Changed in start_io_thread, which is serialized by being called only + * from ubd_init, which is an initcall. + */ +int kernel_fd = -1; + +/* Only changed by the io thread. XXX: currently unused. 
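+ *
+ * For reference, a hedged worked example of the run-splitting in
+ * do_io() above (mask value invented): with nsectors == 4 and a
+ * sector_mask of 0b0110, three transfers are issued: sectors [0,1)
+ * from fds[0] (the backing file), [1,3) from fds[1] (the COW file,
+ * shifted by req->offsets[1] == data_offset), and [3,4) from fds[0]
+ * again, each starting at req->offset + req->offsets[bit] +
+ * start * req->sectorsize.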
*/ +static int io_count; + +int io_thread(void *arg) +{ + int n, count, written, res; + + os_fix_helper_signals(); + + while(1){ + n = bulk_req_safe_read( + kernel_fd, + io_req_buffer, + &io_remainder, + &io_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if (n <= 0) { + if (n == -EAGAIN) + ubd_read_poll(-1); + + continue; + } + + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + struct io_thread_req *req = (*io_req_buffer)[count]; + int i; + + io_count++; + for (i = 0; !req->error && i < req->desc_cnt; i++) + do_io(req, &(req->io_desc[i])); + + } + + written = 0; + + do { + res = os_write_file(kernel_fd, + ((char *) io_req_buffer) + written, + n - written); + if (res >= 0) { + written += res; + } + if (written < n) { + ubd_write_poll(-1); + } + } while (written < n); + } + + return 0; +} diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c new file mode 100644 index 0000000000..a1afe414ce --- /dev/null +++ b/arch/um/drivers/ubd_user.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 Anton Ivanov (aivanov@brocade.com) + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2001 Ridgerun,Inc (glonnon@ridgerun.com) + */ + +#include <stddef.h> +#include <unistd.h> +#include <errno.h> +#include <sched.h> +#include <signal.h> +#include <string.h> +#include <netinet/in.h> +#include <sys/time.h> +#include <sys/socket.h> +#include <sys/mman.h> +#include <sys/param.h> +#include <endian.h> +#include <byteswap.h> + +#include "ubd.h" +#include <os.h> +#include <poll.h> + +struct pollfd kernel_pollfd; + +int start_io_thread(unsigned long sp, int *fd_out) +{ + int pid, fds[2], err; + + err = os_pipe(fds, 1, 1); + if(err < 0){ + printk("start_io_thread - os_pipe failed, err = %d\n", -err); + goto out; + } + + kernel_fd = fds[0]; + kernel_pollfd.fd = kernel_fd; + kernel_pollfd.events = POLLIN; + *fd_out = fds[1]; + + err = os_set_fd_block(*fd_out, 0); + err = os_set_fd_block(kernel_fd, 0); + if (err) { + printk("start_io_thread - failed to set nonblocking I/O.\n"); + goto out_close; + } + + pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL); + if(pid < 0){ + err = -errno; + printk("start_io_thread - clone failed : errno = %d\n", errno); + goto out_close; + } + + return(pid); + + out_close: + os_close_file(fds[0]); + os_close_file(fds[1]); + kernel_fd = -1; + *fd_out = -1; + out: + return err; +} + +int ubd_read_poll(int timeout) +{ + kernel_pollfd.events = POLLIN; + return poll(&kernel_pollfd, 1, timeout); +} +int ubd_write_poll(int timeout) +{ + kernel_pollfd.events = POLLOUT; + return poll(&kernel_pollfd, 1, timeout); +} + diff --git a/arch/um/drivers/umcast.h b/arch/um/drivers/umcast.h new file mode 100644 index 0000000000..fe39bee1e3 --- /dev/null +++ b/arch/um/drivers/umcast.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __DRIVERS_UMCAST_H +#define __DRIVERS_UMCAST_H + +#include <net_user.h> + +struct umcast_data { + char *addr; + unsigned short lport; + unsigned short rport; + void *listen_addr; + void *remote_addr; + int ttl; + int unicast; + void *dev; +}; + +extern const struct net_user_info umcast_user_info; + +extern int umcast_user_write(int fd, void *buf, int len, + struct umcast_data *pri); + +#endif diff --git a/arch/um/drivers/umcast_kern.c b/arch/um/drivers/umcast_kern.c new file mode 100644 index 0000000000..595a54f2b9 --- /dev/null +++ b/arch/um/drivers/umcast_kern.c @@ -0,0 
+1,188 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * user-mode-linux networking multicast transport + * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org> + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * + * based on the existing uml-networking code, which is + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 by various other people who didn't put their name here. + * + */ + +#include <linux/init.h> +#include <linux/netdevice.h> +#include "umcast.h" +#include <net_kern.h> + +struct umcast_init { + char *addr; + int lport; + int rport; + int ttl; + bool unicast; +}; + +static void umcast_init(struct net_device *dev, void *data) +{ + struct uml_net_private *pri; + struct umcast_data *dpri; + struct umcast_init *init = data; + + pri = netdev_priv(dev); + dpri = (struct umcast_data *) pri->user; + dpri->addr = init->addr; + dpri->lport = init->lport; + dpri->rport = init->rport; + dpri->unicast = init->unicast; + dpri->ttl = init->ttl; + dpri->dev = dev; + + if (dpri->unicast) { + printk(KERN_INFO "ucast backend address: %s:%u listen port: " + "%u\n", dpri->addr, dpri->rport, dpri->lport); + } else { + printk(KERN_INFO "mcast backend multicast address: %s:%u, " + "TTL:%u\n", dpri->addr, dpri->lport, dpri->ttl); + } +} + +static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return net_recvfrom(fd, skb_mac_header(skb), + skb->dev->mtu + ETH_HEADER_OTHER); +} + +static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + return umcast_user_write(fd, skb->data, skb->len, + (struct umcast_data *) &lp->user); +} + +static const struct net_kern_info umcast_kern_info = { + .init = umcast_init, + .protocol = eth_protocol, + .read = umcast_read, + .write = umcast_write, +}; + +static int mcast_setup(char *str, char **mac_out, void *data) +{ + struct umcast_init *init = data; + char *port_str = NULL, *ttl_str = NULL, *remain; + char *last; + + *init = ((struct umcast_init) + { .addr = "239.192.168.1", + .lport = 1102, + .ttl = 1 }); + + remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str, + NULL); + if (remain != NULL) { + printk(KERN_ERR "mcast_setup - Extra garbage on " + "specification : '%s'\n", remain); + return 0; + } + + if (port_str != NULL) { + init->lport = simple_strtoul(port_str, &last, 10); + if ((*last != '\0') || (last == port_str)) { + printk(KERN_ERR "mcast_setup - Bad port : '%s'\n", + port_str); + return 0; + } + } + + if (ttl_str != NULL) { + init->ttl = simple_strtoul(ttl_str, &last, 10); + if ((*last != '\0') || (last == ttl_str)) { + printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n", + ttl_str); + return 0; + } + } + + init->unicast = false; + init->rport = init->lport; + + printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr, + init->lport, init->ttl); + + return 1; +} + +static int ucast_setup(char *str, char **mac_out, void *data) +{ + struct umcast_init *init = data; + char *lport_str = NULL, *rport_str = NULL, *remain; + char *last; + + *init = ((struct umcast_init) + { .addr = "", + .lport = 1102, + .rport = 1102 }); + + remain = split_if_spec(str, mac_out, &init->addr, + &lport_str, &rport_str, NULL); + if (remain != NULL) { + printk(KERN_ERR "ucast_setup - Extra garbage on " + "specification : '%s'\n", remain); + return 0; + } + + if (lport_str != NULL) { + init->lport = simple_strtoul(lport_str, &last, 10); + if ((*last != '\0') || (last == lport_str)) { + printk(KERN_ERR 
"ucast_setup - Bad listen port : " + "'%s'\n", lport_str); + return 0; + } + } + + if (rport_str != NULL) { + init->rport = simple_strtoul(rport_str, &last, 10); + if ((*last != '\0') || (last == rport_str)) { + printk(KERN_ERR "ucast_setup - Bad remote port : " + "'%s'\n", rport_str); + return 0; + } + } + + init->unicast = true; + + printk(KERN_INFO "Configured ucast device: :%u -> %s:%u\n", + init->lport, init->addr, init->rport); + + return 1; +} + +static struct transport mcast_transport = { + .list = LIST_HEAD_INIT(mcast_transport.list), + .name = "mcast", + .setup = mcast_setup, + .user = &umcast_user_info, + .kern = &umcast_kern_info, + .private_size = sizeof(struct umcast_data), + .setup_size = sizeof(struct umcast_init), +}; + +static struct transport ucast_transport = { + .list = LIST_HEAD_INIT(ucast_transport.list), + .name = "ucast", + .setup = ucast_setup, + .user = &umcast_user_info, + .kern = &umcast_kern_info, + .private_size = sizeof(struct umcast_data), + .setup_size = sizeof(struct umcast_init), +}; + +static int register_umcast(void) +{ + register_transport(&mcast_transport); + register_transport(&ucast_transport); + return 0; +} + +late_initcall(register_umcast); diff --git a/arch/um/drivers/umcast_user.c b/arch/um/drivers/umcast_user.c new file mode 100644 index 0000000000..b50b13cff0 --- /dev/null +++ b/arch/um/drivers/umcast_user.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * user-mode-linux networking multicast transport + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org> + * + * based on the existing uml-networking code, which is + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 by various other people who didn't put their name here. 
+ * + * + */ + +#include <unistd.h> +#include <errno.h> +#include <netinet/in.h> +#include "umcast.h" +#include <net_user.h> +#include <um_malloc.h> + +static struct sockaddr_in *new_addr(char *addr, unsigned short port) +{ + struct sockaddr_in *sin; + + sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL); + if (sin == NULL) { + printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in " + "failed\n"); + return NULL; + } + sin->sin_family = AF_INET; + if (addr) + sin->sin_addr.s_addr = in_aton(addr); + else + sin->sin_addr.s_addr = INADDR_ANY; + sin->sin_port = htons(port); + return sin; +} + +static int umcast_user_init(void *data, void *dev) +{ + struct umcast_data *pri = data; + + pri->remote_addr = new_addr(pri->addr, pri->rport); + if (pri->unicast) + pri->listen_addr = new_addr(NULL, pri->lport); + else + pri->listen_addr = pri->remote_addr; + pri->dev = dev; + return 0; +} + +static void umcast_remove(void *data) +{ + struct umcast_data *pri = data; + + kfree(pri->listen_addr); + if (pri->unicast) + kfree(pri->remote_addr); + pri->listen_addr = pri->remote_addr = NULL; +} + +static int umcast_open(void *data) +{ + struct umcast_data *pri = data; + struct sockaddr_in *lsin = pri->listen_addr; + struct sockaddr_in *rsin = pri->remote_addr; + struct ip_mreq mreq; + int fd, yes = 1, err = -EINVAL; + + + if ((!pri->unicast && lsin->sin_addr.s_addr == 0) || + (rsin->sin_addr.s_addr == 0) || + (lsin->sin_port == 0) || (rsin->sin_port == 0)) + goto out; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + + if (fd < 0) { + err = -errno; + printk(UM_KERN_ERR "umcast_open : data socket failed, " + "errno = %d\n", errno); + goto out; + } + + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) { + err = -errno; + printk(UM_KERN_ERR "umcast_open: SO_REUSEADDR failed, " + "errno = %d\n", errno); + goto out_close; + } + + if (!pri->unicast) { + /* set ttl according to config */ + if (setsockopt(fd, SOL_IP, IP_MULTICAST_TTL, &pri->ttl, + sizeof(pri->ttl)) < 0) { + err = -errno; + printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_TTL " + "failed, error = %d\n", errno); + goto out_close; + } + + /* set LOOP, so data does get fed back to local sockets */ + if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP, + &yes, sizeof(yes)) < 0) { + err = -errno; + printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_LOOP " + "failed, error = %d\n", errno); + goto out_close; + } + } + + /* bind socket to the address */ + if (bind(fd, (struct sockaddr *) lsin, sizeof(*lsin)) < 0) { + err = -errno; + printk(UM_KERN_ERR "umcast_open : data bind failed, " + "errno = %d\n", errno); + goto out_close; + } + + if (!pri->unicast) { + /* subscribe to the multicast group */ + mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr; + mreq.imr_interface.s_addr = 0; + if (setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP, + &mreq, sizeof(mreq)) < 0) { + err = -errno; + printk(UM_KERN_ERR "umcast_open: IP_ADD_MEMBERSHIP " + "failed, error = %d\n", errno); + printk(UM_KERN_ERR "There appears not to be a " + "multicast-capable network interface on the " + "host.\n"); + printk(UM_KERN_ERR "eth0 should be configured in order " + "to use the multicast transport.\n"); + goto out_close; + } + } + + return fd; + + out_close: + close(fd); + out: + return err; +} + +static void umcast_close(int fd, void *data) +{ + struct umcast_data *pri = data; + + if (!pri->unicast) { + struct ip_mreq mreq; + struct sockaddr_in *lsin = pri->listen_addr; + + mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr; + mreq.imr_interface.s_addr = 0; + if (setsockopt(fd, SOL_IP, 
IP_DROP_MEMBERSHIP, + &mreq, sizeof(mreq)) < 0) { + printk(UM_KERN_ERR "umcast_close: IP_DROP_MEMBERSHIP " + "failed, error = %d\n", errno); + } + } + + close(fd); +} + +int umcast_user_write(int fd, void *buf, int len, struct umcast_data *pri) +{ + struct sockaddr_in *data_addr = pri->remote_addr; + + return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr)); +} + +const struct net_user_info umcast_user_info = { + .init = umcast_user_init, + .open = umcast_open, + .close = umcast_close, + .remove = umcast_remove, + .add_address = NULL, + .delete_address = NULL, + .mtu = ETH_MAX_PACKET, + .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER, +}; diff --git a/arch/um/drivers/vde.h b/arch/um/drivers/vde.h new file mode 100644 index 0000000000..cab0379e61 --- /dev/null +++ b/arch/um/drivers/vde.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2007 Luca Bigliardi (shammash@artha.org). + */ + +#ifndef __UM_VDE_H__ +#define __UM_VDE_H__ + +struct vde_data { + char *vde_switch; + char *descr; + void *args; + void *conn; + void *dev; +}; + +struct vde_init { + char *vde_switch; + char *descr; + int port; + char *group; + int mode; +}; + +extern const struct net_user_info vde_user_info; + +extern void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init); + +extern int vde_user_read(void *conn, void *buf, int len); +extern int vde_user_write(void *conn, void *buf, int len); + +#endif diff --git a/arch/um/drivers/vde_kern.c b/arch/um/drivers/vde_kern.c new file mode 100644 index 0000000000..bc6f22cbfb --- /dev/null +++ b/arch/um/drivers/vde_kern.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Luca Bigliardi (shammash@artha.org). + * + * Transport usage: + * ethN=vde,<vde_switch>,<mac addr>,<port>,<group>,<mode>,<description> + * + */ + +#include <linux/init.h> +#include <linux/netdevice.h> +#include <net_kern.h> +#include <net_user.h> +#include "vde.h" + +static void vde_init(struct net_device *dev, void *data) +{ + struct vde_init *init = data; + struct uml_net_private *pri; + struct vde_data *vpri; + + pri = netdev_priv(dev); + vpri = (struct vde_data *) pri->user; + + vpri->vde_switch = init->vde_switch; + vpri->descr = init->descr ? init->descr : "UML vde_transport"; + vpri->args = NULL; + vpri->conn = NULL; + vpri->dev = dev; + + printk("vde backend - %s, ", vpri->vde_switch ? 
+ vpri->vde_switch : "(default socket)"); + + vde_init_libstuff(vpri, init); + + printk("\n"); +} + +static int vde_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + struct vde_data *pri = (struct vde_data *) &lp->user; + + if (pri->conn != NULL) + return vde_user_read(pri->conn, skb_mac_header(skb), + skb->dev->mtu + ETH_HEADER_OTHER); + + printk(KERN_ERR "vde_read - we have no VDECONN to read from"); + return -EBADF; +} + +static int vde_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) +{ + struct vde_data *pri = (struct vde_data *) &lp->user; + + if (pri->conn != NULL) + return vde_user_write((void *)pri->conn, skb->data, + skb->len); + + printk(KERN_ERR "vde_write - we have no VDECONN to write to"); + return -EBADF; +} + +static const struct net_kern_info vde_kern_info = { + .init = vde_init, + .protocol = eth_protocol, + .read = vde_read, + .write = vde_write, +}; + +static int vde_setup(char *str, char **mac_out, void *data) +{ + struct vde_init *init = data; + char *remain, *port_str = NULL, *mode_str = NULL, *last; + + *init = ((struct vde_init) + { .vde_switch = NULL, + .descr = NULL, + .port = 0, + .group = NULL, + .mode = 0 }); + + remain = split_if_spec(str, &init->vde_switch, mac_out, &port_str, + &init->group, &mode_str, &init->descr, NULL); + + if (remain != NULL) + printk(KERN_WARNING "vde_setup - Ignoring extra data :" + "'%s'\n", remain); + + if (port_str != NULL) { + init->port = simple_strtoul(port_str, &last, 10); + if ((*last != '\0') || (last == port_str)) { + printk(KERN_ERR "vde_setup - Bad port : '%s'\n", + port_str); + return 0; + } + } + + if (mode_str != NULL) { + init->mode = simple_strtoul(mode_str, &last, 8); + if ((*last != '\0') || (last == mode_str)) { + printk(KERN_ERR "vde_setup - Bad mode : '%s'\n", + mode_str); + return 0; + } + } + + printk(KERN_INFO "Configured vde device: %s\n", init->vde_switch ? + init->vde_switch : "(default socket)"); + + return 1; +} + +static struct transport vde_transport = { + .list = LIST_HEAD_INIT(vde_transport.list), + .name = "vde", + .setup = vde_setup, + .user = &vde_user_info, + .kern = &vde_kern_info, + .private_size = sizeof(struct vde_data), + .setup_size = sizeof(struct vde_init), +}; + +static int register_vde(void) +{ + register_transport(&vde_transport); + return 0; +} + +late_initcall(register_vde); diff --git a/arch/um/drivers/vde_user.c b/arch/um/drivers/vde_user.c new file mode 100644 index 0000000000..bc7dc4e1e4 --- /dev/null +++ b/arch/um/drivers/vde_user.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Luca Bigliardi (shammash@artha.org). 
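+ *
+ * A hedged summary of the libvdeplug entry points used below:
+ * vde_open() attaches to a switch control socket, vde_datafd() yields
+ * the pollable fd handed to the UML IRQ layer, and vde_recv()/
+ * vde_send() move one Ethernet frame per call.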
+ */ + +#include <stddef.h> +#include <errno.h> +#include <libvdeplug.h> +#include <net_user.h> +#include <um_malloc.h> +#include "vde.h" + +static int vde_user_init(void *data, void *dev) +{ + struct vde_data *pri = data; + VDECONN *conn = NULL; + int err = -EINVAL; + + pri->dev = dev; + + conn = vde_open(pri->vde_switch, pri->descr, pri->args); + + if (conn == NULL) { + err = -errno; + printk(UM_KERN_ERR "vde_user_init: vde_open failed, " + "errno = %d\n", errno); + return err; + } + + printk(UM_KERN_INFO "vde backend - connection opened\n"); + + pri->conn = conn; + + return 0; +} + +static int vde_user_open(void *data) +{ + struct vde_data *pri = data; + + if (pri->conn != NULL) + return vde_datafd(pri->conn); + + printk(UM_KERN_WARNING "vde_open - we have no VDECONN to open"); + return -EINVAL; +} + +static void vde_remove(void *data) +{ + struct vde_data *pri = data; + + if (pri->conn != NULL) { + printk(UM_KERN_INFO "vde backend - closing connection\n"); + vde_close(pri->conn); + pri->conn = NULL; + kfree(pri->args); + pri->args = NULL; + return; + } + + printk(UM_KERN_WARNING "vde_remove - we have no VDECONN to remove"); +} + +const struct net_user_info vde_user_info = { + .init = vde_user_init, + .open = vde_user_open, + .close = NULL, + .remove = vde_remove, + .add_address = NULL, + .delete_address = NULL, + .mtu = ETH_MAX_PACKET, + .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER, +}; + +void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init) +{ + struct vde_open_args *args; + + vpri->args = uml_kmalloc(sizeof(struct vde_open_args), UM_GFP_KERNEL); + if (vpri->args == NULL) { + printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args " + "allocation failed"); + return; + } + + args = vpri->args; + + args->port = init->port; + args->group = init->group; + args->mode = init->mode ? init->mode : 0700; + + args->port ? printk("port %d", args->port) : + printk("undefined port"); +} + +int vde_user_read(void *conn, void *buf, int len) +{ + VDECONN *vconn = conn; + int rv; + + if (vconn == NULL) + return 0; + + rv = vde_recv(vconn, buf, len, 0); + if (rv < 0) { + if (errno == EAGAIN) + return 0; + return -errno; + } + else if (rv == 0) + return -ENOTCONN; + + return rv; +} + +int vde_user_write(void *conn, void *buf, int len) +{ + VDECONN *vconn = conn; + + if (vconn == NULL) + return 0; + + return vde_send(vconn, buf, len, 0); +} + diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c new file mode 100644 index 0000000000..131b7cb295 --- /dev/null +++ b/arch/um/drivers/vector_kern.c @@ -0,0 +1,1766 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017 - 2019 Cambridge Greys Limited + * Copyright (C) 2011 - 2014 Cisco Systems Inc + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 by various other people who didn't put their name here. 
+ */ + +#include <linux/memblock.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/rtnetlink.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/firmware.h> +#include <linux/fs.h> +#include <uapi/linux/filter.h> +#include <init.h> +#include <irq_kern.h> +#include <irq_user.h> +#include <net_kern.h> +#include <os.h> +#include "mconsole_kern.h" +#include "vector_user.h" +#include "vector_kern.h" + +/* + * Adapted from network devices with the following major changes: + * All transports are static - simplifies the code significantly + * Multiple FDs/IRQs per device + * Vector IO optionally used for read/write, falling back to legacy + * based on configuration and/or availability + * Configuration is no longer positional - L2TPv3 and GRE require up to + * 10 parameters, passing this as positional is not fit for purpose. + * Only socket transports are supported + */ + + +#define DRIVER_NAME "uml-vector" +struct vector_cmd_line_arg { + struct list_head list; + int unit; + char *arguments; +}; + +struct vector_device { + struct list_head list; + struct net_device *dev; + struct platform_device pdev; + int unit; + int opened; +}; + +static LIST_HEAD(vec_cmd_line); + +static DEFINE_SPINLOCK(vector_devices_lock); +static LIST_HEAD(vector_devices); + +static int driver_registered; + +static void vector_eth_configure(int n, struct arglist *def); +static int vector_mmsg_rx(struct vector_private *vp, int budget); + +/* Argument accessors to set variables (and/or set default values) + * mtu, buffer sizing, default headroom, etc + */ + +#define DEFAULT_HEADROOM 2 +#define SAFETY_MARGIN 32 +#define DEFAULT_VECTOR_SIZE 64 +#define TX_SMALL_PACKET 128 +#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx_queue_max" }, + { "rx_queue_running_average" }, + { "tx_queue_max" }, + { "tx_queue_running_average" }, + { "rx_encaps_errors" }, + { "tx_timeout_count" }, + { "tx_restart_queue" }, + { "tx_kicks" }, + { "tx_flow_control_xon" }, + { "tx_flow_control_xoff" }, + { "rx_csum_offload_good" }, + { "rx_csum_offload_errors"}, + { "sg_ok"}, + { "sg_linearized"}, +}; + +#define VECTOR_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + +static void vector_reset_stats(struct vector_private *vp) +{ + vp->estats.rx_queue_max = 0; + vp->estats.rx_queue_running_average = 0; + vp->estats.tx_queue_max = 0; + vp->estats.tx_queue_running_average = 0; + vp->estats.rx_encaps_errors = 0; + vp->estats.tx_timeout_count = 0; + vp->estats.tx_restart_queue = 0; + vp->estats.tx_kicks = 0; + vp->estats.tx_flow_control_xon = 0; + vp->estats.tx_flow_control_xoff = 0; + vp->estats.sg_ok = 0; + vp->estats.sg_linearized = 0; +} + +static int get_mtu(struct arglist *def) +{ + char *mtu = uml_vector_fetch_arg(def, "mtu"); + long result; + + if (mtu != NULL) { + if (kstrtoul(mtu, 10, &result) == 0) + if ((result < (1 << 16) - 1) && (result >= 576)) + return result; + } + return ETH_MAX_PACKET; +} + +static char *get_bpf_file(struct arglist *def) +{ + return uml_vector_fetch_arg(def, "bpffile"); +} + +static bool get_bpf_flash(struct arglist *def) +{ + char *allow = uml_vector_fetch_arg(def, "bpfflash"); + long result; + + if (allow != NULL) { + if (kstrtoul(allow, 10, &result) == 0) + return (allow > 0); + } + return false; +} + +static int 
get_depth(struct arglist *def) +{ + char *mtu = uml_vector_fetch_arg(def, "depth"); + long result; + + if (mtu != NULL) { + if (kstrtoul(mtu, 10, &result) == 0) + return result; + } + return DEFAULT_VECTOR_SIZE; +} + +static int get_headroom(struct arglist *def) +{ + char *mtu = uml_vector_fetch_arg(def, "headroom"); + long result; + + if (mtu != NULL) { + if (kstrtoul(mtu, 10, &result) == 0) + return result; + } + return DEFAULT_HEADROOM; +} + +static int get_req_size(struct arglist *def) +{ + char *gro = uml_vector_fetch_arg(def, "gro"); + long result; + + if (gro != NULL) { + if (kstrtoul(gro, 10, &result) == 0) { + if (result > 0) + return 65536; + } + } + return get_mtu(def) + ETH_HEADER_OTHER + + get_headroom(def) + SAFETY_MARGIN; +} + + +static int get_transport_options(struct arglist *def) +{ + char *transport = uml_vector_fetch_arg(def, "transport"); + char *vector = uml_vector_fetch_arg(def, "vec"); + + int vec_rx = VECTOR_RX; + int vec_tx = VECTOR_TX; + long parsed; + int result = 0; + + if (transport == NULL) + return -EINVAL; + + if (vector != NULL) { + if (kstrtoul(vector, 10, &parsed) == 0) { + if (parsed == 0) { + vec_rx = 0; + vec_tx = 0; + } + } + } + + if (get_bpf_flash(def)) + result = VECTOR_BPF_FLASH; + + if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) + return result; + if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0) + return (result | vec_rx | VECTOR_BPF); + if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) + return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS); + return (result | vec_rx | vec_tx); +} + + +/* A mini-buffer for packet drop read + * All of our supported transports are datagram oriented and we always + * read using recvmsg or recvmmsg. If we pass a buffer which is smaller + * than the packet size it still counts as full packet read and will + * clean the incoming stream to keep sigio/epoll happy + */ + +#define DROP_BUFFER_SIZE 32 + +static char *drop_buffer; + +/* Array backed queues optimized for bulk enqueue/dequeue and + * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios. + * For more details and full design rationale see + * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt + */ + + +/* + * Advance the mmsg queue head by n = advance. Resets the queue to + * maximum enqueue/dequeue-at-once capacity if possible. Called by + * dequeuers. Caller must hold the head_lock! + */ + +static int vector_advancehead(struct vector_queue *qi, int advance) +{ + int queue_depth; + + qi->head = + (qi->head + advance) + % qi->max_depth; + + + spin_lock(&qi->tail_lock); + qi->queue_depth -= advance; + + /* we are at 0, use this to + * reset head and tail so we can use max size vectors + */ + + if (qi->queue_depth == 0) { + qi->head = 0; + qi->tail = 0; + } + queue_depth = qi->queue_depth; + spin_unlock(&qi->tail_lock); + return queue_depth; +} + +/* Advance the queue tail by n = advance. 
+ * This is called by enqueuers which should hold the + * head lock already + */ + +static int vector_advancetail(struct vector_queue *qi, int advance) +{ + int queue_depth; + + qi->tail = + (qi->tail + advance) + % qi->max_depth; + spin_lock(&qi->head_lock); + qi->queue_depth += advance; + queue_depth = qi->queue_depth; + spin_unlock(&qi->head_lock); + return queue_depth; +} + +static int prep_msg(struct vector_private *vp, + struct sk_buff *skb, + struct iovec *iov) +{ + int iov_index = 0; + int nr_frags, frag; + skb_frag_t *skb_frag; + + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags > MAX_IOV_SIZE) { + if (skb_linearize(skb) != 0) + goto drop; + } + if (vp->header_size > 0) { + iov[iov_index].iov_len = vp->header_size; + vp->form_header(iov[iov_index].iov_base, skb, vp); + iov_index++; + } + iov[iov_index].iov_base = skb->data; + if (nr_frags > 0) { + iov[iov_index].iov_len = skb->len - skb->data_len; + vp->estats.sg_ok++; + } else + iov[iov_index].iov_len = skb->len; + iov_index++; + for (frag = 0; frag < nr_frags; frag++) { + skb_frag = &skb_shinfo(skb)->frags[frag]; + iov[iov_index].iov_base = skb_frag_address_safe(skb_frag); + iov[iov_index].iov_len = skb_frag_size(skb_frag); + iov_index++; + } + return iov_index; +drop: + return -1; +} +/* + * Generic vector enqueue with support for forming headers using transport + * specific callback. Allows GRE, L2TPv3, RAW and other transports + * to use a common enqueue procedure in vector mode + */ + +static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb) +{ + struct vector_private *vp = netdev_priv(qi->dev); + int queue_depth; + int packet_len; + struct mmsghdr *mmsg_vector = qi->mmsg_vector; + int iov_count; + + spin_lock(&qi->tail_lock); + spin_lock(&qi->head_lock); + queue_depth = qi->queue_depth; + spin_unlock(&qi->head_lock); + + if (skb) + packet_len = skb->len; + + if (queue_depth < qi->max_depth) { + + *(qi->skbuff_vector + qi->tail) = skb; + mmsg_vector += qi->tail; + iov_count = prep_msg( + vp, + skb, + mmsg_vector->msg_hdr.msg_iov + ); + if (iov_count < 1) + goto drop; + mmsg_vector->msg_hdr.msg_iovlen = iov_count; + mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; + mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; + queue_depth = vector_advancetail(qi, 1); + } else + goto drop; + spin_unlock(&qi->tail_lock); + return queue_depth; +drop: + qi->dev->stats.tx_dropped++; + if (skb != NULL) { + packet_len = skb->len; + dev_consume_skb_any(skb); + netdev_completed_queue(qi->dev, 1, packet_len); + } + spin_unlock(&qi->tail_lock); + return queue_depth; +} + +static int consume_vector_skbs(struct vector_queue *qi, int count) +{ + struct sk_buff *skb; + int skb_index; + int bytes_compl = 0; + + for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) { + skb = *(qi->skbuff_vector + skb_index); + /* mark as empty to ensure correct destruction if + * needed + */ + bytes_compl += skb->len; + *(qi->skbuff_vector + skb_index) = NULL; + dev_consume_skb_any(skb); + } + qi->dev->stats.tx_bytes += bytes_compl; + qi->dev->stats.tx_packets += count; + netdev_completed_queue(qi->dev, count, bytes_compl); + return vector_advancehead(qi, count); +} + +/* + * Generic vector deque via sendmmsg with support for forming headers + * using transport specific callback. 
+/*
+ * Generic vector dequeue via sendmmsg with support for forming headers
+ * using transport specific callback. Allows GRE, L2TPv3, RAW and
+ * other transports to use a common dequeue procedure in vector mode
+ */
+
+
+static int vector_send(struct vector_queue *qi)
+{
+	struct vector_private *vp = netdev_priv(qi->dev);
+	struct mmsghdr *send_from;
+	int result = 0, send_len, queue_depth = qi->max_depth;
+
+	if (spin_trylock(&qi->head_lock)) {
+		if (spin_trylock(&qi->tail_lock)) {
+			/* update queue_depth to current value */
+			queue_depth = qi->queue_depth;
+			spin_unlock(&qi->tail_lock);
+			while (queue_depth > 0) {
+				/* Calculate the start of the vector */
+				send_len = queue_depth;
+				send_from = qi->mmsg_vector;
+				send_from += qi->head;
+				/* Adjust vector size if wraparound */
+				if (send_len + qi->head > qi->max_depth)
+					send_len = qi->max_depth - qi->head;
+				/* Try to TX as many packets as possible */
+				if (send_len > 0) {
+					result = uml_vector_sendmmsg(
+						vp->fds->tx_fd,
+						send_from,
+						send_len,
+						0
+					);
+					vp->in_write_poll =
+						(result != send_len);
+				}
+				/* For some of the sendmmsg error scenarios
+				 * we may end up being unsure of the TX
+				 * success for all packets. It is safer to
+				 * declare them all TX-ed and blame the
+				 * network.
+				 */
+				if (result < 0) {
+					if (net_ratelimit())
+						netdev_err(vp->dev, "sendmmsg err=%i\n",
+							result);
+					vp->in_error = true;
+					result = send_len;
+				}
+				if (result > 0) {
+					queue_depth =
+						consume_vector_skbs(qi, result);
+					/* This is equivalent to a TX IRQ.
+					 * Restart the upper layers to feed us
+					 * more packets.
+					 */
+					if (result > vp->estats.tx_queue_max)
+						vp->estats.tx_queue_max = result;
+					vp->estats.tx_queue_running_average =
+						(vp->estats.tx_queue_running_average + result) >> 1;
+				}
+				netif_wake_queue(qi->dev);
+				/* if TX is busy, break out of the send loop,
+				 * poll write IRQ will reschedule xmit for us
+				 */
+				if (result != send_len) {
+					vp->estats.tx_restart_queue++;
+					break;
+				}
+			}
+		}
+		spin_unlock(&qi->head_lock);
+	}
+	return queue_depth;
+}
+
+/* Queue destructor. Deliberately stateless so we can use
+ * it in queue cleanup if initialization fails.
+ */
+
+static void destroy_queue(struct vector_queue *qi)
+{
+	int i;
+	struct iovec *iov;
+	struct vector_private *vp;
+	struct mmsghdr *mmsg_vector;
+
+	if (qi == NULL)
+		return;
+	/* only dereference qi->dev after the NULL check above */
+	vp = netdev_priv(qi->dev);
+	/* deallocate any skbuffs - we rely on any unused to be
+	 * set to NULL.
+	 */
+	if (qi->skbuff_vector != NULL) {
+		for (i = 0; i < qi->max_depth; i++) {
+			if (*(qi->skbuff_vector + i) != NULL)
+				dev_kfree_skb_any(*(qi->skbuff_vector + i));
+		}
+		kfree(qi->skbuff_vector);
+	}
+	/* deallocate matching IOV structures including header buffs */
+	if (qi->mmsg_vector != NULL) {
+		mmsg_vector = qi->mmsg_vector;
+		for (i = 0; i < qi->max_depth; i++) {
+			iov = mmsg_vector->msg_hdr.msg_iov;
+			if (iov != NULL) {
+				if ((vp->header_size > 0) &&
+					(iov->iov_base != NULL))
+					kfree(iov->iov_base);
+				kfree(iov);
+			}
+			mmsg_vector++;
+		}
+		kfree(qi->mmsg_vector);
+	}
+	kfree(qi);
+}
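+/* Editorial sketch (added in editing): each TX slot allocated by the
+ * constructor below carries an iovec shaped as
+ *
+ *	iov[0]   = transport header	(only when vp->header_size > 0)
+ *	iov[1]   = skb linear data
+ *	iov[2..] = paged fragments	(up to num_extra_frags entries)
+ *
+ * which is exactly the layout prep_msg() fills in at transmit time.
+ */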
+/*
+ * Queue constructor. Create a queue with a given size.
+ */
+static struct vector_queue *create_queue(
+	struct vector_private *vp,
+	int max_size,
+	int header_size,
+	int num_extra_frags)
+{
+	struct vector_queue *result;
+	int i;
+	struct iovec *iov;
+	struct mmsghdr *mmsg_vector;
+
+	result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
+	if (result == NULL)
+		return NULL;
+	result->max_depth = max_size;
+	result->dev = vp->dev;
+	result->mmsg_vector = kmalloc_array(max_size,
+		sizeof(struct mmsghdr), GFP_KERNEL);
+	if (result->mmsg_vector == NULL)
+		goto out_mmsg_fail;
+	result->skbuff_vector = kmalloc_array(max_size,
+		sizeof(void *), GFP_KERNEL);
+	if (result->skbuff_vector == NULL)
+		goto out_skb_fail;
+
+	/* further failures can be handled safely by destroy_queue */
+
+	mmsg_vector = result->mmsg_vector;
+	for (i = 0; i < max_size; i++) {
+		/* Clear all pointers - we use non-NULL as marking on
+		 * what to free on destruction
+		 */
+		*(result->skbuff_vector + i) = NULL;
+		mmsg_vector->msg_hdr.msg_iov = NULL;
+		mmsg_vector++;
+	}
+	mmsg_vector = result->mmsg_vector;
+	result->max_iov_frags = num_extra_frags;
+	for (i = 0; i < max_size; i++) {
+		if (vp->header_size > 0)
+			iov = kmalloc_array(3 + num_extra_frags,
+				sizeof(struct iovec),
+				GFP_KERNEL
+			);
+		else
+			iov = kmalloc_array(2 + num_extra_frags,
+				sizeof(struct iovec),
+				GFP_KERNEL
+			);
+		if (iov == NULL)
+			goto out_fail;
+		mmsg_vector->msg_hdr.msg_iov = iov;
+		mmsg_vector->msg_hdr.msg_iovlen = 1;
+		mmsg_vector->msg_hdr.msg_control = NULL;
+		mmsg_vector->msg_hdr.msg_controllen = 0;
+		mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
+		mmsg_vector->msg_hdr.msg_name = NULL;
+		mmsg_vector->msg_hdr.msg_namelen = 0;
+		if (vp->header_size > 0) {
+			iov->iov_base = kmalloc(header_size, GFP_KERNEL);
+			if (iov->iov_base == NULL)
+				goto out_fail;
+			iov->iov_len = header_size;
+			mmsg_vector->msg_hdr.msg_iovlen = 2;
+			iov++;
+		}
+		iov->iov_base = NULL;
+		iov->iov_len = 0;
+		mmsg_vector++;
+	}
+	spin_lock_init(&result->head_lock);
+	spin_lock_init(&result->tail_lock);
+	result->queue_depth = 0;
+	result->head = 0;
+	result->tail = 0;
+	return result;
+out_skb_fail:
+	kfree(result->mmsg_vector);
+out_mmsg_fail:
+	kfree(result);
+	return NULL;
+out_fail:
+	destroy_queue(result);
+	return NULL;
+}
+
+/*
+ * We do not use the RX queue as a proper wraparound queue for now.
+ * This is not necessary because the consumption via napi_gro_receive()
+ * happens in-line. While we can try using the return code of
+ * netif_rx() for flow control there are no drivers doing this today.
+ * For this RX specific use we ignore the tail/head locks and
+ * just read into a prepared queue filled with skbuffs.
+ */
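+/* Editorial sketch (added in editing): one NAPI pass over the RX
+ * queue is effectively
+ *
+ *	prep_queue_for_rx(qi);		// fresh skbs, iovs repointed
+ *	n = uml_vector_recvmmsg(fd, qi->mmsg_vector, budget, 0);
+ *	// verify header, trim and napi_gro_receive() each of the n
+ *	qi->queue_depth = n;		// how many to re-prep next pass
+ *
+ * head and tail are never advanced for RX, matching the note above.
+ */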
+
+static struct sk_buff *prep_skb(
+	struct vector_private *vp,
+	struct user_msghdr *msg)
+{
+	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
+	struct sk_buff *result;
+	int iov_index = 0, len;
+	struct iovec *iov = msg->msg_iov;
+	int err, nr_frags, frag;
+	skb_frag_t *skb_frag;
+
+	if (vp->req_size <= linear)
+		len = linear;
+	else
+		len = vp->req_size;
+	result = alloc_skb_with_frags(
+		linear,
+		len - vp->max_packet,
+		3,
+		&err,
+		GFP_ATOMIC
+	);
+	if (vp->header_size > 0)
+		iov_index++;
+	if (result == NULL) {
+		iov[iov_index].iov_base = NULL;
+		iov[iov_index].iov_len = 0;
+		goto done;
+	}
+	skb_reserve(result, vp->headroom);
+	result->dev = vp->dev;
+	skb_put(result, vp->max_packet);
+	result->data_len = len - vp->max_packet;
+	result->len += len - vp->max_packet;
+	skb_reset_mac_header(result);
+	result->ip_summed = CHECKSUM_NONE;
+	iov[iov_index].iov_base = result->data;
+	iov[iov_index].iov_len = vp->max_packet;
+	iov_index++;
+
+	nr_frags = skb_shinfo(result)->nr_frags;
+	for (frag = 0; frag < nr_frags; frag++) {
+		skb_frag = &skb_shinfo(result)->frags[frag];
+		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
+		if (iov[iov_index].iov_base != NULL)
+			iov[iov_index].iov_len = skb_frag_size(skb_frag);
+		else
+			iov[iov_index].iov_len = 0;
+		iov_index++;
+	}
+done:
+	msg->msg_iovlen = iov_index;
+	return result;
+}
+
+
+/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */
+
+static void prep_queue_for_rx(struct vector_queue *qi)
+{
+	struct vector_private *vp = netdev_priv(qi->dev);
+	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
+	void **skbuff_vector = qi->skbuff_vector;
+	int i;
+
+	if (qi->queue_depth == 0)
+		return;
+	for (i = 0; i < qi->queue_depth; i++) {
+		/* it is OK if allocation fails - recvmmsg with NULL data in
+		 * iov argument still performs an RX, just drops the packet.
+		 * This allows us to stop faffing around with a "drop buffer"
+		 */
+
+		*skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
+		skbuff_vector++;
+		mmsg_vector++;
+	}
+	qi->queue_depth = 0;
+}
+
+static struct vector_device *find_device(int n)
+{
+	struct vector_device *device;
+	struct list_head *ele;
+
+	spin_lock(&vector_devices_lock);
+	list_for_each(ele, &vector_devices) {
+		device = list_entry(ele, struct vector_device, list);
+		if (device->unit == n)
+			goto out;
+	}
+	device = NULL;
+ out:
+	spin_unlock(&vector_devices_lock);
+	return device;
+}
+
+static int vector_parse(char *str, int *index_out, char **str_out,
+			char **error_out)
+{
+	int len, err;
+	unsigned int n;
+	char *start = str;
+
+	len = strlen(str);
+
+	while ((*str != ':') && (strlen(str) > 1))
+		str++;
+	if (*str != ':') {
+		*error_out = "Expected ':' after device number";
+		return -EINVAL;
+	}
+	*str = '\0';
+
+	err = kstrtouint(start, 0, &n);
+	if (err < 0) {
+		*error_out = "Bad device number";
+		return err;
+	}
+
+	str++;
+	if (find_device(n)) {
+		*error_out = "Device already configured";
+		return -EINVAL;
+	}
+
+	*index_out = n;
+	*str_out = str;
+	return 0;
+}
+
+static int vector_config(char *str, char **error_out)
+{
+	int err, n;
+	char *params;
+	struct arglist *parsed;
+
+	err = vector_parse(str, &n, &params, error_out);
+	if (err != 0)
+		return err;
+
+	/* This string is broken up and the pieces used by the underlying
+	 * driver. We should copy it to make sure things do not go wrong
+	 * later.
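+	 * (The copy is kept for the lifetime of the interface:
+	 * uml_parse_vector_ifspec() stores pointers into the copied
+	 * string, and the resulting arglist is consulted again at open
+	 * time through vp->parsed.)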
+ */ + + params = kstrdup(params, GFP_KERNEL); + if (params == NULL) { + *error_out = "vector_config failed to strdup string"; + return -ENOMEM; + } + + parsed = uml_parse_vector_ifspec(params); + + if (parsed == NULL) { + *error_out = "vector_config failed to parse parameters"; + kfree(params); + return -EINVAL; + } + + vector_eth_configure(n, parsed); + return 0; +} + +static int vector_id(char **str, int *start_out, int *end_out) +{ + char *end; + int n; + + n = simple_strtoul(*str, &end, 0); + if ((*end != '\0') || (end == *str)) + return -1; + + *start_out = n; + *end_out = n; + *str = end; + return n; +} + +static int vector_remove(int n, char **error_out) +{ + struct vector_device *vec_d; + struct net_device *dev; + struct vector_private *vp; + + vec_d = find_device(n); + if (vec_d == NULL) + return -ENODEV; + dev = vec_d->dev; + vp = netdev_priv(dev); + if (vp->fds != NULL) + return -EBUSY; + unregister_netdev(dev); + platform_device_unregister(&vec_d->pdev); + return 0; +} + +/* + * There is no shared per-transport initialization code, so + * we will just initialize each interface one by one and + * add them to a list + */ + +static struct platform_driver uml_net_driver = { + .driver = { + .name = DRIVER_NAME, + }, +}; + + +static void vector_device_release(struct device *dev) +{ + struct vector_device *device = dev_get_drvdata(dev); + struct net_device *netdev = device->dev; + + list_del(&device->list); + kfree(device); + free_netdev(netdev); +} + +/* Bog standard recv using recvmsg - not used normally unless the user + * explicitly specifies not to use recvmmsg vector RX. + */ + +static int vector_legacy_rx(struct vector_private *vp) +{ + int pkt_len; + struct user_msghdr hdr; + struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */ + int iovpos = 0; + struct sk_buff *skb; + int header_check; + + hdr.msg_name = NULL; + hdr.msg_namelen = 0; + hdr.msg_iov = (struct iovec *) &iov; + hdr.msg_control = NULL; + hdr.msg_controllen = 0; + hdr.msg_flags = 0; + + if (vp->header_size > 0) { + iov[0].iov_base = vp->header_rxbuffer; + iov[0].iov_len = vp->header_size; + } + + skb = prep_skb(vp, &hdr); + + if (skb == NULL) { + /* Read a packet into drop_buffer and don't do + * anything with it. + */ + iov[iovpos].iov_base = drop_buffer; + iov[iovpos].iov_len = DROP_BUFFER_SIZE; + hdr.msg_iovlen = 1; + vp->dev->stats.rx_dropped++; + } + + pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0); + if (pkt_len < 0) { + vp->in_error = true; + return pkt_len; + } + + if (skb != NULL) { + if (pkt_len > vp->header_size) { + if (vp->header_size > 0) { + header_check = vp->verify_header( + vp->header_rxbuffer, skb, vp); + if (header_check < 0) { + dev_kfree_skb_irq(skb); + vp->dev->stats.rx_dropped++; + vp->estats.rx_encaps_errors++; + return 0; + } + if (header_check > 0) { + vp->estats.rx_csum_offload_good++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } + pskb_trim(skb, pkt_len - vp->rx_header_size); + skb->protocol = eth_type_trans(skb, skb->dev); + vp->dev->stats.rx_bytes += skb->len; + vp->dev->stats.rx_packets++; + napi_gro_receive(&vp->napi, skb); + } else { + dev_kfree_skb_irq(skb); + } + } + return pkt_len; +} + +/* + * Packet at a time TX which falls back to vector TX if the + * underlying transport is busy. 
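+ * (This path is taken when VECTOR_TX is clear in vp->options, e.g.
+ * for transports configured with vec=0 - see get_transport_options().)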
+ */
+
+
+
+static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
+{
+	struct iovec iov[3 + MAX_IOV_SIZE];
+	int iov_count, pkt_len = 0;
+
+	iov[0].iov_base = vp->header_txbuffer;
+	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
+
+	if (iov_count < 1)
+		goto drop;
+
+	pkt_len = uml_vector_writev(
+		vp->fds->tx_fd,
+		(struct iovec *) &iov,
+		iov_count
+	);
+
+	if (pkt_len < 0)
+		goto drop;
+
+	netif_trans_update(vp->dev);
+	netif_wake_queue(vp->dev);
+
+	if (pkt_len > 0) {
+		vp->dev->stats.tx_bytes += skb->len;
+		vp->dev->stats.tx_packets++;
+	} else {
+		vp->dev->stats.tx_dropped++;
+	}
+	consume_skb(skb);
+	return pkt_len;
+drop:
+	vp->dev->stats.tx_dropped++;
+	consume_skb(skb);
+	if (pkt_len < 0)
+		vp->in_error = true;
+	return pkt_len;
+}
+
+/*
+ * Receive as many messages as we can in one call using the special
+ * mmsg vector matched to an skb vector which we prepared earlier.
+ */
+
+static int vector_mmsg_rx(struct vector_private *vp, int budget)
+{
+	int packet_count, i;
+	struct vector_queue *qi = vp->rx_queue;
+	struct sk_buff *skb;
+	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
+	void **skbuff_vector = qi->skbuff_vector;
+	int header_check;
+
+	/* Refresh the vector and make sure it is with new skbs and the
+	 * iovs are updated to point to them.
+	 */
+
+	prep_queue_for_rx(qi);
+
+	/* Fire the Lazy Gun - get as many packets as we can in one go. */
+
+	if (budget > qi->max_depth)
+		budget = qi->max_depth;
+
+	/* respect the NAPI budget capped above instead of always
+	 * reading a full queue worth of packets
+	 */
+	packet_count = uml_vector_recvmmsg(
+		vp->fds->rx_fd, qi->mmsg_vector, budget, 0);
+
+	if (packet_count < 0)
+		vp->in_error = true;
+
+	if (packet_count <= 0)
+		return packet_count;
+
+	/* We treat packet processing as enqueue, buffer refresh as dequeue
+	 * The queue_depth tells us how many buffers have been used and how
+	 * many do we need to prep the next time prep_queue_for_rx() is called.
+	 */
+
+	qi->queue_depth = packet_count;
+
+	for (i = 0; i < packet_count; i++) {
+		skb = (*skbuff_vector);
+		if (mmsg_vector->msg_len > vp->header_size) {
+			if (vp->header_size > 0) {
+				header_check = vp->verify_header(
+					mmsg_vector->msg_hdr.msg_iov->iov_base,
+					skb,
+					vp
+				);
+				if (header_check < 0) {
+					/* Overlay header failed to verify - discard.
+					 * We can actually keep this skb and reuse it,
+					 * but that will make the prep logic too
+					 * complex. A bare continue would skip the
+					 * cursor advance at the bottom of the loop,
+					 * so step both vectors here.
+					 */
+					dev_kfree_skb_irq(skb);
+					vp->estats.rx_encaps_errors++;
+					*skbuff_vector = NULL;
+					mmsg_vector++;
+					skbuff_vector++;
+					continue;
+				}
+				if (header_check > 0) {
+					vp->estats.rx_csum_offload_good++;
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				}
+			}
+			pskb_trim(skb,
+				mmsg_vector->msg_len - vp->rx_header_size);
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			/*
+			 * We do not need to lock on updating stats here
+			 * The interrupt loop is non-reentrant.
+			 */
+			vp->dev->stats.rx_bytes += skb->len;
+			vp->dev->stats.rx_packets++;
+			napi_gro_receive(&vp->napi, skb);
+		} else {
+			/* Overlay header too short to do anything - discard.
+			 * We can actually keep this skb and reuse it,
+			 * but that will make the prep logic too complex.
+			 */
+			if (skb != NULL)
+				dev_kfree_skb_irq(skb);
+		}
+		(*skbuff_vector) = NULL;
+		/* Move to the next buffer element */
+		mmsg_vector++;
+		skbuff_vector++;
+	}
+	if (packet_count > 0) {
+		if (vp->estats.rx_queue_max < packet_count)
+			vp->estats.rx_queue_max = packet_count;
+		vp->estats.rx_queue_running_average =
+			(vp->estats.rx_queue_running_average + packet_count) >> 1;
+	}
+	return packet_count;
+}
+
+static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct vector_private *vp = netdev_priv(dev);
+	int queue_depth = 0;
+
+	if (vp->in_error) {
+		deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
+		if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
+			deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
+		return NETDEV_TX_BUSY;
+	}
+
+	if ((vp->options & VECTOR_TX) == 0) {
+		writev_tx(vp, skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* We do BQL only in the vector path, no point doing it in
+	 * packet at a time mode as there is no device queue
+	 */
+
+	netdev_sent_queue(vp->dev, skb->len);
+	queue_depth = vector_enqueue(vp->tx_queue, skb);
+
+	if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
+		/* mod_timer() expects an absolute expiry time, while
+		 * vp->coalesce is an interval in jiffies
+		 */
+		mod_timer(&vp->tl, jiffies + vp->coalesce);
+		return NETDEV_TX_OK;
+	} else {
+		queue_depth = vector_send(vp->tx_queue);
+		if (queue_depth > 0)
+			napi_schedule(&vp->napi);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct vector_private *vp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return IRQ_NONE;
+	napi_schedule(&vp->napi);
+	return IRQ_HANDLED;
+
+}
+
+static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct vector_private *vp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return IRQ_NONE;
+	/* We need to pay attention to it only if we got
+	 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
+	 * we ignore it. In the future, it may be worth
+	 * it to improve the IRQ controller a bit to make
+	 * tweaking the IRQ mask less costly.
+	 */
+
+	napi_schedule(&vp->napi);
+	return IRQ_HANDLED;
+
+}
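+/* Editorial note (added in editing): irq_rr below round-robins device
+ * IRQ numbers within the vector IRQ window - the first opened device
+ * gets VECTOR_BASE_IRQ for RX and, with VECTOR_TX set, the next slot
+ * for TX, wrapping modulo VECTOR_IRQ_SPACE.
+ */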
+static int irq_rr;
+
+static int vector_net_close(struct net_device *dev)
+{
+	struct vector_private *vp = netdev_priv(dev);
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+	del_timer(&vp->tl);
+
+	if (vp->fds == NULL)
+		return 0;
+
+	/* Disable and free all IRQS */
+	if (vp->rx_irq > 0) {
+		um_free_irq(vp->rx_irq, dev);
+		vp->rx_irq = 0;
+	}
+	if (vp->tx_irq > 0) {
+		um_free_irq(vp->tx_irq, dev);
+		vp->tx_irq = 0;
+	}
+	napi_disable(&vp->napi);
+	netif_napi_del(&vp->napi);
+	if (vp->fds->rx_fd > 0) {
+		if (vp->bpf)
+			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
+		os_close_file(vp->fds->rx_fd);
+		vp->fds->rx_fd = -1;
+	}
+	if (vp->fds->tx_fd > 0) {
+		os_close_file(vp->fds->tx_fd);
+		vp->fds->tx_fd = -1;
+	}
+	if (vp->bpf != NULL)
+		kfree(vp->bpf->filter);
+	kfree(vp->bpf);
+	vp->bpf = NULL;
+	kfree(vp->fds->remote_addr);
+	kfree(vp->transport_data);
+	kfree(vp->header_rxbuffer);
+	kfree(vp->header_txbuffer);
+	if (vp->rx_queue != NULL)
+		destroy_queue(vp->rx_queue);
+	if (vp->tx_queue != NULL)
+		destroy_queue(vp->tx_queue);
+	kfree(vp->fds);
+	vp->fds = NULL;
+	spin_lock_irqsave(&vp->lock, flags);
+	vp->opened = false;
+	vp->in_error = false;
+	spin_unlock_irqrestore(&vp->lock, flags);
+	return 0;
+}
+
+static int vector_poll(struct napi_struct *napi, int budget)
+{
+	struct vector_private *vp = container_of(napi, struct vector_private, napi);
+	int work_done = 0;
+	int err;
+	bool tx_enqueued = false;
+
+	if ((vp->options & VECTOR_TX) != 0)
+		tx_enqueued = (vector_send(vp->tx_queue) > 0);
+	if ((vp->options & VECTOR_RX) > 0)
+		err = vector_mmsg_rx(vp, budget);
+	else {
+		err = vector_legacy_rx(vp);
+		if (err > 0)
+			err = 1;
+	}
+	if (err > 0)
+		work_done += err;
+
+	if (tx_enqueued || err > 0)
+		napi_schedule(napi);
+	if (work_done < budget)
+		napi_complete_done(napi, work_done);
+	return work_done;
+}
+
+static void vector_reset_tx(struct work_struct *work)
+{
+	struct vector_private *vp =
+		container_of(work, struct vector_private, reset_tx);
+	netdev_reset_queue(vp->dev);
+	netif_start_queue(vp->dev);
+	netif_wake_queue(vp->dev);
+}
+
+static int vector_net_open(struct net_device *dev)
+{
+	struct vector_private *vp = netdev_priv(dev);
+	unsigned long flags;
+	int err = -EINVAL;
+	struct vector_device *vdevice;
+
+	spin_lock_irqsave(&vp->lock, flags);
+	if (vp->opened) {
+		spin_unlock_irqrestore(&vp->lock, flags);
+		return -ENXIO;
+	}
+	vp->opened = true;
+	spin_unlock_irqrestore(&vp->lock, flags);
+
+	vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
+
+	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
+
+	if (vp->fds == NULL)
+		goto out_close;
+
+	if (build_transport_data(vp) < 0)
+		goto out_close;
+
+	if ((vp->options & VECTOR_RX) > 0) {
+		vp->rx_queue = create_queue(
+			vp,
+			get_depth(vp->parsed),
+			vp->rx_header_size,
+			MAX_IOV_SIZE
+		);
+		/* create_queue() can fail on allocation - bail out
+		 * instead of dereferencing a NULL queue below
+		 */
+		if (vp->rx_queue == NULL) {
+			err = -ENOMEM;
+			goto out_close;
+		}
+		vp->rx_queue->queue_depth = get_depth(vp->parsed);
+	} else {
+		vp->header_rxbuffer = kmalloc(
+			vp->rx_header_size,
+			GFP_KERNEL
+		);
+		if (vp->header_rxbuffer == NULL)
+			goto out_close;
+	}
+	if ((vp->options & VECTOR_TX) > 0) {
+		vp->tx_queue = create_queue(
+			vp,
+			get_depth(vp->parsed),
+			vp->header_size,
+			MAX_IOV_SIZE
+		);
+		if (vp->tx_queue == NULL) {
+			err = -ENOMEM;
+			goto out_close;
+		}
+	} else {
+		vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
+		if (vp->header_txbuffer == NULL)
+			goto out_close;
+	}
+
+	netif_napi_add_weight(vp->dev, &vp->napi, vector_poll,
+			get_depth(vp->parsed));
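+	/* Editorial note (added in editing): the NAPI weight is set to
+	 * the queue depth so that one poll pass can drain a full
+	 * recvmmsg burst.
+	 */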
napi_enable(&vp->napi); + + /* READ IRQ */ + err = um_request_irq( + irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd, + IRQ_READ, vector_rx_interrupt, + IRQF_SHARED, dev->name, dev); + if (err < 0) { + netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err); + err = -ENETUNREACH; + goto out_close; + } + vp->rx_irq = irq_rr + VECTOR_BASE_IRQ; + dev->irq = irq_rr + VECTOR_BASE_IRQ; + irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE; + + /* WRITE IRQ - we need it only if we have vector TX */ + if ((vp->options & VECTOR_TX) > 0) { + err = um_request_irq( + irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd, + IRQ_WRITE, vector_tx_interrupt, + IRQF_SHARED, dev->name, dev); + if (err < 0) { + netdev_err(dev, + "vector_open: failed to get tx irq(%d)\n", err); + err = -ENETUNREACH; + goto out_close; + } + vp->tx_irq = irq_rr + VECTOR_BASE_IRQ; + irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE; + } + + if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { + if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) + vp->options |= VECTOR_BPF; + } + if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL)) + vp->bpf = uml_vector_default_bpf(dev->dev_addr); + + if (vp->bpf != NULL) + uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); + + netif_start_queue(dev); + vector_reset_stats(vp); + + /* clear buffer - it can happen that the host side of the interface + * is full when we get here. In this case, new data is never queued, + * SIGIOs never arrive, and the net never works. + */ + + napi_schedule(&vp->napi); + + vdevice = find_device(vp->unit); + vdevice->opened = 1; + + if ((vp->options & VECTOR_TX) != 0) + add_timer(&vp->tl); + return 0; +out_close: + vector_net_close(dev); + return err; +} + + +static void vector_net_set_multicast_list(struct net_device *dev) +{ + /* TODO: - we can do some BPF games here */ + return; +} + +static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct vector_private *vp = netdev_priv(dev); + + vp->estats.tx_timeout_count++; + netif_trans_update(dev); + schedule_work(&vp->reset_tx); +} + +static netdev_features_t vector_fix_features(struct net_device *dev, + netdev_features_t features) +{ + features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + return features; +} + +static int vector_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct vector_private *vp = netdev_priv(dev); + /* Adjust buffer sizes for GSO/GRO. Unfortunately, there is + * no way to negotiate it on raw sockets, so we can change + * only our side. 
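+ * (For example, "ethtool -K vecN gro off" drops req_size back to an
+ * MTU-sized buffer, so prep_skb() stops reserving 64KiB of fragment
+ * space per receive slot.)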
+ */
+	if (features & NETIF_F_GRO)
+		/* All new frame buffers will be GRO-sized */
+		vp->req_size = 65536;
+	else
+		/* All new frame buffers will be normal sized */
+		vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vector_net_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	vector_rx_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static void vector_net_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+}
+
+static int vector_net_load_bpf_flash(struct net_device *dev,
+				struct ethtool_flash *efl)
+{
+	struct vector_private *vp = netdev_priv(dev);
+	struct vector_device *vdevice;
+	const struct firmware *fw;
+	int result = 0;
+
+	if (!(vp->options & VECTOR_BPF_FLASH)) {
+		netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
+		return -1;
+	}
+
+	spin_lock(&vp->lock);
+
+	if (vp->bpf != NULL) {
+		if (vp->opened)
+			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
+		kfree(vp->bpf->filter);
+		vp->bpf->filter = NULL;
+	} else {
+		/* kzalloc: ->filter must be NULL if we bail out at
+		 * flash_fail before kmemdup() fills it in
+		 */
+		vp->bpf = kzalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
+		if (vp->bpf == NULL) {
+			netdev_err(dev, "failed to allocate memory for firmware\n");
+			goto flash_fail;
+		}
+	}
+
+	vdevice = find_device(vp->unit);
+
+	if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
+		goto flash_fail;
+
+	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
+	if (!vp->bpf->filter)
+		goto free_buffer;
+
+	vp->bpf->len = fw->size / sizeof(struct sock_filter);
+	release_firmware(fw);
+
+	if (vp->opened)
+		result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
+
+	spin_unlock(&vp->lock);
+
+	return result;
+
+free_buffer:
+	release_firmware(fw);
+
+flash_fail:
+	spin_unlock(&vp->lock);
+	if (vp->bpf != NULL)
+		kfree(vp->bpf->filter);
+	kfree(vp->bpf);
+	vp->bpf = NULL;
+	return -1;
+}
+
+static void vector_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring,
+				struct kernel_ethtool_ringparam *kernel_ring,
+				struct netlink_ext_ack *extack)
+{
+	struct vector_private *vp = netdev_priv(netdev);
+
+	ring->rx_max_pending = vp->rx_queue->max_depth;
+	ring->tx_max_pending = vp->tx_queue->max_depth;
+	ring->rx_pending = vp->rx_queue->max_depth;
+	ring->tx_pending = vp->tx_queue->max_depth;
+}
+
+static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_TEST:
+		*buf = '\0';
+		break;
+	case ETH_SS_STATS:
+		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static int vector_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return 0;
+	case ETH_SS_STATS:
+		return VECTOR_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void vector_get_ethtool_stats(struct net_device *dev,
+	struct ethtool_stats *estats,
+	u64 *tmp_stats)
+{
+	struct vector_private *vp = netdev_priv(dev);
+
+	memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
+}
+
+static int vector_get_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
+{
+	struct vector_private *vp = netdev_priv(netdev);
+
+	ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
+	return 0;
+}
+
+static int vector_set_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct
netlink_ext_ack *extack) +{ + struct vector_private *vp = netdev_priv(netdev); + + vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000; + if (vp->coalesce == 0) + vp->coalesce = 1; + return 0; +} + +static const struct ethtool_ops vector_net_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS, + .get_drvinfo = vector_net_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, + .get_ringparam = vector_get_ringparam, + .get_strings = vector_get_strings, + .get_sset_count = vector_get_sset_count, + .get_ethtool_stats = vector_get_ethtool_stats, + .get_coalesce = vector_get_coalesce, + .set_coalesce = vector_set_coalesce, + .flash_device = vector_net_load_bpf_flash, +}; + + +static const struct net_device_ops vector_netdev_ops = { + .ndo_open = vector_net_open, + .ndo_stop = vector_net_close, + .ndo_start_xmit = vector_net_start_xmit, + .ndo_set_rx_mode = vector_net_set_multicast_list, + .ndo_tx_timeout = vector_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_fix_features = vector_fix_features, + .ndo_set_features = vector_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vector_net_poll_controller, +#endif +}; + +static void vector_timer_expire(struct timer_list *t) +{ + struct vector_private *vp = from_timer(vp, t, tl); + + vp->estats.tx_kicks++; + napi_schedule(&vp->napi); +} + + + +static void vector_eth_configure( + int n, + struct arglist *def + ) +{ + struct vector_device *device; + struct net_device *dev; + struct vector_private *vp; + int err; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (device == NULL) { + printk(KERN_ERR "eth_configure failed to allocate struct " + "vector_device\n"); + return; + } + dev = alloc_etherdev(sizeof(struct vector_private)); + if (dev == NULL) { + printk(KERN_ERR "eth_configure: failed to allocate struct " + "net_device for vec%d\n", n); + goto out_free_device; + } + + dev->mtu = get_mtu(def); + + INIT_LIST_HEAD(&device->list); + device->unit = n; + + /* If this name ends up conflicting with an existing registered + * netdevice, that is OK, register_netdev{,ice}() will notice this + * and fail. 
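+ * (A duplicate "vec<N>" on our own side is rejected earlier by
+ * vector_parse() with "Device already configured", so a clash here
+ * means the name is held by another driver.)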
+ */
+	snprintf(dev->name, sizeof(dev->name), "vec%d", n);
+	uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
+	vp = netdev_priv(dev);
+
+	/* sysfs register */
+	if (!driver_registered) {
+		platform_driver_register(&uml_net_driver);
+		driver_registered = 1;
+	}
+	device->pdev.id = n;
+	device->pdev.name = DRIVER_NAME;
+	device->pdev.dev.release = vector_device_release;
+	dev_set_drvdata(&device->pdev.dev, device);
+	if (platform_device_register(&device->pdev))
+		goto out_free_netdev;
+	SET_NETDEV_DEV(dev, &device->pdev.dev);
+
+	device->dev = dev;
+
+	*vp = ((struct vector_private)
+		{
+		.list			= LIST_HEAD_INIT(vp->list),
+		.dev			= dev,
+		.unit			= n,
+		.options		= get_transport_options(def),
+		.rx_irq			= 0,
+		.tx_irq			= 0,
+		.parsed			= def,
+		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
+		/* TODO - we need to calculate headroom so that ip header
+		 * is 16 byte aligned all the time
+		 */
+		.headroom		= get_headroom(def),
+		.form_header		= NULL,
+		.verify_header		= NULL,
+		.header_rxbuffer	= NULL,
+		.header_txbuffer	= NULL,
+		.header_size		= 0,
+		.rx_header_size		= 0,
+		.rexmit_scheduled	= false,
+		.opened			= false,
+		.transport_data		= NULL,
+		.in_write_poll		= false,
+		.coalesce		= 2,
+		.req_size		= get_req_size(def),
+		.in_error		= false,
+		.bpf			= NULL
+	});
+
+	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
+	INIT_WORK(&vp->reset_tx, vector_reset_tx);
+
+	timer_setup(&vp->tl, vector_timer_expire, 0);
+	spin_lock_init(&vp->lock);
+
+	/* FIXME */
+	dev->netdev_ops = &vector_netdev_ops;
+	dev->ethtool_ops = &vector_net_ethtool_ops;
+	dev->watchdog_timeo = (HZ >> 1);
+	/* primary IRQ - fixme */
+	dev->irq = 0; /* we will adjust this once opened */
+
+	rtnl_lock();
+	err = register_netdevice(dev);
+	rtnl_unlock();
+	if (err)
+		goto out_undo_user_init;
+
+	spin_lock(&vector_devices_lock);
+	list_add(&device->list, &vector_devices);
+	spin_unlock(&vector_devices_lock);
+
+	return;
+
+out_undo_user_init:
+	/* unregistering the platform device releases both the
+	 * vector_device and the netdev via vector_device_release()
+	 */
+	platform_device_unregister(&device->pdev);
+	return;
+out_free_netdev:
+	free_netdev(dev);
+out_free_device:
+	kfree(device);
+}
+
+
+
+
+/*
+ * Invoked late in the init
+ */
+
+static int __init vector_init(void)
+{
+	struct list_head *ele;
+	struct vector_cmd_line_arg *def;
+	struct arglist *parsed;
+
+	list_for_each(ele, &vec_cmd_line) {
+		def = list_entry(ele, struct vector_cmd_line_arg, list);
+		parsed = uml_parse_vector_ifspec(def->arguments);
+		if (parsed != NULL)
+			vector_eth_configure(def->unit, parsed);
+	}
+	return 0;
+}
+
+
+/* Invoked at initial argument parsing, only stores
+ * arguments until a proper vector_init is called
+ * later
+ */
+
+static int __init vector_setup(char *str)
+{
+	char *error;
+	int n, err;
+	struct vector_cmd_line_arg *new;
+
+	err = vector_parse(str, &n, &str, &error);
+	if (err) {
+		printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
+				 str, error);
+		return 1;
+	}
+	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+	if (!new)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*new));
+	INIT_LIST_HEAD(&new->list);
+	new->unit = n;
+	new->arguments = str;
+	list_add_tail(&new->list, &vec_cmd_line);
+	return 1;
+}
+
+__setup("vec", vector_setup);
+__uml_help(vector_setup,
+"vec[0-9]+:<option>=<value>,<option>=<value>\n"
+"	Configure a vector io network device.\n\n"
+);
+
+late_initcall(vector_init);
+
+static struct mc_device vector_mc = {
+	.list		= LIST_HEAD_INIT(vector_mc.list),
+	.name		= "vec",
+	.config		= vector_config,
+	.get_config	= NULL,
+	.id		= vector_id,
+	.remove		= vector_remove,
+};
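+/* Editorial example (hypothetical invocation, added in editing): the
+ * mconsole device defined above, registered in vector_net_init()
+ * below, allows runtime (de)configuration from the host, along the
+ * lines of
+ *
+ *	uml_mconsole <umid> config vec0=transport=tap,ifname=tap0
+ *	uml_mconsole <umid> remove vec0
+ *
+ * which lands in vector_config() / vector_remove() above.
+ */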
+#ifdef CONFIG_INET
+static int vector_inetaddr_event(
+	struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vector_inetaddr_notifier = {
+	.notifier_call		= vector_inetaddr_event,
+};
+
+static void inet_register(void)
+{
+	register_inetaddr_notifier(&vector_inetaddr_notifier);
+}
+#else
+static inline void inet_register(void)
+{
+}
+#endif
+
+static int vector_net_init(void)
+{
+	mconsole_register_dev(&vector_mc);
+	inet_register();
+	return 0;
+}
+
+__initcall(vector_net_init);
+
+
+
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
new file mode 100644
index 0000000000..2a1fa8e0f3
--- /dev/null
+++ b/arch/um/drivers/vector_kern.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_VECTOR_KERN_H
+#define __UM_VECTOR_KERN_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+#include "vector_user.h"
+
+/* Queue structure specially adapted for multiple enqueue/dequeue
+ * in a recvmmsg/sendmmsg context
+ */
+
+/* Dequeue method */
+
+#define QUEUE_SENDMSG 0
+#define QUEUE_SENDMMSG 1
+
+#define VECTOR_RX 1
+#define VECTOR_TX (1 << 1)
+#define VECTOR_BPF (1 << 2)
+#define VECTOR_QDISC_BYPASS (1 << 3)
+#define VECTOR_BPF_FLASH (1 << 4)
+
+#define ETH_MAX_PACKET 1500
+#define ETH_HEADER_OTHER 32 /* just in case someone decides to go mad on QinQ */
+
+#define MAX_FILTER_PROG (2 << 16)
+
+struct vector_queue {
+	struct mmsghdr *mmsg_vector;
+	void **skbuff_vector;
+	/* backlink to device which owns us */
+	struct net_device *dev;
+	spinlock_t head_lock;
+	spinlock_t tail_lock;
+	int queue_depth, head, tail, max_depth, max_iov_frags;
+	short options;
+};
+
+struct vector_estats {
+	uint64_t rx_queue_max;
+	uint64_t rx_queue_running_average;
+	uint64_t tx_queue_max;
+	uint64_t tx_queue_running_average;
+	uint64_t rx_encaps_errors;
+	uint64_t tx_timeout_count;
+	uint64_t tx_restart_queue;
+	uint64_t tx_kicks;
+	uint64_t tx_flow_control_xon;
+	uint64_t tx_flow_control_xoff;
+	uint64_t rx_csum_offload_good;
+	uint64_t rx_csum_offload_errors;
+	uint64_t sg_ok;
+	uint64_t sg_linearized;
+};
+
+#define VERIFY_HEADER_NOK -1
+#define VERIFY_HEADER_OK 0
+#define VERIFY_CSUM_OK 1
+
+struct vector_private {
+	struct list_head list;
+	spinlock_t lock;
+	struct net_device *dev;
+	struct napi_struct napi	____cacheline_aligned;
+
+	int unit;
+
+	/* Timeout timer in TX */
+
+	struct timer_list tl;
+
+	/* Scheduled TX queue reset work */
+	struct work_struct reset_tx;
+	struct vector_fds *fds;
+
+	struct vector_queue *rx_queue;
+	struct vector_queue *tx_queue;
+
+	int rx_irq;
+	int tx_irq;
+
+	struct arglist *parsed;
+
+	void *transport_data; /* transport specific params if needed */
+
+	int max_packet;
+	int req_size; /* different from max packet - used for TSO */
+	int headroom;
+
+	int options;
+
+	/* remote address if any - some transports will leave this as null */
+
+	int header_size;
+	int rx_header_size;
+	int coalesce;
+
+	void *header_rxbuffer;
+	void *header_txbuffer;
+
+	int (*form_header)(uint8_t *header,
+		struct sk_buff *skb, struct vector_private *vp);
+	int (*verify_header)(uint8_t *header,
+		struct sk_buff *skb, struct vector_private *vp);
+
+	spinlock_t stats_lock;
+
+	bool rexmit_scheduled;
+	bool opened;
+	bool in_write_poll;
+	bool in_error;
+
+	/* guest allowed to use
ethtool flash to load bpf */ + bool bpf_via_flash; + + /* ethtool stats */ + + struct vector_estats estats; + struct sock_fprog *bpf; + + char user[]; +}; + +extern int build_transport_data(struct vector_private *vp); + +#endif diff --git a/arch/um/drivers/vector_transports.c b/arch/um/drivers/vector_transports.c new file mode 100644 index 0000000000..0794d23f07 --- /dev/null +++ b/arch/um/drivers/vector_transports.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017 - Cambridge Greys Limited + * Copyright (C) 2011 - 2014 Cisco Systems Inc + */ + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <asm/byteorder.h> +#include <uapi/linux/ip.h> +#include <uapi/linux/virtio_net.h> +#include <linux/virtio_net.h> +#include <linux/virtio_byteorder.h> +#include <linux/netdev_features.h> +#include "vector_user.h" +#include "vector_kern.h" + +#define GOOD_LINEAR 512 +#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface" + +struct gre_minimal_header { + uint16_t header; + uint16_t arptype; +}; + + +struct uml_gre_data { + uint32_t rx_key; + uint32_t tx_key; + uint32_t sequence; + + bool ipv6; + bool has_sequence; + bool pin_sequence; + bool checksum; + bool key; + struct gre_minimal_header expected_header; + + uint32_t checksum_offset; + uint32_t key_offset; + uint32_t sequence_offset; + +}; + +struct uml_l2tpv3_data { + uint64_t rx_cookie; + uint64_t tx_cookie; + uint64_t rx_session; + uint64_t tx_session; + uint32_t counter; + + bool udp; + bool ipv6; + bool has_counter; + bool pin_counter; + bool cookie; + bool cookie_is_64; + + uint32_t cookie_offset; + uint32_t session_offset; + uint32_t counter_offset; +}; + +static int l2tpv3_form_header(uint8_t *header, + struct sk_buff *skb, struct vector_private *vp) +{ + struct uml_l2tpv3_data *td = vp->transport_data; + uint32_t *counter; + + if (td->udp) + *(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET); + (*(uint32_t *) (header + td->session_offset)) = td->tx_session; + + if (td->cookie) { + if (td->cookie_is_64) + (*(uint64_t *)(header + td->cookie_offset)) = + td->tx_cookie; + else + (*(uint32_t *)(header + td->cookie_offset)) = + td->tx_cookie; + } + if (td->has_counter) { + counter = (uint32_t *)(header + td->counter_offset); + if (td->pin_counter) { + *counter = 0; + } else { + td->counter++; + *counter = cpu_to_be32(td->counter); + } + } + return 0; +} + +static int gre_form_header(uint8_t *header, + struct sk_buff *skb, struct vector_private *vp) +{ + struct uml_gre_data *td = vp->transport_data; + uint32_t *sequence; + *((uint32_t *) header) = *((uint32_t *) &td->expected_header); + if (td->key) + (*(uint32_t *) (header + td->key_offset)) = td->tx_key; + if (td->has_sequence) { + sequence = (uint32_t *)(header + td->sequence_offset); + if (td->pin_sequence) + *sequence = 0; + else + *sequence = cpu_to_be32(++td->sequence); + } + return 0; +} + +static int raw_form_header(uint8_t *header, + struct sk_buff *skb, struct vector_private *vp) +{ + struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header; + + virtio_net_hdr_from_skb( + skb, + vheader, + virtio_legacy_is_little_endian(), + false, + 0 + ); + + return 0; +} + +static int l2tpv3_verify_header( + uint8_t *header, struct sk_buff *skb, struct vector_private *vp) +{ + struct uml_l2tpv3_data *td = vp->transport_data; + uint32_t *session; + uint64_t cookie; + + if ((!td->udp) && (!td->ipv6)) + header += sizeof(struct iphdr) /* fix for ipv4 raw */; + + /* we 
do not do a strict check for "data" packets as per
+ * the RFC spec because the pure IP spec does not have
+ * that anyway.
+ */
+
+	if (td->cookie) {
+		if (td->cookie_is_64)
+			cookie = *(uint64_t *)(header + td->cookie_offset);
+		else
+			cookie = *(uint32_t *)(header + td->cookie_offset);
+		if (cookie != td->rx_cookie) {
+			if (net_ratelimit())
+				netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
+			return -1;
+		}
+	}
+	session = (uint32_t *) (header + td->session_offset);
+	if (*session != td->rx_session) {
+		if (net_ratelimit())
+			netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
+		return -1;
+	}
+	return 0;
+}
+
+static int gre_verify_header(
+	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
+{
+
+	uint32_t key;
+	struct uml_gre_data *td = vp->transport_data;
+
+	if (!td->ipv6)
+		header += sizeof(struct iphdr) /* fix for ipv4 raw */;
+
+	if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
+		if (net_ratelimit())
+			netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
+				*((uint32_t *) &td->expected_header),
+				*((uint32_t *) header)
+			);
+		return -1;
+	}
+
+	if (td->key) {
+		key = (*(uint32_t *)(header + td->key_offset));
+		if (key != td->rx_key) {
+			if (net_ratelimit())
+				netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
+						key, td->rx_key);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int raw_verify_header(
+	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
+{
+	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
+
+	if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
+	    (vp->req_size != 65536)) {
+		if (net_ratelimit())
+			netdev_err(
+				vp->dev,
+				GSO_ERROR
+			);
+	}
+	if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
+		return 1;
+
+	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
+	return 0;
+}
+
+static bool get_uint_param(
+	struct arglist *def, char *param, unsigned int *result)
+{
+	char *arg = uml_vector_fetch_arg(def, param);
+
+	if (arg != NULL) {
+		/* kstrtouint matches the unsigned int result pointer */
+		if (kstrtouint(arg, 0, result) == 0)
+			return true;
+	}
+	return false;
+}
+
+static bool get_ulong_param(
+	struct arglist *def, char *param, unsigned long *result)
+{
+	char *arg = uml_vector_fetch_arg(def, param);
+
+	if (arg != NULL) {
+		if (kstrtoul(arg, 0, result) == 0)
+			return true;
+		/* parse failure - do not pretend we got a value */
+		return false;
+	}
+	return false;
+}
+
+static int build_gre_transport_data(struct vector_private *vp)
+{
+	struct uml_gre_data *td;
+	unsigned int temp_int;
+	unsigned int temp_rx;
+	unsigned int temp_tx;
+
+	/* kzalloc so optional fields such as pin_sequence start false */
+	vp->transport_data = kzalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
+	if (vp->transport_data == NULL)
+		return -ENOMEM;
+	td = vp->transport_data;
+	td->sequence = 0;
+
+	td->expected_header.arptype = GRE_IRB;
+	td->expected_header.header = 0;
+
+	vp->form_header = &gre_form_header;
+	vp->verify_header = &gre_verify_header;
+	vp->header_size = 4;
+	td->key_offset = 4;
+	td->sequence_offset = 4;
+	td->checksum_offset = 4;
+
+	td->ipv6 = false;
+	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
+		if (temp_int > 0)
+			td->ipv6 = true;
+	}
+	td->key = false;
+	if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
+		if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
+			td->key = true;
+			td->expected_header.header |= GRE_MODE_KEY;
+			td->rx_key = cpu_to_be32(temp_rx);
+			td->tx_key = cpu_to_be32(temp_tx);
+			vp->header_size += 4;
+			td->sequence_offset += 4;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	td->has_sequence = false;
+	if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
+		if (temp_int > 0) {
+			vp->header_size += 4;
+			td->has_sequence = true;
+			td->expected_header.header |= GRE_MODE_SEQUENCE;
+			if (get_uint_param(
+				vp->parsed, "pin_sequence", &temp_int)) {
+				if (temp_int > 0)
+					td->pin_sequence = true;
+			}
+		}
+	}
+	vp->rx_header_size = vp->header_size;
+	if (!td->ipv6)
+		vp->rx_header_size += sizeof(struct iphdr);
+	return 0;
+}
+
+static int build_l2tpv3_transport_data(struct vector_private *vp)
+{
+
+	struct uml_l2tpv3_data *td;
+	unsigned int temp_int, temp_rxs, temp_txs;
+	unsigned long temp_rx;
+	unsigned long temp_tx;
+
+	/* kzalloc so optional fields such as udp and pin_counter
+	 * start false
+	 */
+	vp->transport_data = kzalloc(
+		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);
+
+	if (vp->transport_data == NULL)
+		return -ENOMEM;
+
+	td = vp->transport_data;
+
+	vp->form_header = &l2tpv3_form_header;
+	vp->verify_header = &l2tpv3_verify_header;
+	td->counter = 0;
+
+	vp->header_size = 4;
+	td->session_offset = 0;
+	td->cookie_offset = 4;
+	td->counter_offset = 4;
+
+
+	td->ipv6 = false;
+	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
+		if (temp_int > 0)
+			td->ipv6 = true;
+	}
+
+	if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
+		if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
+			td->tx_session = cpu_to_be32(temp_txs);
+			td->rx_session = cpu_to_be32(temp_rxs);
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	td->cookie_is_64 = false;
+	if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
+		if (temp_int > 0)
+			td->cookie_is_64 = true;
+	}
+	td->cookie = false;
+	if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
+		if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
+			td->cookie = true;
+			if (td->cookie_is_64) {
+				td->rx_cookie = cpu_to_be64(temp_rx);
+				td->tx_cookie = cpu_to_be64(temp_tx);
+				vp->header_size += 8;
+				td->counter_offset += 8;
+			} else {
+				td->rx_cookie = cpu_to_be32(temp_rx);
+				td->tx_cookie = cpu_to_be32(temp_tx);
+				vp->header_size += 4;
+				td->counter_offset += 4;
+			}
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	td->has_counter = false;
+	if (get_uint_param(vp->parsed, "counter", &temp_int)) {
+		if (temp_int > 0) {
+			td->has_counter = true;
+			vp->header_size += 4;
+			if (get_uint_param(
+				vp->parsed, "pin_counter", &temp_int)) {
+				if (temp_int > 0)
+					td->pin_counter = true;
+			}
+		}
+	}
+
+	if (get_uint_param(vp->parsed, "udp", &temp_int)) {
+		if (temp_int > 0) {
+			td->udp = true;
+			vp->header_size += 4;
+			td->counter_offset += 4;
+			td->session_offset += 4;
+			td->cookie_offset += 4;
+		}
+	}
+
+	vp->rx_header_size = vp->header_size;
+	if ((!td->ipv6) && (!td->udp))
+		vp->rx_header_size += sizeof(struct iphdr);
+
+	return 0;
+}
+
+static int build_raw_transport_data(struct vector_private *vp)
+{
+	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
+		if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
+			return -1;
+		vp->form_header = &raw_form_header;
+		vp->verify_header = &raw_verify_header;
+		vp->header_size = sizeof(struct virtio_net_hdr);
+		vp->rx_header_size = sizeof(struct virtio_net_hdr);
+		vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
+		vp->dev->features |=
+			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			 NETIF_F_TSO | NETIF_F_GRO);
+		netdev_info(
+			vp->dev,
+			"raw: using vnet headers for tso and tx/rx checksum"
+		);
+	}
+	return 0;
+}
+
+static int build_hybrid_transport_data(struct vector_private *vp)
+{
+	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
+		vp->form_header = &raw_form_header;
+		vp->verify_header = &raw_verify_header;
+		vp->header_size = sizeof(struct virtio_net_hdr);
+		vp->rx_header_size = sizeof(struct virtio_net_hdr);
+		vp->dev->hw_features |=
+			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
+		vp->dev->features |=
(NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO); + netdev_info( + vp->dev, + "tap/raw hybrid: using vnet headers for tso and tx/rx checksum" + ); + } else { + return 0; /* do not try to enable tap too if raw failed */ + } + if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) + return 0; + return -1; +} + +static int build_tap_transport_data(struct vector_private *vp) +{ + /* "Pure" tap uses the same fd for rx and tx */ + if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) { + vp->form_header = &raw_form_header; + vp->verify_header = &raw_verify_header; + vp->header_size = sizeof(struct virtio_net_hdr); + vp->rx_header_size = sizeof(struct virtio_net_hdr); + vp->dev->hw_features |= + (NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO); + vp->dev->features |= + (NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO); + netdev_info( + vp->dev, + "tap: using vnet headers for tso and tx/rx checksum" + ); + return 0; + } + return -1; +} + + +static int build_bess_transport_data(struct vector_private *vp) +{ + vp->form_header = NULL; + vp->verify_header = NULL; + vp->header_size = 0; + vp->rx_header_size = 0; + return 0; +} + +int build_transport_data(struct vector_private *vp) +{ + char *transport = uml_vector_fetch_arg(vp->parsed, "transport"); + + if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0) + return build_gre_transport_data(vp); + if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0) + return build_l2tpv3_transport_data(vp); + if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) + return build_raw_transport_data(vp); + if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) + return build_tap_transport_data(vp); + if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0) + return build_hybrid_transport_data(vp); + if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0) + return build_bess_transport_data(vp); + return 0; +} + diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c new file mode 100644 index 0000000000..c719e1ec46 --- /dev/null +++ b/arch/um/drivers/vector_user.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <stdbool.h> +#include <stdio.h> +#include <unistd.h> +#include <stdarg.h> +#include <errno.h> +#include <stddef.h> +#include <string.h> +#include <sys/ioctl.h> +#include <net/if.h> +#include <linux/if_tun.h> +#include <arpa/inet.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <sys/socket.h> +#include <sys/un.h> +#include <netinet/ip.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <sys/wait.h> +#include <sys/uio.h> +#include <linux/virtio_net.h> +#include <netdb.h> +#include <stdlib.h> +#include <os.h> +#include <limits.h> +#include <um_malloc.h> +#include "vector_user.h" + +#define ID_GRE 0 +#define ID_L2TPV3 1 +#define ID_BESS 2 +#define ID_MAX 2 + +#define TOKEN_IFNAME "ifname" +#define TOKEN_SCRIPT "ifup" + +#define TRANS_RAW "raw" +#define TRANS_RAW_LEN strlen(TRANS_RAW) + +#define TRANS_FD "fd" +#define TRANS_FD_LEN strlen(TRANS_FD) + +#define VNET_HDR_FAIL "could not enable vnet headers on fd %d" +#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s" +#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i" +#define UNIX_BIND_FAIL "unix_open : could not bind socket err=%i" +#define BPF_ATTACH_FAIL "Failed to attach filter size %d prog %px to %d, err %d\n" +#define BPF_DETACH_FAIL "Failed to detach filter size %d 
prog %px from %d, err %d\n"
+
+#define MAX_UN_LEN 107
+
+static const char padchar[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+static const char *template = "tapXXXXXX";
+
+/* This is very ugly and brute force lookup, but it is done
+ * only once at initialization so not worth doing hashes or
+ * anything more intelligent
+ */
+
+char *uml_vector_fetch_arg(struct arglist *ifspec, char *token)
+{
+	int i;
+
+	for (i = 0; i < ifspec->numargs; i++) {
+		if (strcmp(ifspec->tokens[i], token) == 0)
+			return ifspec->values[i];
+	}
+	return NULL;
+
+}
+
+struct arglist *uml_parse_vector_ifspec(char *arg)
+{
+	struct arglist *result;
+	int pos, len;
+	bool parsing_token = true, next_starts = true;
+
+	if (arg == NULL)
+		return NULL;
+	result = uml_kmalloc(sizeof(struct arglist), UM_GFP_KERNEL);
+	if (result == NULL)
+		return NULL;
+	result->numargs = 0;
+	len = strlen(arg);
+	for (pos = 0; pos < len; pos++) {
+		if (next_starts) {
+			if (parsing_token) {
+				result->tokens[result->numargs] = arg + pos;
+			} else {
+				result->values[result->numargs] = arg + pos;
+				result->numargs++;
+			}
+			next_starts = false;
+		}
+		if (*(arg + pos) == '=') {
+			if (parsing_token)
+				parsing_token = false;
+			else
+				goto cleanup;
+			next_starts = true;
+			(*(arg + pos)) = '\0';
+		}
+		if (*(arg + pos) == ',') {
+			parsing_token = true;
+			next_starts = true;
+			(*(arg + pos)) = '\0';
+		}
+	}
+	return result;
+cleanup:
+	printk(UM_KERN_ERR "vector_setup - Couldn't parse '%s'\n", arg);
+	kfree(result);
+	return NULL;
+}
+
+/*
+ * Socket/FD configuration functions. These return a structure
+ * of rx and tx descriptors to cover cases where these are not
+ * the same (e.g. read via a raw socket and write via a tap).
+ */
+
+#define PATH_NET_TUN "/dev/net/tun"
+
+
+static int create_tap_fd(char *iface)
+{
+	struct ifreq ifr;
+	int fd = -1;
+	int err = -ENOMEM, offload;
+
+	fd = open(PATH_NET_TUN, O_RDWR);
+	if (fd < 0) {
+		err = -errno;
+		printk(UM_KERN_ERR "uml_tap: failed to open tun device\n");
+		goto tap_fd_cleanup;
+	}
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
+	strscpy(ifr.ifr_name, iface, sizeof(ifr.ifr_name));
+
+	err = ioctl(fd, TUNSETIFF, (void *) &ifr);
+	if (err != 0) {
+		printk(UM_KERN_ERR "uml_tap: failed to select tap interface\n");
+		goto tap_fd_cleanup;
+	}
+
+	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
+	ioctl(fd, TUNSETOFFLOAD, offload);
+	return fd;
+tap_fd_cleanup:
+	if (fd >= 0)
+		os_close_file(fd);
+	return err;
+}
+
+static int create_raw_fd(char *iface, int flags, int proto)
+{
+	struct ifreq ifr;
+	int fd = -1;
+	struct sockaddr_ll sock;
+	int err = -ENOMEM;
+
+	fd = socket(AF_PACKET, SOCK_RAW, flags);
+	if (fd == -1) {
+		err = -errno;
+		goto raw_fd_cleanup;
+	}
+	memset(&ifr, 0, sizeof(ifr));
+	strscpy(ifr.ifr_name, iface, sizeof(ifr.ifr_name));
+	if (ioctl(fd, SIOCGIFINDEX, (void *) &ifr) < 0) {
+		err = -errno;
+		goto raw_fd_cleanup;
+	}
+
+	sock.sll_family = AF_PACKET;
+	sock.sll_protocol = htons(proto);
+	sock.sll_ifindex = ifr.ifr_ifindex;
+
+	if (bind(fd,
+		(struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
+		err = -errno;
+		goto raw_fd_cleanup;
+	}
+	return fd;
+raw_fd_cleanup:
+	printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err);
+	if (fd >= 0)
+		os_close_file(fd);
+	return err;
+}
+
+
+static struct vector_fds *user_init_tap_fds(struct arglist *ifspec)
+{
+	int fd = -1, i;
+	char *iface;
+	struct vector_fds *result = NULL;
+	bool dynamic = false;
+	char dynamic_ifname[IFNAMSIZ];
+	char *argv[] = {NULL, NULL, NULL, NULL};
+
+	iface =
uml_vector_fetch_arg(ifspec, TOKEN_IFNAME); + if (iface == NULL) { + dynamic = true; + iface = dynamic_ifname; + srand(getpid()); + } + + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result == NULL) { + printk(UM_KERN_ERR "uml_tap: failed to allocate file descriptors\n"); + goto tap_cleanup; + } + result->rx_fd = -1; + result->tx_fd = -1; + result->remote_addr = NULL; + result->remote_addr_size = 0; + + /* TAP */ + do { + if (dynamic) { + strcpy(iface, template); + for (i = 0; i < strlen(iface); i++) { + if (iface[i] == 'X') { + iface[i] = padchar[rand() % strlen(padchar)]; + } + } + } + fd = create_tap_fd(iface); + if ((fd < 0) && (!dynamic)) { + printk(UM_KERN_ERR "uml_tap: failed to create tun interface\n"); + goto tap_cleanup; + } + result->tx_fd = fd; + result->rx_fd = fd; + } while (fd < 0); + + argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT); + if (argv[0]) { + argv[1] = iface; + run_helper(NULL, NULL, argv); + } + + return result; +tap_cleanup: + printk(UM_KERN_ERR "user_init_tap: init failed, error %d", fd); + kfree(result); + return NULL; +} + +static struct vector_fds *user_init_hybrid_fds(struct arglist *ifspec) +{ + char *iface; + struct vector_fds *result = NULL; + char *argv[] = {NULL, NULL, NULL, NULL}; + + iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME); + if (iface == NULL) { + printk(UM_KERN_ERR "uml_tap: failed to parse interface spec\n"); + goto hybrid_cleanup; + } + + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result == NULL) { + printk(UM_KERN_ERR "uml_tap: failed to allocate file descriptors\n"); + goto hybrid_cleanup; + } + result->rx_fd = -1; + result->tx_fd = -1; + result->remote_addr = NULL; + result->remote_addr_size = 0; + + /* TAP */ + + result->tx_fd = create_tap_fd(iface); + if (result->tx_fd < 0) { + printk(UM_KERN_ERR "uml_tap: failed to create tun interface: %i\n", result->tx_fd); + goto hybrid_cleanup; + } + + /* RAW */ + + result->rx_fd = create_raw_fd(iface, ETH_P_ALL, ETH_P_ALL); + if (result->rx_fd == -1) { + printk(UM_KERN_ERR + "uml_tap: failed to create paired raw socket: %i\n", result->rx_fd); + goto hybrid_cleanup; + } + + argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT); + if (argv[0]) { + argv[1] = iface; + run_helper(NULL, NULL, argv); + } + return result; +hybrid_cleanup: + printk(UM_KERN_ERR "user_init_hybrid: init failed"); + kfree(result); + return NULL; +} + +static struct vector_fds *user_init_unix_fds(struct arglist *ifspec, int id) +{ + int fd = -1; + int socktype; + char *src, *dst; + struct vector_fds *result = NULL; + struct sockaddr_un *local_addr = NULL, *remote_addr = NULL; + + src = uml_vector_fetch_arg(ifspec, "src"); + dst = uml_vector_fetch_arg(ifspec, "dst"); + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result == NULL) { + printk(UM_KERN_ERR "unix open:cannot allocate remote addr"); + goto unix_cleanup; + } + remote_addr = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL); + if (remote_addr == NULL) { + printk(UM_KERN_ERR "unix open:cannot allocate remote addr"); + goto unix_cleanup; + } + + switch (id) { + case ID_BESS: + socktype = SOCK_SEQPACKET; + if ((src != NULL) && (strlen(src) <= MAX_UN_LEN)) { + local_addr = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL); + if (local_addr == NULL) { + printk(UM_KERN_ERR "bess open:cannot allocate local addr"); + goto unix_cleanup; + } + local_addr->sun_family = AF_UNIX; + memcpy(local_addr->sun_path, src, strlen(src) + 1); + } + if ((dst == NULL) || (strlen(dst) > MAX_UN_LEN)) + 
goto unix_cleanup; + remote_addr->sun_family = AF_UNIX; + memcpy(remote_addr->sun_path, dst, strlen(dst) + 1); + break; + default: + printk(KERN_ERR "Unsupported unix socket type\n"); + return NULL; + } + + fd = socket(AF_UNIX, socktype, 0); + if (fd == -1) { + printk(UM_KERN_ERR + "unix open: could not open socket, error = %d", + -errno + ); + goto unix_cleanup; + } + if (local_addr != NULL) { + if (bind(fd, (struct sockaddr *) local_addr, sizeof(struct sockaddr_un))) { + printk(UM_KERN_ERR UNIX_BIND_FAIL, errno); + goto unix_cleanup; + } + } + switch (id) { + case ID_BESS: + if (connect(fd, (const struct sockaddr *) remote_addr, sizeof(struct sockaddr_un)) < 0) { + printk(UM_KERN_ERR "bess open:cannot connect to %s %i", remote_addr->sun_path, -errno); + goto unix_cleanup; + } + break; + } + result->rx_fd = fd; + result->tx_fd = fd; + result->remote_addr_size = sizeof(struct sockaddr_un); + result->remote_addr = remote_addr; + return result; +unix_cleanup: + if (fd >= 0) + os_close_file(fd); + kfree(remote_addr); + kfree(result); + return NULL; +} + +static int strtofd(const char *nptr) +{ + long fd; + char *endptr; + + if (nptr == NULL) + return -1; + + errno = 0; + fd = strtol(nptr, &endptr, 10); + if (nptr == endptr || + errno != 0 || + *endptr != '\0' || + fd < 0 || + fd > INT_MAX) { + return -1; + } + return fd; +} + +static struct vector_fds *user_init_fd_fds(struct arglist *ifspec) +{ + int fd = -1; + char *fdarg = NULL; + struct vector_fds *result = NULL; + + fdarg = uml_vector_fetch_arg(ifspec, "fd"); + fd = strtofd(fdarg); + if (fd == -1) { + printk(UM_KERN_ERR "fd open: bad or missing fd argument"); + goto fd_cleanup; + } + + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result == NULL) { + printk(UM_KERN_ERR "fd open: allocation failed"); + goto fd_cleanup; + } + + result->rx_fd = fd; + result->tx_fd = fd; + result->remote_addr_size = 0; + result->remote_addr = NULL; + return result; + +fd_cleanup: + if (fd >= 0) + os_close_file(fd); + kfree(result); + return NULL; +} + +static struct vector_fds *user_init_raw_fds(struct arglist *ifspec) +{ + int rxfd = -1, txfd = -1; + int err = -ENOMEM; + char *iface; + struct vector_fds *result = NULL; + char *argv[] = {NULL, NULL, NULL, NULL}; + + iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME); + if (iface == NULL) + goto raw_cleanup; + + rxfd = create_raw_fd(iface, ETH_P_ALL, ETH_P_ALL); + if (rxfd == -1) { + err = -errno; + goto raw_cleanup; + } + txfd = create_raw_fd(iface, 0, ETH_P_IP); /* Turn off RX on this fd */ + if (txfd == -1) { + err = -errno; + goto raw_cleanup; + } + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result != NULL) { + result->rx_fd = rxfd; + result->tx_fd = txfd; + result->remote_addr = NULL; + result->remote_addr_size = 0; + } + argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT); + if (argv[0]) { + argv[1] = iface; + run_helper(NULL, NULL, argv); + } + return result; +raw_cleanup: + printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err); + kfree(result); + return NULL; +} + + +bool uml_raw_enable_qdisc_bypass(int fd) +{ + int optval = 1; + + if (setsockopt(fd, + SOL_PACKET, PACKET_QDISC_BYPASS, + &optval, sizeof(optval)) != 0) { + return false; + } + return true; +} + +bool uml_raw_enable_vnet_headers(int fd) +{ + int optval = 1; + + if (setsockopt(fd, + SOL_PACKET, PACKET_VNET_HDR, + &optval, sizeof(optval)) != 0) { + printk(UM_KERN_INFO VNET_HDR_FAIL, fd); + return false; + } + return true; +} +bool uml_tap_enable_vnet_headers(int fd) +{ + unsigned 
int features;
+	int len = sizeof(struct virtio_net_hdr);
+
+	if (ioctl(fd, TUNGETFEATURES, &features) == -1) {
+		printk(UM_KERN_INFO TUN_GET_F_FAIL, strerror(errno));
+		return false;
+	}
+	if ((features & IFF_VNET_HDR) == 0) {
+		printk(UM_KERN_INFO "tapraw: No VNET HEADER support");
+		return false;
+	}
+	ioctl(fd, TUNSETVNETHDRSZ, &len);
+	return true;
+}
+
+static struct vector_fds *user_init_socket_fds(struct arglist *ifspec, int id)
+{
+	int err = -ENOMEM;
+	int fd = -1, gairet;
+	struct addrinfo srchints;
+	struct addrinfo dsthints;
+	bool v6, udp;
+	char *value;
+	char *src, *dst, *srcport, *dstport;
+	struct addrinfo *gairesult = NULL;
+	struct vector_fds *result = NULL;
+
+	value = uml_vector_fetch_arg(ifspec, "v6");
+	v6 = false;
+	udp = false;
+	if (value != NULL) {
+		if (strtol((const char *) value, NULL, 10) > 0)
+			v6 = true;
+	}
+
+	value = uml_vector_fetch_arg(ifspec, "udp");
+	if (value != NULL) {
+		if (strtol((const char *) value, NULL, 10) > 0)
+			udp = true;
+	}
+	src = uml_vector_fetch_arg(ifspec, "src");
+	dst = uml_vector_fetch_arg(ifspec, "dst");
+	srcport = uml_vector_fetch_arg(ifspec, "srcport");
+	dstport = uml_vector_fetch_arg(ifspec, "dstport");
+
+	memset(&dsthints, 0, sizeof(dsthints));
+
+	if (v6)
+		dsthints.ai_family = AF_INET6;
+	else
+		dsthints.ai_family = AF_INET;
+
+	switch (id) {
+	case ID_GRE:
+		dsthints.ai_socktype = SOCK_RAW;
+		dsthints.ai_protocol = IPPROTO_GRE;
+		break;
+	case ID_L2TPV3:
+		if (udp) {
+			dsthints.ai_socktype = SOCK_DGRAM;
+			dsthints.ai_protocol = 0;
+		} else {
+			dsthints.ai_socktype = SOCK_RAW;
+			dsthints.ai_protocol = IPPROTO_L2TP;
+		}
+		break;
+	default:
+		printk(KERN_ERR "Unsupported socket type\n");
+		return NULL;
+	}
+	memcpy(&srchints, &dsthints, sizeof(struct addrinfo));
+
+	gairet = getaddrinfo(src, srcport, &srchints, &gairesult);
+	if ((gairet != 0) || (gairesult == NULL)) {
+		printk(UM_KERN_ERR
+			"socket_open : could not resolve src, error = %s",
+			gai_strerror(gairet)
+		);
+		return NULL;
+	}
+	fd = socket(gairesult->ai_family,
+		gairesult->ai_socktype, gairesult->ai_protocol);
+	if (fd == -1) {
+		printk(UM_KERN_ERR
+			"socket_open : could not open socket, error = %d",
+			-errno
+		);
+		goto cleanup;
+	}
+	if (bind(fd,
+		(struct sockaddr *) gairesult->ai_addr,
+		gairesult->ai_addrlen)) {
+		printk(UM_KERN_ERR L2TPV3_BIND_FAIL, errno);
+		goto cleanup;
+	}
+
+	if (gairesult != NULL)
+		freeaddrinfo(gairesult);
+
+	gairesult = NULL;
+
+	gairet = getaddrinfo(dst, dstport, &dsthints, &gairesult);
+	if ((gairet != 0) || (gairesult == NULL)) {
+		printk(UM_KERN_ERR
+			"socket_open : could not resolve dst, error = %s",
+			gai_strerror(gairet)
+		);
+		/* don't leak the already-bound fd on this error path */
+		goto cleanup;
+	}
+
+	result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+	if (result != NULL) {
+		result->rx_fd = fd;
+		result->tx_fd = fd;
+		result->remote_addr = uml_kmalloc(
+			gairesult->ai_addrlen, UM_GFP_KERNEL);
+		if (result->remote_addr == NULL)
+			goto cleanup;
+		result->remote_addr_size = gairesult->ai_addrlen;
+		memcpy(
+			result->remote_addr,
+			gairesult->ai_addr,
+			gairesult->ai_addrlen
+		);
+	}
+	freeaddrinfo(gairesult);
+	return result;
+cleanup:
+	if (gairesult != NULL)
+		freeaddrinfo(gairesult);
+	printk(UM_KERN_ERR "user_init_socket: init failed, error %d", err);
+	if (fd >= 0)
+		os_close_file(fd);
+	if (result != NULL) {
+		kfree(result->remote_addr);
+		kfree(result);
+	}
+	return NULL;
+}
+
+struct vector_fds *uml_vector_user_open(
+	int unit,
+	struct arglist *parsed
+)
+{
+	char *transport;
+
+	if (parsed == NULL) {
+		printk(UM_KERN_ERR "no parsed config for unit
%d\n", unit); + return NULL; + } + transport = uml_vector_fetch_arg(parsed, "transport"); + if (transport == NULL) { + printk(UM_KERN_ERR "missing transport for unit %d\n", unit); + return NULL; + } + if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) + return user_init_raw_fds(parsed); + if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0) + return user_init_hybrid_fds(parsed); + if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) + return user_init_tap_fds(parsed); + if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0) + return user_init_socket_fds(parsed, ID_GRE); + if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0) + return user_init_socket_fds(parsed, ID_L2TPV3); + if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0) + return user_init_unix_fds(parsed, ID_BESS); + if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0) + return user_init_fd_fds(parsed); + return NULL; +} + + +int uml_vector_sendmsg(int fd, void *hdr, int flags) +{ + int n; + + CATCH_EINTR(n = sendmsg(fd, (struct msghdr *) hdr, flags)); + if ((n < 0) && (errno == EAGAIN)) + return 0; + if (n >= 0) + return n; + else + return -errno; +} + +int uml_vector_recvmsg(int fd, void *hdr, int flags) +{ + int n; + struct msghdr *msg = (struct msghdr *) hdr; + + CATCH_EINTR(n = readv(fd, msg->msg_iov, msg->msg_iovlen)); + if ((n < 0) && (errno == EAGAIN)) + return 0; + if (n >= 0) + return n; + else + return -errno; +} + +int uml_vector_writev(int fd, void *hdr, int iovcount) +{ + int n; + + CATCH_EINTR(n = writev(fd, (struct iovec *) hdr, iovcount)); + if ((n < 0) && ((errno == EAGAIN) || (errno == ENOBUFS))) + return 0; + if (n >= 0) + return n; + else + return -errno; +} + +int uml_vector_sendmmsg( + int fd, + void *msgvec, + unsigned int vlen, + unsigned int flags) +{ + int n; + + CATCH_EINTR(n = sendmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags)); + if ((n < 0) && ((errno == EAGAIN) || (errno == ENOBUFS))) + return 0; + if (n >= 0) + return n; + else + return -errno; +} + +int uml_vector_recvmmsg( + int fd, + void *msgvec, + unsigned int vlen, + unsigned int flags) +{ + int n; + + CATCH_EINTR( + n = recvmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags, 0)); + if ((n < 0) && (errno == EAGAIN)) + return 0; + if (n >= 0) + return n; + else + return -errno; +} +int uml_vector_attach_bpf(int fd, void *bpf) +{ + struct sock_fprog *prog = bpf; + + int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, sizeof(struct sock_fprog)); + + if (err < 0) + printk(KERN_ERR BPF_ATTACH_FAIL, prog->len, prog->filter, fd, -errno); + return err; +} + +int uml_vector_detach_bpf(int fd, void *bpf) +{ + struct sock_fprog *prog = bpf; + + int err = setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, bpf, sizeof(struct sock_fprog)); + if (err < 0) + printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno); + return err; +} +void *uml_vector_default_bpf(const void *mac) +{ + struct sock_filter *bpf; + uint32_t *mac1 = (uint32_t *)(mac + 2); + uint16_t *mac2 = (uint16_t *) mac; + struct sock_fprog *bpf_prog; + + bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL); + if (bpf_prog) { + bpf_prog->len = DEFAULT_BPF_LEN; + bpf_prog->filter = NULL; + } else { + return NULL; + } + bpf = uml_kmalloc( + sizeof(struct sock_filter) * DEFAULT_BPF_LEN, UM_GFP_KERNEL); + if (bpf) { + bpf_prog->filter = bpf; + /* ld [8] */ + bpf[0] = (struct sock_filter){ 0x20, 0, 0, 0x00000008 }; + /* jeq #0xMAC[2-6] jt 2 jf 5*/ + bpf[1] = (struct sock_filter){ 0x15, 0, 3, ntohl(*mac1)}; + /* ldh [6] */ + bpf[2] = (struct 
sock_filter){ 0x28, 0, 0, 0x00000006 };
+		/* jeq #0xMAC[0-1] jt 4 jf 5 */
+		bpf[3] = (struct sock_filter){ 0x15, 0, 1, ntohs(*mac2)};
+		/* ret #0 */
+		bpf[4] = (struct sock_filter){ 0x6, 0, 0, 0x00000000 };
+		/* ret #0x40000 */
+		bpf[5] = (struct sock_filter){ 0x6, 0, 0, 0x00040000 };
+	} else {
+		kfree(bpf_prog);
+		bpf_prog = NULL;
+	}
+	return bpf_prog;
+}
+
+/* Note - uml_vector_default_bpf() above requires a valid mac passed as
+ * its arg; this function instead loads a precompiled BPF program from
+ * a file.
+ */
+
+void *uml_vector_user_bpf(char *filename)
+{
+	struct sock_filter *bpf;
+	struct sock_fprog *bpf_prog;
+	struct stat statbuf;
+	int res, ffd = -1;
+
+	if (filename == NULL)
+		return NULL;
+
+	if (stat(filename, &statbuf) < 0) {
+		printk(KERN_ERR "Error %d reading bpf file", -errno);
+		return NULL;
+	}
+	bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
+	if (bpf_prog == NULL) {
+		printk(KERN_ERR "Failed to allocate bpf prog buffer");
+		return NULL;
+	}
+	bpf_prog->len = statbuf.st_size / sizeof(struct sock_filter);
+	bpf_prog->filter = NULL;
+	ffd = os_open_file(filename, of_read(OPENFLAGS()), 0);
+	if (ffd < 0) {
+		printk(KERN_ERR "Error %d opening bpf file", -errno);
+		goto bpf_failed;
+	}
+	bpf = uml_kmalloc(statbuf.st_size, UM_GFP_KERNEL);
+	if (bpf == NULL) {
+		printk(KERN_ERR "Failed to allocate bpf buffer");
+		goto bpf_failed;
+	}
+	bpf_prog->filter = bpf;
+	res = os_read_file(ffd, bpf, statbuf.st_size);
+	if (res < statbuf.st_size) {
+		printk(KERN_ERR "Failed to read bpf program %s, error %d", filename, res);
+		kfree(bpf);
+		goto bpf_failed;
+	}
+	os_close_file(ffd);
+	return bpf_prog;
+bpf_failed:
+	if (ffd >= 0)
+		os_close_file(ffd);
+	kfree(bpf_prog);
+	return NULL;
+}
diff --git a/arch/um/drivers/vector_user.h b/arch/um/drivers/vector_user.h
new file mode 100644
index 0000000000..59ed5f9e6e
--- /dev/null
+++ b/arch/um/drivers/vector_user.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_VECTOR_USER_H
+#define __UM_VECTOR_USER_H
+
+#define MAXVARGS 20
+
+#define TOKEN_IFNAME "ifname"
+#define TOKEN_SCRIPT "ifup"
+
+#define TRANS_RAW "raw"
+#define TRANS_RAW_LEN strlen(TRANS_RAW)
+
+#define TRANS_TAP "tap"
+#define TRANS_TAP_LEN strlen(TRANS_TAP)
+
+#define TRANS_GRE "gre"
+#define TRANS_GRE_LEN strlen(TRANS_GRE)
+
+#define TRANS_L2TPV3 "l2tpv3"
+#define TRANS_L2TPV3_LEN strlen(TRANS_L2TPV3)
+
+#define TRANS_HYBRID "hybrid"
+#define TRANS_HYBRID_LEN strlen(TRANS_HYBRID)
+
+#define TRANS_BESS "bess"
+#define TRANS_BESS_LEN strlen(TRANS_BESS)
+
+#define TRANS_FD "fd"
+#define TRANS_FD_LEN strlen(TRANS_FD)
+
+#define DEFAULT_BPF_LEN 6
+
+#ifndef IPPROTO_GRE
+#define IPPROTO_GRE 0x2F
+#endif
+
+#define GRE_MODE_CHECKSUM	cpu_to_be16(8 << 12)	/* checksum */
+#define GRE_MODE_RESERVED	cpu_to_be16(4 << 12)	/* unused */
+#define GRE_MODE_KEY		cpu_to_be16(2 << 12)	/* KEY present */
+#define GRE_MODE_SEQUENCE	cpu_to_be16(1 << 12)	/* sequence */
+
+#define GRE_IRB cpu_to_be16(0x6558)
+
+#define L2TPV3_DATA_PACKET 0x30000
+
+/* IANA-assigned IP protocol ID for L2TPv3 */
+
+#ifndef IPPROTO_L2TP
+#define IPPROTO_L2TP 0x73
+#endif
+
+struct arglist {
+	int	numargs;
+	char	*tokens[MAXVARGS];
+	char	*values[MAXVARGS];
+};
+
+/* Separating read and write FDs allows us to have different
+ * rx and tx method.
Example - read tap via raw socket using + * recvmmsg, write using legacy tap write calls + */ + +struct vector_fds { + int rx_fd; + int tx_fd; + void *remote_addr; + int remote_addr_size; +}; + +#define VECTOR_READ 1 + +extern struct arglist *uml_parse_vector_ifspec(char *arg); + +extern struct vector_fds *uml_vector_user_open( + int unit, + struct arglist *parsed +); + +extern char *uml_vector_fetch_arg( + struct arglist *ifspec, + char *token +); + +extern int uml_vector_recvmsg(int fd, void *hdr, int flags); +extern int uml_vector_sendmsg(int fd, void *hdr, int flags); +extern int uml_vector_writev(int fd, void *hdr, int iovcount); +extern int uml_vector_sendmmsg( + int fd, void *msgvec, + unsigned int vlen, + unsigned int flags +); +extern int uml_vector_recvmmsg( + int fd, + void *msgvec, + unsigned int vlen, + unsigned int flags +); +extern void *uml_vector_default_bpf(const void *mac); +extern void *uml_vector_user_bpf(char *filename); +extern int uml_vector_attach_bpf(int fd, void *bpf); +extern int uml_vector_detach_bpf(int fd, void *bpf); +extern bool uml_raw_enable_qdisc_bypass(int fd); +extern bool uml_raw_enable_vnet_headers(int fd); +extern bool uml_tap_enable_vnet_headers(int fd); + + +#endif diff --git a/arch/um/drivers/vhost_user.h b/arch/um/drivers/vhost_user.h new file mode 100644 index 0000000000..6f147cd3c9 --- /dev/null +++ b/arch/um/drivers/vhost_user.h @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Vhost-user protocol */ + +#ifndef __VHOST_USER_H__ +#define __VHOST_USER_H__ + +/* Message flags */ +#define VHOST_USER_FLAG_REPLY BIT(2) +#define VHOST_USER_FLAG_NEED_REPLY BIT(3) +/* Feature bits */ +#define VHOST_USER_F_PROTOCOL_FEATURES 30 +/* Protocol feature bits */ +#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3 +#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5 +#define VHOST_USER_PROTOCOL_F_CONFIG 9 +#define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14 +/* Vring state index masks */ +#define VHOST_USER_VRING_INDEX_MASK 0xff +#define VHOST_USER_VRING_POLL_MASK BIT(8) + +/* Supported version */ +#define VHOST_USER_VERSION 1 +/* Supported transport features */ +#define VHOST_USER_SUPPORTED_F BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES) +/* Supported protocol features */ +#define VHOST_USER_SUPPORTED_PROTOCOL_F (BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK) | \ + BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ + BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG) | \ + BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) + +enum vhost_user_request { + VHOST_USER_GET_FEATURES = 1, + VHOST_USER_SET_FEATURES = 2, + VHOST_USER_SET_OWNER = 3, + VHOST_USER_RESET_OWNER = 4, + VHOST_USER_SET_MEM_TABLE = 5, + VHOST_USER_SET_LOG_BASE = 6, + VHOST_USER_SET_LOG_FD = 7, + VHOST_USER_SET_VRING_NUM = 8, + VHOST_USER_SET_VRING_ADDR = 9, + VHOST_USER_SET_VRING_BASE = 10, + VHOST_USER_GET_VRING_BASE = 11, + VHOST_USER_SET_VRING_KICK = 12, + VHOST_USER_SET_VRING_CALL = 13, + VHOST_USER_SET_VRING_ERR = 14, + VHOST_USER_GET_PROTOCOL_FEATURES = 15, + VHOST_USER_SET_PROTOCOL_FEATURES = 16, + VHOST_USER_GET_QUEUE_NUM = 17, + VHOST_USER_SET_VRING_ENABLE = 18, + VHOST_USER_SEND_RARP = 19, + VHOST_USER_NET_SEND_MTU = 20, + VHOST_USER_SET_SLAVE_REQ_FD = 21, + VHOST_USER_IOTLB_MSG = 22, + VHOST_USER_SET_VRING_ENDIAN = 23, + VHOST_USER_GET_CONFIG = 24, + VHOST_USER_SET_CONFIG = 25, + VHOST_USER_VRING_KICK = 35, +}; + +enum vhost_user_slave_request { + VHOST_USER_SLAVE_IOTLB_MSG = 1, + VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, + VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, + VHOST_USER_SLAVE_VRING_CALL = 4, +}; + 
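+/*
+ * Illustrative sketch (added; not part of the protocol definition):
+ * built from the structures below, a GET_FEATURES exchange is a bare
+ * header,
+ *
+ *	struct vhost_user_msg req = {
+ *		.header.request = VHOST_USER_GET_FEATURES,
+ *		.header.flags = VHOST_USER_VERSION,
+ *		.header.size = 0,
+ *	};
+ *
+ * answered with header.flags carrying VHOST_USER_FLAG_REPLY, header.size
+ * set to 8 and payload.integer holding the device's feature bits; see
+ * vhost_user_get_features() in virtio_uml.c.
+ */
+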
+struct vhost_user_header { + /* + * Use enum vhost_user_request for outgoing messages, + * uses enum vhost_user_slave_request for incoming ones. + */ + u32 request; + u32 flags; + u32 size; +} __packed; + +struct vhost_user_config { + u32 offset; + u32 size; + u32 flags; + u8 payload[]; /* Variable length */ +} __packed; + +struct vhost_user_vring_state { + u32 index; + u32 num; +} __packed; + +struct vhost_user_vring_addr { + u32 index; + u32 flags; + u64 desc, used, avail, log; +} __packed; + +struct vhost_user_mem_region { + u64 guest_addr; + u64 size; + u64 user_addr; + u64 mmap_offset; +} __packed; + +struct vhost_user_mem_regions { + u32 num; + u32 padding; + struct vhost_user_mem_region regions[2]; /* Currently supporting 2 */ +} __packed; + +union vhost_user_payload { + u64 integer; + struct vhost_user_config config; + struct vhost_user_vring_state vring_state; + struct vhost_user_vring_addr vring_addr; + struct vhost_user_mem_regions mem_regions; +}; + +struct vhost_user_msg { + struct vhost_user_header header; + union vhost_user_payload payload; +} __packed; + +#endif diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c new file mode 100644 index 0000000000..97a37c0629 --- /dev/null +++ b/arch/um/drivers/virt-pci.c @@ -0,0 +1,1082 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/logic_iomem.h> +#include <linux/of_platform.h> +#include <linux/irqdomain.h> +#include <linux/virtio_pcidev.h> +#include <linux/virtio-uml.h> +#include <linux/delay.h> +#include <linux/msi.h> +#include <asm/unaligned.h> +#include <irq_kern.h> + +#define MAX_DEVICES 8 +#define MAX_MSI_VECTORS 32 +#define CFG_SPACE_SIZE 4096 + +/* for MSI-X we have a 32-bit payload */ +#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32)) +#define NUM_IRQ_MSGS 10 + +#define HANDLE_NO_FREE(ptr) ((void *)((unsigned long)(ptr) | 1)) +#define HANDLE_IS_NO_FREE(ptr) ((unsigned long)(ptr) & 1) + +struct um_pci_device { + struct virtio_device *vdev; + + /* for now just standard BARs */ + u8 resptr[PCI_STD_NUM_BARS]; + + struct virtqueue *cmd_vq, *irq_vq; + +#define UM_PCI_STAT_WAITING 0 + unsigned long status; + + int irq; + + bool platform; +}; + +struct um_pci_device_reg { + struct um_pci_device *dev; + void __iomem *iomem; +}; + +static struct pci_host_bridge *bridge; +static DEFINE_MUTEX(um_pci_mtx); +static struct um_pci_device *um_pci_platform_device; +static struct um_pci_device_reg um_pci_devices[MAX_DEVICES]; +static struct fwnode_handle *um_pci_fwnode; +static struct irq_domain *um_pci_inner_domain; +static struct irq_domain *um_pci_msi_domain; +static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)]; + +static unsigned int um_pci_max_delay_us = 40000; +module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644); + +struct um_pci_message_buffer { + struct virtio_pcidev_msg hdr; + u8 data[8]; +}; + +static struct um_pci_message_buffer __percpu *um_pci_msg_bufs; + +static int um_pci_send_cmd(struct um_pci_device *dev, + struct virtio_pcidev_msg *cmd, + unsigned int cmd_size, + const void *extra, unsigned int extra_size, + void *out, unsigned int out_size) +{ + struct scatterlist out_sg, extra_sg, in_sg; + struct scatterlist *sgs_list[] = { + [0] = &out_sg, + [1] = extra ? &extra_sg : &in_sg, + [2] = extra ? 
&in_sg : NULL, + }; + struct um_pci_message_buffer *buf; + int delay_count = 0; + int ret, len; + bool posted; + + if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf))) + return -EINVAL; + + switch (cmd->op) { + case VIRTIO_PCIDEV_OP_CFG_WRITE: + case VIRTIO_PCIDEV_OP_MMIO_WRITE: + case VIRTIO_PCIDEV_OP_MMIO_MEMSET: + /* in PCI, writes are posted, so don't wait */ + posted = !out; + WARN_ON(!posted); + break; + default: + posted = false; + break; + } + + buf = get_cpu_var(um_pci_msg_bufs); + if (buf) + memcpy(buf, cmd, cmd_size); + + if (posted) { + u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC); + + if (ncmd) { + memcpy(ncmd, cmd, cmd_size); + if (extra) + memcpy(ncmd + cmd_size, extra, extra_size); + cmd = (void *)ncmd; + cmd_size += extra_size; + extra = NULL; + extra_size = 0; + } else { + /* try without allocating memory */ + posted = false; + cmd = (void *)buf; + } + } else { + cmd = (void *)buf; + } + + sg_init_one(&out_sg, cmd, cmd_size); + if (extra) + sg_init_one(&extra_sg, extra, extra_size); + if (out) + sg_init_one(&in_sg, out, out_size); + + /* add to internal virtio queue */ + ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list, + extra ? 2 : 1, + out ? 1 : 0, + posted ? cmd : HANDLE_NO_FREE(cmd), + GFP_ATOMIC); + if (ret) { + if (posted) + kfree(cmd); + goto out; + } + + if (posted) { + virtqueue_kick(dev->cmd_vq); + ret = 0; + goto out; + } + + /* kick and poll for getting a response on the queue */ + set_bit(UM_PCI_STAT_WAITING, &dev->status); + virtqueue_kick(dev->cmd_vq); + + while (1) { + void *completed = virtqueue_get_buf(dev->cmd_vq, &len); + + if (completed == HANDLE_NO_FREE(cmd)) + break; + + if (completed && !HANDLE_IS_NO_FREE(completed)) + kfree(completed); + + if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) || + ++delay_count > um_pci_max_delay_us, + "um virt-pci delay: %d", delay_count)) { + ret = -EIO; + break; + } + udelay(1); + } + clear_bit(UM_PCI_STAT_WAITING, &dev->status); + +out: + put_cpu_var(um_pci_msg_bufs); + return ret; +} + +static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, + int size) +{ + struct um_pci_device_reg *reg = priv; + struct um_pci_device *dev = reg->dev; + struct virtio_pcidev_msg hdr = { + .op = VIRTIO_PCIDEV_OP_CFG_READ, + .size = size, + .addr = offset, + }; + /* buf->data is maximum size - we may only use parts of it */ + struct um_pci_message_buffer *buf; + u8 *data; + unsigned long ret = ULONG_MAX; + size_t bytes = sizeof(buf->data); + + if (!dev) + return ULONG_MAX; + + buf = get_cpu_var(um_pci_msg_bufs); + data = buf->data; + + if (buf) + memset(data, 0xff, bytes); + + switch (size) { + case 1: + case 2: + case 4: +#ifdef CONFIG_64BIT + case 8: +#endif + break; + default: + WARN(1, "invalid config space read size %d\n", size); + goto out; + } + + if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes)) + goto out; + + switch (size) { + case 1: + ret = data[0]; + break; + case 2: + ret = le16_to_cpup((void *)data); + break; + case 4: + ret = le32_to_cpup((void *)data); + break; +#ifdef CONFIG_64BIT + case 8: + ret = le64_to_cpup((void *)data); + break; +#endif + default: + break; + } + +out: + put_cpu_var(um_pci_msg_bufs); + return ret; +} + +static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size, + unsigned long val) +{ + struct um_pci_device_reg *reg = priv; + struct um_pci_device *dev = reg->dev; + struct { + struct virtio_pcidev_msg hdr; + /* maximum size - we may only use parts of it */ + u8 data[8]; + } msg = { + .hdr = { + .op = 
VIRTIO_PCIDEV_OP_CFG_WRITE, + .size = size, + .addr = offset, + }, + }; + + if (!dev) + return; + + switch (size) { + case 1: + msg.data[0] = (u8)val; + break; + case 2: + put_unaligned_le16(val, (void *)msg.data); + break; + case 4: + put_unaligned_le32(val, (void *)msg.data); + break; +#ifdef CONFIG_64BIT + case 8: + put_unaligned_le64(val, (void *)msg.data); + break; +#endif + default: + WARN(1, "invalid config space write size %d\n", size); + return; + } + + WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0)); +} + +static const struct logic_iomem_ops um_pci_device_cfgspace_ops = { + .read = um_pci_cfgspace_read, + .write = um_pci_cfgspace_write, +}; + +static void um_pci_bar_copy_from(void *priv, void *buffer, + unsigned int offset, int size) +{ + u8 *resptr = priv; + struct um_pci_device *dev = container_of(resptr - *resptr, + struct um_pci_device, + resptr[0]); + struct virtio_pcidev_msg hdr = { + .op = VIRTIO_PCIDEV_OP_MMIO_READ, + .bar = *resptr, + .size = size, + .addr = offset, + }; + + memset(buffer, 0xff, size); + + um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size); +} + +static unsigned long um_pci_bar_read(void *priv, unsigned int offset, + int size) +{ + /* buf->data is maximum size - we may only use parts of it */ + struct um_pci_message_buffer *buf; + u8 *data; + unsigned long ret = ULONG_MAX; + + buf = get_cpu_var(um_pci_msg_bufs); + data = buf->data; + + switch (size) { + case 1: + case 2: + case 4: +#ifdef CONFIG_64BIT + case 8: +#endif + break; + default: + WARN(1, "invalid config space read size %d\n", size); + goto out; + } + + um_pci_bar_copy_from(priv, data, offset, size); + + switch (size) { + case 1: + ret = data[0]; + break; + case 2: + ret = le16_to_cpup((void *)data); + break; + case 4: + ret = le32_to_cpup((void *)data); + break; +#ifdef CONFIG_64BIT + case 8: + ret = le64_to_cpup((void *)data); + break; +#endif + default: + break; + } + +out: + put_cpu_var(um_pci_msg_bufs); + return ret; +} + +static void um_pci_bar_copy_to(void *priv, unsigned int offset, + const void *buffer, int size) +{ + u8 *resptr = priv; + struct um_pci_device *dev = container_of(resptr - *resptr, + struct um_pci_device, + resptr[0]); + struct virtio_pcidev_msg hdr = { + .op = VIRTIO_PCIDEV_OP_MMIO_WRITE, + .bar = *resptr, + .size = size, + .addr = offset, + }; + + um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0); +} + +static void um_pci_bar_write(void *priv, unsigned int offset, int size, + unsigned long val) +{ + /* maximum size - we may only use parts of it */ + u8 data[8]; + + switch (size) { + case 1: + data[0] = (u8)val; + break; + case 2: + put_unaligned_le16(val, (void *)data); + break; + case 4: + put_unaligned_le32(val, (void *)data); + break; +#ifdef CONFIG_64BIT + case 8: + put_unaligned_le64(val, (void *)data); + break; +#endif + default: + WARN(1, "invalid config space write size %d\n", size); + return; + } + + um_pci_bar_copy_to(priv, offset, data, size); +} + +static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size) +{ + u8 *resptr = priv; + struct um_pci_device *dev = container_of(resptr - *resptr, + struct um_pci_device, + resptr[0]); + struct { + struct virtio_pcidev_msg hdr; + u8 data; + } msg = { + .hdr = { + .op = VIRTIO_PCIDEV_OP_CFG_WRITE, + .bar = *resptr, + .size = size, + .addr = offset, + }, + .data = value, + }; + + um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0); +} + +static const struct logic_iomem_ops um_pci_device_bar_ops = { + .read = um_pci_bar_read, + .write = 
um_pci_bar_write, + .set = um_pci_bar_set, + .copy_from = um_pci_bar_copy_from, + .copy_to = um_pci_bar_copy_to, +}; + +static void __iomem *um_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct um_pci_device_reg *dev; + unsigned int busn = bus->number; + + if (busn > 0) + return NULL; + + /* not allowing functions for now ... */ + if (devfn % 8) + return NULL; + + if (devfn / 8 >= ARRAY_SIZE(um_pci_devices)) + return NULL; + + dev = &um_pci_devices[devfn / 8]; + if (!dev) + return NULL; + + return (void __iomem *)((unsigned long)dev->iomem + where); +} + +static struct pci_ops um_pci_ops = { + .map_bus = um_pci_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static void um_pci_rescan(void) +{ + pci_lock_rescan_remove(); + pci_rescan_bus(bridge->bus); + pci_unlock_rescan_remove(); +} + +static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick) +{ + struct scatterlist sg[1]; + + sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE); + if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC)) + kfree(buf); + else if (kick) + virtqueue_kick(vq); +} + +static void um_pci_handle_irq_message(struct virtqueue *vq, + struct virtio_pcidev_msg *msg) +{ + struct virtio_device *vdev = vq->vdev; + struct um_pci_device *dev = vdev->priv; + + if (!dev->irq) + return; + + /* we should properly chain interrupts, but on ARCH=um we don't care */ + + switch (msg->op) { + case VIRTIO_PCIDEV_OP_INT: + generic_handle_irq(dev->irq); + break; + case VIRTIO_PCIDEV_OP_MSI: + /* our MSI message is just the interrupt number */ + if (msg->size == sizeof(u32)) + generic_handle_irq(le32_to_cpup((void *)msg->data)); + else + generic_handle_irq(le16_to_cpup((void *)msg->data)); + break; + case VIRTIO_PCIDEV_OP_PME: + /* nothing to do - we already woke up due to the message */ + break; + default: + dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op); + break; + } +} + +static void um_pci_cmd_vq_cb(struct virtqueue *vq) +{ + struct virtio_device *vdev = vq->vdev; + struct um_pci_device *dev = vdev->priv; + void *cmd; + int len; + + if (test_bit(UM_PCI_STAT_WAITING, &dev->status)) + return; + + while ((cmd = virtqueue_get_buf(vq, &len))) { + if (WARN_ON(HANDLE_IS_NO_FREE(cmd))) + continue; + kfree(cmd); + } +} + +static void um_pci_irq_vq_cb(struct virtqueue *vq) +{ + struct virtio_pcidev_msg *msg; + int len; + + while ((msg = virtqueue_get_buf(vq, &len))) { + if (len >= sizeof(*msg)) + um_pci_handle_irq_message(vq, msg); + + /* recycle the message buffer */ + um_pci_irq_vq_addbuf(vq, msg, true); + } +} + +#ifdef CONFIG_OF +/* Copied from arch/x86/kernel/devicetree.c */ +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + const void *prop; + unsigned int bus_min; + + prop = of_get_property(np, "bus-range", NULL); + if (!prop) + continue; + bus_min = be32_to_cpup(prop); + if (bus->number == bus_min) + return np; + } + return NULL; +} +#endif + +static int um_pci_init_vqs(struct um_pci_device *dev) +{ + struct virtqueue *vqs[2]; + static const char *const names[2] = { "cmd", "irq" }; + vq_callback_t *cbs[2] = { um_pci_cmd_vq_cb, um_pci_irq_vq_cb }; + int err, i; + + err = virtio_find_vqs(dev->vdev, 2, vqs, cbs, names, NULL); + if (err) + return err; + + dev->cmd_vq = vqs[0]; + dev->irq_vq = vqs[1]; + + virtio_device_ready(dev->vdev); + + for (i = 0; i < NUM_IRQ_MSGS; i++) { + void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL); + + if (msg) + 
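+			/* pre-post the buffer so the device can deliver IRQ/PME messages at any time */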
um_pci_irq_vq_addbuf(dev->irq_vq, msg, false); + } + + virtqueue_kick(dev->irq_vq); + + return 0; +} + +static void __um_pci_virtio_platform_remove(struct virtio_device *vdev, + struct um_pci_device *dev) +{ + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); + + mutex_lock(&um_pci_mtx); + um_pci_platform_device = NULL; + mutex_unlock(&um_pci_mtx); + + kfree(dev); +} + +static int um_pci_virtio_platform_probe(struct virtio_device *vdev, + struct um_pci_device *dev) +{ + int ret; + + dev->platform = true; + + mutex_lock(&um_pci_mtx); + + if (um_pci_platform_device) { + mutex_unlock(&um_pci_mtx); + ret = -EBUSY; + goto out_free; + } + + ret = um_pci_init_vqs(dev); + if (ret) { + mutex_unlock(&um_pci_mtx); + goto out_free; + } + + um_pci_platform_device = dev; + + mutex_unlock(&um_pci_mtx); + + ret = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev); + if (ret) + __um_pci_virtio_platform_remove(vdev, dev); + + return ret; + +out_free: + kfree(dev); + return ret; +} + +static int um_pci_virtio_probe(struct virtio_device *vdev) +{ + struct um_pci_device *dev; + int i, free = -1; + int err = -ENOSPC; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->vdev = vdev; + vdev->priv = dev; + + if (of_device_is_compatible(vdev->dev.of_node, "simple-bus")) + return um_pci_virtio_platform_probe(vdev, dev); + + mutex_lock(&um_pci_mtx); + for (i = 0; i < MAX_DEVICES; i++) { + if (um_pci_devices[i].dev) + continue; + free = i; + break; + } + + if (free < 0) + goto error; + + err = um_pci_init_vqs(dev); + if (err) + goto error; + + dev->irq = irq_alloc_desc(numa_node_id()); + if (dev->irq < 0) { + err = dev->irq; + goto err_reset; + } + um_pci_devices[free].dev = dev; + vdev->priv = dev; + + mutex_unlock(&um_pci_mtx); + + device_set_wakeup_enable(&vdev->dev, true); + + /* + * In order to do suspend-resume properly, don't allow VQs + * to be suspended. 
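+	 * (Added note: PCI config space and BAR accesses still go through
+	 * the command virtqueue while the system suspends and resumes, so
+	 * it must remain usable across the transition.)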
+ */ + virtio_uml_set_no_vq_suspend(vdev, true); + + um_pci_rescan(); + return 0; +err_reset: + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); +error: + mutex_unlock(&um_pci_mtx); + kfree(dev); + return err; +} + +static void um_pci_virtio_remove(struct virtio_device *vdev) +{ + struct um_pci_device *dev = vdev->priv; + int i; + + if (dev->platform) { + of_platform_depopulate(&vdev->dev); + __um_pci_virtio_platform_remove(vdev, dev); + return; + } + + device_set_wakeup_enable(&vdev->dev, false); + + mutex_lock(&um_pci_mtx); + for (i = 0; i < MAX_DEVICES; i++) { + if (um_pci_devices[i].dev != dev) + continue; + + um_pci_devices[i].dev = NULL; + irq_free_desc(dev->irq); + + break; + } + mutex_unlock(&um_pci_mtx); + + if (i < MAX_DEVICES) { + struct pci_dev *pci_dev; + + pci_dev = pci_get_slot(bridge->bus, i); + if (pci_dev) + pci_stop_and_remove_bus_device_locked(pci_dev); + } + + /* Stop all virtqueues */ + virtio_reset_device(vdev); + dev->cmd_vq = NULL; + dev->irq_vq = NULL; + vdev->config->del_vqs(vdev); + + kfree(dev); +} + +static struct virtio_device_id id_table[] = { + { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; +MODULE_DEVICE_TABLE(virtio, id_table); + +static struct virtio_driver um_pci_virtio_driver = { + .driver.name = "virtio-pci", + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = um_pci_virtio_probe, + .remove = um_pci_virtio_remove, +}; + +static struct resource virt_cfgspace_resource = { + .name = "PCI config space", + .start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE, + .end = 0xf0000000 - 1, + .flags = IORESOURCE_MEM, +}; + +static long um_pci_map_cfgspace(unsigned long offset, size_t size, + const struct logic_iomem_ops **ops, + void **priv) +{ + if (WARN_ON(size > CFG_SPACE_SIZE || offset % CFG_SPACE_SIZE)) + return -EINVAL; + + if (offset / CFG_SPACE_SIZE < MAX_DEVICES) { + *ops = &um_pci_device_cfgspace_ops; + *priv = &um_pci_devices[offset / CFG_SPACE_SIZE]; + return 0; + } + + WARN(1, "cannot map offset 0x%lx/0x%zx\n", offset, size); + return -ENOENT; +} + +static const struct logic_iomem_region_ops um_pci_cfgspace_ops = { + .map = um_pci_map_cfgspace, +}; + +static struct resource virt_iomem_resource = { + .name = "PCI iomem", + .start = 0xf0000000, + .end = 0xffffffff, + .flags = IORESOURCE_MEM, +}; + +struct um_pci_map_iomem_data { + unsigned long offset; + size_t size; + const struct logic_iomem_ops **ops; + void **priv; + long ret; +}; + +static int um_pci_map_iomem_walk(struct pci_dev *pdev, void *_data) +{ + struct um_pci_map_iomem_data *data = _data; + struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8]; + struct um_pci_device *dev; + int i; + + if (!reg->dev) + return 0; + + for (i = 0; i < ARRAY_SIZE(dev->resptr); i++) { + struct resource *r = &pdev->resource[i]; + + if ((r->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) + continue; + + /* + * must be the whole or part of the resource, + * not allowed to only overlap + */ + if (data->offset < r->start || data->offset > r->end) + continue; + if (data->offset + data->size - 1 > r->end) + continue; + + dev = reg->dev; + *data->ops = &um_pci_device_bar_ops; + dev->resptr[i] = i; + *data->priv = &dev->resptr[i]; + data->ret = data->offset - r->start; + + /* no need to continue */ + return 1; + } + + return 0; +} + +static long um_pci_map_iomem(unsigned long offset, size_t size, + const struct logic_iomem_ops **ops, + void **priv) +{ + struct um_pci_map_iomem_data data = { + /* we want the full address here */ + .offset = offset + 
virt_iomem_resource.start, + .size = size, + .ops = ops, + .priv = priv, + .ret = -ENOENT, + }; + + pci_walk_bus(bridge->bus, um_pci_map_iomem_walk, &data); + return data.ret; +} + +static const struct logic_iomem_region_ops um_pci_iomem_ops = { + .map = um_pci_map_iomem, +}; + +static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + /* + * This is a very low address and not actually valid 'physical' memory + * in UML, so we can simply map MSI(-X) vectors to there, it cannot be + * legitimately written to by the device in any other way. + * We use the (virtual) IRQ number here as the message to simplify the + * code that receives the message, where for now we simply trust the + * device to send the correct message. + */ + msg->address_hi = 0; + msg->address_lo = 0xa0000; + msg->data = data->irq; +} + +static struct irq_chip um_pci_msi_bottom_irq_chip = { + .name = "UM virtio MSI", + .irq_compose_msi_msg = um_pci_compose_msi_msg, +}; + +static int um_pci_inner_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + unsigned long bit; + + WARN_ON(nr_irqs != 1); + + mutex_lock(&um_pci_mtx); + bit = find_first_zero_bit(um_pci_msi_used, MAX_MSI_VECTORS); + if (bit >= MAX_MSI_VECTORS) { + mutex_unlock(&um_pci_mtx); + return -ENOSPC; + } + + set_bit(bit, um_pci_msi_used); + mutex_unlock(&um_pci_mtx); + + irq_domain_set_info(domain, virq, bit, &um_pci_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + + return 0; +} + +static void um_pci_inner_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + + mutex_lock(&um_pci_mtx); + + if (!test_bit(d->hwirq, um_pci_msi_used)) + pr_err("trying to free unused MSI#%lu\n", d->hwirq); + else + __clear_bit(d->hwirq, um_pci_msi_used); + + mutex_unlock(&um_pci_mtx); +} + +static const struct irq_domain_ops um_pci_inner_domain_ops = { + .alloc = um_pci_inner_domain_alloc, + .free = um_pci_inner_domain_free, +}; + +static struct irq_chip um_pci_msi_irq_chip = { + .name = "UM virtio PCIe MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info um_pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .chip = &um_pci_msi_irq_chip, +}; + +static struct resource busn_resource = { + .name = "PCI busn", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUS, +}; + +static int um_pci_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) +{ + struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8]; + + if (WARN_ON(!reg->dev)) + return -EINVAL; + + /* Yes, we map all pins to the same IRQ ... doesn't matter for now. 
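+	 * Every emulated device gets exactly one interrupt, allocated in
+	 * um_pci_virtio_probe() and raised via VIRTIO_PCIDEV_OP_INT, so
+	 * INTA-INTD all collapse onto dev->irq.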
*/ + return reg->dev->irq; +} + +void *pci_root_bus_fwnode(struct pci_bus *bus) +{ + return um_pci_fwnode; +} + +static long um_pci_map_platform(unsigned long offset, size_t size, + const struct logic_iomem_ops **ops, + void **priv) +{ + if (!um_pci_platform_device) + return -ENOENT; + + *ops = &um_pci_device_bar_ops; + *priv = &um_pci_platform_device->resptr[0]; + + return offset; +} + +static const struct logic_iomem_region_ops um_pci_platform_ops = { + .map = um_pci_map_platform, +}; + +static struct resource virt_platform_resource = { + .name = "platform", + .start = 0x10000000, + .end = 0x1fffffff, + .flags = IORESOURCE_MEM, +}; + +static int __init um_pci_init(void) +{ + int err, i; + + WARN_ON(logic_iomem_add_region(&virt_cfgspace_resource, + &um_pci_cfgspace_ops)); + WARN_ON(logic_iomem_add_region(&virt_iomem_resource, + &um_pci_iomem_ops)); + WARN_ON(logic_iomem_add_region(&virt_platform_resource, + &um_pci_platform_ops)); + + if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0, + "No virtio device ID configured for PCI - no PCI support\n")) + return 0; + + um_pci_msg_bufs = alloc_percpu(struct um_pci_message_buffer); + if (!um_pci_msg_bufs) + return -ENOMEM; + + bridge = pci_alloc_host_bridge(0); + if (!bridge) { + err = -ENOMEM; + goto free; + } + + um_pci_fwnode = irq_domain_alloc_named_fwnode("um-pci"); + if (!um_pci_fwnode) { + err = -ENOMEM; + goto free; + } + + um_pci_inner_domain = __irq_domain_add(um_pci_fwnode, MAX_MSI_VECTORS, + MAX_MSI_VECTORS, 0, + &um_pci_inner_domain_ops, NULL); + if (!um_pci_inner_domain) { + err = -ENOMEM; + goto free; + } + + um_pci_msi_domain = pci_msi_create_irq_domain(um_pci_fwnode, + &um_pci_msi_domain_info, + um_pci_inner_domain); + if (!um_pci_msi_domain) { + err = -ENOMEM; + goto free; + } + + pci_add_resource(&bridge->windows, &virt_iomem_resource); + pci_add_resource(&bridge->windows, &busn_resource); + bridge->ops = &um_pci_ops; + bridge->map_irq = um_pci_map_irq; + + for (i = 0; i < MAX_DEVICES; i++) { + resource_size_t start; + + start = virt_cfgspace_resource.start + i * CFG_SPACE_SIZE; + um_pci_devices[i].iomem = ioremap(start, CFG_SPACE_SIZE); + if (WARN(!um_pci_devices[i].iomem, "failed to map %d\n", i)) { + err = -ENOMEM; + goto free; + } + } + + err = pci_host_probe(bridge); + if (err) + goto free; + + err = register_virtio_driver(&um_pci_virtio_driver); + if (err) + goto free; + return 0; +free: + if (um_pci_inner_domain) + irq_domain_remove(um_pci_inner_domain); + if (um_pci_fwnode) + irq_domain_free_fwnode(um_pci_fwnode); + if (bridge) { + pci_free_resource_list(&bridge->windows); + pci_free_host_bridge(bridge); + } + free_percpu(um_pci_msg_bufs); + return err; +} +module_init(um_pci_init); + +static void __exit um_pci_exit(void) +{ + unregister_virtio_driver(&um_pci_virtio_driver); + irq_domain_remove(um_pci_msi_domain); + irq_domain_remove(um_pci_inner_domain); + pci_free_resource_list(&bridge->windows); + pci_free_host_bridge(bridge); + free_percpu(um_pci_msg_bufs); +} +module_exit(um_pci_exit); diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c new file mode 100644 index 0000000000..8adca2000e --- /dev/null +++ b/arch/um/drivers/virtio_uml.c @@ -0,0 +1,1473 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtio vhost-user driver + * + * Copyright(c) 2019 Intel Corporation + * + * This driver allows virtio devices to be used over a vhost-user socket. + * + * Guest devices can be instantiated by kernel module or command line + * parameters. One device will be created for each parameter. 
Syntax: + * + * virtio_uml.device=<socket>:<virtio_id>[:<platform_id>] + * where: + * <socket> := vhost-user socket path to connect + * <virtio_id> := virtio device id (as in virtio_ids.h) + * <platform_id> := (optional) platform device id + * + * example: + * virtio_uml.device=/var/uml.socket:1 + * + * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd. + */ +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> +#include <linux/time-internal.h> +#include <linux/virtio-uml.h> +#include <shared/as-layout.h> +#include <irq_kern.h> +#include <init.h> +#include <os.h> +#include "vhost_user.h" + +#define MAX_SUPPORTED_QUEUE_SIZE 256 + +#define to_virtio_uml_device(_vdev) \ + container_of(_vdev, struct virtio_uml_device, vdev) + +struct virtio_uml_platform_data { + u32 virtio_device_id; + const char *socket_path; + struct work_struct conn_broken_wk; + struct platform_device *pdev; +}; + +struct virtio_uml_device { + struct virtio_device vdev; + struct platform_device *pdev; + struct virtio_uml_platform_data *pdata; + + spinlock_t sock_lock; + int sock, req_fd, irq; + u64 features; + u64 protocol_features; + u8 status; + u8 registered:1; + u8 suspended:1; + u8 no_vq_suspend:1; + + u8 config_changed_irq:1; + uint64_t vq_irq_vq_map; + int recv_rc; +}; + +struct virtio_uml_vq_info { + int kick_fd, call_fd; + char name[32]; + bool suspended; +}; + +extern unsigned long long physmem_size, highmem; + +#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__) + +/* Vhost-user protocol */ + +static int full_sendmsg_fds(int fd, const void *buf, unsigned int len, + const int *fds, unsigned int fds_num) +{ + int rc; + + do { + rc = os_sendmsg_fds(fd, buf, len, fds, fds_num); + if (rc > 0) { + buf += rc; + len -= rc; + fds = NULL; + fds_num = 0; + } + } while (len && (rc >= 0 || rc == -EINTR)); + + if (rc < 0) + return rc; + return 0; +} + +static int full_read(int fd, void *buf, int len, bool abortable) +{ + int rc; + + if (!len) + return 0; + + do { + rc = os_read_file(fd, buf, len); + if (rc > 0) { + buf += rc; + len -= rc; + } + } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN))); + + if (rc < 0) + return rc; + if (rc == 0) + return -ECONNRESET; + return 0; +} + +static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg) +{ + return full_read(fd, msg, sizeof(msg->header), true); +} + +static int vhost_user_recv(struct virtio_uml_device *vu_dev, + int fd, struct vhost_user_msg *msg, + size_t max_payload_size, bool wait) +{ + size_t size; + int rc; + + /* + * In virtio time-travel mode, we're handling all the vhost-user + * FDs by polling them whenever appropriate. However, we may get + * into a situation where we're sending out an interrupt message + * to a device (e.g. a net device) and need to handle a simulation + * time message while doing so, e.g. one that tells us to update + * our idea of how long we can run without scheduling. + * + * Thus, we need to not just read() from the given fd, but need + * to also handle messages for the simulation time - this function + * does that for us while waiting for the given fd to be readable. 
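+	 * (That wait is implemented by the time_travel_wait_readable()
+	 * call below.)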
+ */ + if (wait) + time_travel_wait_readable(fd); + + rc = vhost_user_recv_header(fd, msg); + + if (rc) + return rc; + size = msg->header.size; + if (size > max_payload_size) + return -EPROTO; + return full_read(fd, &msg->payload, size, false); +} + +static void vhost_user_check_reset(struct virtio_uml_device *vu_dev, + int rc) +{ + struct virtio_uml_platform_data *pdata = vu_dev->pdata; + + if (rc != -ECONNRESET) + return; + + if (!vu_dev->registered) + return; + + vu_dev->registered = 0; + + schedule_work(&pdata->conn_broken_wk); +} + +static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev, + struct vhost_user_msg *msg, + size_t max_payload_size) +{ + int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, + max_payload_size, true); + + if (rc) { + vhost_user_check_reset(vu_dev, rc); + return rc; + } + + if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION)) + return -EPROTO; + + return 0; +} + +static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev, + u64 *value) +{ + struct vhost_user_msg msg; + int rc = vhost_user_recv_resp(vu_dev, &msg, + sizeof(msg.payload.integer)); + + if (rc) + return rc; + if (msg.header.size != sizeof(msg.payload.integer)) + return -EPROTO; + *value = msg.payload.integer; + return 0; +} + +static int vhost_user_recv_req(struct virtio_uml_device *vu_dev, + struct vhost_user_msg *msg, + size_t max_payload_size) +{ + int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg, + max_payload_size, false); + + if (rc) + return rc; + + if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) != + VHOST_USER_VERSION) + return -EPROTO; + + return 0; +} + +static int vhost_user_send(struct virtio_uml_device *vu_dev, + bool need_response, struct vhost_user_msg *msg, + int *fds, size_t num_fds) +{ + size_t size = sizeof(msg->header) + msg->header.size; + unsigned long flags; + bool request_ack; + int rc; + + msg->header.flags |= VHOST_USER_VERSION; + + /* + * The need_response flag indicates that we already need a response, + * e.g. to read the features. In these cases, don't request an ACK as + * it is meaningless. Also request an ACK only if supported. 
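+	 * A requested ACK arrives as a u64 payload where zero means
+	 * success; any non-zero value is reported as a slave error below.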
+ */ + request_ack = !need_response; + if (!(vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK))) + request_ack = false; + + if (request_ack) + msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY; + + spin_lock_irqsave(&vu_dev->sock_lock, flags); + rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds); + if (rc < 0) + goto out; + + if (request_ack) { + uint64_t status; + + rc = vhost_user_recv_u64(vu_dev, &status); + if (rc) + goto out; + + if (status) { + vu_err(vu_dev, "slave reports error: %llu\n", status); + rc = -EIO; + goto out; + } + } + +out: + spin_unlock_irqrestore(&vu_dev->sock_lock, flags); + return rc; +} + +static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev, + bool need_response, u32 request) +{ + struct vhost_user_msg msg = { + .header.request = request, + }; + + return vhost_user_send(vu_dev, need_response, &msg, NULL, 0); +} + +static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev, + u32 request, int fd) +{ + struct vhost_user_msg msg = { + .header.request = request, + }; + + return vhost_user_send(vu_dev, false, &msg, &fd, 1); +} + +static int vhost_user_send_u64(struct virtio_uml_device *vu_dev, + u32 request, u64 value) +{ + struct vhost_user_msg msg = { + .header.request = request, + .header.size = sizeof(msg.payload.integer), + .payload.integer = value, + }; + + return vhost_user_send(vu_dev, false, &msg, NULL, 0); +} + +static int vhost_user_set_owner(struct virtio_uml_device *vu_dev) +{ + return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER); +} + +static int vhost_user_get_features(struct virtio_uml_device *vu_dev, + u64 *features) +{ + int rc = vhost_user_send_no_payload(vu_dev, true, + VHOST_USER_GET_FEATURES); + + if (rc) + return rc; + return vhost_user_recv_u64(vu_dev, features); +} + +static int vhost_user_set_features(struct virtio_uml_device *vu_dev, + u64 features) +{ + return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features); +} + +static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev, + u64 *protocol_features) +{ + int rc = vhost_user_send_no_payload(vu_dev, true, + VHOST_USER_GET_PROTOCOL_FEATURES); + + if (rc) + return rc; + return vhost_user_recv_u64(vu_dev, protocol_features); +} + +static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev, + u64 protocol_features) +{ + return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES, + protocol_features); +} + +static void vhost_user_reply(struct virtio_uml_device *vu_dev, + struct vhost_user_msg *msg, int response) +{ + struct vhost_user_msg reply = { + .payload.integer = response, + }; + size_t size = sizeof(reply.header) + sizeof(reply.payload.integer); + int rc; + + reply.header = msg->header; + reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY; + reply.header.flags |= VHOST_USER_FLAG_REPLY; + reply.header.size = sizeof(reply.payload.integer); + + rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0); + + if (rc) + vu_err(vu_dev, + "sending reply to slave request failed: %d (size %zu)\n", + rc, size); +} + +static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev, + struct time_travel_event *ev) +{ + struct virtqueue *vq; + int response = 1; + struct { + struct vhost_user_msg msg; + u8 extra_payload[512]; + } msg; + int rc; + irqreturn_t irq_rc = IRQ_NONE; + + while (1) { + rc = vhost_user_recv_req(vu_dev, &msg.msg, + sizeof(msg.msg.payload) + + sizeof(msg.extra_payload)); + if (rc) + break; + + switch (msg.msg.header.request) { + case 
VHOST_USER_SLAVE_CONFIG_CHANGE_MSG: + vu_dev->config_changed_irq = true; + response = 0; + break; + case VHOST_USER_SLAVE_VRING_CALL: + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + if (vq->index == msg.msg.payload.vring_state.index) { + response = 0; + vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index); + break; + } + } + break; + case VHOST_USER_SLAVE_IOTLB_MSG: + /* not supported - VIRTIO_F_ACCESS_PLATFORM */ + case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: + /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */ + default: + vu_err(vu_dev, "unexpected slave request %d\n", + msg.msg.header.request); + } + + if (ev && !vu_dev->suspended) + time_travel_add_irq_event(ev); + + if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY) + vhost_user_reply(vu_dev, &msg.msg, response); + irq_rc = IRQ_HANDLED; + } + /* mask EAGAIN as we try non-blocking read until socket is empty */ + vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc; + return irq_rc; +} + +static irqreturn_t vu_req_interrupt(int irq, void *data) +{ + struct virtio_uml_device *vu_dev = data; + irqreturn_t ret = IRQ_HANDLED; + + if (!um_irq_timetravel_handler_used()) + ret = vu_req_read_message(vu_dev, NULL); + + if (vu_dev->recv_rc) { + vhost_user_check_reset(vu_dev, vu_dev->recv_rc); + } else if (vu_dev->vq_irq_vq_map) { + struct virtqueue *vq; + + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index)) + vring_interrupt(0 /* ignored */, vq); + } + vu_dev->vq_irq_vq_map = 0; + } else if (vu_dev->config_changed_irq) { + virtio_config_changed(&vu_dev->vdev); + vu_dev->config_changed_irq = false; + } + + return ret; +} + +static void vu_req_interrupt_comm_handler(int irq, int fd, void *data, + struct time_travel_event *ev) +{ + vu_req_read_message(data, ev); +} + +static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev) +{ + int rc, req_fds[2]; + + /* Use a pipe for slave req fd, SIGIO is not supported for eventfd */ + rc = os_pipe(req_fds, true, true); + if (rc < 0) + return rc; + vu_dev->req_fd = req_fds[0]; + + rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ, + vu_req_interrupt, IRQF_SHARED, + vu_dev->pdev->name, vu_dev, + vu_req_interrupt_comm_handler); + if (rc < 0) + goto err_close; + + vu_dev->irq = rc; + + rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD, + req_fds[1]); + if (rc) + goto err_free_irq; + + goto out; + +err_free_irq: + um_free_irq(vu_dev->irq, vu_dev); +err_close: + os_close_file(req_fds[0]); +out: + /* Close unused write end of request fds */ + os_close_file(req_fds[1]); + return rc; +} + +static int vhost_user_init(struct virtio_uml_device *vu_dev) +{ + int rc = vhost_user_set_owner(vu_dev); + + if (rc) + return rc; + rc = vhost_user_get_features(vu_dev, &vu_dev->features); + if (rc) + return rc; + + if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) { + rc = vhost_user_get_protocol_features(vu_dev, + &vu_dev->protocol_features); + if (rc) + return rc; + vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F; + rc = vhost_user_set_protocol_features(vu_dev, + vu_dev->protocol_features); + if (rc) + return rc; + } + + if (vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + rc = vhost_user_init_slave_req(vu_dev); + if (rc) + return rc; + } + + return 0; +} + +static void vhost_user_get_config(struct virtio_uml_device *vu_dev, + u32 offset, void *buf, u32 len) +{ + u32 cfg_size = offset + len; + struct vhost_user_msg *msg; + size_t payload_size = sizeof(msg->payload.config) + cfg_size; + 
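+	/* the cfg_size bytes land in the flexible payload[] of struct vhost_user_config */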
size_t msg_size = sizeof(msg->header) + payload_size; + int rc; + + if (!(vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG))) + return; + + msg = kzalloc(msg_size, GFP_KERNEL); + if (!msg) + return; + msg->header.request = VHOST_USER_GET_CONFIG; + msg->header.size = payload_size; + msg->payload.config.offset = 0; + msg->payload.config.size = cfg_size; + + rc = vhost_user_send(vu_dev, true, msg, NULL, 0); + if (rc) { + vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n", + rc); + goto free; + } + + rc = vhost_user_recv_resp(vu_dev, msg, msg_size); + if (rc) { + vu_err(vu_dev, + "receiving VHOST_USER_GET_CONFIG response failed: %d\n", + rc); + goto free; + } + + if (msg->header.size != payload_size || + msg->payload.config.size != cfg_size) { + rc = -EPROTO; + vu_err(vu_dev, + "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n", + msg->header.size, payload_size, + msg->payload.config.size, cfg_size); + goto free; + } + memcpy(buf, msg->payload.config.payload + offset, len); + +free: + kfree(msg); +} + +static void vhost_user_set_config(struct virtio_uml_device *vu_dev, + u32 offset, const void *buf, u32 len) +{ + struct vhost_user_msg *msg; + size_t payload_size = sizeof(msg->payload.config) + len; + size_t msg_size = sizeof(msg->header) + payload_size; + int rc; + + if (!(vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG))) + return; + + msg = kzalloc(msg_size, GFP_KERNEL); + if (!msg) + return; + msg->header.request = VHOST_USER_SET_CONFIG; + msg->header.size = payload_size; + msg->payload.config.offset = offset; + msg->payload.config.size = len; + memcpy(msg->payload.config.payload, buf, len); + + rc = vhost_user_send(vu_dev, false, msg, NULL, 0); + if (rc) + vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n", + rc); + + kfree(msg); +} + +static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out, + struct vhost_user_mem_region *region_out) +{ + unsigned long long mem_offset; + int rc = phys_mapping(addr, &mem_offset); + + if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc)) + return -EFAULT; + *fd_out = rc; + region_out->guest_addr = addr; + region_out->user_addr = addr; + region_out->size = size; + region_out->mmap_offset = mem_offset; + + /* Ensure mapping is valid for the entire region */ + rc = phys_mapping(addr + size - 1, &mem_offset); + if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n", + addr + size - 1, rc, *fd_out)) + return -EFAULT; + return 0; +} + +static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev) +{ + struct vhost_user_msg msg = { + .header.request = VHOST_USER_SET_MEM_TABLE, + .header.size = sizeof(msg.payload.mem_regions), + .payload.mem_regions.num = 1, + }; + unsigned long reserved = uml_reserved - uml_physmem; + int fds[2]; + int rc; + + /* + * This is a bit tricky, see also the comment with setup_physmem(). + * + * Essentially, setup_physmem() uses a file to mmap() our physmem, + * but the code and data we *already* have is omitted. To us, this + * is no difference, since they both become part of our address + * space and memory consumption. To somebody looking in from the + * outside, however, it is different because the part of our memory + * consumption that's already part of the binary (code/data) is not + * mapped from the file, so it's not visible to another mmap from + * the file descriptor. + * + * Thus, don't advertise this space to the vhost-user slave. 
This + * means that the slave will likely abort or similar when we give + * it an address from the hidden range, since it's not marked as + * a valid address, but at least that way we detect the issue and + * don't just have the slave read an all-zeroes buffer from the + * shared memory file, or write something there that we can never + * see (depending on the direction of the virtqueue traffic.) + * + * Since we usually don't want to use .text for virtio buffers, + * this effectively means that you cannot use + * 1) global variables, which are in the .bss and not in the shm + * file-backed memory + * 2) the stack in some processes, depending on where they have + * their stack (or maybe only no interrupt stack?) + * + * The stack is already not typically valid for DMA, so this isn't + * much of a restriction, but global variables might be encountered. + * + * It might be possible to fix it by copying around the data that's + * between bss_start and where we map the file now, but it's not + * something that you typically encounter with virtio drivers, so + * it didn't seem worthwhile. + */ + rc = vhost_user_init_mem_region(reserved, physmem_size - reserved, + &fds[0], + &msg.payload.mem_regions.regions[0]); + + if (rc < 0) + return rc; + if (highmem) { + msg.payload.mem_regions.num++; + rc = vhost_user_init_mem_region(__pa(end_iomem), highmem, + &fds[1], &msg.payload.mem_regions.regions[1]); + if (rc < 0) + return rc; + } + + return vhost_user_send(vu_dev, false, &msg, fds, + msg.payload.mem_regions.num); +} + +static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev, + u32 request, u32 index, u32 num) +{ + struct vhost_user_msg msg = { + .header.request = request, + .header.size = sizeof(msg.payload.vring_state), + .payload.vring_state.index = index, + .payload.vring_state.num = num, + }; + + return vhost_user_send(vu_dev, false, &msg, NULL, 0); +} + +static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev, + u32 index, u32 num) +{ + return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM, + index, num); +} + +static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev, + u32 index, u32 offset) +{ + return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE, + index, offset); +} + +static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev, + u32 index, u64 desc, u64 used, u64 avail, + u64 log) +{ + struct vhost_user_msg msg = { + .header.request = VHOST_USER_SET_VRING_ADDR, + .header.size = sizeof(msg.payload.vring_addr), + .payload.vring_addr.index = index, + .payload.vring_addr.desc = desc, + .payload.vring_addr.used = used, + .payload.vring_addr.avail = avail, + .payload.vring_addr.log = log, + }; + + return vhost_user_send(vu_dev, false, &msg, NULL, 0); +} + +static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev, + u32 request, int index, int fd) +{ + struct vhost_user_msg msg = { + .header.request = request, + .header.size = sizeof(msg.payload.integer), + .payload.integer = index, + }; + + if (index & ~VHOST_USER_VRING_INDEX_MASK) + return -EINVAL; + if (fd < 0) { + msg.payload.integer |= VHOST_USER_VRING_POLL_MASK; + return vhost_user_send(vu_dev, false, &msg, NULL, 0); + } + return vhost_user_send(vu_dev, false, &msg, &fd, 1); +} + +static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev, + int index, int fd) +{ + return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL, + index, fd); +} + +static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev, + int index, int 
fd) +{ + return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK, + index, fd); +} + +static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev, + u32 index, bool enable) +{ + if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES))) + return 0; + + return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE, + index, enable); +} + + +/* Virtio interface */ + +static bool vu_notify(struct virtqueue *vq) +{ + struct virtio_uml_vq_info *info = vq->priv; + const uint64_t n = 1; + int rc; + + if (info->suspended) + return true; + + time_travel_propagate_time(); + + if (info->kick_fd < 0) { + struct virtio_uml_device *vu_dev; + + vu_dev = to_virtio_uml_device(vq->vdev); + + return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK, + vq->index, 0) == 0; + } + + do { + rc = os_write_file(info->kick_fd, &n, sizeof(n)); + } while (rc == -EINTR); + return !WARN(rc != sizeof(n), "write returned %d\n", rc); +} + +static irqreturn_t vu_interrupt(int irq, void *opaque) +{ + struct virtqueue *vq = opaque; + struct virtio_uml_vq_info *info = vq->priv; + uint64_t n; + int rc; + irqreturn_t ret = IRQ_NONE; + + do { + rc = os_read_file(info->call_fd, &n, sizeof(n)); + if (rc == sizeof(n)) + ret |= vring_interrupt(irq, vq); + } while (rc == sizeof(n) || rc == -EINTR); + WARN(rc != -EAGAIN, "read returned %d\n", rc); + return ret; +} + + +static void vu_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + vhost_user_get_config(vu_dev, offset, buf, len); +} + +static void vu_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + vhost_user_set_config(vu_dev, offset, buf, len); +} + +static u8 vu_get_status(struct virtio_device *vdev) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + return vu_dev->status; +} + +static void vu_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + vu_dev->status = status; +} + +static void vu_reset(struct virtio_device *vdev) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + vu_dev->status = 0; +} + +static void vu_del_vq(struct virtqueue *vq) +{ + struct virtio_uml_vq_info *info = vq->priv; + + if (info->call_fd >= 0) { + struct virtio_uml_device *vu_dev; + + vu_dev = to_virtio_uml_device(vq->vdev); + + um_free_irq(vu_dev->irq, vq); + os_close_file(info->call_fd); + } + + if (info->kick_fd >= 0) + os_close_file(info->kick_fd); + + vring_del_virtqueue(vq); + kfree(info); +} + +static void vu_del_vqs(struct virtio_device *vdev) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + struct virtqueue *vq, *n; + u64 features; + + /* Note: reverse order as a workaround to a decoding bug in snabb */ + list_for_each_entry_reverse(vq, &vdev->vqs, list) + WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false)); + + /* Ensure previous messages have been processed */ + WARN_ON(vhost_user_get_features(vu_dev, &features)); + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + vu_del_vq(vq); +} + +static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev, + struct virtqueue *vq) +{ + struct virtio_uml_vq_info *info = vq->priv; + int call_fds[2]; + int rc; + + /* no call FD needed/desired in this case */ + if (vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && + 
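/* in-band VRING_CALL notifications arrive over the slave + * request channel, so both features must be present before + * the call eventfd can be dropped */ + 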
vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + info->call_fd = -1; + return 0; + } + + /* Use a pipe for call fd, since SIGIO is not supported for eventfd */ + rc = os_pipe(call_fds, true, true); + if (rc < 0) + return rc; + + info->call_fd = call_fds[0]; + rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ, + vu_interrupt, IRQF_SHARED, info->name, vq); + if (rc < 0) + goto close_both; + + rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]); + if (rc) + goto release_irq; + + goto out; + +release_irq: + um_free_irq(vu_dev->irq, vq); +close_both: + os_close_file(call_fds[0]); +out: + /* Close (unused) write end of call fds */ + os_close_file(call_fds[1]); + + return rc; +} + +static struct virtqueue *vu_setup_vq(struct virtio_device *vdev, + unsigned index, vq_callback_t *callback, + const char *name, bool ctx) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + struct platform_device *pdev = vu_dev->pdev; + struct virtio_uml_vq_info *info; + struct virtqueue *vq; + int num = MAX_SUPPORTED_QUEUE_SIZE; + int rc; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + rc = -ENOMEM; + goto error_kzalloc; + } + snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name, + pdev->id, name); + + vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true, + ctx, vu_notify, callback, info->name); + if (!vq) { + rc = -ENOMEM; + goto error_create; + } + vq->priv = info; + vq->num_max = num; + num = virtqueue_get_vring_size(vq); + + if (vu_dev->protocol_features & + BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) { + info->kick_fd = -1; + } else { + rc = os_eventfd(0, 0); + if (rc < 0) + goto error_kick; + info->kick_fd = rc; + } + + rc = vu_setup_vq_call_fd(vu_dev, vq); + if (rc) + goto error_call; + + rc = vhost_user_set_vring_num(vu_dev, index, num); + if (rc) + goto error_setup; + + rc = vhost_user_set_vring_base(vu_dev, index, 0); + if (rc) + goto error_setup; + + rc = vhost_user_set_vring_addr(vu_dev, index, + virtqueue_get_desc_addr(vq), + virtqueue_get_used_addr(vq), + virtqueue_get_avail_addr(vq), + (u64) -1); + if (rc) + goto error_setup; + + return vq; + +error_setup: + if (info->call_fd >= 0) { + um_free_irq(vu_dev->irq, vq); + os_close_file(info->call_fd); + } +error_call: + if (info->kick_fd >= 0) + os_close_file(info->kick_fd); +error_kick: + vring_del_virtqueue(vq); +error_create: + kfree(info); +error_kzalloc: + return ERR_PTR(rc); +} + +static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], const bool *ctx, + struct irq_affinity *desc) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + int i, queue_idx = 0, rc; + struct virtqueue *vq; + + /* not supported for now */ + if (WARN_ON(nvqs > 64)) + return -EINVAL; + + rc = vhost_user_set_mem_table(vu_dev); + if (rc) + return rc; + + for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i], + ctx ? 
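/* per-queue context flag, when the caller supplies one */ 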
ctx[i] : false); + if (IS_ERR(vqs[i])) { + rc = PTR_ERR(vqs[i]); + goto error_setup; + } + } + + list_for_each_entry(vq, &vdev->vqs, list) { + struct virtio_uml_vq_info *info = vq->priv; + + if (info->kick_fd >= 0) { + rc = vhost_user_set_vring_kick(vu_dev, vq->index, + info->kick_fd); + if (rc) + goto error_setup; + } + + rc = vhost_user_set_vring_enable(vu_dev, vq->index, true); + if (rc) + goto error_setup; + } + + return 0; + +error_setup: + vu_del_vqs(vdev); + return rc; +} + +static u64 vu_get_features(struct virtio_device *vdev) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + return vu_dev->features; +} + +static int vu_finalize_features(struct virtio_device *vdev) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + u64 supported = vdev->features & VHOST_USER_SUPPORTED_F; + + vring_transport_features(vdev); + vu_dev->features = vdev->features | supported; + + return vhost_user_set_features(vu_dev, vu_dev->features); +} + +static const char *vu_bus_name(struct virtio_device *vdev) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + return vu_dev->pdev->name; +} + +static const struct virtio_config_ops virtio_uml_config_ops = { + .get = vu_get, + .set = vu_set, + .get_status = vu_get_status, + .set_status = vu_set_status, + .reset = vu_reset, + .find_vqs = vu_find_vqs, + .del_vqs = vu_del_vqs, + .get_features = vu_get_features, + .finalize_features = vu_finalize_features, + .bus_name = vu_bus_name, +}; + +static void virtio_uml_release_dev(struct device *d) +{ + struct virtio_device *vdev = + container_of(d, struct virtio_device, dev); + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + time_travel_propagate_time(); + + /* might not have been opened due to not negotiating the feature */ + if (vu_dev->req_fd >= 0) { + um_free_irq(vu_dev->irq, vu_dev); + os_close_file(vu_dev->req_fd); + } + + os_close_file(vu_dev->sock); + kfree(vu_dev); +} + +void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev, + bool no_vq_suspend) +{ + struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + + if (WARN_ON(vdev->config != &virtio_uml_config_ops)) + return; + + vu_dev->no_vq_suspend = no_vq_suspend; + dev_info(&vdev->dev, "%sabled VQ suspend\n", + no_vq_suspend ? "dis" : "en"); +} + +static void vu_of_conn_broken(struct work_struct *wk) +{ + struct virtio_uml_platform_data *pdata; + struct virtio_uml_device *vu_dev; + + pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk); + + vu_dev = platform_get_drvdata(pdata->pdev); + + virtio_break_device(&vu_dev->vdev); + + /* + * We can't remove the device from the devicetree so the only thing we + * can do is warn. 
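+ * virtio_break_device() above has already marked all virtqueues + * broken, so outstanding requests fail instead of hanging. 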
+ */ + WARN_ON(1); +} + +/* Platform device */ + +static struct virtio_uml_platform_data * +virtio_uml_create_pdata(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct virtio_uml_platform_data *pdata; + int ret; + + if (!np) + return ERR_PTR(-EINVAL); + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken); + pdata->pdev = pdev; + + ret = of_property_read_string(np, "socket-path", &pdata->socket_path); + if (ret) + return ERR_PTR(ret); + + ret = of_property_read_u32(np, "virtio-device-id", + &pdata->virtio_device_id); + if (ret) + return ERR_PTR(ret); + + return pdata; +} + +static int virtio_uml_probe(struct platform_device *pdev) +{ + struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; + struct virtio_uml_device *vu_dev; + int rc; + + if (!pdata) { + pdata = virtio_uml_create_pdata(pdev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + + vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL); + if (!vu_dev) + return -ENOMEM; + + vu_dev->pdata = pdata; + vu_dev->vdev.dev.parent = &pdev->dev; + vu_dev->vdev.dev.release = virtio_uml_release_dev; + vu_dev->vdev.config = &virtio_uml_config_ops; + vu_dev->vdev.id.device = pdata->virtio_device_id; + vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID; + vu_dev->pdev = pdev; + vu_dev->req_fd = -1; + + time_travel_propagate_time(); + + do { + rc = os_connect_socket(pdata->socket_path); + } while (rc == -EINTR); + if (rc < 0) + goto error_free; + vu_dev->sock = rc; + + spin_lock_init(&vu_dev->sock_lock); + + rc = vhost_user_init(vu_dev); + if (rc) + goto error_init; + + platform_set_drvdata(pdev, vu_dev); + + device_set_wakeup_capable(&vu_dev->vdev.dev, true); + + rc = register_virtio_device(&vu_dev->vdev); + if (rc) + put_device(&vu_dev->vdev.dev); + vu_dev->registered = 1; + return rc; + +error_init: + os_close_file(vu_dev->sock); +error_free: + kfree(vu_dev); + return rc; +} + +static int virtio_uml_remove(struct platform_device *pdev) +{ + struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev); + + unregister_virtio_device(&vu_dev->vdev); + return 0; +} + +/* Command line device list */ + +static void vu_cmdline_release_dev(struct device *d) +{ +} + +static struct device vu_cmdline_parent = { + .init_name = "virtio-uml-cmdline", + .release = vu_cmdline_release_dev, +}; + +static bool vu_cmdline_parent_registered; +static int vu_cmdline_id; + +static int vu_unregister_cmdline_device(struct device *dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; + + kfree(pdata->socket_path); + platform_device_unregister(pdev); + return 0; +} + +static void vu_conn_broken(struct work_struct *wk) +{ + struct virtio_uml_platform_data *pdata; + struct virtio_uml_device *vu_dev; + + pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk); + + vu_dev = platform_get_drvdata(pdata->pdev); + + virtio_break_device(&vu_dev->vdev); + + vu_unregister_cmdline_device(&pdata->pdev->dev, NULL); +} + +static int vu_cmdline_set(const char *device, const struct kernel_param *kp) +{ + const char *ids = strchr(device, ':'); + unsigned int virtio_device_id; + int processed, consumed, err; + char *socket_path; + struct virtio_uml_platform_data pdata, *ppdata; + struct platform_device *pdev; + + if (!ids || ids == device) + return -EINVAL; + + processed = sscanf(ids, ":%u%n:%d%n", + &virtio_device_id, &consumed, + 
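/* both %n conversions store into 'consumed'; after a full + * match it holds the end-of-string offset checked below */ + 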
&vu_cmdline_id, &consumed); + + if (processed < 1 || ids[consumed]) + return -EINVAL; + + if (!vu_cmdline_parent_registered) { + err = device_register(&vu_cmdline_parent); + if (err) { + pr_err("Failed to register parent device!\n"); + put_device(&vu_cmdline_parent); + return err; + } + vu_cmdline_parent_registered = true; + } + + socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL); + if (!socket_path) + return -ENOMEM; + + pdata.virtio_device_id = (u32) virtio_device_id; + pdata.socket_path = socket_path; + + pr_info("Registering device virtio-uml.%d id=%d at %s\n", + vu_cmdline_id, virtio_device_id, socket_path); + + pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml", + vu_cmdline_id++, &pdata, + sizeof(pdata)); + err = PTR_ERR_OR_ZERO(pdev); + if (err) + goto free; + + ppdata = pdev->dev.platform_data; + ppdata->pdev = pdev; + INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken); + + return 0; + +free: + kfree(socket_path); + return err; +} + +static int vu_cmdline_get_device(struct device *dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; + char *buffer = data; + unsigned int len = strlen(buffer); + + snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n", + pdata->socket_path, pdata->virtio_device_id, pdev->id); + return 0; +} + +static int vu_cmdline_get(char *buffer, const struct kernel_param *kp) +{ + buffer[0] = '\0'; + if (vu_cmdline_parent_registered) + device_for_each_child(&vu_cmdline_parent, buffer, + vu_cmdline_get_device); + return strlen(buffer) + 1; +} + +static const struct kernel_param_ops vu_cmdline_param_ops = { + .set = vu_cmdline_set, + .get = vu_cmdline_get, +}; + +device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR); +__uml_help(vu_cmdline_param_ops, +"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n" +" Configure a virtio device over a vhost-user socket.\n" +" See virtio_ids.h for a list of possible virtio device id values.\n" +" Optionally use a specific platform_device id.\n\n" +); + + +static void vu_unregister_cmdline_devices(void) +{ + if (vu_cmdline_parent_registered) { + device_for_each_child(&vu_cmdline_parent, NULL, + vu_unregister_cmdline_device); + device_unregister(&vu_cmdline_parent); + vu_cmdline_parent_registered = false; + } +} + +/* Platform driver */ + +static const struct of_device_id virtio_uml_match[] = { + { .compatible = "virtio,uml", }, + { } +}; +MODULE_DEVICE_TABLE(of, virtio_uml_match); + +static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev); + + if (!vu_dev->no_vq_suspend) { + struct virtqueue *vq; + + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + struct virtio_uml_vq_info *info = vq->priv; + + info->suspended = true; + vhost_user_set_vring_enable(vu_dev, vq->index, false); + } + } + + if (!device_may_wakeup(&vu_dev->vdev.dev)) { + vu_dev->suspended = true; + return 0; + } + + return irq_set_irq_wake(vu_dev->irq, 1); +} + +static int virtio_uml_resume(struct platform_device *pdev) +{ + struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev); + + if (!vu_dev->no_vq_suspend) { + struct virtqueue *vq; + + virtio_device_for_each_vq((&vu_dev->vdev), vq) { + struct virtio_uml_vq_info *info = vq->priv; + + info->suspended = false; + vhost_user_set_vring_enable(vu_dev, vq->index, true); + } + } + + vu_dev->suspended = false; + + if (!device_may_wakeup(&vu_dev->vdev.dev)) + return 0; + + return 
irq_set_irq_wake(vu_dev->irq, 0); +} + +static struct platform_driver virtio_uml_driver = { + .probe = virtio_uml_probe, + .remove = virtio_uml_remove, + .driver = { + .name = "virtio-uml", + .of_match_table = virtio_uml_match, + }, + .suspend = virtio_uml_suspend, + .resume = virtio_uml_resume, +}; + +static int __init virtio_uml_init(void) +{ + return platform_driver_register(&virtio_uml_driver); +} + +static void __exit virtio_uml_exit(void) +{ + platform_driver_unregister(&virtio_uml_driver); + vu_unregister_cmdline_devices(); +} + +module_init(virtio_uml_init); +module_exit(virtio_uml_exit); +__uml_exitcall(virtio_uml_exit); + +MODULE_DESCRIPTION("UML driver for vhost-user virtio devices"); +MODULE_LICENSE("GPL"); diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c new file mode 100644 index 0000000000..6918de5e29 --- /dev/null +++ b/arch/um/drivers/xterm.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <termios.h> +#include "chan_user.h" +#include <os.h> +#include <um_malloc.h> +#include "xterm.h" + +struct xterm_chan { + int pid; + int helper_pid; + int chan_fd; + char *title; + int device; + int raw; + struct termios tt; +}; + +static void *xterm_init(char *str, int device, const struct chan_opts *opts) +{ + struct xterm_chan *data; + + data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL); + if (data == NULL) + return NULL; + *data = ((struct xterm_chan) { .pid = -1, + .helper_pid = -1, + .chan_fd = -1, + .device = device, + .title = opts->xterm_title, + .raw = opts->raw } ); + return data; +} + +/* Only changed by xterm_setup, which is a setup */ +static char *terminal_emulator = CONFIG_XTERM_CHAN_DEFAULT_EMULATOR; +static char *title_switch = "-T"; +static char *exec_switch = "-e"; + +static int __init xterm_setup(char *line, int *add) +{ + *add = 0; + terminal_emulator = line; + + line = strchr(line, ','); + if (line == NULL) + return 0; + + *line++ = '\0'; + if (*line) + title_switch = line; + + line = strchr(line, ','); + if (line == NULL) + return 0; + + *line++ = '\0'; + if (*line) + exec_switch = line; + + return 0; +} + +__uml_setup("xterm=", xterm_setup, +"xterm=<terminal emulator>,<title switch>,<exec switch>\n" +" Specifies an alternate terminal emulator to use for the debugger,\n" +" consoles, and serial lines when they are attached to the xterm channel.\n" +" The values are the terminal emulator binary, the switch it uses to set\n" +" its title, and the switch it uses to execute a subprocess,\n" +" respectively. The title switch must have the form '<switch> title',\n" +" not '<switch>=title'. 
Similarly, the exec switch must have the form\n" +" '<switch> command arg1 arg2 ...'.\n" +" The default values are 'xterm=" CONFIG_XTERM_CHAN_DEFAULT_EMULATOR + ",-T,-e'.\n" +" Values for gnome-terminal are 'xterm=gnome-terminal,-t,-x'.\n\n" +); + +static int xterm_open(int input, int output, int primary, void *d, + char **dev_out) +{ + struct xterm_chan *data = d; + int pid, fd, new, err; + char title[256], file[] = "/tmp/xterm-pipeXXXXXX"; + char *argv[] = { terminal_emulator, title_switch, title, exec_switch, + OS_LIB_PATH "/uml/port-helper", "-uml-socket", + file, NULL }; + + if (access(argv[4], X_OK) < 0) + argv[4] = "port-helper"; + + /* + * Check that DISPLAY is set, this doesn't guarantee the xterm + * will work but w/o it we can be pretty sure it won't. + */ + if (getenv("DISPLAY") == NULL) { + printk(UM_KERN_ERR "xterm_open: $DISPLAY not set.\n"); + return -ENODEV; + } + + /* + * This business of getting a descriptor to a temp file, + * deleting the file and closing the descriptor is just to get + * a known-unused name for the Unix socket that we really + * want. + */ + fd = mkstemp(file); + if (fd < 0) { + err = -errno; + printk(UM_KERN_ERR "xterm_open : mkstemp failed, errno = %d\n", + errno); + return err; + } + + if (unlink(file)) { + err = -errno; + printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n", + errno); + close(fd); + return err; + } + close(fd); + + fd = os_create_unix_socket(file, sizeof(file), 1); + if (fd < 0) { + printk(UM_KERN_ERR "xterm_open : create_unix_socket failed, " + "errno = %d\n", -fd); + return fd; + } + + sprintf(title, data->title, data->device); + pid = run_helper(NULL, NULL, argv); + if (pid < 0) { + err = pid; + printk(UM_KERN_ERR "xterm_open : run_helper failed, " + "errno = %d\n", -err); + goto out_close1; + } + + err = os_set_fd_block(fd, 0); + if (err < 0) { + printk(UM_KERN_ERR "xterm_open : failed to set descriptor " + "non-blocking, err = %d\n", -err); + goto out_kill; + } + + data->chan_fd = fd; + new = xterm_fd(fd, &data->helper_pid); + if (new < 0) { + err = new; + printk(UM_KERN_ERR "xterm_open : os_rcv_fd failed, err = %d\n", + -err); + goto out_kill; + } + + err = os_set_fd_block(new, 0); + if (err) { + printk(UM_KERN_ERR "xterm_open : failed to set xterm " + "descriptor non-blocking, err = %d\n", -err); + goto out_close2; + } + + CATCH_EINTR(err = tcgetattr(new, &data->tt)); + if (err) { + new = err; + goto out_close2; + } + + if (data->raw) { + err = raw(new); + if (err) { + new = err; + goto out_close2; + } + } + + unlink(file); + data->pid = pid; + *dev_out = NULL; + + return new; + + out_close2: + close(new); + out_kill: + os_kill_process(pid, 1); + out_close1: + close(fd); + + return err; +} + +static void xterm_close(int fd, void *d) +{ + struct xterm_chan *data = d; + + if (data->pid != -1) + os_kill_process(data->pid, 1); + data->pid = -1; + + if (data->helper_pid != -1) + os_kill_process(data->helper_pid, 0); + data->helper_pid = -1; + + if (data->chan_fd != -1) + os_close_file(data->chan_fd); + os_close_file(fd); +} + +const struct chan_ops xterm_ops = { + .type = "xterm", + .init = xterm_init, + .open = xterm_open, + .close = xterm_close, + .read = generic_read, + .write = generic_write, + .console_write = generic_console_write, + .window_size = generic_window_size, + .free = generic_free, + .winch = 1, +}; diff --git a/arch/um/drivers/xterm.h b/arch/um/drivers/xterm.h new file mode 100644 index 0000000000..5968da3a6a --- /dev/null +++ b/arch/um/drivers/xterm.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + */ + +#ifndef __XTERM_H__ +#define __XTERM_H__ + +extern int xterm_fd(int socket, int *pid_out); + +#endif + diff --git a/arch/um/drivers/xterm_kern.c b/arch/um/drivers/xterm_kern.c new file mode 100644 index 0000000000..8011e51993 --- /dev/null +++ b/arch/um/drivers/xterm_kern.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/irqreturn.h> +#include <asm/irq.h> +#include <irq_kern.h> +#include <os.h> +#include "xterm.h" + +struct xterm_wait { + struct completion ready; + int fd; + int pid; + int new_fd; +}; + +static irqreturn_t xterm_interrupt(int irq, void *data) +{ + struct xterm_wait *xterm = data; + int fd; + + fd = os_rcv_fd(xterm->fd, &xterm->pid); + if (fd == -EAGAIN) + return IRQ_NONE; + + xterm->new_fd = fd; + complete(&xterm->ready); + + return IRQ_HANDLED; +} + +int xterm_fd(int socket, int *pid_out) +{ + struct xterm_wait *data; + int err, ret; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (data == NULL) { + printk(KERN_ERR "xterm_fd : failed to allocate xterm_wait\n"); + return -ENOMEM; + } + + /* This is a locked semaphore... */ + *data = ((struct xterm_wait) { .fd = socket, + .pid = -1, + .new_fd = -1 }); + init_completion(&data->ready); + + err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt, + IRQF_SHARED, "xterm", data); + if (err < 0) { + printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, " + "err = %d\n", err); + ret = err; + goto out; + } + + /* ... so here we wait for an xterm interrupt. + * + * XXX Note, if the xterm doesn't work for some reason (e.g. DISPLAY + * isn't set) this will hang... */ + wait_for_completion(&data->ready); + + um_free_irq(XTERM_IRQ, data); + + ret = data->new_fd; + *pid_out = data->pid; + out: + kfree(data); + + return ret; +}