author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/usb/mon
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  drivers/usb/mon/Kconfig     |   13
-rw-r--r--  drivers/usb/mon/Makefile    |    8
-rw-r--r--  drivers/usb/mon/mon_bin.c   | 1420
-rw-r--r--  drivers/usb/mon/mon_main.c  |  435
-rw-r--r--  drivers/usb/mon/mon_stat.c  |   71
-rw-r--r--  drivers/usb/mon/mon_text.c  |  773
-rw-r--r--  drivers/usb/mon/usb_mon.h   |   76
7 files changed, 2796 insertions, 0 deletions
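For orientation before the diff body: the binary reader added in mon_bin.c below exposes captured URBs through "usbmon%d" character devices and a small ioctl set (MON_IOCX_GETX with struct mon_bin_get, returning one struct mon_bin_hdr plus data per call). The following is a minimal userspace sketch of that API, not part of this commit; the 64-byte API-1 header and ioctl number are taken from mon_bin.c, while the /dev/usbmon0 device path and the 4 KiB data buffer are assumptions (see Documentation/usb/usbmon.rst for the authoritative description).

/*
 * Illustrative only -- not part of this commit.  Reads events from the
 * usbmon binary interface declared in mon_bin.c and prints a one-line
 * summary per URB.  Assumes the device node is /dev/usbmon0 (bus 0 =
 * "all buses") and that root privileges are available.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

struct mon_bin_hdr {                  /* 64-byte API-1 header, mirrors mon_bin.c */
	uint64_t id;
	unsigned char type;           /* 'S'ubmit, 'C'omplete, 'E'rror */
	unsigned char xfer_type;
	unsigned char epnum;
	unsigned char devnum;
	unsigned short busnum;
	char flag_setup;
	char flag_data;
	int64_t ts_sec;
	int32_t ts_usec;
	int32_t status;
	uint32_t len_urb;
	uint32_t len_cap;
	union {
		unsigned char setup[8];
		struct { int32_t error_count; int32_t numdesc; } iso;
	} s;
	int32_t interval;
	int32_t start_frame;
	uint32_t xfer_flags;
	uint32_t ndesc;
};

struct mon_bin_get {                  /* argument of MON_IOCX_GETX */
	struct mon_bin_hdr *hdr;
	void *data;
	size_t alloc;
};

#define MON_IOC_MAGIC	0x92
#define MON_IOCX_GETX	_IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

int main(void)
{
	struct mon_bin_hdr hdr;
	unsigned char data[4096];                   /* buffer size is arbitrary here */
	struct mon_bin_get get = { &hdr, data, sizeof(data) };
	int fd = open("/dev/usbmon0", O_RDONLY);

	if (fd < 0)
		return 1;
	while (ioctl(fd, MON_IOCX_GETX, &get) == 0)  /* blocks until the next event */
		printf("%c bus %u dev %u ep 0x%02x len %u\n",
		       hdr.type, hdr.busnum, hdr.devnum, hdr.epnum, hdr.len_urb);
	close(fd);
	return 0;
}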
diff --git a/drivers/usb/mon/Kconfig b/drivers/usb/mon/Kconfig new file mode 100644 index 000000000..ffc7cd422 --- /dev/null +++ b/drivers/usb/mon/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# USB Monitor configuration +# + +config USB_MON + tristate "USB Monitor" + help + If you select this option, a component which captures the USB traffic + between peripheral-specific drivers and HC drivers will be built. + For more information, see <file:Documentation/usb/usbmon.rst>. + + If unsure, say Y, if allowed, otherwise M. diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile new file mode 100644 index 000000000..09f43e896 --- /dev/null +++ b/drivers/usb/mon/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for USB monitor +# + +usbmon-y := mon_main.o mon_stat.o mon_text.o mon_bin.o + +obj-$(CONFIG_USB_MON) += usbmon.o diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c new file mode 100644 index 000000000..35483217b --- /dev/null +++ b/drivers/usb/mon/mon_bin.c @@ -0,0 +1,1420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The USB Monitor, inspired by Dave Harding's USBMon. + * + * This is a binary format reader. + * + * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it) + * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com) + */ + +#include <linux/kernel.h> +#include <linux/sched/signal.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/export.h> +#include <linux/usb.h> +#include <linux/poll.h> +#include <linux/compat.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/time64.h> + +#include <linux/uaccess.h> + +#include "usb_mon.h" + +/* + * Defined by USB 2.0 clause 9.3, table 9.2. + */ +#define SETUP_LEN 8 + +/* ioctl macros */ +#define MON_IOC_MAGIC 0x92 + +#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1) +/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */ +#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats) +#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4) +#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5) +#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get) +#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch) +#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8) +/* #9 was MON_IOCT_SETAPI */ +#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get) + +#ifdef CONFIG_COMPAT +#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32) +#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32) +#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32) +#endif + +/* + * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc). + * But it's all right. Just use a simple way to make sure the chunk is never + * smaller than a page. + * + * N.B. An application does not know our chunk size. + * + * Woops, get_zeroed_page() returns a single page. I guess we're stuck with + * page-sized chunks for the time being. + */ +#define CHUNK_SIZE PAGE_SIZE +#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1)) + +/* + * The magic limit was calculated so that it allows the monitoring + * application to pick data once in two ticks. This way, another application, + * which presumably drives the bus, gets to hog CPU, yet we collect our data. + * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an + * enormous overhead built into the bus protocol, so we need about 1000 KB. 
+ * + * This is still too much for most cases, where we just snoop a few + * descriptor fetches for enumeration. So, the default is a "reasonable" + * amount for systems with HZ=250 and incomplete bus saturation. + * + * XXX What about multi-megabyte URBs which take minutes to transfer? + */ +#define BUFF_MAX CHUNK_ALIGN(1200*1024) +#define BUFF_DFL CHUNK_ALIGN(300*1024) +#define BUFF_MIN CHUNK_ALIGN(8*1024) + +/* + * The per-event API header (2 per URB). + * + * This structure is seen in userland as defined by the documentation. + */ +struct mon_bin_hdr { + u64 id; /* URB ID - from submission to callback */ + unsigned char type; /* Same as in text API; extensible. */ + unsigned char xfer_type; /* ISO, Intr, Control, Bulk */ + unsigned char epnum; /* Endpoint number and transfer direction */ + unsigned char devnum; /* Device address */ + unsigned short busnum; /* Bus number */ + char flag_setup; + char flag_data; + s64 ts_sec; /* ktime_get_real_ts64 */ + s32 ts_usec; /* ktime_get_real_ts64 */ + int status; + unsigned int len_urb; /* Length of data (submitted or actual) */ + unsigned int len_cap; /* Delivered length */ + union { + unsigned char setup[SETUP_LEN]; /* Only for Control S-type */ + struct iso_rec { + int error_count; + int numdesc; + } iso; + } s; + int interval; + int start_frame; + unsigned int xfer_flags; + unsigned int ndesc; /* Actual number of ISO descriptors */ +}; + +/* + * ISO vector, packed into the head of data stream. + * This has to take 16 bytes to make sure that the end of buffer + * wrap is not happening in the middle of a descriptor. + */ +struct mon_bin_isodesc { + int iso_status; + unsigned int iso_off; + unsigned int iso_len; + u32 _pad; +}; + +/* per file statistic */ +struct mon_bin_stats { + u32 queued; + u32 dropped; +}; + +struct mon_bin_get { + struct mon_bin_hdr __user *hdr; /* Can be 48 bytes or 64. */ + void __user *data; + size_t alloc; /* Length of data (can be zero) */ +}; + +struct mon_bin_mfetch { + u32 __user *offvec; /* Vector of events fetched */ + u32 nfetch; /* Number of events to fetch (out: fetched) */ + u32 nflush; /* Number of events to flush */ +}; + +#ifdef CONFIG_COMPAT +struct mon_bin_get32 { + u32 hdr32; + u32 data32; + u32 alloc32; +}; + +struct mon_bin_mfetch32 { + u32 offvec32; + u32 nfetch32; + u32 nflush32; +}; +#endif + +/* Having these two values same prevents wrapping of the mon_bin_hdr */ +#define PKT_ALIGN 64 +#define PKT_SIZE 64 + +#define PKT_SZ_API0 48 /* API 0 (2.6.20) size */ +#define PKT_SZ_API1 64 /* API 1 size: extra fields */ + +#define ISODESC_MAX 128 /* Same number as usbfs allows, 2048 bytes. */ + +/* max number of USB bus supported */ +#define MON_BIN_MAX_MINOR 128 + +/* + * The buffer: map of used pages. + */ +struct mon_pgmap { + struct page *pg; + unsigned char *ptr; /* XXX just use page_to_virt everywhere? */ +}; + +/* + * This gets associated with an open file struct. + */ +struct mon_reader_bin { + /* The buffer: one per open. */ + spinlock_t b_lock; /* Protect b_cnt, b_in */ + unsigned int b_size; /* Current size of the buffer - bytes */ + unsigned int b_cnt; /* Bytes used */ + unsigned int b_in, b_out; /* Offsets into buffer - bytes */ + unsigned int b_read; /* Amount of read data in curr. pkt. */ + struct mon_pgmap *b_vec; /* The map array */ + wait_queue_head_t b_wait; /* Wait for data here */ + + struct mutex fetch_lock; /* Protect b_read, b_out */ + int mmap_active; + + /* A list of these is needed for "bus 0". Some time later. 
*/ + struct mon_reader r; + + /* Stats */ + unsigned int cnt_lost; +}; + +static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp, + unsigned int offset) +{ + return (struct mon_bin_hdr *) + (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); +} + +#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0) + +static unsigned char xfer_to_pipe[4] = { + PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT +}; + +static struct class *mon_bin_class; +static dev_t mon_bin_dev0; +static struct cdev mon_bin_cdev; + +static void mon_buff_area_fill(const struct mon_reader_bin *rp, + unsigned int offset, unsigned int size); +static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp); +static int mon_alloc_buff(struct mon_pgmap *map, int npages); +static void mon_free_buff(struct mon_pgmap *map, int npages); + +/* + * This is a "chunked memcpy". It does not manipulate any counters. + */ +static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this, + unsigned int off, const unsigned char *from, unsigned int length) +{ + unsigned int step_len; + unsigned char *buf; + unsigned int in_page; + + while (length) { + /* + * Determine step_len. + */ + step_len = length; + in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); + if (in_page < step_len) + step_len = in_page; + + /* + * Copy data and advance pointers. + */ + buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; + memcpy(buf, from, step_len); + if ((off += step_len) >= this->b_size) off = 0; + from += step_len; + length -= step_len; + } + return off; +} + +/* + * This is a little worse than the above because it's "chunked copy_to_user". + * The return value is an error code, not an offset. + */ +static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off, + char __user *to, int length) +{ + unsigned int step_len; + unsigned char *buf; + unsigned int in_page; + + while (length) { + /* + * Determine step_len. + */ + step_len = length; + in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); + if (in_page < step_len) + step_len = in_page; + + /* + * Copy data and advance pointers. + */ + buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; + if (copy_to_user(to, buf, step_len)) + return -EINVAL; + if ((off += step_len) >= this->b_size) off = 0; + to += step_len; + length -= step_len; + } + return 0; +} + +/* + * Allocate an (aligned) area in the buffer. + * This is called under b_lock. + * Returns ~0 on failure. + */ +static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp, + unsigned int size) +{ + unsigned int offset; + + size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + if (rp->b_cnt + size > rp->b_size) + return ~0; + offset = rp->b_in; + rp->b_cnt += size; + if ((rp->b_in += size) >= rp->b_size) + rp->b_in -= rp->b_size; + return offset; +} + +/* + * This is the same thing as mon_buff_area_alloc, only it does not allow + * buffers to wrap. This is needed by applications which pass references + * into mmap-ed buffers up their stacks (libpcap can do that). + * + * Currently, we always have the header stuck with the data, although + * it is not strictly speaking necessary. + * + * When a buffer would wrap, we place a filler packet to mark the space. + */ +static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp, + unsigned int size) +{ + unsigned int offset; + unsigned int fill_size; + + size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + if (rp->b_cnt + size > rp->b_size) + return ~0; + if (rp->b_in + size > rp->b_size) { + /* + * This would wrap. 
Find if we still have space after + * skipping to the end of the buffer. If we do, place + * a filler packet and allocate a new packet. + */ + fill_size = rp->b_size - rp->b_in; + if (rp->b_cnt + size + fill_size > rp->b_size) + return ~0; + mon_buff_area_fill(rp, rp->b_in, fill_size); + + offset = 0; + rp->b_in = size; + rp->b_cnt += size + fill_size; + } else if (rp->b_in + size == rp->b_size) { + offset = rp->b_in; + rp->b_in = 0; + rp->b_cnt += size; + } else { + offset = rp->b_in; + rp->b_in += size; + rp->b_cnt += size; + } + return offset; +} + +/* + * Return a few (kilo-)bytes to the head of the buffer. + * This is used if a data fetch fails. + */ +static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size) +{ + + /* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */ + rp->b_cnt -= size; + if (rp->b_in < size) + rp->b_in += rp->b_size; + rp->b_in -= size; +} + +/* + * This has to be called under both b_lock and fetch_lock, because + * it accesses both b_cnt and b_out. + */ +static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size) +{ + + size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + rp->b_cnt -= size; + if ((rp->b_out += size) >= rp->b_size) + rp->b_out -= rp->b_size; +} + +static void mon_buff_area_fill(const struct mon_reader_bin *rp, + unsigned int offset, unsigned int size) +{ + struct mon_bin_hdr *ep; + + ep = MON_OFF2HDR(rp, offset); + memset(ep, 0, PKT_SIZE); + ep->type = '@'; + ep->len_cap = size - PKT_SIZE; +} + +static inline char mon_bin_get_setup(unsigned char *setupb, + const struct urb *urb, char ev_type) +{ + + if (urb->setup_packet == NULL) + return 'Z'; + memcpy(setupb, urb->setup_packet, SETUP_LEN); + return 0; +} + +static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp, + unsigned int offset, struct urb *urb, unsigned int length, + char *flag) +{ + int i; + struct scatterlist *sg; + unsigned int this_len; + + *flag = 0; + if (urb->num_sgs == 0) { + if (urb->transfer_buffer == NULL) { + *flag = 'Z'; + return length; + } + mon_copy_to_buff(rp, offset, urb->transfer_buffer, length); + length = 0; + + } else { + /* If IOMMU coalescing occurred, we cannot trust sg_page */ + if (urb->transfer_flags & URB_DMA_SG_COMBINED) { + *flag = 'D'; + return length; + } + + /* Copy up to the first non-addressable segment */ + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + if (length == 0 || PageHighMem(sg_page(sg))) + break; + this_len = min_t(unsigned int, sg->length, length); + offset = mon_copy_to_buff(rp, offset, sg_virt(sg), + this_len); + length -= this_len; + } + if (i == 0) + *flag = 'D'; + } + + return length; +} + +/* + * This is the look-ahead pass in case of 'C Zi', when actual_length cannot + * be used to determine the length of the whole contiguous buffer. 
+ */ +static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp, + struct urb *urb, unsigned int ndesc) +{ + struct usb_iso_packet_descriptor *fp; + unsigned int length; + + length = 0; + fp = urb->iso_frame_desc; + while (ndesc-- != 0) { + if (fp->actual_length != 0) { + if (fp->offset + fp->actual_length > length) + length = fp->offset + fp->actual_length; + } + fp++; + } + return length; +} + +static void mon_bin_get_isodesc(const struct mon_reader_bin *rp, + unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc) +{ + struct mon_bin_isodesc *dp; + struct usb_iso_packet_descriptor *fp; + + fp = urb->iso_frame_desc; + while (ndesc-- != 0) { + dp = (struct mon_bin_isodesc *) + (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); + dp->iso_status = fp->status; + dp->iso_off = fp->offset; + dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length; + dp->_pad = 0; + if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size) + offset = 0; + fp++; + } +} + +static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, + char ev_type, int status) +{ + const struct usb_endpoint_descriptor *epd = &urb->ep->desc; + struct timespec64 ts; + unsigned long flags; + unsigned int urb_length; + unsigned int offset; + unsigned int length; + unsigned int delta; + unsigned int ndesc, lendesc; + unsigned char dir; + struct mon_bin_hdr *ep; + char data_tag = 0; + + ktime_get_real_ts64(&ts); + + spin_lock_irqsave(&rp->b_lock, flags); + + /* + * Find the maximum allowable length, then allocate space. + */ + urb_length = (ev_type == 'S') ? + urb->transfer_buffer_length : urb->actual_length; + length = urb_length; + + if (usb_endpoint_xfer_isoc(epd)) { + if (urb->number_of_packets < 0) { + ndesc = 0; + } else if (urb->number_of_packets >= ISODESC_MAX) { + ndesc = ISODESC_MAX; + } else { + ndesc = urb->number_of_packets; + } + if (ev_type == 'C' && usb_urb_dir_in(urb)) + length = mon_bin_collate_isodesc(rp, urb, ndesc); + } else { + ndesc = 0; + } + lendesc = ndesc*sizeof(struct mon_bin_isodesc); + + /* not an issue unless there's a subtle bug in a HCD somewhere */ + if (length >= urb->transfer_buffer_length) + length = urb->transfer_buffer_length; + + if (length >= rp->b_size/5) + length = rp->b_size/5; + + if (usb_urb_dir_in(urb)) { + if (ev_type == 'S') { + length = 0; + data_tag = '<'; + } + /* Cannot rely on endpoint number in case of control ep.0 */ + dir = USB_DIR_IN; + } else { + if (ev_type == 'C') { + length = 0; + data_tag = '>'; + } + dir = 0; + } + + if (rp->mmap_active) { + offset = mon_buff_area_alloc_contiguous(rp, + length + PKT_SIZE + lendesc); + } else { + offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc); + } + if (offset == ~0) { + rp->cnt_lost++; + spin_unlock_irqrestore(&rp->b_lock, flags); + return; + } + + ep = MON_OFF2HDR(rp, offset); + if ((offset += PKT_SIZE) >= rp->b_size) offset = 0; + + /* + * Fill the allocated area. 
+ */ + memset(ep, 0, PKT_SIZE); + ep->type = ev_type; + ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)]; + ep->epnum = dir | usb_endpoint_num(epd); + ep->devnum = urb->dev->devnum; + ep->busnum = urb->dev->bus->busnum; + ep->id = (unsigned long) urb; + ep->ts_sec = ts.tv_sec; + ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; + ep->status = status; + ep->len_urb = urb_length; + ep->len_cap = length + lendesc; + ep->xfer_flags = urb->transfer_flags; + + if (usb_endpoint_xfer_int(epd)) { + ep->interval = urb->interval; + } else if (usb_endpoint_xfer_isoc(epd)) { + ep->interval = urb->interval; + ep->start_frame = urb->start_frame; + ep->s.iso.error_count = urb->error_count; + ep->s.iso.numdesc = urb->number_of_packets; + } + + if (usb_endpoint_xfer_control(epd) && ev_type == 'S') { + ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type); + } else { + ep->flag_setup = '-'; + } + + if (ndesc != 0) { + ep->ndesc = ndesc; + mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc); + if ((offset += lendesc) >= rp->b_size) + offset -= rp->b_size; + } + + if (length != 0) { + length = mon_bin_get_data(rp, offset, urb, length, + &ep->flag_data); + if (length > 0) { + delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + ep->len_cap -= length; + delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + mon_buff_area_shrink(rp, delta); + } + } else { + ep->flag_data = data_tag; + } + + spin_unlock_irqrestore(&rp->b_lock, flags); + + wake_up(&rp->b_wait); +} + +static void mon_bin_submit(void *data, struct urb *urb) +{ + struct mon_reader_bin *rp = data; + mon_bin_event(rp, urb, 'S', -EINPROGRESS); +} + +static void mon_bin_complete(void *data, struct urb *urb, int status) +{ + struct mon_reader_bin *rp = data; + mon_bin_event(rp, urb, 'C', status); +} + +static void mon_bin_error(void *data, struct urb *urb, int error) +{ + struct mon_reader_bin *rp = data; + struct timespec64 ts; + unsigned long flags; + unsigned int offset; + struct mon_bin_hdr *ep; + + ktime_get_real_ts64(&ts); + + spin_lock_irqsave(&rp->b_lock, flags); + + offset = mon_buff_area_alloc(rp, PKT_SIZE); + if (offset == ~0) { + /* Not incrementing cnt_lost. Just because. */ + spin_unlock_irqrestore(&rp->b_lock, flags); + return; + } + + ep = MON_OFF2HDR(rp, offset); + + memset(ep, 0, PKT_SIZE); + ep->type = 'E'; + ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)]; + ep->epnum = usb_urb_dir_in(urb) ? 
USB_DIR_IN : 0; + ep->epnum |= usb_endpoint_num(&urb->ep->desc); + ep->devnum = urb->dev->devnum; + ep->busnum = urb->dev->bus->busnum; + ep->id = (unsigned long) urb; + ep->ts_sec = ts.tv_sec; + ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; + ep->status = error; + + ep->flag_setup = '-'; + ep->flag_data = 'E'; + + spin_unlock_irqrestore(&rp->b_lock, flags); + + wake_up(&rp->b_wait); +} + +static int mon_bin_open(struct inode *inode, struct file *file) +{ + struct mon_bus *mbus; + struct mon_reader_bin *rp; + size_t size; + int rc; + + mutex_lock(&mon_lock); + mbus = mon_bus_lookup(iminor(inode)); + if (mbus == NULL) { + mutex_unlock(&mon_lock); + return -ENODEV; + } + if (mbus != &mon_bus0 && mbus->u_bus == NULL) { + printk(KERN_ERR TAG ": consistency error on open\n"); + mutex_unlock(&mon_lock); + return -ENODEV; + } + + rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL); + if (rp == NULL) { + rc = -ENOMEM; + goto err_alloc; + } + spin_lock_init(&rp->b_lock); + init_waitqueue_head(&rp->b_wait); + mutex_init(&rp->fetch_lock); + rp->b_size = BUFF_DFL; + + size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE); + if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) { + rc = -ENOMEM; + goto err_allocvec; + } + + if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0) + goto err_allocbuff; + + rp->r.m_bus = mbus; + rp->r.r_data = rp; + rp->r.rnf_submit = mon_bin_submit; + rp->r.rnf_error = mon_bin_error; + rp->r.rnf_complete = mon_bin_complete; + + mon_reader_add(mbus, &rp->r); + + file->private_data = rp; + mutex_unlock(&mon_lock); + return 0; + +err_allocbuff: + kfree(rp->b_vec); +err_allocvec: + kfree(rp); +err_alloc: + mutex_unlock(&mon_lock); + return rc; +} + +/* + * Extract an event from buffer and copy it to user space. + * Wait if there is no event ready. + * Returns zero or error. 
+ */ +static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp, + struct mon_bin_hdr __user *hdr, unsigned int hdrbytes, + void __user *data, unsigned int nbytes) +{ + unsigned long flags; + struct mon_bin_hdr *ep; + size_t step_len; + unsigned int offset; + int rc; + + mutex_lock(&rp->fetch_lock); + + if ((rc = mon_bin_wait_event(file, rp)) < 0) { + mutex_unlock(&rp->fetch_lock); + return rc; + } + + ep = MON_OFF2HDR(rp, rp->b_out); + + if (copy_to_user(hdr, ep, hdrbytes)) { + mutex_unlock(&rp->fetch_lock); + return -EFAULT; + } + + step_len = min(ep->len_cap, nbytes); + if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0; + + if (copy_from_buf(rp, offset, data, step_len)) { + mutex_unlock(&rp->fetch_lock); + return -EFAULT; + } + + spin_lock_irqsave(&rp->b_lock, flags); + mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); + spin_unlock_irqrestore(&rp->b_lock, flags); + rp->b_read = 0; + + mutex_unlock(&rp->fetch_lock); + return 0; +} + +static int mon_bin_release(struct inode *inode, struct file *file) +{ + struct mon_reader_bin *rp = file->private_data; + struct mon_bus* mbus = rp->r.m_bus; + + mutex_lock(&mon_lock); + + if (mbus->nreaders <= 0) { + printk(KERN_ERR TAG ": consistency error on close\n"); + mutex_unlock(&mon_lock); + return 0; + } + mon_reader_del(mbus, &rp->r); + + mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); + kfree(rp->b_vec); + kfree(rp); + + mutex_unlock(&mon_lock); + return 0; +} + +static ssize_t mon_bin_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct mon_reader_bin *rp = file->private_data; + unsigned int hdrbytes = PKT_SZ_API0; + unsigned long flags; + struct mon_bin_hdr *ep; + unsigned int offset; + size_t step_len; + char *ptr; + ssize_t done = 0; + int rc; + + mutex_lock(&rp->fetch_lock); + + if ((rc = mon_bin_wait_event(file, rp)) < 0) { + mutex_unlock(&rp->fetch_lock); + return rc; + } + + ep = MON_OFF2HDR(rp, rp->b_out); + + if (rp->b_read < hdrbytes) { + step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read)); + ptr = ((char *)ep) + rp->b_read; + if (step_len && copy_to_user(buf, ptr, step_len)) { + mutex_unlock(&rp->fetch_lock); + return -EFAULT; + } + nbytes -= step_len; + buf += step_len; + rp->b_read += step_len; + done += step_len; + } + + if (rp->b_read >= hdrbytes) { + step_len = ep->len_cap; + step_len -= rp->b_read - hdrbytes; + if (step_len > nbytes) + step_len = nbytes; + offset = rp->b_out + PKT_SIZE; + offset += rp->b_read - hdrbytes; + if (offset >= rp->b_size) + offset -= rp->b_size; + if (copy_from_buf(rp, offset, buf, step_len)) { + mutex_unlock(&rp->fetch_lock); + return -EFAULT; + } + nbytes -= step_len; + buf += step_len; + rp->b_read += step_len; + done += step_len; + } + + /* + * Check if whole packet was read, and if so, jump to the next one. + */ + if (rp->b_read >= hdrbytes + ep->len_cap) { + spin_lock_irqsave(&rp->b_lock, flags); + mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); + spin_unlock_irqrestore(&rp->b_lock, flags); + rp->b_read = 0; + } + + mutex_unlock(&rp->fetch_lock); + return done; +} + +/* + * Remove at most nevents from chunked buffer. + * Returns the number of removed events. 
+ */ +static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents) +{ + unsigned long flags; + struct mon_bin_hdr *ep; + int i; + + mutex_lock(&rp->fetch_lock); + spin_lock_irqsave(&rp->b_lock, flags); + for (i = 0; i < nevents; ++i) { + if (MON_RING_EMPTY(rp)) + break; + + ep = MON_OFF2HDR(rp, rp->b_out); + mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); + } + spin_unlock_irqrestore(&rp->b_lock, flags); + rp->b_read = 0; + mutex_unlock(&rp->fetch_lock); + return i; +} + +/* + * Fetch at most max event offsets into the buffer and put them into vec. + * The events are usually freed later with mon_bin_flush. + * Return the effective number of events fetched. + */ +static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp, + u32 __user *vec, unsigned int max) +{ + unsigned int cur_out; + unsigned int bytes, avail; + unsigned int size; + unsigned int nevents; + struct mon_bin_hdr *ep; + unsigned long flags; + int rc; + + mutex_lock(&rp->fetch_lock); + + if ((rc = mon_bin_wait_event(file, rp)) < 0) { + mutex_unlock(&rp->fetch_lock); + return rc; + } + + spin_lock_irqsave(&rp->b_lock, flags); + avail = rp->b_cnt; + spin_unlock_irqrestore(&rp->b_lock, flags); + + cur_out = rp->b_out; + nevents = 0; + bytes = 0; + while (bytes < avail) { + if (nevents >= max) + break; + + ep = MON_OFF2HDR(rp, cur_out); + if (put_user(cur_out, &vec[nevents])) { + mutex_unlock(&rp->fetch_lock); + return -EFAULT; + } + + nevents++; + size = ep->len_cap + PKT_SIZE; + size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + if ((cur_out += size) >= rp->b_size) + cur_out -= rp->b_size; + bytes += size; + } + + mutex_unlock(&rp->fetch_lock); + return nevents; +} + +/* + * Count events. This is almost the same as the above mon_bin_fetch, + * only we do not store offsets into user vector, and we have no limit. + */ +static int mon_bin_queued(struct mon_reader_bin *rp) +{ + unsigned int cur_out; + unsigned int bytes, avail; + unsigned int size; + unsigned int nevents; + struct mon_bin_hdr *ep; + unsigned long flags; + + mutex_lock(&rp->fetch_lock); + + spin_lock_irqsave(&rp->b_lock, flags); + avail = rp->b_cnt; + spin_unlock_irqrestore(&rp->b_lock, flags); + + cur_out = rp->b_out; + nevents = 0; + bytes = 0; + while (bytes < avail) { + ep = MON_OFF2HDR(rp, cur_out); + + nevents++; + size = ep->len_cap + PKT_SIZE; + size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); + if ((cur_out += size) >= rp->b_size) + cur_out -= rp->b_size; + bytes += size; + } + + mutex_unlock(&rp->fetch_lock); + return nevents; +} + +/* + */ +static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct mon_reader_bin *rp = file->private_data; + // struct mon_bus* mbus = rp->r.m_bus; + int ret = 0; + struct mon_bin_hdr *ep; + unsigned long flags; + + switch (cmd) { + + case MON_IOCQ_URB_LEN: + /* + * N.B. This only returns the size of data, without the header. + */ + spin_lock_irqsave(&rp->b_lock, flags); + if (!MON_RING_EMPTY(rp)) { + ep = MON_OFF2HDR(rp, rp->b_out); + ret = ep->len_cap; + } + spin_unlock_irqrestore(&rp->b_lock, flags); + break; + + case MON_IOCQ_RING_SIZE: + mutex_lock(&rp->fetch_lock); + ret = rp->b_size; + mutex_unlock(&rp->fetch_lock); + break; + + case MON_IOCT_RING_SIZE: + /* + * Changing the buffer size will flush it's contents; the new + * buffer is allocated before releasing the old one to be sure + * the device will stay functional also in case of memory + * pressure. 
+ */ + { + int size; + struct mon_pgmap *vec; + + if (arg < BUFF_MIN || arg > BUFF_MAX) + return -EINVAL; + + size = CHUNK_ALIGN(arg); + vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap), + GFP_KERNEL); + if (vec == NULL) { + ret = -ENOMEM; + break; + } + + ret = mon_alloc_buff(vec, size/CHUNK_SIZE); + if (ret < 0) { + kfree(vec); + break; + } + + mutex_lock(&rp->fetch_lock); + spin_lock_irqsave(&rp->b_lock, flags); + if (rp->mmap_active) { + mon_free_buff(vec, size/CHUNK_SIZE); + kfree(vec); + ret = -EBUSY; + } else { + mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); + kfree(rp->b_vec); + rp->b_vec = vec; + rp->b_size = size; + rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; + rp->cnt_lost = 0; + } + spin_unlock_irqrestore(&rp->b_lock, flags); + mutex_unlock(&rp->fetch_lock); + } + break; + + case MON_IOCH_MFLUSH: + ret = mon_bin_flush(rp, arg); + break; + + case MON_IOCX_GET: + case MON_IOCX_GETX: + { + struct mon_bin_get getb; + + if (copy_from_user(&getb, (void __user *)arg, + sizeof(struct mon_bin_get))) + return -EFAULT; + + if (getb.alloc > 0x10000000) /* Want to cast to u32 */ + return -EINVAL; + ret = mon_bin_get_event(file, rp, getb.hdr, + (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1, + getb.data, (unsigned int)getb.alloc); + } + break; + + case MON_IOCX_MFETCH: + { + struct mon_bin_mfetch mfetch; + struct mon_bin_mfetch __user *uptr; + + uptr = (struct mon_bin_mfetch __user *)arg; + + if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) + return -EFAULT; + + if (mfetch.nflush) { + ret = mon_bin_flush(rp, mfetch.nflush); + if (ret < 0) + return ret; + if (put_user(ret, &uptr->nflush)) + return -EFAULT; + } + ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch); + if (ret < 0) + return ret; + if (put_user(ret, &uptr->nfetch)) + return -EFAULT; + ret = 0; + } + break; + + case MON_IOCG_STATS: { + struct mon_bin_stats __user *sp; + unsigned int nevents; + unsigned int ndropped; + + spin_lock_irqsave(&rp->b_lock, flags); + ndropped = rp->cnt_lost; + rp->cnt_lost = 0; + spin_unlock_irqrestore(&rp->b_lock, flags); + nevents = mon_bin_queued(rp); + + sp = (struct mon_bin_stats __user *)arg; + if (put_user(ndropped, &sp->dropped)) + return -EFAULT; + if (put_user(nevents, &sp->queued)) + return -EFAULT; + + } + break; + + default: + return -ENOTTY; + } + + return ret; +} + +#ifdef CONFIG_COMPAT +static long mon_bin_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct mon_reader_bin *rp = file->private_data; + int ret; + + switch (cmd) { + + case MON_IOCX_GET32: + case MON_IOCX_GETX32: + { + struct mon_bin_get32 getb; + + if (copy_from_user(&getb, (void __user *)arg, + sizeof(struct mon_bin_get32))) + return -EFAULT; + + ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32), + (cmd == MON_IOCX_GET32)? 
PKT_SZ_API0: PKT_SZ_API1, + compat_ptr(getb.data32), getb.alloc32); + if (ret < 0) + return ret; + } + return 0; + + case MON_IOCX_MFETCH32: + { + struct mon_bin_mfetch32 mfetch; + struct mon_bin_mfetch32 __user *uptr; + + uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg); + + if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) + return -EFAULT; + + if (mfetch.nflush32) { + ret = mon_bin_flush(rp, mfetch.nflush32); + if (ret < 0) + return ret; + if (put_user(ret, &uptr->nflush32)) + return -EFAULT; + } + ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32), + mfetch.nfetch32); + if (ret < 0) + return ret; + if (put_user(ret, &uptr->nfetch32)) + return -EFAULT; + } + return 0; + + case MON_IOCG_STATS: + return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); + + case MON_IOCQ_URB_LEN: + case MON_IOCQ_RING_SIZE: + case MON_IOCT_RING_SIZE: + case MON_IOCH_MFLUSH: + return mon_bin_ioctl(file, cmd, arg); + + default: + ; + } + return -ENOTTY; +} +#endif /* CONFIG_COMPAT */ + +static __poll_t +mon_bin_poll(struct file *file, struct poll_table_struct *wait) +{ + struct mon_reader_bin *rp = file->private_data; + __poll_t mask = 0; + unsigned long flags; + + if (file->f_mode & FMODE_READ) + poll_wait(file, &rp->b_wait, wait); + + spin_lock_irqsave(&rp->b_lock, flags); + if (!MON_RING_EMPTY(rp)) + mask |= EPOLLIN | EPOLLRDNORM; /* readable */ + spin_unlock_irqrestore(&rp->b_lock, flags); + return mask; +} + +/* + * open and close: just keep track of how many times the device is + * mapped, to use the proper memory allocation function. + */ +static void mon_bin_vma_open(struct vm_area_struct *vma) +{ + struct mon_reader_bin *rp = vma->vm_private_data; + unsigned long flags; + + spin_lock_irqsave(&rp->b_lock, flags); + rp->mmap_active++; + spin_unlock_irqrestore(&rp->b_lock, flags); +} + +static void mon_bin_vma_close(struct vm_area_struct *vma) +{ + unsigned long flags; + + struct mon_reader_bin *rp = vma->vm_private_data; + spin_lock_irqsave(&rp->b_lock, flags); + rp->mmap_active--; + spin_unlock_irqrestore(&rp->b_lock, flags); +} + +/* + * Map ring pages to user space. 
+ */ +static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) +{ + struct mon_reader_bin *rp = vmf->vma->vm_private_data; + unsigned long offset, chunk_idx; + struct page *pageptr; + unsigned long flags; + + spin_lock_irqsave(&rp->b_lock, flags); + offset = vmf->pgoff << PAGE_SHIFT; + if (offset >= rp->b_size) { + spin_unlock_irqrestore(&rp->b_lock, flags); + return VM_FAULT_SIGBUS; + } + chunk_idx = offset / CHUNK_SIZE; + pageptr = rp->b_vec[chunk_idx].pg; + get_page(pageptr); + vmf->page = pageptr; + spin_unlock_irqrestore(&rp->b_lock, flags); + return 0; +} + +static const struct vm_operations_struct mon_bin_vm_ops = { + .open = mon_bin_vma_open, + .close = mon_bin_vma_close, + .fault = mon_bin_vma_fault, +}; + +static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) +{ + /* don't do anything here: "fault" will set up page table entries */ + vma->vm_ops = &mon_bin_vm_ops; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_private_data = filp->private_data; + mon_bin_vma_open(vma); + return 0; +} + +static const struct file_operations mon_fops_binary = { + .owner = THIS_MODULE, + .open = mon_bin_open, + .llseek = no_llseek, + .read = mon_bin_read, + /* .write = mon_text_write, */ + .poll = mon_bin_poll, + .unlocked_ioctl = mon_bin_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mon_bin_compat_ioctl, +#endif + .release = mon_bin_release, + .mmap = mon_bin_mmap, +}; + +static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp) +{ + DECLARE_WAITQUEUE(waita, current); + unsigned long flags; + + add_wait_queue(&rp->b_wait, &waita); + set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irqsave(&rp->b_lock, flags); + while (MON_RING_EMPTY(rp)) { + spin_unlock_irqrestore(&rp->b_lock, flags); + + if (file->f_flags & O_NONBLOCK) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->b_wait, &waita); + return -EWOULDBLOCK; /* Same as EAGAIN in Linux */ + } + schedule(); + if (signal_pending(current)) { + remove_wait_queue(&rp->b_wait, &waita); + return -EINTR; + } + set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irqsave(&rp->b_lock, flags); + } + spin_unlock_irqrestore(&rp->b_lock, flags); + + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->b_wait, &waita); + return 0; +} + +static int mon_alloc_buff(struct mon_pgmap *map, int npages) +{ + int n; + unsigned long vaddr; + + for (n = 0; n < npages; n++) { + vaddr = get_zeroed_page(GFP_KERNEL); + if (vaddr == 0) { + while (n-- != 0) + free_page((unsigned long) map[n].ptr); + return -ENOMEM; + } + map[n].ptr = (unsigned char *) vaddr; + map[n].pg = virt_to_page((void *) vaddr); + } + return 0; +} + +static void mon_free_buff(struct mon_pgmap *map, int npages) +{ + int n; + + for (n = 0; n < npages; n++) + free_page((unsigned long) map[n].ptr); +} + +int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus) +{ + struct device *dev; + unsigned minor = ubus? ubus->busnum: 0; + + if (minor >= MON_BIN_MAX_MINOR) + return 0; + + dev = device_create(mon_bin_class, ubus ? 
ubus->controller : NULL, + MKDEV(MAJOR(mon_bin_dev0), minor), NULL, + "usbmon%d", minor); + if (IS_ERR(dev)) + return 0; + + mbus->classdev = dev; + return 1; +} + +void mon_bin_del(struct mon_bus *mbus) +{ + device_destroy(mon_bin_class, mbus->classdev->devt); +} + +int __init mon_bin_init(void) +{ + int rc; + + mon_bin_class = class_create(THIS_MODULE, "usbmon"); + if (IS_ERR(mon_bin_class)) { + rc = PTR_ERR(mon_bin_class); + goto err_class; + } + + rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon"); + if (rc < 0) + goto err_dev; + + cdev_init(&mon_bin_cdev, &mon_fops_binary); + mon_bin_cdev.owner = THIS_MODULE; + + rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR); + if (rc < 0) + goto err_add; + + return 0; + +err_add: + unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); +err_dev: + class_destroy(mon_bin_class); +err_class: + return rc; +} + +void mon_bin_exit(void) +{ + cdev_del(&mon_bin_cdev); + unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); + class_destroy(mon_bin_class); +} diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c new file mode 100644 index 000000000..9812d102a --- /dev/null +++ b/drivers/usb/mon/mon_main.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The USB Monitor, inspired by Dave Harding's USBMon. + * + * mon_main.c: Main file, module initiation and exit, registrations, etc. + * + * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com) + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/usb/hcd.h> +#include <linux/slab.h> +#include <linux/notifier.h> +#include <linux/mutex.h> + +#include "usb_mon.h" + + +static void mon_stop(struct mon_bus *mbus); +static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus); +static void mon_bus_drop(struct kref *r); +static void mon_bus_init(struct usb_bus *ubus); + +DEFINE_MUTEX(mon_lock); + +struct mon_bus mon_bus0; /* Pseudo bus meaning "all buses" */ +static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */ + +/* + * Link a reader into the bus. + * + * This must be called with mon_lock taken because of mbus->ref. + */ +void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r) +{ + unsigned long flags; + struct list_head *p; + + spin_lock_irqsave(&mbus->lock, flags); + if (mbus->nreaders == 0) { + if (mbus == &mon_bus0) { + list_for_each (p, &mon_buses) { + struct mon_bus *m1; + m1 = list_entry(p, struct mon_bus, bus_link); + m1->u_bus->monitored = 1; + } + } else { + mbus->u_bus->monitored = 1; + } + } + mbus->nreaders++; + list_add_tail(&r->r_link, &mbus->r_list); + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_get(&mbus->ref); +} + +/* + * Unlink reader from the bus. + * + * This is called with mon_lock taken, so we can decrement mbus->ref. 
+ */ +void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + list_del(&r->r_link); + --mbus->nreaders; + if (mbus->nreaders == 0) + mon_stop(mbus); + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_put(&mbus->ref, mon_bus_drop); +} + +/* + */ +static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_submit(r->r_data, urb); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_submit(struct usb_bus *ubus, struct urb *urb) +{ + struct mon_bus *mbus; + + mbus = ubus->mon_bus; + if (mbus != NULL) + mon_bus_submit(mbus, urb); + mon_bus_submit(&mon_bus0, urb); +} + +/* + */ +static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_error(r->r_data, urb, error); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error) +{ + struct mon_bus *mbus; + + mbus = ubus->mon_bus; + if (mbus != NULL) + mon_bus_submit_error(mbus, urb, error); + mon_bus_submit_error(&mon_bus0, urb, error); +} + +/* + */ +static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb, int status) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_complete(r->r_data, urb, status); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status) +{ + struct mon_bus *mbus; + + mbus = ubus->mon_bus; + if (mbus != NULL) + mon_bus_complete(mbus, urb, status); + mon_bus_complete(&mon_bus0, urb, status); +} + +/* int (*unlink_urb) (struct urb *urb, int status); */ + +/* + * Stop monitoring. + */ +static void mon_stop(struct mon_bus *mbus) +{ + struct usb_bus *ubus; + struct list_head *p; + + if (mbus == &mon_bus0) { + list_for_each (p, &mon_buses) { + mbus = list_entry(p, struct mon_bus, bus_link); + /* + * We do not change nreaders here, so rely on mon_lock. + */ + if (mbus->nreaders == 0 && (ubus = mbus->u_bus) != NULL) + ubus->monitored = 0; + } + } else { + /* + * A stop can be called for a dissolved mon_bus in case of + * a reader staying across an rmmod foo_hcd, so test ->u_bus. + */ + if (mon_bus0.nreaders == 0 && (ubus = mbus->u_bus) != NULL) { + ubus->monitored = 0; + mb(); + } + } +} + +/* + * Add a USB bus (usually by a modprobe foo-hcd) + * + * This does not return an error code because the core cannot care less + * if monitoring is not established. + */ +static void mon_bus_add(struct usb_bus *ubus) +{ + mon_bus_init(ubus); + mutex_lock(&mon_lock); + if (mon_bus0.nreaders != 0) + ubus->monitored = 1; + mutex_unlock(&mon_lock); +} + +/* + * Remove a USB bus (either from rmmod foo-hcd or from a hot-remove event). 
+ */ +static void mon_bus_remove(struct usb_bus *ubus) +{ + struct mon_bus *mbus = ubus->mon_bus; + + mutex_lock(&mon_lock); + list_del(&mbus->bus_link); + if (mbus->text_inited) + mon_text_del(mbus); + if (mbus->bin_inited) + mon_bin_del(mbus); + + mon_dissolve(mbus, ubus); + kref_put(&mbus->ref, mon_bus_drop); + mutex_unlock(&mon_lock); +} + +static int mon_notify(struct notifier_block *self, unsigned long action, + void *dev) +{ + switch (action) { + case USB_BUS_ADD: + mon_bus_add(dev); + break; + case USB_BUS_REMOVE: + mon_bus_remove(dev); + } + return NOTIFY_OK; +} + +static struct notifier_block mon_nb = { + .notifier_call = mon_notify, +}; + +/* + * Ops + */ +static const struct usb_mon_operations mon_ops_0 = { + .urb_submit = mon_submit, + .urb_submit_error = mon_submit_error, + .urb_complete = mon_complete, +}; + +/* + * Tear usb_bus and mon_bus apart. + */ +static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus) +{ + + if (ubus->monitored) { + ubus->monitored = 0; + mb(); + } + + ubus->mon_bus = NULL; + mbus->u_bus = NULL; + mb(); + + /* We want synchronize_irq() here, but that needs an argument. */ +} + +/* + */ +static void mon_bus_drop(struct kref *r) +{ + struct mon_bus *mbus = container_of(r, struct mon_bus, ref); + kfree(mbus); +} + +/* + * Initialize a bus for us: + * - allocate mon_bus + * - refcount USB bus struct + * - link + */ +static void mon_bus_init(struct usb_bus *ubus) +{ + struct mon_bus *mbus; + + mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL); + if (mbus == NULL) + goto err_alloc; + kref_init(&mbus->ref); + spin_lock_init(&mbus->lock); + INIT_LIST_HEAD(&mbus->r_list); + + /* + * We don't need to take a reference to ubus, because we receive + * a notification if the bus is about to be removed. + */ + mbus->u_bus = ubus; + ubus->mon_bus = mbus; + + mbus->text_inited = mon_text_add(mbus, ubus); + mbus->bin_inited = mon_bin_add(mbus, ubus); + + mutex_lock(&mon_lock); + list_add_tail(&mbus->bus_link, &mon_buses); + mutex_unlock(&mon_lock); + return; + +err_alloc: + return; +} + +static void mon_bus0_init(void) +{ + struct mon_bus *mbus = &mon_bus0; + + kref_init(&mbus->ref); + spin_lock_init(&mbus->lock); + INIT_LIST_HEAD(&mbus->r_list); + + mbus->text_inited = mon_text_add(mbus, NULL); + mbus->bin_inited = mon_bin_add(mbus, NULL); +} + +/* + * Search a USB bus by number. Notice that USB bus numbers start from one, + * which we may later use to identify "all" with zero. + * + * This function must be called with mon_lock held. + * + * This is obviously inefficient and may be revised in the future. 
+ */ +struct mon_bus *mon_bus_lookup(unsigned int num) +{ + struct list_head *p; + struct mon_bus *mbus; + + if (num == 0) { + return &mon_bus0; + } + list_for_each (p, &mon_buses) { + mbus = list_entry(p, struct mon_bus, bus_link); + if (mbus->u_bus->busnum == num) { + return mbus; + } + } + return NULL; +} + +static int __init mon_init(void) +{ + struct usb_bus *ubus; + int rc, id; + + if ((rc = mon_text_init()) != 0) + goto err_text; + if ((rc = mon_bin_init()) != 0) + goto err_bin; + + mon_bus0_init(); + + if (usb_mon_register(&mon_ops_0) != 0) { + printk(KERN_NOTICE TAG ": unable to register with the core\n"); + rc = -ENODEV; + goto err_reg; + } + // MOD_INC_USE_COUNT(which_module?); + + mutex_lock(&usb_bus_idr_lock); + idr_for_each_entry(&usb_bus_idr, ubus, id) + mon_bus_init(ubus); + usb_register_notify(&mon_nb); + mutex_unlock(&usb_bus_idr_lock); + return 0; + +err_reg: + mon_bin_exit(); +err_bin: + mon_text_exit(); +err_text: + return rc; +} + +static void __exit mon_exit(void) +{ + struct mon_bus *mbus; + struct list_head *p; + + usb_unregister_notify(&mon_nb); + usb_mon_deregister(); + + mutex_lock(&mon_lock); + + while (!list_empty(&mon_buses)) { + p = mon_buses.next; + mbus = list_entry(p, struct mon_bus, bus_link); + list_del(p); + + if (mbus->text_inited) + mon_text_del(mbus); + if (mbus->bin_inited) + mon_bin_del(mbus); + + /* + * This never happens, because the open/close paths in + * file level maintain module use counters and so rmmod fails + * before reaching here. However, better be safe... + */ + if (mbus->nreaders) { + printk(KERN_ERR TAG + ": Outstanding opens (%d) on usb%d, leaking...\n", + mbus->nreaders, mbus->u_bus->busnum); + kref_get(&mbus->ref); /* Force leak */ + } + + mon_dissolve(mbus, mbus->u_bus); + kref_put(&mbus->ref, mon_bus_drop); + } + + mbus = &mon_bus0; + if (mbus->text_inited) + mon_text_del(mbus); + if (mbus->bin_inited) + mon_bin_del(mbus); + + mutex_unlock(&mon_lock); + + mon_text_exit(); + mon_bin_exit(); +} + +module_init(mon_init); +module_exit(mon_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c new file mode 100644 index 000000000..98ab0cc47 --- /dev/null +++ b/drivers/usb/mon/mon_stat.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The USB Monitor, inspired by Dave Harding's USBMon. + * + * This is the 's' or 'stat' reader which debugs usbmon itself. + * Note that this code blows through locks, so make sure that + * /dbg/usbmon/0s is well protected from non-root users. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/usb.h> +#include <linux/fs.h> +#include <linux/uaccess.h> + +#include "usb_mon.h" + +#define STAT_BUF_SIZE 80 + +struct snap { + int slen; + char str[STAT_BUF_SIZE]; +}; + +static int mon_stat_open(struct inode *inode, struct file *file) +{ + struct mon_bus *mbus; + struct snap *sp; + + sp = kmalloc(sizeof(struct snap), GFP_KERNEL); + if (sp == NULL) + return -ENOMEM; + + mbus = inode->i_private; + + sp->slen = snprintf(sp->str, STAT_BUF_SIZE, + "nreaders %d events %u text_lost %u\n", + mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost); + + file->private_data = sp; + return 0; +} + +static ssize_t mon_stat_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct snap *sp = file->private_data; + + return simple_read_from_buffer(buf, nbytes, ppos, sp->str, sp->slen); +} + +static int mon_stat_release(struct inode *inode, struct file *file) +{ + struct snap *sp = file->private_data; + file->private_data = NULL; + kfree(sp); + return 0; +} + +const struct file_operations mon_fops_stat = { + .owner = THIS_MODULE, + .open = mon_stat_open, + .llseek = no_llseek, + .read = mon_stat_read, + /* .write = mon_stat_write, */ + /* .poll = mon_stat_poll, */ + /* .unlocked_ioctl = mon_stat_ioctl, */ + .release = mon_stat_release, +}; diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c new file mode 100644 index 000000000..39cb14164 --- /dev/null +++ b/drivers/usb/mon/mon_text.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The USB Monitor, inspired by Dave Harding's USBMon. + * + * This is a text format reader. + */ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/usb.h> +#include <linux/slab.h> +#include <linux/sched/signal.h> +#include <linux/time.h> +#include <linux/ktime.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/debugfs.h> +#include <linux/scatterlist.h> +#include <linux/uaccess.h> + +#include "usb_mon.h" + +/* + * No, we do not want arbitrarily long data strings. + * Use the binary interface if you want to capture bulk data! + */ +#define DATA_MAX 32 + +/* + * Defined by USB 2.0 clause 9.3, table 9.2. + */ +#define SETUP_MAX 8 + +/* + * This limit exists to prevent OOMs when the user process stops reading. + * If usbmon were available to unprivileged processes, it might be open + * to a local DoS. But we have to keep to root in order to prevent + * password sniffing from HID devices. + */ +#define EVENT_MAX (4*PAGE_SIZE / sizeof(struct mon_event_text)) + +/* + * Potentially unlimited number; we limit it for similar allocations. + * The usbfs limits this to 128, but we're not quite as generous. + */ +#define ISODESC_MAX 5 + +#define PRINTF_DFL 250 /* with 5 ISOs segs */ + +struct mon_iso_desc { + int status; + unsigned int offset; + unsigned int length; /* Unsigned here, signed in URB. Historic. */ +}; + +struct mon_event_text { + struct list_head e_link; + int type; /* submit, complete, etc. 
*/ + unsigned long id; /* From pointer, most of the time */ + unsigned int tstamp; + int busnum; + char devnum; + char epnum; + char is_in; + char xfertype; + int length; /* Depends on type: xfer length or act length */ + int status; + int interval; + int start_frame; + int error_count; + char setup_flag; + char data_flag; + int numdesc; /* Full number */ + struct mon_iso_desc isodesc[ISODESC_MAX]; + unsigned char setup[SETUP_MAX]; + unsigned char data[DATA_MAX]; +}; + +#define SLAB_NAME_SZ 30 +struct mon_reader_text { + struct kmem_cache *e_slab; + int nevents; + struct list_head e_list; + struct mon_reader r; /* In C, parent class can be placed anywhere */ + + wait_queue_head_t wait; + int printf_size; + size_t printf_offset; + size_t printf_togo; + char *printf_buf; + struct mutex printf_lock; + + char slab_name[SLAB_NAME_SZ]; +}; + +static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */ + +static void mon_text_ctor(void *); + +struct mon_text_ptr { + int cnt, limit; + char *pbuf; +}; + +static struct mon_event_text * + mon_text_read_wait(struct mon_reader_text *rp, struct file *file); +static void mon_text_read_head_t(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); +static void mon_text_read_head_u(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); +static void mon_text_read_statset(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); +static void mon_text_read_intstat(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); +static void mon_text_read_isostat(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); +static void mon_text_read_isodesc(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); +static void mon_text_read_data(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep); + +/* + * mon_text_submit + * mon_text_complete + * + * May be called from an interrupt. + * + * This is called with the whole mon_bus locked, so no additional lock. + */ + +static inline char mon_text_get_setup(struct mon_event_text *ep, + struct urb *urb, char ev_type, struct mon_bus *mbus) +{ + + if (ep->xfertype != USB_ENDPOINT_XFER_CONTROL || ev_type != 'S') + return '-'; + + if (urb->setup_packet == NULL) + return 'Z'; /* '0' would be not as pretty. */ + + memcpy(ep->setup, urb->setup_packet, SETUP_MAX); + return 0; +} + +static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, + int len, char ev_type, struct mon_bus *mbus) +{ + void *src; + + if (len <= 0) + return 'L'; + if (len >= DATA_MAX) + len = DATA_MAX; + + if (ep->is_in) { + if (ev_type != 'C') + return '<'; + } else { + if (ev_type != 'S') + return '>'; + } + + if (urb->num_sgs == 0) { + src = urb->transfer_buffer; + if (src == NULL) + return 'Z'; /* '0' would be not as pretty. */ + } else { + struct scatterlist *sg = urb->sg; + + if (PageHighMem(sg_page(sg))) + return 'D'; + + /* For the text interface we copy only the first sg buffer */ + len = min_t(int, sg->length, len); + src = sg_virt(sg); + } + + memcpy(ep->data, src, len); + return 0; +} + +static inline unsigned int mon_get_timestamp(void) +{ + struct timespec64 now; + unsigned int stamp; + + ktime_get_ts64(&now); + stamp = now.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. 
*/ + stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC; + return stamp; +} + +static void mon_text_event(struct mon_reader_text *rp, struct urb *urb, + char ev_type, int status) +{ + struct mon_event_text *ep; + unsigned int stamp; + struct usb_iso_packet_descriptor *fp; + struct mon_iso_desc *dp; + int i, ndesc; + + stamp = mon_get_timestamp(); + + if (rp->nevents >= EVENT_MAX || + (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + rp->r.m_bus->cnt_text_lost++; + return; + } + + ep->type = ev_type; + ep->id = (unsigned long) urb; + ep->busnum = urb->dev->bus->busnum; + ep->devnum = urb->dev->devnum; + ep->epnum = usb_endpoint_num(&urb->ep->desc); + ep->xfertype = usb_endpoint_type(&urb->ep->desc); + ep->is_in = usb_urb_dir_in(urb); + ep->tstamp = stamp; + ep->length = (ev_type == 'S') ? + urb->transfer_buffer_length : urb->actual_length; + /* Collecting status makes debugging sense for submits, too */ + ep->status = status; + + if (ep->xfertype == USB_ENDPOINT_XFER_INT) { + ep->interval = urb->interval; + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { + ep->interval = urb->interval; + ep->start_frame = urb->start_frame; + ep->error_count = urb->error_count; + } + ep->numdesc = urb->number_of_packets; + if (ep->xfertype == USB_ENDPOINT_XFER_ISOC && + urb->number_of_packets > 0) { + if ((ndesc = urb->number_of_packets) > ISODESC_MAX) + ndesc = ISODESC_MAX; + fp = urb->iso_frame_desc; + dp = ep->isodesc; + for (i = 0; i < ndesc; i++) { + dp->status = fp->status; + dp->offset = fp->offset; + dp->length = (ev_type == 'S') ? + fp->length : fp->actual_length; + fp++; + dp++; + } + /* Wasteful, but simple to understand: ISO 'C' is sparse. */ + if (ev_type == 'C') + ep->length = urb->transfer_buffer_length; + } + + ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus); + ep->data_flag = mon_text_get_data(ep, urb, ep->length, ev_type, + rp->r.m_bus); + + rp->nevents++; + list_add_tail(&ep->e_link, &rp->e_list); + wake_up(&rp->wait); +} + +static void mon_text_submit(void *data, struct urb *urb) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, urb, 'S', -EINPROGRESS); +} + +static void mon_text_complete(void *data, struct urb *urb, int status) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, urb, 'C', status); +} + +static void mon_text_error(void *data, struct urb *urb, int error) +{ + struct mon_reader_text *rp = data; + struct mon_event_text *ep; + + if (rp->nevents >= EVENT_MAX || + (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + rp->r.m_bus->cnt_text_lost++; + return; + } + + ep->type = 'E'; + ep->id = (unsigned long) urb; + ep->busnum = urb->dev->bus->busnum; + ep->devnum = urb->dev->devnum; + ep->epnum = usb_endpoint_num(&urb->ep->desc); + ep->xfertype = usb_endpoint_type(&urb->ep->desc); + ep->is_in = usb_urb_dir_in(urb); + ep->tstamp = mon_get_timestamp(); + ep->length = 0; + ep->status = error; + + ep->setup_flag = '-'; + ep->data_flag = 'E'; + + rp->nevents++; + list_add_tail(&ep->e_link, &rp->e_list); + wake_up(&rp->wait); +} + +/* + * Fetch next event from the circular buffer. 
+ */ +static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp, + struct mon_bus *mbus) +{ + struct list_head *p; + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + if (list_empty(&rp->e_list)) { + spin_unlock_irqrestore(&mbus->lock, flags); + return NULL; + } + p = rp->e_list.next; + list_del(p); + --rp->nevents; + spin_unlock_irqrestore(&mbus->lock, flags); + return list_entry(p, struct mon_event_text, e_link); +} + +/* + */ +static int mon_text_open(struct inode *inode, struct file *file) +{ + struct mon_bus *mbus; + struct mon_reader_text *rp; + int rc; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL); + if (rp == NULL) { + rc = -ENOMEM; + goto err_alloc; + } + INIT_LIST_HEAD(&rp->e_list); + init_waitqueue_head(&rp->wait); + mutex_init(&rp->printf_lock); + + rp->printf_size = PRINTF_DFL; + rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL); + if (rp->printf_buf == NULL) { + rc = -ENOMEM; + goto err_alloc_pr; + } + + rp->r.m_bus = mbus; + rp->r.r_data = rp; + rp->r.rnf_submit = mon_text_submit; + rp->r.rnf_error = mon_text_error; + rp->r.rnf_complete = mon_text_complete; + + snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp); + rp->e_slab = kmem_cache_create(rp->slab_name, + sizeof(struct mon_event_text), sizeof(long), 0, + mon_text_ctor); + if (rp->e_slab == NULL) { + rc = -ENOMEM; + goto err_slab; + } + + mon_reader_add(mbus, &rp->r); + + file->private_data = rp; + mutex_unlock(&mon_lock); + return 0; + +// err_busy: +// kmem_cache_destroy(rp->e_slab); +err_slab: + kfree(rp->printf_buf); +err_alloc_pr: + kfree(rp); +err_alloc: + mutex_unlock(&mon_lock); + return rc; +} + +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, + char __user * const buf, const size_t nbytes) +{ + const size_t togo = min(nbytes, rp->printf_togo); + + if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) + return -EFAULT; + rp->printf_togo -= togo; + rp->printf_offset += togo; + return togo; +} + +/* ppos is not advanced since the llseek operation is not permitted. */ +static ssize_t mon_text_read_t(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; + ssize_t ret; + + mutex_lock(&rp->printf_lock); + + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_t(rp, &ptr, ep); + mon_text_read_statset(rp, &ptr, ep); + ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); + } + + ret = mon_text_copy_to_user(rp, buf, nbytes); + mutex_unlock(&rp->printf_lock); + return ret; +} + +/* ppos is not advanced since the llseek operation is not permitted. 
*/ +static ssize_t mon_text_read_u(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; + ssize_t ret; + + mutex_lock(&rp->printf_lock); + + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_u(rp, &ptr, ep); + if (ep->type == 'E') { + mon_text_read_statset(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { + mon_text_read_isostat(rp, &ptr, ep); + mon_text_read_isodesc(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { + mon_text_read_intstat(rp, &ptr, ep); + } else { + mon_text_read_statset(rp, &ptr, ep); + } + ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); + } + + ret = mon_text_copy_to_user(rp, buf, nbytes); + mutex_unlock(&rp->printf_lock); + return ret; +} + +static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, + struct file *file) +{ + struct mon_bus *mbus = rp->r.m_bus; + DECLARE_WAITQUEUE(waita, current); + struct mon_event_text *ep; + + add_wait_queue(&rp->wait, &waita); + set_current_state(TASK_INTERRUPTIBLE); + while ((ep = mon_text_fetch(rp, mbus)) == NULL) { + if (file->f_flags & O_NONBLOCK) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EWOULDBLOCK); + } + /* + * We do not count nwaiters, because ->release is supposed + * to be called when all openers are gone only. + */ + schedule(); + if (signal_pending(current)) { + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EINTR); + } + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ep; +} + +static void mon_text_read_head_t(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + char udir, utype; + + udir = (ep->is_in ? 'i' : 'o'); + switch (ep->xfertype) { + case USB_ENDPOINT_XFER_ISOC: utype = 'Z'; break; + case USB_ENDPOINT_XFER_INT: utype = 'I'; break; + case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break; + default: /* PIPE_BULK */ utype = 'B'; + } + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + "%lx %u %c %c%c:%03u:%02u", + ep->id, ep->tstamp, ep->type, + utype, udir, ep->devnum, ep->epnum); +} + +static void mon_text_read_head_u(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + char udir, utype; + + udir = (ep->is_in ? 
'i' : 'o'); + switch (ep->xfertype) { + case USB_ENDPOINT_XFER_ISOC: utype = 'Z'; break; + case USB_ENDPOINT_XFER_INT: utype = 'I'; break; + case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break; + default: /* PIPE_BULK */ utype = 'B'; + } + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + "%lx %u %c %c%c:%d:%03u:%u", + ep->id, ep->tstamp, ep->type, + utype, udir, ep->busnum, ep->devnum, ep->epnum); +} + +static void mon_text_read_statset(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + + if (ep->setup_flag == 0) { /* Setup packet is present and captured */ + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " s %02x %02x %04x %04x %04x", + ep->setup[0], + ep->setup[1], + (ep->setup[3] << 8) | ep->setup[2], + (ep->setup[5] << 8) | ep->setup[4], + (ep->setup[7] << 8) | ep->setup[6]); + } else if (ep->setup_flag != '-') { /* Unable to capture setup packet */ + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %c __ __ ____ ____ ____", ep->setup_flag); + } else { /* No setup for this kind of URB */ + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %d", ep->status); + } +} + +static void mon_text_read_intstat(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %d:%d", ep->status, ep->interval); +} + +static void mon_text_read_isostat(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + if (ep->type == 'S') { + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %d:%d:%d", ep->status, ep->interval, ep->start_frame); + } else { + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %d:%d:%d:%d", + ep->status, ep->interval, ep->start_frame, ep->error_count); + } +} + +static void mon_text_read_isodesc(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + int ndesc; /* Display this many */ + int i; + const struct mon_iso_desc *dp; + + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %d", ep->numdesc); + ndesc = ep->numdesc; + if (ndesc > ISODESC_MAX) + ndesc = ISODESC_MAX; + if (ndesc < 0) + ndesc = 0; + dp = ep->isodesc; + for (i = 0; i < ndesc; i++) { + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %d:%u:%u", dp->status, dp->offset, dp->length); + dp++; + } +} + +static void mon_text_read_data(struct mon_reader_text *rp, + struct mon_text_ptr *p, const struct mon_event_text *ep) +{ + int data_len, i; + + if ((data_len = ep->length) > 0) { + if (ep->data_flag == 0) { + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " ="); + if (data_len >= DATA_MAX) + data_len = DATA_MAX; + for (i = 0; i < data_len; i++) { + if (i % 4 == 0) { + p->cnt += scnprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + " "); + } + p->cnt += scnprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + "%02x", ep->data[i]); + } + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + "\n"); + } else { + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, + " %c\n", ep->data_flag); + } + } else { + p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n"); + } +} + +static int mon_text_release(struct inode *inode, struct file *file) +{ + struct mon_reader_text *rp = file->private_data; + struct mon_bus *mbus; + /* unsigned long flags; */ + struct list_head *p; + struct mon_event_text *ep; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + if (mbus->nreaders <= 0) { + printk(KERN_ERR TAG ": consistency error 
on close\n"); + mutex_unlock(&mon_lock); + return 0; + } + mon_reader_del(mbus, &rp->r); + + /* + * In theory, e_list is protected by mbus->lock. However, + * after mon_reader_del has finished, the following is the case: + * - we are not on reader list anymore, so new events won't be added; + * - whole mbus may be dropped if it was orphaned. + * So, we better not touch mbus. + */ + /* spin_lock_irqsave(&mbus->lock, flags); */ + while (!list_empty(&rp->e_list)) { + p = rp->e_list.next; + ep = list_entry(p, struct mon_event_text, e_link); + list_del(p); + --rp->nevents; + kmem_cache_free(rp->e_slab, ep); + } + /* spin_unlock_irqrestore(&mbus->lock, flags); */ + + kmem_cache_destroy(rp->e_slab); + kfree(rp->printf_buf); + kfree(rp); + + mutex_unlock(&mon_lock); + return 0; +} + +static const struct file_operations mon_fops_text_t = { + .owner = THIS_MODULE, + .open = mon_text_open, + .llseek = no_llseek, + .read = mon_text_read_t, + .release = mon_text_release, +}; + +static const struct file_operations mon_fops_text_u = { + .owner = THIS_MODULE, + .open = mon_text_open, + .llseek = no_llseek, + .read = mon_text_read_u, + .release = mon_text_release, +}; + +int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus) +{ + enum { NAMESZ = 10 }; + char name[NAMESZ]; + int busnum = ubus? ubus->busnum: 0; + int rc; + + if (mon_dir == NULL) + return 0; + + if (ubus != NULL) { + rc = snprintf(name, NAMESZ, "%dt", busnum); + if (rc <= 0 || rc >= NAMESZ) + goto err_print_t; + mbus->dent_t = debugfs_create_file(name, 0600, mon_dir, mbus, + &mon_fops_text_t); + } + + rc = snprintf(name, NAMESZ, "%du", busnum); + if (rc <= 0 || rc >= NAMESZ) + goto err_print_u; + mbus->dent_u = debugfs_create_file(name, 0600, mon_dir, mbus, + &mon_fops_text_u); + + rc = snprintf(name, NAMESZ, "%ds", busnum); + if (rc <= 0 || rc >= NAMESZ) + goto err_print_s; + mbus->dent_s = debugfs_create_file(name, 0600, mon_dir, mbus, + &mon_fops_stat); + + return 1; + +err_print_s: + debugfs_remove(mbus->dent_u); + mbus->dent_u = NULL; +err_print_u: + if (ubus != NULL) { + debugfs_remove(mbus->dent_t); + mbus->dent_t = NULL; + } +err_print_t: + return 0; +} + +void mon_text_del(struct mon_bus *mbus) +{ + debugfs_remove(mbus->dent_u); + debugfs_remove(mbus->dent_t); + debugfs_remove(mbus->dent_s); +} + +/* + * Slab interface: constructor. + */ +static void mon_text_ctor(void *mem) +{ + /* + * Nothing to initialize. No, really! + * So, we fill it with garbage to emulate a reused object. + */ + memset(mem, 0xe5, sizeof(struct mon_event_text)); +} + +int __init mon_text_init(void) +{ + mon_dir = debugfs_create_dir("usbmon", usb_debug_root); + return 0; +} + +void mon_text_exit(void) +{ + debugfs_remove(mon_dir); +} diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h new file mode 100644 index 000000000..aa64efaba --- /dev/null +++ b/drivers/usb/mon/usb_mon.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The USB Monitor, inspired by Dave Harding's USBMon. 
+ * + * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com) + */ + +#ifndef __USB_MON_H +#define __USB_MON_H + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/kref.h> +/* #include <linux/usb.h> */ /* We use struct pointers only in this header */ + +#define TAG "usbmon" + +struct mon_bus { + struct list_head bus_link; + spinlock_t lock; + struct usb_bus *u_bus; + + int text_inited; + int bin_inited; + struct dentry *dent_s; /* Debugging file */ + struct dentry *dent_t; /* Text interface file */ + struct dentry *dent_u; /* Second text interface file */ + struct device *classdev; /* Device in usbmon class */ + + /* Ref */ + int nreaders; /* Under mon_lock AND mbus->lock */ + struct list_head r_list; /* Chain of readers (usually one) */ + struct kref ref; /* Under mon_lock */ + + /* Stats */ + unsigned int cnt_events; + unsigned int cnt_text_lost; +}; + +/* + * An instance of a process which opened a file (but can fork later) + */ +struct mon_reader { + struct list_head r_link; + struct mon_bus *m_bus; + void *r_data; /* Use container_of instead? */ + + void (*rnf_submit)(void *data, struct urb *urb); + void (*rnf_error)(void *data, struct urb *urb, int error); + void (*rnf_complete)(void *data, struct urb *urb, int status); +}; + +void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r); +void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r); + +struct mon_bus *mon_bus_lookup(unsigned int num); + +int /*bool*/ mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus); +void mon_text_del(struct mon_bus *mbus); +int /*bool*/ mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus); +void mon_bin_del(struct mon_bus *mbus); + +int __init mon_text_init(void); +void mon_text_exit(void); +int __init mon_bin_init(void); +void mon_bin_exit(void); + +/* + */ +extern struct mutex mon_lock; + +extern const struct file_operations mon_fops_stat; + +extern struct mon_bus mon_bus0; /* Only for redundant checks */ + +#endif /* __USB_MON_H */
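
The "<busnum>t", "<busnum>u" and "<busnum>s" files created by mon_text_add() above are read-only text interfaces in debugfs, so they can be consumed with ordinary file I/O. Below is a minimal user-space sketch, not part of the kernel sources in this commit, that reads the 'u' interface served by mon_text_read_u(). The debugfs path is an assumption (often /sys/kernel/debug/usb/usbmon/<N>u, or /sys/kernel/debug/usbmon/<N>u on older trees), so it is taken from argv[1] rather than hard-coded.

/*
 * Illustrative reader for the usbmon 'u' text interface (sketch only).
 * Build: cc -o monread monread.c
 * Run:   ./monread /sys/kernel/debug/usb/usbmon/0u   (path is an assumption)
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-to-usbmon-Nu-file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror(argv[1]);
		return 1;
	}
	/*
	 * Each read() blocks until an event is queued and returns the text
	 * that mon_text_read_u() formatted for that event.
	 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}

Because of the printf_togo accounting in mon_text_read_u(), a sufficiently large read() returns at most one formatted event line, so a plain read loop like this yields one line per captured URB event.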