author      Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
commit      ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree        b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/usb/early
parent      Initial commit. (diff)
download    linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
            linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/usb/early')
-rw-r--r--   drivers/usb/early/Makefile        7
-rw-r--r--   drivers/usb/early/ehci-dbgp.c  1092
-rw-r--r--   drivers/usb/early/xhci-dbc.c   1007
-rw-r--r--   drivers/usb/early/xhci-dbc.h    222
4 files changed, 2328 insertions, 0 deletions
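Both new drivers (quoted in full in the diff below) run before timers, interrupts, or sleeping primitives are available, so they poll memory-mapped controller registers with bounded busy-wait loops. As a reader's aid, here is a minimal standalone sketch of that pattern, modeled loosely on the handshake() helper added in xhci-dbc.c; the name example_handshake() is illustrative and not part of the patch:

```c
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/*
 * Spin until (readl(ptr) & mask) == done, or give up after roughly
 * 'wait' microseconds, checking every 'delay' microseconds.
 */
static int example_handshake(void __iomem *ptr, u32 mask, u32 done,
			     int wait, int delay)
{
	u32 result;

	do {
		result = readl(ptr) & mask;
		if (result == done)
			return 0;	/* controller reached the expected state */
		udelay(delay);		/* busy-wait; sleeping is not possible this early */
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;		/* condition never became true */
}
```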
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile new file mode 100644 index 0000000000..7b77b49d3b --- /dev/null +++ b/drivers/usb/early/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for early USB devices +# + +obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o +obj-$(CONFIG_EARLY_PRINTK_USB_XDBC) += xhci-dbc.o diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c new file mode 100644 index 0000000000..45b42d8f64 --- /dev/null +++ b/drivers/usb/early/ehci-dbgp.c @@ -0,0 +1,1092 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Standalone EHCI usb debug driver + * + * Originally written by: + * Eric W. Biederman" <ebiederm@xmission.com> and + * Yinghai Lu <yhlu.kernel@gmail.com> + * + * Changes for early/late printk and HW errata: + * Jason Wessel <jason.wessel@windriver.com> + * Copyright (C) 2009 Wind River Systems, Inc. + * + */ + +#include <linux/console.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/pci_regs.h> +#include <linux/pci_ids.h> +#include <linux/usb/ch9.h> +#include <linux/usb/ehci_def.h> +#include <linux/delay.h> +#include <linux/serial_core.h> +#include <linux/kgdb.h> +#include <linux/kthread.h> +#include <asm/io.h> +#include <asm/pci-direct.h> +#include <asm/fixmap.h> + +/* The code here is intended to talk directly to the EHCI debug port + * and does not require that you have any kind of USB host controller + * drivers or USB device drivers compiled into the kernel. + * + * If you make a change to anything in here, the following test cases + * need to pass where a USB debug device works in the following + * configurations. + * + * 1. boot args: earlyprintk=dbgp + * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set + * o kernel compiled with CONFIG_USB_EHCI_HCD=y + * 2. boot args: earlyprintk=dbgp,keep + * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set + * o kernel compiled with CONFIG_USB_EHCI_HCD=y + * 3. boot args: earlyprintk=dbgp console=ttyUSB0 + * o kernel has CONFIG_USB_EHCI_HCD=y and + * CONFIG_USB_SERIAL_DEBUG=y + * 4. boot args: earlyprintk=vga,dbgp + * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set + * o kernel compiled with CONFIG_USB_EHCI_HCD=y + * + * For the 4th configuration you can turn on or off the DBGP_DEBUG + * such that you can debug the dbgp device's driver code. + */ + +static int dbgp_phys_port = 1; + +static struct ehci_caps __iomem *ehci_caps; +static struct ehci_regs __iomem *ehci_regs; +static struct ehci_dbg_port __iomem *ehci_debug; +static int dbgp_not_safe; /* Cannot use debug device during ehci reset */ +static unsigned int dbgp_endpoint_out; +static unsigned int dbgp_endpoint_in; + +struct ehci_dev { + u32 bus; + u32 slot; + u32 func; +}; + +static struct ehci_dev ehci_dev; + +#define USB_DEBUG_DEVNUM 127 + +#ifdef DBGP_DEBUG +#define dbgp_printk printk +static void dbgp_ehci_status(char *str) +{ + if (!ehci_debug) + return; + dbgp_printk("dbgp: %s\n", str); + dbgp_printk(" Debug control: %08x", readl(&ehci_debug->control)); + dbgp_printk(" ehci cmd : %08x", readl(&ehci_regs->command)); + dbgp_printk(" ehci conf flg: %08x\n", + readl(&ehci_regs->configured_flag)); + dbgp_printk(" ehci status : %08x", readl(&ehci_regs->status)); + dbgp_printk(" ehci portsc : %08x\n", + readl(&ehci_regs->port_status[dbgp_phys_port - 1])); +} +#else +static inline void dbgp_ehci_status(char *str) { } +static inline void dbgp_printk(const char *fmt, ...) 
{ } +#endif + +static inline u32 dbgp_len_update(u32 x, u32 len) +{ + return (x & ~0x0f) | (len & 0x0f); +} + +#ifdef CONFIG_KGDB +static struct kgdb_io kgdbdbgp_io_ops; +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops) +#else +#define dbgp_kgdb_mode (0) +#endif + +/* Local version of HC_LENGTH macro as ehci struct is not available here */ +#define EARLY_HC_LENGTH(p) (0x00ff & (p)) /* bits 7 : 0 */ + +/* + * USB Packet IDs (PIDs) + */ + +/* token */ +#define USB_PID_OUT 0xe1 +#define USB_PID_IN 0x69 +#define USB_PID_SOF 0xa5 +#define USB_PID_SETUP 0x2d +/* handshake */ +#define USB_PID_ACK 0xd2 +#define USB_PID_NAK 0x5a +#define USB_PID_STALL 0x1e +#define USB_PID_NYET 0x96 +/* data */ +#define USB_PID_DATA0 0xc3 +#define USB_PID_DATA1 0x4b +#define USB_PID_DATA2 0x87 +#define USB_PID_MDATA 0x0f +/* Special */ +#define USB_PID_PREAMBLE 0x3c +#define USB_PID_ERR 0x3c +#define USB_PID_SPLIT 0x78 +#define USB_PID_PING 0xb4 +#define USB_PID_UNDEF_0 0xf0 + +#define USB_PID_DATA_TOGGLE 0x88 +#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE) + +#define PCI_CAP_ID_EHCI_DEBUG 0xa + +#define HUB_ROOT_RESET_TIME 50 /* times are in msec */ +#define HUB_SHORT_RESET_TIME 10 +#define HUB_LONG_RESET_TIME 200 +#define HUB_RESET_TIMEOUT 500 + +#define DBGP_MAX_PACKET 8 +#define DBGP_TIMEOUT (250 * 1000) +#define DBGP_LOOPS 1000 + +static inline u32 dbgp_pid_write_update(u32 x, u32 tok) +{ + static int data0 = USB_PID_DATA1; + data0 ^= USB_PID_DATA_TOGGLE; + return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff); +} + +static inline u32 dbgp_pid_read_update(u32 x, u32 tok) +{ + return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff); +} + +static int dbgp_wait_until_complete(void) +{ + u32 ctrl; + int ret; + + ret = readl_poll_timeout_atomic(&ehci_debug->control, ctrl, + (ctrl & DBGP_DONE), 1, DBGP_TIMEOUT); + if (ret) + return -DBGP_TIMEOUT; + + /* + * Now that we have observed the completed transaction, + * clear the done bit. + */ + writel(ctrl | DBGP_DONE, &ehci_debug->control); + return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); +} + +static inline void dbgp_mdelay(int ms) +{ + int i; + + while (ms--) { + for (i = 0; i < 1000; i++) + outb(0x1, 0x80); + } +} + +static void dbgp_breath(void) +{ + /* Sleep to give the debug port a chance to breathe */ +} + +static int dbgp_wait_until_done(unsigned ctrl, int loop) +{ + u32 pids, lpid; + int ret; + +retry: + writel(ctrl | DBGP_GO, &ehci_debug->control); + ret = dbgp_wait_until_complete(); + pids = readl(&ehci_debug->pids); + lpid = DBGP_PID_GET(pids); + + if (ret < 0) { + /* A -DBGP_TIMEOUT failure here means the device has + * failed, perhaps because it was unplugged, in which + * case we do not want to hang the system so the dbgp + * will be marked as unsafe to use. EHCI reset is the + * only way to recover if you unplug the dbgp device. + */ + if (ret == -DBGP_TIMEOUT && !dbgp_not_safe) + dbgp_not_safe = 1; + if (ret == -DBGP_ERR_BAD && --loop > 0) + goto retry; + return ret; + } + + /* + * If the port is getting full or it has dropped data + * start pacing ourselves, not necessary but it's friendly. 
+ */ + if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET)) + dbgp_breath(); + + /* If I get a NACK reissue the transmission */ + if (lpid == USB_PID_NAK) { + if (--loop > 0) + goto retry; + } + + return ret; +} + +static inline void dbgp_set_data(const void *buf, int size) +{ + const unsigned char *bytes = buf; + u32 lo, hi; + int i; + + lo = hi = 0; + for (i = 0; i < 4 && i < size; i++) + lo |= bytes[i] << (8*i); + for (; i < 8 && i < size; i++) + hi |= bytes[i] << (8*(i - 4)); + writel(lo, &ehci_debug->data03); + writel(hi, &ehci_debug->data47); +} + +static inline void dbgp_get_data(void *buf, int size) +{ + unsigned char *bytes = buf; + u32 lo, hi; + int i; + + lo = readl(&ehci_debug->data03); + hi = readl(&ehci_debug->data47); + for (i = 0; i < 4 && i < size; i++) + bytes[i] = (lo >> (8*i)) & 0xff; + for (; i < 8 && i < size; i++) + bytes[i] = (hi >> (8*(i - 4))) & 0xff; +} + +static int dbgp_bulk_write(unsigned devnum, unsigned endpoint, + const char *bytes, int size) +{ + int ret; + u32 addr; + u32 pids, ctrl; + + if (size > DBGP_MAX_PACKET) + return -1; + + addr = DBGP_EPADDR(devnum, endpoint); + + pids = readl(&ehci_debug->pids); + pids = dbgp_pid_write_update(pids, USB_PID_OUT); + + ctrl = readl(&ehci_debug->control); + ctrl = dbgp_len_update(ctrl, size); + ctrl |= DBGP_OUT; + ctrl |= DBGP_GO; + + dbgp_set_data(bytes, size); + writel(addr, &ehci_debug->address); + writel(pids, &ehci_debug->pids); + ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS); + + return ret; +} + +static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, + int size, int loops) +{ + u32 pids, addr, ctrl; + int ret; + + if (size > DBGP_MAX_PACKET) + return -1; + + addr = DBGP_EPADDR(devnum, endpoint); + + pids = readl(&ehci_debug->pids); + pids = dbgp_pid_read_update(pids, USB_PID_IN); + + ctrl = readl(&ehci_debug->control); + ctrl = dbgp_len_update(ctrl, size); + ctrl &= ~DBGP_OUT; + ctrl |= DBGP_GO; + + writel(addr, &ehci_debug->address); + writel(pids, &ehci_debug->pids); + ret = dbgp_wait_until_done(ctrl, loops); + if (ret < 0) + return ret; + + if (size > ret) + size = ret; + dbgp_get_data(data, size); + return ret; +} + +static int dbgp_control_msg(unsigned devnum, int requesttype, + int request, int value, int index, void *data, int size) +{ + u32 pids, addr, ctrl; + struct usb_ctrlrequest req; + int read; + int ret; + + read = (requesttype & USB_DIR_IN) != 0; + if (size > (read ? 
DBGP_MAX_PACKET : 0)) + return -1; + + /* Compute the control message */ + req.bRequestType = requesttype; + req.bRequest = request; + req.wValue = cpu_to_le16(value); + req.wIndex = cpu_to_le16(index); + req.wLength = cpu_to_le16(size); + + pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP); + addr = DBGP_EPADDR(devnum, 0); + + ctrl = readl(&ehci_debug->control); + ctrl = dbgp_len_update(ctrl, sizeof(req)); + ctrl |= DBGP_OUT; + ctrl |= DBGP_GO; + + /* Send the setup message */ + dbgp_set_data(&req, sizeof(req)); + writel(addr, &ehci_debug->address); + writel(pids, &ehci_debug->pids); + ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS); + if (ret < 0) + return ret; + + /* Read the result */ + return dbgp_bulk_read(devnum, 0, data, size, DBGP_LOOPS); +} + +/* Find a PCI capability */ +static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap) +{ + u8 pos; + int bytes; + + if (!(read_pci_config_16(num, slot, func, PCI_STATUS) & + PCI_STATUS_CAP_LIST)) + return 0; + + pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST); + for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { + u8 id; + + pos &= ~3; + id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID); + if (id == 0xff) + break; + if (id == cap) + return pos; + + pos = read_pci_config_byte(num, slot, func, + pos+PCI_CAP_LIST_NEXT); + } + return 0; +} + +static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func) +{ + u32 class; + + class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); + if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI) + return 0; + + return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG); +} + +static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc) +{ + u32 bus, slot, func; + + for (bus = 0; bus < 256; bus++) { + for (slot = 0; slot < 32; slot++) { + for (func = 0; func < 8; func++) { + unsigned cap; + + cap = __find_dbgp(bus, slot, func); + + if (!cap) + continue; + if (ehci_num-- != 0) + continue; + *rbus = bus; + *rslot = slot; + *rfunc = func; + return cap; + } + } + } + return 0; +} + +static int dbgp_ehci_startup(void) +{ + u32 ctrl, cmd, status; + int loop; + + /* Claim ownership, but do not enable yet */ + ctrl = readl(&ehci_debug->control); + ctrl |= DBGP_OWNER; + ctrl &= ~(DBGP_ENABLED | DBGP_INUSE); + writel(ctrl, &ehci_debug->control); + udelay(1); + + dbgp_ehci_status("EHCI startup"); + /* Start the ehci running */ + cmd = readl(&ehci_regs->command); + cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET); + cmd |= CMD_RUN; + writel(cmd, &ehci_regs->command); + + /* Ensure everything is routed to the EHCI */ + writel(FLAG_CF, &ehci_regs->configured_flag); + + /* Wait until the controller is no longer halted */ + loop = 1000; + do { + status = readl(&ehci_regs->status); + if (!(status & STS_HALT)) + break; + udelay(1); + } while (--loop > 0); + + if (!loop) { + dbgp_printk("ehci can not be started\n"); + return -ENODEV; + } + dbgp_printk("ehci started\n"); + return 0; +} + +static int dbgp_ehci_controller_reset(void) +{ + int loop = 250 * 1000; + u32 cmd; + + /* Reset the EHCI controller */ + cmd = readl(&ehci_regs->command); + cmd |= CMD_RESET; + writel(cmd, &ehci_regs->command); + do { + cmd = readl(&ehci_regs->command); + } while ((cmd & CMD_RESET) && (--loop > 0)); + + if (!loop) { + dbgp_printk("can not reset ehci\n"); + return -1; + } + dbgp_ehci_status("ehci reset done"); + return 0; +} +static int ehci_wait_for_port(int port); +/* Return 0 on success + * Return -ENODEV for any general failure + * Return -EIO if wait for port fails + */ +static 
int _dbgp_external_startup(void) +{ + int devnum; + struct usb_debug_descriptor dbgp_desc; + int ret; + u32 ctrl, portsc, cmd; + int dbg_port = dbgp_phys_port; + int tries = 3; + int reset_port_tries = 1; + int try_hard_once = 1; + +try_port_reset_again: + ret = dbgp_ehci_startup(); + if (ret) + return ret; + + /* Wait for a device to show up in the debug port */ + ret = ehci_wait_for_port(dbg_port); + if (ret < 0) { + portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + if (!(portsc & PORT_CONNECT) && try_hard_once) { + /* Last ditch effort to try to force enable + * the debug device by using the packet test + * ehci command to try and wake it up. */ + try_hard_once = 0; + cmd = readl(&ehci_regs->command); + cmd &= ~CMD_RUN; + writel(cmd, &ehci_regs->command); + portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + portsc |= PORT_TEST_PKT; + writel(portsc, &ehci_regs->port_status[dbg_port - 1]); + dbgp_ehci_status("Trying to force debug port online"); + mdelay(50); + dbgp_ehci_controller_reset(); + goto try_port_reset_again; + } else if (reset_port_tries--) { + goto try_port_reset_again; + } + dbgp_printk("No device found in debug port\n"); + return -EIO; + } + dbgp_ehci_status("wait for port done"); + + /* Enable the debug port */ + ctrl = readl(&ehci_debug->control); + ctrl |= DBGP_CLAIM; + writel(ctrl, &ehci_debug->control); + ctrl = readl(&ehci_debug->control); + if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) { + dbgp_printk("No device in debug port\n"); + writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control); + return -ENODEV; + } + dbgp_ehci_status("debug ported enabled"); + + /* Completely transfer the debug device to the debug controller */ + portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + portsc &= ~PORT_PE; + writel(portsc, &ehci_regs->port_status[dbg_port - 1]); + + dbgp_mdelay(100); + +try_again: + /* Find the debug device and make it device number 127 */ + for (devnum = 0; devnum <= 127; devnum++) { + ret = dbgp_control_msg(devnum, + USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, + &dbgp_desc, sizeof(dbgp_desc)); + if (ret > 0) + break; + } + if (devnum > 127) { + dbgp_printk("Could not find attached debug device\n"); + goto err; + } + dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint; + dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint; + + /* Move the device to 127 if it isn't already there */ + if (devnum != USB_DEBUG_DEVNUM) { + ret = dbgp_control_msg(devnum, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); + if (ret < 0) { + dbgp_printk("Could not move attached device to %d\n", + USB_DEBUG_DEVNUM); + goto err; + } + dbgp_printk("debug device renamed to 127\n"); + } + + /* Enable the debug interface */ + ret = dbgp_control_msg(USB_DEBUG_DEVNUM, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0); + if (ret < 0) { + dbgp_printk(" Could not enable the debug device\n"); + goto err; + } + dbgp_printk("debug interface enabled\n"); + /* Perform a small write to get the even/odd data state in sync + */ + ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1); + if (ret < 0) { + dbgp_printk("dbgp_bulk_write failed: %d\n", ret); + goto err; + } + dbgp_printk("small write done\n"); + dbgp_not_safe = 0; + + return 0; +err: + if (tries--) + goto try_again; + return -ENODEV; +} + +static int ehci_reset_port(int port) +{ + u32 portsc; + u32 delay_time, delay; + int loop; + + dbgp_ehci_status("reset port"); 
+ /* Reset the usb debug port */ + portsc = readl(&ehci_regs->port_status[port - 1]); + portsc &= ~PORT_PE; + portsc |= PORT_RESET; + writel(portsc, &ehci_regs->port_status[port - 1]); + + delay = HUB_ROOT_RESET_TIME; + for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT; + delay_time += delay) { + dbgp_mdelay(delay); + portsc = readl(&ehci_regs->port_status[port - 1]); + if (!(portsc & PORT_RESET)) + break; + } + if (portsc & PORT_RESET) { + /* force reset to complete */ + loop = 100 * 1000; + writel(portsc & ~(PORT_RWC_BITS | PORT_RESET), + &ehci_regs->port_status[port - 1]); + do { + udelay(1); + portsc = readl(&ehci_regs->port_status[port-1]); + } while ((portsc & PORT_RESET) && (--loop > 0)); + } + + /* Device went away? */ + if (!(portsc & PORT_CONNECT)) + return -ENOTCONN; + + /* bomb out completely if something weird happened */ + if ((portsc & PORT_CSC)) + return -EINVAL; + + /* If we've finished resetting, then break out of the loop */ + if (!(portsc & PORT_RESET) && (portsc & PORT_PE)) + return 0; + return -EBUSY; +} + +static int ehci_wait_for_port(int port) +{ + u32 status; + int ret, reps; + + for (reps = 0; reps < 300; reps++) { + status = readl(&ehci_regs->status); + if (status & STS_PCD) + break; + dbgp_mdelay(1); + } + ret = ehci_reset_port(port); + if (ret == 0) + return 0; + return -ENOTCONN; +} + +typedef void (*set_debug_port_t)(int port); + +static void __init default_set_debug_port(int port) +{ +} + +static set_debug_port_t __initdata set_debug_port = default_set_debug_port; + +static void __init nvidia_set_debug_port(int port) +{ + u32 dword; + dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, + 0x74); + dword &= ~(0x0f<<12); + dword |= ((port & 0x0f)<<12); + write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74, + dword); + dbgp_printk("set debug port to %d\n", port); +} + +static void __init detect_set_debug_port(void) +{ + u32 vendorid; + + vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, + 0x00); + + if ((vendorid & 0xffff) == 0x10de) { + dbgp_printk("using nvidia set_debug_port\n"); + set_debug_port = nvidia_set_debug_port; + } +} + +/* The code in early_ehci_bios_handoff() is derived from the usb pci + * quirk initialization, but altered so as to use the early PCI + * routines. */ +#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */ +#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */ +static void __init early_ehci_bios_handoff(void) +{ + u32 hcc_params = readl(&ehci_caps->hcc_params); + int offset = (hcc_params >> 8) & 0xff; + u32 cap; + int msec; + + if (!offset) + return; + + cap = read_pci_config(ehci_dev.bus, ehci_dev.slot, + ehci_dev.func, offset); + dbgp_printk("dbgp: ehci BIOS state %08x\n", cap); + + if ((cap & 0xff) == 1 && (cap & EHCI_USBLEGSUP_BIOS)) { + dbgp_printk("dbgp: BIOS handoff\n"); + write_pci_config_byte(ehci_dev.bus, ehci_dev.slot, + ehci_dev.func, offset + 3, 1); + } + + /* if boot firmware now owns EHCI, spin till it hands it over. */ + msec = 1000; + while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) { + mdelay(10); + msec -= 10; + cap = read_pci_config(ehci_dev.bus, ehci_dev.slot, + ehci_dev.func, offset); + } + + if (cap & EHCI_USBLEGSUP_BIOS) { + /* well, possibly buggy BIOS... 
try to shut it down, + * and hope nothing goes too wrong */ + dbgp_printk("dbgp: BIOS handoff failed: %08x\n", cap); + write_pci_config_byte(ehci_dev.bus, ehci_dev.slot, + ehci_dev.func, offset + 2, 0); + } + + /* just in case, always disable EHCI SMIs */ + write_pci_config_byte(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, + offset + EHCI_USBLEGCTLSTS, 0); +} + +static int __init ehci_setup(void) +{ + u32 ctrl, portsc, hcs_params; + u32 debug_port, new_debug_port = 0, n_ports; + int ret, i; + int port_map_tried; + int playtimes = 3; + + early_ehci_bios_handoff(); + +try_next_time: + port_map_tried = 0; + +try_next_port: + + hcs_params = readl(&ehci_caps->hcs_params); + debug_port = HCS_DEBUG_PORT(hcs_params); + dbgp_phys_port = debug_port; + n_ports = HCS_N_PORTS(hcs_params); + + dbgp_printk("debug_port: %d\n", debug_port); + dbgp_printk("n_ports: %d\n", n_ports); + dbgp_ehci_status(""); + + for (i = 1; i <= n_ports; i++) { + portsc = readl(&ehci_regs->port_status[i-1]); + dbgp_printk("portstatus%d: %08x\n", i, portsc); + } + + if (port_map_tried && (new_debug_port != debug_port)) { + if (--playtimes) { + set_debug_port(new_debug_port); + goto try_next_time; + } + return -1; + } + + /* Only reset the controller if it is not already in the + * configured state */ + if (!(readl(&ehci_regs->configured_flag) & FLAG_CF)) { + if (dbgp_ehci_controller_reset() != 0) + return -1; + } else { + dbgp_ehci_status("ehci skip - already configured"); + } + + ret = _dbgp_external_startup(); + if (ret == -EIO) + goto next_debug_port; + + if (ret < 0) { + /* Things didn't work so remove my claim */ + ctrl = readl(&ehci_debug->control); + ctrl &= ~(DBGP_CLAIM | DBGP_OUT); + writel(ctrl, &ehci_debug->control); + return -1; + } + return 0; + +next_debug_port: + port_map_tried |= (1<<(debug_port - 1)); + new_debug_port = ((debug_port-1+1)%n_ports) + 1; + if (port_map_tried != ((1<<n_ports) - 1)) { + set_debug_port(new_debug_port); + goto try_next_port; + } + if (--playtimes) { + set_debug_port(new_debug_port); + goto try_next_time; + } + + return -1; +} + +int __init early_dbgp_init(char *s) +{ + u32 debug_port, bar, offset; + u32 bus, slot, func, cap; + void __iomem *ehci_bar; + u32 dbgp_num; + u32 bar_val; + char *e; + int ret; + u8 byte; + + if (!early_pci_allowed()) + return -1; + + dbgp_num = 0; + if (*s) + dbgp_num = simple_strtoul(s, &e, 10); + dbgp_printk("dbgp_num: %d\n", dbgp_num); + + cap = find_dbgp(dbgp_num, &bus, &slot, &func); + if (!cap) + return -1; + + dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot, + func); + + debug_port = read_pci_config(bus, slot, func, cap); + bar = (debug_port >> 29) & 0x7; + bar = (bar * 4) + 0xc; + offset = (debug_port >> 16) & 0xfff; + dbgp_printk("bar: %02x offset: %03x\n", bar, offset); + if (bar != PCI_BASE_ADDRESS_0) { + dbgp_printk("only debug ports on bar 1 handled.\n"); + + return -1; + } + + bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0); + dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset); + if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) { + dbgp_printk("only simple 32bit mmio bars supported\n"); + + return -1; + } + + /* double check if the mem space is enabled */ + byte = read_pci_config_byte(bus, slot, func, 0x04); + if (!(byte & 0x2)) { + byte |= 0x02; + write_pci_config_byte(bus, slot, func, 0x04, byte); + dbgp_printk("mmio for ehci enabled\n"); + } + + /* + * FIXME I don't have the bar size so just guess PAGE_SIZE is more + * than enough. 1K is the biggest I have seen. 
+ */ + set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK); + ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE); + ehci_bar += bar_val & ~PAGE_MASK; + dbgp_printk("ehci_bar: %p\n", ehci_bar); + + ehci_caps = ehci_bar; + ehci_regs = ehci_bar + EARLY_HC_LENGTH(readl(&ehci_caps->hc_capbase)); + ehci_debug = ehci_bar + offset; + ehci_dev.bus = bus; + ehci_dev.slot = slot; + ehci_dev.func = func; + + detect_set_debug_port(); + + ret = ehci_setup(); + if (ret < 0) { + dbgp_printk("ehci_setup failed\n"); + ehci_debug = NULL; + + return -1; + } + dbgp_ehci_status("early_init_complete"); + + return 0; +} + +static void early_dbgp_write(struct console *con, const char *str, u32 n) +{ + int chunk; + char buf[DBGP_MAX_PACKET]; + int use_cr = 0; + u32 cmd, ctrl; + int reset_run = 0; + + if (!ehci_debug || dbgp_not_safe) + return; + + cmd = readl(&ehci_regs->command); + if (unlikely(!(cmd & CMD_RUN))) { + /* If the ehci controller is not in the run state do extended + * checks to see if the acpi or some other initialization also + * reset the ehci debug port */ + ctrl = readl(&ehci_debug->control); + if (!(ctrl & DBGP_ENABLED)) { + dbgp_not_safe = 1; + _dbgp_external_startup(); + } else { + cmd |= CMD_RUN; + writel(cmd, &ehci_regs->command); + reset_run = 1; + } + } + while (n > 0) { + for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0; + str++, chunk++, n--) { + if (!use_cr && *str == '\n') { + use_cr = 1; + buf[chunk] = '\r'; + str--; + n++; + continue; + } + if (use_cr) + use_cr = 0; + buf[chunk] = *str; + } + if (chunk > 0) { + dbgp_bulk_write(USB_DEBUG_DEVNUM, + dbgp_endpoint_out, buf, chunk); + } + } + if (unlikely(reset_run)) { + cmd = readl(&ehci_regs->command); + cmd &= ~CMD_RUN; + writel(cmd, &ehci_regs->command); + } +} + +struct console early_dbgp_console = { + .name = "earlydbg", + .write = early_dbgp_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +#if IS_ENABLED(CONFIG_USB) +int dbgp_reset_prep(struct usb_hcd *hcd) +{ + int ret = xen_dbgp_reset_prep(hcd); + u32 ctrl; + + if (ret) + return ret; + + dbgp_not_safe = 1; + if (!ehci_debug) + return 0; + + if ((early_dbgp_console.index != -1 && + !(early_dbgp_console.flags & CON_BOOT)) || + dbgp_kgdb_mode) + return 1; + /* This means the console is not initialized, or should get + * shutdown so as to allow for reuse of the usb device, which + * means it is time to shutdown the usb debug port. 
*/ + ctrl = readl(&ehci_debug->control); + if (ctrl & DBGP_ENABLED) { + ctrl &= ~(DBGP_CLAIM); + writel(ctrl, &ehci_debug->control); + } + return 0; +} +EXPORT_SYMBOL_GPL(dbgp_reset_prep); + +int dbgp_external_startup(struct usb_hcd *hcd) +{ + return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); +} +EXPORT_SYMBOL_GPL(dbgp_external_startup); +#endif /* USB */ + +#ifdef CONFIG_KGDB + +static char kgdbdbgp_buf[DBGP_MAX_PACKET]; +static int kgdbdbgp_buf_sz; +static int kgdbdbgp_buf_idx; +static int kgdbdbgp_loop_cnt = DBGP_LOOPS; + +static int kgdbdbgp_read_char(void) +{ + int ret; + + if (kgdbdbgp_buf_idx < kgdbdbgp_buf_sz) { + char ch = kgdbdbgp_buf[kgdbdbgp_buf_idx++]; + return ch; + } + + ret = dbgp_bulk_read(USB_DEBUG_DEVNUM, dbgp_endpoint_in, + &kgdbdbgp_buf, DBGP_MAX_PACKET, + kgdbdbgp_loop_cnt); + if (ret <= 0) + return NO_POLL_CHAR; + kgdbdbgp_buf_sz = ret; + kgdbdbgp_buf_idx = 1; + return kgdbdbgp_buf[0]; +} + +static void kgdbdbgp_write_char(u8 chr) +{ + early_dbgp_write(NULL, &chr, 1); +} + +static struct kgdb_io kgdbdbgp_io_ops = { + .name = "kgdbdbgp", + .read_char = kgdbdbgp_read_char, + .write_char = kgdbdbgp_write_char, +}; + +static int kgdbdbgp_wait_time; + +static int __init kgdbdbgp_parse_config(char *str) +{ + char *ptr; + + if (!ehci_debug) { + if (early_dbgp_init(str)) + return -1; + } + ptr = strchr(str, ','); + if (ptr) { + ptr++; + kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); + } + kgdb_register_io_module(&kgdbdbgp_io_ops); + if (early_dbgp_console.index != -1) + kgdbdbgp_io_ops.cons = &early_dbgp_console; + + return 0; +} +early_param("kgdbdbgp", kgdbdbgp_parse_config); + +static int kgdbdbgp_reader_thread(void *ptr) +{ + int ret; + + while (readl(&ehci_debug->control) & DBGP_ENABLED) { + kgdbdbgp_loop_cnt = 1; + ret = kgdbdbgp_read_char(); + kgdbdbgp_loop_cnt = DBGP_LOOPS; + if (ret != NO_POLL_CHAR) { + if (ret == 0x3 || ret == '$') { + if (ret == '$') + kgdbdbgp_buf_idx--; + kgdb_breakpoint(); + } + continue; + } + schedule_timeout_interruptible(kgdbdbgp_wait_time * HZ); + } + return 0; +} + +static int __init kgdbdbgp_start_thread(void) +{ + if (dbgp_kgdb_mode && kgdbdbgp_wait_time) + kthread_run(kgdbdbgp_reader_thread, NULL, "%s", "dbgp"); + + return 0; +} +device_initcall(kgdbdbgp_start_thread); +#endif /* CONFIG_KGDB */ diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c new file mode 100644 index 0000000000..341408410e --- /dev/null +++ b/drivers/usb/early/xhci-dbc.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xhci-dbc.c - xHCI debug capability early driver + * + * Copyright (C) 2016 Intel Corporation + * + * Author: Lu Baolu <baolu.lu@linux.intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include <linux/console.h> +#include <linux/pci_regs.h> +#include <linux/pci_ids.h> +#include <linux/memblock.h> +#include <linux/io.h> +#include <asm/pci-direct.h> +#include <asm/fixmap.h> +#include <linux/bcd.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/usb/xhci-dbgp.h> + +#include "../host/xhci.h" +#include "xhci-dbc.h" + +static struct xdbc_state xdbc; +static bool early_console_keep; + +#ifdef XDBC_TRACE +#define xdbc_trace trace_printk +#else +static inline void xdbc_trace(const char *fmt, ...) 
{ } +#endif /* XDBC_TRACE */ + +static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func) +{ + u64 val64, sz64, mask64; + void __iomem *base; + u32 val, sz; + u8 byte; + + val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0); + write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0); + sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0); + write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val); + + if (val == 0xffffffff || sz == 0xffffffff) { + pr_notice("invalid mmio bar\n"); + return NULL; + } + + val64 = val & PCI_BASE_ADDRESS_MEM_MASK; + sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK; + mask64 = PCI_BASE_ADDRESS_MEM_MASK; + + if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) { + val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4); + write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0); + sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4); + write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val); + + val64 |= (u64)val << 32; + sz64 |= (u64)sz << 32; + mask64 |= ~0ULL << 32; + } + + sz64 &= mask64; + + if (!sz64) { + pr_notice("invalid mmio address\n"); + return NULL; + } + + sz64 = 1ULL << __ffs64(sz64); + + /* Check if the mem space is enabled: */ + byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND); + if (!(byte & PCI_COMMAND_MEMORY)) { + byte |= PCI_COMMAND_MEMORY; + write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte); + } + + xdbc.xhci_start = val64; + xdbc.xhci_length = sz64; + base = early_ioremap(val64, sz64); + + return base; +} + +static void * __init xdbc_get_page(dma_addr_t *dma_addr) +{ + void *virt; + + virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!virt) + return NULL; + + if (dma_addr) + *dma_addr = (dma_addr_t)__pa(virt); + + return virt; +} + +static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f) +{ + u32 bus, dev, func, class; + + for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) { + for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) { + for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) { + + class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION); + if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI) + continue; + + if (xdbc_num-- != 0) + continue; + + *b = bus; + *d = dev; + *f = func; + + return 0; + } + } + } + + return -1; +} + +static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay) +{ + u32 result; + + /* Can not use readl_poll_timeout_atomic() for early boot things */ + do { + result = readl(ptr); + result &= mask; + if (result == done) + return 0; + udelay(delay); + wait -= delay; + } while (wait > 0); + + return -ETIMEDOUT; +} + +static void __init xdbc_bios_handoff(void) +{ + int offset, timeout; + u32 val; + + offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY); + val = readl(xdbc.xhci_base + offset); + + if (val & XHCI_HC_BIOS_OWNED) { + writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset); + timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10); + + if (timeout) { + pr_notice("failed to hand over xHCI control from BIOS\n"); + writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset); + } + } + + /* Disable BIOS SMIs and clear all SMI events: */ + val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET); + val &= XHCI_LEGACY_DISABLE_SMI; + val |= XHCI_LEGACY_SMI_EVENTS; + writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET); +} + +static int __init +xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring) +{ + seg->trbs = 
xdbc_get_page(&seg->dma); + if (!seg->trbs) + return -ENOMEM; + + ring->segment = seg; + + return 0; +} + +static void __init xdbc_free_ring(struct xdbc_ring *ring) +{ + struct xdbc_segment *seg = ring->segment; + + if (!seg) + return; + + memblock_phys_free(seg->dma, PAGE_SIZE); + ring->segment = NULL; +} + +static void xdbc_reset_ring(struct xdbc_ring *ring) +{ + struct xdbc_segment *seg = ring->segment; + struct xdbc_trb *link_trb; + + memset(seg->trbs, 0, PAGE_SIZE); + + ring->enqueue = seg->trbs; + ring->dequeue = seg->trbs; + ring->cycle_state = 1; + + if (ring != &xdbc.evt_ring) { + link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1]; + link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma)); + link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma)); + link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE); + } +} + +static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size) +{ + int i; + + for (i = 0; i < size; i++) + s[i] = cpu_to_le16(c[i]); +} + +static void xdbc_mem_init(void) +{ + struct xdbc_ep_context *ep_in, *ep_out; + struct usb_string_descriptor *s_desc; + struct xdbc_erst_entry *entry; + struct xdbc_strings *strings; + struct xdbc_context *ctx; + unsigned int max_burst; + u32 string_length; + int index = 0; + u32 dev_info; + + xdbc_reset_ring(&xdbc.evt_ring); + xdbc_reset_ring(&xdbc.in_ring); + xdbc_reset_ring(&xdbc.out_ring); + memset(xdbc.table_base, 0, PAGE_SIZE); + memset(xdbc.out_buf, 0, PAGE_SIZE); + + /* Initialize event ring segment table: */ + xdbc.erst_size = 16; + xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE; + xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE; + + index += XDBC_ERST_ENTRY_NUM; + entry = (struct xdbc_erst_entry *)xdbc.erst_base; + + entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma); + entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT); + entry->__reserved_0 = 0; + + /* Initialize ERST registers: */ + writel(1, &xdbc.xdbc_reg->ersts); + xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba); + xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp); + + /* Debug capability contexts: */ + xdbc.dbcc_size = 64 * 3; + xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE; + xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE; + + index += XDBC_DBCC_ENTRY_NUM; + + /* Popluate the strings: */ + xdbc.string_size = sizeof(struct xdbc_strings); + xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE; + xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE; + strings = (struct xdbc_strings *)xdbc.string_base; + + index += XDBC_STRING_ENTRY_NUM; + + /* Serial string: */ + s_desc = (struct usb_string_descriptor *)strings->serial; + s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2; + s_desc->bDescriptorType = USB_DT_STRING; + + xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL)); + string_length = s_desc->bLength; + string_length <<= 8; + + /* Product string: */ + s_desc = (struct usb_string_descriptor *)strings->product; + s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2; + s_desc->bDescriptorType = USB_DT_STRING; + + xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT)); + string_length += s_desc->bLength; + string_length <<= 8; + + /* Manufacture string: */ + s_desc = (struct usb_string_descriptor *)strings->manufacturer; + s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2; + s_desc->bDescriptorType = USB_DT_STRING; + + xdbc_put_utf16(s_desc->wData, 
XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER)); + string_length += s_desc->bLength; + string_length <<= 8; + + /* String0: */ + strings->string0[0] = 4; + strings->string0[1] = USB_DT_STRING; + strings->string0[2] = 0x09; + strings->string0[3] = 0x04; + + string_length += 4; + + /* Populate info Context: */ + ctx = (struct xdbc_context *)xdbc.dbcc_base; + + ctx->info.string0 = cpu_to_le64(xdbc.string_dma); + ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH); + ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2); + ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3); + ctx->info.length = cpu_to_le32(string_length); + + /* Populate bulk out endpoint context: */ + max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control)); + ep_out = (struct xdbc_ep_context *)&ctx->out; + + ep_out->ep_info1 = 0; + ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst)); + ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state); + + /* Populate bulk in endpoint context: */ + ep_in = (struct xdbc_ep_context *)&ctx->in; + + ep_in->ep_info1 = 0; + ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst)); + ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state); + + /* Set DbC context and info registers: */ + xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp); + + dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL); + writel(dev_info, &xdbc.xdbc_reg->devinfo1); + + dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID); + writel(dev_info, &xdbc.xdbc_reg->devinfo2); + + xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET; + xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET; +} + +static void xdbc_do_reset_debug_port(u32 id, u32 count) +{ + void __iomem *ops_reg; + void __iomem *portsc; + u32 val, cap_length; + int i; + + cap_length = readl(xdbc.xhci_base) & 0xff; + ops_reg = xdbc.xhci_base + cap_length; + + id--; + for (i = id; i < (id + count); i++) { + portsc = ops_reg + 0x400 + i * 0x10; + val = readl(portsc); + if (!(val & PORT_CONNECT)) + writel(val | PORT_RESET, portsc); + } +} + +static void xdbc_reset_debug_port(void) +{ + u32 val, port_offset, port_count; + int offset = 0; + + do { + offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL); + if (!offset) + break; + + val = readl(xdbc.xhci_base + offset); + if (XHCI_EXT_PORT_MAJOR(val) != 0x3) + continue; + + val = readl(xdbc.xhci_base + offset + 8); + port_offset = XHCI_EXT_PORT_OFF(val); + port_count = XHCI_EXT_PORT_COUNT(val); + + xdbc_do_reset_debug_port(port_offset, port_count); + } while (1); +} + +static void +xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4) +{ + struct xdbc_trb *trb, *link_trb; + + trb = ring->enqueue; + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = cpu_to_le32(field3); + trb->field[3] = cpu_to_le32(field4); + + ++(ring->enqueue); + if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) { + link_trb = ring->enqueue; + if (ring->cycle_state) + link_trb->field[3] |= cpu_to_le32(TRB_CYCLE); + else + link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); + + ring->enqueue = ring->segment->trbs; + ring->cycle_state ^= 1; + } +} + +static void xdbc_ring_doorbell(int target) +{ + writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell); +} + +static int xdbc_start(void) +{ + u32 ctrl, status; + int ret; + + ctrl = 
readl(&xdbc.xdbc_reg->control); + writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control); + ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100); + if (ret) { + xdbc_trace("failed to initialize hardware\n"); + return ret; + } + + /* Reset port to avoid bus hang: */ + if (xdbc.vendor == PCI_VENDOR_ID_INTEL) + xdbc_reset_debug_port(); + + /* Wait for port connection: */ + ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100); + if (ret) { + xdbc_trace("waiting for connection timed out\n"); + return ret; + } + + /* Wait for debug device to be configured: */ + ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100); + if (ret) { + xdbc_trace("waiting for device configuration timed out\n"); + return ret; + } + + /* Check port number: */ + status = readl(&xdbc.xdbc_reg->status); + if (!DCST_DEBUG_PORT(status)) { + xdbc_trace("invalid root hub port number\n"); + return -ENODEV; + } + + xdbc.port_number = DCST_DEBUG_PORT(status); + + xdbc_trace("DbC is running now, control 0x%08x port ID %d\n", + readl(&xdbc.xdbc_reg->control), xdbc.port_number); + + return 0; +} + +static int xdbc_bulk_transfer(void *data, int size, bool read) +{ + struct xdbc_ring *ring; + struct xdbc_trb *trb; + u32 length, control; + u32 cycle; + u64 addr; + + if (size > XDBC_MAX_PACKET) { + xdbc_trace("bad parameter, size %d\n", size); + return -EINVAL; + } + + if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) || + !(xdbc.flags & XDBC_FLAGS_CONFIGURED) || + (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) || + (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) { + + xdbc_trace("connection not ready, flags %08x\n", xdbc.flags); + return -EIO; + } + + ring = (read ? &xdbc.in_ring : &xdbc.out_ring); + trb = ring->enqueue; + cycle = ring->cycle_state; + length = TRB_LEN(size); + control = TRB_TYPE(TRB_NORMAL) | TRB_IOC; + + if (cycle) + control &= cpu_to_le32(~TRB_CYCLE); + else + control |= cpu_to_le32(TRB_CYCLE); + + if (read) { + memset(xdbc.in_buf, 0, XDBC_MAX_PACKET); + addr = xdbc.in_dma; + xdbc.flags |= XDBC_FLAGS_IN_PROCESS; + } else { + memcpy_and_pad(xdbc.out_buf, XDBC_MAX_PACKET, data, size, 0); + addr = xdbc.out_dma; + xdbc.flags |= XDBC_FLAGS_OUT_PROCESS; + } + + xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control); + + /* + * Add a barrier between writes of trb fields and flipping + * the cycle bit: + */ + wmb(); + if (cycle) + trb->field[3] |= cpu_to_le32(cycle); + else + trb->field[3] &= cpu_to_le32(~TRB_CYCLE); + + xdbc_ring_doorbell(read ? 
IN_EP_DOORBELL : OUT_EP_DOORBELL); + + return size; +} + +static int xdbc_handle_external_reset(void) +{ + int ret = 0; + + xdbc.flags = 0; + writel(0, &xdbc.xdbc_reg->control); + ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10); + if (ret) + goto reset_out; + + xdbc_mem_init(); + + ret = xdbc_start(); + if (ret < 0) + goto reset_out; + + xdbc_trace("dbc recovered\n"); + + xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED; + + xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); + + return 0; + +reset_out: + xdbc_trace("failed to recover from external reset\n"); + return ret; +} + +static int __init xdbc_early_setup(void) +{ + int ret; + + writel(0, &xdbc.xdbc_reg->control); + ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100); + if (ret) + return ret; + + /* Allocate the table page: */ + xdbc.table_base = xdbc_get_page(&xdbc.table_dma); + if (!xdbc.table_base) + return -ENOMEM; + + /* Get and store the transfer buffer: */ + xdbc.out_buf = xdbc_get_page(&xdbc.out_dma); + if (!xdbc.out_buf) + return -ENOMEM; + + /* Allocate the event ring: */ + ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring); + if (ret < 0) + return ret; + + /* Allocate IN/OUT endpoint transfer rings: */ + ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring); + if (ret < 0) + return ret; + + ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring); + if (ret < 0) + return ret; + + xdbc_mem_init(); + + ret = xdbc_start(); + if (ret < 0) { + writel(0, &xdbc.xdbc_reg->control); + return ret; + } + + xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED; + + xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); + + return 0; +} + +int __init early_xdbc_parse_parameter(char *s, int keep_early) +{ + unsigned long dbgp_num = 0; + u32 bus, dev, func, offset; + char *e; + int ret; + + if (!early_pci_allowed()) + return -EPERM; + + early_console_keep = keep_early; + + if (xdbc.xdbc_reg) + return 0; + + if (*s) { + dbgp_num = simple_strtoul(s, &e, 10); + if (s == e) + dbgp_num = 0; + } + + pr_notice("dbgp_num: %lu\n", dbgp_num); + + /* Locate the host controller: */ + ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func); + if (ret) { + pr_notice("failed to locate xhci host\n"); + return -ENODEV; + } + + xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID); + xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID); + xdbc.bus = bus; + xdbc.dev = dev; + xdbc.func = func; + + /* Map the IO memory: */ + xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func); + if (!xdbc.xhci_base) + return -EINVAL; + + /* Locate DbC registers: */ + offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG); + if (!offset) { + pr_notice("xhci host doesn't support debug capability\n"); + early_iounmap(xdbc.xhci_base, xdbc.xhci_length); + xdbc.xhci_base = NULL; + xdbc.xhci_length = 0; + + return -ENODEV; + } + xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset); + + return 0; +} + +int __init early_xdbc_setup_hardware(void) +{ + int ret; + + if (!xdbc.xdbc_reg) + return -ENODEV; + + xdbc_bios_handoff(); + + raw_spin_lock_init(&xdbc.lock); + + ret = xdbc_early_setup(); + if (ret) { + pr_notice("failed to setup the connection to host\n"); + + xdbc_free_ring(&xdbc.evt_ring); + xdbc_free_ring(&xdbc.out_ring); + xdbc_free_ring(&xdbc.in_ring); + + if (xdbc.table_dma) + memblock_phys_free(xdbc.table_dma, PAGE_SIZE); + + if (xdbc.out_dma) + memblock_phys_free(xdbc.out_dma, PAGE_SIZE); + + xdbc.table_base = NULL; + xdbc.out_buf = NULL; + } + + return ret; +} + +static 
void xdbc_handle_port_status(struct xdbc_trb *evt_trb) +{ + u32 port_reg; + + port_reg = readl(&xdbc.xdbc_reg->portsc); + if (port_reg & PORTSC_CONN_CHANGE) { + xdbc_trace("connect status change event\n"); + + /* Check whether cable unplugged: */ + if (!(port_reg & PORTSC_CONN_STATUS)) { + xdbc.flags = 0; + xdbc_trace("cable unplugged\n"); + } + } + + if (port_reg & PORTSC_RESET_CHANGE) + xdbc_trace("port reset change event\n"); + + if (port_reg & PORTSC_LINK_CHANGE) + xdbc_trace("port link status change event\n"); + + if (port_reg & PORTSC_CONFIG_CHANGE) + xdbc_trace("config error change\n"); + + /* Write back the value to clear RW1C bits: */ + writel(port_reg, &xdbc.xdbc_reg->portsc); +} + +static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb) +{ + u32 comp_code; + int ep_id; + + comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2])); + ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3])); + + switch (comp_code) { + case COMP_SUCCESS: + case COMP_SHORT_PACKET: + break; + case COMP_TRB_ERROR: + case COMP_BABBLE_DETECTED_ERROR: + case COMP_USB_TRANSACTION_ERROR: + case COMP_STALL_ERROR: + default: + if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) + xdbc.flags |= XDBC_FLAGS_OUT_STALL; + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) + xdbc.flags |= XDBC_FLAGS_IN_STALL; + + xdbc_trace("endpoint %d stalled\n", ep_id); + break; + } + + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) { + xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS; + xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); + } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) { + xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS; + } else { + xdbc_trace("invalid endpoint id %d\n", ep_id); + } +} + +static void xdbc_handle_events(void) +{ + struct xdbc_trb *evt_trb; + bool update_erdp = false; + u32 reg; + u8 cmd; + + cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND); + if (!(cmd & PCI_COMMAND_MASTER)) { + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd); + } + + if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) + return; + + /* Handle external reset events: */ + reg = readl(&xdbc.xdbc_reg->control); + if (!(reg & CTRL_DBC_ENABLE)) { + if (xdbc_handle_external_reset()) { + xdbc_trace("failed to recover connection\n"); + return; + } + } + + /* Handle configure-exit event: */ + reg = readl(&xdbc.xdbc_reg->control); + if (reg & CTRL_DBC_RUN_CHANGE) { + writel(reg, &xdbc.xdbc_reg->control); + if (reg & CTRL_DBC_RUN) + xdbc.flags |= XDBC_FLAGS_CONFIGURED; + else + xdbc.flags &= ~XDBC_FLAGS_CONFIGURED; + } + + /* Handle endpoint stall event: */ + reg = readl(&xdbc.xdbc_reg->control); + if (reg & CTRL_HALT_IN_TR) { + xdbc.flags |= XDBC_FLAGS_IN_STALL; + } else { + xdbc.flags &= ~XDBC_FLAGS_IN_STALL; + if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS)) + xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); + } + + if (reg & CTRL_HALT_OUT_TR) + xdbc.flags |= XDBC_FLAGS_OUT_STALL; + else + xdbc.flags &= ~XDBC_FLAGS_OUT_STALL; + + /* Handle the events in the event ring: */ + evt_trb = xdbc.evt_ring.dequeue; + while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) { + /* + * Add a barrier between reading the cycle flag and any + * reads of the event's flags/data below: + */ + rmb(); + + switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) { + case TRB_TYPE(TRB_PORT_STATUS): + xdbc_handle_port_status(evt_trb); + break; + case TRB_TYPE(TRB_TRANSFER): + xdbc_handle_tx_event(evt_trb); + break; + default: + 
break; + } + + ++(xdbc.evt_ring.dequeue); + if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) { + xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs; + xdbc.evt_ring.cycle_state ^= 1; + } + + evt_trb = xdbc.evt_ring.dequeue; + update_erdp = true; + } + + /* Update event ring dequeue pointer: */ + if (update_erdp) + xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp); +} + +static int xdbc_bulk_write(const char *bytes, int size) +{ + int ret, timeout = 0; + unsigned long flags; + +retry: + if (in_nmi()) { + if (!raw_spin_trylock_irqsave(&xdbc.lock, flags)) + return -EAGAIN; + } else { + raw_spin_lock_irqsave(&xdbc.lock, flags); + } + + xdbc_handle_events(); + + /* Check completion of the previous request: */ + if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) { + raw_spin_unlock_irqrestore(&xdbc.lock, flags); + udelay(100); + timeout += 100; + goto retry; + } + + if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) { + raw_spin_unlock_irqrestore(&xdbc.lock, flags); + xdbc_trace("previous transfer not completed yet\n"); + + return -ETIMEDOUT; + } + + ret = xdbc_bulk_transfer((void *)bytes, size, false); + raw_spin_unlock_irqrestore(&xdbc.lock, flags); + + return ret; +} + +static void early_xdbc_write(struct console *con, const char *str, u32 n) +{ + /* static variables are zeroed, so buf is always NULL terminated */ + static char buf[XDBC_MAX_PACKET + 1]; + int chunk, ret; + int use_cr = 0; + + if (!xdbc.xdbc_reg) + return; + + while (n > 0) { + for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) { + + if (!use_cr && *str == '\n') { + use_cr = 1; + buf[chunk] = '\r'; + str--; + n++; + continue; + } + + if (use_cr) + use_cr = 0; + buf[chunk] = *str; + } + + if (chunk > 0) { + ret = xdbc_bulk_write(buf, chunk); + if (ret < 0) + xdbc_trace("missed message {%s}\n", buf); + } + } +} + +static struct console early_xdbc_console = { + .name = "earlyxdbc", + .write = early_xdbc_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +void __init early_xdbc_register_console(void) +{ + if (early_console) + return; + + early_console = &early_xdbc_console; + if (early_console_keep) + early_console->flags &= ~CON_BOOT; + else + early_console->flags |= CON_BOOT; + register_console(early_console); +} + +static void xdbc_unregister_console(void) +{ + if (console_is_registered(&early_xdbc_console)) + unregister_console(&early_xdbc_console); +} + +static int xdbc_scrub_function(void *ptr) +{ + unsigned long flags; + + while (true) { + raw_spin_lock_irqsave(&xdbc.lock, flags); + xdbc_handle_events(); + + if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) { + raw_spin_unlock_irqrestore(&xdbc.lock, flags); + break; + } + + raw_spin_unlock_irqrestore(&xdbc.lock, flags); + schedule_timeout_interruptible(1); + } + + xdbc_unregister_console(); + writel(0, &xdbc.xdbc_reg->control); + xdbc_trace("dbc scrub function exits\n"); + + return 0; +} + +static int __init xdbc_init(void) +{ + unsigned long flags; + void __iomem *base; + int ret = 0; + u32 offset; + + if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) + return 0; + + /* + * It's time to shut down the DbC, so that the debug + * port can be reused by the host controller: + */ + if (early_xdbc_console.index == -1 || + (early_xdbc_console.flags & CON_BOOT)) { + xdbc_trace("hardware not used anymore\n"); + goto free_and_quit; + } + + base = ioremap(xdbc.xhci_start, xdbc.xhci_length); + if (!base) { + xdbc_trace("failed to remap the io address\n"); + ret = -ENOMEM; + goto free_and_quit; + } + + raw_spin_lock_irqsave(&xdbc.lock, flags); + 
early_iounmap(xdbc.xhci_base, xdbc.xhci_length); + xdbc.xhci_base = base; + offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG); + xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset); + raw_spin_unlock_irqrestore(&xdbc.lock, flags); + + kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc"); + + return 0; + +free_and_quit: + xdbc_free_ring(&xdbc.evt_ring); + xdbc_free_ring(&xdbc.out_ring); + xdbc_free_ring(&xdbc.in_ring); + memblock_phys_free(xdbc.table_dma, PAGE_SIZE); + memblock_phys_free(xdbc.out_dma, PAGE_SIZE); + writel(0, &xdbc.xdbc_reg->control); + early_iounmap(xdbc.xhci_base, xdbc.xhci_length); + + return ret; +} +subsys_initcall(xdbc_init); diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h new file mode 100644 index 0000000000..8b4d71de45 --- /dev/null +++ b/drivers/usb/early/xhci-dbc.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * xhci-dbc.h - xHCI debug capability early driver + * + * Copyright (C) 2016 Intel Corporation + * + * Author: Lu Baolu <baolu.lu@linux.intel.com> + */ + +#ifndef __LINUX_XHCI_DBC_H +#define __LINUX_XHCI_DBC_H + +#include <linux/types.h> +#include <linux/usb/ch9.h> + +/* + * xHCI Debug Capability Register interfaces: + */ +struct xdbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; /* Event Ring Segment Table Size*/ + __le32 __reserved_0; /* 0c~0f reserved bits */ + __le64 erstba; /* Event Ring Segment Table Base Address */ + __le64 erdp; /* Event Ring Dequeue Pointer */ + __le32 control; + __le32 status; + __le32 portsc; /* Port status and control */ + __le32 __reserved_1; /* 2b~28 reserved bits */ + __le64 dccp; /* Debug Capability Context Pointer */ + __le32 devinfo1; /* Device Descriptor Info Register 1 */ + __le32 devinfo2; /* Device Descriptor Info Register 2 */ +}; + +#define DEBUG_MAX_BURST(p) (((p) >> 16) & 0xff) + +#define CTRL_DBC_RUN BIT(0) +#define CTRL_PORT_ENABLE BIT(1) +#define CTRL_HALT_OUT_TR BIT(2) +#define CTRL_HALT_IN_TR BIT(3) +#define CTRL_DBC_RUN_CHANGE BIT(4) +#define CTRL_DBC_ENABLE BIT(31) + +#define DCST_DEBUG_PORT(p) (((p) >> 24) & 0xff) + +#define PORTSC_CONN_STATUS BIT(0) +#define PORTSC_CONN_CHANGE BIT(17) +#define PORTSC_RESET_CHANGE BIT(21) +#define PORTSC_LINK_CHANGE BIT(22) +#define PORTSC_CONFIG_CHANGE BIT(23) + +/* + * xHCI Debug Capability data structures: + */ +struct xdbc_trb { + __le32 field[4]; +}; + +struct xdbc_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 __reserved_0; +}; + +struct xdbc_info_context { + __le64 string0; + __le64 manufacturer; + __le64 product; + __le64 serial; + __le32 length; + __le32 __reserved_0[7]; +}; + +struct xdbc_ep_context { + __le32 ep_info1; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 __reserved_0[11]; +}; + +struct xdbc_context { + struct xdbc_info_context info; + struct xdbc_ep_context out; + struct xdbc_ep_context in; +}; + +#define XDBC_INFO_CONTEXT_SIZE 48 +#define XDBC_MAX_STRING_LENGTH 64 +#define XDBC_STRING_MANUFACTURER "Linux Foundation" +#define XDBC_STRING_PRODUCT "Linux USB GDB Target" +#define XDBC_STRING_SERIAL "0001" + +struct xdbc_strings { + char string0[XDBC_MAX_STRING_LENGTH]; + char manufacturer[XDBC_MAX_STRING_LENGTH]; + char product[XDBC_MAX_STRING_LENGTH]; + char serial[XDBC_MAX_STRING_LENGTH]; +}; + +#define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */ +#define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */ +#define XDBC_PRODUCT_ID 0x0011 /* __le16 idProduct; device 0011 */ +#define XDBC_DEVICE_REV 0x0010 /* 0.10 */ + +/* + 
* xHCI Debug Capability software state structures: + */ +struct xdbc_segment { + struct xdbc_trb *trbs; + dma_addr_t dma; +}; + +#define XDBC_TRBS_PER_SEGMENT 256 + +struct xdbc_ring { + struct xdbc_segment *segment; + struct xdbc_trb *enqueue; + struct xdbc_trb *dequeue; + u32 cycle_state; +}; + +/* + * These are the "Endpoint ID" (also known as "Context Index") values for the + * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data + * structure. + * According to the "eXtensible Host Controller Interface for Universal Serial + * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer + * Rings", these should be 0 and 1, and those are the values AMD machines give + * you; but Intel machines seem to use the formula from section "4.5.1 Device + * Context Index", which is supposed to be used for the Device Context only. + * Luckily the values from Intel don't overlap with those from AMD, so we can + * just test for both. + */ +#define XDBC_EPID_OUT 0 +#define XDBC_EPID_IN 1 +#define XDBC_EPID_OUT_INTEL 2 +#define XDBC_EPID_IN_INTEL 3 + +struct xdbc_state { + u16 vendor; + u16 device; + u32 bus; + u32 dev; + u32 func; + void __iomem *xhci_base; + u64 xhci_start; + size_t xhci_length; + int port_number; + + /* DbC register base */ + struct xdbc_regs __iomem *xdbc_reg; + + /* DbC table page */ + dma_addr_t table_dma; + void *table_base; + + /* event ring segment table */ + dma_addr_t erst_dma; + size_t erst_size; + void *erst_base; + + /* event ring segments */ + struct xdbc_ring evt_ring; + struct xdbc_segment evt_seg; + + /* debug capability contexts */ + dma_addr_t dbcc_dma; + size_t dbcc_size; + void *dbcc_base; + + /* descriptor strings */ + dma_addr_t string_dma; + size_t string_size; + void *string_base; + + /* bulk OUT endpoint */ + struct xdbc_ring out_ring; + struct xdbc_segment out_seg; + void *out_buf; + dma_addr_t out_dma; + + /* bulk IN endpoint */ + struct xdbc_ring in_ring; + struct xdbc_segment in_seg; + void *in_buf; + dma_addr_t in_dma; + + u32 flags; + + /* spinlock for early_xdbc_write() reentrancy */ + raw_spinlock_t lock; +}; + +#define XDBC_PCI_MAX_BUSES 256 +#define XDBC_PCI_MAX_DEVICES 32 +#define XDBC_PCI_MAX_FUNCTION 8 + +#define XDBC_TABLE_ENTRY_SIZE 64 +#define XDBC_ERST_ENTRY_NUM 1 +#define XDBC_DBCC_ENTRY_NUM 3 +#define XDBC_STRING_ENTRY_NUM 4 + +/* Bits definitions for xdbc_state.flags: */ +#define XDBC_FLAGS_INITIALIZED BIT(0) +#define XDBC_FLAGS_IN_STALL BIT(1) +#define XDBC_FLAGS_OUT_STALL BIT(2) +#define XDBC_FLAGS_IN_PROCESS BIT(3) +#define XDBC_FLAGS_OUT_PROCESS BIT(4) +#define XDBC_FLAGS_CONFIGURED BIT(5) + +#define XDBC_MAX_PACKET 1024 + +/* Door bell target: */ +#define OUT_EP_DOORBELL 0 +#define IN_EP_DOORBELL 1 +#define DOOR_BELL_TARGET(p) (((p) & 0xff) << 8) + +#define xdbc_read64(regs) xhci_read_64(NULL, (regs)) +#define xdbc_write64(val, regs) xhci_write_64(NULL, (val), (regs)) + +#endif /* __LINUX_XHCI_DBC_H */ |
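The EHCI debug port driven by ehci-dbgp.c above moves at most eight bytes per transaction through two 32-bit data registers (data03 and data47); dbgp_set_data() and dbgp_get_data() simply pack bytes 0-3 into the low word and bytes 4-7 into the high word. A small standalone sketch of that packing, using the hypothetical helper name example_pack8() (not part of the patch):

```c
#include <linux/types.h>

/* Pack up to 8 bytes of 'buf' into the two debug-port data words. */
static void example_pack8(const void *buf, int size, u32 *lo, u32 *hi)
{
	const unsigned char *bytes = buf;
	int i;

	*lo = *hi = 0;
	for (i = 0; i < 4 && i < size; i++)
		*lo |= bytes[i] << (8 * i);		/* bytes 0..3 -> data03 */
	for (; i < 8 && i < size; i++)
		*hi |= bytes[i] << (8 * (i - 4));	/* bytes 4..7 -> data47 */
}
```

The reverse direction in dbgp_get_data() mirrors this: read both words, then shift and mask each byte back out in the same order.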