Diffstat (limited to 'drivers/usb/host')
-rw-r--r-- | drivers/usb/host/ohci.h | 2
-rw-r--r-- | drivers/usb/host/pci-quirks.c | 144
-rw-r--r-- | drivers/usb/host/pci-quirks.h | 34
-rw-r--r-- | drivers/usb/host/xhci-debugfs.c | 2
-rw-r--r-- | drivers/usb/host/xhci-ext-caps.h | 27
-rw-r--r-- | drivers/usb/host/xhci-hub.c | 4
-rw-r--r-- | drivers/usb/host/xhci-mem.c | 96
-rw-r--r-- | drivers/usb/host/xhci-mtk-sch.c | 411
-rw-r--r-- | drivers/usb/host/xhci-mtk.h | 19
-rw-r--r-- | drivers/usb/host/xhci-plat.c | 26
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 118
-rw-r--r-- | drivers/usb/host/xhci-trace.h | 23
-rw-r--r-- | drivers/usb/host/xhci.c | 28
-rw-r--r-- | drivers/usb/host/xhci.h | 48
14 files changed, 700 insertions, 282 deletions
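Several hunks below annotate flexible array members with __counted_by() (ohci.h, xhci.h) and size a trailing array with struct_size() (xhci-mtk-sch.c). The following is a minimal, self-contained sketch of that pattern for reference only; the structure and function names (demo_req, demo_req_alloc) are illustrative assumptions and do not appear in the diff.

#include <linux/overflow.h>   /* struct_size() */
#include <linux/slab.h>

/* Hypothetical example: a trailing array whose length is tracked by @count. */
struct demo_req {
	u16 count;                      /* number of elements in item[] */
	int item[] __counted_by(count); /* lets bounds checkers use @count */
};

static struct demo_req *demo_req_alloc(u16 n, gfp_t flags)
{
	/*
	 * struct_size() computes sizeof(*req) + n * sizeof(req->item[0])
	 * with overflow checking, matching the __counted_by() annotation.
	 */
	struct demo_req *req = kzalloc(struct_size(req, item, n), flags);

	if (req)
		req->count = n;
	return req;
}
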
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index aac6285b37..631dda6174 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -337,7 +337,7 @@ typedef struct urb_priv { u16 length; // # tds in this request u16 td_cnt; // tds already serviced struct list_head pending; - struct td *td[]; // all TDs in this request + struct td *td[] __counted_by(length); // all TDs in this request } urb_priv_t; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 2665832f9a..1f9c1b1435 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -60,6 +60,23 @@ #define EHCI_USBLEGCTLSTS 4 /* legacy control/status */ #define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */ +/* ASMEDIA quirk use */ +#define ASMT_DATA_WRITE0_REG 0xF8 +#define ASMT_DATA_WRITE1_REG 0xFC +#define ASMT_CONTROL_REG 0xE0 +#define ASMT_CONTROL_WRITE_BIT 0x02 +#define ASMT_WRITEREG_CMD 0x10423 +#define ASMT_FLOWCTL_ADDR 0xFA30 +#define ASMT_FLOWCTL_DATA 0xBA +#define ASMT_PSEUDO_DATA 0 + +/* Intel quirk use */ +#define USB_INTEL_XUSB2PR 0xD0 +#define USB_INTEL_USB2PRM 0xD4 +#define USB_INTEL_USB3_PSSEN 0xD8 +#define USB_INTEL_USB3PRM 0xDC + +#ifdef CONFIG_USB_PCI_AMD /* AMD quirk use */ #define AB_REG_BAR_LOW 0xe0 #define AB_REG_BAR_HIGH 0xe1 @@ -93,21 +110,6 @@ #define NB_PIF0_PWRDOWN_0 0x01100012 #define NB_PIF0_PWRDOWN_1 0x01100013 -#define USB_INTEL_XUSB2PR 0xD0 -#define USB_INTEL_USB2PRM 0xD4 -#define USB_INTEL_USB3_PSSEN 0xD8 -#define USB_INTEL_USB3PRM 0xDC - -/* ASMEDIA quirk use */ -#define ASMT_DATA_WRITE0_REG 0xF8 -#define ASMT_DATA_WRITE1_REG 0xFC -#define ASMT_CONTROL_REG 0xE0 -#define ASMT_CONTROL_WRITE_BIT 0x02 -#define ASMT_WRITEREG_CMD 0x10423 -#define ASMT_FLOWCTL_ADDR 0xFA30 -#define ASMT_FLOWCTL_DATA 0xBA -#define ASMT_PSEUDO_DATA 0 - /* * amd_chipset_gen values represent AMD different chipset generations */ @@ -458,50 +460,6 @@ void usb_amd_quirk_pll_disable(void) } EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable); -static int usb_asmedia_wait_write(struct pci_dev *pdev) -{ - unsigned long retry_count; - unsigned char value; - - for (retry_count = 1000; retry_count > 0; --retry_count) { - - pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value); - - if (value == 0xff) { - dev_err(&pdev->dev, "%s: check_ready ERROR", __func__); - return -EIO; - } - - if ((value & ASMT_CONTROL_WRITE_BIT) == 0) - return 0; - - udelay(50); - } - - dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); - return -ETIMEDOUT; -} - -void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) -{ - if (usb_asmedia_wait_write(pdev) != 0) - return; - - /* send command and address to device */ - pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD); - pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR); - pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); - - if (usb_asmedia_wait_write(pdev) != 0) - return; - - /* send data to device */ - pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA); - pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA); - pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); -} -EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol); - void usb_amd_quirk_pll_enable(void) { usb_amd_quirk_pll(0); @@ -630,7 +588,62 @@ bool usb_amd_pt_check_port(struct device *device, int port) return !(value & BIT(port_shift)); } EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); +#endif /* CONFIG_USB_PCI_AMD */ + +static int usb_asmedia_wait_write(struct 
pci_dev *pdev) +{ + unsigned long retry_count; + unsigned char value; + + for (retry_count = 1000; retry_count > 0; --retry_count) { + + pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value); + + if (value == 0xff) { + dev_err(&pdev->dev, "%s: check_ready ERROR", __func__); + return -EIO; + } + + if ((value & ASMT_CONTROL_WRITE_BIT) == 0) + return 0; + + udelay(50); + } + + dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); + return -ETIMEDOUT; +} + +void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) +{ + if (usb_asmedia_wait_write(pdev) != 0) + return; + /* send command and address to device */ + pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD); + pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR); + pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); + + if (usb_asmedia_wait_write(pdev) != 0) + return; + + /* send data to device */ + pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA); + pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA); + pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); +} +EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol); + +static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) +{ + u16 cmd; + + return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask); +} + +#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY) + +#if defined(CONFIG_HAS_IOPORT) && IS_ENABLED(CONFIG_USB_UHCI_HCD) /* * Make sure the controller is completely inactive, unable to * generate interrupts or do DMA. @@ -712,14 +725,7 @@ reset_needed: } EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc); -static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) -{ - u16 cmd; - return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask); -} - #define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO) -#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY) static void quirk_usb_handoff_uhci(struct pci_dev *pdev) { @@ -739,6 +745,12 @@ static void quirk_usb_handoff_uhci(struct pci_dev *pdev) uhci_check_and_reset_hc(pdev, base); } +#else /* defined(CONFIG_HAS_IOPORT && IS_ENABLED(CONFIG_USB_UHCI_HCD) */ + +static void quirk_usb_handoff_uhci(struct pci_dev *pdev) {} + +#endif /* defined(CONFIG_HAS_IOPORT && IS_ENABLED(CONFIG_USB_UHCI_HCD) */ + static int mmio_resource_enabled(struct pci_dev *pdev, int idx) { return pci_resource_start(pdev, idx) && mmio_enabled(pdev); diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index e729de21fa..a5230b0b9e 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -2,9 +2,7 @@ #ifndef __LINUX_USB_PCI_QUIRKS_H #define __LINUX_USB_PCI_QUIRKS_H -#ifdef CONFIG_USB_PCI -void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); -int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); +#ifdef CONFIG_USB_PCI_AMD int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); bool usb_amd_hang_symptom_quirk(void); bool usb_amd_prefetch_quirk(void); @@ -12,23 +10,41 @@ void usb_amd_dev_put(void); bool usb_amd_quirk_pll_check(void); void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); -void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); -void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); -void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); void sb800_prefetch(struct device *dev, int on); bool usb_amd_pt_check_port(struct device *device, int port); #else -struct pci_dev; 
+static inline bool usb_amd_hang_symptom_quirk(void) +{ + return false; +}; +static inline bool usb_amd_prefetch_quirk(void) +{ + return false; +} static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} -static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} static inline void usb_amd_dev_put(void) {} -static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} +static inline bool usb_amd_quirk_pll_check(void) +{ + return false; +} static inline void sb800_prefetch(struct device *dev, int on) {} static inline bool usb_amd_pt_check_port(struct device *device, int port) { return false; } +#endif /* CONFIG_USB_PCI_AMD */ + +#ifdef CONFIG_USB_PCI +void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); +int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); +void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); +void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); +#else +struct pci_dev; +static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} #endif /* CONFIG_USB_PCI */ #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 99baa60ef5..6d142cd61b 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -204,7 +204,7 @@ static void xhci_ring_dump_segment(struct seq_file *s, for (i = 0; i < TRBS_PER_SEGMENT; i++) { trb = &seg->trbs[i]; dma = seg->dma + i * sizeof(*trb); - seq_printf(s, "%pad: %s\n", &dma, + seq_printf(s, "%2u %pad: %s\n", seg->num, &dma, xhci_decode_trb(str, XHCI_MSG_MAX, le32_to_cpu(trb->generic.field[0]), le32_to_cpu(trb->generic.field[1]), le32_to_cpu(trb->generic.field[2]), diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index e8af0a125f..96eb36a587 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -79,6 +79,33 @@ /* true: Controller Not Ready to accept doorbell or op reg writes after reset */ #define XHCI_STS_CNR (1 << 11) +/** + * struct xhci_protocol_caps + * @revision: major revision, minor revision, capability ID, + * and next capability pointer. + * @name_string: Four ASCII characters to say which spec this xHC + * follows, typically "USB ". + * @port_info: Port offset, count, and protocol-defined information. 
+ */ +struct xhci_protocol_caps { + u32 revision; + u32 name_string; + u32 port_info; +}; + +#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff) +#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f) +#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) +#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) + +#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f) +#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03) +#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03) +#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01) +#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03) +#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff) + #include <linux/io.h> /** diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0df5d807a7..0980ade2a2 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1262,7 +1262,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, retval = -ENODEV; break; } - trace_xhci_get_port_status(wIndex, temp); + trace_xhci_get_port_status(port, temp); status = xhci_get_port_status(hcd, bus_state, wIndex, temp, &flags); if (status == 0xffffffff) @@ -1687,7 +1687,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) retval = -ENODEV; break; } - trace_xhci_hub_status_data(i, temp); + trace_xhci_hub_status_data(ports[i], temp); if ((temp & mask) != 0 || (bus_state->port_c_suspend & 1 << i) || diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 0a37f0d511..6211658684 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -29,6 +29,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, unsigned int cycle_state, unsigned int max_packet, + unsigned int num, gfp_t flags) { struct xhci_segment *seg; @@ -60,6 +61,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE); } + seg->num = num; seg->dma = dma; seg->next = NULL; @@ -128,7 +130,7 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *first, struct xhci_segment *last, unsigned int num_segs) { - struct xhci_segment *next; + struct xhci_segment *next, *seg; bool chain_links; if (!ring || !first || !last) @@ -144,13 +146,18 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, xhci_link_segments(last, next, ring->type, chain_links); ring->num_segs += num_segs; - if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { - ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control - &= ~cpu_to_le32(LINK_TOGGLE); - last->trbs[TRBS_PER_SEGMENT-1].link.control - |= cpu_to_le32(LINK_TOGGLE); + if (ring->enq_seg == ring->last_seg) { + if (ring->type != TYPE_EVENT) { + ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control + &= ~cpu_to_le32(LINK_TOGGLE); + last->trbs[TRBS_PER_SEGMENT-1].link.control + |= cpu_to_le32(LINK_TOGGLE); + } ring->last_seg = last; } + + for (seg = last; seg != ring->last_seg; seg = seg->next) + seg->next->num = seg->num + 1; } /* @@ -320,8 +327,9 @@ void xhci_initialize_ring_info(struct xhci_ring *ring, /* Allocate segments and link them for a ring */ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment **first, struct xhci_segment **last, - unsigned int num_segs, unsigned int cycle_state, - enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) + unsigned int num_segs, unsigned int num, + unsigned int cycle_state, enum xhci_ring_type type, + 
unsigned int max_packet, gfp_t flags) { struct xhci_segment *prev; bool chain_links; @@ -331,16 +339,17 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST))); - prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); + prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags); if (!prev) return -ENOMEM; - num_segs--; + num++; *first = prev; - while (num_segs > 0) { + while (num < num_segs) { struct xhci_segment *next; - next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); + next = xhci_segment_alloc(xhci, cycle_state, max_packet, num, + flags); if (!next) { prev = *first; while (prev) { @@ -353,7 +362,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, xhci_link_segments(prev, next, type, chain_links); prev = next; - num_segs--; + num++; } xhci_link_segments(prev, *first, type, chain_links); *last = prev; @@ -388,7 +397,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, return ring; ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, - &ring->last_seg, num_segs, cycle_state, type, + &ring->last_seg, num_segs, 0, cycle_state, type, max_packet, flags); if (ret) goto fail; @@ -428,7 +437,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, int ret; ret = xhci_alloc_segments_for_ring(xhci, &first, &last, - num_new_segs, ring->cycle_state, ring->type, + num_new_segs, ring->enq_seg->num + 1, + ring->cycle_state, ring->type, ring->bounce_buf_len, flags); if (ret) return -ENOMEM; @@ -1766,7 +1776,7 @@ void xhci_free_command(struct xhci_hcd *xhci, kfree(command); } -int xhci_alloc_erst(struct xhci_hcd *xhci, +static int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_ring *evt_ring, struct xhci_erst *erst, gfp_t flags) @@ -1797,23 +1807,13 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, } static void -xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) +xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { - struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - size_t erst_size; - u64 tmp64; u32 tmp; if (!ir) return; - erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries; - if (ir->erst.entries) - dma_free_coherent(dev, erst_size, - ir->erst.entries, - ir->erst.erst_dma_addr); - ir->erst.entries = NULL; - /* * Clean out interrupter registers except ERSTBA. 
Clearing either the * low or high 32 bits of ERSTBA immediately causes the controller to @@ -1824,14 +1824,30 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) tmp &= ERST_SIZE_MASK; writel(tmp, &ir->ir_set->erst_size); - tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); - tmp64 &= (u64) ERST_PTR_MASK; - xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue); + xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); } +} - /* free interrrupter event ring */ +static void +xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) +{ + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + size_t erst_size; + + if (!ir) + return; + + erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries; + if (ir->erst.entries) + dma_free_coherent(dev, erst_size, + ir->erst.entries, + ir->erst.erst_dma_addr); + ir->erst.entries = NULL; + + /* free interrupter event ring */ if (ir->event_ring) xhci_ring_free(xhci, ir->event_ring); + ir->event_ring = NULL; kfree(ir); @@ -1844,6 +1860,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) cancel_delayed_work_sync(&xhci->cmd_timer); + xhci_remove_interrupter(xhci, xhci->interrupter); xhci_free_interrupter(xhci, xhci->interrupter); xhci->interrupter = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring"); @@ -1933,7 +1950,6 @@ no_bw: static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { - u64 temp; dma_addr_t deq; deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, @@ -1941,16 +1957,12 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter if (!deq) xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ - temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); - temp &= ERST_PTR_MASK; /* Don't clear the EHB bit (which is RW1C) because * there might be more events to service. 
*/ - temp &= ~ERST_EHB; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Write event ring dequeue pointer, preserving EHB bit"); - xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, - &ir->ir_set->erst_dequeue); + xhci_write_64(xhci, deq & ERST_PTR_MASK, &ir->ir_set->erst_dequeue); } static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, @@ -2238,14 +2250,18 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags) { struct device *dev = xhci_to_hcd(xhci)->self.sysdev; struct xhci_interrupter *ir; + unsigned int num_segs; int ret; ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev)); if (!ir) return NULL; - ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, - 0, flags); + num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2), + ERST_MAX_SEGS); + + ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0, + flags); if (!ir->event_ring) { xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); kfree(ir); @@ -2281,7 +2297,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, /* set ERST count with the number of entries in the segment table */ erst_size = readl(&ir->ir_set->erst_size); erst_size &= ERST_SIZE_MASK; - erst_size |= ERST_NUM_SEGS; + erst_size |= ir->event_ring->num_segs; writel(erst_size, &ir->ir_set->erst_size); erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 579899eb24..61f3f8bbdc 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -19,6 +19,18 @@ #define HS_BW_BOUNDARY 6144 /* usb2 spec section11.18.1: at most 188 FS bytes per microframe */ #define FS_PAYLOAD_MAX 188 +#define LS_PAYLOAD_MAX 18 +/* section 11.18.1, per fs frame */ +#define FS_BW_BOUNDARY 1157 +#define LS_BW_BOUNDARY 144 + +/* + * max number of microframes for split transfer, assume extra-cs budget is 0 + * for fs isoc in : 1 ss + 1 idle + 6 cs (roundup(1023/188)) + */ +#define TT_MICROFRAMES_MAX 8 +/* offset from SS for fs/ls isoc/intr ep (ss + idle) */ +#define CS_OFFSET 2 #define DBG_BUF_EN 64 @@ -237,17 +249,26 @@ static void drop_tt(struct usb_device *udev) static struct mu3h_sch_ep_info * create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev, - struct usb_host_endpoint *ep) + struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx) { struct mu3h_sch_ep_info *sch_ep; struct mu3h_sch_bw_info *bw_info; struct mu3h_sch_tt *tt = NULL; + u32 len; bw_info = get_bw_info(mtk, udev, ep); if (!bw_info) return ERR_PTR(-ENODEV); - sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL); + if (is_fs_or_ls(udev->speed)) + len = TT_MICROFRAMES_MAX; + else if ((udev->speed >= USB_SPEED_SUPER) && + usb_endpoint_xfer_isoc(&ep->desc)) + len = get_esit(ep_ctx); + else + len = 1; + + sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table, len), GFP_KERNEL); if (!sch_ep) return ERR_PTR(-ENOMEM); @@ -279,7 +300,11 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx, u32 mult; u32 esit_pkts; u32 max_esit_payload; + u32 bw_per_microframe; + u32 *bwb_table; + int i; + bwb_table = sch_ep->bw_budget_table; ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2)); @@ -313,7 +338,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx, * opportunities per microframe */ sch_ep->pkts = max_burst + 1; - sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts; + bwb_table[0] = maxpkt * 
sch_ep->pkts; } else if (sch_ep->speed >= USB_SPEED_SUPER) { /* usb3_r1 spec section4.4.7 & 4.4.8 */ sch_ep->cs_count = 0; @@ -330,6 +355,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx, if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) { sch_ep->pkts = esit_pkts; sch_ep->num_budget_microframes = 1; + bwb_table[0] = maxpkt * sch_ep->pkts; } if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) { @@ -346,18 +372,52 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx, DIV_ROUND_UP(esit_pkts, sch_ep->pkts); sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1); + bw_per_microframe = maxpkt * sch_ep->pkts; + + for (i = 0; i < sch_ep->num_budget_microframes - 1; i++) + bwb_table[i] = bw_per_microframe; + + /* last one <= bw_per_microframe */ + bwb_table[i] = maxpkt * esit_pkts - i * bw_per_microframe; } - sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts; } else if (is_fs_or_ls(sch_ep->speed)) { sch_ep->pkts = 1; /* at most one packet for each microframe */ /* - * num_budget_microframes and cs_count will be updated when + * @cs_count will be updated to add extra-cs when * check TT for INT_OUT_EP, ISOC/INT_IN_EP type + * @maxpkt <= 1023; */ sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX); sch_ep->num_budget_microframes = sch_ep->cs_count; - sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX); + + /* init budget table */ + if (ep_type == ISOC_OUT_EP) { + for (i = 0; i < sch_ep->cs_count - 1; i++) + bwb_table[i] = FS_PAYLOAD_MAX; + + bwb_table[i] = maxpkt - i * FS_PAYLOAD_MAX; + } else if (ep_type == INT_OUT_EP) { + /* only first one used (maxpkt <= 64), others zero */ + bwb_table[0] = maxpkt; + } else { /* INT_IN_EP or ISOC_IN_EP */ + bwb_table[0] = 0; /* start split */ + bwb_table[1] = 0; /* idle */ + /* + * @cs_count will be updated according to cs position + * (add 1 or 2 extra-cs), but assume only first + * @num_budget_microframes elements will be used later, + * although in fact it does not (extra-cs budget many receive + * some data for IN ep); + * @cs_count is 1 for INT_IN_EP (maxpkt <= 64); + */ + for (i = 0; i < sch_ep->cs_count - 1; i++) + bwb_table[i + CS_OFFSET] = FS_PAYLOAD_MAX; + + bwb_table[i + CS_OFFSET] = maxpkt - i * FS_PAYLOAD_MAX; + /* ss + idle */ + sch_ep->num_budget_microframes += CS_OFFSET; + } } } @@ -374,7 +434,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw, for (j = 0; j < sch_ep->num_budget_microframes; j++) { k = XHCI_MTK_BW_INDEX(base + j); - bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe; + bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j]; if (bw > max_bw) max_bw = bw; } @@ -382,56 +442,152 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw, return max_bw; } +/* + * for OUT: get first SS consumed bw; + * for IN: get first CS consumed bw; + */ +static u16 get_fs_bw(struct mu3h_sch_ep_info *sch_ep, int offset) +{ + struct mu3h_sch_tt *tt = sch_ep->sch_tt; + u16 fs_bw; + + if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP) + fs_bw = tt->fs_bus_bw_out[XHCI_MTK_BW_INDEX(offset)]; + else /* skip ss + idle */ + fs_bw = tt->fs_bus_bw_in[XHCI_MTK_BW_INDEX(offset + CS_OFFSET)]; + + return fs_bw; +} + static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep, bool used) { - int bw_updated; u32 base; - int i, j; - - bw_updated = sch_ep->bw_cost_per_microframe * (used ? 
1 : -1); + int i, j, k; for (i = 0; i < sch_ep->num_esit; i++) { base = sch_ep->offset + i * sch_ep->esit; - for (j = 0; j < sch_ep->num_budget_microframes; j++) - sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated; + for (j = 0; j < sch_ep->num_budget_microframes; j++) { + k = XHCI_MTK_BW_INDEX(base + j); + if (used) + sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j]; + else + sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j]; + } } } -static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) +static int check_ls_budget_microframes(struct mu3h_sch_ep_info *sch_ep, int offset) +{ + struct mu3h_sch_tt *tt = sch_ep->sch_tt; + int i; + + if (sch_ep->speed != USB_SPEED_LOW) + return 0; + + if (sch_ep->ep_type == INT_OUT_EP) + i = XHCI_MTK_BW_INDEX(offset); + else if (sch_ep->ep_type == INT_IN_EP) + i = XHCI_MTK_BW_INDEX(offset + CS_OFFSET); /* skip ss + idle */ + else + return -EINVAL; + + if (tt->ls_bus_bw[i] + sch_ep->maxpkt > LS_PAYLOAD_MAX) + return -ESCH_BW_OVERFLOW; + + return 0; +} + +static int check_fs_budget_microframes(struct mu3h_sch_ep_info *sch_ep, int offset) { struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 tmp; - int base; + int i, k; + + /* + * for OUT eps, will transfer exactly assigned length of data, + * so can't allocate more than 188 bytes; + * but it's not for IN eps, usually it can't receive full + * 188 bytes in a uframe, if it not assign full 188 bytes, + * can add another one; + */ + for (i = 0; i < sch_ep->num_budget_microframes; i++) { + k = XHCI_MTK_BW_INDEX(offset + i); + if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP) + tmp = tt->fs_bus_bw_out[k] + sch_ep->bw_budget_table[i]; + else /* ep_type : ISOC IN / INTR IN */ + tmp = tt->fs_bus_bw_in[k]; + + if (tmp > FS_PAYLOAD_MAX) + return -ESCH_BW_OVERFLOW; + } + + return 0; +} + +static int check_fs_budget_frames(struct mu3h_sch_ep_info *sch_ep, int offset) +{ + struct mu3h_sch_tt *tt = sch_ep->sch_tt; + u32 head, tail; int i, j, k; + /* bugdet scheduled may cross at most two fs frames */ + j = XHCI_MTK_BW_INDEX(offset) / UFRAMES_PER_FRAME; + k = XHCI_MTK_BW_INDEX(offset + sch_ep->num_budget_microframes - 1) / UFRAMES_PER_FRAME; + + if (j != k) { + head = tt->fs_frame_bw[j]; + tail = tt->fs_frame_bw[k]; + } else { + head = tt->fs_frame_bw[j]; + tail = 0; + } + + j = roundup(offset, UFRAMES_PER_FRAME); + for (i = 0; i < sch_ep->num_budget_microframes; i++) { + if ((offset + i) < j) + head += sch_ep->bw_budget_table[i]; + else + tail += sch_ep->bw_budget_table[i]; + } + + if (head > FS_BW_BOUNDARY || tail > FS_BW_BOUNDARY) + return -ESCH_BW_OVERFLOW; + + return 0; +} + +static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) +{ + int i, base; + int ret = 0; + for (i = 0; i < sch_ep->num_esit; i++) { base = offset + i * sch_ep->esit; - /* - * Compared with hs bus, no matter what ep type, - * the hub will always delay one uframe to send data - */ - for (j = 0; j < sch_ep->num_budget_microframes; j++) { - k = XHCI_MTK_BW_INDEX(base + j); - tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe; - if (tmp > FS_PAYLOAD_MAX) - return -ESCH_BW_OVERFLOW; - } + ret = check_ls_budget_microframes(sch_ep, base); + if (ret) + goto err; + + ret = check_fs_budget_microframes(sch_ep, base); + if (ret) + goto err; + + ret = check_fs_budget_frames(sch_ep, base); + if (ret) + goto err; } - return 0; +err: + return ret; } -static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset) +static int check_ss_and_cs(struct mu3h_sch_ep_info *sch_ep, u32 offset) { u32 start_ss, 
last_ss; u32 start_cs, last_cs; - if (!sch_ep->sch_tt) - return 0; - - start_ss = offset % 8; + start_ss = offset % UFRAMES_PER_FRAME; if (sch_ep->ep_type == ISOC_OUT_EP) { last_ss = start_ss + sch_ep->cs_count - 1; @@ -444,6 +600,7 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset) return -ESCH_SS_Y6; } else { + /* maxpkt <= 1023, cs <= 6 */ u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); /* @@ -454,44 +611,171 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset) return -ESCH_SS_Y6; /* one uframe for ss + one uframe for idle */ - start_cs = (start_ss + 2) % 8; + start_cs = (start_ss + CS_OFFSET) % UFRAMES_PER_FRAME; last_cs = start_cs + cs_count - 1; - if (last_cs > 7) return -ESCH_CS_OVERFLOW; + /* add extra-cs */ + cs_count += (last_cs == 7) ? 1 : 2; if (cs_count > 7) cs_count = 7; /* HW limit */ sch_ep->cs_count = cs_count; - /* ss, idle are ignored */ - sch_ep->num_budget_microframes = cs_count; - /* - * if interval=1, maxp >752, num_budge_micoframe is larger - * than sch_ep->esit, will overstep boundary - */ - if (sch_ep->num_budget_microframes > sch_ep->esit) - sch_ep->num_budget_microframes = sch_ep->esit; } + return 0; +} + +/* + * when isoc-out transfers 188 bytes in a uframe, and send isoc/intr's + * ss token in the uframe, may cause 'bit stuff error' in downstream + * port; + * when isoc-out transfer less than 188 bytes in a uframe, shall send + * isoc-in's ss after isoc-out's ss (but hw can't ensure the sequence, + * so just avoid overlap). + */ +static int check_isoc_ss_overlap(struct mu3h_sch_ep_info *sch_ep, u32 offset) +{ + struct mu3h_sch_tt *tt = sch_ep->sch_tt; + int base; + int i, j, k; + + if (!tt) + return 0; + + for (i = 0; i < sch_ep->num_esit; i++) { + base = offset + i * sch_ep->esit; + + if (sch_ep->ep_type == ISOC_OUT_EP) { + for (j = 0; j < sch_ep->num_budget_microframes; j++) { + k = XHCI_MTK_BW_INDEX(base + j); + if (tt->in_ss_cnt[k]) + return -ESCH_SS_OVERLAP; + } + } else if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) { + k = XHCI_MTK_BW_INDEX(base); + /* only check IN's ss */ + if (tt->fs_bus_bw_out[k]) + return -ESCH_SS_OVERLAP; + } + } + + return 0; +} + +static int check_sch_tt_budget(struct mu3h_sch_ep_info *sch_ep, u32 offset) +{ + int ret; + + ret = check_ss_and_cs(sch_ep, offset); + if (ret) + return ret; + + ret = check_isoc_ss_overlap(sch_ep, offset); + if (ret) + return ret; + return check_fs_bus_bw(sch_ep, offset); } +/* allocate microframes in the ls/fs frame */ +static int alloc_sch_portion_of_frame(struct mu3h_sch_ep_info *sch_ep) +{ + struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info; + const u32 bw_boundary = get_bw_boundary(sch_ep->speed); + u32 bw_max, fs_bw_min; + u32 offset, offset_min; + u16 fs_bw; + int frames; + int i, j; + int ret; + + frames = sch_ep->esit / UFRAMES_PER_FRAME; + + for (i = 0; i < UFRAMES_PER_FRAME; i++) { + fs_bw_min = FS_PAYLOAD_MAX; + offset_min = XHCI_MTK_MAX_ESIT; + + for (j = 0; j < frames; j++) { + offset = (i + j * UFRAMES_PER_FRAME) % sch_ep->esit; + + ret = check_sch_tt_budget(sch_ep, offset); + if (ret) + continue; + + /* check hs bw domain */ + bw_max = get_max_bw(sch_bw, sch_ep, offset); + if (bw_max > bw_boundary) { + ret = -ESCH_BW_OVERFLOW; + continue; + } + + /* use best-fit between frames */ + fs_bw = get_fs_bw(sch_ep, offset); + if (fs_bw < fs_bw_min) { + fs_bw_min = fs_bw; + offset_min = offset; + } + + if (!fs_bw_min) + break; + } + + /* use first-fit between microframes in a frame */ + if (offset_min < XHCI_MTK_MAX_ESIT) 
+ break; + } + + if (offset_min == XHCI_MTK_MAX_ESIT) + return -ESCH_BW_OVERFLOW; + + sch_ep->offset = offset_min; + + return 0; +} + static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used) { struct mu3h_sch_tt *tt = sch_ep->sch_tt; - int bw_updated; + u16 *fs_bus_bw; u32 base; - int i, j; + int i, j, k, f; - bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1); + if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP) + fs_bus_bw = tt->fs_bus_bw_out; + else + fs_bus_bw = tt->fs_bus_bw_in; for (i = 0; i < sch_ep->num_esit; i++) { base = sch_ep->offset + i * sch_ep->esit; - for (j = 0; j < sch_ep->num_budget_microframes; j++) - tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated; + for (j = 0; j < sch_ep->num_budget_microframes; j++) { + k = XHCI_MTK_BW_INDEX(base + j); + f = k / UFRAMES_PER_FRAME; + if (used) { + if (sch_ep->speed == USB_SPEED_LOW) + tt->ls_bus_bw[k] += (u8)sch_ep->bw_budget_table[j]; + + fs_bus_bw[k] += (u16)sch_ep->bw_budget_table[j]; + tt->fs_frame_bw[f] += (u16)sch_ep->bw_budget_table[j]; + } else { + if (sch_ep->speed == USB_SPEED_LOW) + tt->ls_bus_bw[k] -= (u8)sch_ep->bw_budget_table[j]; + + fs_bus_bw[k] -= (u16)sch_ep->bw_budget_table[j]; + tt->fs_frame_bw[f] -= (u16)sch_ep->bw_budget_table[j]; + } + } + + if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) { + k = XHCI_MTK_BW_INDEX(base); + if (used) + tt->in_ss_cnt[k]++; + else + tt->in_ss_cnt[k]--; + } } if (used) @@ -513,7 +797,8 @@ static int load_ep_bw(struct mu3h_sch_bw_info *sch_bw, return 0; } -static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep) +/* allocate microframes for hs/ss/ssp */ +static int alloc_sch_microframes(struct mu3h_sch_ep_info *sch_ep) { struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info; const u32 bw_boundary = get_bw_boundary(sch_ep->speed); @@ -521,16 +806,12 @@ static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep) u32 worst_bw; u32 min_bw = ~0; int min_index = -1; - int ret = 0; /* * Search through all possible schedule microframes. * and find a microframe where its worst bandwidth is minimum. */ for (offset = 0; offset < sch_ep->esit; offset++) { - ret = check_sch_tt(sch_ep, offset); - if (ret) - continue; worst_bw = get_max_bw(sch_bw, sch_ep, offset); if (worst_bw > bw_boundary) @@ -540,21 +821,29 @@ static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep) min_bw = worst_bw; min_index = offset; } - - /* use first-fit for LS/FS */ - if (sch_ep->sch_tt && min_index >= 0) - break; - - if (min_bw == 0) - break; } if (min_index < 0) - return ret ? 
ret : -ESCH_BW_OVERFLOW; + return -ESCH_BW_OVERFLOW; sch_ep->offset = min_index; - return load_ep_bw(sch_bw, sch_ep, true); + return 0; +} + +static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep) +{ + int ret; + + if (sch_ep->sch_tt) + ret = alloc_sch_portion_of_frame(sch_ep); + else + ret = alloc_sch_microframes(sch_ep); + + if (ret) + return ret; + + return load_ep_bw(sch_ep->bw_info, sch_ep, true); } static void destroy_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev, @@ -651,7 +940,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed)); - sch_ep = create_sch_ep(mtk, udev, ep); + sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx); if (IS_ERR_OR_NULL(sch_ep)) return -ENOMEM; diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index ac042077db..f5e2bd66bb 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -30,12 +30,23 @@ #define XHCI_MTK_MAX_ESIT (1 << 6) #define XHCI_MTK_BW_INDEX(x) ((x) & (XHCI_MTK_MAX_ESIT - 1)) +#define UFRAMES_PER_FRAME 8 +#define XHCI_MTK_FRAMES_CNT (XHCI_MTK_MAX_ESIT / UFRAMES_PER_FRAME) + /** - * @fs_bus_bw: array to keep track of bandwidth already used for FS + * @fs_bus_bw_out: save bandwidth used by FS/LS OUT eps in each uframes + * @fs_bus_bw_in: save bandwidth used by FS/LS IN eps in each uframes + * @ls_bus_bw: save bandwidth used by LS eps in each uframes + * @fs_frame_bw: save bandwidth used by FS/LS eps in each FS frames + * @in_ss_cnt: the count of Start-Split for IN eps * @ep_list: Endpoints using this TT */ struct mu3h_sch_tt { - u32 fs_bus_bw[XHCI_MTK_MAX_ESIT]; + u16 fs_bus_bw_out[XHCI_MTK_MAX_ESIT]; + u16 fs_bus_bw_in[XHCI_MTK_MAX_ESIT]; + u8 ls_bus_bw[XHCI_MTK_MAX_ESIT]; + u16 fs_frame_bw[XHCI_MTK_FRAMES_CNT]; + u8 in_ss_cnt[XHCI_MTK_MAX_ESIT]; struct list_head ep_list; }; @@ -58,7 +69,6 @@ struct mu3h_sch_bw_info { * @num_esit: number of @esit in a period * @num_budget_microframes: number of continuous uframes * (@repeat==1) scheduled within the interval - * @bw_cost_per_microframe: bandwidth cost per microframe * @hentry: hash table entry * @endpoint: linked into bandwidth domain which it belongs to * @tt_endpoint: linked into mu3h_sch_tt's list which it belongs to @@ -83,12 +93,12 @@ struct mu3h_sch_bw_info { * times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets * according to @pkts and @repeat. 
normal mode is used by * default + * @bw_budget_table: table to record bandwidth budget per microframe */ struct mu3h_sch_ep_info { u32 esit; u32 num_esit; u32 num_budget_microframes; - u32 bw_cost_per_microframe; struct list_head endpoint; struct hlist_node hentry; struct list_head tt_endpoint; @@ -108,6 +118,7 @@ struct mu3h_sch_ep_info { u32 pkts; u32 cs_count; u32 burst_mode; + u32 bw_budget_table[]; }; #define MU3C_U3_PORT_MAX 4 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 732cdeb739..d68e9abcdc 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -250,6 +250,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s if (device_property_read_bool(tmpdev, "quirk-broken-port-ped")) xhci->quirks |= XHCI_BROKEN_PORT_PED; + if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk")) + xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK; + device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); } @@ -433,7 +436,7 @@ void xhci_plat_remove(struct platform_device *dev) } EXPORT_SYMBOL_GPL(xhci_plat_remove); -static int __maybe_unused xhci_plat_suspend(struct device *dev) +static int xhci_plat_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -461,7 +464,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) return 0; } -static int __maybe_unused xhci_plat_resume(struct device *dev) +static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -483,7 +486,7 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) if (ret) goto disable_clks; - ret = xhci_resume(xhci, PMSG_RESUME); + ret = xhci_resume(xhci, pmsg); if (ret) goto disable_clks; @@ -502,6 +505,16 @@ disable_clks: return ret; } +static int xhci_plat_resume(struct device *dev) +{ + return xhci_plat_resume_common(dev, PMSG_RESUME); +} + +static int xhci_plat_restore(struct device *dev) +{ + return xhci_plat_resume_common(dev, PMSG_RESTORE); +} + static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); @@ -524,7 +537,12 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) } const struct dev_pm_ops xhci_plat_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume) + .suspend = pm_sleep_ptr(xhci_plat_suspend), + .resume = pm_sleep_ptr(xhci_plat_resume), + .freeze = pm_sleep_ptr(xhci_plat_suspend), + .thaw = pm_sleep_ptr(xhci_plat_resume), + .poweroff = pm_sleep_ptr(xhci_plat_suspend), + .restore = pm_sleep_ptr(xhci_plat_restore), SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3e5dc0723a..9673354d70 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -144,7 +144,7 @@ static void next_trb(struct xhci_hcd *xhci, struct xhci_segment **seg, union xhci_trb **trb) { - if (trb_is_link(*trb)) { + if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) { *seg = (*seg)->next; *trb = ((*seg)->trbs); } else { @@ -450,8 +450,9 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) * In the future we should distinguish between -ENODEV and -ETIMEDOUT * and try to recover a -ETIMEDOUT with a host controller reset. 
*/ - ret = xhci_handshake(&xhci->op_regs->cmd_ring, - CMD_RING_RUNNING, 0, 5 * 1000 * 1000); + ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000, + XHCI_STATE_REMOVING); if (ret < 0) { xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); xhci_halt(xhci); @@ -1879,7 +1880,6 @@ static void handle_port_status(struct xhci_hcd *xhci, if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Port change event with invalid port ID %d\n", port_id); - inc_deq(xhci, ir->event_ring); return; } @@ -1906,7 +1906,7 @@ static void handle_port_status(struct xhci_hcd *xhci, xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n", hcd->self.busnum, hcd_portnum + 1, port_id, portsc); - trace_xhci_handle_port_status(hcd_portnum, portsc); + trace_xhci_handle_port_status(port, portsc); if (hcd->state == HC_STATE_SUSPENDED) { xhci_dbg(xhci, "resume root hub\n"); @@ -2007,8 +2007,6 @@ static void handle_port_status(struct xhci_hcd *xhci, } cleanup: - /* Update event ring dequeue pointer before dropping the lock */ - inc_deq(xhci, ir->event_ring); /* Don't make the USB core poll the roothub if we got a bad port status * change event. Besides, at that point we can't tell which roothub @@ -2377,6 +2375,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: + /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */ + if (td->error_mid_td) + break; if (remaining) { frame->status = short_framestatus; if (xhci->quirks & XHCI_TRUST_TX_LENGTH) @@ -2392,9 +2393,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_BANDWIDTH_OVERRUN_ERROR: frame->status = -ECOMM; break; - case COMP_ISOCH_BUFFER_OVERRUN: case COMP_BABBLE_DETECTED_ERROR: + sum_trbs_for_length = true; + fallthrough; + case COMP_ISOCH_BUFFER_OVERRUN: frame->status = -EOVERFLOW; + if (ep_trb != td->last_trb) + td->error_mid_td = true; break; case COMP_INCOMPATIBLE_DEVICE_ERROR: case COMP_STALL_ERROR: @@ -2402,8 +2407,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; case COMP_USB_TRANSACTION_ERROR: frame->status = -EPROTO; + sum_trbs_for_length = true; if (ep_trb != td->last_trb) - return 0; + td->error_mid_td = true; break; case COMP_STOPPED: sum_trbs_for_length = true; @@ -2423,6 +2429,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; } + if (td->urb_length_set) + goto finish_td; + if (sum_trbs_for_length) frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + ep_trb_len - remaining; @@ -2431,6 +2440,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->urb->actual_length += frame->actual_length; +finish_td: + /* Don't give back TD yet if we encountered an error mid TD */ + if (td->error_mid_td && ep_trb != td->last_trb) { + xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n"); + td->urb_length_set = true; + return 0; + } + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); } @@ -2809,17 +2826,51 @@ static int handle_tx_event(struct xhci_hcd *xhci, } if (!ep_seg) { - if (!ep->skip || - !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { - /* Some host controllers give a spurious - * successful event after a short transfer. - * Ignore it. 
- */ - if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && - ep_ring->last_td_was_short) { - ep_ring->last_td_was_short = false; - goto cleanup; + + if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + skip_isoc_td(xhci, td, ep, status); + goto cleanup; + } + + /* + * Some hosts give a spurious success event after a short + * transfer. Ignore it. + */ + if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && + ep_ring->last_td_was_short) { + ep_ring->last_td_was_short = false; + goto cleanup; + } + + /* + * xhci 4.10.2 states isoc endpoints should continue + * processing the next TD if there was an error mid TD. + * So host like NEC don't generate an event for the last + * isoc TRB even if the IOC flag is set. + * xhci 4.9.1 states that if there are errors in mult-TRB + * TDs xHC should generate an error for that TRB, and if xHC + * proceeds to the next TD it should genete an event for + * any TRB with IOC flag on the way. Other host follow this. + * So this event might be for the next TD. + */ + if (td->error_mid_td && + !list_is_last(&td->td_list, &ep_ring->td_list)) { + struct xhci_td *td_next = list_next_entry(td, td_list); + + ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb, + td_next->last_trb, ep_trb_dma, false); + if (ep_seg) { + /* give back previous TD, start handling new */ + xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + inc_deq(xhci, ep_ring); + xhci_td_cleanup(xhci, td, ep_ring, td->status); + td = td_next; } + } + + if (!ep_seg) { /* HC is busted, give up! */ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " @@ -2831,9 +2882,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep_trb_dma, true); return -ESHUTDOWN; } - - skip_isoc_td(xhci, td, ep, status); - goto cleanup; } if (trb_comp_code == COMP_SHORT_PACKET) ep_ring->last_td_was_short = true; @@ -2884,13 +2932,6 @@ cleanup: trb_comp_code != COMP_MISSED_SERVICE_ERROR && trb_comp_code != COMP_NO_PING_RESPONSE_ERROR; - /* - * Do not update event ring dequeue pointer if we're in a loop - * processing missed tds. - */ - if (!handling_skipped_tds) - inc_deq(xhci, ir->event_ring); - /* * If ep->skip is set, it means there are missed tds on the * endpoint ring need to take care of. @@ -2922,9 +2963,7 @@ err_out: static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { union xhci_trb *event; - int update_ptrs = 1; u32 trb_type; - int ret; /* Event ring hasn't been allocated yet. */ if (!ir || !ir->event_ring || !ir->event_ring->dequeue) { @@ -2954,12 +2993,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) break; case TRB_PORT_STATUS: handle_port_status(xhci, ir, event); - update_ptrs = 0; break; case TRB_TRANSFER: - ret = handle_tx_event(xhci, ir, &event->trans_event); - if (ret >= 0) - update_ptrs = 0; + handle_tx_event(xhci, ir, &event->trans_event); break; case TRB_DEV_NOTE: handle_device_notification(xhci, event); @@ -2979,9 +3015,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) return 0; } - if (update_ptrs) - /* Update SW event ring dequeue pointer */ - inc_deq(xhci, ir->event_ring); + /* Update SW event ring dequeue pointer */ + inc_deq(xhci, ir->event_ring); /* Are there more items on the event ring? Caller will call us again to * check. 
@@ -3013,13 +3048,12 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, * Per 4.9.4, Software writes to the ERDP register shall * always advance the Event Ring Dequeue Pointer value. */ - if ((temp_64 & (u64) ~ERST_PTR_MASK) == - ((u64) deq & (u64) ~ERST_PTR_MASK)) + if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK)) return; /* Update HC event ring dequeue pointer */ - temp_64 &= ERST_DESI_MASK; - temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); + temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; + temp_64 |= deq & ERST_PTR_MASK; } /* Clear the event handler busy flag (RW1C) */ diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index d6b32f2ad9..ac47b1c054 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -509,35 +509,38 @@ DEFINE_EVENT(xhci_log_ring, xhci_inc_deq, ); DECLARE_EVENT_CLASS(xhci_log_portsc, - TP_PROTO(u32 portnum, u32 portsc), - TP_ARGS(portnum, portsc), + TP_PROTO(struct xhci_port *port, u32 portsc), + TP_ARGS(port, portsc), TP_STRUCT__entry( + __field(u32, busnum) __field(u32, portnum) __field(u32, portsc) ), TP_fast_assign( - __entry->portnum = portnum; + __entry->busnum = port->rhub->hcd->self.busnum; + __entry->portnum = port->hcd_portnum; __entry->portsc = portsc; ), - TP_printk("port-%d: %s", + TP_printk("port %d-%d: %s", + __entry->busnum, __entry->portnum, xhci_decode_portsc(__get_buf(XHCI_MSG_MAX), __entry->portsc) ) ); DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status, - TP_PROTO(u32 portnum, u32 portsc), - TP_ARGS(portnum, portsc) + TP_PROTO(struct xhci_port *port, u32 portsc), + TP_ARGS(port, portsc) ); DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status, - TP_PROTO(u32 portnum, u32 portsc), - TP_ARGS(portnum, portsc) + TP_PROTO(struct xhci_port *port, u32 portsc), + TP_ARGS(port, portsc) ); DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data, - TP_PROTO(u32 portnum, u32 portsc), - TP_ARGS(portnum, portsc) + TP_PROTO(struct xhci_port *port, u32 portsc), + TP_ARGS(port, portsc) ); DECLARE_EVENT_CLASS(xhci_log_doorbell, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 132b76fa7c..884b0898d9 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -82,6 +82,29 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) } /* + * xhci_handshake_check_state - same as xhci_handshake but takes an additional + * exit_state parameter, and bails out with an error immediately when xhc_state + * has exit_state flag set. + */ +int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, + u32 mask, u32 done, int usec, unsigned int exit_state) +{ + u32 result; + int ret; + + ret = readl_poll_timeout_atomic(ptr, result, + (result & mask) == done || + result == U32_MAX || + xhci->xhc_state & exit_state, + 1, usec); + + if (result == U32_MAX || xhci->xhc_state & exit_state) + return -ENODEV; + + return ret; +} + +/* * Disable interrupts and begin the xHCI halting process. 
*/ void xhci_quiesce(struct xhci_hcd *xhci) @@ -201,7 +224,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); + ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command, + CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING); if (ret) return ret; @@ -520,7 +544,7 @@ int xhci_run(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); - temp_64 &= ~ERST_PTR_MASK; + temp_64 &= ERST_PTR_MASK; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "ERST deq = 64'h%0lx", (long unsigned int) temp_64); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 5df3704825..6f53a950d9 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -525,7 +525,7 @@ struct xhci_intr_reg { * a work queue (or delayed service routine)? */ #define ERST_EHB (1 << 3) -#define ERST_PTR_MASK (0xf) +#define ERST_PTR_MASK (GENMASK_ULL(63, 4)) /** * struct xhci_run_regs @@ -558,33 +558,6 @@ struct xhci_doorbell_array { #define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) #define DB_VALUE_HOST 0x00000000 -/** - * struct xhci_protocol_caps - * @revision: major revision, minor revision, capability ID, - * and next capability pointer. - * @name_string: Four ASCII characters to say which spec this xHC - * follows, typically "USB ". - * @port_info: Port offset, count, and protocol-defined information. - */ -struct xhci_protocol_caps { - u32 revision; - u32 name_string; - u32 port_info; -}; - -#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) -#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff) -#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f) -#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) -#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) - -#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f) -#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03) -#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03) -#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01) -#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03) -#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff) - #define PLT_MASK (0x03 << 6) #define PLT_SYM (0x00 << 6) #define PLT_ASYM_RX (0x02 << 6) @@ -1545,6 +1518,7 @@ struct xhci_segment { union xhci_trb *trbs; /* private to HCD */ struct xhci_segment *next; + unsigned int num; dma_addr_t dma; /* Max packet sized bounce buffer for td-fragmant alignment */ dma_addr_t bounce_dma; @@ -1573,6 +1547,7 @@ struct xhci_td { struct xhci_segment *bounce_seg; /* actual_length of the URB has already been set */ bool urb_length_set; + bool error_mid_td; unsigned int num_trbs; }; @@ -1666,15 +1641,11 @@ struct xhci_scratchpad { struct urb_priv { int num_tds; int num_tds_done; - struct xhci_td td[]; + struct xhci_td td[] __counted_by(num_tds); }; -/* - * Each segment table entry is 4*32bits long. 1K seems like an ok size: - * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table, - * meaning 64 ring segments. 
- * Initial allocated size of the ERST, in number of entries */ -#define ERST_NUM_SEGS 1 +/* Reasonable limit for number of Event Ring segments (spec allows 32k) */ +#define ERST_MAX_SEGS 2 /* Poll every 60 seconds */ #define POLL_TIMEOUT 60 /* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ @@ -2078,13 +2049,8 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags); -int xhci_alloc_erst(struct xhci_hcd *xhci, - struct xhci_ring *evt_ring, - struct xhci_erst *erst, - gfp_t flags); void xhci_initialize_ring_info(struct xhci_ring *ring, unsigned int cycle_state); -void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); void xhci_free_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); @@ -2119,6 +2085,8 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us); +int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, + u32 mask, u32 done, int usec, unsigned int exit_state); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_start(struct xhci_hcd *xhci); |