Diffstat (limited to 'drivers/usb/cdns3')
28 files changed, 19130 insertions, 0 deletions
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
new file mode 100644
index 000000000..b98ca0a13
--- /dev/null
+++ b/drivers/usb/cdns3/Kconfig
@@ -0,0 +1,122 @@
+config USB_CDNS_SUPPORT
+	tristate "Cadence USB Support"
+	depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA
+	select USB_XHCI_PLATFORM if USB_XHCI_HCD
+	select USB_ROLE_SWITCH
+	help
+	  Say Y here if your system has a Cadence USBSS or USBSSP
+	  dual-role controller.
+	  It supports: dual-role switch, Host-only, and Peripheral-only.
+
+config USB_CDNS_HOST
+	bool
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNS3
+	tristate "Cadence USB3 Dual-Role Controller"
+	depends on USB_CDNS_SUPPORT
+	help
+	  Say Y here if your system has a Cadence USB3 dual-role controller.
+	  It supports: dual-role switch, Host-only, and Peripheral-only.
+
+	  If you choose to build this driver as a dynamically linked
+	  module, the module will be called cdns3.ko.
+endif
+
+if USB_CDNS3
+
+config USB_CDNS3_GADGET
+	bool "Cadence USB3 device controller"
+	depends on USB_GADGET=y || USB_GADGET=USB_CDNS3
+	help
+	  Say Y here to enable device controller functionality of the
+	  Cadence USBSS-DEV driver.
+
+	  This controller supports FS, HS and SS mode. It doesn't support
+	  LS and SSP mode.
+
+config USB_CDNS3_HOST
+	bool "Cadence USB3 host controller"
+	depends on USB=y || USB=USB_CDNS3
+	select USB_CDNS_HOST
+	help
+	  Say Y here to enable host controller functionality of the
+	  Cadence driver.
+
+	  The host controller is compliant with xHCI, so it will use
+	  the standard xHCI driver.
+
+config USB_CDNS3_PCI_WRAP
+	tristate "Cadence USB3 support on PCIe-based platforms"
+	depends on USB_PCI && ACPI
+	default USB_CDNS3
+	help
+	  If you're using the USBSS Core IP with a PCIe interface,
+	  please say 'Y' or 'M' here.
+
+	  If you choose to build this driver as a module, it will
+	  be dynamically linked and called cdns3-pci.ko.
+
+config USB_CDNS3_TI
+	tristate "Cadence USB3 support on TI platforms"
+	depends on ARCH_K3 || COMPILE_TEST
+	default USB_CDNS3
+	help
+	  Say 'Y' or 'M' here if you are building for Texas Instruments
+	  platforms that contain a Cadence USB3 controller core.
+
+	  e.g. J721e.
+
+config USB_CDNS3_IMX
+	tristate "Cadence USB3 support on NXP i.MX platforms"
+	depends on ARCH_MXC || COMPILE_TEST
+	default USB_CDNS3
+	help
+	  Say 'Y' or 'M' here if you are building for NXP i.MX
+	  platforms that contain a Cadence USB3 controller core.
+
+	  For example, imx8qm and imx8qxp.
+
+endif
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNSP_PCI
+	tristate "Cadence CDNSP Dual-Role Controller"
+	depends on USB_CDNS_SUPPORT && USB_PCI && ACPI
+	help
+	  Say Y here if your system has a Cadence CDNSP dual-role controller.
+	  It supports: dual-role switch, Host-only, and Peripheral-only.
+
+	  If you choose to build this driver as a dynamically linked
+	  module, the module will be called cdnsp.ko.
+endif
+
+if USB_CDNSP_PCI
+
+config USB_CDNSP_GADGET
+	bool "Cadence CDNSP device controller"
+	depends on USB_GADGET=y || USB_GADGET=USB_CDNSP_PCI
+	help
+	  Say Y here to enable device controller functionality of the
+	  Cadence CDNSP-DEV driver.
+
+	  The Cadence CDNSP Device Controller in device mode is
+	  very similar to the xHCI controller, so some algorithms
+	  have been taken from the host driver.
+	  This controller supports FS, HS, SS and SSP mode.
+	  It doesn't support LS.
+
+config USB_CDNSP_HOST
+	bool "Cadence CDNSP host controller"
+	depends on USB=y || USB=USB_CDNSP_PCI
+	select USB_CDNS_HOST
+	help
+	  Say Y here to enable host controller functionality of the
+	  Cadence driver.
+
+	  The host controller is compliant with xHCI, so it uses
+	  the standard xHCI driver.
+
+endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
new file mode 100644
index 000000000..61edb2f89
--- /dev/null
+++ b/drivers/usb/cdns3/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+# define_trace.h needs to know how to find our header
+CFLAGS_cdns3-trace.o := -I$(src)
+CFLAGS_cdnsp-trace.o := -I$(src)
+
+cdns-usb-common-y := core.o drd.o
+cdns3-y := cdns3-plat.o
+
+ifeq ($(CONFIG_USB),m)
+obj-m += cdns-usb-common.o
+obj-m += cdns3.o
+else
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns-usb-common.o
+obj-$(CONFIG_USB_CDNS3) += cdns3.o
+endif
+
+cdns-usb-common-$(CONFIG_USB_CDNS_HOST) += host.o
+cdns3-$(CONFIG_USB_CDNS3_GADGET) += cdns3-gadget.o cdns3-ep0.o
+
+ifneq ($(CONFIG_USB_CDNS3_GADGET),)
+cdns3-$(CONFIG_TRACING) += cdns3-trace.o
+endif
+
+obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+
+cdnsp-udc-pci-y := cdnsp-pci.o
+
+ifdef CONFIG_USB_CDNSP_PCI
+ifeq ($(CONFIG_USB),m)
+obj-m += cdnsp-udc-pci.o
+else
+obj-$(CONFIG_USB_CDNSP_PCI) += cdnsp-udc-pci.o
+endif
+endif
+
+cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET) += cdnsp-ring.o cdnsp-gadget.o \
+					    cdnsp-mem.o cdnsp-ep0.o
+
+ifneq ($(CONFIG_USB_CDNSP_GADGET),)
+cdnsp-udc-pci-$(CONFIG_TRACING) += cdnsp-trace.o
+endif
diff --git a/drivers/usb/cdns3/cdns3-debug.h b/drivers/usb/cdns3/cdns3-debug.h
new file mode 100644
index 000000000..a5c6a29e1
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-debug.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS DRD Driver.
+ * Debug header file.
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+#ifndef __LINUX_CDNS3_DEBUG
+#define __LINUX_CDNS3_DEBUG
+
+#include "core.h"
+
+static inline char *cdns3_decode_usb_irq(char *str,
+					 enum usb_device_speed speed,
+					 u32 usb_ists)
+{
+	int ret;
+
+	ret = sprintf(str, "IRQ %08x = ", usb_ists);
+
+	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
+		ret += sprintf(str + ret, "Connection %s\n",
+			       usb_speed_string(speed));
+	}
+	if (usb_ists & USB_ISTS_DIS2I || usb_ists & USB_ISTS_DISI)
+		ret += sprintf(str + ret, "Disconnection ");
+	if (usb_ists & USB_ISTS_L2ENTI)
+		ret += sprintf(str + ret, "suspended ");
+	if (usb_ists & USB_ISTS_L1ENTI)
+		ret += sprintf(str + ret, "L1 enter ");
+	if (usb_ists & USB_ISTS_L1EXTI)
+		ret += sprintf(str + ret, "L1 exit ");
+	if (usb_ists & USB_ISTS_L2ENTI)
+		ret += sprintf(str + ret, "L2 enter ");
+	if (usb_ists & USB_ISTS_L2EXTI)
+		ret += sprintf(str + ret, "L2 exit ");
+	if (usb_ists & USB_ISTS_U3EXTI)
+		ret += sprintf(str + ret, "U3 exit ");
+	if (usb_ists & USB_ISTS_UWRESI)
+		ret += sprintf(str + ret, "Warm Reset ");
+	if (usb_ists & USB_ISTS_UHRESI)
+		ret += sprintf(str + ret, "Hot Reset ");
+	if (usb_ists & USB_ISTS_U2RESI)
+		ret += sprintf(str + ret, "Reset");
+
+	return str;
+}
+
+static inline char *cdns3_decode_ep_irq(char *str,
+					u32 ep_sts,
+					const char *ep_name)
+{
+	int ret;
+
+	ret = sprintf(str, "IRQ for %s: %08x ", ep_name, ep_sts);
+
+	if (ep_sts & EP_STS_SETUP)
+		ret += sprintf(str + ret, "SETUP ");
+	if (ep_sts & EP_STS_IOC)
+		ret += sprintf(str + ret, "IOC ");
+	if (ep_sts & EP_STS_ISP)
+		ret += sprintf(str + ret, "ISP ");
+	if (ep_sts & EP_STS_DESCMIS)
+		ret += sprintf(str + ret, "DESCMIS ");
+	if (ep_sts & EP_STS_STREAMR)
+		ret += sprintf(str + ret, "STREAMR ");
+	if (ep_sts & EP_STS_MD_EXIT)
+		ret += sprintf(str + ret, "MD_EXIT ");
+	if (ep_sts & EP_STS_TRBERR)
+		ret += sprintf(str + ret, "TRBERR ");
+	if (ep_sts & EP_STS_NRDY)
+		ret += sprintf(str + ret, "NRDY ");
+	if (ep_sts & EP_STS_PRIME)
+		ret += sprintf(str + ret, "PRIME ");
+	if (ep_sts & EP_STS_SIDERR)
+		ret += sprintf(str + ret, "SIDERR ");
+	if (ep_sts & EP_STS_OUTSMM)
+		ret += sprintf(str + ret, "OUTSMM ");
+	if (ep_sts & EP_STS_ISOERR)
+		ret += sprintf(str + ret, "ISOERR ");
+	if (ep_sts & EP_STS_IOT)
+		ret += sprintf(str + ret, "IOT ");
+
+	return str;
+}
+
+static inline char *cdns3_decode_epx_irq(char *str,
+					 char *ep_name,
+					 u32 ep_sts)
+{
+	return cdns3_decode_ep_irq(str, ep_sts, ep_name);
+}
+
+static inline char *cdns3_decode_ep0_irq(char *str,
+					 int dir,
+					 u32 ep_sts)
+{
+	return cdns3_decode_ep_irq(str, ep_sts,
+				   dir ? "ep0IN" : "ep0OUT");
+}
+
+/**
+ * Debug a transfer ring.
+ *
+ * Prints out all TRBs in the endpoint ring, even those after the Link TRB.
+ */
+static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep,
+				   struct cdns3_trb *ring, char *str)
+{
+	dma_addr_t addr = priv_ep->trb_pool_dma;
+	struct cdns3_trb *trb;
+	int trb_per_sector;
+	int ret = 0;
+	int i;
+
+	trb_per_sector = GET_TRBS_PER_SEGMENT(priv_ep->type);
+
+	trb = &priv_ep->trb_pool[priv_ep->dequeue];
+	ret += sprintf(str + ret, "\n\t\tRing contents for %s:", priv_ep->name);
+
+	ret += sprintf(str + ret,
+		       "\n\t\tRing deq index: %d, trb: %p (virt), 0x%llx (dma)\n",
+		       priv_ep->dequeue, trb,
+		       (unsigned long long)cdns3_trb_virt_to_dma(priv_ep, trb));
+
+	trb = &priv_ep->trb_pool[priv_ep->enqueue];
+	ret += sprintf(str + ret,
+		       "\t\tRing enq index: %d, trb: %p (virt), 0x%llx (dma)\n",
+		       priv_ep->enqueue, trb,
+		       (unsigned long long)cdns3_trb_virt_to_dma(priv_ep, trb));
+
+	ret += sprintf(str + ret,
+		       "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
+		       priv_ep->free_trbs, priv_ep->ccs, priv_ep->pcs);
+
+	if (trb_per_sector > TRBS_PER_SEGMENT) {
+		sprintf(str + ret, "\t\tTransfer ring %d too big\n",
+			trb_per_sector);
+		return str;
+	}
+
+	for (i = 0; i < trb_per_sector; ++i) {
+		trb = &ring[i];
+		ret += sprintf(str + ret,
+			       "\t\t@%pad %08x %08x %08x\n", &addr,
+			       le32_to_cpu(trb->buffer),
+			       le32_to_cpu(trb->length),
+			       le32_to_cpu(trb->control));
+		addr += sizeof(*trb);
+	}
+
+	return str;
+}
+
+#endif /*__LINUX_CDNS3_DEBUG*/
diff --git a/drivers/usb/cdns3/cdns3-ep0.c b/drivers/usb/cdns3/cdns3-ep0.c
new file mode 100644
index 000000000..e29989d57
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-ep0.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver - gadget side.
+ *
+ * Copyright (C) 2018 Cadence Design Systems.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Pawel Jez <pjez@cadence.com>,
+ *          Pawel Laszczak <pawell@cadence.com>
+ *          Peter Chen <peter.chen@nxp.com>
+ */
+
+#include <linux/usb/composite.h>
+#include <linux/iopoll.h>
+
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
+
+static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+};
+
+/**
+ * cdns3_ep0_run_transfer - do transfer on default endpoint hardware
+ * @priv_dev: extended gadget object
+ * @dma_addr: physical address where data is/will be stored
+ * @length: data length
+ * @erdy: set it to 1 when ERDY packet should be sent -
+ *        exit from flow control state
+ * @zlp: add zero length packet
+ */
+static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev,
+				   dma_addr_t dma_addr,
+				   unsigned int length, int erdy, int zlp)
+{
+	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
+	struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+
+	priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+	priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length));
+
+	if (zlp) {
+		priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL));
+		priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+		priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0));
+		priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+							   TRB_TYPE(TRB_NORMAL));
+	} else {
+		priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+							   TRB_TYPE(TRB_NORMAL));
+		priv_ep->trb_pool[1].control = 0;
+	}
+
+	trace_cdns3_prepare_trb(priv_ep, priv_ep->trb_pool);
+
+	cdns3_select_ep(priv_dev, priv_dev->ep0_data_dir);
+
+	writel(EP_STS_TRBERR, &regs->ep_sts);
+	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), &regs->ep_traddr);
+	trace_cdns3_doorbell_ep0(priv_dev->ep0_data_dir ? "ep0in" : "ep0out",
+				 readl(&regs->ep_traddr));
+
+	/* TRB should be prepared before starting transfer. */
+	writel(EP_CMD_DRDY, &regs->ep_cmd);
+
+	/* Resume controller before arming transfer. */
+	__cdns3_gadget_wakeup(priv_dev);
+
+	if (erdy)
+		writel(EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+}
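+/*
+ * Editor's illustration (not part of the driver): for a control transfer
+ * that needs a trailing ZLP, cdns3_ep0_run_transfer() above chains two
+ * TRBs in the ep0 pool:
+ *
+ *   trb_pool[0]: buffer = dma_addr, length = length, CYCLE | NORMAL
+ *   trb_pool[1]: buffer = dma_addr, length = 0,      CYCLE | IOC | NORMAL
+ *
+ * Only the second TRB carries IOC, so a single completion event covers
+ * both the data packet and the zero-length packet.
+ */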
+
+/**
+ * cdns3_ep0_delegate_req - delegate handling of a setup packet to the
+ *                          gadget driver and return its status
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns zero on success or negative value on failure
+ */
+static int cdns3_ep0_delegate_req(struct cdns3_device *priv_dev,
+				  struct usb_ctrlrequest *ctrl_req)
+{
+	int ret;
+
+	spin_unlock(&priv_dev->lock);
+	priv_dev->setup_pending = 1;
+	ret = priv_dev->gadget_driver->setup(&priv_dev->gadget, ctrl_req);
+	priv_dev->setup_pending = 0;
+	spin_lock(&priv_dev->lock);
+	return ret;
+}
+
+static void cdns3_prepare_setup_packet(struct cdns3_device *priv_dev)
+{
+	priv_dev->ep0_data_dir = 0;
+	priv_dev->ep0_stage = CDNS3_SETUP_STAGE;
+	cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma,
+			       sizeof(struct usb_ctrlrequest), 0, 0);
+}
+
+static void cdns3_ep0_complete_setup(struct cdns3_device *priv_dev,
+				     u8 send_stall, u8 send_erdy)
+{
+	struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+	struct usb_request *request;
+
+	request = cdns3_next_request(&priv_ep->pending_req_list);
+	if (request)
+		list_del_init(&request->list);
+
+	if (send_stall) {
+		trace_cdns3_halt(priv_ep, send_stall, 0);
+		/* set_stall on ep0 */
+		cdns3_select_ep(priv_dev, 0x00);
+		writel(EP_CMD_SSTALL, &priv_dev->regs->ep_cmd);
+	} else {
+		cdns3_prepare_setup_packet(priv_dev);
+	}
+
+	priv_dev->ep0_stage = CDNS3_SETUP_STAGE;
+	writel((send_erdy ? EP_CMD_ERDY : 0) | EP_CMD_REQ_CMPL,
+	       &priv_dev->regs->ep_cmd);
+}
+
+/**
+ * cdns3_req_ep0_set_configuration - handling of SET_CONFIGURATION standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, USB_GADGET_DELAYED_STATUS on deferred status stage,
+ * error code on error
+ */
+static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev,
+					   struct usb_ctrlrequest *ctrl_req)
+{
+	enum usb_device_state device_state = priv_dev->gadget.state;
+	u32 config = le16_to_cpu(ctrl_req->wValue);
+	int result = 0;
+
+	switch (device_state) {
+	case USB_STATE_ADDRESS:
+		result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+
+		if (result || !config)
+			goto reset_config;
+
+		break;
+	case USB_STATE_CONFIGURED:
+		result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+		if (!config && !result)
+			goto reset_config;
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+
+reset_config:
+	if (result != USB_GADGET_DELAYED_STATUS)
+		cdns3_hw_reset_eps_config(priv_dev);
+
+	usb_gadget_set_state(&priv_dev->gadget,
+			     USB_STATE_ADDRESS);
+
+	return result;
+}
+
+/**
+ * cdns3_req_ep0_set_address - handling of SET_ADDRESS standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
+				     struct usb_ctrlrequest *ctrl_req)
+{
+	enum usb_device_state device_state = priv_dev->gadget.state;
+	u32 reg;
+	u32 addr;
+
+	addr = le16_to_cpu(ctrl_req->wValue);
+
+	if (addr > USB_DEVICE_MAX_ADDRESS) {
+		dev_err(priv_dev->dev,
+			"Device address (%d) cannot be greater than %d\n",
+			addr, USB_DEVICE_MAX_ADDRESS);
+		return -EINVAL;
+	}
+
+	if (device_state == USB_STATE_CONFIGURED) {
+		dev_err(priv_dev->dev,
+			"can't set_address from configured state\n");
+		return -EINVAL;
+	}
+
+	reg = readl(&priv_dev->regs->usb_cmd);
+
+	writel(reg | USB_CMD_FADDR(addr) |
+	       USB_CMD_SET_ADDR,
+	       &priv_dev->regs->usb_cmd);
+
+	usb_gadget_set_state(&priv_dev->gadget,
+			     (addr ? USB_STATE_ADDRESS : USB_STATE_DEFAULT));
+
+	return 0;
+}
+
+/**
+ * cdns3_req_ep0_get_status - handling of GET_STATUS standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
+				    struct usb_ctrlrequest *ctrl)
+{
+	struct cdns3_endpoint *priv_ep;
+	__le16 *response_pkt;
+	u16 usb_status = 0;
+	u32 recip;
+	u8 index;
+
+	recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+	switch (recip) {
+	case USB_RECIP_DEVICE:
+		/* self powered */
+		if (priv_dev->is_selfpowered)
+			usb_status = BIT(USB_DEVICE_SELF_POWERED);
+
+		if (priv_dev->wake_up_flag)
+			usb_status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+		if (priv_dev->gadget.speed != USB_SPEED_SUPER)
+			break;
+
+		if (priv_dev->u1_allowed)
+			usb_status |= BIT(USB_DEV_STAT_U1_ENABLED);
+
+		if (priv_dev->u2_allowed)
+			usb_status |= BIT(USB_DEV_STAT_U2_ENABLED);
+
+		break;
+	case USB_RECIP_INTERFACE:
+		return cdns3_ep0_delegate_req(priv_dev, ctrl);
+	case USB_RECIP_ENDPOINT:
+		index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
+		priv_ep = priv_dev->eps[index];
+
+		/* check if endpoint is stalled or stall is pending */
+		cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
+		if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
+		    (priv_ep->flags & EP_STALL_PENDING))
+			usb_status = BIT(USB_ENDPOINT_HALT);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	response_pkt = (__le16 *)priv_dev->setup_buf;
+	*response_pkt = cpu_to_le16(usb_status);
+
+	cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma,
+			       sizeof(*response_pkt), 1, 0);
+	return 0;
+}
+
+static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
+					   struct usb_ctrlrequest *ctrl,
+					   int set)
+{
+	enum usb_device_state state;
+	enum usb_device_speed speed;
+	int ret = 0;
+	u32 wValue;
+	u16 tmode;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+	state = priv_dev->gadget.state;
+	speed = priv_dev->gadget.speed;
+
+	switch (wValue) {
+	case USB_DEVICE_REMOTE_WAKEUP:
+		priv_dev->wake_up_flag = !!set;
+		break;
+	case USB_DEVICE_U1_ENABLE:
+		if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER)
+			return -EINVAL;
+
+		priv_dev->u1_allowed = !!set;
+		break;
+	case USB_DEVICE_U2_ENABLE:
+		if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER)
+			return -EINVAL;
+
+		priv_dev->u2_allowed = !!set;
+		break;
+	case USB_DEVICE_LTM_ENABLE:
+		ret = -EINVAL;
+		break;
+	case USB_DEVICE_TEST_MODE:
+		if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+			return -EINVAL;
+
+		tmode = le16_to_cpu(ctrl->wIndex);
+
+		if (!set || (tmode & 0xff) != 0)
+			return -EINVAL;
+
+		tmode >>= 8;
+		switch (tmode) {
+		case USB_TEST_J:
+		case USB_TEST_K:
+		case USB_TEST_SE0_NAK:
+		case USB_TEST_PACKET:
+			cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
+					       USB_CMD_STMODE |
+					       USB_STS_TMODE_SEL(tmode - 1));
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cdns3_ep0_feature_handle_intf(struct cdns3_device *priv_dev,
+					 struct usb_ctrlrequest *ctrl,
+					 int set)
+{
+	u32 wValue;
+	int ret = 0;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+
+	switch (wValue) {
+	case USB_INTRF_FUNC_SUSPEND:
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
+					     struct usb_ctrlrequest *ctrl,
+					     int set)
+{
+	struct cdns3_endpoint *priv_ep;
+	int ret = 0;
+	u8 index;
+
+	if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
+		return -EINVAL;
+
+	if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
+		return 0;
+
+	index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
+	priv_ep = priv_dev->eps[index];
+
+	cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
+
+	if (set)
+		__cdns3_gadget_ep_set_halt(priv_ep);
+	else if (!(priv_ep->flags & EP_WEDGE))
+		ret = __cdns3_gadget_ep_clear_halt(priv_ep);
+
+	cdns3_select_ep(priv_dev, 0x00);
+
+	return ret;
+}
+
+/**
+ * cdns3_req_ep0_handle_feature -
+ * handling of CLEAR/SET_FEATURE standard USB request
+ *
+ * @priv_dev: extended gadget object
+ * @ctrl: pointer to received setup packet
+ * @set: must be set to 1 for SET_FEATURE request
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_handle_feature(struct cdns3_device *priv_dev,
+					struct usb_ctrlrequest *ctrl,
+					int set)
+{
+	int ret = 0;
+	u32 recip;
+
+	recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+	switch (recip) {
+	case USB_RECIP_DEVICE:
+		ret = cdns3_ep0_feature_handle_device(priv_dev, ctrl, set);
+		break;
+	case USB_RECIP_INTERFACE:
+		ret = cdns3_ep0_feature_handle_intf(priv_dev, ctrl, set);
+		break;
+	case USB_RECIP_ENDPOINT:
+		ret = cdns3_ep0_feature_handle_endpoint(priv_dev, ctrl, set);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * cdns3_req_ep0_set_sel - handling of SET_SEL standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev,
+				 struct usb_ctrlrequest *ctrl_req)
+{
+	if (priv_dev->gadget.state < USB_STATE_ADDRESS)
+		return -EINVAL;
+
+	if (le16_to_cpu(ctrl_req->wLength) != 6) {
+		dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n",
+			ctrl_req->wLength);
+		return -EINVAL;
+	}
+
+	cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, 6, 1, 0);
+	return 0;
+}
+
+/**
+ * cdns3_req_ep0_set_isoch_delay -
+ * handling of SET_ISOCH_DELAY standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev,
+					 struct usb_ctrlrequest *ctrl_req)
+{
+	if (ctrl_req->wIndex || ctrl_req->wLength)
+		return -EINVAL;
+
+	priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue);
+
+	return 0;
+}
+
+/**
+ * cdns3_ep0_standard_request - handling standard USB requests
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_ep0_standard_request(struct cdns3_device *priv_dev,
+				      struct usb_ctrlrequest *ctrl_req)
+{
+	int ret;
+
+	switch (ctrl_req->bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		ret = cdns3_req_ep0_set_address(priv_dev, ctrl_req);
+		break;
+	case USB_REQ_SET_CONFIGURATION:
+		ret = cdns3_req_ep0_set_configuration(priv_dev, ctrl_req);
+		break;
+	case USB_REQ_GET_STATUS:
+		ret = cdns3_req_ep0_get_status(priv_dev, ctrl_req);
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+		ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 0);
+		break;
+	case USB_REQ_SET_FEATURE:
+		ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 1);
+		break;
+	case USB_REQ_SET_SEL:
+		ret = cdns3_req_ep0_set_sel(priv_dev, ctrl_req);
+		break;
+	case USB_REQ_SET_ISOCH_DELAY:
+		ret = cdns3_req_ep0_set_isoch_delay(priv_dev, ctrl_req);
+		break;
+	default:
+		ret = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+		break;
+	}
+
+	return ret;
+}
+
+static void __pending_setup_status_handler(struct cdns3_device *priv_dev)
+{
+	struct usb_request *request = priv_dev->pending_status_request;
+
+	if (priv_dev->status_completion_no_call && request &&
+	    request->complete) {
+		request->complete(&priv_dev->eps[0]->endpoint, request);
+		priv_dev->status_completion_no_call = 0;
+	}
+}
+
+void cdns3_pending_setup_status_handler(struct work_struct *work)
+{
+	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
+						     pending_status_wq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+	__pending_setup_status_handler(priv_dev);
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+}
+
+/**
+ * cdns3_ep0_setup_phase - handling setup USB requests
+ * @priv_dev: extended gadget object
+ */
+static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev)
+{
+	struct usb_ctrlrequest *ctrl = priv_dev->setup_buf;
+	struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+	int result;
+
+	priv_dev->ep0_data_dir = ctrl->bRequestType & USB_DIR_IN;
+
+	trace_cdns3_ctrl_req(ctrl);
+
+	if (!list_empty(&priv_ep->pending_req_list)) {
+		struct usb_request *request;
+
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+		priv_ep->dir = priv_dev->ep0_data_dir;
+		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
+				      -ECONNRESET);
+	}
+
+	if (le16_to_cpu(ctrl->wLength))
+		priv_dev->ep0_stage = CDNS3_DATA_STAGE;
+	else
+		priv_dev->ep0_stage = CDNS3_STATUS_STAGE;
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+		result = cdns3_ep0_standard_request(priv_dev, ctrl);
+	else
+		result = cdns3_ep0_delegate_req(priv_dev, ctrl);
+
+	if (result == USB_GADGET_DELAYED_STATUS)
+		return;
+
+	if (result < 0)
+		cdns3_ep0_complete_setup(priv_dev, 1, 1);
+	else if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE)
+		cdns3_ep0_complete_setup(priv_dev, 0, 1);
+}
+
+static void cdns3_transfer_completed(struct cdns3_device *priv_dev)
+{
+	struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+
+	if (!list_empty(&priv_ep->pending_req_list)) {
+		struct usb_request *request;
+
+		trace_cdns3_complete_trb(priv_ep, priv_ep->trb_pool);
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+
+		request->actual =
+			TRB_LEN(le32_to_cpu(priv_ep->trb_pool->length));
+
+		priv_ep->dir = priv_dev->ep0_data_dir;
+		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 0);
+	}
+
+	cdns3_ep0_complete_setup(priv_dev, 0, 0);
+}
+
+/**
+ * cdns3_check_new_setup - check whether the controller received a new
+ *                         SETUP packet
+ * @priv_dev: extended gadget object
+ *
+ * The SETUP packet can be kept in on-chip memory or in system memory.
+ */
+static bool cdns3_check_new_setup(struct cdns3_device *priv_dev)
+{
+	u32 ep_sts_reg;
+
+	cdns3_select_ep(priv_dev, USB_DIR_OUT);
+	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+	return !!(ep_sts_reg & (EP_STS_SETUP | EP_STS_STPWAIT));
+}
+
+/**
+ * cdns3_check_ep0_interrupt_proceed - processes interrupt related to endpoint 0
+ * @priv_dev: extended gadget object
+ * @dir: USB_DIR_IN for IN direction, USB_DIR_OUT for OUT direction
+ */
+void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir)
+{
+	u32 ep_sts_reg;
+
+	cdns3_select_ep(priv_dev, dir);
+
+	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+	writel(ep_sts_reg, &priv_dev->regs->ep_sts);
+
+	trace_cdns3_ep0_irq(priv_dev, ep_sts_reg);
+
+	__pending_setup_status_handler(priv_dev);
+
+	if (ep_sts_reg & EP_STS_SETUP)
+		priv_dev->wait_for_setup = 1;
+
+	if (priv_dev->wait_for_setup && ep_sts_reg & EP_STS_IOC) {
+		priv_dev->wait_for_setup = 0;
+		cdns3_ep0_setup_phase(priv_dev);
+	} else if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+		priv_dev->ep0_data_dir = dir;
+		cdns3_transfer_completed(priv_dev);
+	}
+
+	if (ep_sts_reg & EP_STS_DESCMIS) {
+		if (dir == 0 && !priv_dev->setup_pending)
+			cdns3_prepare_setup_packet(priv_dev);
+	}
+}
+
+/**
+ * cdns3_gadget_ep0_enable
+ * @ep: pointer to endpoint zero object
+ * @desc: pointer to usb endpoint descriptor
+ *
+ * Function shouldn't be called by gadget driver,
+ * endpoint 0 is always active
+ */
+static int cdns3_gadget_ep0_enable(struct usb_ep *ep,
+				   const struct usb_endpoint_descriptor *desc)
+{
+	return -EINVAL;
+}
+
+/**
+ * cdns3_gadget_ep0_disable
+ * @ep: pointer to endpoint zero object
+ *
+ * Function shouldn't be called by gadget driver,
+ * endpoint 0 is always active
+ */
+static int cdns3_gadget_ep0_disable(struct usb_ep *ep)
+{
+	return -EINVAL;
+}
+
+/**
+ * cdns3_gadget_ep0_set_halt
+ * @ep: pointer to endpoint zero object
+ * @value: 1 for set stall, 0 for clear stall
+ *
+ * Returns 0
+ */
+static int cdns3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
+{
+	/* TODO */
+	return 0;
+}
+
+/**
+ * cdns3_gadget_ep0_queue - transfer data on endpoint zero
+ * @ep: pointer to endpoint zero object
+ * @request: pointer to request object
+ * @gfp_flags: gfp flags
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
+				  struct usb_request *request,
+				  gfp_t gfp_flags)
+{
+	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	unsigned long flags;
+	int ret = 0;
+	u8 zlp = 0;
+	int i;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+	trace_cdns3_ep0_queue(priv_dev, request);
+
+	/* cancel the request if controller received a new SETUP packet. */
+	if (cdns3_check_new_setup(priv_dev)) {
+		spin_unlock_irqrestore(&priv_dev->lock, flags);
+		return -ECONNRESET;
+	}
+
+	/* send STATUS stage; should be called only for SET_CONFIGURATION */
+	if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
+		u32 val;
+
+		cdns3_select_ep(priv_dev, 0x00);
+
+		/*
+		 * Configure all non-control EPs which are not enabled by class driver
+		 */
+		for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+			priv_ep = priv_dev->eps[i];
+			if (priv_ep && priv_ep->flags & EP_CLAIMED &&
+			    !(priv_ep->flags & EP_ENABLED))
+				cdns3_ep_config(priv_ep, 0);
+		}
+
+		cdns3_set_hw_configuration(priv_dev);
+		cdns3_ep0_complete_setup(priv_dev, 0, 1);
+		/* wait until configuration set */
+		ret = readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
+						val & USB_STS_CFGSTS_MASK, 1, 100);
+		if (ret == -ETIMEDOUT)
+			dev_warn(priv_dev->dev, "timeout waiting for configuration set\n");
+
+		request->actual = 0;
+		priv_dev->status_completion_no_call = true;
+		priv_dev->pending_status_request = request;
+		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED);
+		spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+		/*
+		 * Since there is no completion interrupt for status stage,
+		 * it needs to call ->complete in software after
+		 * ep0_queue is back.
+		 */
+		queue_work(system_freezable_wq, &priv_dev->pending_status_wq);
+		return ret;
+	}
+
+	if (!list_empty(&priv_ep->pending_req_list)) {
+		dev_err(priv_dev->dev,
+			"can't handle multiple requests for ep0\n");
+		spin_unlock_irqrestore(&priv_dev->lock, flags);
+		return -EBUSY;
+	}
+
+	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
+					    priv_dev->ep0_data_dir);
+	if (ret) {
+		spin_unlock_irqrestore(&priv_dev->lock, flags);
+		dev_err(priv_dev->dev, "failed to map request\n");
+		return -EINVAL;
+	}
+
+	request->status = -EINPROGRESS;
+	list_add_tail(&request->list, &priv_ep->pending_req_list);
+
+	if (request->zero && request->length &&
+	    (request->length % ep->maxpacket == 0))
+		zlp = 1;
+
+	cdns3_ep0_run_transfer(priv_dev, request->dma, request->length, 1, zlp);
+
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+	return ret;
+}
+
+/**
+ * cdns3_gadget_ep_set_wedge - set wedge on selected endpoint
+ * @ep: endpoint object
+ *
+ * Returns 0
+ */
+int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+	dev_dbg(priv_dev->dev, "Wedge for %s\n", ep->name);
+	cdns3_gadget_ep_set_halt(ep, 1);
+	priv_ep->flags |= EP_WEDGE;
+
+	return 0;
+}
+
+static const struct usb_ep_ops cdns3_gadget_ep0_ops = {
+	.enable = cdns3_gadget_ep0_enable,
+	.disable = cdns3_gadget_ep0_disable,
+	.alloc_request = cdns3_gadget_ep_alloc_request,
+	.free_request = cdns3_gadget_ep_free_request,
+	.queue = cdns3_gadget_ep0_queue,
+	.dequeue = cdns3_gadget_ep_dequeue,
+	.set_halt = cdns3_gadget_ep0_set_halt,
+	.set_wedge = cdns3_gadget_ep_set_wedge,
+};
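+/*
+ * Editor's note (sketch of the flow above, not part of the driver): the
+ * STATUS stage of SET_CONFIGURATION generates no completion interrupt, so
+ * cdns3_gadget_ep0_queue() flags the request with status_completion_no_call
+ * and schedules pending_status_wq; cdns3_pending_setup_status_handler()
+ * later calls the request's ->complete() from process context.
+ */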
+
+/**
+ * cdns3_ep0_config - configures default endpoint
+ * @priv_dev: extended gadget object
+ *
+ * Function sets parameters: maximal packet size and enables interrupts
+ */
+void cdns3_ep0_config(struct cdns3_device *priv_dev)
+{
+	struct cdns3_usb_regs __iomem *regs;
+	struct cdns3_endpoint *priv_ep;
+	u32 max_packet_size = 64;
+	u32 ep_cfg;
+
+	regs = priv_dev->regs;
+
+	if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+		max_packet_size = 512;
+
+	priv_ep = priv_dev->eps[0];
+
+	if (!list_empty(&priv_ep->pending_req_list)) {
+		struct usb_request *request;
+
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+		list_del_init(&request->list);
+	}
+
+	priv_dev->u1_allowed = 0;
+	priv_dev->u2_allowed = 0;
+
+	priv_dev->gadget.ep0->maxpacket = max_packet_size;
+	cdns3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
+
+	/* init ep out */
+	cdns3_select_ep(priv_dev, USB_DIR_OUT);
+
+	if (priv_dev->dev_ver >= DEV_VER_V3) {
+		cdns3_set_register_bit(&priv_dev->regs->dtrans,
+				       BIT(0) | BIT(16));
+		cdns3_set_register_bit(&priv_dev->regs->tdl_from_trb,
+				       BIT(0) | BIT(16));
+	}
+
+	ep_cfg = EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size);
+
+	if (!(priv_ep->flags & EP_CONFIGURED))
+		writel(ep_cfg, &regs->ep_cfg);
+
+	writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN,
+	       &regs->ep_sts_en);
+
+	/* init ep in */
+	cdns3_select_ep(priv_dev, USB_DIR_IN);
+
+	if (!(priv_ep->flags & EP_CONFIGURED))
+		writel(ep_cfg, &regs->ep_cfg);
+
+	priv_ep->flags |= EP_CONFIGURED;
+
+	writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, &regs->ep_sts_en);
+
+	cdns3_set_register_bit(&regs->usb_conf, USB_CONF_U1DS | USB_CONF_U2DS);
+}
+
+/**
+ * cdns3_init_ep0 - initializes software endpoint 0 of gadget
+ * @priv_dev: extended gadget object
+ * @priv_ep: extended endpoint object
+ *
+ * Returns 0 on success else error code.
+ */
+int cdns3_init_ep0(struct cdns3_device *priv_dev,
+		   struct cdns3_endpoint *priv_ep)
+{
+	sprintf(priv_ep->name, "ep0");
+
+	/* fill linux fields */
+	priv_ep->endpoint.ops = &cdns3_gadget_ep0_ops;
+	priv_ep->endpoint.maxburst = 1;
+	usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
+				   CDNS3_EP0_MAX_PACKET_LIMIT);
+	priv_ep->endpoint.address = 0;
+	priv_ep->endpoint.caps.type_control = 1;
+	priv_ep->endpoint.caps.dir_in = 1;
+	priv_ep->endpoint.caps.dir_out = 1;
+	priv_ep->endpoint.name = priv_ep->name;
+	priv_ep->endpoint.desc = &cdns3_gadget_ep0_desc;
+	priv_dev->gadget.ep0 = &priv_ep->endpoint;
+	priv_ep->type = USB_ENDPOINT_XFER_CONTROL;
+
+	return cdns3_allocate_trb_pool(priv_ep);
+}
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
new file mode 100644
index 000000000..ccdd525bd
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -0,0 +1,3500 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver - gadget side.
+ *
+ * Copyright (C) 2018-2019 Cadence Design Systems.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Pawel Jez <pjez@cadence.com>,
+ *          Pawel Laszczak <pawell@cadence.com>
+ *          Peter Chen <peter.chen@nxp.com>
+ */
+
+/*
+ * Work around 1:
+ * In some situations, the controller may get a stale data address in a TRB
+ * with the following sequence:
+ * 1. Controller reads a TRB that includes the data address
+ * 2. Software updates TRBs, including data address and Cycle bit
+ * 3. Controller reads the TRB that includes the Cycle bit
+ * 4. DMA runs with the stale data address
+ *
+ * To fix this problem, the driver needs to make the first TRB in a TD
+ * invalid. After preparing all TRBs, the driver checks the position of DMA;
+ * if DMA points to the first just-added TRB and the doorbell is 1, the
+ * driver must defer making this TRB valid. This TRB will be made valid
+ * while adding the next TRB, and only if DMA is stopped or at a TRBERR
+ * interrupt.
+ *
+ * Issue has been fixed in DEV_VER_V3 version of controller.
+ *
+ * Work around 2:
+ * The controller has shared on-chip buffers for all incoming packets on
+ * OUT endpoints, including ep0out. It is a FIFO buffer, so packets must be
+ * handled by DMA in the correct order. If the first packet in the buffer
+ * is not handled, the following packets directed to other endpoints and
+ * functions will be blocked.
+ * Additionally, packets directed to one endpoint can block the entire
+ * on-chip buffers. In this case transfers to other endpoints will also be
+ * blocked.
+ *
+ * To resolve this issue, after raising the descriptor missing interrupt,
+ * the driver prepares an internal usb_request object and uses it to arm
+ * the DMA transfer.
+ *
+ * The problematic situation was observed when an endpoint had been enabled
+ * but no usb_request was queued. The driver tries to detect such endpoints
+ * and uses this workaround only for them.
+ *
+ * The driver uses a limited number of buffers. This number can be set by
+ * the macro CDNS3_WA2_NUM_BUFFERS.
+ *
+ * Such a blocking situation was observed on the ACM gadget: the host sends
+ * an OUT data packet but the ACM function is not prepared for it. This
+ * causes the buffer placed in on-chip memory to block transfers to other
+ * endpoints.
+ *
+ * Issue has been fixed in DEV_VER_V2 version of controller.
+ *
+ */
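+/*
+ * Editor's illustration of Work around 1 (not part of the driver):
+ *
+ *   prepare TD:  TRB[0] is written with an invalid cycle bit (the guard),
+ *                TRB[1..n] are written as valid;
+ *   check DMA:   if DMA points at TRB[0] while the doorbell is set, the
+ *                guard is kept (see cdns3_wa1_update_guard() below);
+ *   later:       cdns3_wa1_restore_cycle_bit() makes TRB[0] valid once
+ *                DMA has moved on, DMA is stopped, or TRBERR fires.
+ */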
+
+#include <linux/dma-mapping.h>
+#include <linux/usb/gadget.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/iopoll.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
+#include "drd.h"
+
+static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
+				   struct usb_request *request,
+				   gfp_t gfp_flags);
+
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+				 struct usb_request *request);
+
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+					struct usb_request *request);
+
+/**
+ * cdns3_clear_register_bit - clear bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to clear
+ */
+static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+{
+	mask = readl(ptr) & ~mask;
+	writel(mask, ptr);
+}
+
+/**
+ * cdns3_set_register_bit - set bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to set
+ */
+void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
+{
+	mask = readl(ptr) | mask;
+	writel(mask, ptr);
+}
+
+/**
+ * cdns3_ep_addr_to_index - converts endpoint address to
+ * index of endpoint object in cdns3_device.eps[] container
+ * @ep_addr: endpoint address for which endpoint object is required
+ *
+ */
+u8 cdns3_ep_addr_to_index(u8 ep_addr)
+{
+	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
+}
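+/*
+ * Editor's example (not part of the driver): eps[] keeps OUT endpoints at
+ * indexes 0..15 and IN endpoints at 16..31, so:
+ *   cdns3_ep_addr_to_index(0x02) == 2    (ep2out)
+ *   cdns3_ep_addr_to_index(0x82) == 18   (ep2in)
+ */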
+
+static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
+			     struct cdns3_endpoint *priv_ep)
+{
+	int dma_index;
+
+	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
+
+	return dma_index / TRB_SIZE;
+}
+
+/**
+ * cdns3_next_request - returns next request from list
+ * @list: list containing requests
+ *
+ * Returns request or NULL if no requests in list
+ */
+struct usb_request *cdns3_next_request(struct list_head *list)
+{
+	return list_first_entry_or_null(list, struct usb_request, list);
+}
+
+/**
+ * cdns3_next_align_buf - returns next buffer from list
+ * @list: list containing buffers
+ *
+ * Returns buffer or NULL if no buffers in list
+ */
+static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
+{
+	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
+}
+
+/**
+ * cdns3_next_priv_request - returns next request from list
+ * @list: list containing requests
+ *
+ * Returns request or NULL if no requests in list
+ */
+static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
+{
+	return list_first_entry_or_null(list, struct cdns3_request, list);
+}
+
+/**
+ * cdns3_select_ep - selects endpoint
+ * @priv_dev: extended gadget object
+ * @ep: endpoint address
+ */
+void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
+{
+	if (priv_dev->selected_ep == ep)
+		return;
+
+	priv_dev->selected_ep = ep;
+	writel(ep, &priv_dev->regs->ep_sel);
+}
+
+/**
+ * cdns3_get_tdl - gets current tdl for selected endpoint.
+ * @priv_dev: extended gadget object
+ *
+ * Before calling this function the appropriate endpoint must
+ * be selected by means of cdns3_select_ep function.
+ */
+static int cdns3_get_tdl(struct cdns3_device *priv_dev)
+{
+	if (priv_dev->dev_ver < DEV_VER_V3)
+		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+	else
+		return readl(&priv_dev->regs->ep_tdl);
+}
+
+dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
+				 struct cdns3_trb *trb)
+{
+	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
+
+	return priv_ep->trb_pool_dma + offset;
+}
+
+static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+	if (priv_ep->trb_pool) {
+		dma_pool_free(priv_dev->eps_dma_pool,
+			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
+		priv_ep->trb_pool = NULL;
+	}
+}
+
+/**
+ * cdns3_allocate_trb_pool - allocates the TRB pool for selected endpoint
+ * @priv_ep: endpoint object
+ *
+ * Function will return 0 on success or -ENOMEM on allocation error
+ */
+int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	int ring_size = TRB_RING_SIZE;
+	int num_trbs = ring_size / TRB_SIZE;
+	struct cdns3_trb *link_trb;
+
+	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
+		cdns3_free_trb_pool(priv_ep);
+
+	if (!priv_ep->trb_pool) {
+		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
+						   GFP_ATOMIC,
+						   &priv_ep->trb_pool_dma);
+
+		if (!priv_ep->trb_pool)
+			return -ENOMEM;
+
+		priv_ep->alloc_ring_size = ring_size;
+	}
+
+	memset(priv_ep->trb_pool, 0, ring_size);
+
+	priv_ep->num_trbs = num_trbs;
+
+	if (!priv_ep->num)
+		return 0;
+
+	/* Initialize the last TRB as Link TRB */
+	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
+
+	if (priv_ep->use_streams) {
+		/*
+		 * For stream capable endpoints the driver uses a single,
+		 * correct TRB. The last TRB has a zeroed cycle bit.
+		 */
+		link_trb->control = 0;
+	} else {
+		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
+		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
+	}
+	return 0;
+}
+
+/**
+ * cdns3_ep_stall_flush - stalls and flushes selected endpoint
+ * @priv_ep: endpoint object
+ *
+ * Endpoint must be selected before call to this function
+ */
+static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	int val;
+
+	trace_cdns3_halt(priv_ep, 1, 1);
+
+	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
+	       &priv_dev->regs->ep_cmd);
+
+	/* wait for DFLUSH cleared */
+	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+				  !(val & EP_CMD_DFLUSH), 1, 1000);
+	priv_ep->flags |= EP_STALLED;
+	priv_ep->flags &= ~EP_STALL_PENDING;
+}
+
+/**
+ * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
+ * @priv_dev: extended gadget object
+ */
+void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
+{
+	int i;
+
+	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
+
+	cdns3_allow_enable_l1(priv_dev, 0);
+	priv_dev->hw_configured_flag = 0;
+	priv_dev->onchip_used_size = 0;
+	priv_dev->out_mem_is_allocated = 0;
+	priv_dev->wait_for_setup = 0;
+	priv_dev->using_streams = 0;
+
+	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
+		if (priv_dev->eps[i])
+			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
+}
+
+/**
+ * cdns3_ep_inc_trb - increment a trb index.
+ * @index: Pointer to the TRB index to increment.
+ * @cs: Cycle state
+ * @trb_in_seg: number of TRBs in segment
+ *
+ * The index should never point to the link TRB. After incrementing, if it
+ * would point to the link TRB, wrap around to the beginning and toggle the
+ * cycle state bit. The link TRB is always at the last TRB entry.
+ */
+static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
+{
+	(*index)++;
+	if (*index == (trb_in_seg - 1)) {
+		*index = 0;
+		*cs ^= 1;
+	}
+}
+
+/**
+ * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
+ * @priv_ep: The endpoint whose enqueue pointer we're incrementing
+ */
+static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
+{
+	priv_ep->free_trbs--;
+	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
+}
+
+/**
+ * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
+ * @priv_ep: The endpoint whose dequeue pointer we're incrementing
+ */
+static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
+{
+	priv_ep->free_trbs++;
+	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
+}
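+/*
+ * Editor's example (not part of the driver): with num_trbs == 40 (for
+ * illustration) the link TRB sits at index 39, so cdns3_ep_inc_trb()
+ * advances 0, 1, ..., 38 and then wraps back to 0 while toggling the
+ * cycle state; index 39 is never returned.
+ */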
+
+/**
+ * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
+ * @priv_dev: Extended gadget object
+ * @enable: Enable/disable permit to transition to L1.
+ *
+ * If bit USB_CONF_L1EN is set and the device receives an Extended Token
+ * packet, the controller answers with an ACK handshake.
+ * If bit USB_CONF_L1DS is set and the device receives an Extended Token
+ * packet, the controller answers with a NYET handshake.
+ */
+void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
+{
+	if (enable)
+		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
+	else
+		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
+}
+
+enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
+{
+	u32 reg;
+
+	reg = readl(&priv_dev->regs->usb_sts);
+
+	if (DEV_SUPERSPEED(reg))
+		return USB_SPEED_SUPER;
+	else if (DEV_HIGHSPEED(reg))
+		return USB_SPEED_HIGH;
+	else if (DEV_FULLSPEED(reg))
+		return USB_SPEED_FULL;
+	else if (DEV_LOWSPEED(reg))
+		return USB_SPEED_LOW;
+	return USB_SPEED_UNKNOWN;
+}
+
+/**
+ * cdns3_start_all_request - adds to the ring all requests not yet started
+ * @priv_dev: Extended gadget object
+ * @priv_ep: The endpoint for whom requests will be started.
+ *
+ * Returns -ENOMEM if the transfer ring does not have enough TRBs to start
+ * all requests.
+ */
+static int cdns3_start_all_request(struct cdns3_device *priv_dev,
+				   struct cdns3_endpoint *priv_ep)
+{
+	struct usb_request *request;
+	int ret = 0;
+	u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+	/*
+	 * If the last pending transfer is INTERNAL
+	 * OR streams are enabled for this endpoint
+	 * do NOT start a new transfer while the last one is still pending.
+	 */
+	if (!pending_empty) {
+		struct cdns3_request *priv_req;
+
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+		priv_req = to_cdns3_request(request);
+		if ((priv_req->flags & REQUEST_INTERNAL) ||
+		    (priv_ep->flags & EP_TDLCHK_EN) ||
+		    priv_ep->use_streams) {
+			dev_dbg(priv_dev->dev, "Blocking external request\n");
+			return ret;
+		}
+	}
+
+	while (!list_empty(&priv_ep->deferred_req_list)) {
+		request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+		if (!priv_ep->use_streams) {
+			ret = cdns3_ep_run_transfer(priv_ep, request);
+		} else {
+			priv_ep->stream_sg_idx = 0;
+			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
+		}
+		if (ret)
+			return ret;
+
+		list_move_tail(&request->list, &priv_ep->pending_req_list);
+		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
+			break;
+	}
+
+	priv_ep->flags &= ~EP_RING_FULL;
+	return ret;
+}
+
+/*
+ * WA2: Set the flag for all non-ISOC OUT endpoints. If this flag is set,
+ * the driver tries to detect whether the endpoint needs an additional
+ * internal buffer for unblocking the on-chip FIFO buffer. This flag will
+ * be cleared if the DMA is armed before the first DESCMISS interrupt.
+ */
+#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
+	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
+		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
+		(reg) |= EP_STS_EN_DESCMISEN; \
+	} } while (0)
+
+static void __cdns3_descmiss_copy_data(struct usb_request *request,
+				       struct usb_request *descmiss_req)
+{
+	int length = request->actual + descmiss_req->actual;
+	struct scatterlist *s = request->sg;
+
+	if (!s) {
+		if (length <= request->length) {
+			memcpy(&((u8 *)request->buf)[request->actual],
+			       descmiss_req->buf,
+			       descmiss_req->actual);
+			request->actual = length;
+		} else {
+			/* It should never occur */
+			request->status = -ENOMEM;
+		}
+	} else {
+		if (length <= sg_dma_len(s)) {
+			void *p = phys_to_virt(sg_dma_address(s));
+
+			memcpy(&((u8 *)p)[request->actual],
+			       descmiss_req->buf,
+			       descmiss_req->actual);
+			request->actual = length;
+		} else {
+			request->status = -ENOMEM;
+		}
+	}
+}
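+/*
+ * Editor's note (not part of the driver): data that arrives before a class
+ * request is queued may span several chained internal requests; every
+ * request in the chain except the last is marked REQUEST_INTERNAL_CH, and
+ * cdns3_wa2_descmiss_copy_data() below copies the chunks back-to-back into
+ * the class driver's buffer until it reaches the end of the chain.
+ */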
+
+/**
+ * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
+ * the request queued by the class driver.
+ * @priv_ep: extended endpoint object
+ * @request: request object
+ */
+static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
+					 struct usb_request *request)
+{
+	struct usb_request *descmiss_req;
+	struct cdns3_request *descmiss_priv_req;
+
+	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+		int chunk_end;
+
+		descmiss_priv_req =
+			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+		descmiss_req = &descmiss_priv_req->request;
+
+		/* driver can't touch pending request */
+		if (descmiss_priv_req->flags & REQUEST_PENDING)
+			break;
+
+		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
+		request->status = descmiss_req->status;
+		__cdns3_descmiss_copy_data(request, descmiss_req);
+		list_del_init(&descmiss_priv_req->list);
+		kfree(descmiss_req->buf);
+		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
+		--priv_ep->wa2_counter;
+
+		if (!chunk_end)
+			break;
+	}
+}
+
+static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
+						     struct cdns3_endpoint *priv_ep,
+						     struct cdns3_request *priv_req)
+{
+	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
+	    priv_req->flags & REQUEST_INTERNAL) {
+		struct usb_request *req;
+
+		req = cdns3_next_request(&priv_ep->deferred_req_list);
+
+		priv_ep->descmis_req = NULL;
+
+		if (!req)
+			return NULL;
+
+		/* unmap the gadget request before copying data */
+		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
+						priv_ep->dir);
+
+		cdns3_wa2_descmiss_copy_data(priv_ep, req);
+		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
+		    req->length != req->actual) {
+			/* wait for next part of transfer */
+			/* re-map the gadget request buffer */
+			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
+						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
+			return NULL;
+		}
+
+		if (req->status == -EINPROGRESS)
+			req->status = 0;
+
+		list_del_init(&req->list);
+		cdns3_start_all_request(priv_dev, priv_ep);
+		return req;
+	}
+
+	return &priv_req->request;
+}
+
+static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
+				     struct cdns3_endpoint *priv_ep,
+				     struct cdns3_request *priv_req)
+{
+	int deferred = 0;
+
+	/*
+	 * If a transfer was queued before DESCMISS appeared, then the driver
+	 * can disable handling of the DESCMISS interrupt. The driver assumes
+	 * that it can disable special treatment for this endpoint.
+	 */
+	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
+		u32 reg;
+
+		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
+		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
+		reg = readl(&priv_dev->regs->ep_sts_en);
+		reg &= ~EP_STS_EN_DESCMISEN;
+		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
+		writel(reg, &priv_dev->regs->ep_sts_en);
+	}
+
+	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
+		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);
+
+		/*
+		 * DESCMISS transfer has been finished, so data will be
+		 * directly copied from internal allocated usb_request
+		 * objects.
+		 */
+		if (pending_empty && !descmiss_empty &&
+		    !(priv_req->flags & REQUEST_INTERNAL)) {
+			cdns3_wa2_descmiss_copy_data(priv_ep,
+						     &priv_req->request);
+
+			trace_cdns3_wa2(priv_ep, "get internal stored data");
+
+			list_add_tail(&priv_req->request.list,
+				      &priv_ep->pending_req_list);
+			cdns3_gadget_giveback(priv_ep, priv_req,
+					      priv_req->request.status);
+
+			/*
+			 * Intentionally, the driver returns a positive value
+			 * as the correct value: it informs the caller that
+			 * the transfer has already been finished.
+			 */
+			return EINPROGRESS;
+		}
+
+		/*
+		 * The driver will wait for the DESCMISS transfer to complete
+		 * before starting a new, non-DESCMISS transfer.
+		 */
+		if (!pending_empty && !descmiss_empty) {
+			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
+			deferred = 1;
+		}
+
+		if (priv_req->flags & REQUEST_INTERNAL)
+			list_add_tail(&priv_req->list,
+				      &priv_ep->wa2_descmiss_req_list);
+	}
+
+	return deferred;
+}
+
+static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_request *priv_req;
+
+	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+		u8 chain;
+
+		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);
+
+		trace_cdns3_wa2(priv_ep, "removes eldest request");
+
+		kfree(priv_req->request.buf);
+		list_del_init(&priv_req->list);
+		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+					     &priv_req->request);
+		--priv_ep->wa2_counter;
+
+		if (!chain)
+			break;
+	}
+}
+
+/**
+ * cdns3_wa2_descmissing_packet - handles descriptor missing event.
+ * @priv_ep: extended gadget object
+ *
+ * This function is used only for WA2. For more information see Work around 2
+ * description.
+ */
+static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_request *priv_req;
+	struct usb_request *request;
+	u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+	/* check for pending transfer */
+	if (!pending_empty) {
+		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
+		return;
+	}
+
+	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
+		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
+		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
+	}
+
+	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
+
+	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
+		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
+		cdns3_wa2_remove_old_request(priv_ep);
+	}
+
+	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
+						GFP_ATOMIC);
+	if (!request)
+		goto err;
+
+	priv_req = to_cdns3_request(request);
+	priv_req->flags |= REQUEST_INTERNAL;
+
+	/*
+	 * If this field is still assigned, it indicates that the transfer
+	 * related to this request has not been finished yet. In this case
+	 * the driver simply allocates the next request and assigns the
+	 * REQUEST_INTERNAL_CH flag to the previous one, indicating that the
+	 * current request is part of the previous one.
+	 */
+	if (priv_ep->descmis_req)
+		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;
+
+	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
+					GFP_ATOMIC);
+	priv_ep->wa2_counter++;
+
+	if (!priv_req->request.buf) {
+		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
+		goto err;
+	}
+
+	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
+	priv_ep->descmis_req = priv_req;
+
+	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
+				&priv_ep->descmis_req->request,
+				GFP_ATOMIC);
+
+	return;
+
+err:
+	dev_err(priv_ep->cdns3_dev->dev,
+		"Failed: No sufficient memory for DESCMIS\n");
+}
+
+static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
+{
+	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+	if (tdl) {
+		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
+
+		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
+		       &priv_dev->regs->ep_cmd);
+	}
+}
+
+static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
+{
+	u32 ep_sts_reg;
+
+	/* select EP0-out */
+	cdns3_select_ep(priv_dev, 0);
+
+	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
+		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
+		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
+
+		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
+		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
+			u8 pending_empty = list_empty(&outq_ep->pending_req_list);
+
+			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
+			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
+			    !pending_empty) {
+			} else {
+				u32 ep_sts_en_reg;
+				u32 ep_cmd_reg;
+
+				cdns3_select_ep(priv_dev, outq_ep->num |
+						outq_ep->dir);
+				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
+				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
+
+				outq_ep->flags |= EP_TDLCHK_EN;
+				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+						       EP_CFG_TDL_CHK);
+
+				cdns3_wa2_enable_detection(priv_dev, outq_ep,
+							   ep_sts_en_reg);
+				writel(ep_sts_en_reg,
+				       &priv_dev->regs->ep_sts_en);
+				/* reset tdl value to zero */
+				cdns3_wa2_reset_tdl(priv_dev);
+				/*
+				 * Memory barrier - Reset tdl before ringing the
+				 * doorbell.
+				 */
+				wmb();
+				if (EP_CMD_DRDY & ep_cmd_reg) {
+					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
+
+				} else {
+					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
+					/*
+					 * ring doorbell to generate DESCMIS irq
+					 */
+					writel(EP_CMD_DRDY,
+					       &priv_dev->regs->ep_cmd);
+				}
+			}
+		}
+	}
+}
+
+/**
+ * cdns3_gadget_giveback - call struct usb_request's ->complete callback
+ * @priv_ep: The endpoint to whom the request belongs to
+ * @priv_req: The request we're giving back
+ * @status: completion code for the request
+ *
+ * Must be called with controller's lock held and interrupts disabled. This
+ * function will unmap @req and call its ->complete() callback to notify upper
+ * layers that it has completed.
+ */
+void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+			   struct cdns3_request *priv_req,
+			   int status)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	struct usb_request *request = &priv_req->request;
+
+	list_del_init(&request->list);
+
+	if (request->status == -EINPROGRESS)
+		request->status = status;
+
+	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
+					priv_ep->dir);
+
+	if ((priv_req->flags & REQUEST_UNALIGNED) &&
+	    priv_ep->dir == USB_DIR_OUT && !request->status) {
+		/* Make DMA buffer CPU accessible */
+		dma_sync_single_for_cpu(priv_dev->sysdev,
+					priv_req->aligned_buf->dma,
+					priv_req->aligned_buf->size,
+					priv_req->aligned_buf->dir);
+		memcpy(request->buf, priv_req->aligned_buf->buf,
+		       request->length);
+	}
+
+	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
+	/* All TRBs have finished, clear the counter */
+	priv_req->finished_trb = 0;
+	trace_cdns3_gadget_giveback(priv_req);
+
+	if (priv_dev->dev_ver < DEV_VER_V2) {
+		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
+						    priv_req);
+		if (!request)
+			return;
+	}
+
+	if (request->complete) {
+		spin_unlock(&priv_dev->lock);
+		usb_gadget_giveback_request(&priv_ep->endpoint,
+					    request);
+		spin_lock(&priv_dev->lock);
+	}
+
+	if (request->buf == priv_dev->zlp_buf)
+		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
+}
+
+static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
+{
+	/* Work around for stale data address in TRB */
+	if (priv_ep->wa1_set) {
+		trace_cdns3_wa1(priv_ep, "restore cycle bit");
+
+		priv_ep->wa1_set = 0;
+		priv_ep->wa1_trb_index = 0xFFFF;
+		if (priv_ep->wa1_cycle_bit) {
+			priv_ep->wa1_trb->control =
+				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
+		} else {
+			priv_ep->wa1_trb->control =
+				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
+		}
+	}
+}
+
+static void cdns3_free_aligned_request_buf(struct work_struct *work)
+{
+	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
+						     aligned_buf_wq);
+	struct cdns3_aligned_buf *buf, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+
+	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
+		if (!buf->in_use) {
+			list_del(&buf->list);
+
+			/*
+			 * Re-enable interrupts to free DMA capable memory.
+			 * Driver can't free this memory with disabled
+			 * interrupts.
+			 */
+			spin_unlock_irqrestore(&priv_dev->lock, flags);
+			dma_free_noncoherent(priv_dev->sysdev, buf->size,
+					     buf->buf, buf->dma, buf->dir);
+			kfree(buf);
+			spin_lock_irqsave(&priv_dev->lock, flags);
+		}
+	}
+
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+}
+
+static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
+{
+	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	struct cdns3_aligned_buf *buf;
+
+	/* check if buffer is aligned to 8. */
+	if (!((uintptr_t)priv_req->request.buf & 0x7))
+		return 0;
+
+	buf = priv_req->aligned_buf;
+
+	if (!buf || priv_req->request.length > buf->size) {
+		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
+		if (!buf)
+			return -ENOMEM;
+
+		buf->size = priv_req->request.length;
+		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE; + + buf->buf = dma_alloc_noncoherent(priv_dev->sysdev, + buf->size, + &buf->dma, + buf->dir, + GFP_ATOMIC); + if (!buf->buf) { + kfree(buf); + return -ENOMEM; + } + + if (priv_req->aligned_buf) { + trace_cdns3_free_aligned_request(priv_req); + priv_req->aligned_buf->in_use = 0; + queue_work(system_freezable_wq, + &priv_dev->aligned_buf_wq); + } + + buf->in_use = 1; + priv_req->aligned_buf = buf; + + list_add_tail(&buf->list, + &priv_dev->aligned_buf_list); + } + + if (priv_ep->dir == USB_DIR_IN) { + /* Make DMA buffer CPU accessible */ + dma_sync_single_for_cpu(priv_dev->sysdev, + buf->dma, buf->size, buf->dir); + memcpy(buf->buf, priv_req->request.buf, + priv_req->request.length); + } + + /* Transfer DMA buffer ownership back to device */ + dma_sync_single_for_device(priv_dev->sysdev, + buf->dma, buf->size, buf->dir); + + priv_req->flags |= REQUEST_UNALIGNED; + trace_cdns3_prepare_aligned_request(priv_req); + + return 0; +} + +static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep, + struct cdns3_trb *trb) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + if (!priv_ep->wa1_set) { + u32 doorbell; + + doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); + + if (doorbell) { + priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0; + priv_ep->wa1_set = 1; + priv_ep->wa1_trb = trb; + priv_ep->wa1_trb_index = priv_ep->enqueue; + trace_cdns3_wa1(priv_ep, "set guard"); + return 0; + } + } + return 1; +} + +static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + int dma_index; + u32 doorbell; + + doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); + dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); + + if (!doorbell || dma_index != priv_ep->wa1_trb_index) + cdns3_wa1_restore_cycle_bit(priv_ep); +} + +static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_request *priv_req; + struct cdns3_trb *trb; + dma_addr_t trb_dma; + int address; + u32 control; + u32 length; + u32 tdl; + unsigned int sg_idx = priv_ep->stream_sg_idx; + + priv_req = to_cdns3_request(request); + address = priv_ep->endpoint.desc->bEndpointAddress; + + priv_ep->flags |= EP_PENDING_REQUEST; + + /* must allocate buffer aligned to 8 */ + if (priv_req->flags & REQUEST_UNALIGNED) + trb_dma = priv_req->aligned_buf->dma; + else + trb_dma = request->dma; + + /* For stream capable endpoints driver use only single TD. */ + trb = priv_ep->trb_pool + priv_ep->enqueue; + priv_req->start_trb = priv_ep->enqueue; + priv_req->end_trb = priv_req->start_trb; + priv_req->trb = trb; + + cdns3_select_ep(priv_ep->cdns3_dev, address); + + control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE | + TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP; + + if (!request->num_sgs) { + trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); + length = request->length; + } else { + trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address)); + length = request->sg[sg_idx].length; + } + + tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); + + trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length)); + + /* + * For DEV_VER_V2 controller version we have enabled + * USB_CONF2_EN_TDL_TRB in DMULT configuration. + * This enables TDL calculation based on TRB, hence setting TDL in TRB. 
+	 */
+	if (priv_dev->dev_ver >= DEV_VER_V2) {
+		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
+	}
+	priv_req->flags |= REQUEST_PENDING;
+
+	trb->control = cpu_to_le32(control);
+
+	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+
+	/*
+	 * Memory barrier - Cycle Bit must be set before trb->length and
+	 * trb->buffer fields.
+	 */
+	wmb();
+
+	/* always first element */
+	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
+	       &priv_dev->regs->ep_traddr);
+
+	if (!(priv_ep->flags & EP_STALLED)) {
+		trace_cdns3_ring(priv_ep);
+		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
+		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+
+		priv_ep->prime_flag = false;
+
+		/*
+		 * For controller version DEV_VER_V2 the TDL calculation
+		 * is based on the TRB.
+		 */
+
+		if (priv_dev->dev_ver < DEV_VER_V2)
+			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+			       &priv_dev->regs->ep_cmd);
+		else if (priv_dev->dev_ver > DEV_VER_V2)
+			writel(tdl, &priv_dev->regs->ep_tdl);
+
+		priv_ep->last_stream_id = priv_req->request.stream_id;
+		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
+		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+
+		trace_cdns3_doorbell_epx(priv_ep->name,
+					 readl(&priv_dev->regs->ep_traddr));
+	}
+
+	/* WORKAROUND for transition to L0 */
+	__cdns3_gadget_wakeup(priv_dev);
+
+	return 0;
+}
+
+static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+	if (priv_dev->dev_ver < DEV_VER_V3)
+		return;
+
+	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
+		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
+		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+	}
+}
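+
+/*
+ * Illustrative sketch (not part of this driver): how the TDL value used in
+ * the stream transfer above is derived. The TDL ("transfer descriptor
+ * length") is the transfer length expressed in max-packet units, rounded
+ * up, i.e. the same arithmetic as DIV_ROUND_UP(length, maxpacket). The
+ * helper below is hypothetical. For example, a 3000-byte request on a
+ * 1024-byte bulk endpoint yields a TDL of 3.
+ */
+#if 0	/* example only, never compiled */
+static unsigned int compute_tdl(unsigned int length, unsigned int maxpacket)
+{
+	/* round up: a partial final packet still occupies one TDL unit */
+	return (length + maxpacket - 1) / maxpacket;
+}
+#endif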
+
+/**
+ * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
+ * @priv_ep: endpoint object
+ * @request: request object
+ *
+ * Returns zero on success or negative value on failure
+ */
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+				 struct usb_request *request)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	struct cdns3_request *priv_req;
+	struct cdns3_trb *trb;
+	struct cdns3_trb *link_trb = NULL;
+	dma_addr_t trb_dma;
+	u32 togle_pcs = 1;
+	int sg_iter = 0;
+	int num_trb_req;
+	int trb_burst;
+	int num_trb;
+	int address;
+	u32 control;
+	int pcs;
+	u16 total_tdl = 0;
+	struct scatterlist *s = NULL;
+	bool sg_supported = !!(request->num_mapped_sgs);
+
+	num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
+
+	/* an ISO transfer requires a TD for each SOF; each TD includes some TRBs */
+	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+		num_trb = priv_ep->interval * num_trb_req;
+	else
+		num_trb = num_trb_req;
+
+	priv_req = to_cdns3_request(request);
+	address = priv_ep->endpoint.desc->bEndpointAddress;
+
+	priv_ep->flags |= EP_PENDING_REQUEST;
+
+	/* must allocate buffer aligned to 8 */
+	if (priv_req->flags & REQUEST_UNALIGNED)
+		trb_dma = priv_req->aligned_buf->dma;
+	else
+		trb_dma = request->dma;
+
+	trb = priv_ep->trb_pool + priv_ep->enqueue;
+	priv_req->start_trb = priv_ep->enqueue;
+	priv_req->trb = trb;
+
+	cdns3_select_ep(priv_ep->cdns3_dev, address);
+
+	/* prepare ring */
+	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
+		int doorbell, dma_index;
+		u32 ch_bit = 0;
+
+		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
+		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
+
+		/*
+		 * The driver can't update the LINK TRB while DMA is
+		 * currently processing it.
+		 */
+		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
+			priv_ep->flags |= EP_DEFERRED_DRDY;
+			return -ENOBUFS;
+		}
+
+		/* update the cycle bit in the LINK TRB before starting DMA */
+		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
+		/*
+		 * For a transfer ring of size 2, enabling TRB_CHAIN for epXin
+		 * causes DMA to get stuck at the LINK TRB.
+		 * On the other hand, removing TRB_CHAIN for longer rings on
+		 * epXout causes DMA to get stuck after handling the LINK TRB.
+		 * To avoid this strange behavior the driver sets the TRB_CHAIN
+		 * bit only for ring size > 2.
+		 */
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
+		    TRBS_PER_SEGMENT > 2)
+			ch_bit = TRB_CHAIN;
+
+		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
+				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
+
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+			/*
+			 * ISO requires the LINK TRB to be the first TRB of a TD.
+			 * Fill the remaining TRB slots with LINK TRBs to
+			 * simplify the software processing logic.
+			 */
+			while (priv_ep->enqueue) {
+				*trb = *link_trb;
+				trace_cdns3_prepare_trb(priv_ep, trb);
+
+				cdns3_ep_inc_enq(priv_ep);
+				trb = priv_ep->trb_pool + priv_ep->enqueue;
+				priv_req->trb = trb;
+			}
+		}
+	}
+
+	if (num_trb > priv_ep->free_trbs) {
+		priv_ep->flags |= EP_RING_FULL;
+		return -ENOBUFS;
+	}
+
+	if (priv_dev->dev_ver <= DEV_VER_V2)
+		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
+
+	/* set incorrect Cycle Bit for first trb */
+	control = priv_ep->pcs ? 0 : TRB_CYCLE;
+	trb->length = 0;
+	if (priv_dev->dev_ver >= DEV_VER_V2) {
+		u16 td_size;
+
+		td_size = DIV_ROUND_UP(request->length,
+				       priv_ep->endpoint.maxpacket);
+		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
+		else
+			control |= TRB_TDL_HS_SIZE(td_size);
+	}
+
+	do {
+		u32 length;
+
+		if (!(sg_iter % num_trb_req) && sg_supported)
+			s = request->sg;
+
+		/* fill TRB */
+		control |= TRB_TYPE(TRB_NORMAL);
+		if (sg_supported) {
+			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
+			length = sg_dma_len(s);
+		} else {
+			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
+			length = request->length;
+		}
+
+		if (priv_ep->flags & EP_TDLCHK_EN)
+			total_tdl += DIV_ROUND_UP(length,
+						  priv_ep->endpoint.maxpacket);
+
+		trb_burst = priv_ep->trb_burst_size;
+
+		/*
+		 * The DMA 4K-boundary-crossing problem was supposedly fixed in
+		 * DEV_VER_V2, but it still shows up for ISO transfers when
+		 * scatter-gather is enabled.
+		 *
+		 * With SG enabled, a packet size of 1K and mult of 2, the data
+		 * pattern looks like:
+		 * [UVC Header(8B) ] [data(3k - 8)] ...
+		 *
+		 * The data received at offset 0xd000 actually contains the
+		 * data from 0xc000, length 0x70. The error occurs in the
+		 * following pattern:
+		 *	0xd000: wrong
+		 *	0xe000: wrong
+		 *	0xf000: correct
+		 *	0x10000: wrong
+		 *	0x11000: wrong
+		 *	0x12000: correct
+		 *	...
+		 *
+		 * It is still unclear why the error does not occur below
+		 * 0xd000, where the 4K boundary is also crossed. In any case,
+		 * the code below fixes the problem: to avoid crossing a 4K
+		 * boundary during ISO transfers, reduce the burst length
+		 * to 16.
+		 */
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
+			if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
+			    ALIGN_DOWN(trb->buffer + length, SZ_4K))
+				trb_burst = 16;
+
+		trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
+					   TRB_LEN(length));
+		pcs = priv_ep->pcs ? TRB_CYCLE : 0;
+
+		/*
+		 * the first trb should be prepared as the last one, to avoid
+		 * processing the transfer too early
+		 */
+		if (sg_iter != 0)
+			control |= pcs;
+
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
+			control |= TRB_IOC | TRB_ISP;
+		} else {
+			/* for last element in TD or in SG list */
+			if (sg_iter == (num_trb - 1) && sg_iter != 0)
+				control |= pcs | TRB_IOC | TRB_ISP;
+		}
+
+		if (sg_iter)
+			trb->control = cpu_to_le32(control);
+		else
+			priv_req->trb->control = cpu_to_le32(control);
+
+		if (sg_supported) {
+			trb->control |= cpu_to_le32(TRB_ISP);
+			/* Don't set chain bit for last TRB */
+			if ((sg_iter % num_trb_req) < num_trb_req - 1)
+				trb->control |= cpu_to_le32(TRB_CHAIN);
+
+			s = sg_next(s);
+		}
+
+		control = 0;
+		++sg_iter;
+		priv_req->end_trb = priv_ep->enqueue;
+		cdns3_ep_inc_enq(priv_ep);
+		trb = priv_ep->trb_pool + priv_ep->enqueue;
+		trb->length = 0;
+	} while (sg_iter < num_trb);
+
+	trb = priv_req->trb;
+
+	priv_req->flags |= REQUEST_PENDING;
+	priv_req->num_of_trb = num_trb;
+
+	if (sg_iter == 1)
+		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
+
+	if (priv_dev->dev_ver < DEV_VER_V2 &&
+	    (priv_ep->flags & EP_TDLCHK_EN)) {
+		u16 tdl = total_tdl;
+		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+		if (tdl > EP_CMD_TDL_MAX) {
+			tdl = EP_CMD_TDL_MAX;
+			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
+		}
+
+		if (old_tdl < tdl) {
+			tdl -= old_tdl;
+			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+			       &priv_dev->regs->ep_cmd);
+		}
+	}
+
+	/*
+	 * Memory barrier - the cycle bit must be set before the other fields
+	 * in the trb.
+	 */
+	wmb();
+
+	/* give the TD to the consumer */
+	if (togle_pcs)
+		trb->control = trb->control ^ cpu_to_le32(1);
+
+	if (priv_dev->dev_ver <= DEV_VER_V2)
+		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
+
+	if (num_trb > 1) {
+		int i = 0;
+
+		while (i < num_trb) {
+			trace_cdns3_prepare_trb(priv_ep, trb + i);
+			if (trb + i == link_trb) {
+				trb = priv_ep->trb_pool;
+				num_trb = num_trb - i;
+				i = 0;
+			} else {
+				i++;
+			}
+		}
+	} else {
+		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+	}
+
+	/*
+	 * Memory barrier - Cycle Bit must be set before trb->length and
+	 * trb->buffer fields.
+	 */
+	wmb();
+
+	/*
+	 * For DMULT mode we can set address to transfer ring only once after
+	 * enabling endpoint.
+	 */
+	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
+		/*
+		 * Until the SW is ready to handle the OUT transfer, the ISO
+		 * OUT endpoint should be disabled (EP_CFG.ENABLE = 0).
+		 * EP_CFG_ENABLE must be set before updating ep_traddr.
+		 */
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
+		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
+			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
+			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+					       EP_CFG_ENABLE);
+		}
+
+		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
+					priv_req->start_trb * TRB_SIZE),
+					&priv_dev->regs->ep_traddr);
+
+		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
+	}
+
+	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
+		trace_cdns3_ring(priv_ep);
+		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
+		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+		cdns3_rearm_drdy_if_needed(priv_ep);
+		trace_cdns3_doorbell_epx(priv_ep->name,
+					 readl(&priv_dev->regs->ep_traddr));
+	}
+
+	/* WORKAROUND for transition to L0 */
+	__cdns3_gadget_wakeup(priv_dev);
+
+	return 0;
+}
+
+void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
+{
+	struct cdns3_endpoint *priv_ep;
+	struct usb_ep *ep;
+
+	if (priv_dev->hw_configured_flag)
+		return;
+
+	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
+
+	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
+			       USB_CONF_U1EN | USB_CONF_U2EN);
+
+	priv_dev->hw_configured_flag = 1;
+
+	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
+		if (ep->enabled) {
+			priv_ep = ep_to_cdns3_ep(ep);
+			cdns3_start_all_request(priv_dev, priv_ep);
+		}
+	}
+
+	cdns3_allow_enable_l1(priv_dev, 1);
+}
+
+/**
+ * cdns3_trb_handled - check whether trb has been handled by DMA
+ *
+ * @priv_ep: extended endpoint object.
+ * @priv_req: request object for checking
+ *
+ * Endpoint must be selected before invoking this function.
+ *
+ * Returns false if request has not been handled by DMA, else returns true.
+ *
+ * SR - start ring
+ * ER - end ring
+ * DQ = priv_ep->dequeue - dequeue position
+ * EQ = priv_ep->enqueue - enqueue position
+ * ST = priv_req->start_trb - index of first TRB in transfer ring
+ * ET = priv_req->end_trb - index of last TRB in transfer ring
+ * CI = current_index - index of TRB being processed by DMA.
+ *
+ * As a first step, we check whether the TRB lies between ST and ET.
+ * Then we check whether the cycle bit for index priv_ep->dequeue
+ * is correct.
+ *
+ * Some rules:
+ * 1. priv_ep->dequeue never equals current_index.
+ * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
+ * 3. exception: priv_ep->enqueue == priv_ep->dequeue
+ *    while priv_ep->free_trbs is zero.
+ *    This case indicates that the transfer ring is full.
+ *
+ * In the two cases below, the request has been handled.
+ * Case 1 - priv_ep->dequeue < current_index
+ *      SR ... EQ ... DQ ... CI ... ER
+ *      SR ... DQ ... CI ... EQ ... ER
+ *
+ * Case 2 - priv_ep->dequeue > current_index
+ * This situation takes place when CI goes through the LINK TRB at the end of
+ * the transfer ring.
+ *      SR ... CI ... EQ ... DQ ... ER
+ */
+static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
+			      struct cdns3_request *priv_req)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	struct cdns3_trb *trb;
+	int current_index = 0;
+	int handled = 0;
+	int doorbell;
+
+	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
+	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
+
+	/* current trb doesn't belong to this request */
+	if (priv_req->start_trb < priv_req->end_trb) {
+		if (priv_ep->dequeue > priv_req->end_trb)
+			goto finish;
+
+		if (priv_ep->dequeue < priv_req->start_trb)
+			goto finish;
+	}
+
+	if ((priv_req->start_trb > priv_req->end_trb) &&
+	    (priv_ep->dequeue > priv_req->end_trb) &&
+	    (priv_ep->dequeue < priv_req->start_trb))
+		goto finish;
+
+	if ((priv_req->start_trb == priv_req->end_trb) &&
+	    (priv_ep->dequeue != priv_req->end_trb))
+		goto finish;
+
+	trb = &priv_ep->trb_pool[priv_ep->dequeue];
+
+	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
+		goto finish;
+
+	if (doorbell == 1 && current_index == priv_ep->dequeue)
+		goto finish;
+
+	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
+	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+		handled = 1;
+		goto finish;
+	}
+
+	if (priv_ep->enqueue == priv_ep->dequeue &&
+	    priv_ep->free_trbs == 0) {
+		handled = 1;
+	} else if (priv_ep->dequeue < current_index) {
+		if ((current_index == (priv_ep->num_trbs - 1)) &&
+		    !priv_ep->dequeue)
+			goto finish;
+
+		handled = 1;
+	} else if (priv_ep->dequeue > current_index) {
+		handled = 1;
+	}
+
+finish:
+	trace_cdns3_request_handled(priv_req, current_index, handled);
+
+	return handled;
+}
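+
+/*
+ * Illustrative sketch (not part of this driver): the core of the
+ * dequeue-position check above, reduced to plain ring arithmetic. dq is the
+ * dequeue index, ci the index DMA is processing, trb_cycle the cycle bit
+ * read from the TRB at dq, and ccs the expected cycle state. The function
+ * name and reduced signature are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static bool trb_handled_sketch(unsigned int dq, unsigned int ci,
+			       unsigned int trb_cycle, unsigned int ccs,
+			       bool doorbell)
+{
+	/* a stale cycle bit means software has not produced this TRB yet */
+	if (trb_cycle != ccs)
+		return false;
+
+	/* DMA is parked exactly on the dequeue TRB: not finished yet */
+	if (doorbell && ci == dq)
+		return false;
+
+	/* Case 1 (dq < ci) and Case 2 (dq > ci, CI wrapped via LINK TRB) */
+	return dq != ci;
+}
+#endif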
+
+static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+				     struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_request *priv_req;
+	struct usb_request *request;
+	struct cdns3_trb *trb;
+	bool request_handled = false;
+	bool transfer_end = false;
+
+	while (!list_empty(&priv_ep->pending_req_list)) {
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+		priv_req = to_cdns3_request(request);
+
+		trb = priv_ep->trb_pool + priv_ep->dequeue;
+
+		/* The TRB was changed to a LINK TRB, and the request was handled at ep_dequeue */
+		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+
+			/* ISO ep_traddr may stop at LINK TRB */
+			if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
+			    priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+				break;
+
+			trace_cdns3_complete_trb(priv_ep, trb);
+			cdns3_ep_inc_deq(priv_ep);
+			trb = priv_ep->trb_pool + priv_ep->dequeue;
+		}
+
+		if (!request->stream_id) {
+			/* Re-select endpoint. It could have been changed by
+			 * another CPU while handling usb_gadget_giveback_request.
+ */ + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + + while (cdns3_trb_handled(priv_ep, priv_req)) { + priv_req->finished_trb++; + if (priv_req->finished_trb >= priv_req->num_of_trb) + request_handled = true; + + trb = priv_ep->trb_pool + priv_ep->dequeue; + trace_cdns3_complete_trb(priv_ep, trb); + + if (!transfer_end) + request->actual += + TRB_LEN(le32_to_cpu(trb->length)); + + if (priv_req->num_of_trb > 1 && + le32_to_cpu(trb->control) & TRB_SMM && + le32_to_cpu(trb->control) & TRB_CHAIN) + transfer_end = true; + + cdns3_ep_inc_deq(priv_ep); + } + + if (request_handled) { + /* TRBs are duplicated by priv_ep->interval time for ISO IN */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir) + request->actual /= priv_ep->interval; + + cdns3_gadget_giveback(priv_ep, priv_req, 0); + request_handled = false; + transfer_end = false; + } else { + goto prepare_next_td; + } + + if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && + TRBS_PER_SEGMENT == 2) + break; + } else { + /* Re-select endpoint. It could be changed by other CPU + * during handling usb_gadget_giveback_request. + */ + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + + trb = priv_ep->trb_pool; + trace_cdns3_complete_trb(priv_ep, trb); + + if (trb != priv_req->trb) + dev_warn(priv_dev->dev, + "request_trb=0x%p, queue_trb=0x%p\n", + priv_req->trb, trb); + + request->actual += TRB_LEN(le32_to_cpu(trb->length)); + + if (!request->num_sgs || + (request->num_sgs == (priv_ep->stream_sg_idx + 1))) { + priv_ep->stream_sg_idx = 0; + cdns3_gadget_giveback(priv_ep, priv_req, 0); + } else { + priv_ep->stream_sg_idx++; + cdns3_ep_run_stream_transfer(priv_ep, request); + } + break; + } + } + priv_ep->flags &= ~EP_PENDING_REQUEST; + +prepare_next_td: + if (!(priv_ep->flags & EP_STALLED) && + !(priv_ep->flags & EP_STALL_PENDING)) + cdns3_start_all_request(priv_dev, priv_ep); +} + +void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + cdns3_wa1_restore_cycle_bit(priv_ep); + + if (rearm) { + trace_cdns3_ring(priv_ep); + + /* Cycle Bit must be updated before arming DMA. */ + wmb(); + writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); + + __cdns3_gadget_wakeup(priv_dev); + + trace_cdns3_doorbell_epx(priv_ep->name, + readl(&priv_dev->regs->ep_traddr)); + } +} + +static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep) +{ + u16 tdl = priv_ep->pending_tdl; + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + if (tdl > EP_CMD_TDL_MAX) { + tdl = EP_CMD_TDL_MAX; + priv_ep->pending_tdl -= EP_CMD_TDL_MAX; + } else { + priv_ep->pending_tdl = 0; + } + + writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd); +} + +/** + * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint + * @priv_ep: endpoint object + * + * Returns 0 + */ +static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + u32 ep_sts_reg; + struct usb_request *deferred_request; + struct usb_request *pending_request; + u32 tdl = 0; + + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + + trace_cdns3_epx_irq(priv_dev, priv_ep); + + ep_sts_reg = readl(&priv_dev->regs->ep_sts); + writel(ep_sts_reg, &priv_dev->regs->ep_sts); + + if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) { + bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY); + + tdl = cdns3_get_tdl(priv_dev); + + /* + * Continue the previous transfer: + * There is some racing between ERDY and PRIME. 
The device sends
+	 * ERDY and almost at the same time the host sends PRIME. This causes
+	 * the host to ignore the ERDY packet, so the driver has to send it
+	 * again.
+	 */
+		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
+			    EP_STS_HOSTPP(ep_sts_reg))) {
+			writel(EP_CMD_ERDY |
+			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
+			       &priv_dev->regs->ep_cmd);
+			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
+		} else {
+			priv_ep->prime_flag = true;
+
+			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
+			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+			if (deferred_request && !pending_request) {
+				cdns3_start_all_request(priv_dev, priv_ep);
+			}
+		}
+	}
+
+	if (ep_sts_reg & EP_STS_TRBERR) {
+		if (priv_ep->flags & EP_STALL_PENDING &&
+		    !(ep_sts_reg & EP_STS_DESCMIS &&
+		    priv_dev->dev_ver < DEV_VER_V2)) {
+			cdns3_ep_stall_flush(priv_ep);
+		}
+
+		/*
+		 * For isochronous transfers the driver completes the request
+		 * on IOC or on TRBERR. IOC appears only when the device
+		 * receives an OUT data packet. If the host disables the stream
+		 * or loses some packets, then the only way to finish all
+		 * queued transfers is to do it on the TRBERR event.
+		 */
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
+		    !priv_ep->wa1_set) {
+			if (!priv_ep->dir) {
+				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
+
+				ep_cfg &= ~EP_CFG_ENABLE;
+				writel(ep_cfg, &priv_dev->regs->ep_cfg);
+				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+				priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+			}
+			cdns3_transfer_completed(priv_dev, priv_ep);
+		} else if (!(priv_ep->flags & EP_STALLED) &&
+			  !(priv_ep->flags & EP_STALL_PENDING)) {
+			if (priv_ep->flags & EP_DEFERRED_DRDY) {
+				priv_ep->flags &= ~EP_DEFERRED_DRDY;
+				cdns3_start_all_request(priv_dev, priv_ep);
+			} else {
+				cdns3_rearm_transfer(priv_ep,
+						     priv_ep->wa1_set);
+			}
+		}
+	}
+
+	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
+	    (ep_sts_reg & EP_STS_IOT)) {
+		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
+			if (ep_sts_reg & EP_STS_ISP)
+				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
+			else
+				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
+		}
+
+		if (!priv_ep->use_streams) {
+			if ((ep_sts_reg & EP_STS_IOC) ||
+			    (ep_sts_reg & EP_STS_ISP)) {
+				cdns3_transfer_completed(priv_dev, priv_ep);
+			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
+				   priv_ep->pending_tdl) {
+				/* handle IOT with pending tdl */
+				cdns3_reprogram_tdl(priv_ep);
+			}
+		} else if (priv_ep->dir == USB_DIR_OUT) {
+			priv_ep->ep_sts_pending |= ep_sts_reg;
+		} else if (ep_sts_reg & EP_STS_IOT) {
+			cdns3_transfer_completed(priv_dev, priv_ep);
+		}
+	}
+
+	/*
+	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
+	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state machine
+	 */
+	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
+	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
+		priv_ep->ep_sts_pending = 0;
+		cdns3_transfer_completed(priv_dev, priv_ep);
+	}
+
+	/*
+	 * WA2: this condition should only be met when
+	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
+	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
+	 * In other cases this interrupt will be disabled.
+	 */
+	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
+	    !(priv_ep->flags & EP_STALLED))
+		cdns3_wa2_descmissing_packet(priv_ep);
+
+	return 0;
+}
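+
+/*
+ * Illustrative sketch (not part of this driver): the read/write-1-to-clear
+ * (W1C) handling used at the top of cdns3_check_ep_interrupt_proceed().
+ * Reading the status register returns the pending bits; writing the same
+ * value back acknowledges exactly those bits, so events that arrive between
+ * the two accesses are not lost. The helper name is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static u32 ack_pending_events(void __iomem *status_reg)
+{
+	u32 pending = readl(status_reg);
+
+	/* clear only what we observed; newer bits stay pending */
+	writel(pending, status_reg);
+
+	return pending;
+}
+#endif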
+
+static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
+{
+	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
+		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
+}
+
+/**
+ * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
+ * @priv_dev: extended gadget object
+ * @usb_ists: bitmap representation of device's reported interrupts
+ * (usb_ists register value)
+ */
+static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
+					      u32 usb_ists)
+__must_hold(&priv_dev->lock)
+{
+	int speed = 0;
+
+	trace_cdns3_usb_irq(priv_dev, usb_ists);
+	if (usb_ists & USB_ISTS_L1ENTI) {
+		/*
+		 * WORKAROUND: the CDNS3 controller has an issue with hardware
+		 * resuming from L1. To fix it, if any DMA transfer is pending
+		 * the driver must start driving the resume signal immediately.
+		 */
+		if (readl(&priv_dev->regs->drbl))
+			__cdns3_gadget_wakeup(priv_dev);
+	}
+
+	/* Connection detected */
+	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
+		speed = cdns3_get_speed(priv_dev);
+		priv_dev->gadget.speed = speed;
+		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
+		cdns3_ep0_config(priv_dev);
+	}
+
+	/* Disconnection detected */
+	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
+		spin_unlock(&priv_dev->lock);
+		cdns3_disconnect_gadget(priv_dev);
+		spin_lock(&priv_dev->lock);
+		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
+		cdns3_hw_reset_eps_config(priv_dev);
+	}
+
+	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
+		if (priv_dev->gadget_driver &&
+		    priv_dev->gadget_driver->suspend) {
+			spin_unlock(&priv_dev->lock);
+			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
+			spin_lock(&priv_dev->lock);
+		}
+	}
+
+	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
+		if (priv_dev->gadget_driver &&
+		    priv_dev->gadget_driver->resume) {
+			spin_unlock(&priv_dev->lock);
+			priv_dev->gadget_driver->resume(&priv_dev->gadget);
+			spin_lock(&priv_dev->lock);
+		}
+	}
+
+	/* reset */
+	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
+		if (priv_dev->gadget_driver) {
+			spin_unlock(&priv_dev->lock);
+			usb_gadget_udc_reset(&priv_dev->gadget,
+					     priv_dev->gadget_driver);
+			spin_lock(&priv_dev->lock);
+
+			/* read again to check the actual speed */
+			speed = cdns3_get_speed(priv_dev);
+			priv_dev->gadget.speed = speed;
+			cdns3_hw_reset_eps_config(priv_dev);
+			cdns3_ep0_config(priv_dev);
+		}
+	}
+}
+
+/**
+ * cdns3_device_irq_handler - interrupt handler for device part of controller
+ *
+ * @irq: irq number for cdns3 core device
+ * @data: structure of cdns3
+ *
+ * Returns IRQ_HANDLED or IRQ_NONE
+ */
+static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
+{
+	struct cdns3_device *priv_dev = data;
+	struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
+	irqreturn_t ret = IRQ_NONE;
+	u32 reg;
+
+	if (cdns->in_lpm)
+		return ret;
+
+	/* check USB device interrupt */
+	reg = readl(&priv_dev->regs->usb_ists);
+	if (reg) {
+		/* After masking interrupts, new interrupts won't be
+		 * reported in usb_ists/ep_ists. In order not to lose some
+		 * of them, the driver disables only the detected interrupts.
+		 * They will be enabled ASAP after clearing the source of
+		 * the interrupt. This unusual behavior applies only to
+		 * the usb_ists register.
+ */ + reg = ~reg & readl(&priv_dev->regs->usb_ien); + /* mask deferred interrupt. */ + writel(reg, &priv_dev->regs->usb_ien); + ret = IRQ_WAKE_THREAD; + } + + /* check endpoint interrupt */ + reg = readl(&priv_dev->regs->ep_ists); + if (reg) { + writel(0, &priv_dev->regs->ep_ien); + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +/** + * cdns3_device_thread_irq_handler - interrupt handler for device part + * of controller + * + * @irq: irq number for cdns3 core device + * @data: structure of cdns3 + * + * Returns IRQ_HANDLED or IRQ_NONE + */ +static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) +{ + struct cdns3_device *priv_dev = data; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + unsigned int bit; + unsigned long reg; + + spin_lock_irqsave(&priv_dev->lock, flags); + + reg = readl(&priv_dev->regs->usb_ists); + if (reg) { + writel(reg, &priv_dev->regs->usb_ists); + writel(USB_IEN_INIT, &priv_dev->regs->usb_ien); + cdns3_check_usb_interrupt_proceed(priv_dev, reg); + ret = IRQ_HANDLED; + } + + reg = readl(&priv_dev->regs->ep_ists); + + /* handle default endpoint OUT */ + if (reg & EP_ISTS_EP_OUT0) { + cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT); + ret = IRQ_HANDLED; + } + + /* handle default endpoint IN */ + if (reg & EP_ISTS_EP_IN0) { + cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN); + ret = IRQ_HANDLED; + } + + /* check if interrupt from non default endpoint, if no exit */ + reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0); + if (!reg) + goto irqend; + + for_each_set_bit(bit, ®, + sizeof(u32) * BITS_PER_BYTE) { + cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]); + ret = IRQ_HANDLED; + } + + if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams) + cdns3_wa2_check_outq_status(priv_dev); + +irqend: + writel(~0, &priv_dev->regs->ep_ien); + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +/** + * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP + * + * The real reservation will occur during write to EP_CFG register, + * this function is used to check if the 'size' reservation is allowed. 
+ *
+ * @priv_dev: extended gadget object
+ * @size: the size (KB) the EP would like to allocate
+ * @is_in: endpoint direction
+ *
+ * Return 0 if the required size can be met, or a negative value on failure
+ */
+static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
+					  int size, int is_in)
+{
+	int remained;
+
+	/* 2KB are reserved for EP0 */
+	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
+
+	if (is_in) {
+		if (remained < size)
+			return -EPERM;
+
+		priv_dev->onchip_used_size += size;
+	} else {
+		int required;
+
+		/*
+		 * All OUT EPs share the same chunk of on-chip memory, so
+		 * the driver checks whether it has already assigned enough
+		 * buffers.
+		 */
+		if (priv_dev->out_mem_is_allocated >= size)
+			return 0;
+
+		required = size - priv_dev->out_mem_is_allocated;
+
+		if (required > remained)
+			return -EPERM;
+
+		priv_dev->out_mem_is_allocated += required;
+		priv_dev->onchip_used_size += required;
+	}
+
+	return 0;
+}
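+
+/*
+ * Illustrative sketch (not part of this driver): the bookkeeping above with
+ * example numbers. Assume a hypothetical 18KB of on-chip memory and nothing
+ * handed out yet. IN endpoints are charged exclusively, while OUT endpoints
+ * share one chunk, so only growth of the shared chunk is charged.
+ */
+#if 0	/* example only, never compiled */
+static void onchip_reserve_example(void)
+{
+	int onchip_buffers = 18;	/* hypothetical 18KB of on-chip RAM */
+	int used = 0, out_allocated = 0;
+
+	/* 2KB are always reserved for EP0 */
+	int remained = onchip_buffers - used - 2;	/* 16 */
+
+	/* an IN endpoint asking for 4KB is charged exclusively */
+	used += 4;
+	remained = onchip_buffers - used - 2;		/* 12 */
+
+	/* the first OUT endpoint needing 3KB grows the shared chunk */
+	out_allocated = 3;
+	used += 3;
+
+	/* a second OUT endpoint needing 2KB fits in the shared 3KB chunk
+	 * (out_allocated >= size), so nothing more is charged */
+}
+#endif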
+
+static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+				  struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
+
+	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
+	if (priv_dev->dev_ver <= DEV_VER_V2)
+		writel(USB_CONF_DMULT, &regs->usb_conf);
+
+	if (priv_dev->dev_ver == DEV_VER_V2)
+		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
+
+	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
+		u32 mask;
+
+		if (priv_ep->dir)
+			mask = BIT(priv_ep->num + 16);
+		else
+			mask = BIT(priv_ep->num);
+
+		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
+			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
+			cdns3_set_register_bit(&regs->tdl_beh, mask);
+			cdns3_set_register_bit(&regs->tdl_beh2, mask);
+			cdns3_set_register_bit(&regs->dma_adv_td, mask);
+		}
+
+		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
+			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
+
+		cdns3_set_register_bit(&regs->dtrans, mask);
+	}
+}
+
+/**
+ * cdns3_ep_config - Configure hardware endpoint
+ * @priv_ep: extended endpoint object
+ * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
+ */
+int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+{
+	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
+	u32 max_packet_size = priv_ep->wMaxPacketSize;
+	u8 maxburst = priv_ep->bMaxBurst;
+	u32 ep_cfg = 0;
+	u8 buffering;
+	int ret;
+
+	buffering = priv_dev->ep_buf_size - 1;
+
+	cdns3_configure_dmult(priv_dev, priv_ep);
+
+	switch (priv_ep->type) {
+	case USB_ENDPOINT_XFER_INT:
+		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
+
+		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+			ep_cfg |= EP_CFG_TDL_CHK;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
+
+		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+			ep_cfg |= EP_CFG_TDL_CHK;
+		break;
+	default:
+		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
+		buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1;
+	}
+
+	switch (priv_dev->gadget.speed) {
+	case USB_SPEED_FULL:
+		max_packet_size = is_iso_ep ? 1023 : 64;
+		break;
+	case USB_SPEED_HIGH:
+		max_packet_size = is_iso_ep ? 1024 : 512;
+		break;
+	case USB_SPEED_SUPER:
+		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+			max_packet_size = 1024;
+			maxburst = priv_dev->ep_buf_size - 1;
+		}
+		break;
+	default:
+		/* all other speeds are not supported */
+		return -EINVAL;
+	}
+
+	if (max_packet_size == 1024)
+		priv_ep->trb_burst_size = 128;
+	else if (max_packet_size >= 512)
+		priv_ep->trb_burst_size = 64;
+	else
+		priv_ep->trb_burst_size = 16;
+
+	/*
+	 * In versions preceding DEV_VER_V2, for example, iMX8QM, there exist
+	 * bugs in the DMA. These bugs occur when the trb_burst_size exceeds 16
+	 * and the address is not aligned to 128 bytes (which is a product of
+	 * the 64-bit AXI and the AXI maximum burst length of 16 or 0xF+1,
+	 * dma_axi_ctrl0[3:0]). This results in data corruption when it crosses
+	 * the 4K border. The corruption specifically occurs from the position
+	 * (4K - (address & 0x7F)) to 4K.
+	 *
+	 * So force trb_burst_size to 16 on such platforms.
+	 */
+	if (priv_dev->dev_ver < DEV_VER_V2)
+		priv_ep->trb_burst_size = 16;
+
+	buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+	maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+
+	/* onchip buffer is only allocated before configuration */
+	if (!priv_dev->hw_configured_flag) {
+		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
+						     !!priv_ep->dir);
+		if (ret) {
+			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
+			return ret;
+		}
+	}
+
+	if (enable)
+		ep_cfg |= EP_CFG_ENABLE;
+
+	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+		if (priv_dev->dev_ver >= DEV_VER_V3) {
+			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+			/*
+			 * Stream capable endpoints are handled by using ep_tdl
+			 * register. Other endpoints use TDL from TRB feature.
+			 */
+			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
+						 mask);
+		}
+
+		/* Enable Stream Bit TDL chk and SID chk */
+		ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
+	}
+
+	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
+		  EP_CFG_MULT(priv_ep->mult) |	/* must match EP setting */
+		  EP_CFG_BUFFERING(buffering) |
+		  EP_CFG_MAXBURST(maxburst);
+
+	cdns3_select_ep(priv_dev, bEndpointAddress);
+	writel(ep_cfg, &priv_dev->regs->ep_cfg);
+	priv_ep->flags |= EP_CONFIGURED;
+
+	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
+		priv_ep->name, ep_cfg);
+
+	return 0;
+}
+
+/* Find correct direction for HW endpoint according to description */
+static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
+				   struct cdns3_endpoint *priv_ep)
+{
+	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
+	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
+}
+
+static struct
+cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
+					struct usb_endpoint_descriptor *desc)
+{
+	struct usb_ep *ep;
+	struct cdns3_endpoint *priv_ep;
+
+	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
+		unsigned long num;
+		int ret;
+		/* ep name pattern is like epXin or epXout */
+		char c[2] = {ep->name[2], '\0'};
+
+		ret = kstrtoul(c, 10, &num);
+		if (ret)
+			return ERR_PTR(ret);
+
+		priv_ep = ep_to_cdns3_ep(ep);
+		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
+			if (!(priv_ep->flags & EP_CLAIMED)) {
+				priv_ep->num = num;
+				return priv_ep;
+			}
+		}
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+/*
+ * The Cadence IP has one limitation: all endpoints must be configured
+ * (Type & MaxPacketSize) before setting the configuration through the
+ * hardware register, which means we can't change the endpoint configuration
+ * after set_configuration.
+ *
+ * This function sets the EP_CLAIMED flag, which is added when the gadget
+ * driver uses usb_ep_autoconfig to configure a specific endpoint;
+ * when the UDC driver receives a set_configuration request,
+ * it goes through all claimed endpoints and configures them
+ * accordingly.
+ *
+ * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
+ * through the ep_cfg register, which can be changed after set_configuration,
+ * and do some software operations accordingly.
+ */
+static struct
+usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
+			      struct usb_endpoint_descriptor *desc,
+			      struct usb_ss_ep_comp_descriptor *comp_desc)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+	struct cdns3_endpoint *priv_ep;
+	unsigned long flags;
+
+	priv_ep = cdns3_find_available_ep(priv_dev, desc);
+	if (IS_ERR(priv_ep)) {
+		dev_err(priv_dev->dev, "no available ep\n");
+		return NULL;
+	}
+
+	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+	priv_ep->endpoint.desc = desc;
+	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
+	priv_ep->type = usb_endpoint_type(desc);
+	priv_ep->flags |= EP_CLAIMED;
+	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+	priv_ep->wMaxPacketSize = usb_endpoint_maxp(desc);
+	priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize);
+	priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK;
+	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) {
+		priv_ep->mult = USB_SS_MULT(comp_desc->bmAttributes) - 1;
+		priv_ep->bMaxBurst = comp_desc->bMaxBurst;
+	}
+
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+	return &priv_ep->endpoint;
+}
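+
+/*
+ * Illustrative sketch (not part of this driver): how the endpoint number and
+ * direction can be recovered from the "epXin"/"epXout" name pattern used by
+ * cdns3_find_available_ep() above. The driver itself calls kstrtoul() on a
+ * one-character string, so the scheme assumes single-digit endpoint numbers;
+ * the helper below is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static bool parse_ep_name(const char *name, unsigned int *num, bool *is_in)
+{
+	/* expected patterns: "ep1in", "ep2out", ... */
+	if (name[0] != 'e' || name[1] != 'p' ||
+	    name[2] < '0' || name[2] > '9')
+		return false;
+
+	*num = name[2] - '0';
+	*is_in = (name[3] == 'i');	/* 'i' of "in", 'o' of "out" */
+	return true;
+}
+#endif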
parameters\n"); + return -EINVAL; + } + + if (!desc->wMaxPacketSize) { + dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n"); + return -EINVAL; + } + + if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED, + "%s is already enabled\n", priv_ep->name)) + return 0; + + spin_lock_irqsave(&priv_dev->lock, flags); + + priv_ep->endpoint.desc = desc; + priv_ep->type = usb_endpoint_type(desc); + priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; + + if (priv_ep->interval > ISO_MAX_INTERVAL && + priv_ep->type == USB_ENDPOINT_XFER_ISOC) { + dev_err(priv_dev->dev, "Driver is limited to %d period\n", + ISO_MAX_INTERVAL); + + ret = -EINVAL; + goto exit; + } + + bEndpointAddress = priv_ep->num | priv_ep->dir; + cdns3_select_ep(priv_dev, bEndpointAddress); + + /* + * For some versions of controller at some point during ISO OUT traffic + * DMA reads Transfer Ring for the EP which has never got doorbell. + * This issue was detected only on simulation, but to avoid this issue + * driver add protection against it. To fix it driver enable ISO OUT + * endpoint before setting DRBL. This special treatment of ISO OUT + * endpoints are recommended by controller specification. + */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) + enable = 0; + + if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { + /* + * Enable stream support (SS mode) related interrupts + * in EP_STS_EN Register + */ + if (priv_dev->gadget.speed >= USB_SPEED_SUPER) { + reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN | + EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN | + EP_STS_EN_STREAMREN; + priv_ep->use_streams = true; + ret = cdns3_ep_config(priv_ep, enable); + priv_dev->using_streams |= true; + } + } else { + ret = cdns3_ep_config(priv_ep, enable); + } + + if (ret) + goto exit; + + ret = cdns3_allocate_trb_pool(priv_ep); + if (ret) + goto exit; + + bEndpointAddress = priv_ep->num | priv_ep->dir; + cdns3_select_ep(priv_dev, bEndpointAddress); + + trace_cdns3_gadget_ep_enable(priv_ep); + + writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); + + ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), + 1, 1000); + + if (unlikely(ret)) { + cdns3_free_trb_pool(priv_ep); + ret = -EINVAL; + goto exit; + } + + /* enable interrupt for selected endpoint */ + cdns3_set_register_bit(&priv_dev->regs->ep_ien, + BIT(cdns3_ep_addr_to_index(bEndpointAddress))); + + if (priv_dev->dev_ver < DEV_VER_V2) + cdns3_wa2_enable_detection(priv_dev, priv_ep, reg); + + writel(reg, &priv_dev->regs->ep_sts_en); + + ep->desc = desc; + priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING | + EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN); + priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR; + priv_ep->wa1_set = 0; + priv_ep->enqueue = 0; + priv_ep->dequeue = 0; + reg = readl(&priv_dev->regs->ep_sts); + priv_ep->pcs = !!EP_STS_CCS(reg); + priv_ep->ccs = !!EP_STS_CCS(reg); + /* one TRB is reserved for link TRB used in DMULT mode*/ + priv_ep->free_trbs = priv_ep->num_trbs - 1; +exit: + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +/** + * cdns3_gadget_ep_disable - Disable endpoint + * @ep: endpoint object + * + * Returns 0 on success, error code elsewhere + */ +static int cdns3_gadget_ep_disable(struct usb_ep *ep) +{ + struct cdns3_endpoint *priv_ep; + struct cdns3_request *priv_req; + struct cdns3_device *priv_dev; + struct usb_request *request; + unsigned long flags; + int ret = 0; + u32 ep_cfg; + int val; + + if (!ep) { + pr_err("usbss: 
+
+/**
+ * cdns3_gadget_ep_disable - Disable endpoint
+ * @ep: endpoint object
+ *
+ * Returns 0 on success, error code elsewhere
+ */
+static int cdns3_gadget_ep_disable(struct usb_ep *ep)
+{
+	struct cdns3_endpoint *priv_ep;
+	struct cdns3_request *priv_req;
+	struct cdns3_device *priv_dev;
+	struct usb_request *request;
+	unsigned long flags;
+	int ret = 0;
+	u32 ep_cfg;
+	int val;
+
+	if (!ep) {
+		pr_err("usbss: invalid parameters\n");
+		return -EINVAL;
+	}
+
+	priv_ep = ep_to_cdns3_ep(ep);
+	priv_dev = priv_ep->cdns3_dev;
+
+	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
+			  "%s is already disabled\n", priv_ep->name))
+		return 0;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+
+	trace_cdns3_gadget_ep_disable(priv_ep);
+
+	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
+
+	ep_cfg = readl(&priv_dev->regs->ep_cfg);
+	ep_cfg &= ~EP_CFG_ENABLE;
+	writel(ep_cfg, &priv_dev->regs->ep_cfg);
+
+	/*
+	 * The driver needs some time before resetting the endpoint.
+	 * It needs to wait for the DBUSY bit to clear or for the timeout
+	 * to expire. 10us is enough time for the controller to stop the
+	 * transfer.
+	 */
+	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
+				  !(val & EP_STS_DBUSY), 1, 10);
+	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+
+	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
+					1, 1000);
+	if (unlikely(ret))
+		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
+			priv_ep->name);
+
+	while (!list_empty(&priv_ep->pending_req_list)) {
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+
+		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
+				      -ESHUTDOWN);
+	}
+
+	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+
+		kfree(priv_req->request.buf);
+		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+					     &priv_req->request);
+		list_del_init(&priv_req->list);
+		--priv_ep->wa2_counter;
+	}
+
+	while (!list_empty(&priv_ep->deferred_req_list)) {
+		request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
+				      -ESHUTDOWN);
+	}
+
+	priv_ep->descmis_req = NULL;
+
+	ep->desc = NULL;
+	priv_ep->flags &= ~EP_ENABLED;
+	priv_ep->use_streams = false;
+
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+	return ret;
+}
+
+/**
+ * __cdns3_gadget_ep_queue - Transfer data on endpoint
+ * @ep: endpoint object
+ * @request: request object
+ * @gfp_flags: gfp flags
+ *
+ * Returns 0 on success, error code elsewhere
+ */
+static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
+				   struct usb_request *request,
+				   gfp_t gfp_flags)
+{
+	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	struct cdns3_request *priv_req;
+	int ret = 0;
+
+	request->actual = 0;
+	request->status = -EINPROGRESS;
+	priv_req = to_cdns3_request(request);
+	trace_cdns3_ep_queue(priv_req);
+
+	if (priv_dev->dev_ver < DEV_VER_V2) {
+		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
+						priv_req);
+
+		if (ret == EINPROGRESS)
+			return 0;
+	}
+
+	ret = cdns3_prepare_aligned_request_buf(priv_req);
+	if (ret < 0)
+		return ret;
+
+	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
+					    usb_endpoint_dir_in(ep->desc));
+	if (ret)
+		return ret;
+
+	list_add_tail(&request->list, &priv_ep->deferred_req_list);
+
+	/*
+	 * For stream capable endpoints, start the request only if the prime
+	 * irq flag is set.
+	 * If the hardware endpoint configuration has not been set yet, then
+	 * just queue the request in the deferred list. The transfer will be
+	 * started in cdns3_set_hw_configuration.
+ */ + if (!request->stream_id) { + if (priv_dev->hw_configured_flag && + !(priv_ep->flags & EP_STALLED) && + !(priv_ep->flags & EP_STALL_PENDING)) + cdns3_start_all_request(priv_dev, priv_ep); + } else { + if (priv_dev->hw_configured_flag && priv_ep->prime_flag) + cdns3_start_all_request(priv_dev, priv_ep); + } + + return 0; +} + +static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, + gfp_t gfp_flags) +{ + struct usb_request *zlp_request; + struct cdns3_endpoint *priv_ep; + struct cdns3_device *priv_dev; + unsigned long flags; + int ret; + + if (!request || !ep) + return -EINVAL; + + priv_ep = ep_to_cdns3_ep(ep); + priv_dev = priv_ep->cdns3_dev; + + spin_lock_irqsave(&priv_dev->lock, flags); + + ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags); + + if (ret == 0 && request->zero && request->length && + (request->length % ep->maxpacket == 0)) { + struct cdns3_request *priv_req; + + zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC); + zlp_request->buf = priv_dev->zlp_buf; + zlp_request->length = 0; + + priv_req = to_cdns3_request(zlp_request); + priv_req->flags |= REQUEST_ZLP; + + dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n", + priv_ep->name); + ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags); + } + + spin_unlock_irqrestore(&priv_dev->lock, flags); + return ret; +} + +/** + * cdns3_gadget_ep_dequeue - Remove request from transfer queue + * @ep: endpoint object associated with request + * @request: request object + * + * Returns 0 on success, error code elsewhere + */ +int cdns3_gadget_ep_dequeue(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev; + struct usb_request *req, *req_temp; + struct cdns3_request *priv_req; + struct cdns3_trb *link_trb; + u8 req_on_hw_ring = 0; + unsigned long flags; + int ret = 0; + int val; + + if (!ep || !request || !ep->desc) + return -EINVAL; + + priv_dev = priv_ep->cdns3_dev; + + spin_lock_irqsave(&priv_dev->lock, flags); + + priv_req = to_cdns3_request(request); + + trace_cdns3_ep_dequeue(priv_req); + + cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); + + list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list, + list) { + if (request == req) { + req_on_hw_ring = 1; + goto found; + } + } + + list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list, + list) { + if (request == req) + goto found; + } + + goto not_found; + +found: + link_trb = priv_req->trb; + + /* Update ring only if removed request is on pending_req_list list */ + if (req_on_hw_ring && link_trb) { + /* Stop DMA */ + writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd); + + /* wait for DFLUSH cleared */ + readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & EP_CMD_DFLUSH), 1, 1000); + + link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma + + ((priv_req->end_trb + 1) * TRB_SIZE))); + link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | + TRB_TYPE(TRB_LINK) | TRB_CHAIN); + + if (priv_ep->wa1_trb == priv_req->trb) + cdns3_wa1_restore_cycle_bit(priv_ep); + } + + cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); + + req = cdns3_next_request(&priv_ep->pending_req_list); + if (req) + cdns3_rearm_transfer(priv_ep, 1); + +not_found: + spin_unlock_irqrestore(&priv_dev->lock, flags); + return ret; +} + +/** + * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint + * Should be called after acquiring spin_lock and selecting ep + * @priv_ep: endpoint object to set stall 
on. + */ +void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + trace_cdns3_halt(priv_ep, 1, 0); + + if (!(priv_ep->flags & EP_STALLED)) { + u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); + + if (!(ep_sts_reg & EP_STS_DBUSY)) + cdns3_ep_stall_flush(priv_ep); + else + priv_ep->flags |= EP_STALL_PENDING; + } +} + +/** + * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint + * Should be called after acquiring spin_lock and selecting ep + * @priv_ep: endpoint object to clear stall on + */ +int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct usb_request *request; + struct cdns3_request *priv_req; + struct cdns3_trb *trb = NULL; + struct cdns3_trb trb_tmp; + int ret; + int val; + + trace_cdns3_halt(priv_ep, 0, 0); + + request = cdns3_next_request(&priv_ep->pending_req_list); + if (request) { + priv_req = to_cdns3_request(request); + trb = priv_req->trb; + if (trb) { + trb_tmp = *trb; + trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); + } + } + + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); + + /* wait for EPRST cleared */ + ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & EP_CMD_EPRST), 1, 100); + if (ret) + return -EINVAL; + + priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); + + if (request) { + if (trb) + *trb = trb_tmp; + + cdns3_rearm_transfer(priv_ep, 1); + } + + cdns3_start_all_request(priv_dev, priv_ep); + return ret; +} + +/** + * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint + * @ep: endpoint object to set/clear stall on + * @value: 1 for set stall, 0 for clear stall + * + * Returns 0 on success, error code elsewhere + */ +int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + unsigned long flags; + int ret = 0; + + if (!(priv_ep->flags & EP_ENABLED)) + return -EPERM; + + spin_lock_irqsave(&priv_dev->lock, flags); + + cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); + + if (!value) { + priv_ep->flags &= ~EP_WEDGE; + ret = __cdns3_gadget_ep_clear_halt(priv_ep); + } else { + __cdns3_gadget_ep_set_halt(priv_ep); + } + + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +extern const struct usb_ep_ops cdns3_gadget_ep0_ops; + +static const struct usb_ep_ops cdns3_gadget_ep_ops = { + .enable = cdns3_gadget_ep_enable, + .disable = cdns3_gadget_ep_disable, + .alloc_request = cdns3_gadget_ep_alloc_request, + .free_request = cdns3_gadget_ep_free_request, + .queue = cdns3_gadget_ep_queue, + .dequeue = cdns3_gadget_ep_dequeue, + .set_halt = cdns3_gadget_ep_set_halt, + .set_wedge = cdns3_gadget_ep_set_wedge, +}; + +/** + * cdns3_gadget_get_frame - Returns number of actual ITP frame + * @gadget: gadget object + * + * Returns number of actual ITP frame + */ +static int cdns3_gadget_get_frame(struct usb_gadget *gadget) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + + return readl(&priv_dev->regs->usb_itpn); +} + +int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev) +{ + enum usb_device_speed speed; + + speed = cdns3_get_speed(priv_dev); + + if (speed >= USB_SPEED_SUPER) + return 0; + + /* Start driving resume signaling to indicate remote wakeup. 
 */
+	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
+
+	return 0;
+}
+
+static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+	ret = __cdns3_gadget_wakeup(priv_dev);
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+	return ret;
+}
+
+static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
+					int is_selfpowered)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+	priv_dev->is_selfpowered = !!is_selfpowered;
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+	return 0;
+}
+
+static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+
+	if (is_on) {
+		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
+	} else {
+		writel(~0, &priv_dev->regs->ep_ists);
+		writel(~0, &priv_dev->regs->usb_ists);
+		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
+	}
+
+	return 0;
+}
+
+static void cdns3_gadget_config(struct cdns3_device *priv_dev)
+{
+	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
+	u32 reg;
+
+	cdns3_ep0_config(priv_dev);
+
+	/* enable interrupts for endpoint 0 (in and out) */
+	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
+
+	/*
+	 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
+	 * revision of controller.
+	 */
+	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
+		reg = readl(&regs->dbg_link1);
+
+		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
+		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
+		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
+		writel(reg, &regs->dbg_link1);
+	}
+
+	/*
+	 * By default, some platforms have set protected access to memory.
+	 * This causes problems with the cache, so the driver restores
+	 * non-secure access to memory.
+	 */
+	reg = readl(&regs->dma_axi_ctrl);
+	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
+	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
+	writel(reg, &regs->dma_axi_ctrl);
+
+	/* enable generic interrupt */
+	writel(USB_IEN_INIT, &regs->usb_ien);
+	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
+	/* keep Fast Access bit */
+	writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
+
+	cdns3_configure_dmult(priv_dev, NULL);
+}
+
+/**
+ * cdns3_gadget_udc_start - Gadget start
+ * @gadget: gadget object
+ * @driver: driver which operates on this gadget
+ *
+ * Returns 0 on success, error code elsewhere
+ */
+static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
+				  struct usb_gadget_driver *driver)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+	unsigned long flags;
+	enum usb_device_speed max_speed = driver->max_speed;
+
+	spin_lock_irqsave(&priv_dev->lock, flags);
+	priv_dev->gadget_driver = driver;
+
+	/* limit speed if necessary */
+	max_speed = min(driver->max_speed, gadget->max_speed);
+
+	switch (max_speed) {
+	case USB_SPEED_FULL:
+		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+		break;
+	case USB_SPEED_HIGH:
+		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+		break;
+	case USB_SPEED_SUPER:
+		break;
+	default:
+		dev_err(priv_dev->dev,
+			"invalid maximum_speed parameter %d\n",
+			max_speed);
+		fallthrough;
+	case USB_SPEED_UNKNOWN:
+		/* default to superspeed */
+		max_speed = USB_SPEED_SUPER;
+		break;
+	}
+
+	cdns3_gadget_config(priv_dev);
+	spin_unlock_irqrestore(&priv_dev->lock, flags);
+	return 0;
+}
+
+/**
+ * cdns3_gadget_udc_stop - Stops gadget
+ * @gadget: gadget object
+ *
+ * Returns 0
+ */
+static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+	struct cdns3_endpoint *priv_ep;
+	u32 bEndpointAddress;
+	struct usb_ep *ep;
+	int val;
+
+	priv_dev->gadget_driver = NULL;
+
+	priv_dev->onchip_used_size = 0;
+	priv_dev->out_mem_is_allocated = 0;
+	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
+		priv_ep = ep_to_cdns3_ep(ep);
+		bEndpointAddress = priv_ep->num | priv_ep->dir;
+		cdns3_select_ep(priv_dev, bEndpointAddress);
+		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+					  !(val & EP_CMD_EPRST), 1, 100);
+
+		priv_ep->flags &= ~EP_CLAIMED;
+	}
+
+	/* disable interrupt for device */
+	writel(0, &priv_dev->regs->usb_ien);
+	writel(0, &priv_dev->regs->usb_pwr);
+	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
+
+	return 0;
+}
+
+/**
+ * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
+ * @gadget: pointer to the USB gadget
+ *
+ * Used to record the maximum number of endpoints being used in a USB
+ * composite device (across all configurations). This is used in the
+ * calculation of the TXFIFO sizes when resizing internal memory for
+ * individual endpoints. It helps ensure that the resizing logic reserves
+ * enough space for at least one max packet.
+ */
+static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+{
+	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+	struct cdns3_endpoint *priv_ep;
+	struct usb_ep *ep;
+	int n_in = 0;
+	int iso = 0;
+	int out = 1;
+	int total;
+	int n;
+
+	list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+		priv_ep = ep_to_cdns3_ep(ep);
+		if (!(priv_ep->flags & EP_CLAIMED))
+			continue;
+
+		n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
+		if (ep->address & USB_DIR_IN) {
+			/*
+			 * ISO transfer: DMA starts moving data when the ISO
+			 * arrives and only transfers min(TD size, iso) data,
+			 * so there is no benefit in allocating more internal
+			 * memory than 'iso'.
+			 */
+			if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+				iso += n;
+			else
+				n_in++;
+		} else {
+			if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+				out = max_t(int, out, n);
+		}
+	}
+
+	/* 2KB are reserved for EP0, 1KB for out */
+	total = 2 + n_in + out + iso;
+
+	if (total > priv_dev->onchip_buffers)
+		return -ENOMEM;
+
+	priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);
+
+	return 0;
+}
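+
+/*
+ * Illustrative sketch (not part of this driver): the memory budget computed
+ * by cdns3_gadget_check_config() above, with hypothetical example numbers.
+ * Suppose 18KB of on-chip memory, three claimed non-ISO IN endpoints with
+ * mult = 0 and bMaxBurst = 0, and no ISO endpoints.
+ */
+#if 0	/* example only, never compiled */
+static int check_config_example(void)
+{
+	int onchip_buffers = 18;	/* hypothetical 18KB on-chip memory */
+	int n_in = 3;			/* three claimed non-ISO IN endpoints */
+	int iso = 0;			/* no ISO IN endpoints */
+	int out = 1;			/* shared OUT budget, minimum 1KB */
+
+	/* 2KB for EP0 plus per-direction budgets, as in the function above */
+	int total = 2 + n_in + out + iso;	/* 6 */
+
+	if (total > onchip_buffers)
+		return -1;		/* configuration cannot fit */
+
+	/* what remains after EP0 and ISO is split between IN and OUT EPs */
+	return (onchip_buffers - 2 - iso) / (n_in + out);	/* 4 (KB) */
+}
+#endif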
+ */
+static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ struct cdns3_endpoint *priv_ep;
+ struct usb_ep *ep;
+ int n_in = 0;
+ int iso = 0;
+ int out = 1;
+ int total;
+ int n;
+
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ priv_ep = ep_to_cdns3_ep(ep);
+ if (!(priv_ep->flags & EP_CLAIMED))
+ continue;
+
+ n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
+ if (ep->address & USB_DIR_IN) {
+ /*
+ * ISO transfer: the DMA starts moving data when it gets an ISO
+ * request and only transfers min(TD size, iso), so there is no
+ * benefit in allocating more internal memory than 'iso'.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+ iso += n;
+ else
+ n_in++;
+ } else {
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+ out = max_t(int, out, n);
+ }
+ }
+
+ /* 2 KB are reserved for EP0, 1 KB for OUT */
+ total = 2 + n_in + out + iso;
+
+ if (total > priv_dev->onchip_buffers)
+ return -ENOMEM;
+
+ priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops cdns3_gadget_ops = {
+ .get_frame = cdns3_gadget_get_frame,
+ .wakeup = cdns3_gadget_wakeup,
+ .set_selfpowered = cdns3_gadget_set_selfpowered,
+ .pullup = cdns3_gadget_pullup,
+ .udc_start = cdns3_gadget_udc_start,
+ .udc_stop = cdns3_gadget_udc_stop,
+ .match_ep = cdns3_gadget_match_ep,
+ .check_config = cdns3_gadget_check_config,
+};
+
+static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
+{
+ int i;
+
+ /* ep0 OUT points to ep0 IN. */
+ priv_dev->eps[16] = NULL;
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
+ if (priv_dev->eps[i]) {
+ cdns3_free_trb_pool(priv_dev->eps[i]);
+ devm_kfree(priv_dev->dev, priv_dev->eps[i]);
+ }
+}
+
+/**
+ * cdns3_init_eps - Initializes software endpoints of gadget
+ * @priv_dev: extended gadget object
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_init_eps(struct cdns3_device *priv_dev)
+{
+ u32 ep_enabled_reg, iso_ep_reg;
+ struct cdns3_endpoint *priv_ep;
+ int ep_dir, ep_number;
+ u32 ep_mask;
+ int ret = 0;
+ int i;
+
+ /* Read it from USB_CAP3 to USB_CAP5 */
+ ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
+ iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
+
+ dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+ ep_dir = i >> 4; /* i div 16 */
+ ep_number = i & 0xF; /* i % 16 */
+ ep_mask = BIT(i);
+
+ if (!(ep_enabled_reg & ep_mask))
+ continue;
+
+ if (ep_dir && !ep_number) {
+ priv_dev->eps[i] = priv_dev->eps[0];
+ continue;
+ }
+
+ priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
+ GFP_KERNEL);
+ if (!priv_ep)
+ goto err;
+
+ /* set parent of endpoint object */
+ priv_ep->cdns3_dev = priv_dev;
+ priv_dev->eps[i] = priv_ep;
+ priv_ep->num = ep_number;
+ priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
+
+ if (!ep_number) {
+ ret = cdns3_init_ep0(priv_dev, priv_ep);
+ if (ret) {
+ dev_err(priv_dev->dev, "Failed to init ep0\n");
+ goto err;
+ }
+ } else {
+ snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
+ ep_number, !!ep_dir ?
"in" : "out"); + priv_ep->endpoint.name = priv_ep->name; + + usb_ep_set_maxpacket_limit(&priv_ep->endpoint, + CDNS3_EP_MAX_PACKET_LIMIT); + priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; + priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; + if (ep_dir) + priv_ep->endpoint.caps.dir_in = 1; + else + priv_ep->endpoint.caps.dir_out = 1; + + if (iso_ep_reg & ep_mask) + priv_ep->endpoint.caps.type_iso = 1; + + priv_ep->endpoint.caps.type_bulk = 1; + priv_ep->endpoint.caps.type_int = 1; + + list_add_tail(&priv_ep->endpoint.ep_list, + &priv_dev->gadget.ep_list); + } + + priv_ep->flags = 0; + + dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n", + priv_ep->name, + priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", + priv_ep->endpoint.caps.type_iso ? "ISO" : ""); + + INIT_LIST_HEAD(&priv_ep->pending_req_list); + INIT_LIST_HEAD(&priv_ep->deferred_req_list); + INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); + } + + return 0; +err: + cdns3_free_all_eps(priv_dev); + return -ENOMEM; +} + +static void cdns3_gadget_release(struct device *dev) +{ + struct cdns3_device *priv_dev = container_of(dev, + struct cdns3_device, gadget.dev); + + kfree(priv_dev); +} + +static void cdns3_gadget_exit(struct cdns *cdns) +{ + struct cdns3_device *priv_dev; + + priv_dev = cdns->gadget_dev; + + + pm_runtime_mark_last_busy(cdns->dev); + pm_runtime_put_autosuspend(cdns->dev); + + usb_del_gadget(&priv_dev->gadget); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); + + cdns3_free_all_eps(priv_dev); + + while (!list_empty(&priv_dev->aligned_buf_list)) { + struct cdns3_aligned_buf *buf; + + buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); + dma_free_noncoherent(priv_dev->sysdev, buf->size, + buf->buf, + buf->dma, + buf->dir); + + list_del(&buf->list); + kfree(buf); + } + + dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, + priv_dev->setup_dma); + dma_pool_destroy(priv_dev->eps_dma_pool); + + kfree(priv_dev->zlp_buf); + usb_put_gadget(&priv_dev->gadget); + cdns->gadget_dev = NULL; + cdns_drd_gadget_off(cdns); +} + +static int cdns3_gadget_start(struct cdns *cdns) +{ + struct cdns3_device *priv_dev; + u32 max_speed; + int ret; + + priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); + if (!priv_dev) + return -ENOMEM; + + usb_initialize_gadget(cdns->dev, &priv_dev->gadget, + cdns3_gadget_release); + cdns->gadget_dev = priv_dev; + priv_dev->sysdev = cdns->dev; + priv_dev->dev = cdns->dev; + priv_dev->regs = cdns->dev_regs; + + device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", + &priv_dev->onchip_buffers); + + if (priv_dev->onchip_buffers <= 0) { + u32 reg = readl(&priv_dev->regs->usb_cap2); + + priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); + } + + if (!priv_dev->onchip_buffers) + priv_dev->onchip_buffers = 256; + + max_speed = usb_get_maximum_speed(cdns->dev); + + /* Check the maximum_speed parameter */ + switch (max_speed) { + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + break; + default: + dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", + max_speed); + fallthrough; + case USB_SPEED_UNKNOWN: + /* default to superspeed */ + max_speed = USB_SPEED_SUPER; + break; + } + + /* fill gadget fields */ + priv_dev->gadget.max_speed = max_speed; + priv_dev->gadget.speed = USB_SPEED_UNKNOWN; + priv_dev->gadget.ops = &cdns3_gadget_ops; + priv_dev->gadget.name = "usb-ss-gadget"; + priv_dev->gadget.quirk_avoids_skb_reserve = 1; + priv_dev->gadget.irq = cdns->dev_irq; + + spin_lock_init(&priv_dev->lock); + INIT_WORK(&priv_dev->pending_status_wq, + 
cdns3_pending_setup_status_handler);
+
+ INIT_WORK(&priv_dev->aligned_buf_wq,
+ cdns3_free_aligned_request_buf);
+
+ /* initialize endpoint container */
+ INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
+ INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
+ priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool",
+ priv_dev->sysdev,
+ TRB_RING_SIZE, 8, 0);
+ if (!priv_dev->eps_dma_pool) {
+ dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ ret = cdns3_init_eps(priv_dev);
+ if (ret) {
+ dev_err(priv_dev->dev, "Failed to create endpoints\n");
+ goto err1;
+ }
+
+ /* allocate memory for setup packet buffer */
+ priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
+ &priv_dev->setup_dma, GFP_DMA);
+ if (!priv_dev->setup_buf) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
+
+ dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
+ readl(&priv_dev->regs->usb_cap6));
+ dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
+ readl(&priv_dev->regs->usb_cap1));
+ dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
+ readl(&priv_dev->regs->usb_cap2));
+
+ priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
+ if (priv_dev->dev_ver >= DEV_VER_V2)
+ priv_dev->gadget.sg_supported = 1;
+
+ priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
+ if (!priv_dev->zlp_buf) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ /* add USB gadget device */
+ ret = usb_add_gadget(&priv_dev->gadget);
+ if (ret < 0) {
+ dev_err(priv_dev->dev, "Failed to add gadget\n");
+ goto err4;
+ }
+
+ return 0;
+err4:
+ kfree(priv_dev->zlp_buf);
+err3:
+ dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
+ priv_dev->setup_dma);
+err2:
+ cdns3_free_all_eps(priv_dev);
+err1:
+ dma_pool_destroy(priv_dev->eps_dma_pool);
+
+ usb_put_gadget(&priv_dev->gadget);
+ cdns->gadget_dev = NULL;
+ return ret;
+}
+
+static int __cdns3_gadget_init(struct cdns *cdns)
+{
+ int ret = 0;
+
+ /* Ensure 32-bit DMA Mask in case we switched back from Host mode */
+ ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
+ return ret;
+ }
+
+ cdns_drd_gadget_on(cdns);
+ pm_runtime_get_sync(cdns->dev);
+
+ ret = cdns3_gadget_start(cdns);
+ if (ret) {
+ pm_runtime_put_sync(cdns->dev);
+ return ret;
+ }
+
+ /*
+ * Because the interrupt line can be shared with other components, the
+ * driver can't use the IRQF_ONESHOT flag here.
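+ *
+ * The primary handler below is therefore only expected to check whether
+ * this controller raised the interrupt and, if so, return
+ * IRQ_WAKE_THREAD so that the threaded handler does the real work.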
+ */
+ ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
+ cdns3_device_irq_handler,
+ cdns3_device_thread_irq_handler,
+ IRQF_SHARED, dev_name(cdns->dev),
+ cdns->gadget_dev);
+
+ if (ret)
+ goto err0;
+
+ return 0;
+err0:
+ cdns3_gadget_exit(cdns);
+ return ret;
+}
+
+static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
+__must_hold(&cdns->lock)
+{
+ struct cdns3_device *priv_dev = cdns->gadget_dev;
+
+ spin_unlock(&cdns->lock);
+ cdns3_disconnect_gadget(priv_dev);
+ spin_lock(&cdns->lock);
+
+ priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+ usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
+ cdns3_hw_reset_eps_config(priv_dev);
+
+ /* disable interrupt for device */
+ writel(0, &priv_dev->regs->usb_ien);
+
+ return 0;
+}
+
+static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
+{
+ struct cdns3_device *priv_dev = cdns->gadget_dev;
+
+ if (!priv_dev->gadget_driver)
+ return 0;
+
+ cdns3_gadget_config(priv_dev);
+ if (hibernated)
+ writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
+
+ return 0;
+}
+
+/**
+ * cdns3_gadget_init - initialize device structure
+ *
+ * @cdns: cdns instance
+ *
+ * This function initializes the gadget.
+ */
+int cdns3_gadget_init(struct cdns *cdns)
+{
+ struct cdns_role_driver *rdrv;
+
+ rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+ if (!rdrv)
+ return -ENOMEM;
+
+ rdrv->start = __cdns3_gadget_init;
+ rdrv->stop = cdns3_gadget_exit;
+ rdrv->suspend = cdns3_gadget_suspend;
+ rdrv->resume = cdns3_gadget_resume;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
+ rdrv->name = "gadget";
+ cdns->roles[USB_ROLE_DEVICE] = rdrv;
+
+ return 0;
+}
diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
new file mode 100644
index 000000000..086a7bb83
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-gadget.h
@@ -0,0 +1,1377 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USBSS device controller driver header file
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ * Pawel Jez <pjez@cadence.com>
+ * Peter Chen <peter.chen@nxp.com>
+ */
+#ifndef __LINUX_CDNS3_GADGET
+#define __LINUX_CDNS3_GADGET
+#include <linux/usb/gadget.h>
+#include <linux/dma-direction.h>
+
+/*
+ * USBSS-DEV register interface.
+ * This corresponds to the USBSS Device Controller Interface.
+ */
+
+/**
+ * struct cdns3_usb_regs - device controller registers.
+ * @usb_conf: Global Configuration.
+ * @usb_sts: Global Status.
+ * @usb_cmd: Global Command.
+ * @usb_itpn: ITP/SOF number.
+ * @usb_lpm: LPM Control.
+ * @usb_ien: USB Interrupt Enable.
+ * @usb_ists: USB Interrupt Status.
+ * @ep_sel: Endpoint Select.
+ * @ep_traddr: Endpoint Transfer Ring Address.
+ * @ep_cfg: Endpoint Configuration.
+ * @ep_cmd: Endpoint Command.
+ * @ep_sts: Endpoint Status.
+ * @ep_sts_sid: Endpoint Status Stream ID.
+ * @ep_sts_en: Endpoint Status Enable.
+ * @drbl: Doorbell.
+ * @ep_ien: EP Interrupt Enable.
+ * @ep_ists: EP Interrupt Status.
+ * @usb_pwr: Global Power Configuration.
+ * @usb_conf2: Global Configuration 2.
+ * @usb_cap1: Capability 1.
+ * @usb_cap2: Capability 2.
+ * @usb_cap3: Capability 3.
+ * @usb_cap4: Capability 4.
+ * @usb_cap5: Capability 5.
+ * @usb_cap6: Capability 6.
+ * @usb_cpkt1: Custom Packet 1.
+ * @usb_cpkt2: Custom Packet 2.
+ * @usb_cpkt3: Custom Packet 3.
+ * @ep_dma_ext_addr: Upper address for DMA operations.
+ * @buf_addr: Address for On-chip Buffer operations.
+ * @buf_data: Data for On-chip Buffer operations.
+ * @buf_ctrl: On-chip Buffer Access Control.
+ * @dtrans: DMA Transfer Mode.
+ * @tdl_from_trb: Source of TD Configuration.
+ * @tdl_beh: TDL Behavior Configuration.
+ * @ep_tdl: Endpoint TDL.
+ * @tdl_beh2: TDL Behavior 2 Configuration.
+ * @dma_adv_td: DMA Advance TD Configuration.
+ * @reserved1: Reserved.
+ * @cfg_reg1: Configuration 1.
+ * @dbg_link1: Debug Link 1.
+ * @dbg_link2: Debug Link 2.
+ * @cfg_regs: Configuration.
+ * @reserved2: Reserved.
+ * @dma_axi_ctrl: AXI Control.
+ * @dma_axi_id: AXI ID register.
+ * @dma_axi_cap: AXI Capability.
+ * @dma_axi_ctrl0: AXI Control 0.
+ * @dma_axi_ctrl1: AXI Control 1.
+ */
+struct cdns3_usb_regs {
+ __le32 usb_conf;
+ __le32 usb_sts;
+ __le32 usb_cmd;
+ __le32 usb_itpn;
+ __le32 usb_lpm;
+ __le32 usb_ien;
+ __le32 usb_ists;
+ __le32 ep_sel;
+ __le32 ep_traddr;
+ __le32 ep_cfg;
+ __le32 ep_cmd;
+ __le32 ep_sts;
+ __le32 ep_sts_sid;
+ __le32 ep_sts_en;
+ __le32 drbl;
+ __le32 ep_ien;
+ __le32 ep_ists;
+ __le32 usb_pwr;
+ __le32 usb_conf2;
+ __le32 usb_cap1;
+ __le32 usb_cap2;
+ __le32 usb_cap3;
+ __le32 usb_cap4;
+ __le32 usb_cap5;
+ __le32 usb_cap6;
+ __le32 usb_cpkt1;
+ __le32 usb_cpkt2;
+ __le32 usb_cpkt3;
+ __le32 ep_dma_ext_addr;
+ __le32 buf_addr;
+ __le32 buf_data;
+ __le32 buf_ctrl;
+ __le32 dtrans;
+ __le32 tdl_from_trb;
+ __le32 tdl_beh;
+ __le32 ep_tdl;
+ __le32 tdl_beh2;
+ __le32 dma_adv_td;
+ __le32 reserved1[26];
+ __le32 cfg_reg1;
+ __le32 dbg_link1;
+ __le32 dbg_link2;
+ __le32 cfg_regs[74];
+ __le32 reserved2[51];
+ __le32 dma_axi_ctrl;
+ __le32 dma_axi_id;
+ __le32 dma_axi_cap;
+ __le32 dma_axi_ctrl0;
+ __le32 dma_axi_ctrl1;
+};
+
+/* USB_CONF - bitmasks */
+/* Reset USB device configuration. */
+#define USB_CONF_CFGRST BIT(0)
+/* Set Configuration. */
+#define USB_CONF_CFGSET BIT(1)
+/* Disconnect USB device in SuperSpeed. */
+#define USB_CONF_USB3DIS BIT(3)
+/* Disconnect USB device in HS/FS */
+#define USB_CONF_USB2DIS BIT(4)
+/* Little Endian access - default */
+#define USB_CONF_LENDIAN BIT(5)
+/*
+ * Big Endian access. The driver assumes that the byte order for SFR
+ * accesses is always Little Endian, so this bit is not used.
+ */
+#define USB_CONF_BENDIAN BIT(6)
+/* Device software reset. */
+#define USB_CONF_SWRST BIT(7)
+/* Singular DMA transfer mode. Only for VER < DEV_VER_V3 */
+#define USB_CONF_DSING BIT(8)
+/* Multiple DMA transfers mode. Only for VER < DEV_VER_V3 */
+#define USB_CONF_DMULT BIT(9)
+/* DMA clock turn-off enable. */
+#define USB_CONF_DMAOFFEN BIT(10)
+/* DMA clock turn-off disable. */
+#define USB_CONF_DMAOFFDS BIT(11)
+/* Clear Force Full Speed. */
+#define USB_CONF_CFORCE_FS BIT(12)
+/* Set Force Full Speed. */
+#define USB_CONF_SFORCE_FS BIT(13)
+/* Device enable. */
+#define USB_CONF_DEVEN BIT(14)
+/* Device disable. */
+#define USB_CONF_DEVDS BIT(15)
+/* L1 LPM state entry enable (used in HS/FS mode). */
+#define USB_CONF_L1EN BIT(16)
+/* L1 LPM state entry disable (used in HS/FS mode). */
+#define USB_CONF_L1DS BIT(17)
+/* USB 2.0 clock gate disable. */
+#define USB_CONF_CLK2OFFEN BIT(18)
+/* USB 2.0 clock gate enable. */
+#define USB_CONF_CLK2OFFDS BIT(19)
+/* L0 LPM state entry request (used in HS/FS mode). */
+#define USB_CONF_LGO_L0 BIT(20)
+/* USB 3.0 clock gate disable. */
+#define USB_CONF_CLK3OFFEN BIT(21)
+/* USB 3.0 clock gate enable. */
+#define USB_CONF_CLK3OFFDS BIT(22)
+/* Bit 23 is reserved */
+/* U1 state entry enable (used in SS mode). */
+#define USB_CONF_U1EN BIT(24)
+/* U1 state entry disable (used in SS mode). */
+#define USB_CONF_U1DS BIT(25)
+/* U2 state entry enable (used in SS mode).
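+ *
+ * A sketch (not the driver's literal code) of allowing U1/U2 entry once
+ * the device has been configured:
+ *
+ * writel(USB_CONF_U1EN | USB_CONF_U2EN, &priv_dev->regs->usb_conf);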
*/
+#define USB_CONF_U2EN BIT(26)
+/* U2 state entry disable (used in SS mode). */
+#define USB_CONF_U2DS BIT(27)
+/* U0 state entry request (used in SS mode). */
+#define USB_CONF_LGO_U0 BIT(28)
+/* U1 state entry request (used in SS mode). */
+#define USB_CONF_LGO_U1 BIT(29)
+/* U2 state entry request (used in SS mode). */
+#define USB_CONF_LGO_U2 BIT(30)
+/* SS.Inactive state entry request (used in SS mode) */
+#define USB_CONF_LGO_SSINACT BIT(31)
+
+/* USB_STS - bitmasks */
+/*
+ * Configuration status.
+ * 1 - device is in the configured state.
+ * 0 - device is not configured.
+ */
+#define USB_STS_CFGSTS_MASK BIT(0)
+#define USB_STS_CFGSTS(p) ((p) & USB_STS_CFGSTS_MASK)
+/*
+ * On-chip memory overflow.
+ * 0 - On-chip memory status OK.
+ * 1 - On-chip memory overflow.
+ */
+#define USB_STS_OV_MASK BIT(1)
+#define USB_STS_OV(p) ((p) & USB_STS_OV_MASK)
+/*
+ * SuperSpeed connection status.
+ * 0 - USB in SuperSpeed mode disconnected.
+ * 1 - USB in SuperSpeed mode connected.
+ */
+#define USB_STS_USB3CONS_MASK BIT(2)
+#define USB_STS_USB3CONS(p) ((p) & USB_STS_USB3CONS_MASK)
+/*
+ * DMA transfer configuration status.
+ * 0 - single request.
+ * 1 - multiple TRB chain.
+ * Supported only for controller version < DEV_VER_V3.
+ */
+#define USB_STS_DTRANS_MASK BIT(3)
+#define USB_STS_DTRANS(p) ((p) & USB_STS_DTRANS_MASK)
+/*
+ * Device speed.
+ * 0 - Undefined (value after reset).
+ * 1 - Low speed
+ * 2 - Full speed
+ * 3 - High speed
+ * 4 - Super speed
+ */
+#define USB_STS_USBSPEED_MASK GENMASK(6, 4)
+#define USB_STS_USBSPEED(p) (((p) & USB_STS_USBSPEED_MASK) >> 4)
+#define USB_STS_LS (0x1 << 4)
+#define USB_STS_FS (0x2 << 4)
+#define USB_STS_HS (0x3 << 4)
+#define USB_STS_SS (0x4 << 4)
+#define DEV_UNDEFSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == (0x0 << 4))
+#define DEV_LOWSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_LS)
+#define DEV_FULLSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_FS)
+#define DEV_HIGHSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_HS)
+#define DEV_SUPERSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_SS)
+/*
+ * Endianness for SFR access.
+ * 0 - Little Endian order (default after hardware reset).
+ * 1 - Big Endian order
+ */
+#define USB_STS_ENDIAN_MASK BIT(7)
+#define USB_STS_ENDIAN(p) ((p) & USB_STS_ENDIAN_MASK)
+/*
+ * HS/FS clock turn-off status.
+ * 0 - hsfs clock is always on.
+ * 1 - hsfs clock turn-off in L2 (HS/FS mode) is enabled
+ * (default after hardware reset).
+ */
+#define USB_STS_CLK2OFF_MASK BIT(8)
+#define USB_STS_CLK2OFF(p) ((p) & USB_STS_CLK2OFF_MASK)
+/*
+ * PCLK clock turn-off status.
+ * 0 - pclk clock is always on.
+ * 1 - pclk clock turn-off in U3 (SS mode) is enabled
+ * (default after hardware reset).
+ */
+#define USB_STS_CLK3OFF_MASK BIT(9)
+#define USB_STS_CLK3OFF(p) ((p) & USB_STS_CLK3OFF_MASK)
+/*
+ * Controller in reset state.
+ * 0 - Internal reset is active.
+ * 1 - Internal reset is not active and controller is fully operational.
+ */
+#define USB_STS_IN_RST_MASK BIT(10)
+#define USB_STS_IN_RST(p) ((p) & USB_STS_IN_RST_MASK)
+/*
+ * Status of the "TDL calculation based on TRB" feature.
+ * 0 - disabled
+ * 1 - enabled
+ * Supported only for the DEV_VER_V2 controller version.
+ */
+#define USB_STS_TDL_TRB_ENABLED BIT(11)
+/*
+ * Device enable Status.
+ * 0 - USB device is disabled (VBUS input is disconnected from internal logic).
+ * 1 - USB device is enabled (VBUS input is connected to the internal logic).
+ */
+#define USB_STS_DEVS_MASK BIT(14)
+#define USB_STS_DEVS(p) ((p) & USB_STS_DEVS_MASK)
+/*
+ * Address status.
+ * 0 - USB device is in the default state.
+ * 1 - USB device is at least in the address state.
+ */
+#define USB_STS_ADDRESSED_MASK BIT(15)
+#define USB_STS_ADDRESSED(p) ((p) & USB_STS_ADDRESSED_MASK)
+/*
+ * L1 LPM state enable status (used in HS/FS mode).
+ * 0 - Entry to the L1 LPM state is disabled.
+ * 1 - Entry to the L1 LPM state is enabled.
+ */
+#define USB_STS_L1ENS_MASK BIT(16)
+#define USB_STS_L1ENS(p) ((p) & USB_STS_L1ENS_MASK)
+/*
+ * Internal VBUS connection status (used both in HS/FS and SS mode).
+ * 0 - internal VBUS is not detected.
+ * 1 - internal VBUS is detected.
+ */
+#define USB_STS_VBUSS_MASK BIT(17)
+#define USB_STS_VBUSS(p) ((p) & USB_STS_VBUSS_MASK)
+/*
+ * HS/FS LPM state (used in FS/HS mode).
+ * 0 - L0 State
+ * 1 - L1 State
+ * 2 - L2 State
+ * 3 - L3 State
+ */
+#define USB_STS_LPMST_MASK GENMASK(19, 18)
+#define DEV_L0_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x0 << 18))
+#define DEV_L1_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x1 << 18))
+#define DEV_L2_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x2 << 18))
+#define DEV_L3_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x3 << 18))
+/*
+ * HS/FS mode connection status (used in FS/HS mode).
+ * 0 - the disconnect bit for HS/FS mode is set.
+ * 1 - the disconnect bit for HS/FS mode is not set.
+ */
+#define USB_STS_USB2CONS_MASK BIT(20)
+#define USB_STS_USB2CONS(p) ((p) & USB_STS_USB2CONS_MASK)
+/*
+ * Disable HS status (used in FS/HS mode).
+ * 0 - High Speed operations in USB2.0 (FS/HS) mode are not disabled.
+ * 1 - High Speed operations in USB2.0 (FS/HS) mode are disabled.
+ */
+#define USB_STS_DISABLE_HS_MASK BIT(21)
+#define USB_STS_DISABLE_HS(p) ((p) & USB_STS_DISABLE_HS_MASK)
+/*
+ * U1 state enable status (used in SS mode).
+ * 0 - Entry to the U1 state is disabled.
+ * 1 - Entry to the U1 state is enabled.
+ */
+#define USB_STS_U1ENS_MASK BIT(24)
+#define USB_STS_U1ENS(p) ((p) & USB_STS_U1ENS_MASK)
+/*
+ * U2 state enable status (used in SS mode).
+ * 0 - Entry to the U2 state is disabled.
+ * 1 - Entry to the U2 state is enabled.
+ */
+#define USB_STS_U2ENS_MASK BIT(25)
+#define USB_STS_U2ENS(p) ((p) & USB_STS_U2ENS_MASK)
+/*
+ * SuperSpeed Link LTSSM state. This field reflects the USBSS-DEV current
+ * SuperSpeed link state.
+ */
+#define USB_STS_LST_MASK GENMASK(29, 26)
+#define DEV_LST_U0(p) (((p) & USB_STS_LST_MASK) == (0x0 << 26))
+#define DEV_LST_U1(p) (((p) & USB_STS_LST_MASK) == (0x1 << 26))
+#define DEV_LST_U2(p) (((p) & USB_STS_LST_MASK) == (0x2 << 26))
+#define DEV_LST_U3(p) (((p) & USB_STS_LST_MASK) == (0x3 << 26))
+#define DEV_LST_DISABLED(p) (((p) & USB_STS_LST_MASK) == (0x4 << 26))
+#define DEV_LST_RXDETECT(p) (((p) & USB_STS_LST_MASK) == (0x5 << 26))
+#define DEV_LST_INACTIVE(p) (((p) & USB_STS_LST_MASK) == (0x6 << 26))
+#define DEV_LST_POLLING(p) (((p) & USB_STS_LST_MASK) == (0x7 << 26))
+#define DEV_LST_RECOVERY(p) (((p) & USB_STS_LST_MASK) == (0x8 << 26))
+#define DEV_LST_HOT_RESET(p) (((p) & USB_STS_LST_MASK) == (0x9 << 26))
+#define DEV_LST_COMP_MODE(p) (((p) & USB_STS_LST_MASK) == (0xa << 26))
+#define DEV_LST_LB_STATE(p) (((p) & USB_STS_LST_MASK) == (0xb << 26))
+/*
+ * DMA clock turn-off status.
+ * 0 - DMA clock is always on (default after hardware reset).
+ * 1 - DMA clock turn-off in U1, U2 and U3 (SS mode) is enabled.
+ */
+#define USB_STS_DMAOFF_MASK BIT(30)
+#define USB_STS_DMAOFF(p) ((p) & USB_STS_DMAOFF_MASK)
+/*
+ * SFR Endian status.
+ * 0 - Little Endian order (default after hardware reset).
+ * 1 - Big Endian order.
+ */
+#define USB_STS_ENDIAN2_MASK BIT(31)
+#define USB_STS_ENDIAN2(p) ((p) & USB_STS_ENDIAN2_MASK)
+
+/* USB_CMD - bitmasks */
+/* Set Function Address */
+#define USB_CMD_SET_ADDR BIT(0)
+/*
+ * Function Address. This field is saved to the device only when the field
+ * SET_ADDR is set to '1' during a write to the USB_CMD register.
+ * Software is responsible for entering the address of the device during
+ * SET_ADDRESS request service. This field should be set immediately after
+ * the SETUP packet is decoded, and prior to confirmation of the status phase.
+ */
+#define USB_CMD_FADDR_MASK GENMASK(7, 1)
+#define USB_CMD_FADDR(p) (((p) << 1) & USB_CMD_FADDR_MASK)
+/* Send Function Wake Device Notification TP (used only in SS mode). */
+#define USB_CMD_SDNFW BIT(8)
+/* Set Test Mode (used only in HS/FS mode). */
+#define USB_CMD_STMODE BIT(9)
+/* Test mode selector (used only in HS/FS mode) */
+#define USB_STS_TMODE_SEL_MASK GENMASK(11, 10)
+#define USB_STS_TMODE_SEL(p) (((p) << 10) & USB_STS_TMODE_SEL_MASK)
+/*
+ * Send Latency Tolerance Message Device Notification TP (used only
+ * in SS mode).
+ */
+#define USB_CMD_SDNLTM BIT(12)
+/* Send Custom Transaction Packet (used only in SS mode) */
+#define USB_CMD_SPKT BIT(13)
+/* Device Notification 'Function Wake' - Interface value (used only in SS mode). */
+#define USB_CMD_DNFW_INT_MASK GENMASK(23, 16)
+#define USB_STS_DNFW_INT(p) (((p) << 16) & USB_CMD_DNFW_INT_MASK)
+/*
+ * Device Notification 'Latency Tolerance Message' - BELT value [11:0]
+ * (used only in SS mode).
+ */
+#define USB_CMD_DNLTM_BELT_MASK GENMASK(27, 16)
+#define USB_STS_DNLTM_BELT(p) (((p) << 16) & USB_CMD_DNLTM_BELT_MASK)
+
+/* USB_ITPN - bitmasks */
+/*
+ * ITP (SS) / SOF (HS/FS) number.
+ * In SS mode this field represents the number of the last ITP received
+ * from the host.
+ * In HS/FS mode this field represents the number of the last SOF received
+ * from the host.
+ */
+#define USB_ITPN_MASK GENMASK(13, 0)
+#define USB_ITPN(p) ((p) & USB_ITPN_MASK)
+
+/* USB_LPM - bitmasks */
+/* Host Initiated Resume Duration. */
+#define USB_LPM_HIRD_MASK GENMASK(3, 0)
+#define USB_LPM_HIRD(p) ((p) & USB_LPM_HIRD_MASK)
+/* Remote Wakeup Enable (bRemoteWake). */
+#define USB_LPM_BRW BIT(4)
+
+/* USB_IEN - bitmasks */
+/* SS connection interrupt enable */
+#define USB_IEN_CONIEN BIT(0)
+/* SS disconnection interrupt enable. */
+#define USB_IEN_DISIEN BIT(1)
+/* USB SS warm reset interrupt enable. */
+#define USB_IEN_UWRESIEN BIT(2)
+/* USB SS hot reset interrupt enable */
+#define USB_IEN_UHRESIEN BIT(3)
+/* SS link U3 state enter interrupt enable (suspend).*/
+#define USB_IEN_U3ENTIEN BIT(4)
+/* SS link U3 state exit interrupt enable (wakeup).
*/
+#define USB_IEN_U3EXTIEN BIT(5)
+/* SS link U2 state enter interrupt enable.*/
+#define USB_IEN_U2ENTIEN BIT(6)
+/* SS link U2 state exit interrupt enable.*/
+#define USB_IEN_U2EXTIEN BIT(7)
+/* SS link U1 state enter interrupt enable.*/
+#define USB_IEN_U1ENTIEN BIT(8)
+/* SS link U1 state exit interrupt enable.*/
+#define USB_IEN_U1EXTIEN BIT(9)
+/* ITP/SOF packet detected interrupt enable.*/
+#define USB_IEN_ITPIEN BIT(10)
+/* Wakeup interrupt enable.*/
+#define USB_IEN_WAKEIEN BIT(11)
+/* Send Custom Packet interrupt enable.*/
+#define USB_IEN_SPKTIEN BIT(12)
+/* HS/FS mode connection interrupt enable.*/
+#define USB_IEN_CON2IEN BIT(16)
+/* HS/FS mode disconnection interrupt enable.*/
+#define USB_IEN_DIS2IEN BIT(17)
+/* USB reset (HS/FS mode) interrupt enable.*/
+#define USB_IEN_U2RESIEN BIT(18)
+/* LPM L2 state enter interrupt enable.*/
+#define USB_IEN_L2ENTIEN BIT(20)
+/* LPM L2 state exit interrupt enable.*/
+#define USB_IEN_L2EXTIEN BIT(21)
+/* LPM L1 state enter interrupt enable.*/
+#define USB_IEN_L1ENTIEN BIT(24)
+/* LPM L1 state exit interrupt enable.*/
+#define USB_IEN_L1EXTIEN BIT(25)
+/* Configuration reset interrupt enable.*/
+#define USB_IEN_CFGRESIEN BIT(26)
+/* Start of the USB SS warm reset interrupt enable.*/
+#define USB_IEN_UWRESSIEN BIT(28)
+/* End of the USB SS warm reset interrupt enable.*/
+#define USB_IEN_UWRESEIEN BIT(29)
+
+#define USB_IEN_INIT (USB_IEN_U2RESIEN | USB_IEN_DIS2IEN | USB_IEN_CON2IEN \
+ | USB_IEN_UHRESIEN | USB_IEN_UWRESIEN | USB_IEN_DISIEN \
+ | USB_IEN_CONIEN | USB_IEN_U3EXTIEN | USB_IEN_L2ENTIEN \
+ | USB_IEN_L2EXTIEN | USB_IEN_L1ENTIEN | USB_IEN_U3ENTIEN)
+
+/* USB_ISTS - bitmasks */
+/* SS Connection detected. */
+#define USB_ISTS_CONI BIT(0)
+/* SS Disconnection detected. */
+#define USB_ISTS_DISI BIT(1)
+/* USB SS warm reset detected. */
+#define USB_ISTS_UWRESI BIT(2)
+/* USB SS hot reset detected. */
+#define USB_ISTS_UHRESI BIT(3)
+/* U3 link state enter detected (suspend).*/
+#define USB_ISTS_U3ENTI BIT(4)
+/* U3 link state exit detected (wakeup). */
+#define USB_ISTS_U3EXTI BIT(5)
+/* U2 link state enter detected.*/
+#define USB_ISTS_U2ENTI BIT(6)
+/* U2 link state exit detected.*/
+#define USB_ISTS_U2EXTI BIT(7)
+/* U1 link state enter detected.*/
+#define USB_ISTS_U1ENTI BIT(8)
+/* U1 link state exit detected.*/
+#define USB_ISTS_U1EXTI BIT(9)
+/* ITP/SOF packet detected.*/
+#define USB_ISTS_ITPI BIT(10)
+/* Wakeup detected.*/
+#define USB_ISTS_WAKEI BIT(11)
+/* Send Custom Packet detected.*/
+#define USB_ISTS_SPKTI BIT(12)
+/* HS/FS mode connection detected.*/
+#define USB_ISTS_CON2I BIT(16)
+/* HS/FS mode disconnection detected.*/
+#define USB_ISTS_DIS2I BIT(17)
+/* USB reset (HS/FS mode) detected.*/
+#define USB_ISTS_U2RESI BIT(18)
+/* LPM L2 state enter detected.*/
+#define USB_ISTS_L2ENTI BIT(20)
+/* LPM L2 state exit detected.*/
+#define USB_ISTS_L2EXTI BIT(21)
+/* LPM L1 state enter detected.*/
+#define USB_ISTS_L1ENTI BIT(24)
+/* LPM L1 state exit detected.*/
+#define USB_ISTS_L1EXTI BIT(25)
+/* USB configuration reset detected.*/
+#define USB_ISTS_CFGRESI BIT(26)
+/* Start of the USB warm reset detected.*/
+#define USB_ISTS_UWRESSI BIT(28)
+/* End of the USB warm reset detected.*/
+#define USB_ISTS_UWRESEI BIT(29)
+
+/* EP_SEL - bitmasks */
+#define EP_SEL_EPNO_MASK GENMASK(3, 0)
+/* Endpoint number. */
+#define EP_SEL_EPNO(p) ((p) & EP_SEL_EPNO_MASK)
+/* Endpoint direction bit - 0 - OUT, 1 - IN.
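+ *
+ * Selecting an endpoint makes the ep_* registers below operate on it.
+ * A sketch of selecting ep2in:
+ *
+ * writel(EP_SEL_EPNO(2) | EP_SEL_DIR, &priv_dev->regs->ep_sel);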
*/
+#define EP_SEL_DIR BIT(7)
+
+#define select_ep_in(nr) (EP_SEL_EPNO(nr) | EP_SEL_DIR)
+#define select_ep_out(nr) (EP_SEL_EPNO(nr))
+
+/* EP_TRADDR - bitmasks */
+/* Transfer Ring address. */
+#define EP_TRADDR_TRADDR(p) ((p))
+
+/* EP_CFG - bitmasks */
+/* Endpoint enable */
+#define EP_CFG_ENABLE BIT(0)
+/*
+ * Endpoint type.
+ * 1 - isochronous
+ * 2 - bulk
+ * 3 - interrupt
+ */
+#define EP_CFG_EPTYPE_MASK GENMASK(2, 1)
+#define EP_CFG_EPTYPE(p) (((p) << 1) & EP_CFG_EPTYPE_MASK)
+/* Stream support enable (only in SS mode). */
+#define EP_CFG_STREAM_EN BIT(3)
+/* TDL check (only in SS mode for BULK EP). */
+#define EP_CFG_TDL_CHK BIT(4)
+/* SID check (only in SS mode for BULK OUT EP). */
+#define EP_CFG_SID_CHK BIT(5)
+/* DMA transfer endianness. */
+#define EP_CFG_EPENDIAN BIT(7)
+/* Max burst size (used only in SS mode). */
+#define EP_CFG_MAXBURST_MASK GENMASK(11, 8)
+#define EP_CFG_MAXBURST(p) (((p) << 8) & EP_CFG_MAXBURST_MASK)
+#define EP_CFG_MAXBURST_MAX 15
+/* ISO max burst. */
+#define EP_CFG_MULT_MASK GENMASK(15, 14)
+#define EP_CFG_MULT(p) (((p) << 14) & EP_CFG_MULT_MASK)
+#define EP_CFG_MULT_MAX 2
+/* Max packet size. */
+#define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16)
+#define EP_CFG_MAXPKTSIZE(p) (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK)
+/* Max number of buffered packets. */
+#define EP_CFG_BUFFERING_MASK GENMASK(31, 27)
+#define EP_CFG_BUFFERING(p) (((p) << 27) & EP_CFG_BUFFERING_MASK)
+#define EP_CFG_BUFFERING_MAX 15
+
+/* EP_CMD - bitmasks */
+/* Endpoint reset. */
+#define EP_CMD_EPRST BIT(0)
+/* Endpoint STALL set. */
+#define EP_CMD_SSTALL BIT(1)
+/* Endpoint STALL clear. */
+#define EP_CMD_CSTALL BIT(2)
+/* Send ERDY TP. */
+#define EP_CMD_ERDY BIT(3)
+/* Request complete. */
+#define EP_CMD_REQ_CMPL BIT(5)
+/* Transfer descriptor ready. */
+#define EP_CMD_DRDY BIT(6)
+/* Data flush. */
+#define EP_CMD_DFLUSH BIT(7)
+/*
+ * Transfer Descriptor Length write (used only for Bulk Stream capable
+ * endpoints in SS mode).
+ * Bit removed from the DEV_VER_V3 controller version.
+ */
+#define EP_CMD_STDL BIT(8)
+/*
+ * Transfer Descriptor Length (used only in SS mode for bulk endpoints).
+ * Bits removed from the DEV_VER_V3 controller version.
+ */
+#define EP_CMD_TDL_MASK GENMASK(15, 9)
+#define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK)
+#define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9)
+#define EP_CMD_TDL_MAX (EP_CMD_TDL_MASK >> 9)
+
+/* ERDY Stream ID value (used in SS mode). */
+#define EP_CMD_ERDY_SID_MASK GENMASK(31, 16)
+#define EP_CMD_ERDY_SID(p) (((p) << 16) & EP_CMD_ERDY_SID_MASK)
+
+/* EP_STS - bitmasks */
+/* Setup transfer complete. */
+#define EP_STS_SETUP BIT(0)
+/* Endpoint STALL status. */
+#define EP_STS_STALL(p) ((p) & BIT(1))
+/* Interrupt On Complete. */
+#define EP_STS_IOC BIT(2)
+/* Interrupt on Short Packet. */
+#define EP_STS_ISP BIT(3)
+/* Transfer descriptor missing. */
+#define EP_STS_DESCMIS BIT(4)
+/* Stream Rejected (used only in SS mode) */
+#define EP_STS_STREAMR BIT(5)
+/* EXIT from MOVE DATA State (used only for stream transfers in SS mode). */
+#define EP_STS_MD_EXIT BIT(6)
+/* TRB error. */
+#define EP_STS_TRBERR BIT(7)
+/* Not ready (used only in SS mode). */
+#define EP_STS_NRDY BIT(8)
+/* DMA busy bit. */
+#define EP_STS_DBUSY BIT(9)
+/* Endpoint Buffer Empty */
+#define EP_STS_BUFFEMPTY(p) ((p) & BIT(10))
+/* Current Cycle Status */
+#define EP_STS_CCS(p) ((p) & BIT(11))
+/* Prime (used only in SS mode). */
+#define EP_STS_PRIME BIT(12)
+/* Stream error (used only in SS mode). */
+#define EP_STS_SIDERR BIT(13)
+/* OUT size mismatch.
*/
+#define EP_STS_OUTSMM BIT(14)
+/* ISO transmission error. */
+#define EP_STS_ISOERR BIT(15)
+/* Host Packet Pending (only for SS mode). */
+#define EP_STS_HOSTPP(p) ((p) & BIT(16))
+/* Stream Protocol State Machine State (only for Bulk stream endpoints). */
+#define EP_STS_SPSMST_MASK GENMASK(18, 17)
+#define EP_STS_SPSMST_DISABLED(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+#define EP_STS_SPSMST_IDLE(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+#define EP_STS_SPSMST_START_STREAM(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+#define EP_STS_SPSMST_MOVE_DATA(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+/* Interrupt On Transfer complete. */
+#define EP_STS_IOT BIT(19)
+/* OUT queue endpoint number. */
+#define EP_STS_OUTQ_NO_MASK GENMASK(27, 24)
+#define EP_STS_OUTQ_NO(p) (((p) & EP_STS_OUTQ_NO_MASK) >> 24)
+/* OUT queue valid flag. */
+#define EP_STS_OUTQ_VAL_MASK BIT(28)
+#define EP_STS_OUTQ_VAL(p) ((p) & EP_STS_OUTQ_VAL_MASK)
+/* SETUP WAIT. */
+#define EP_STS_STPWAIT BIT(31)
+
+/* EP_STS_SID - bitmasks */
+/* Stream ID (used only in SS mode). */
+#define EP_STS_SID_MASK GENMASK(15, 0)
+#define EP_STS_SID(p) ((p) & EP_STS_SID_MASK)
+
+/* EP_STS_EN - bitmasks */
+/* SETUP interrupt enable. */
+#define EP_STS_EN_SETUPEN BIT(0)
+/* OUT transfer missing descriptor enable. */
+#define EP_STS_EN_DESCMISEN BIT(4)
+/* Stream Rejected enable. */
+#define EP_STS_EN_STREAMREN BIT(5)
+/* Move Data Exit enable.*/
+#define EP_STS_EN_MD_EXITEN BIT(6)
+/* TRB error enable. */
+#define EP_STS_EN_TRBERREN BIT(7)
+/* NRDY enable. */
+#define EP_STS_EN_NRDYEN BIT(8)
+/* Prime enable. */
+#define EP_STS_EN_PRIMEEEN BIT(12)
+/* Stream error enable. */
+#define EP_STS_EN_SIDERREN BIT(13)
+/* OUT size mismatch enable. */
+#define EP_STS_EN_OUTSMMEN BIT(14)
+/* ISO transmission error enable. */
+#define EP_STS_EN_ISOERREN BIT(15)
+/* Interrupt on Transmission complete enable. */
+#define EP_STS_EN_IOTEN BIT(19)
+/* Setup Wait interrupt enable. */
+#define EP_STS_EN_STPWAITEN BIT(31)
+
+/* DRBL- bitmasks */
+#define DB_VALUE_BY_INDEX(index) (1 << (index))
+#define DB_VALUE_EP0_OUT BIT(0)
+#define DB_VALUE_EP0_IN BIT(16)
+
+/* EP_IEN - bitmasks */
+#define EP_IEN(index) (1 << (index))
+#define EP_IEN_EP_OUT0 BIT(0)
+#define EP_IEN_EP_IN0 BIT(16)
+
+/* EP_ISTS - bitmasks */
+#define EP_ISTS(index) (1 << (index))
+#define EP_ISTS_EP_OUT0 BIT(0)
+#define EP_ISTS_EP_IN0 BIT(16)
+
+/* USB_PWR- bitmasks */
+/* Power Shut Off capability enable */
+#define PUSB_PWR_PSO_EN BIT(0)
+/* Power Shut Off capability disable */
+#define PUSB_PWR_PSO_DS BIT(1)
+/*
+ * Enables turning-off Reference Clock.
+ * This bit is optional and implemented only when support for OTG is
+ * implemented (indicated by OTG_READY bit set to '1').
+ */
+#define PUSB_PWR_STB_CLK_SWITCH_EN BIT(8)
+/*
+ * Status bit indicating that the operation required by the
+ * STB_CLK_SWITCH_EN write is completed.
+ */
+#define PUSB_PWR_STB_CLK_SWITCH_DONE BIT(9)
+/* This bit informs whether Fast Registers Access is enabled. */
+#define PUSB_PWR_FST_REG_ACCESS_STAT BIT(30)
+/* Fast Registers Access Enable. */
+#define PUSB_PWR_FST_REG_ACCESS BIT(31)
+
+/* USB_CONF2- bitmasks */
+/*
+ * Writing 1 disables the "TDL calculation based on TRB" feature in the
+ * controller for DMULT mode.
+ * Bit supported only for the DEV_VER_V2 version.
+ */
+#define USB_CONF2_DIS_TDL_TRB BIT(1)
+/*
+ * Writing 1 enables the "TDL calculation based on TRB" feature in the
+ * controller for DMULT mode.
+ * Bit supported only for the DEV_VER_V2 version.
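+ *
+ * A sketch of enabling the feature (assuming a DEV_VER_V2 controller):
+ *
+ * writel(USB_CONF2_EN_TDL_TRB, &priv_dev->regs->usb_conf2);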
+ */
+#define USB_CONF2_EN_TDL_TRB BIT(2)
+
+/* USB_CAP1- bitmasks */
+/*
+ * SFR Interface type
+ * This field reflects the type of SFR interface implemented:
+ * 0x0 - OCP
+ * 0x1 - AHB,
+ * 0x2 - PLB
+ * 0x3 - AXI
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_SFR_TYPE_MASK GENMASK(3, 0)
+#define DEV_SFR_TYPE_OCP(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x0)
+#define DEV_SFR_TYPE_AHB(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x1)
+#define DEV_SFR_TYPE_PLB(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x2)
+#define DEV_SFR_TYPE_AXI(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x3)
+/*
+ * SFR Interface width
+ * This field reflects the width of SFR interface implemented:
+ * 0x0 - 8 bit interface,
+ * 0x1 - 16 bit interface,
+ * 0x2 - 32 bit interface
+ * 0x3 - 64 bit interface
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_SFR_WIDTH_MASK GENMASK(7, 4)
+#define DEV_SFR_WIDTH_8(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x0 << 4))
+#define DEV_SFR_WIDTH_16(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x1 << 4))
+#define DEV_SFR_WIDTH_32(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x2 << 4))
+#define DEV_SFR_WIDTH_64(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x3 << 4))
+/*
+ * DMA Interface type
+ * This field reflects the type of DMA interface implemented:
+ * 0x0 - OCP
+ * 0x1 - AHB,
+ * 0x2 - PLB
+ * 0x3 - AXI
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_DMA_TYPE_MASK GENMASK(11, 8)
+#define DEV_DMA_TYPE_OCP(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x0 << 8))
+#define DEV_DMA_TYPE_AHB(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x1 << 8))
+#define DEV_DMA_TYPE_PLB(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x2 << 8))
+#define DEV_DMA_TYPE_AXI(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x3 << 8))
+/*
+ * DMA Interface width
+ * This field reflects the width of DMA interface implemented:
+ * 0x0 - reserved,
+ * 0x1 - reserved,
+ * 0x2 - 32 bit interface
+ * 0x3 - 64 bit interface
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_DMA_WIDTH_MASK GENMASK(15, 12)
+#define DEV_DMA_WIDTH_32(p) (((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x2 << 12))
+#define DEV_DMA_WIDTH_64(p) (((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x3 << 12))
+/*
+ * USB3 PHY Interface type
+ * This field reflects the type of USB3 PHY interface implemented:
+ * 0x0 - USB PIPE,
+ * 0x1 - RMMI,
+ * 0x2-0xF - reserved
+ */
+#define USB_CAP1_U3PHY_TYPE_MASK GENMASK(19, 16)
+#define DEV_U3PHY_PIPE(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x0 << 16))
+#define DEV_U3PHY_RMMI(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x1 << 16))
+/*
+ * USB3 PHY Interface width
+ * This field reflects the width of USB3 PHY interface implemented:
+ * 0x0 - 8 bit PIPE interface,
+ * 0x1 - 16 bit PIPE interface,
+ * 0x2 - 32 bit PIPE interface,
+ * 0x3 - 64 bit PIPE interface
+ * 0x4-0xF - reserved
+ * Note: When an SSIC interface is implemented this field shows the width of
+ * the internal PIPE interface. The RMMI interface is always 20 bits wide.
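+ *
+ * A sketch of decoding this field:
+ *
+ * u32 cap1 = readl(&priv_dev->regs->usb_cap1);
+ * bool pipe16 = DEV_U3PHY_WIDTH_16(cap1);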
+ */
+#define USB_CAP1_U3PHY_WIDTH_MASK GENMASK(23, 20)
+#define DEV_U3PHY_WIDTH_8(p) \
+ (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x0 << 20))
+#define DEV_U3PHY_WIDTH_16(p) \
+ (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x1 << 20))
+#define DEV_U3PHY_WIDTH_32(p) \
+ (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x2 << 20))
+#define DEV_U3PHY_WIDTH_64(p) \
+ (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x3 << 20))
+
+/*
+ * USB2 PHY Interface enable
+ * This field informs whether the USB2 PHY interface is implemented:
+ * 0x0 - interface NOT implemented,
+ * 0x1 - interface implemented
+ */
+#define USB_CAP1_U2PHY_EN(p) ((p) & BIT(24))
+/*
+ * USB2 PHY Interface type
+ * This field reflects the type of USB2 PHY interface implemented:
+ * 0x0 - UTMI,
+ * 0x1 - ULPI
+ */
+#define DEV_U2PHY_ULPI(p) ((p) & BIT(25))
+/*
+ * USB2 PHY Interface width
+ * This field reflects the width of USB2 PHY interface implemented:
+ * 0x0 - 8 bit interface,
+ * 0x1 - 16 bit interface,
+ * Note: The ULPI interface is always 8 bits wide.
+ */
+#define DEV_U2PHY_WIDTH_16(p) ((p) & BIT(26))
+/*
+ * OTG Ready
+ * 0x0 - pure device mode
+ * 0x1 - some features and ports for the CDNS USB OTG controller are
+ * implemented.
+ */
+#define USB_CAP1_OTG_READY(p) ((p) & BIT(27))
+
+/*
+ * When set, indicates that the controller supports automatic internal TDL
+ * calculation based on the size provided in TRB (TRB[22:17]) for DMULT mode.
+ * Supported only for the DEV_VER_V2 controller version.
+ */
+#define USB_CAP1_TDL_FROM_TRB(p) ((p) & BIT(28))
+
+/* USB_CAP2- bitmasks */
+/*
+ * The actual size of the connected On-chip RAM memory in kB:
+ * - 0 means 256 kB (max supported mem size)
+ * - value other than 0 reflects the mem size in kB
+ */
+#define USB_CAP2_ACTUAL_MEM_SIZE(p) ((p) & GENMASK(7, 0))
+/*
+ * Max supported mem size
+ * This field reflects the width of the on-chip RAM address bus,
+ * which determines the max supported mem size:
+ * 0x0-0x7 - reserved,
+ * 0x8 - support for 4kB mem,
+ * 0x9 - support for 8kB mem,
+ * 0xA - support for 16kB mem,
+ * 0xB - support for 32kB mem,
+ * 0xC - support for 64kB mem,
+ * 0xD - support for 128kB mem,
+ * 0xE - support for 256kB mem,
+ * 0xF - reserved
+ */
+#define USB_CAP2_MAX_MEM_SIZE(p) ((p) & GENMASK(11, 8))
+
+/* USB_CAP3- bitmasks */
+#define EP_IS_IMPLEMENTED(reg, index) ((reg) & (1 << (index)))
+
+/* USB_CAP4- bitmasks */
+#define EP_SUPPORT_ISO(reg, index) ((reg) & (1 << (index)))
+
+/* USB_CAP5- bitmasks */
+#define EP_SUPPORT_STREAM(reg, index) ((reg) & (1 << (index)))
+
+/* USB_CAP6- bitmasks */
+/* The USBSS-DEV Controller Internal build number. */
+#define GET_DEV_BASE_VERSION(p) ((p) & GENMASK(23, 0))
+/* The USBSS-DEV Controller version number. */
+#define GET_DEV_CUSTOM_VERSION(p) ((p) & GENMASK(31, 24))
+
+#define DEV_VER_NXP_V1 0x00024502
+#define DEV_VER_TI_V1 0x00024509
+#define DEV_VER_V2 0x0002450C
+#define DEV_VER_V3 0x0002450D
+
+/* DBG_LINK1- bitmasks */
+/*
+ * LFPS_MIN_DET_U1_EXIT value. This parameter configures the minimum
+ * time required for decoding the received LFPS as an LFPS.U1_Exit.
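+ *
+ * Note: the gadget driver itself only retunes the companion
+ * LFPS_MIN_GEN_U1_EXIT field, and only for DEV_VER_TI_V1 controllers
+ * (see cdns3_gadget_config() in cdns3-gadget.c).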
+ */
+#define DBG_LINK1_LFPS_MIN_DET_U1_EXIT(p) ((p) & GENMASK(7, 0))
+/*
+ * LFPS_MIN_GEN_U1_EXIT value. This parameter configures the minimum time of
+ * phytxelecidle deassertion when generating LFPS.U1_Exit.
+ */
+#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK GENMASK(15, 8)
+#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(p) (((p) << 8) & GENMASK(15, 8))
+/*
+ * RXDET_BREAK_DIS value. This parameter configures terminating the Far-end
+ * Receiver termination detection sequence:
+ * 0: it is possible that USBSS_DEV will terminate the Far-end receiver
+ * termination detection sequence
+ * 1: USBSS_DEV will not terminate the Far-end receiver termination
+ * detection sequence
+ */
+#define DBG_LINK1_RXDET_BREAK_DIS BIT(16)
+/* LFPS_GEN_PING value. This parameter configures LFPS.Ping generation. */
+#define DBG_LINK1_LFPS_GEN_PING(p) (((p) << 17) & GENMASK(21, 17))
+/*
+ * Set the LFPS_MIN_DET_U1_EXIT value. Writing '1' to this bit writes the
+ * LFPS_MIN_DET_U1_EXIT field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect.
+ */
+#define DBG_LINK1_LFPS_MIN_DET_U1_EXIT_SET BIT(24)
+/*
+ * Set the LFPS_MIN_GEN_U1_EXIT value. Writing '1' to this bit writes the
+ * LFPS_MIN_GEN_U1_EXIT field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect.
+ */
+#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET BIT(25)
+/*
+ * Set the RXDET_BREAK_DIS value. Writing '1' to this bit writes
+ * the RXDET_BREAK_DIS field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect.
+ */
+#define DBG_LINK1_RXDET_BREAK_DIS_SET BIT(26)
+/*
+ * Set the LFPS_GEN_PING value. Writing '1' to this bit writes
+ * the LFPS_GEN_PING field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect.
+ */
+#define DBG_LINK1_LFPS_GEN_PING_SET BIT(27)
+
+/* DMA_AXI_CTRL- bitmasks */
+/* The marprot pin configuration. */
+#define DMA_AXI_CTRL_MARPROT(p) ((p) & GENMASK(2, 0))
+/* The mawprot pin configuration. */
+#define DMA_AXI_CTRL_MAWPROT(p) (((p) & GENMASK(2, 0)) << 16)
+#define DMA_AXI_CTRL_NON_SECURE 0x02
+
+#define gadget_to_cdns3_device(g) (container_of(g, struct cdns3_device, gadget))
+
+#define ep_to_cdns3_ep(ep) (container_of(ep, struct cdns3_endpoint, endpoint))
+
+/*-------------------------------------------------------------------------*/
+/*
+ * USBSS-DEV DMA interface.
+ */
+#define TRBS_PER_SEGMENT 600
+
+#define ISO_MAX_INTERVAL 10
+
+#define MAX_TRB_LENGTH BIT(16)
+
+#if TRBS_PER_SEGMENT < 2
+#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
+#define TRBS_PER_STREAM_SEGMENT 2
+
+#if TRBS_PER_STREAM_SEGMENT < 2
+#error "Incorrect TRBS_PER_STREAM_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
+/*
+ * Only for ISOC endpoints - the maximum number of TRBs is calculated as
+ * pow(2, bInterval-1) * number of usb requests. It is a limitation made by
+ * the driver to save memory. The controller must prepare a TRB for each ITP
+ * even if bInterval > 1. That is why the driver needs so many TRBs for
+ * isochronous endpoints.
+ */
+#define TRBS_PER_ISOC_SEGMENT (ISO_MAX_INTERVAL * 8)
+
+#define GET_TRBS_PER_SEGMENT(ep_type) ((ep_type) == USB_ENDPOINT_XFER_ISOC ? \
+ TRBS_PER_ISOC_SEGMENT : TRBS_PER_SEGMENT)
+/**
+ * struct cdns3_trb - represent Transfer Descriptor block.
+ * @buffer: pointer to buffer data
+ * @length: length of data
+ * @control: control flags.
+ *
+ * This structure describes a transfer block serviced by the DMA module.
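+ *
+ * A sketch (with illustrative values) of how one normal TRB is filled
+ * before it is handed to the DMA, using the macros defined below:
+ *
+ * trb->buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+ * trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(512));
+ * trb->control = cpu_to_le32(TRB_TYPE(TRB_NORMAL) | TRB_CYCLE | TRB_IOC);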
+ */
+struct cdns3_trb {
+ __le32 buffer;
+ __le32 length;
+ __le32 control;
+};
+
+#define TRB_SIZE (sizeof(struct cdns3_trb))
+#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)
+#define TRB_STREAM_RING_SIZE (TRB_SIZE * TRBS_PER_STREAM_SEGMENT)
+#define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT)
+#define TRB_CTRL_RING_SIZE (TRB_SIZE * 2)
+
+/* TRB bit mask */
+#define TRB_TYPE_BITMASK GENMASK(15, 10)
+#define TRB_TYPE(p) ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
+
+/* TRB type IDs */
+/* bulk, interrupt, isoc, and control data stage */
+#define TRB_NORMAL 1
+/* TRB for linking ring segments */
+#define TRB_LINK 6
+
+/* Cycle bit - indicates TRB ownership by driver or hw */
+#define TRB_CYCLE BIT(0)
+/*
+ * When set to '1', the device will toggle its interpretation of the Cycle bit
+ */
+#define TRB_TOGGLE BIT(1)
+/*
+ * The controller will set it if OUTSMM (OUT size mismatch) is detected,
+ * this bit is for normal TRB
+ */
+#define TRB_SMM BIT(1)
+
+/*
+ * Short Packet (SP). OUT EPs at DMULT=1 only. Indicates if the TRB was
+ * processed while a USB short packet was received. No more buffers defined by
+ * the TD will be used. DMA will automatically advance to the next TD.
+ * - Shall be set to 0 by Software when putting TRB on the Transfer Ring
+ * - Shall be set to 1 by Controller when a Short Packet condition for this TRB
+ * is detected, independent of whether ISP is set or not.
+ */
+#define TRB_SP BIT(1)
+
+/* Interrupt on short packet */
+#define TRB_ISP BIT(2)
+/* Setting this bit enables FIFO DMA operation mode */
+#define TRB_FIFO_MODE BIT(3)
+/* Chain bit - this TRB is chained to the next TRB of the TD */
+#define TRB_CHAIN BIT(4)
+/* Interrupt on completion */
+#define TRB_IOC BIT(5)
+
+/* stream ID bitmasks. */
+#define TRB_STREAM_ID_BITMASK GENMASK(31, 16)
+#define TRB_STREAM_ID(p) ((p) << 16)
+#define TRB_FIELD_TO_STREAMID(p) (((p) & TRB_STREAM_ID_BITMASK) >> 16)
+
+/* Size of TD expressed in USB packets for HS/FS mode. */
+#define TRB_TDL_HS_SIZE(p) (((p) << 16) & GENMASK(31, 16))
+#define TRB_TDL_HS_SIZE_GET(p) (((p) & GENMASK(31, 16)) >> 16)
+
+/* transfer_len bitmasks. */
+#define TRB_LEN(p) ((p) & GENMASK(16, 0))
+
+/* Size of TD expressed in USB packets for SS mode. */
+#define TRB_TDL_SS_SIZE(p) (((p) << 17) & GENMASK(23, 17))
+#define TRB_TDL_SS_SIZE_GET(p) (((p) & GENMASK(23, 17)) >> 17)
+
+/* transfer_len bitmasks - bits 31:24 */
+#define TRB_BURST_LEN(p) ((unsigned int)((p) << 24) & GENMASK(31, 24))
+#define TRB_BURST_LEN_GET(p) (((p) & GENMASK(31, 24)) >> 24)
+
+/* Data buffer pointer bitmasks */
+#define TRB_BUFFER(p) ((p) & GENMASK(31, 0))
+
+/*-------------------------------------------------------------------------*/
+/* Driver numeric constants */
+
+/* Such a declaration should be added to ch9.h */
+#define USB_DEVICE_MAX_ADDRESS 127
+
+/* Endpoint init values */
+#define CDNS3_EP_MAX_PACKET_LIMIT 1024
+#define CDNS3_EP_MAX_STREAMS 15
+#define CDNS3_EP0_MAX_PACKET_LIMIT 512
+
+/* All endpoints including EP0 */
+#define CDNS3_ENDPOINTS_MAX_COUNT 32
+#define CDNS3_EP_ZLP_BUF_SIZE 1024
+
+#define CDNS3_MAX_NUM_DESCMISS_BUF 32
+#define CDNS3_DESCMIS_BUF_SIZE 2048 /* Bytes */
+#define CDNS3_WA2_NUM_BUFFERS 128
+/*-------------------------------------------------------------------------*/
+/* Used structs */
+
+struct cdns3_device;
+
+/**
+ * struct cdns3_endpoint - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_req_list: list of requests queued on the transfer ring.
+ * @deferred_req_list: list of requests waiting to be queued on the transfer ring.
+ * @wa2_descmiss_req_list: list of requests internally allocated by driver.
+ * @trb_pool: transfer ring - array of transaction buffers
+ * @trb_pool_dma: dma address of transfer ring
+ * @cdns3_dev: device associated with this endpoint
+ * @name: a human readable name e.g. ep1out
+ * @flags: specify the current state of endpoint
+ * @descmis_req: internal transfer object used for getting data from the
+ * on-chip buffer. It is needed only if the function driver doesn't send a
+ * usb_request object in time.
+ * @dir: endpoint direction
+ * @num: endpoint number (1 - 15)
+ * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
+ * @interval: interval between packets used for ISOC endpoints.
+ * @free_trbs: number of free TRBs in transfer ring
+ * @num_trbs: number of all TRBs in transfer ring
+ * @alloc_ring_size: size of the allocated TRB ring
+ * @pcs: producer cycle state
+ * @ccs: consumer cycle state
+ * @enqueue: enqueue index in transfer ring
+ * @dequeue: dequeue index in transfer ring
+ * @trb_burst_size: burst size used in TRBs.
+ */
+struct cdns3_endpoint {
+ struct usb_ep endpoint;
+ struct list_head pending_req_list;
+ struct list_head deferred_req_list;
+ struct list_head wa2_descmiss_req_list;
+ int wa2_counter;
+
+ struct cdns3_trb *trb_pool;
+ dma_addr_t trb_pool_dma;
+
+ struct cdns3_device *cdns3_dev;
+ char name[20];
+
+#define EP_ENABLED BIT(0)
+#define EP_STALLED BIT(1)
+#define EP_STALL_PENDING BIT(2)
+#define EP_WEDGE BIT(3)
+#define EP_TRANSFER_STARTED BIT(4)
+#define EP_UPDATE_EP_TRBADDR BIT(5)
+#define EP_PENDING_REQUEST BIT(6)
+#define EP_RING_FULL BIT(7)
+#define EP_CLAIMED BIT(8)
+#define EP_DEFERRED_DRDY BIT(9)
+#define EP_QUIRK_ISO_OUT_EN BIT(10)
+#define EP_QUIRK_END_TRANSFER BIT(11)
+#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
+#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
+#define EP_TDLCHK_EN BIT(15)
+#define EP_CONFIGURED BIT(16)
+ u32 flags;
+
+ struct cdns3_request *descmis_req;
+
+ u8 dir;
+ u8 num;
+ u8 type;
+ u8 mult;
+ u8 bMaxBurst;
+ u16 wMaxPacketSize;
+ int interval;
+
+ int free_trbs;
+ int num_trbs;
+ int alloc_ring_size;
+ u8 pcs;
+ u8 ccs;
+ int enqueue;
+ int dequeue;
+ u8 trb_burst_size;
+
+ unsigned int wa1_set:1;
+ struct cdns3_trb *wa1_trb;
+ unsigned int wa1_trb_index;
+ unsigned int wa1_cycle_bit:1;
+
+ /* Stream related */
+ unsigned int use_streams:1;
+ unsigned int prime_flag:1;
+ u32 ep_sts_pending;
+ u16 last_stream_id;
+ u16 pending_tdl;
+ unsigned int stream_sg_idx;
+};
+
+/**
+ * struct cdns3_aligned_buf - represent aligned buffer used for DMA transfer
+ * @buf: data buffer aligned to 8 bytes. Buffer addresses used in
+ * TRBs shall be aligned to 8.
+ * @dma: dma address
+ * @size: size of buffer
+ * @dir: DMA transfer direction
+ * @in_use: informs whether this buffer is associated with a usb_request
+ * @list: used to add an instance of this object to a list
+ */
+struct cdns3_aligned_buf {
+ void *buf;
+ dma_addr_t dma;
+ u32 size;
+ enum dma_data_direction dir;
+ unsigned in_use:1;
+ struct list_head list;
+};
+
+/**
+ * struct cdns3_request - extended device side representation of usb_request
+ * object.
+ * @request: generic usb_request object describing single I/O request.
+ * @priv_ep: extended representation of usb_ep object
+ * @trb: the first TRB associated with this request
+ * @start_trb: number of the first TRB in transfer ring
+ * @end_trb: number of the last TRB in transfer ring
+ * @aligned_buf: object holding information about the aligned buffer
+ * associated with this request
+ * @flags: flag specifying special usage of request
+ * @list: used by internally allocated request to add to wa2_descmiss_req_list.
+ * @finished_trb: number of TRBs that have already finished for this request
+ * @num_of_trb: number of TRBs in this request
+ */
+struct cdns3_request {
+ struct usb_request request;
+ struct cdns3_endpoint *priv_ep;
+ struct cdns3_trb *trb;
+ int start_trb;
+ int end_trb;
+ struct cdns3_aligned_buf *aligned_buf;
+#define REQUEST_PENDING BIT(0)
+#define REQUEST_INTERNAL BIT(1)
+#define REQUEST_INTERNAL_CH BIT(2)
+#define REQUEST_ZLP BIT(3)
+#define REQUEST_UNALIGNED BIT(4)
+ u32 flags;
+ struct list_head list;
+ int finished_trb;
+ int num_of_trb;
+};
+
+#define to_cdns3_request(r) (container_of(r, struct cdns3_request, request))
+
+/* Stages used during the enumeration process. */
+#define CDNS3_SETUP_STAGE 0x0
+#define CDNS3_DATA_STAGE 0x1
+#define CDNS3_STATUS_STAGE 0x2
+
+/**
+ * struct cdns3_device - represent USB device.
+ * @dev: pointer to device structure associated with this controller
+ * @sysdev: pointer to the DMA capable device
+ * @gadget: device side representation of the peripheral controller
+ * @gadget_driver: pointer to the gadget driver
+ * @dev_ver: device controller version.
+ * @lock: for synchronizing
+ * @regs: base address for device side registers
+ * @setup_buf: used while processing usb control requests
+ * @setup_dma: dma address for setup_buf
+ * @zlp_buf: ZLP buffer
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @ep0_data_dir: direction for control transfer
+ * @eps: array of pointers to all endpoints (ep0 included)
+ * @aligned_buf_list: list of aligned buffers internally allocated by driver
+ * @aligned_buf_wq: workqueue freeing no-longer-used aligned buffers.
+ * @selected_ep: currently selected endpoint. It's used only to improve
+ * performance.
+ * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP.
+ * @u1_allowed: allow device transition to u1 state
+ * @u2_allowed: allow device transition to u2 state
+ * @is_selfpowered: device is self powered
+ * @setup_pending: a setup packet is being processed by the gadget driver
+ * @hw_configured_flag: hardware endpoint configuration was set.
+ * @wake_up_flag: allow device to remotely wake up the host
+ * @status_completion_no_call: indicates that the driver is waiting for status
+ * stage completion. It's used for deferred SET_CONFIGURATION requests.
+ * @onchip_buffers: number of available on-chip buffers.
+ * @onchip_used_size: actual size of on-chip memory assigned to endpoints.
+ * @pending_status_wq: workqueue handling status stage for deferred requests.
+ * @pending_status_request: request for which status stage was deferred + */ +struct cdns3_device { + struct device *dev; + struct device *sysdev; + + struct usb_gadget gadget; + struct usb_gadget_driver *gadget_driver; + +#define CDNS_REVISION_V0 0x00024501 +#define CDNS_REVISION_V1 0x00024509 + u32 dev_ver; + + /* generic spin-lock for drivers */ + spinlock_t lock; + + struct cdns3_usb_regs __iomem *regs; + + struct dma_pool *eps_dma_pool; + struct usb_ctrlrequest *setup_buf; + dma_addr_t setup_dma; + void *zlp_buf; + + u8 ep0_stage; + int ep0_data_dir; + + struct cdns3_endpoint *eps[CDNS3_ENDPOINTS_MAX_COUNT]; + + struct list_head aligned_buf_list; + struct work_struct aligned_buf_wq; + + u32 selected_ep; + u16 isoch_delay; + + unsigned wait_for_setup:1; + unsigned u1_allowed:1; + unsigned u2_allowed:1; + unsigned is_selfpowered:1; + unsigned setup_pending:1; + unsigned hw_configured_flag:1; + unsigned wake_up_flag:1; + unsigned status_completion_no_call:1; + unsigned using_streams:1; + int out_mem_is_allocated; + + struct work_struct pending_status_wq; + struct usb_request *pending_status_request; + + /*in KB */ + u16 onchip_buffers; + u16 onchip_used_size; + + u16 ep_buf_size; + u16 ep_iso_burst; +}; + +void cdns3_set_register_bit(void __iomem *ptr, u32 mask); +dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep, + struct cdns3_trb *trb); +enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev); +void cdns3_pending_setup_status_handler(struct work_struct *work); +void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev); +void cdns3_set_hw_configuration(struct cdns3_device *priv_dev); +void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep); +void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable); +struct usb_request *cdns3_next_request(struct list_head *list); +void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm); +int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep); +u8 cdns3_ep_addr_to_index(u8 ep_addr); +int cdns3_gadget_ep_set_wedge(struct usb_ep *ep); +int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value); +void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep); +int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep); +struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags); +void cdns3_gadget_ep_free_request(struct usb_ep *ep, + struct usb_request *request); +int cdns3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request); +void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, + struct cdns3_request *priv_req, + int status); + +int cdns3_init_ep0(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep); +void cdns3_ep0_config(struct cdns3_device *priv_dev); +int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable); +void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir); +int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev); + +#endif /* __LINUX_CDNS3_GADGET */ diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c new file mode 100644 index 000000000..59860d175 --- /dev/null +++ b/drivers/usb/cdns3/cdns3-imx.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller + * + * Copyright (C) 2019 NXP + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include 
<linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/of_platform.h> +#include <linux/iopoll.h> +#include <linux/pm_runtime.h> +#include "core.h" + +#define USB3_CORE_CTRL1 0x00 +#define USB3_CORE_CTRL2 0x04 +#define USB3_INT_REG 0x08 +#define USB3_CORE_STATUS 0x0c +#define XHCI_DEBUG_LINK_ST 0x10 +#define XHCI_DEBUG_BUS 0x14 +#define USB3_SSPHY_CTRL1 0x40 +#define USB3_SSPHY_CTRL2 0x44 +#define USB3_SSPHY_STATUS 0x4c +#define USB2_PHY_CTRL1 0x50 +#define USB2_PHY_CTRL2 0x54 +#define USB2_PHY_STATUS 0x5c + +/* Register bits definition */ + +/* USB3_CORE_CTRL1 */ +#define SW_RESET_MASK GENMASK(31, 26) +#define PWR_SW_RESET BIT(31) +#define APB_SW_RESET BIT(30) +#define AXI_SW_RESET BIT(29) +#define RW_SW_RESET BIT(28) +#define PHY_SW_RESET BIT(27) +#define PHYAHB_SW_RESET BIT(26) +#define ALL_SW_RESET (PWR_SW_RESET | APB_SW_RESET | AXI_SW_RESET | \ + RW_SW_RESET | PHY_SW_RESET | PHYAHB_SW_RESET) +#define OC_DISABLE BIT(9) +#define MDCTRL_CLK_SEL BIT(7) +#define MODE_STRAP_MASK (0x7) +#define DEV_MODE (1 << 2) +#define HOST_MODE (1 << 1) +#define OTG_MODE (1 << 0) + +/* USB3_INT_REG */ +#define CLK_125_REQ BIT(29) +#define LPM_CLK_REQ BIT(28) +#define DEVU3_WAEKUP_EN BIT(14) +#define OTG_WAKEUP_EN BIT(12) +#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */ +#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */ + +/* USB3_CORE_STATUS */ +#define MDCTRL_CLK_STATUS BIT(15) +#define DEV_POWER_ON_READY BIT(13) +#define HOST_POWER_ON_READY BIT(12) + +/* USB3_SSPHY_STATUS */ +#define CLK_VALID_MASK (0x3f << 26) +#define CLK_VALID_COMPARE_BITS (0xf << 28) +#define PHY_REFCLK_REQ (1 << 0) + +/* OTG registers definition */ +#define OTGSTS 0x4 +/* OTGSTS */ +#define OTG_NRDY BIT(11) + +/* xHCI registers definition */ +#define XECP_PM_PMCSR 0x8018 +#define XECP_AUX_CTRL_REG1 0x8120 + +/* Register bits definition */ +/* XECP_AUX_CTRL_REG1 */ +#define CFG_RXDET_P3_EN BIT(15) + +/* XECP_PM_PMCSR */ +#define PS_MASK GENMASK(1, 0) +#define PS_D0 0 +#define PS_D1 1 + +struct cdns_imx { + struct device *dev; + void __iomem *noncore; + struct clk_bulk_data *clks; + int num_clks; + struct platform_device *cdns3_pdev; +}; + +static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset) +{ + return readl(data->noncore + offset); +} + +static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value) +{ + writel(value, data->noncore + offset); +} + +static const struct clk_bulk_data imx_cdns3_core_clks[] = { + { .id = "usb3_lpm_clk" }, + { .id = "usb3_bus_clk" }, + { .id = "usb3_aclk" }, + { .id = "usb3_ipg_clk" }, + { .id = "usb3_core_pclk" }, +}; + +static int cdns_imx_noncore_init(struct cdns_imx *data) +{ + u32 value; + int ret; + struct device *dev = data->dev; + + cdns_imx_writel(data, USB3_SSPHY_STATUS, CLK_VALID_MASK); + udelay(1); + ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value, + (value & CLK_VALID_COMPARE_BITS) == CLK_VALID_COMPARE_BITS, + 10, 100000); + if (ret) { + dev_err(dev, "wait clkvld timeout\n"); + return ret; + } + + value = cdns_imx_readl(data, USB3_CORE_CTRL1); + value |= ALL_SW_RESET; + cdns_imx_writel(data, USB3_CORE_CTRL1, value); + udelay(1); + + value = cdns_imx_readl(data, USB3_CORE_CTRL1); + value = (value & ~MODE_STRAP_MASK) | OTG_MODE | OC_DISABLE; + cdns_imx_writel(data, USB3_CORE_CTRL1, value); + + value = cdns_imx_readl(data, USB3_INT_REG); + value |= HOST_INT1_EN | DEV_INT_EN; + cdns_imx_writel(data, USB3_INT_REG, value); + + value = cdns_imx_readl(data, USB3_CORE_CTRL1); + value &= ~ALL_SW_RESET; + cdns_imx_writel(data, USB3_CORE_CTRL1, 
value);
+
+ return ret;
+}
+
+static int cdns_imx_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup);
+static struct cdns3_platform_data cdns_imx_pdata = {
+ .platform_suspend = cdns_imx_platform_suspend,
+ .quirks = CDNS3_DEFAULT_PM_RUNTIME_ALLOW,
+};
+
+static const struct of_dev_auxdata cdns_imx_auxdata[] = {
+ {
+ .compatible = "cdns,usb3",
+ .platform_data = &cdns_imx_pdata,
+ },
+ {},
+};
+
+static int cdns_imx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct cdns_imx *data;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+ data->dev = dev;
+ data->noncore = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->noncore)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->noncore);
+ }
+
+ data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = cdns_imx_noncore_init(data);
+ if (ret)
+ goto err;
+
+ ret = of_platform_populate(node, NULL, cdns_imx_auxdata, dev);
+ if (ret) {
+ dev_err(dev, "failed to create children: %d\n", ret);
+ goto err;
+ }
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return ret;
+err:
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ return ret;
+}
+
+static int cdns_imx_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
+{
+ u32 value;
+
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ if (enable)
+ value |= OTG_WAKEUP_EN | DEVU3_WAEKUP_EN;
+ else
+ value &= ~(OTG_WAKEUP_EN | DEVU3_WAEKUP_EN);
+
+ cdns_imx_writel(data, USB3_INT_REG, value);
+}
+
+static int cdns_imx_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ struct device *parent = dev->parent;
+ struct cdns_imx *data = dev_get_drvdata(parent);
+ void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
+ void __iomem *xhci_regs = cdns->xhci_regs;
+ u32 value;
+ int ret = 0;
+
+ if (cdns->role != USB_ROLE_HOST)
+ return 0;
+
+ if (suspend) {
+ /* SW requests low power when all USB ports allow it 
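+ * As the bit names suggest, the sequence below first asks the
+ * xHCI core for the D1 power state via XECP_PM_PMCSR, then
+ * switches the controller clock with MDCTRL_CLK_SEL and polls
+ * until the core stops requesting its LPM and PHY reference
+ * clocks.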
*/
+ value = readl(xhci_regs + XECP_PM_PMCSR);
+ value &= ~PS_MASK;
+ value |= PS_D1;
+ writel(value, xhci_regs + XECP_PM_PMCSR);
+
+ /* mdctrl_clk_sel */
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value |= MDCTRL_CLK_SEL;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ /* wait for mdctrl_clk_status */
+ value = cdns_imx_readl(data, USB3_CORE_STATUS);
+ ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value,
+ (value & MDCTRL_CLK_STATUS) == MDCTRL_CLK_STATUS,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait mdctrl_clk_status timeout\n");
+
+ /* wait lpm_clk_req to be 0 */
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value,
+ (value & LPM_CLK_REQ) != LPM_CLK_REQ,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait lpm_clk_req timeout\n");
+
+ /* wait phy_refclk_req to be 0 */
+ value = cdns_imx_readl(data, USB3_SSPHY_STATUS);
+ ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
+ (value & PHY_REFCLK_REQ) != PHY_REFCLK_REQ,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait phy_refclk_req timeout\n");
+
+ cdns3_set_wakeup(data, wakeup);
+ } else {
+ cdns3_set_wakeup(data, false);
+
+ /* SW request D0 */
+ value = readl(xhci_regs + XECP_PM_PMCSR);
+ value &= ~PS_MASK;
+ value |= PS_D0;
+ writel(value, xhci_regs + XECP_PM_PMCSR);
+
+ /* clear CFG_RXDET_P3_EN */
+ value = readl(xhci_regs + XECP_AUX_CTRL_REG1);
+ value &= ~CFG_RXDET_P3_EN;
+ writel(value, xhci_regs + XECP_AUX_CTRL_REG1);
+
+ /* clear mdctrl_clk_sel */
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value &= ~MDCTRL_CLK_SEL;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ /* wait CLK_125_REQ to be 1 */
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value,
+ (value & CLK_125_REQ) == CLK_125_REQ,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait CLK_125_REQ timeout\n");
+
+ /* wait for mdctrl_clk_status to be cleared */
+ value = cdns_imx_readl(data, USB3_CORE_STATUS);
+ ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value,
+ (value & MDCTRL_CLK_STATUS) != MDCTRL_CLK_STATUS,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait mdctrl_clk_status cleared timeout\n");
+
+ /* Wait until OTG_NRDY is 0 */
+ value = readl(otg_regs + OTGSTS);
+ ret = readl_poll_timeout(otg_regs + OTGSTS, value,
+ (value & OTG_NRDY) != OTG_NRDY,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait OTG ready timeout\n");
+ }
+
+ return ret;
+}
+
+static int cdns_imx_resume(struct device *dev)
+{
+ struct cdns_imx *data = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(data->num_clks, data->clks);
+}
+
+static int cdns_imx_suspend(struct device *dev)
+{
+ struct cdns_imx *data = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+
+ return 0;
+}
+
+/* Indicate whether controller power was lost while suspended */
+static inline bool cdns_imx_is_power_lost(struct cdns_imx *data)
+{
+ u32 value;
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ return (value & SW_RESET_MASK) == ALL_SW_RESET;
+}
+
+static int __maybe_unused cdns_imx_system_resume(struct device *dev)
+{
+ struct cdns_imx *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_imx_resume(dev);
+ if (ret)
+ return ret;
+
+ if (cdns_imx_is_power_lost(data)) {
+ dev_dbg(dev, "resume from power lost\n");
+ ret = cdns_imx_noncore_init(data);
+ if (ret)
+ cdns_imx_suspend(dev);
+ }
+
+ return ret;
+}
+
+#else
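+/*
+ * cdns_imx_pdata wires up .platform_suspend unconditionally, so a no-op
+ * stub is still needed when CONFIG_PM is disabled.
+ */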
+static int cdns_imx_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cdns_imx_pm_ops = {
+ SET_RUNTIME_PM_OPS(cdns_imx_suspend, cdns_imx_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(cdns_imx_suspend, cdns_imx_system_resume)
+};
+
+static const struct of_device_id cdns_imx_of_match[] = {
+ { .compatible = "fsl,imx8qm-usb3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
+
+static struct platform_driver cdns_imx_driver = {
+ .probe = cdns_imx_probe,
+ .remove = cdns_imx_remove,
+ .driver = {
+ .name = "cdns3-imx",
+ .of_match_table = cdns_imx_of_match,
+ .pm = &cdns_imx_pm_ops,
+ },
+};
+module_platform_driver(cdns_imx_driver);
+
+MODULE_ALIAS("platform:cdns3-imx");
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 i.MX Glue Layer"); diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c new file mode 100644 index 000000000..1f6320d98 --- /dev/null +++ b/drivers/usb/cdns3/cdns3-pci-wrap.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS PCI Glue driver
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+struct cdns3_wrap {
+ struct platform_device *plat_dev;
+ struct resource dev_res[6];
+ int devfn;
+};
+
+#define RES_IRQ_HOST_ID 0
+#define RES_IRQ_PERIPHERAL_ID 1
+#define RES_IRQ_OTG_ID 2
+#define RES_HOST_ID 3
+#define RES_DEV_ID 4
+#define RES_DRD_ID 5
+
+#define PCI_BAR_HOST 0
+#define PCI_BAR_DEV 2
+#define PCI_BAR_OTG 0
+
+#define PCI_DEV_FN_HOST_DEVICE 0
+#define PCI_DEV_FN_OTG 1
+
+#define PCI_DRIVER_NAME "cdns3-pci-usbss"
+#define PLAT_DRIVER_NAME "cdns-usb3"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0x0100
+
+static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
+{
+ struct pci_dev *func;
+
+ /*
+ * Gets the second function.
+ * It's a little tricky, but this platform has two functions.
+ * The first keeps resources for Host/Device while the second
+ * keeps resources for DRD/OTG. 
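+ *
+ * pci_get_device(vendor, device, NULL) starts a new search and
+ * returns the first matching function; passing the previous hit
+ * back in as 'from' continues the search. That is how the code
+ * below steps past pdev itself when it happens to be found first.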
+ */
+ func = pci_get_device(pdev->vendor, pdev->device, NULL);
+ if (unlikely(!func))
+ return NULL;
+
+ if (func->devfn == pdev->devfn) {
+ func = pci_get_device(pdev->vendor, pdev->device, func);
+ if (unlikely(!func))
+ return NULL;
+ }
+
+ if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ func->devfn != PCI_DEV_FN_OTG) {
+ return NULL;
+ }
+
+ return func;
+}
+
+static int cdns3_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct platform_device_info plat_info;
+ struct cdns3_wrap *wrap;
+ struct resource *res;
+ struct pci_dev *func;
+ int err;
+
+ /*
+ * For the GADGET/HOST PCI function the number (devfn) is 0;
+ * for the OTG PCI function it is 1.
+ */
+ if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ pdev->devfn != PCI_DEV_FN_OTG))
+ return -EINVAL;
+
+ func = cdns3_get_second_fun(pdev);
+ if (unlikely(!func))
+ return -EINVAL;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_is_enabled(func)) {
+ wrap = pci_get_drvdata(func);
+ } else {
+ wrap = kzalloc(sizeof(*wrap), GFP_KERNEL);
+ if (!wrap) {
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ }
+
+ res = wrap->dev_res;
+
+ if (pdev->devfn == PCI_DEV_FN_HOST_DEVICE) {
+ /* function 0: host(BAR_0) + device(BAR_1). */
+ dev_dbg(&pdev->dev, "Initialize Device resources\n");
+ res[RES_DEV_ID].start = pci_resource_start(pdev, PCI_BAR_DEV);
+ res[RES_DEV_ID].end = pci_resource_end(pdev, PCI_BAR_DEV);
+ res[RES_DEV_ID].name = "dev";
+ res[RES_DEV_ID].flags = IORESOURCE_MEM;
+ dev_dbg(&pdev->dev, "USBSS-DEV physical base addr: %pa\n",
+ &res[RES_DEV_ID].start);
+
+ res[RES_HOST_ID].start = pci_resource_start(pdev, PCI_BAR_HOST);
+ res[RES_HOST_ID].end = pci_resource_end(pdev, PCI_BAR_HOST);
+ res[RES_HOST_ID].name = "xhci";
+ res[RES_HOST_ID].flags = IORESOURCE_MEM;
+ dev_dbg(&pdev->dev, "USBSS-XHCI physical base addr: %pa\n",
+ &res[RES_HOST_ID].start);
+
+ /* Interrupt for XHCI */
+ wrap->dev_res[RES_IRQ_HOST_ID].start = pdev->irq;
+ wrap->dev_res[RES_IRQ_HOST_ID].name = "host";
+ wrap->dev_res[RES_IRQ_HOST_ID].flags = IORESOURCE_IRQ;
+
+ /* Interrupt for the device. It is the same as for HOST. */
+ wrap->dev_res[RES_IRQ_PERIPHERAL_ID].start = pdev->irq;
+ wrap->dev_res[RES_IRQ_PERIPHERAL_ID].name = "peripheral";
+ wrap->dev_res[RES_IRQ_PERIPHERAL_ID].flags = IORESOURCE_IRQ;
+ } else {
+ res[RES_DRD_ID].start = pci_resource_start(pdev, PCI_BAR_OTG);
+ res[RES_DRD_ID].end = pci_resource_end(pdev, PCI_BAR_OTG);
+ res[RES_DRD_ID].name = "otg";
+ res[RES_DRD_ID].flags = IORESOURCE_MEM;
+ dev_dbg(&pdev->dev, "USBSS-DRD physical base addr: %pa\n",
+ &res[RES_DRD_ID].start);
+
+ /* Interrupt for OTG/DRD. 
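+ * Like the host and peripheral resources on function 0, this
+ * simply forwards the function's legacy INTx line (pdev->irq)
+ * as a named platform IRQ resource.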
*/ + wrap->dev_res[RES_IRQ_OTG_ID].start = pdev->irq; + wrap->dev_res[RES_IRQ_OTG_ID].name = "otg"; + wrap->dev_res[RES_IRQ_OTG_ID].flags = IORESOURCE_IRQ; + } + + if (pci_is_enabled(func)) { + /* set up platform device info */ + memset(&plat_info, 0, sizeof(plat_info)); + plat_info.parent = &pdev->dev; + plat_info.fwnode = pdev->dev.fwnode; + plat_info.name = PLAT_DRIVER_NAME; + plat_info.id = pdev->devfn; + wrap->devfn = pdev->devfn; + plat_info.res = wrap->dev_res; + plat_info.num_res = ARRAY_SIZE(wrap->dev_res); + plat_info.dma_mask = pdev->dma_mask; + /* register platform device */ + wrap->plat_dev = platform_device_register_full(&plat_info); + if (IS_ERR(wrap->plat_dev)) { + pci_disable_device(pdev); + err = PTR_ERR(wrap->plat_dev); + kfree(wrap); + return err; + } + } + + pci_set_drvdata(pdev, wrap); + return err; +} + +static void cdns3_pci_remove(struct pci_dev *pdev) +{ + struct cdns3_wrap *wrap; + struct pci_dev *func; + + func = cdns3_get_second_fun(pdev); + + wrap = (struct cdns3_wrap *)pci_get_drvdata(pdev); + if (wrap->devfn == pdev->devfn) + platform_device_unregister(wrap->plat_dev); + + if (!pci_is_enabled(func)) + kfree(wrap); +} + +static const struct pci_device_id cdns3_pci_ids[] = { + { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, + { 0, } +}; + +static struct pci_driver cdns3_pci_driver = { + .name = PCI_DRIVER_NAME, + .id_table = cdns3_pci_ids, + .probe = cdns3_pci_probe, + .remove = cdns3_pci_remove, +}; + +module_pci_driver(cdns3_pci_driver); +MODULE_DEVICE_TABLE(pci, cdns3_pci_ids); + +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence USBSS PCI wrapper"); diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c new file mode 100644 index 000000000..726b2e4f6 --- /dev/null +++ b/drivers/usb/cdns3/cdns3-plat.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS DRD Driver. + * + * Copyright (C) 2018-2020 Cadence. 
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ *         Pawel Laszczak <pawell@cadence.com>
+ *         Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "drd.h"
+
+static int set_phy_power_on(struct cdns *cdns)
+{
+ int ret;
+
+ ret = phy_power_on(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(cdns->usb3_phy);
+ if (ret)
+ phy_power_off(cdns->usb2_phy);
+
+ return ret;
+}
+
+static void set_phy_power_off(struct cdns *cdns)
+{
+ phy_power_off(cdns->usb3_phy);
+ phy_power_off(cdns->usb2_phy);
+}
+
+/**
+ * cdns3_plat_probe - probe for cdns3 core device
+ * @pdev: Pointer to cdns3 core platform device
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns3_plat_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cdns *cdns;
+ void __iomem *regs;
+ int ret;
+
+ cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
+ if (!cdns)
+ return -ENOMEM;
+
+ cdns->dev = dev;
+ cdns->pdata = dev_get_platdata(dev);
+
+ platform_set_drvdata(pdev, cdns);
+
+ ret = platform_get_irq_byname(pdev, "host");
+ if (ret < 0)
+ return ret;
+
+ cdns->xhci_res[0].start = ret;
+ cdns->xhci_res[0].end = ret;
+ cdns->xhci_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
+ cdns->xhci_res[0].name = "host";
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
+ if (!res) {
+ dev_err(dev, "couldn't get xhci resource\n");
+ return -ENXIO;
+ }
+
+ cdns->xhci_res[1] = *res;
+
+ cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
+
+ if (cdns->dev_irq < 0)
+ return cdns->dev_irq;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "dev");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ cdns->dev_regs = regs;
+
+ cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
+ if (cdns->otg_irq < 0)
+ return cdns->otg_irq;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
+ if (!res) {
+ dev_err(dev, "couldn't get otg resource\n");
+ return -ENXIO;
+ }
+
+ cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
+
+ cdns->otg_res = *res;
+
+ cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+ if (cdns->wakeup_irq == -EPROBE_DEFER)
+ return cdns->wakeup_irq;
+
+ if (cdns->wakeup_irq < 0) {
+ dev_dbg(dev, "couldn't get wakeup irq\n");
+ cdns->wakeup_irq = 0x0;
+ }
+
+ cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
+ if (IS_ERR(cdns->usb2_phy))
+ return PTR_ERR(cdns->usb2_phy);
+
+ ret = phy_init(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
+ if (IS_ERR(cdns->usb3_phy))
+ return PTR_ERR(cdns->usb3_phy);
+
+ ret = phy_init(cdns->usb3_phy);
+ if (ret)
+ goto err_phy3_init;
+
+ ret = set_phy_power_on(cdns);
+ if (ret)
+ goto err_phy_power_on;
+
+ cdns->gadget_init = cdns3_gadget_init;
+
+ ret = cdns_init(cdns);
+ if (ret)
+ goto err_cdns_init;
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
+ pm_runtime_forbid(dev);
+
+ /*
+ * The controller needs less time between bus and controller suspend,
+ * and we also need a small delay to avoid frequently entering low
+ * power mode. 
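+ *
+ * The calls below follow the usual runtime-PM autosuspend pattern:
+ * set a 20 ms inactivity delay, mark the device busy now, and
+ * switch from immediate suspend to delayed autosuspend.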
+ */ + pm_runtime_set_autosuspend_delay(dev, 20); + pm_runtime_mark_last_busy(dev); + pm_runtime_use_autosuspend(dev); + + return 0; + +err_cdns_init: + set_phy_power_off(cdns); +err_phy_power_on: + phy_exit(cdns->usb3_phy); +err_phy3_init: + phy_exit(cdns->usb2_phy); + + return ret; +} + +/** + * cdns3_plat_remove() - unbind drd driver and clean up + * @pdev: Pointer to Linux platform device + * + * Returns 0 on success otherwise negative errno + */ +static int cdns3_plat_remove(struct platform_device *pdev) +{ + struct cdns *cdns = platform_get_drvdata(pdev); + struct device *dev = cdns->dev; + + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + cdns_remove(cdns); + set_phy_power_off(cdns); + phy_exit(cdns->usb2_phy); + phy_exit(cdns->usb3_phy); + return 0; +} + +#ifdef CONFIG_PM + +static int cdns3_set_platform_suspend(struct device *dev, + bool suspend, bool wakeup) +{ + struct cdns *cdns = dev_get_drvdata(dev); + int ret = 0; + + if (cdns->pdata && cdns->pdata->platform_suspend) + ret = cdns->pdata->platform_suspend(dev, suspend, wakeup); + + return ret; +} + +static int cdns3_controller_suspend(struct device *dev, pm_message_t msg) +{ + struct cdns *cdns = dev_get_drvdata(dev); + bool wakeup; + unsigned long flags; + + if (cdns->in_lpm) + return 0; + + if (PMSG_IS_AUTO(msg)) + wakeup = true; + else + wakeup = device_may_wakeup(dev); + + cdns3_set_platform_suspend(cdns->dev, true, wakeup); + set_phy_power_off(cdns); + spin_lock_irqsave(&cdns->lock, flags); + cdns->in_lpm = true; + spin_unlock_irqrestore(&cdns->lock, flags); + dev_dbg(cdns->dev, "%s ends\n", __func__); + + return 0; +} + +static int cdns3_controller_resume(struct device *dev, pm_message_t msg) +{ + struct cdns *cdns = dev_get_drvdata(dev); + int ret; + unsigned long flags; + + if (!cdns->in_lpm) + return 0; + + if (cdns_power_is_lost(cdns)) { + phy_exit(cdns->usb2_phy); + ret = phy_init(cdns->usb2_phy); + if (ret) + return ret; + + phy_exit(cdns->usb3_phy); + ret = phy_init(cdns->usb3_phy); + if (ret) + return ret; + } + + ret = set_phy_power_on(cdns); + if (ret) + return ret; + + cdns3_set_platform_suspend(cdns->dev, false, false); + + spin_lock_irqsave(&cdns->lock, flags); + cdns_resume(cdns); + cdns->in_lpm = false; + spin_unlock_irqrestore(&cdns->lock, flags); + cdns_set_active(cdns, !PMSG_IS_AUTO(msg)); + if (cdns->wakeup_pending) { + cdns->wakeup_pending = false; + enable_irq(cdns->wakeup_irq); + } + dev_dbg(cdns->dev, "%s ends\n", __func__); + + return ret; +} + +static int cdns3_plat_runtime_suspend(struct device *dev) +{ + return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND); +} + +static int cdns3_plat_runtime_resume(struct device *dev) +{ + return cdns3_controller_resume(dev, PMSG_AUTO_RESUME); +} + +#ifdef CONFIG_PM_SLEEP + +static int cdns3_plat_suspend(struct device *dev) +{ + struct cdns *cdns = dev_get_drvdata(dev); + int ret; + + cdns_suspend(cdns); + + ret = cdns3_controller_suspend(dev, PMSG_SUSPEND); + if (ret) + return ret; + + if (device_may_wakeup(dev) && cdns->wakeup_irq) + enable_irq_wake(cdns->wakeup_irq); + + return ret; +} + +static int cdns3_plat_resume(struct device *dev) +{ + return cdns3_controller_resume(dev, PMSG_RESUME); +} +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops cdns3_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume) + SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend, + cdns3_plat_runtime_resume, NULL) +}; + +#ifdef CONFIG_OF +static const struct of_device_id of_cdns3_match[] 
= { + { .compatible = "cdns,usb3" }, + { }, +}; +MODULE_DEVICE_TABLE(of, of_cdns3_match); +#endif + +static struct platform_driver cdns3_driver = { + .probe = cdns3_plat_probe, + .remove = cdns3_plat_remove, + .driver = { + .name = "cdns-usb3", + .of_match_table = of_match_ptr(of_cdns3_match), + .pm = &cdns3_pm_ops, + }, +}; + +module_platform_driver(cdns3_driver); + +MODULE_ALIAS("platform:cdns3"); +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver"); diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c new file mode 100644 index 000000000..07c318770 --- /dev/null +++ b/drivers/usb/cdns3/cdns3-ti.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * cdns3-ti.c - TI specific Glue layer for Cadence USB Controller + * + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> + +/* USB Wrapper register offsets */ +#define USBSS_PID 0x0 +#define USBSS_W1 0x4 +#define USBSS_STATIC_CONFIG 0x8 +#define USBSS_PHY_TEST 0xc +#define USBSS_DEBUG_CTRL 0x10 +#define USBSS_DEBUG_INFO 0x14 +#define USBSS_DEBUG_LINK_STATE 0x18 +#define USBSS_DEVICE_CTRL 0x1c + +/* Wrapper 1 register bits */ +#define USBSS_W1_PWRUP_RST BIT(0) +#define USBSS_W1_OVERCURRENT_SEL BIT(8) +#define USBSS_W1_MODESTRAP_SEL BIT(9) +#define USBSS_W1_OVERCURRENT BIT(16) +#define USBSS_W1_MODESTRAP_MASK GENMASK(18, 17) +#define USBSS_W1_MODESTRAP_SHIFT 17 +#define USBSS_W1_USB2_ONLY BIT(19) + +/* Static config register bits */ +#define USBSS1_STATIC_PLL_REF_SEL_MASK GENMASK(8, 5) +#define USBSS1_STATIC_PLL_REF_SEL_SHIFT 5 +#define USBSS1_STATIC_LOOPBACK_MODE_MASK GENMASK(4, 3) +#define USBSS1_STATIC_LOOPBACK_MODE_SHIFT 3 +#define USBSS1_STATIC_VBUS_SEL_MASK GENMASK(2, 1) +#define USBSS1_STATIC_VBUS_SEL_SHIFT 1 +#define USBSS1_STATIC_LANE_REVERSE BIT(0) + +/* Modestrap modes */ +enum modestrap_mode { USBSS_MODESTRAP_MODE_NONE, + USBSS_MODESTRAP_MODE_HOST, + USBSS_MODESTRAP_MODE_PERIPHERAL}; + +struct cdns_ti { + struct device *dev; + void __iomem *usbss; + unsigned usb2_only:1; + unsigned vbus_divider:1; + struct clk *usb2_refclk; + struct clk *lpm_clk; +}; + +static const int cdns_ti_rate_table[] = { /* in KHZ */ + 9600, + 10000, + 12000, + 19200, + 20000, + 24000, + 25000, + 26000, + 38400, + 40000, + 58000, + 50000, + 52000, +}; + +static inline u32 cdns_ti_readl(struct cdns_ti *data, u32 offset) +{ + return readl(data->usbss + offset); +} + +static inline void cdns_ti_writel(struct cdns_ti *data, u32 offset, u32 value) +{ + writel(value, data->usbss + offset); +} + +static int cdns_ti_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct cdns_ti *data; + int error; + u32 reg; + int rate_code, i; + unsigned long rate; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + platform_set_drvdata(pdev, data); + + data->dev = dev; + + data->usbss = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->usbss)) { + dev_err(dev, "can't map IOMEM resource\n"); + return PTR_ERR(data->usbss); + } + + data->usb2_refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(data->usb2_refclk)) { + dev_err(dev, "can't get 
usb2_refclk\n"); + return PTR_ERR(data->usb2_refclk); + } + + data->lpm_clk = devm_clk_get(dev, "lpm"); + if (IS_ERR(data->lpm_clk)) { + dev_err(dev, "can't get lpm_clk\n"); + return PTR_ERR(data->lpm_clk); + } + + rate = clk_get_rate(data->usb2_refclk); + rate /= 1000; /* To KHz */ + for (i = 0; i < ARRAY_SIZE(cdns_ti_rate_table); i++) { + if (cdns_ti_rate_table[i] == rate) + break; + } + + if (i == ARRAY_SIZE(cdns_ti_rate_table)) { + dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate); + return -EINVAL; + } + + rate_code = i; + + pm_runtime_enable(dev); + error = pm_runtime_get_sync(dev); + if (error < 0) { + dev_err(dev, "pm_runtime_get_sync failed: %d\n", error); + goto err; + } + + /* assert RESET */ + reg = cdns_ti_readl(data, USBSS_W1); + reg &= ~USBSS_W1_PWRUP_RST; + cdns_ti_writel(data, USBSS_W1, reg); + + /* set static config */ + reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG); + reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK; + reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT; + + reg &= ~USBSS1_STATIC_VBUS_SEL_MASK; + data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); + if (data->vbus_divider) + reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT; + + cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg); + reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG); + + /* set USB2_ONLY mode if requested */ + reg = cdns_ti_readl(data, USBSS_W1); + data->usb2_only = device_property_read_bool(dev, "ti,usb2-only"); + if (data->usb2_only) + reg |= USBSS_W1_USB2_ONLY; + + /* set default modestrap */ + reg |= USBSS_W1_MODESTRAP_SEL; + reg &= ~USBSS_W1_MODESTRAP_MASK; + reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT; + cdns_ti_writel(data, USBSS_W1, reg); + + /* de-assert RESET */ + reg |= USBSS_W1_PWRUP_RST; + cdns_ti_writel(data, USBSS_W1, reg); + + error = of_platform_populate(node, NULL, NULL, dev); + if (error) { + dev_err(dev, "failed to create children: %d\n", error); + goto err; + } + + return 0; + +err: + pm_runtime_put_sync(data->dev); + pm_runtime_disable(data->dev); + + return error; +} + +static int cdns_ti_remove_core(struct device *dev, void *c) +{ + struct platform_device *pdev = to_platform_device(dev); + + platform_device_unregister(pdev); + + return 0; +} + +static int cdns_ti_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + device_for_each_child(dev, NULL, cdns_ti_remove_core); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id cdns_ti_of_match[] = { + { .compatible = "ti,j721e-usb", }, + { .compatible = "ti,am64-usb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, cdns_ti_of_match); + +static struct platform_driver cdns_ti_driver = { + .probe = cdns_ti_probe, + .remove = cdns_ti_remove, + .driver = { + .name = "cdns3-ti", + .of_match_table = cdns_ti_of_match, + }, +}; + +module_platform_driver(cdns_ti_driver); + +MODULE_ALIAS("platform:cdns3-ti"); +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence USB3 TI Glue Layer"); diff --git a/drivers/usb/cdns3/cdns3-trace.c b/drivers/usb/cdns3/cdns3-trace.c new file mode 100644 index 000000000..b9858acae --- /dev/null +++ b/drivers/usb/cdns3/cdns3-trace.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USBSS device controller driver Trace Support + * + * Copyright (C) 2018-2019 Cadence. 
+ * + * Author: Pawel Laszczak <pawell@cadence.com> + */ + +#define CREATE_TRACE_POINTS +#include "cdns3-trace.h" diff --git a/drivers/usb/cdns3/cdns3-trace.h b/drivers/usb/cdns3/cdns3-trace.h new file mode 100644 index 000000000..7574b4a62 --- /dev/null +++ b/drivers/usb/cdns3/cdns3-trace.h @@ -0,0 +1,567 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * USBSS device controller driver. + * Trace support header file. + * + * Copyright (C) 2018-2019 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cdns3 + +#if !defined(__LINUX_CDNS3_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __LINUX_CDNS3_TRACE + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <asm/byteorder.h> +#include <linux/usb/ch9.h> +#include "core.h" +#include "cdns3-gadget.h" +#include "cdns3-debug.h" + +#define CDNS3_MSG_MAX 500 + +TRACE_EVENT(cdns3_halt, + TP_PROTO(struct cdns3_endpoint *ep_priv, u8 halt, u8 flush), + TP_ARGS(ep_priv, halt, flush), + TP_STRUCT__entry( + __string(name, ep_priv->name) + __field(u8, halt) + __field(u8, flush) + ), + TP_fast_assign( + __assign_str(name, ep_priv->name); + __entry->halt = halt; + __entry->flush = flush; + ), + TP_printk("Halt %s for %s: %s", __entry->flush ? " and flush" : "", + __get_str(name), __entry->halt ? "set" : "cleared") +); + +TRACE_EVENT(cdns3_wa1, + TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg), + TP_ARGS(ep_priv, msg), + TP_STRUCT__entry( + __string(ep_name, ep_priv->name) + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(ep_name, ep_priv->name); + __assign_str(msg, msg); + ), + TP_printk("WA1: %s %s", __get_str(ep_name), __get_str(msg)) +); + +TRACE_EVENT(cdns3_wa2, + TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg), + TP_ARGS(ep_priv, msg), + TP_STRUCT__entry( + __string(ep_name, ep_priv->name) + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(ep_name, ep_priv->name); + __assign_str(msg, msg); + ), + TP_printk("WA2: %s %s", __get_str(ep_name), __get_str(msg)) +); + +DECLARE_EVENT_CLASS(cdns3_log_doorbell, + TP_PROTO(const char *ep_name, u32 ep_trbaddr), + TP_ARGS(ep_name, ep_trbaddr), + TP_STRUCT__entry( + __string(name, ep_name) + __field(u32, ep_trbaddr) + ), + TP_fast_assign( + __assign_str(name, ep_name); + __entry->ep_trbaddr = ep_trbaddr; + ), + TP_printk("%s, ep_trbaddr %08x", __get_str(name), + __entry->ep_trbaddr) +); + +DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_ep0, + TP_PROTO(const char *ep_name, u32 ep_trbaddr), + TP_ARGS(ep_name, ep_trbaddr) +); + +DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_epx, + TP_PROTO(const char *ep_name, u32 ep_trbaddr), + TP_ARGS(ep_name, ep_trbaddr) +); + +DECLARE_EVENT_CLASS(cdns3_log_usb_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists), + TP_ARGS(priv_dev, usb_ists), + TP_STRUCT__entry( + __field(enum usb_device_speed, speed) + __field(u32, usb_ists) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __entry->speed = cdns3_get_speed(priv_dev); + __entry->usb_ists = usb_ists; + ), + TP_printk("%s", cdns3_decode_usb_irq(__get_str(str), __entry->speed, + __entry->usb_ists)) +); + +DEFINE_EVENT(cdns3_log_usb_irq, cdns3_usb_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists), + TP_ARGS(priv_dev, usb_ists) +); + +DECLARE_EVENT_CLASS(cdns3_log_epx_irq, + TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_dev, priv_ep), + TP_STRUCT__entry( + __string(ep_name, priv_ep->name) + __field(u32, ep_sts) + __field(u32, ep_traddr) + __field(u32, 
ep_last_sid) + __field(u32, use_streams) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __assign_str(ep_name, priv_ep->name); + __entry->ep_sts = readl(&priv_dev->regs->ep_sts); + __entry->ep_traddr = readl(&priv_dev->regs->ep_traddr); + __entry->ep_last_sid = priv_ep->last_stream_id; + __entry->use_streams = priv_ep->use_streams; + ), + TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d", + cdns3_decode_epx_irq(__get_str(str), + __get_str(ep_name), + __entry->ep_sts), + __entry->ep_traddr, + __entry->ep_last_sid, + __entry->use_streams) +); + +DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq, + TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_dev, priv_ep) +); + +DECLARE_EVENT_CLASS(cdns3_log_ep0_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts), + TP_ARGS(priv_dev, ep_sts), + TP_STRUCT__entry( + __field(int, ep_dir) + __field(u32, ep_sts) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __entry->ep_dir = priv_dev->selected_ep; + __entry->ep_sts = ep_sts; + ), + TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str), + __entry->ep_dir, + __entry->ep_sts)) +); + +DEFINE_EVENT(cdns3_log_ep0_irq, cdns3_ep0_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts), + TP_ARGS(priv_dev, ep_sts) +); + +DECLARE_EVENT_CLASS(cdns3_log_ctrl, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl), + TP_STRUCT__entry( + __field(u8, bRequestType) + __field(u8, bRequest) + __field(u16, wValue) + __field(u16, wIndex) + __field(u16, wLength) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __entry->bRequestType = ctrl->bRequestType; + __entry->bRequest = ctrl->bRequest; + __entry->wValue = le16_to_cpu(ctrl->wValue); + __entry->wIndex = le16_to_cpu(ctrl->wIndex); + __entry->wLength = le16_to_cpu(ctrl->wLength); + ), + TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNS3_MSG_MAX, + __entry->bRequestType, + __entry->bRequest, __entry->wValue, + __entry->wIndex, __entry->wLength) + ) +); + +DEFINE_EVENT(cdns3_log_ctrl, cdns3_ctrl_req, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl) +); + +DECLARE_EVENT_CLASS(cdns3_log_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __string(name, req->priv_ep->name) + __field(struct cdns3_request *, req) + __field(void *, buf) + __field(unsigned int, actual) + __field(unsigned int, length) + __field(int, status) + __field(int, zero) + __field(int, short_not_ok) + __field(int, no_interrupt) + __field(int, start_trb) + __field(int, end_trb) + __field(int, flags) + __field(unsigned int, stream_id) + ), + TP_fast_assign( + __assign_str(name, req->priv_ep->name); + __entry->req = req; + __entry->buf = req->request.buf; + __entry->actual = req->request.actual; + __entry->length = req->request.length; + __entry->status = req->request.status; + __entry->zero = req->request.zero; + __entry->short_not_ok = req->request.short_not_ok; + __entry->no_interrupt = req->request.no_interrupt; + __entry->start_trb = req->start_trb; + __entry->end_trb = req->end_trb; + __entry->flags = req->flags; + __entry->stream_id = req->request.stream_id; + ), + TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d," + " trb: [start:%d, end:%d], flags:%x SID: %u", + __get_str(name), __entry->req, __entry->buf, __entry->actual, + __entry->length, + __entry->zero ? "Z" : "z", + __entry->short_not_ok ? "S" : "s", + __entry->no_interrupt ? 
"I" : "i", + __entry->status, + __entry->start_trb, + __entry->end_trb, + __entry->flags, + __entry->stream_id + ) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_alloc_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_free_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_ep_queue, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_ep_dequeue, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_gadget_giveback, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +TRACE_EVENT(cdns3_ep0_queue, + TP_PROTO(struct cdns3_device *dev_priv, struct usb_request *request), + TP_ARGS(dev_priv, request), + TP_STRUCT__entry( + __field(int, dir) + __field(int, length) + ), + TP_fast_assign( + __entry->dir = dev_priv->ep0_data_dir; + __entry->length = request->length; + ), + TP_printk("Queue to ep0%s length: %u", __entry->dir ? "in" : "out", + __entry->length) +); + +DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __string(name, req->priv_ep->name) + __field(struct cdns3_request *, req) + __field(unsigned int, length) + __field(unsigned int, actual) + __field(unsigned int, stream_id) + ), + TP_fast_assign( + __assign_str(name, req->priv_ep->name); + __entry->req = req; + __entry->actual = req->request.length; + __entry->length = req->request.actual; + __entry->stream_id = req->request.stream_id; + ), + TP_printk("%s: req: %p,request length: %u actual length: %u SID: %u", + __get_str(name), __entry->req, __entry->length, + __entry->actual, __entry->stream_id) +); + +DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_stream_split_transfer_len, + cdns3_stream_transfer_split_next_part, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DECLARE_EVENT_CLASS(cdns3_log_aligned_request, + TP_PROTO(struct cdns3_request *priv_req), + TP_ARGS(priv_req), + TP_STRUCT__entry( + __string(name, priv_req->priv_ep->name) + __field(struct usb_request *, req) + __field(void *, buf) + __field(dma_addr_t, dma) + __field(void *, aligned_buf) + __field(dma_addr_t, aligned_dma) + __field(u32, aligned_buf_size) + ), + TP_fast_assign( + __assign_str(name, priv_req->priv_ep->name); + __entry->req = &priv_req->request; + __entry->buf = priv_req->request.buf; + __entry->dma = priv_req->request.dma; + __entry->aligned_buf = priv_req->aligned_buf->buf; + __entry->aligned_dma = priv_req->aligned_buf->dma; + __entry->aligned_buf_size = priv_req->aligned_buf->size; + ), + TP_printk("%s: req: %p, req buf %p, dma %pad a_buf %p a_dma %pad, size %d", + __get_str(name), __entry->req, __entry->buf, &__entry->dma, + __entry->aligned_buf, &__entry->aligned_dma, + __entry->aligned_buf_size + ) +); + +DEFINE_EVENT(cdns3_log_aligned_request, cdns3_free_aligned_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DECLARE_EVENT_CLASS(cdns3_log_map_request, + TP_PROTO(struct cdns3_request *priv_req), + TP_ARGS(priv_req), + TP_STRUCT__entry( + __string(name, priv_req->priv_ep->name) + __field(struct usb_request *, req) + __field(void *, buf) + __field(dma_addr_t, dma) + ), + TP_fast_assign( + 
+ TP_fast_assign(
+ __assign_str(name, priv_req->priv_ep->name);
+ __entry->req = &priv_req->request;
+ __entry->buf = priv_req->request.buf;
+ __entry->dma = priv_req->request.dma;
+ ),
+ TP_printk("%s: req: %p, req buf %p, dma %pad",
+ __get_str(name), __entry->req, __entry->buf, &__entry->dma
+ )
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_trb,
+ TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
+ TP_ARGS(priv_ep, trb),
+ TP_STRUCT__entry(
+ __string(name, priv_ep->name)
+ __field(struct cdns3_trb *, trb)
+ __field(u32, buffer)
+ __field(u32, length)
+ __field(u32, control)
+ __field(u32, type)
+ __field(unsigned int, last_stream_id)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_ep->name);
+ __entry->trb = trb;
+ __entry->buffer = le32_to_cpu(trb->buffer);
+ __entry->length = le32_to_cpu(trb->length);
+ __entry->control = le32_to_cpu(trb->control);
+ __entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
+ __entry->last_stream_id = priv_ep->last_stream_id;
+ ),
+ TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u",
+ __get_str(name), __entry->trb, __entry->buffer,
+ TRB_LEN(__entry->length),
+ (u8)TRB_BURST_LEN_GET(__entry->length),
+ __entry->control,
+ __entry->control & TRB_CYCLE ? "C=1, " : "C=0, ",
+ __entry->control & TRB_TOGGLE ? "T=1, " : "T=0, ",
+ __entry->control & TRB_ISP ? "ISP, " : "",
+ __entry->control & TRB_FIFO_MODE ? "FIFO, " : "",
+ __entry->control & TRB_CHAIN ? "CHAIN, " : "",
+ __entry->control & TRB_IOC ? "IOC, " : "",
+ TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? 
"Normal" : "LINK", + TRB_FIELD_TO_STREAMID(__entry->control), + __entry->last_stream_id + ) +); + +DEFINE_EVENT(cdns3_log_trb, cdns3_prepare_trb, + TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), + TP_ARGS(priv_ep, trb) +); + +DEFINE_EVENT(cdns3_log_trb, cdns3_complete_trb, + TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), + TP_ARGS(priv_ep, trb) +); + +DECLARE_EVENT_CLASS(cdns3_log_ring, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep), + TP_STRUCT__entry( + __dynamic_array(u8, ring, TRB_RING_SIZE) + __dynamic_array(u8, priv_ep, sizeof(struct cdns3_endpoint)) + __dynamic_array(char, buffer, + (TRBS_PER_SEGMENT * 65) + CDNS3_MSG_MAX) + ), + TP_fast_assign( + memcpy(__get_dynamic_array(priv_ep), priv_ep, + sizeof(struct cdns3_endpoint)); + memcpy(__get_dynamic_array(ring), priv_ep->trb_pool, + TRB_RING_SIZE); + ), + + TP_printk("%s", + cdns3_dbg_ring((struct cdns3_endpoint *)__get_str(priv_ep), + (struct cdns3_trb *)__get_str(ring), + __get_str(buffer))) +); + +DEFINE_EVENT(cdns3_log_ring, cdns3_ring, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep) +); + +DECLARE_EVENT_CLASS(cdns3_log_ep, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep), + TP_STRUCT__entry( + __string(name, priv_ep->name) + __field(unsigned int, maxpacket) + __field(unsigned int, maxpacket_limit) + __field(unsigned int, max_streams) + __field(unsigned int, use_streams) + __field(unsigned int, maxburst) + __field(unsigned int, flags) + __field(unsigned int, dir) + __field(u8, enqueue) + __field(u8, dequeue) + ), + TP_fast_assign( + __assign_str(name, priv_ep->name); + __entry->maxpacket = priv_ep->endpoint.maxpacket; + __entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit; + __entry->max_streams = priv_ep->endpoint.max_streams; + __entry->use_streams = priv_ep->use_streams; + __entry->maxburst = priv_ep->endpoint.maxburst; + __entry->flags = priv_ep->flags; + __entry->dir = priv_ep->dir; + __entry->enqueue = priv_ep->enqueue; + __entry->dequeue = priv_ep->dequeue; + ), + TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, " + "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s", + __get_str(name), __entry->maxpacket, + __entry->maxpacket_limit, __entry->max_streams, + __entry->use_streams, + __entry->maxburst, __entry->enqueue, + __entry->dequeue, + __entry->flags & EP_ENABLED ? "EN | " : "", + __entry->flags & EP_STALLED ? "STALLED | " : "", + __entry->flags & EP_WEDGE ? "WEDGE | " : "", + __entry->flags & EP_TRANSFER_STARTED ? "STARTED | " : "", + __entry->flags & EP_UPDATE_EP_TRBADDR ? "UPD TRB | " : "", + __entry->flags & EP_PENDING_REQUEST ? "REQ PEN | " : "", + __entry->flags & EP_RING_FULL ? "RING FULL |" : "", + __entry->flags & EP_CLAIMED ? "CLAIMED " : "", + __entry->dir ? 
"IN" : "OUT" + ) +); + +DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_enable, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep) +); + +DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_disable, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep) +); + +DECLARE_EVENT_CLASS(cdns3_log_request_handled, + TP_PROTO(struct cdns3_request *priv_req, int current_index, + int handled), + TP_ARGS(priv_req, current_index, handled), + TP_STRUCT__entry( + __field(struct cdns3_request *, priv_req) + __field(unsigned int, dma_position) + __field(unsigned int, handled) + __field(unsigned int, dequeue_idx) + __field(unsigned int, enqueue_idx) + __field(unsigned int, start_trb) + __field(unsigned int, end_trb) + ), + TP_fast_assign( + __entry->priv_req = priv_req; + __entry->dma_position = current_index; + __entry->handled = handled; + __entry->dequeue_idx = priv_req->priv_ep->dequeue; + __entry->enqueue_idx = priv_req->priv_ep->enqueue; + __entry->start_trb = priv_req->start_trb; + __entry->end_trb = priv_req->end_trb; + ), + TP_printk("Req: %p %s, DMA pos: %d, ep deq: %d, ep enq: %d," + " start trb: %d, end trb: %d", + __entry->priv_req, + __entry->handled ? "handled" : "not handled", + __entry->dma_position, __entry->dequeue_idx, + __entry->enqueue_idx, __entry->start_trb, + __entry->end_trb + ) +); + +DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled, + TP_PROTO(struct cdns3_request *priv_req, int current_index, + int handled), + TP_ARGS(priv_req, current_index, handled) +); +#endif /* __LINUX_CDNS3_TRACE */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cdns3-trace + +#include <trace/define_trace.h> diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h new file mode 100644 index 000000000..f0ca865cc --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-debug.h @@ -0,0 +1,586 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. 
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+#ifndef __LINUX_CDNSP_DEBUG
+#define __LINUX_CDNSP_DEBUG
+
+static inline const char *cdnsp_trb_comp_code_string(u8 status)
+{
+ switch (status) {
+ case COMP_INVALID:
+ return "Invalid";
+ case COMP_SUCCESS:
+ return "Success";
+ case COMP_DATA_BUFFER_ERROR:
+ return "Data Buffer Error";
+ case COMP_BABBLE_DETECTED_ERROR:
+ return "Babble Detected";
+ case COMP_TRB_ERROR:
+ return "TRB Error";
+ case COMP_RESOURCE_ERROR:
+ return "Resource Error";
+ case COMP_NO_SLOTS_AVAILABLE_ERROR:
+ return "No Slots Available Error";
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ return "Invalid Stream Type Error";
+ case COMP_SLOT_NOT_ENABLED_ERROR:
+ return "Slot Not Enabled Error";
+ case COMP_ENDPOINT_NOT_ENABLED_ERROR:
+ return "Endpoint Not Enabled Error";
+ case COMP_SHORT_PACKET:
+ return "Short Packet";
+ case COMP_RING_UNDERRUN:
+ return "Ring Underrun";
+ case COMP_RING_OVERRUN:
+ return "Ring Overrun";
+ case COMP_VF_EVENT_RING_FULL_ERROR:
+ return "VF Event Ring Full Error";
+ case COMP_PARAMETER_ERROR:
+ return "Parameter Error";
+ case COMP_CONTEXT_STATE_ERROR:
+ return "Context State Error";
+ case COMP_EVENT_RING_FULL_ERROR:
+ return "Event Ring Full Error";
+ case COMP_INCOMPATIBLE_DEVICE_ERROR:
+ return "Incompatible Device Error";
+ case COMP_MISSED_SERVICE_ERROR:
+ return "Missed Service Error";
+ case COMP_COMMAND_RING_STOPPED:
+ return "Command Ring Stopped";
+ case COMP_COMMAND_ABORTED:
+ return "Command Aborted";
+ case COMP_STOPPED:
+ return "Stopped";
+ case COMP_STOPPED_LENGTH_INVALID:
+ return "Stopped - Length Invalid";
+ case COMP_STOPPED_SHORT_PACKET:
+ return "Stopped - Short Packet";
+ case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+ return "Max Exit Latency Too Large Error";
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return "Isoch Buffer Overrun";
+ case COMP_EVENT_LOST_ERROR:
+ return "Event Lost Error";
+ case COMP_UNDEFINED_ERROR:
+ return "Undefined Error";
+ case COMP_INVALID_STREAM_ID_ERROR:
+ return "Invalid Stream ID Error";
+ default:
+ return "Unknown!!";
+ }
+}
+
+static inline const char *cdnsp_trb_type_string(u8 type)
+{
+ switch (type) {
+ case TRB_NORMAL:
+ return "Normal";
+ case TRB_SETUP:
+ return "Setup Stage";
+ case TRB_DATA:
+ return "Data Stage";
+ case TRB_STATUS:
+ return "Status Stage";
+ case TRB_ISOC:
+ return "Isoch";
+ case TRB_LINK:
+ return "Link";
+ case TRB_EVENT_DATA:
+ return "Event Data";
+ case TRB_TR_NOOP:
+ return "No-Op";
+ case TRB_ENABLE_SLOT:
+ return "Enable Slot Command";
+ case TRB_DISABLE_SLOT:
+ return "Disable Slot Command";
+ case TRB_ADDR_DEV:
+ return "Address Device Command";
+ case TRB_CONFIG_EP:
+ return "Configure Endpoint Command";
+ case TRB_EVAL_CONTEXT:
+ return "Evaluate Context Command";
+ case TRB_RESET_EP:
+ return "Reset Endpoint Command";
+ case TRB_STOP_RING:
+ return "Stop Ring Command";
+ case TRB_SET_DEQ:
+ return "Set TR Dequeue Pointer Command";
+ case TRB_RESET_DEV:
+ return "Reset Device Command";
+ case TRB_FORCE_HEADER:
+ return "Force Header Command";
+ case TRB_CMD_NOOP:
+ return "No-Op Command";
+ case TRB_TRANSFER:
+ return "Transfer Event";
+ case TRB_COMPLETION:
+ return "Command Completion Event";
+ case TRB_PORT_STATUS:
+ return "Port Status Change Event";
+ case TRB_HC_EVENT:
+ return "Device Controller Event";
+ case TRB_MFINDEX_WRAP:
+ return "MFINDEX Wrap Event";
+ case TRB_ENDPOINT_NRDY:
+ return "Endpoint Not Ready";
+ case TRB_HALT_ENDPOINT:
+ return "Halt Endpoint";
+ case TRB_FLUSH_ENDPOINT:
+ return "Flush Endpoint";
+ default: 
+ return "UNKNOWN"; + } +} + +static inline const char *cdnsp_ring_type_string(enum cdnsp_ring_type type) +{ + switch (type) { + case TYPE_CTRL: + return "CTRL"; + case TYPE_ISOC: + return "ISOC"; + case TYPE_BULK: + return "BULK"; + case TYPE_INTR: + return "INTR"; + case TYPE_STREAM: + return "STREAM"; + case TYPE_COMMAND: + return "CMD"; + case TYPE_EVENT: + return "EVENT"; + } + + return "UNKNOWN"; +} + +static inline char *cdnsp_slot_state_string(u32 state) +{ + switch (state) { + case SLOT_STATE_ENABLED: + return "enabled/disabled"; + case SLOT_STATE_DEFAULT: + return "default"; + case SLOT_STATE_ADDRESSED: + return "addressed"; + case SLOT_STATE_CONFIGURED: + return "configured"; + default: + return "reserved"; + } +} + +static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0, + u32 field1, u32 field2, u32 field3) +{ + int ep_id = TRB_TO_EP_INDEX(field3) - 1; + int type = TRB_FIELD_TO_TYPE(field3); + unsigned int ep_num; + int ret; + u32 temp; + + ep_num = DIV_ROUND_UP(ep_id, 2); + + switch (type) { + case TRB_LINK: + ret = snprintf(str, size, + "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c", + field1, field0, GET_INTR_TARGET(field2), + cdnsp_trb_type_string(type), + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_TC ? 'T' : 't', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_TRANSFER: + case TRB_COMPLETION: + case TRB_PORT_STATUS: + case TRB_HC_EVENT: + ret = snprintf(str, size, + "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'" + " len %ld slot %ld flags %c:%c", + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), + cdnsp_trb_type_string(type), field1, field0, + cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)), + EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), + field3 & EVENT_DATA ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_MFINDEX_WRAP: + ret = snprintf(str, size, "%s: flags %c", + cdnsp_trb_type_string(type), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SETUP: + ret = snprintf(str, size, + "type '%s' bRequestType %02x bRequest %02x " + "wValue %02x%02x wIndex %02x%02x wLength %d " + "length %ld TD size %ld intr %ld Setup ID %ld " + "flags %c:%c:%c", + cdnsp_trb_type_string(type), + field0 & 0xff, + (field0 & 0xff00) >> 8, + (field0 & 0xff000000) >> 24, + (field0 & 0xff0000) >> 16, + (field1 & 0xff00) >> 8, + field1 & 0xff, + (field1 & 0xff000000) >> 16 | + (field1 & 0xff0000) >> 16, + TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + TRB_SETUPID_TO_TYPE(field3), + field3 & TRB_IDT ? 'D' : 'd', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DATA: + ret = snprintf(str, size, + "type '%s' Buffer %08x%08x length %ld TD size %ld " + "intr %ld flags %c:%c:%c:%c:%c:%c:%c", + cdnsp_trb_type_string(type), + field1, field0, TRB_LEN(field2), + GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + field3 & TRB_IDT ? 'D' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_STATUS: + ret = snprintf(str, size, + "Buffer %08x%08x length %ld TD size %ld intr" + "%ld type '%s' flags %c:%c:%c:%c", + field1, field0, TRB_LEN(field2), + GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + cdnsp_trb_type_string(type), + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 
'C' : 'c'); + break; + case TRB_NORMAL: + case TRB_ISOC: + case TRB_EVENT_DATA: + case TRB_TR_NOOP: + ret = snprintf(str, size, + "type '%s' Buffer %08x%08x length %ld " + "TD size %ld intr %ld " + "flags %c:%c:%c:%c:%c:%c:%c:%c:%c", + cdnsp_trb_type_string(type), + field1, field0, TRB_LEN(field2), + GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + field3 & TRB_BEI ? 'B' : 'b', + field3 & TRB_IDT ? 'T' : 't', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c', + !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v'); + break; + case TRB_CMD_NOOP: + case TRB_ENABLE_SLOT: + ret = snprintf(str, size, "%s: flags %c", + cdnsp_trb_type_string(type), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DISABLE_SLOT: + ret = snprintf(str, size, "%s: slot %ld flags %c", + cdnsp_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_ADDR_DEV: + ret = snprintf(str, size, + "%s: ctx %08x%08x slot %ld flags %c:%c", + cdnsp_trb_type_string(type), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_BSR ? 'B' : 'b', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_CONFIG_EP: + ret = snprintf(str, size, + "%s: ctx %08x%08x slot %ld flags %c:%c", + cdnsp_trb_type_string(type), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_DC ? 'D' : 'd', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_EVAL_CONTEXT: + ret = snprintf(str, size, + "%s: ctx %08x%08x slot %ld flags %c", + cdnsp_trb_type_string(type), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_EP: + case TRB_HALT_ENDPOINT: + case TRB_FLUSH_ENDPOINT: + ret = snprintf(str, size, + "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_STOP_RING: + ret = snprintf(str, size, + "%s: ep%d%s(%d) slot %ld sp %d flags %c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), + TRB_TO_SLOT_ID(field3), + TRB_TO_SUSPEND_PORT(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SET_DEQ: + ret = snprintf(str, size, + "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), field1, field0, + TRB_TO_STREAM_ID(field2), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_DEV: + ret = snprintf(str, size, "%s: slot %ld flags %c", + cdnsp_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_ENDPOINT_NRDY: + temp = TRB_TO_HOST_STREAM(field2); + + ret = snprintf(str, size, + "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), temp, + temp == STREAM_PRIME_ACK ? "(PRIME)" : "", + temp == STREAM_REJECTED ? "(REJECTED)" : "", + TRB_TO_DEV_STREAM(field0), + field3 & TRB_STAT ? 'S' : 's', + field3 & TRB_CYCLE ? 
'C' : 'c'); + break; + default: + ret = snprintf(str, size, + "type '%s' -> raw %08x %08x %08x %08x", + cdnsp_trb_type_string(type), + field0, field1, field2, field3); + } + + if (ret >= size) + pr_info("CDNSP: buffer overflowed.\n"); + + return str; +} + +static inline const char *cdnsp_decode_slot_context(u32 info, u32 info2, + u32 int_target, u32 state) +{ + static char str[1024]; + int ret = 0; + u32 speed; + char *s; + + speed = info & DEV_SPEED; + + switch (speed) { + case SLOT_SPEED_FS: + s = "full-speed"; + break; + case SLOT_SPEED_HS: + s = "high-speed"; + break; + case SLOT_SPEED_SS: + s = "super-speed"; + break; + case SLOT_SPEED_SSP: + s = "super-speed plus"; + break; + default: + s = "UNKNOWN speed"; + } + + ret = sprintf(str, "%s Ctx Entries %d", + s, (info & LAST_CTX_MASK) >> 27); + + ret += sprintf(str + ret, " [Intr %ld] Addr %ld State %s", + GET_INTR_TARGET(int_target), state & DEV_ADDR_MASK, + cdnsp_slot_state_string(GET_SLOT_STATE(state))); + + return str; +} + +static inline const char *cdnsp_portsc_link_state_string(u32 portsc) +{ + switch (portsc & PORT_PLS_MASK) { + case XDEV_U0: + return "U0"; + case XDEV_U1: + return "U1"; + case XDEV_U2: + return "U2"; + case XDEV_U3: + return "U3"; + case XDEV_DISABLED: + return "Disabled"; + case XDEV_RXDETECT: + return "RxDetect"; + case XDEV_INACTIVE: + return "Inactive"; + case XDEV_POLLING: + return "Polling"; + case XDEV_RECOVERY: + return "Recovery"; + case XDEV_HOT_RESET: + return "Hot Reset"; + case XDEV_COMP_MODE: + return "Compliance mode"; + case XDEV_TEST_MODE: + return "Test mode"; + case XDEV_RESUME: + return "Resume"; + default: + break; + } + + return "Unknown"; +} + +static inline const char *cdnsp_decode_portsc(char *str, size_t size, + u32 portsc) +{ + int ret; + + ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ", + portsc & PORT_POWER ? "Powered" : "Powered-off", + portsc & PORT_CONNECT ? "Connected" : "Not-connected", + portsc & PORT_PED ? 
"Enabled" : "Disabled", + cdnsp_portsc_link_state_string(portsc), + DEV_PORT_SPEED(portsc)); + + if (portsc & PORT_RESET) + ret += snprintf(str + ret, size - ret, "In-Reset "); + + ret += snprintf(str + ret, size - ret, "Change: "); + if (portsc & PORT_CSC) + ret += snprintf(str + ret, size - ret, "CSC "); + if (portsc & PORT_WRC) + ret += snprintf(str + ret, size - ret, "WRC "); + if (portsc & PORT_RC) + ret += snprintf(str + ret, size - ret, "PRC "); + if (portsc & PORT_PLC) + ret += snprintf(str + ret, size - ret, "PLC "); + if (portsc & PORT_CEC) + ret += snprintf(str + ret, size - ret, "CEC "); + ret += snprintf(str + ret, size - ret, "Wake: "); + if (portsc & PORT_WKCONN_E) + ret += snprintf(str + ret, size - ret, "WCE "); + if (portsc & PORT_WKDISC_E) + ret += snprintf(str + ret, size - ret, "WDE "); + + return str; +} + +static inline const char *cdnsp_ep_state_string(u8 state) +{ + switch (state) { + case EP_STATE_DISABLED: + return "disabled"; + case EP_STATE_RUNNING: + return "running"; + case EP_STATE_HALTED: + return "halted"; + case EP_STATE_STOPPED: + return "stopped"; + case EP_STATE_ERROR: + return "error"; + default: + return "INVALID"; + } +} + +static inline const char *cdnsp_ep_type_string(u8 type) +{ + switch (type) { + case ISOC_OUT_EP: + return "Isoc OUT"; + case BULK_OUT_EP: + return "Bulk OUT"; + case INT_OUT_EP: + return "Int OUT"; + case CTRL_EP: + return "Ctrl"; + case ISOC_IN_EP: + return "Isoc IN"; + case BULK_IN_EP: + return "Bulk IN"; + case INT_IN_EP: + return "Int IN"; + default: + return "INVALID"; + } +} + +static inline const char *cdnsp_decode_ep_context(char *str, size_t size, + u32 info, u32 info2, + u64 deq, u32 tx_info) +{ + u8 max_pstr, ep_state, interval, ep_type, burst, cerr, mult; + bool lsa, hid; + u16 maxp, avg; + u32 esit; + int ret; + + esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | + CTX_TO_MAX_ESIT_PAYLOAD_LO(tx_info); + + ep_state = info & EP_STATE_MASK; + max_pstr = CTX_TO_EP_MAXPSTREAMS(info); + interval = CTX_TO_EP_INTERVAL(info); + mult = CTX_TO_EP_MULT(info) + 1; + lsa = !!(info & EP_HAS_LSA); + + cerr = (info2 & (3 << 1)) >> 1; + ep_type = CTX_TO_EP_TYPE(info2); + hid = !!(info2 & (1 << 7)); + burst = CTX_TO_MAX_BURST(info2); + maxp = MAX_PACKET_DECODED(info2); + + avg = EP_AVG_TRB_LENGTH(tx_info); + + ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s", + cdnsp_ep_state_string(ep_state), mult, + max_pstr, lsa ? "LSA " : ""); + + ret += snprintf(str + ret, size - ret, + "interval %d us max ESIT payload %d CErr %d ", + (1 << interval) * 125, esit, cerr); + + ret += snprintf(str + ret, size - ret, + "Type %s %sburst %d maxp %d deq %016llx ", + cdnsp_ep_type_string(ep_type), hid ? "HID" : "", + burst, maxp, deq); + + ret += snprintf(str + ret, size - ret, "avg trb len %d", avg); + + return str; +} + +#endif /*__LINUX_CDNSP_DEBUG*/ diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c new file mode 100644 index 000000000..f317d3c84 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-ep0.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. 
+ * + * Author: Pawel Laszczak <pawell@cadence.com> + * + */ + +#include <linux/usb/composite.h> +#include <linux/usb/gadget.h> +#include <linux/list.h> + +#include "cdnsp-gadget.h" +#include "cdnsp-trace.h" + +static void cdnsp_ep0_stall(struct cdnsp_device *pdev) +{ + struct cdnsp_request *preq; + struct cdnsp_ep *pep; + + pep = &pdev->eps[0]; + preq = next_request(&pep->pending_list); + + if (pdev->three_stage_setup) { + cdnsp_halt_endpoint(pdev, pep, true); + + if (preq) + cdnsp_gadget_giveback(pep, preq, -ECONNRESET); + } else { + pep->ep_state |= EP0_HALTED_STATUS; + + if (preq) + list_del(&preq->list); + + cdnsp_status_stage(pdev); + } +} + +static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + int ret; + + spin_unlock(&pdev->lock); + ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl); + spin_lock(&pdev->lock); + + return ret; +} + +static int cdnsp_ep0_set_config(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + enum usb_device_state state = pdev->gadget.state; + u32 cfg; + int ret; + + cfg = le16_to_cpu(ctrl->wValue); + + switch (state) { + case USB_STATE_ADDRESS: + trace_cdnsp_ep0_set_config("from Address state"); + break; + case USB_STATE_CONFIGURED: + trace_cdnsp_ep0_set_config("from Configured state"); + break; + default: + dev_err(pdev->dev, "Set Configuration - bad device state\n"); + return -EINVAL; + } + + ret = cdnsp_ep0_delegate_req(pdev, ctrl); + if (ret) + return ret; + + if (!cfg) + usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS); + + return 0; +} + +static int cdnsp_ep0_set_address(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + enum usb_device_state state = pdev->gadget.state; + struct cdnsp_slot_ctx *slot_ctx; + unsigned int slot_state; + int ret; + u32 addr; + + addr = le16_to_cpu(ctrl->wValue); + + if (addr > 127) { + dev_err(pdev->dev, "Invalid device address %d\n", addr); + return -EINVAL; + } + + slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); + + if (state == USB_STATE_CONFIGURED) { + dev_err(pdev->dev, "Can't Set Address from Configured State\n"); + return -EINVAL; + } + + pdev->device_address = le16_to_cpu(ctrl->wValue); + + slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); + slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + if (slot_state == SLOT_STATE_ADDRESSED) + cdnsp_reset_device(pdev); + + /*set device address*/ + ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS); + if (ret) + return ret; + + if (addr) + usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS); + else + usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT); + + return 0; +} + +int cdnsp_status_stage(struct cdnsp_device *pdev) +{ + pdev->ep0_stage = CDNSP_STATUS_STAGE; + pdev->ep0_preq.request.length = 0; + + return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq); +} + +static int cdnsp_w_index_to_ep_index(u16 wIndex) +{ + if (!(wIndex & USB_ENDPOINT_NUMBER_MASK)) + return 0; + + return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) + + (wIndex & USB_ENDPOINT_DIR_MASK ? 
1 : 0) - 1; +} + +static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + struct cdnsp_ep *pep; + __le16 *response; + int ep_sts = 0; + u16 status = 0; + u32 recipient; + + recipient = ctrl->bRequestType & USB_RECIP_MASK; + + switch (recipient) { + case USB_RECIP_DEVICE: + status = pdev->gadget.is_selfpowered; + status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; + + if (pdev->gadget.speed >= USB_SPEED_SUPER) { + status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED; + status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED; + } + break; + case USB_RECIP_INTERFACE: + /* + * Function Remote Wake Capable D0 + * Function Remote Wakeup D1 + */ + return cdnsp_ep0_delegate_req(pdev, ctrl); + case USB_RECIP_ENDPOINT: + ep_sts = cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex)); + pep = &pdev->eps[ep_sts]; + ep_sts = GET_EP_CTX_STATE(pep->out_ctx); + + /* check if endpoint is stalled */ + if (ep_sts == EP_STATE_HALTED) + status = BIT(USB_ENDPOINT_HALT); + break; + default: + return -EINVAL; + } + + response = (__le16 *)pdev->setup_buf; + *response = cpu_to_le16(status); + + pdev->ep0_preq.request.length = sizeof(*response); + pdev->ep0_preq.request.buf = pdev->setup_buf; + + return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq); +} + +static void cdnsp_enter_test_mode(struct cdnsp_device *pdev) +{ + u32 temp; + + temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28); + temp |= PORT_TEST_MODE(pdev->test_mode); + writel(temp, &pdev->active_port->regs->portpmsc); +} + +static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl, + int set) +{ + enum usb_device_state state; + enum usb_device_speed speed; + u16 tmode; + + state = pdev->gadget.state; + speed = pdev->gadget.speed; + + switch (le16_to_cpu(ctrl->wValue)) { + case USB_DEVICE_REMOTE_WAKEUP: + pdev->may_wakeup = !!set; + trace_cdnsp_may_wakeup(set); + break; + case USB_DEVICE_U1_ENABLE: + if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER) + return -EINVAL; + + pdev->u1_allowed = !!set; + trace_cdnsp_u1(set); + break; + case USB_DEVICE_U2_ENABLE: + if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER) + return -EINVAL; + + pdev->u2_allowed = !!set; + trace_cdnsp_u2(set); + break; + case USB_DEVICE_LTM_ENABLE: + return -EINVAL; + case USB_DEVICE_TEST_MODE: + if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH) + return -EINVAL; + + tmode = le16_to_cpu(ctrl->wIndex); + + if (!set || (tmode & 0xff) != 0) + return -EINVAL; + + tmode = tmode >> 8; + + if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J) + return -EINVAL; + + pdev->test_mode = tmode; + + /* + * Test mode must be set before Status Stage but controller + * will start testing sequence after Status Stage. + */ + cdnsp_enter_test_mode(pdev); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl, + int set) +{ + u16 wValue, wIndex; + int ret; + + wValue = le16_to_cpu(ctrl->wValue); + wIndex = le16_to_cpu(ctrl->wIndex); + + switch (wValue) { + case USB_INTRF_FUNC_SUSPEND: + ret = cdnsp_ep0_delegate_req(pdev, ctrl); + if (ret) + return ret; + + /* + * Remote wakeup is enabled when any function within a device + * is enabled for function remote wakeup. 
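+	 *
+	 * may_wakeup below therefore works as a counter: every function
+	 * that enables USB_INTRF_FUNC_SUSPEND_RW increments it and every
+	 * function that clears it decrements it, so remote wakeup stays
+	 * enabled until the last function has disabled it.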
+ */ + if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) + pdev->may_wakeup++; + else + if (pdev->may_wakeup > 0) + pdev->may_wakeup--; + + return 0; + default: + return -EINVAL; + } + + return 0; +} + +static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl, + int set) +{ + struct cdnsp_ep *pep; + u16 wValue; + + wValue = le16_to_cpu(ctrl->wValue); + pep = &pdev->eps[cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))]; + + switch (wValue) { + case USB_ENDPOINT_HALT: + if (!set && (pep->ep_state & EP_WEDGE)) { + /* Resets Sequence Number */ + cdnsp_halt_endpoint(pdev, pep, 0); + cdnsp_halt_endpoint(pdev, pep, 1); + break; + } + + return cdnsp_halt_endpoint(pdev, pep, set); + default: + dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue); + return -EINVAL; + } + + return 0; +} + +static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl, + int set) +{ + switch (ctrl->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + return cdnsp_ep0_handle_feature_device(pdev, ctrl, set); + case USB_RECIP_INTERFACE: + return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set); + case USB_RECIP_ENDPOINT: + return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set); + default: + return -EINVAL; + } +} + +static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + enum usb_device_state state = pdev->gadget.state; + u16 wLength; + + if (state == USB_STATE_DEFAULT) + return -EINVAL; + + wLength = le16_to_cpu(ctrl->wLength); + + if (wLength != 6) { + dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n", + wLength); + return -EINVAL; + } + + /* + * To handle Set SEL we need to receive 6 bytes from Host. So let's + * queue a usb_request for 6 bytes. + */ + pdev->ep0_preq.request.length = 6; + pdev->ep0_preq.request.buf = pdev->setup_buf; + + return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq); +} + +static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength)) + return -EINVAL; + + pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue); + + return 0; +} + +static int cdnsp_ep0_std_request(struct cdnsp_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + int ret; + + switch (ctrl->bRequest) { + case USB_REQ_GET_STATUS: + ret = cdnsp_ep0_handle_status(pdev, ctrl); + break; + case USB_REQ_CLEAR_FEATURE: + ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0); + break; + case USB_REQ_SET_FEATURE: + ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1); + break; + case USB_REQ_SET_ADDRESS: + ret = cdnsp_ep0_set_address(pdev, ctrl); + break; + case USB_REQ_SET_CONFIGURATION: + ret = cdnsp_ep0_set_config(pdev, ctrl); + break; + case USB_REQ_SET_SEL: + ret = cdnsp_ep0_set_sel(pdev, ctrl); + break; + case USB_REQ_SET_ISOCH_DELAY: + ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl); + break; + default: + ret = cdnsp_ep0_delegate_req(pdev, ctrl); + break; + } + + return ret; +} + +void cdnsp_setup_analyze(struct cdnsp_device *pdev) +{ + struct usb_ctrlrequest *ctrl = &pdev->setup; + int ret = -EINVAL; + u16 len; + + trace_cdnsp_ctrl_req(ctrl); + + if (!pdev->gadget_driver) + goto out; + + if (pdev->gadget.state == USB_STATE_NOTATTACHED) { + dev_err(pdev->dev, "ERR: Setup detected in unattached state\n"); + goto out; + } + + /* Restore the ep0 to Stopped/Running state. 
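+	 * A halted ep0 could not complete the data or status stage of the
+	 * new control transfer, so any halt left over from a stalled
+	 * previous SETUP is cleared first.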
*/ + if (pdev->eps[0].ep_state & EP_HALTED) { + trace_cdnsp_ep0_halted("Restore to normal state"); + cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); + } + + /* + * Finishing previous SETUP transfer by removing request from + * list and informing upper layer + */ + if (!list_empty(&pdev->eps[0].pending_list)) { + struct cdnsp_request *req; + + trace_cdnsp_ep0_request("Remove previous"); + req = next_request(&pdev->eps[0].pending_list); + cdnsp_ep_dequeue(&pdev->eps[0], req); + } + + len = le16_to_cpu(ctrl->wLength); + if (!len) { + pdev->three_stage_setup = false; + pdev->ep0_expect_in = false; + } else { + pdev->three_stage_setup = true; + pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN); + } + + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + ret = cdnsp_ep0_std_request(pdev, ctrl); + else + ret = cdnsp_ep0_delegate_req(pdev, ctrl); + + if (ret == USB_GADGET_DELAYED_STATUS) { + trace_cdnsp_ep0_status_stage("delayed"); + return; + } +out: + if (ret < 0) + cdnsp_ep0_stall(pdev); + else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE) + cdnsp_status_stage(pdev); +} diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c new file mode 100644 index 000000000..0044897ee --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -0,0 +1,2029 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + * + */ + +#include <linux/moduleparam.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/iopoll.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/irq.h> +#include <linux/dmi.h> + +#include "core.h" +#include "gadget-export.h" +#include "drd.h" +#include "cdnsp-gadget.h" +#include "cdnsp-trace.h" + +unsigned int cdnsp_port_speed(unsigned int port_status) +{ + /*Detect gadget speed based on PORTSC register*/ + if (DEV_SUPERSPEEDPLUS(port_status)) + return USB_SPEED_SUPER_PLUS; + else if (DEV_SUPERSPEED(port_status)) + return USB_SPEED_SUPER; + else if (DEV_HIGHSPEED(port_status)) + return USB_SPEED_HIGH; + else if (DEV_FULLSPEED(port_status)) + return USB_SPEED_FULL; + + /* If device is detached then speed will be USB_SPEED_UNKNOWN.*/ + return USB_SPEED_UNKNOWN; +} + +/* + * Given a port state, this function returns a value that would result in the + * port being in the same state, if the value was written to the port status + * control register. + * Save Read Only (RO) bits and save read/write bits where + * writing a 0 clears the bit and writing a 1 sets the bit (RWS). + * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. + */ +u32 cdnsp_port_state_to_neutral(u32 state) +{ + /* Save read-only status and port state. */ + return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS); +} + +/** + * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities + * with capability ID id. + * @base: PCI MMIO registers base address. + * @start: Address at which to start looking, (0 or HCC_PARAMS to start at + * beginning of list) + * @id: Extended capability ID to search for. + * + * Returns the offset of the next matching extended capability structure. + * Some capabilities can occur several times, + * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all. 
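+ *
+ * Typical iteration over every instance of one capability (an
+ * illustrative sketch; parse_cap() is a hypothetical consumer):
+ *
+ *	u32 offset = 0;
+ *
+ *	while ((offset = cdnsp_find_next_ext_cap(base, offset, id)))
+ *		parse_cap(base + offset);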
+ */ +int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id) +{ + u32 offset = start; + u32 next; + u32 val; + + if (!start || start == HCC_PARAMS_OFFSET) { + val = readl(base + HCC_PARAMS_OFFSET); + if (val == ~0) + return 0; + + offset = HCC_EXT_CAPS(val) << 2; + if (!offset) + return 0; + } + + do { + val = readl(base + offset); + if (val == ~0) + return 0; + + if (EXT_CAPS_ID(val) == id && offset != start) + return offset; + + next = EXT_CAPS_NEXT(val); + offset += next << 2; + } while (next); + + return 0; +} + +void cdnsp_set_link_state(struct cdnsp_device *pdev, + __le32 __iomem *port_regs, + u32 link_state) +{ + int port_num = 0xFF; + u32 temp; + + temp = readl(port_regs); + temp = cdnsp_port_state_to_neutral(temp); + temp |= PORT_WKCONN_E | PORT_WKDISC_E; + writel(temp, port_regs); + + temp &= ~PORT_PLS_MASK; + temp |= PORT_LINK_STROBE | link_state; + + if (pdev->active_port) + port_num = pdev->active_port->port_num; + + trace_cdnsp_handle_port_status(port_num, readl(port_regs)); + writel(temp, port_regs); + trace_cdnsp_link_state_changed(port_num, readl(port_regs)); +} + +static void cdnsp_disable_port(struct cdnsp_device *pdev, + __le32 __iomem *port_regs) +{ + u32 temp = cdnsp_port_state_to_neutral(readl(port_regs)); + + writel(temp | PORT_PED, port_regs); +} + +static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, + __le32 __iomem *port_regs) +{ + u32 portsc = readl(port_regs); + + writel(cdnsp_port_state_to_neutral(portsc) | + (portsc & PORT_CHANGE_BITS), port_regs); +} + +static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) +{ + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; + + bit = readl(reg) | bit; + writel(bit, reg); +} + +static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) +{ + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; + + bit = readl(reg) & ~bit; + writel(bit, reg); +} + +/* + * Disable interrupts and begin the controller halting process. + */ +static void cdnsp_quiesce(struct cdnsp_device *pdev) +{ + u32 halted; + u32 mask; + u32 cmd; + + mask = ~(u32)(CDNSP_IRQS); + + halted = readl(&pdev->op_regs->status) & STS_HALT; + if (!halted) + mask &= ~(CMD_R_S | CMD_DEVEN); + + cmd = readl(&pdev->op_regs->command); + cmd &= mask; + writel(cmd, &pdev->op_regs->command); +} + +/* + * Force controller into halt state. + * + * Disable any IRQs and clear the run/stop bit. + * Controller will complete any current and actively pipelined transactions, and + * should halt within 16 ms of the run/stop bit being cleared. + * Read controller Halted bit in the status register to see when the + * controller is finished. + */ +int cdnsp_halt(struct cdnsp_device *pdev) +{ + int ret; + u32 val; + + cdnsp_quiesce(pdev); + + ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val, + val & STS_HALT, 1, + CDNSP_MAX_HALT_USEC); + if (ret) { + dev_err(pdev->dev, "ERROR: Device halt failed\n"); + return ret; + } + + pdev->cdnsp_state |= CDNSP_STATE_HALTED; + + return 0; +} + +/* + * device controller died, register read returns 0xffffffff, or command never + * ends. 
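+ * (An all-ones read is the usual signature of a controller that is no
+ * longer reachable: aborted reads on PCI typically complete with
+ * 0xffffffff.)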
+ */ +void cdnsp_died(struct cdnsp_device *pdev) +{ + dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n"); + pdev->cdnsp_state |= CDNSP_STATE_DYING; + cdnsp_halt(pdev); +} + +/* + * Set the run bit and wait for the device to be running. + */ +static int cdnsp_start(struct cdnsp_device *pdev) +{ + u32 temp; + int ret; + + temp = readl(&pdev->op_regs->command); + temp |= (CMD_R_S | CMD_DEVEN); + writel(temp, &pdev->op_regs->command); + + pdev->cdnsp_state = 0; + + /* + * Wait for the STS_HALT Status bit to be 0 to indicate the device is + * running. + */ + ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, + !(temp & STS_HALT), 1, + CDNSP_MAX_HALT_USEC); + if (ret) { + pdev->cdnsp_state = CDNSP_STATE_DYING; + dev_err(pdev->dev, "ERROR: Controller run failed\n"); + } + + return ret; +} + +/* + * Reset a halted controller. + * + * This resets pipelines, timers, counters, state machines, etc. + * Transactions will be terminated immediately, and operational registers + * will be set to their defaults. + */ +int cdnsp_reset(struct cdnsp_device *pdev) +{ + u32 command; + u32 temp; + int ret; + + temp = readl(&pdev->op_regs->status); + + if (temp == ~(u32)0) { + dev_err(pdev->dev, "Device not accessible, reset failed.\n"); + return -ENODEV; + } + + if ((temp & STS_HALT) == 0) { + dev_err(pdev->dev, "Controller not halted, aborting reset.\n"); + return -EINVAL; + } + + command = readl(&pdev->op_regs->command); + command |= CMD_RESET; + writel(command, &pdev->op_regs->command); + + ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp, + !(temp & CMD_RESET), 1, + 10 * 1000); + if (ret) { + dev_err(pdev->dev, "ERROR: Controller reset failed\n"); + return ret; + } + + /* + * CDNSP cannot write any doorbells or operational registers other + * than status until the "Controller Not Ready" flag is cleared. + */ + ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, + !(temp & STS_CNR), 1, + 10 * 1000); + + if (ret) { + dev_err(pdev->dev, "ERROR: Controller not ready to work\n"); + return ret; + } + + dev_dbg(pdev->dev, "Controller ready to work"); + + return ret; +} + +/* + * cdnsp_get_endpoint_index - Find the index for an endpoint given its + * descriptor.Use the return value to right shift 1 for the bitmask. + * + * Index = (epnum * 2) + direction - 1, + * where direction = 0 for OUT, 1 for IN. + * For control endpoints, the IN index is used (OUT index is unused), so + * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) + */ +static unsigned int + cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc) +{ + unsigned int index = (unsigned int)usb_endpoint_num(desc); + + if (usb_endpoint_xfer_control(desc)) + return index * 2; + + return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; +} + +/* + * Find the flag for this endpoint (for use in the control context). Use the + * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is + * bit 1, etc. 
+ */ +static unsigned int + cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc) +{ + return 1 << (cdnsp_get_endpoint_index(desc) + 1); +} + +int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq) +{ + struct cdnsp_device *pdev = pep->pdev; + struct usb_request *request; + int ret; + + if (preq->epnum == 0 && !list_empty(&pep->pending_list)) { + trace_cdnsp_request_enqueue_busy(preq); + return -EBUSY; + } + + request = &preq->request; + request->actual = 0; + request->status = -EINPROGRESS; + preq->direction = pep->direction; + preq->epnum = pep->number; + preq->td.drbl = 0; + + ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction); + if (ret) { + trace_cdnsp_request_enqueue_error(preq); + return ret; + } + + list_add_tail(&preq->list, &pep->pending_list); + + trace_cdnsp_request_enqueue(preq); + + switch (usb_endpoint_type(pep->endpoint.desc)) { + case USB_ENDPOINT_XFER_CONTROL: + ret = cdnsp_queue_ctrl_tx(pdev, preq); + break; + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + ret = cdnsp_queue_bulk_tx(pdev, preq); + break; + case USB_ENDPOINT_XFER_ISOC: + ret = cdnsp_queue_isoc_tx_prepare(pdev, preq); + } + + if (ret) + goto unmap; + + return 0; + +unmap: + usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request, + pep->direction); + list_del(&preq->list); + trace_cdnsp_request_enqueue_error(preq); + + return ret; +} + +/* + * Remove the request's TD from the endpoint ring. This may cause the + * controller to stop USB transfers, potentially stopping in the middle of a + * TRB buffer. The controller should pick up where it left off in the TD, + * unless a Set Transfer Ring Dequeue Pointer is issued. + * + * The TRBs that make up the buffers for the canceled request will be "removed" + * from the ring. Since the ring is a contiguous structure, they can't be + * physically removed. Instead, there are two options: + * + * 1) If the controller is in the middle of processing the request to be + * canceled, we simply move the ring's dequeue pointer past those TRBs + * using the Set Transfer Ring Dequeue Pointer command. This will be + * the common case, when drivers timeout on the last submitted request + * and attempt to cancel. + * + * 2) If the controller is in the middle of a different TD, we turn the TRBs + * into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained. + * The controller will need to invalidate the any TRBs it has cached after + * the stop endpoint command. + * + * 3) The TD may have completed by the time the Stop Endpoint Command + * completes, so software needs to handle that case too. + * + */ +int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq) +{ + struct cdnsp_device *pdev = pep->pdev; + int ret_stop = 0; + int ret_rem; + + trace_cdnsp_request_dequeue(preq); + + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) + ret_stop = cdnsp_cmd_stop_ep(pdev, pep); + + ret_rem = cdnsp_remove_request(pdev, preq, pep); + + return ret_rem ? ret_rem : ret_stop; +} + +static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_slot_ctx *slot_ctx; + struct cdnsp_ep_ctx *ep_ctx; + int i; + + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + + /* + * When a device's add flag and drop flag are zero, any subsequent + * configure endpoint command will leave that endpoint's state + * untouched. Make sure we don't leave any old state in the input + * endpoint contexts. 
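+	 * (The flags are bitmasks in cdnsp_get_endpoint_flag() format:
+	 * e.g. add_flags = BIT(2) would ask a later configure endpoint
+	 * command to add the endpoint with index 1.)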
+ */ + ctrl_ctx->drop_flags = 0; + ctrl_ctx->add_flags = 0; + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + + /* Endpoint 0 is always valid */ + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); + for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) { + ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i); + ep_ctx->ep_info = 0; + ep_ctx->ep_info2 = 0; + ep_ctx->deq = 0; + ep_ctx->tx_info = 0; + } +} + +/* Issue a configure endpoint command and wait for it to finish. */ +static int cdnsp_configure_endpoint(struct cdnsp_device *pdev) +{ + int ret; + + cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + if (ret) { + dev_err(pdev->dev, + "ERR: unexpected command completion code 0x%x.\n", ret); + return -EINVAL; + } + + return ret; +} + +static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_segment *segment; + union cdnsp_trb *event; + u32 cycle_state; + u32 data; + + event = pdev->event_ring->dequeue; + segment = pdev->event_ring->deq_seg; + cycle_state = pdev->event_ring->cycle_state; + + while (1) { + data = le32_to_cpu(event->trans_event.flags); + + /* Check the owner of the TRB. */ + if ((data & TRB_CYCLE) != cycle_state) + break; + + if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER && + TRB_TO_EP_ID(data) == (pep->idx + 1)) { + data |= TRB_EVENT_INVALIDATE; + event->trans_event.flags = cpu_to_le32(data); + } + + if (cdnsp_last_trb_on_seg(segment, event)) { + cycle_state ^= 1; + segment = pdev->event_ring->deq_seg->next; + event = segment->trbs; + } else { + event++; + } + } +} + +int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) +{ + struct cdnsp_segment *event_deq_seg; + union cdnsp_trb *cmd_trb; + dma_addr_t cmd_deq_dma; + union cdnsp_trb *event; + u32 cycle_state; + int ret, val; + u64 cmd_dma; + u32 flags; + + cmd_trb = pdev->cmd.command_trb; + pdev->cmd.status = 0; + + trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic); + + ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val, + !CMD_RING_BUSY(val), 1, + CDNSP_CMD_TIMEOUT); + if (ret) { + dev_err(pdev->dev, "ERR: Timeout while waiting for command\n"); + trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic); + pdev->cdnsp_state = CDNSP_STATE_DYING; + return -ETIMEDOUT; + } + + event = pdev->event_ring->dequeue; + event_deq_seg = pdev->event_ring->deq_seg; + cycle_state = pdev->event_ring->cycle_state; + + cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb); + if (!cmd_deq_dma) + return -EINVAL; + + while (1) { + flags = le32_to_cpu(event->event_cmd.flags); + + /* Check the owner of the TRB. */ + if ((flags & TRB_CYCLE) != cycle_state) + return -EINVAL; + + cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb); + + /* + * Check whether the completion event is for last queued + * command. 
+ */ + if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION || + cmd_dma != (u64)cmd_deq_dma) { + if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) { + event++; + continue; + } + + if (cdnsp_last_trb_on_ring(pdev->event_ring, + event_deq_seg, event)) + cycle_state ^= 1; + + event_deq_seg = event_deq_seg->next; + event = event_deq_seg->trbs; + continue; + } + + trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic); + + pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)); + if (pdev->cmd.status == COMP_SUCCESS) + return 0; + + return -pdev->cmd.status; + } +} + +int cdnsp_halt_endpoint(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + int value) +{ + int ret; + + trace_cdnsp_ep_halt(value ? "Set" : "Clear"); + + ret = cdnsp_cmd_stop_ep(pdev, pep); + if (ret) + return ret; + + if (value) { + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) { + cdnsp_queue_halt_endpoint(pdev, pep->idx); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + } + + pep->ep_state |= EP_HALTED; + } else { + cdnsp_queue_reset_ep(pdev, pep->idx); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx); + + if (ret) + return ret; + + pep->ep_state &= ~EP_HALTED; + + if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE)) + cdnsp_ring_doorbell_for_active_rings(pdev, pep); + + pep->ep_state &= ~EP_WEDGE; + } + + return 0; +} + +static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_slot_ctx *slot_ctx; + int ret = 0; + u32 ep_sts; + int i; + + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + + /* Don't issue the command if there's no endpoints to update. */ + if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0) + return 0; + + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); + ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); + ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); + + /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) { + __le32 le32 = cpu_to_le32(BIT(i)); + + if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) || + (ctrl_ctx->add_flags & le32) || i == 1) { + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); + break; + } + } + + ep_sts = GET_EP_CTX_STATE(pep->out_ctx); + + if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) && + ep_sts == EP_STATE_DISABLED) || + (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags)) + ret = cdnsp_configure_endpoint(pdev); + + trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx)); + trace_cdnsp_handle_cmd_config_ep(pep->out_ctx); + + cdnsp_zero_in_ctx(pdev); + + return ret; +} + +/* + * This submits a Reset Device Command, which will set the device state to 0, + * set the device address to 0, and disable all the endpoints except the default + * control endpoint. The USB core should come back and call + * cdnsp_setup_device(), and then re-set up the configuration. + */ +int cdnsp_reset_device(struct cdnsp_device *pdev) +{ + struct cdnsp_slot_ctx *slot_ctx; + int slot_state; + int ret, i; + + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + slot_ctx->dev_info = 0; + pdev->device_address = 0; + + /* If device is not setup, there is no point in resetting it. 
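+	 * Per cdnsp_slot_state_string(), slot states progress through
+	 * enabled/disabled, default, addressed and configured, so a slot
+	 * at or below SLOT_STATE_DEFAULT has no address or configuration
+	 * worth resetting.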
*/ + slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); + slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + trace_cdnsp_reset_device(slot_ctx); + + if (slot_state <= SLOT_STATE_DEFAULT && + pdev->eps[0].ep_state & EP_HALTED) { + cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); + } + + /* + * During Reset Device command controller shall transition the + * endpoint ep0 to the Running State. + */ + pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED); + pdev->eps[0].ep_state |= EP_ENABLED; + + if (slot_state <= SLOT_STATE_DEFAULT) + return 0; + + cdnsp_queue_reset_device(pdev); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + /* + * After Reset Device command all not default endpoints + * are in Disabled state. + */ + for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) + pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED; + + trace_cdnsp_handle_cmd_reset_dev(slot_ctx); + + if (ret) + dev_err(pdev->dev, "Reset device failed with error code %d", + ret); + + return ret; +} + +/* + * Sets the MaxPStreams field and the Linear Stream Array field. + * Sets the dequeue pointer to the stream context array. + */ +static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev, + struct cdnsp_ep_ctx *ep_ctx, + struct cdnsp_stream_info *stream_info) +{ + u32 max_primary_streams; + + /* MaxPStreams is the number of stream context array entries, not the + * number we're actually using. Must be in 2^(MaxPstreams + 1) format. + * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. + */ + max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; + ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); + ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) + | EP_HAS_LSA); + ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); +} + +/* + * The drivers use this function to prepare a bulk endpoints to use streams. + * + * Don't allow the call to succeed if endpoint only supports one stream + * (which means it doesn't support streams at all). + */ +int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +{ + unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc); + unsigned int num_stream_ctxs; + int ret; + + if (num_streams == 0) + return 0; + + if (num_streams > STREAM_NUM_STREAMS) + return -EINVAL; + + /* + * Add two to the number of streams requested to account for + * stream 0 that is reserved for controller usage and one additional + * for TASK SET FULL response. + */ + num_streams += 2; + + /* The stream context array size must be a power of two */ + num_stream_ctxs = roundup_pow_of_two(num_streams); + + trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams); + + ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams); + if (ret) + return ret; + + cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info); + + pep->ep_state |= EP_HAS_STREAMS; + pep->stream_info.td_count = 0; + pep->stream_info.first_prime_det = 0; + + /* Subtract 1 for stream 0, which drivers can't use. 
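+	 * Worked example of the accounting above: a request for 16
+	 * streams becomes num_streams = 18 (stream 0 plus the TASK SET
+	 * FULL slot), the context array is rounded up to 32 entries, and
+	 * 17 is reported back to the caller.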
*/ + return num_streams - 1; +} + +int cdnsp_disable_slot(struct cdnsp_device *pdev) +{ + int ret; + + cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + pdev->slot_id = 0; + pdev->active_port = NULL; + + trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx)); + + memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE); + memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE); + + return ret; +} + +int cdnsp_enable_slot(struct cdnsp_device *pdev) +{ + struct cdnsp_slot_ctx *slot_ctx; + int slot_state; + int ret; + + /* If device is not setup, there is no point in resetting it */ + slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); + slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + + if (slot_state != SLOT_STATE_DISABLED) + return 0; + + cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + if (ret) + goto show_trace; + + pdev->slot_id = 1; + +show_trace: + trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx)); + + return ret; +} + +/* + * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY + * or with BSR = 1 if set_address is SETUP_CONTEXT_ADDRESS. + */ +int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_slot_ctx *slot_ctx; + int dev_state = 0; + int ret; + + if (!pdev->slot_id) { + trace_cdnsp_slot_id("incorrect"); + return -EINVAL; + } + + if (!pdev->active_port->port_num) + return -EINVAL; + + slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); + dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + + if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) { + trace_cdnsp_slot_already_in_default(slot_ctx); + return 0; + } + + slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + + if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) { + ret = cdnsp_setup_addressable_priv_dev(pdev); + if (ret) + return ret; + } + + cdnsp_copy_ep0_dequeue_into_input_ctx(pdev); + + ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); + ctrl_ctx->drop_flags = 0; + + trace_cdnsp_setup_device_slot(slot_ctx); + + cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx)); + + /* Zero the input context control for later use. 
*/ + ctrl_ctx->add_flags = 0; + ctrl_ctx->drop_flags = 0; + + return ret; +} + +void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev, + struct usb_request *req, + int enable) +{ + if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable) + return; + + trace_cdnsp_lpm(enable); + + if (enable) + writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE, + &pdev->active_port->regs->portpmsc); + else + writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc); +} + +static int cdnsp_get_frame(struct cdnsp_device *pdev) +{ + return readl(&pdev->run_regs->microframe_index) >> 3; +} + +static int cdnsp_gadget_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_device *pdev; + struct cdnsp_ep *pep; + unsigned long flags; + u32 added_ctxs; + int ret; + + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || + !desc->wMaxPacketSize) + return -EINVAL; + + pep = to_cdnsp_ep(ep); + pdev = pep->pdev; + pep->ep_state &= ~EP_UNCONFIGURED; + + if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED, + "%s is already enabled\n", pep->name)) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + + added_ctxs = cdnsp_get_endpoint_flag(desc); + if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { + dev_err(pdev->dev, "ERROR: Bad endpoint number\n"); + ret = -EINVAL; + goto unlock; + } + + pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; + + if (pdev->gadget.speed == USB_SPEED_FULL) { + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) + pep->interval = desc->bInterval << 3; + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) + pep->interval = BIT(desc->bInterval - 1) << 3; + } + + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) { + if (pep->interval > BIT(12)) { + dev_err(pdev->dev, "bInterval %d not supported\n", + desc->bInterval); + ret = -EINVAL; + goto unlock; + } + cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); + } + + ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC); + if (ret) + goto unlock; + + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + ctrl_ctx->add_flags = cpu_to_le32(added_ctxs); + ctrl_ctx->drop_flags = 0; + + ret = cdnsp_update_eps_configuration(pdev, pep); + if (ret) { + cdnsp_free_endpoint_rings(pdev, pep); + goto unlock; + } + + pep->ep_state |= EP_ENABLED; + pep->ep_state &= ~EP_STOPPED; + +unlock: + trace_cdnsp_ep_enable_end(pep, 0); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int cdnsp_gadget_ep_disable(struct usb_ep *ep) +{ + struct cdnsp_input_control_ctx *ctrl_ctx; + struct cdnsp_request *preq; + struct cdnsp_device *pdev; + struct cdnsp_ep *pep; + unsigned long flags; + u32 drop_flag; + int ret = 0; + + if (!ep) + return -EINVAL; + + pep = to_cdnsp_ep(ep); + pdev = pep->pdev; + + spin_lock_irqsave(&pdev->lock, flags); + + if (!(pep->ep_state & EP_ENABLED)) { + dev_err(pdev->dev, "%s is already disabled\n", pep->name); + ret = -EINVAL; + goto finish; + } + + pep->ep_state |= EP_DIS_IN_RROGRESS; + + /* Endpoint was unconfigured by Reset Device command. */ + if (!(pep->ep_state & EP_UNCONFIGURED)) { + cdnsp_cmd_stop_ep(pdev, pep); + cdnsp_cmd_flush_ep(pdev, pep); + } + + /* Remove all queued USB requests. 
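+	 * Each request still on pending_list is dequeued here so it can
+	 * be completed back to the gadget driver before the endpoint
+	 * rings are freed below.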
*/ + while (!list_empty(&pep->pending_list)) { + preq = next_request(&pep->pending_list); + cdnsp_ep_dequeue(pep, preq); + } + + cdnsp_invalidate_ep_events(pdev, pep); + + pep->ep_state &= ~EP_DIS_IN_RROGRESS; + drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc); + ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); + ctrl_ctx->drop_flags = cpu_to_le32(drop_flag); + ctrl_ctx->add_flags = 0; + + cdnsp_endpoint_zero(pdev, pep); + + if (!(pep->ep_state & EP_UNCONFIGURED)) + ret = cdnsp_update_eps_configuration(pdev, pep); + + cdnsp_free_endpoint_rings(pdev, pep); + + pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED); + pep->ep_state |= EP_STOPPED; + +finish: + trace_cdnsp_ep_disable_end(pep, 0); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_request *preq; + + preq = kzalloc(sizeof(*preq), gfp_flags); + if (!preq) + return NULL; + + preq->epnum = pep->number; + preq->pep = pep; + + trace_cdnsp_alloc_request(preq); + + return &preq->request; +} + +static void cdnsp_gadget_ep_free_request(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdnsp_request *preq = to_cdnsp_request(request); + + trace_cdnsp_free_request(preq); + kfree(preq); +} + +static int cdnsp_gadget_ep_queue(struct usb_ep *ep, + struct usb_request *request, + gfp_t gfp_flags) +{ + struct cdnsp_request *preq; + struct cdnsp_device *pdev; + struct cdnsp_ep *pep; + unsigned long flags; + int ret; + + if (!request || !ep) + return -EINVAL; + + pep = to_cdnsp_ep(ep); + pdev = pep->pdev; + + if (!(pep->ep_state & EP_ENABLED)) { + dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n", + pep->name); + return -EINVAL; + } + + preq = to_cdnsp_request(request); + spin_lock_irqsave(&pdev->lock, flags); + ret = cdnsp_ep_enqueue(pep, preq); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_device *pdev = pep->pdev; + unsigned long flags; + int ret; + + if (request->status != -EINPROGRESS) + return 0; + + if (!pep->endpoint.desc) { + dev_err(pdev->dev, + "%s: can't dequeue to disabled endpoint\n", + pep->name); + return -ESHUTDOWN; + } + + /* Requests has been dequeued during disabling endpoint. 
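+	 * (cdnsp_gadget_ep_disable() already emptied pending_list in
+	 * that case, so there is nothing left to cancel and 0 is
+	 * returned.)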
*/ + if (!(pep->ep_state & EP_ENABLED)) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request)); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_device *pdev = pep->pdev; + struct cdnsp_request *preq; + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + + preq = next_request(&pep->pending_list); + if (value) { + if (preq) { + trace_cdnsp_ep_busy_try_halt_again(pep, 0); + ret = -EAGAIN; + goto done; + } + } + + ret = cdnsp_halt_endpoint(pdev, pep, value); + +done: + spin_unlock_irqrestore(&pdev->lock, flags); + return ret; +} + +static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep) +{ + struct cdnsp_ep *pep = to_cdnsp_ep(ep); + struct cdnsp_device *pdev = pep->pdev; + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + pep->ep_state |= EP_WEDGE; + ret = cdnsp_halt_endpoint(pdev, pep, 1); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static const struct usb_ep_ops cdnsp_gadget_ep0_ops = { + .enable = cdnsp_gadget_ep_enable, + .disable = cdnsp_gadget_ep_disable, + .alloc_request = cdnsp_gadget_ep_alloc_request, + .free_request = cdnsp_gadget_ep_free_request, + .queue = cdnsp_gadget_ep_queue, + .dequeue = cdnsp_gadget_ep_dequeue, + .set_halt = cdnsp_gadget_ep_set_halt, + .set_wedge = cdnsp_gadget_ep_set_wedge, +}; + +static const struct usb_ep_ops cdnsp_gadget_ep_ops = { + .enable = cdnsp_gadget_ep_enable, + .disable = cdnsp_gadget_ep_disable, + .alloc_request = cdnsp_gadget_ep_alloc_request, + .free_request = cdnsp_gadget_ep_free_request, + .queue = cdnsp_gadget_ep_queue, + .dequeue = cdnsp_gadget_ep_dequeue, + .set_halt = cdnsp_gadget_ep_set_halt, + .set_wedge = cdnsp_gadget_ep_set_wedge, +}; + +void cdnsp_gadget_giveback(struct cdnsp_ep *pep, + struct cdnsp_request *preq, + int status) +{ + struct cdnsp_device *pdev = pep->pdev; + + list_del(&preq->list); + + if (preq->request.status == -EINPROGRESS) + preq->request.status = status; + + usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request, + preq->direction); + + trace_cdnsp_request_giveback(preq); + + if (preq != &pdev->ep0_preq) { + spin_unlock(&pdev->lock); + usb_gadget_giveback_request(&pep->endpoint, &preq->request); + spin_lock(&pdev->lock); + } +} + +static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_CONTROL, +}; + +static int cdnsp_run(struct cdnsp_device *pdev, + enum usb_device_speed speed) +{ + u32 fs_speed = 0; + u32 temp; + int ret; + + temp = readl(&pdev->ir_set->irq_control); + temp &= ~IMOD_INTERVAL_MASK; + temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK); + writel(temp, &pdev->ir_set->irq_control); + + temp = readl(&pdev->port3x_regs->mode_addr); + + switch (speed) { + case USB_SPEED_SUPER_PLUS: + temp |= CFG_3XPORT_SSP_SUPPORT; + break; + case USB_SPEED_SUPER: + temp &= ~CFG_3XPORT_SSP_SUPPORT; + break; + case USB_SPEED_HIGH: + break; + case USB_SPEED_FULL: + fs_speed = PORT_REG6_FORCE_FS; + break; + default: + dev_err(pdev->dev, "invalid maximum_speed parameter %d\n", + speed); + fallthrough; + case USB_SPEED_UNKNOWN: + /* Default to superspeed. 
*/ + speed = USB_SPEED_SUPER; + break; + } + + if (speed >= USB_SPEED_SUPER) { + writel(temp, &pdev->port3x_regs->mode_addr); + cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc, + XDEV_RXDETECT); + } else { + cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc); + } + + cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc, + XDEV_RXDETECT); + + cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + + writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6); + + ret = cdnsp_start(pdev); + if (ret) { + ret = -ENODEV; + goto err; + } + + temp = readl(&pdev->op_regs->command); + temp |= (CMD_INTE); + writel(temp, &pdev->op_regs->command); + + temp = readl(&pdev->ir_set->irq_pending); + writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending); + + trace_cdnsp_init("Controller ready to work"); + return 0; +err: + cdnsp_halt(pdev); + return ret; +} + +static int cdnsp_gadget_udc_start(struct usb_gadget *g, + struct usb_gadget_driver *driver) +{ + enum usb_device_speed max_speed = driver->max_speed; + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + pdev->gadget_driver = driver; + + /* limit speed if necessary */ + max_speed = min(driver->max_speed, g->max_speed); + ret = cdnsp_run(pdev, max_speed); + + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +/* + * Update Event Ring Dequeue Pointer: + * - When all events have finished + * - To avoid "Event Ring Full Error" condition + */ +void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev, + union cdnsp_trb *event_ring_deq, + u8 clear_ehb) +{ + u64 temp_64; + dma_addr_t deq; + + temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue); + + /* If necessary, update the HW's version of the event ring deq ptr. */ + if (event_ring_deq != pdev->event_ring->dequeue) { + deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg, + pdev->event_ring->dequeue); + temp_64 &= ERST_PTR_MASK; + temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK); + } + + /* Clear the event handler busy flag (RW1C). */ + if (clear_ehb) + temp_64 |= ERST_EHB; + else + temp_64 &= ~ERST_EHB; + + cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue); +} + +static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev) +{ + struct cdnsp_segment *seg; + u64 val_64; + int i; + + cdnsp_initialize_ring_info(pdev->cmd_ring); + + seg = pdev->cmd_ring->first_seg; + for (i = 0; i < pdev->cmd_ring->num_segs; i++) { + memset(seg->trbs, 0, + sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1)); + seg = seg->next; + } + + /* Set the address in the Command Ring Control register. */ + val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring); + val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) | + (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) | + pdev->cmd_ring->cycle_state; + cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring); +} + +static void cdnsp_consume_all_events(struct cdnsp_device *pdev) +{ + struct cdnsp_segment *event_deq_seg; + union cdnsp_trb *event_ring_deq; + union cdnsp_trb *event; + u32 cycle_bit; + + event_ring_deq = pdev->event_ring->dequeue; + event_deq_seg = pdev->event_ring->deq_seg; + event = pdev->event_ring->dequeue; + + /* Update ring dequeue pointer. */ + while (1) { + cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE); + + /* Does the controller or driver own the TRB? 
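+		 * Ownership follows the xHCI-style cycle bit: an event
+		 * TRB belongs to the driver only while its cycle bit
+		 * matches the ring's cycle_state; the first mismatch
+		 * marks the controller's enqueue position and ends the
+		 * scan.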
*/ + if (cycle_bit != pdev->event_ring->cycle_state) + break; + + cdnsp_inc_deq(pdev, pdev->event_ring); + + if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) { + event++; + continue; + } + + if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg, + event)) + cycle_bit ^= 1; + + event_deq_seg = event_deq_seg->next; + event = event_deq_seg->trbs; + } + + cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1); +} + +static void cdnsp_stop(struct cdnsp_device *pdev) +{ + u32 temp; + + cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]); + + /* Remove internally queued request for ep0. */ + if (!list_empty(&pdev->eps[0].pending_list)) { + struct cdnsp_request *req; + + req = next_request(&pdev->eps[0].pending_list); + if (req == &pdev->ep0_preq) + cdnsp_ep_dequeue(&pdev->eps[0], req); + } + + cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc); + cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc); + cdnsp_disable_slot(pdev); + cdnsp_halt(pdev); + + temp = readl(&pdev->op_regs->status); + writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status); + temp = readl(&pdev->ir_set->irq_pending); + writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending); + + cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc); + cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc); + + /* Clear interrupt line */ + temp = readl(&pdev->ir_set->irq_pending); + temp |= IMAN_IP; + writel(temp, &pdev->ir_set->irq_pending); + + cdnsp_consume_all_events(pdev); + cdnsp_clear_cmd_ring(pdev); + + trace_cdnsp_exit("Controller stopped."); +} + +/* + * Stop controller. + * This function is called by the gadget core when the driver is removed. + * Disable slot, disable IRQs, and quiesce the controller. + */ +static int cdnsp_gadget_udc_stop(struct usb_gadget *g) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + cdnsp_stop(pdev); + pdev->gadget_driver = NULL; + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_get_frame(struct usb_gadget *g) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + + return cdnsp_get_frame(pdev); +} + +static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev) +{ + struct cdnsp_port_regs __iomem *port_regs; + u32 portpm, portsc; + + port_regs = pdev->active_port->regs; + portsc = readl(&port_regs->portsc) & PORT_PLS_MASK; + + /* Remote wakeup feature is not enabled by host. 
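+	 * Two gates apply here: on a USB 2 link sitting in U2 the
+	 * host-granted PORT_RWE bit in PORTPMSC must be set, and on a
+	 * link in U3 the device itself must have remote wakeup enabled
+	 * via SET_FEATURE (tracked in may_wakeup).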
*/ + if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) { + portpm = readl(&port_regs->portpmsc); + + if (!(portpm & PORT_RWE)) + return; + } + + if (portsc == XDEV_U3 && !pdev->may_wakeup) + return; + + cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0); + + pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING; +} + +static int cdnsp_gadget_wakeup(struct usb_gadget *g) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + __cdnsp_gadget_wakeup(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g, + int is_selfpowered) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + g->is_selfpowered = !!is_selfpowered; + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct cdnsp_device *pdev = gadget_to_cdnsp(gadget); + struct cdns *cdns = dev_get_drvdata(pdev->dev); + unsigned long flags; + + trace_cdnsp_pullup(is_on); + + /* + * Disable events handling while controller is being + * enabled/disabled. + */ + disable_irq(cdns->dev_irq); + spin_lock_irqsave(&pdev->lock, flags); + + if (!is_on) { + cdnsp_reset_device(pdev); + cdns_clear_vbus(cdns); + } else { + cdns_set_vbus(cdns); + } + + spin_unlock_irqrestore(&pdev->lock, flags); + enable_irq(cdns->dev_irq); + + return 0; +} + +static const struct usb_gadget_ops cdnsp_gadget_ops = { + .get_frame = cdnsp_gadget_get_frame, + .wakeup = cdnsp_gadget_wakeup, + .set_selfpowered = cdnsp_gadget_set_selfpowered, + .pullup = cdnsp_gadget_pullup, + .udc_start = cdnsp_gadget_udc_start, + .udc_stop = cdnsp_gadget_udc_stop, +}; + +static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + void __iomem *reg = &pdev->cap_regs->hc_capbase; + int endpoints; + + reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID); + + if (!pep->direction) { + pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET); + pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET); + pep->buffering = (pep->buffering + 1) / 2; + pep->buffering_period = (pep->buffering_period + 1) / 2; + return; + } + + endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2; + + /* Set to XBUF_TX_TAG_MASK_0 register. */ + reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32); + /* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */ + reg += pep->number * sizeof(u32) * 2; + + pep->buffering = (readl(reg) + 1) / 2; + pep->buffering_period = pep->buffering; +} + +static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev) +{ + int max_streams = HCC_MAX_PSA(pdev->hcc_params); + struct cdnsp_ep *pep; + int i; + + INIT_LIST_HEAD(&pdev->gadget.ep_list); + + if (max_streams < STREAM_LOG_STREAMS) { + dev_err(pdev->dev, "Stream size %d not supported\n", + max_streams); + return -EINVAL; + } + + max_streams = STREAM_LOG_STREAMS; + + for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) { + bool direction = !(i & 1); /* Start from OUT endpoint. */ + u8 epnum = ((i + 1) >> 1); + + if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction)) + continue; + + pep = &pdev->eps[i]; + pep->pdev = pdev; + pep->number = epnum; + pep->direction = direction; /* 0 for OUT, 1 for IN. 
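+		 * This lays the eps[] array out as: i = 0 is the
+		 * bidirectional ep0, i = 1 is ep1out (idx 1), i = 2 is
+		 * ep1in (idx 2), and so on, alternating OUT/IN for each
+		 * endpoint number.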
*/ + + /* + * Ep0 is bidirectional, so ep0in and ep0out are represented by + * pdev->eps[0] + */ + if (epnum == 0) { + snprintf(pep->name, sizeof(pep->name), "ep%d%s", + epnum, "BiDir"); + + pep->idx = 0; + usb_ep_set_maxpacket_limit(&pep->endpoint, 512); + pep->endpoint.maxburst = 1; + pep->endpoint.ops = &cdnsp_gadget_ep0_ops; + pep->endpoint.desc = &cdnsp_gadget_ep0_desc; + pep->endpoint.comp_desc = NULL; + pep->endpoint.caps.type_control = true; + pep->endpoint.caps.dir_in = true; + pep->endpoint.caps.dir_out = true; + + pdev->ep0_preq.epnum = pep->number; + pdev->ep0_preq.pep = pep; + pdev->gadget.ep0 = &pep->endpoint; + } else { + snprintf(pep->name, sizeof(pep->name), "ep%d%s", + epnum, (pep->direction) ? "in" : "out"); + + pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1; + usb_ep_set_maxpacket_limit(&pep->endpoint, 1024); + + pep->endpoint.max_streams = max_streams; + pep->endpoint.ops = &cdnsp_gadget_ep_ops; + list_add_tail(&pep->endpoint.ep_list, + &pdev->gadget.ep_list); + + pep->endpoint.caps.type_iso = true; + pep->endpoint.caps.type_bulk = true; + pep->endpoint.caps.type_int = true; + + pep->endpoint.caps.dir_in = direction; + pep->endpoint.caps.dir_out = !direction; + } + + pep->endpoint.name = pep->name; + pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx); + pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx); + cdnsp_get_ep_buffering(pdev, pep); + + dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: " + "CTRL: %s, INT: %s, BULK: %s, ISOC %s, " + "SupDir IN: %s, OUT: %s\n", + pep->name, 1024, + (pep->endpoint.caps.type_control) ? "yes" : "no", + (pep->endpoint.caps.type_int) ? "yes" : "no", + (pep->endpoint.caps.type_bulk) ? "yes" : "no", + (pep->endpoint.caps.type_iso) ? "yes" : "no", + (pep->endpoint.caps.dir_in) ? "yes" : "no", + (pep->endpoint.caps.dir_out) ? 
"yes" : "no"); + + INIT_LIST_HEAD(&pep->pending_list); + } + + return 0; +} + +static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev) +{ + struct cdnsp_ep *pep; + int i; + + for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) { + pep = &pdev->eps[i]; + if (pep->number != 0 && pep->out_ctx) + list_del(&pep->endpoint.ep_list); + } +} + +void cdnsp_disconnect_gadget(struct cdnsp_device *pdev) +{ + pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING; + + if (pdev->gadget_driver && pdev->gadget_driver->disconnect) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->disconnect(&pdev->gadget); + spin_lock(&pdev->lock); + } + + pdev->gadget.speed = USB_SPEED_UNKNOWN; + usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED); + + pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING; +} + +void cdnsp_suspend_gadget(struct cdnsp_device *pdev) +{ + if (pdev->gadget_driver && pdev->gadget_driver->suspend) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->suspend(&pdev->gadget); + spin_lock(&pdev->lock); + } +} + +void cdnsp_resume_gadget(struct cdnsp_device *pdev) +{ + if (pdev->gadget_driver && pdev->gadget_driver->resume) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->resume(&pdev->gadget); + spin_lock(&pdev->lock); + } +} + +void cdnsp_irq_reset(struct cdnsp_device *pdev) +{ + struct cdnsp_port_regs __iomem *port_regs; + + cdnsp_reset_device(pdev); + + port_regs = pdev->active_port->regs; + pdev->gadget.speed = cdnsp_port_speed(readl(port_regs)); + + spin_unlock(&pdev->lock); + usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver); + spin_lock(&pdev->lock); + + switch (pdev->gadget.speed) { + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + pdev->gadget.ep0->maxpacket = 512; + break; + case USB_SPEED_HIGH: + case USB_SPEED_FULL: + cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); + pdev->gadget.ep0->maxpacket = 64; + break; + default: + /* Low speed is not supported. */ + dev_err(pdev->dev, "Unknown device speed\n"); + break; + } + + cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); + cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY); + usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT); +} + +static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) +{ + void __iomem *reg = &pdev->cap_regs->hc_capbase; + + reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); + pdev->rev_cap = reg; + + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", + readl(&pdev->rev_cap->ctrl_revision), + readl(&pdev->rev_cap->rtl_revision), + readl(&pdev->rev_cap->ep_supported), + readl(&pdev->rev_cap->rx_buff_size), + readl(&pdev->rev_cap->tx_buff_size)); +} + +static int cdnsp_gen_setup(struct cdnsp_device *pdev) +{ + int ret; + u32 reg; + + pdev->cap_regs = pdev->regs; + pdev->op_regs = pdev->regs + + HC_LENGTH(readl(&pdev->cap_regs->hc_capbase)); + pdev->run_regs = pdev->regs + + (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK); + + /* Cache read-only capability registers */ + pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1); + pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase); + pdev->hci_version = HC_VERSION(pdev->hcc_params); + pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + + cdnsp_get_rev_cap(pdev); + + /* Make sure the Device Controller is halted. */ + ret = cdnsp_halt(pdev); + if (ret) + return ret; + + /* Reset the internal controller memory state and registers. 
 */
+	ret = cdnsp_reset(pdev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set dma_mask and coherent_dma_mask to 64-bits,
+	 * if controller supports 64-bit addressing.
+	 */
+	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
+	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
+		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
+		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
+	} else {
+		/*
+		 * This is to avoid error in cases where a 32-bit USB
+		 * controller is used on a 64-bit capable system.
+		 */
+		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
+		if (ret)
+			return ret;
+
+		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
+		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
+	}
+
+	spin_lock_init(&pdev->lock);
+
+	ret = cdnsp_mem_init(pdev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Software workaround for U1: after the transition to U1 the
+	 * controller starts gating its clock, and in some cases this
+	 * can cause the controller to get stuck.
+	 */
+	reg = readl(&pdev->port3x_regs->mode_2);
+	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
+	writel(reg, &pdev->port3x_regs->mode_2);
+
+	return 0;
+}
+
+static int __cdnsp_gadget_init(struct cdns *cdns)
+{
+	struct cdnsp_device *pdev;
+	u32 max_speed;
+	int ret = -ENOMEM;
+
+	cdns_drd_gadget_on(cdns);
+
+	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+	if (!pdev)
+		return -ENOMEM;
+
+	pm_runtime_get_sync(cdns->dev);
+
+	cdns->gadget_dev = pdev;
+	pdev->dev = cdns->dev;
+	pdev->regs = cdns->dev_regs;
+	max_speed = usb_get_maximum_speed(cdns->dev);
+
+	switch (max_speed) {
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+		break;
+	default:
+		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
+		fallthrough;
+	case USB_SPEED_UNKNOWN:
+		/* Default to SSP */
+		max_speed = USB_SPEED_SUPER_PLUS;
+		break;
+	}
+
+	pdev->gadget.ops = &cdnsp_gadget_ops;
+	pdev->gadget.name = "cdnsp-gadget";
+	pdev->gadget.speed = USB_SPEED_UNKNOWN;
+	pdev->gadget.sg_supported = 1;
+	pdev->gadget.max_speed = max_speed;
+	pdev->gadget.lpm_capable = 1;
+
+	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
+	if (!pdev->setup_buf)
+		goto free_pdev;
+
+	/*
+	 * The controller can handle unaligned buffers, but aligned
+	 * buffers improve performance.
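+	 * Setting quirk_ep_out_aligned_size makes the gadget core round OUT
+	 * request lengths up to a multiple of the endpoint's wMaxPacketSize
+	 * (see usb_ep_align_maybe()).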
+ */ + pdev->gadget.quirk_ep_out_aligned_size = true; + + ret = cdnsp_gen_setup(pdev); + if (ret) { + dev_err(pdev->dev, "Generic initialization failed %d\n", ret); + goto free_setup; + } + + ret = cdnsp_gadget_init_endpoints(pdev); + if (ret) { + dev_err(pdev->dev, "failed to initialize endpoints\n"); + goto halt_pdev; + } + + ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget); + if (ret) { + dev_err(pdev->dev, "failed to register udc\n"); + goto free_endpoints; + } + + ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq, + cdnsp_irq_handler, + cdnsp_thread_irq_handler, IRQF_SHARED, + dev_name(pdev->dev), pdev); + if (ret) + goto del_gadget; + + return 0; + +del_gadget: + usb_del_gadget_udc(&pdev->gadget); +free_endpoints: + cdnsp_gadget_free_endpoints(pdev); +halt_pdev: + cdnsp_halt(pdev); + cdnsp_reset(pdev); + cdnsp_mem_cleanup(pdev); +free_setup: + kfree(pdev->setup_buf); +free_pdev: + kfree(pdev); + + return ret; +} + +static void cdnsp_gadget_exit(struct cdns *cdns) +{ + struct cdnsp_device *pdev = cdns->gadget_dev; + + devm_free_irq(pdev->dev, cdns->dev_irq, pdev); + pm_runtime_mark_last_busy(cdns->dev); + pm_runtime_put_autosuspend(cdns->dev); + usb_del_gadget_udc(&pdev->gadget); + cdnsp_gadget_free_endpoints(pdev); + cdnsp_mem_cleanup(pdev); + kfree(pdev); + cdns->gadget_dev = NULL; + cdns_drd_gadget_off(cdns); +} + +static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup) +{ + struct cdnsp_device *pdev = cdns->gadget_dev; + unsigned long flags; + + if (pdev->link_state == XDEV_U3) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + cdnsp_disconnect_gadget(pdev); + cdnsp_stop(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated) +{ + struct cdnsp_device *pdev = cdns->gadget_dev; + enum usb_device_speed max_speed; + unsigned long flags; + int ret; + + if (!pdev->gadget_driver) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + max_speed = pdev->gadget_driver->max_speed; + + /* Limit speed if necessary. */ + max_speed = min(max_speed, pdev->gadget.max_speed); + + ret = cdnsp_run(pdev, max_speed); + + if (pdev->link_state == XDEV_U3) + __cdnsp_gadget_wakeup(pdev); + + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +/** + * cdnsp_gadget_init - initialize device structure + * @cdns: cdnsp instance + * + * This function initializes the gadget. + */ +int cdnsp_gadget_init(struct cdns *cdns) +{ + struct cdns_role_driver *rdrv; + + rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = __cdnsp_gadget_init; + rdrv->stop = cdnsp_gadget_exit; + rdrv->suspend = cdnsp_gadget_suspend; + rdrv->resume = cdnsp_gadget_resume; + rdrv->state = CDNS_ROLE_STATE_INACTIVE; + rdrv->name = "gadget"; + cdns->roles[USB_ROLE_DEVICE] = rdrv; + + return 0; +} diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h new file mode 100644 index 000000000..f740fa608 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -0,0 +1,1602 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + * + * Code based on Linux XHCI driver. + * Origin: Copyright (C) 2008 Intel Corp. + */ +#ifndef __LINUX_CDNSP_GADGET_H +#define __LINUX_CDNSP_GADGET_H + +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/usb/gadget.h> +#include <linux/irq.h> + +/* Max number slots - only 1 is allowed. 
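+ * The controller works as a single peripheral device, so only Device Slot 1
+ * is ever enabled; entry 0 of the device context array stays reserved.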
*/ +#define CDNSP_DEV_MAX_SLOTS 1 + +#define CDNSP_EP0_SETUP_SIZE 512 + +/* One control and 15 for in and 15 for out endpoints. */ +#define CDNSP_ENDPOINTS_NUM 31 + +/* Best Effort Service Latency. */ +#define CDNSP_DEFAULT_BESL 0 + +/* Device Controller command default timeout value in us */ +#define CDNSP_CMD_TIMEOUT (15 * 1000) + +/* Up to 16 ms to halt an device controller */ +#define CDNSP_MAX_HALT_USEC (16 * 1000) + +#define CDNSP_CTX_SIZE 2112 + +/* + * Controller register interface. + */ + +/** + * struct cdnsp_cap_regs - CDNSP Registers. + * @hc_capbase: Length of the capabilities register and controller + * version number + * @hcs_params1: HCSPARAMS1 - Structural Parameters 1 + * @hcs_params2: HCSPARAMS2 - Structural Parameters 2 + * @hcs_params3: HCSPARAMS3 - Structural Parameters 3 + * @hcc_params: HCCPARAMS - Capability Parameters + * @db_off: DBOFF - Doorbell array offset + * @run_regs_off: RTSOFF - Runtime register space offset + * @hcc_params2: HCCPARAMS2 Capability Parameters 2, + */ +struct cdnsp_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; + /* Reserved up to (CAPLENGTH - 0x1C) */ +}; + +/* hc_capbase bitmasks. */ +/* bits 7:0 - how long is the Capabilities register. */ +#define HC_LENGTH(p) (((p) >> 00) & GENMASK(7, 0)) +/* bits 31:16 */ +#define HC_VERSION(p) (((p) >> 16) & GENMASK(15, 1)) + +/* HCSPARAMS1 - hcs_params1 - bitmasks */ +/* bits 0:7, Max Device Endpoints */ +#define HCS_ENDPOINTS_MASK GENMASK(7, 0) +#define HCS_ENDPOINTS(p) (((p) & HCS_ENDPOINTS_MASK) >> 0) + +/* HCCPARAMS offset from PCI base address */ +#define HCC_PARAMS_OFFSET 0x10 + +/* HCCPARAMS - hcc_params - bitmasks */ +/* 1: device controller can use 64-bit address pointers. */ +#define HCC_64BIT_ADDR(p) ((p) & BIT(0)) +/* 1: device controller uses 64-byte Device Context structures. */ +#define HCC_64BYTE_CONTEXT(p) ((p) & BIT(2)) +/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15. */ +#define HCC_MAX_PSA(p) ((((p) >> 12) & 0xf) + 1) +/* Extended Capabilities pointer from PCI base. */ +#define HCC_EXT_CAPS(p) (((p) & GENMASK(31, 16)) >> 16) + +#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) + +/* db_off bitmask - bits 0:1 reserved. */ +#define DBOFF_MASK GENMASK(31, 2) + +/* run_regs_off bitmask - bits 0:4 reserved. */ +#define RTSOFF_MASK GENMASK(31, 5) + +/** + * struct cdnsp_op_regs - Device Controller Operational Registers. + * @command: USBCMD - Controller command register. + * @status: USBSTS - Controller status register. + * @page_size: This indicates the page size that the device controller supports. + * If bit n is set, the controller supports a page size of 2^(n+12), + * up to a 128MB page size. 4K is the minimum page size. + * @dnctrl: DNCTRL - Device notification control register. + * @cmd_ring: CRP - 64-bit Command Ring Pointer. + * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer. + * @config_reg: CONFIG - Configure Register + * @port_reg_base: PORTSCn - base address for Port Status and Control + * Each port has a Port Status and Control register, + * followed by a Port Power Management Status and Control + * register, a Port Link Info register, and a reserved + * register. + */ +struct cdnsp_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dnctrl; + __le64 cmd_ring; + /* rsvd: offset 0x20-2F. 
*/ + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + /* rsvd: offset 0x3C-3FF. */ + __le32 reserved4[241]; + /* port 1 registers, which serve as a base address for other ports. */ + __le32 port_reg_base; +}; + +/* Number of registers per port. */ +#define NUM_PORT_REGS 4 + +/** + * struct cdnsp_port_regs - Port Registers. + * @portsc: PORTSC - Port Status and Control Register. + * @portpmsc: PORTPMSC - Port Power Managements Status and Control Register. + * @portli: PORTLI - Port Link Info register. + */ +struct cdnsp_port_regs { + __le32 portsc; + __le32 portpmsc; + __le32 portli; + __le32 reserved; +}; + +/* + * These bits are Read Only (RO) and should be saved and written to the + * registers: 0 (connect status) and 10:13 (port speed). + * These bits are also sticky - meaning they're in the AUX well and they aren't + * changed by a hot and warm. + */ +#define CDNSP_PORT_RO (PORT_CONNECT | DEV_SPEED_MASK) + +/* + * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit: + * bits 5:8 (link state), 25:26 ("wake on" enable state) + */ +#define CDNSP_PORT_RWS (PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E) + +/* + * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect: + * bits 1 (port enable/disable), 17 ( connect changed), + * 21 (port reset changed) , 22 (Port Link State Change), + */ +#define CDNSP_PORT_RW1CS (PORT_PED | PORT_CSC | PORT_RC | PORT_PLC) + +/* USBCMD - USB command - bitmasks. */ +/* Run/Stop, controller execution - do not write unless controller is halted.*/ +#define CMD_R_S BIT(0) +/* + * Reset device controller - resets internal controller state machine and all + * registers (except PCI config regs). + */ +#define CMD_RESET BIT(1) +/* Event Interrupt Enable - a '1' allows interrupts from the controller. */ +#define CMD_INTE BIT(2) +/* + * Device System Error Interrupt Enable - get out-of-band signal for + * controller errors. + */ +#define CMD_DSEIE BIT(3) +/* device controller save/restore state. */ +#define CMD_CSS BIT(8) +#define CMD_CRS BIT(9) +/* + * Enable Wrap Event - '1' means device controller generates an event + * when MFINDEX wraps. + */ +#define CMD_EWE BIT(10) +/* 1: device enabled */ +#define CMD_DEVEN BIT(17) +/* bits 18:31 are reserved (and should be preserved on writes). */ + +/* Command register values to disable interrupts. */ +#define CDNSP_IRQS (CMD_INTE | CMD_DSEIE | CMD_EWE) + +/* USBSTS - USB status - bitmasks */ +/* controller not running - set to 1 when run/stop bit is cleared. */ +#define STS_HALT BIT(0) +/* + * serious error, e.g. PCI parity error. The controller will clear + * the run/stop bit. + */ +#define STS_FATAL BIT(2) +/* event interrupt - clear this prior to clearing any IP flags in IR set.*/ +#define STS_EINT BIT(3) +/* port change detect */ +#define STS_PCD BIT(4) +/* save state status - '1' means device controller is saving state. */ +#define STS_SSS BIT(8) +/* restore state status - '1' means controllers is restoring state. */ +#define STS_RSS BIT(9) +/* 1: save or restore error */ +#define STS_SRE BIT(10) +/* 1: device Not Ready to accept doorbell or op reg writes after reset. */ +#define STS_CNR BIT(11) +/* 1: internal Device Controller Error.*/ +#define STS_HCE BIT(12) + +/* CRCR - Command Ring Control Register - cmd_ring bitmasks. */ +/* bit 0 is the command ring cycle state. */ +#define CMD_RING_CS BIT(0) +/* stop ring immediately - abort the currently executing command. */ +#define CMD_RING_ABORT BIT(2) +/* + * Command Ring Busy. 
+ * Set when Doorbell register is written with DB for command and cleared when
+ * the controller reached end of CR.
+ */
+#define CMD_RING_BUSY(p)	((p) & BIT(4))
+/* 1: command ring is running */
+#define CMD_RING_RUNNING	BIT(3)
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS	GENMASK(5, 0)
+
+/* CONFIG - Configure Register - config_reg bitmasks. */
+/* bits 0:7 - maximum number of device slots enabled. */
+#define MAX_DEVS	GENMASK(7, 0)
+/* bit 8: U3 Entry Enabled, assert PLC when controller enters U3. */
+#define CONFIG_U3E	BIT(8)
+
+/* PORTSC - Port Status and Control Register - port_reg_base bitmasks */
+/* 1: device connected. */
+#define PORT_CONNECT	BIT(0)
+/* 1: port enabled. */
+#define PORT_PED	BIT(1)
+/* 1: port reset signaling asserted. */
+#define PORT_RESET	BIT(4)
+/*
+ * Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe sets the link state.
+ */
+#define PORT_PLS_MASK	GENMASK(8, 5)
+#define XDEV_U0		(0x0 << 5)
+#define XDEV_U1		(0x1 << 5)
+#define XDEV_U2		(0x2 << 5)
+#define XDEV_U3		(0x3 << 5)
+#define XDEV_DISABLED	(0x4 << 5)
+#define XDEV_RXDETECT	(0x5 << 5)
+#define XDEV_INACTIVE	(0x6 << 5)
+#define XDEV_POLLING	(0x7 << 5)
+#define XDEV_RECOVERY	(0x8 << 5)
+#define XDEV_HOT_RESET	(0x9 << 5)
+#define XDEV_COMP_MODE	(0xa << 5)
+#define XDEV_TEST_MODE	(0xb << 5)
+#define XDEV_RESUME	(0xf << 5)
+/* 1: port has power. */
+#define PORT_POWER	BIT(9)
+/*
+ * bits 10:13 indicate device speed:
+ *	0 - undefined speed - port hasn't been initialized by a reset yet
+ *	1 - full speed
+ *	2 - reserved (low speed is not supported)
+ *	3 - high speed
+ *	4 - super speed
+ *	5 - super speed plus
+ *	6-15 - reserved
+ */
+#define DEV_SPEED_MASK		GENMASK(13, 10)
+#define XDEV_FS			(0x1 << 10)
+#define XDEV_HS			(0x3 << 10)
+#define XDEV_SS			(0x4 << 10)
+#define XDEV_SSP		(0x5 << 10)
+#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0 << 10))
+#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p)	(((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE	BIT(16)
+/* 1: connect status change */
+#define PORT_CSC		BIT(17)
+/* 1: warm reset for a USB 3.0 device is done. */
+#define PORT_WRC		BIT(19)
+/* 1: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC			BIT(21)
+/*
+ * port link status change - set on some port link state transitions:
+ * Transition			Reason
+ * ----------------------------------------------------------------------------
+ * - U3 to Resume		Wakeup signaling from a device
+ * - Resume to Recovery to U0	USB 3.0 device resume
+ * - Resume to U0		USB 2.0 device resume
+ * - U3 to Recovery to U0	Software resume of USB 3.0 device complete
+ * - U3 to U0			Software resume of USB 2.0 device complete
+ * - U2 to U0			L1 resume of USB 2.1 device complete
+ * - U0 to U0			L1 entry rejection by USB 2.1 device
+ * - U0 to disabled		L1 entry error with USB 2.1 device
+ * - Any state to inactive	Error on USB 3.0 port
+ */
+#define PORT_PLC		BIT(22)
+/* Port configure error change - port failed to configure its link partner. */
+#define PORT_CEC		BIT(23)
+/* Wake on connect (enable).
*/ +#define PORT_WKCONN_E BIT(25) +/* Wake on disconnect (enable). */ +#define PORT_WKDISC_E BIT(26) +/* Indicates if Warm Reset is being received. */ +#define PORT_WR BIT(31) + +#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC) + +/* PORTPMSCUSB3 - Port Power Management Status and Control - bitmasks. */ +/* Enables U1 entry. */ +#define PORT_U1_TIMEOUT_MASK GENMASK(7, 0) +#define PORT_U1_TIMEOUT(p) ((p) & PORT_U1_TIMEOUT_MASK) +/* Enables U2 entry .*/ +#define PORT_U2_TIMEOUT_MASK GENMASK(14, 8) +#define PORT_U2_TIMEOUT(p) (((p) << 8) & PORT_U2_TIMEOUT_MASK) + +/* PORTPMSCUSB2 - Port Power Management Status and Control - bitmasks. */ +#define PORT_L1S_MASK GENMASK(2, 0) +#define PORT_L1S(p) ((p) & PORT_L1S_MASK) +#define PORT_L1S_ACK PORT_L1S(1) +#define PORT_L1S_NYET PORT_L1S(2) +#define PORT_L1S_STALL PORT_L1S(3) +#define PORT_L1S_TIMEOUT PORT_L1S(4) +/* Remote Wake Enable. */ +#define PORT_RWE BIT(3) +/* Best Effort Service Latency (BESL). */ +#define PORT_BESL(p) (((p) << 4) & GENMASK(7, 4)) +/* Hardware LPM Enable (HLE). */ +#define PORT_HLE BIT(16) +/* Received Best Effort Service Latency (BESL). */ +#define PORT_RRBESL(p) (((p) & GENMASK(20, 17)) >> 17) +/* Port Test Control. */ +#define PORT_TEST_MODE_MASK GENMASK(31, 28) +#define PORT_TEST_MODE(p) (((p) << 28) & PORT_TEST_MODE_MASK) + +/** + * struct cdnsp_intr_reg - Interrupt Register Set. + * @irq_pending: IMAN - Interrupt Management Register. Used to enable + * interrupts and check for pending interrupts. + * @irq_control: IMOD - Interrupt Moderation Register. + * Used to throttle interrupts. + * @erst_size: Number of segments in the Event Ring Segment Table (ERST). + * @erst_base: ERST base address. + * @erst_dequeue: Event ring dequeue pointer. + * + * Each interrupter (defined by a MSI-X vector) has an event ring and an Event + * Ring Segment Table (ERST) associated with it. The event ring is comprised of + * multiple segments of the same size. The controller places events on the ring + * and "updates the Cycle bit in the TRBs to indicate to software the current + * position of the Enqueue Pointer." The driver processes those events and + * updates the dequeue pointer. + */ +struct cdnsp_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +/* IMAN - Interrupt Management Register - irq_pending bitmasks l. */ +#define IMAN_IE BIT(1) +#define IMAN_IP BIT(0) +/* bits 2:31 need to be preserved */ +#define IMAN_IE_SET(p) ((p) | IMAN_IE) +#define IMAN_IE_CLEAR(p) ((p) & ~IMAN_IE) + +/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */ +/* + * Minimum interval between interrupts (in 250ns intervals). The interval + * between interrupts will be longer if there are no events on the event ring. + * Default is 4000 (1 ms). + */ +#define IMOD_INTERVAL_MASK GENMASK(15, 0) +/* Counter used to count down the time to the next interrupt - HW use only */ +#define IMOD_COUNTER_MASK GENMASK(31, 16) +#define IMOD_DEFAULT_INTERVAL 0 + +/* erst_size bitmasks. */ +/* Preserve bits 16:31 of erst_size. */ +#define ERST_SIZE_MASK GENMASK(31, 16) + +/* erst_dequeue bitmasks. */ +/* + * Dequeue ERST Segment Index (DESI) - Segment number (or alias) + * where the current dequeue pointer lies. This is an optional HW hint. + */ +#define ERST_DESI_MASK GENMASK(2, 0) +/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced. 
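+ * EHB is write-1-to-clear; the driver clears it when it writes the updated
+ * dequeue pointer back to erst_dequeue (see cdnsp_update_erst_dequeue()).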
*/ +#define ERST_EHB BIT(3) +#define ERST_PTR_MASK GENMASK(3, 0) + +/** + * struct cdnsp_run_regs + * @microframe_index: MFINDEX - current microframe number. + * @ir_set: Array of Interrupter registers. + * + * Device Controller Runtime Registers: + * "Software should read and write these registers using only Dword (32 bit) + * or larger accesses" + */ +struct cdnsp_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct cdnsp_intr_reg ir_set[128]; +}; + +/** + * USB2.0 Port Peripheral Configuration Registers. + * @ext_cap: Header register for Extended Capability. + * @port_reg1: Timer Configuration Register. + * @port_reg2: Timer Configuration Register. + * @port_reg3: Timer Configuration Register. + * @port_reg4: Timer Configuration Register. + * @port_reg5: Timer Configuration Register. + * @port_reg6: Chicken bits for USB20PPP. + */ +struct cdnsp_20port_cap { + __le32 ext_cap; + __le32 port_reg1; + __le32 port_reg2; + __le32 port_reg3; + __le32 port_reg4; + __le32 port_reg5; + __le32 port_reg6; +}; + +/* Extended capability register fields */ +#define EXT_CAPS_ID(p) (((p) >> 0) & GENMASK(7, 0)) +#define EXT_CAPS_NEXT(p) (((p) >> 8) & GENMASK(7, 0)) +/* Extended capability IDs - ID 0 reserved */ +#define EXT_CAPS_PROTOCOL 2 + +/* USB 2.0 Port Peripheral Configuration Extended Capability */ +#define EXT_CAP_CFG_DEV_20PORT_CAP_ID 0xC1 +/* + * Setting this bit to '1' enables automatic wakeup from L1 state on transfer + * TRB prepared when USBSSP operates in USB2.0 mode. + */ +#define PORT_REG6_L1_L0_HW_EN BIT(1) +/* + * Setting this bit to '1' forces Full Speed when USBSSP operates in USB2.0 + * mode (disables High Speed). + */ +#define PORT_REG6_FORCE_FS BIT(0) + +/** + * USB3.x Port Peripheral Configuration Registers. + * @ext_cap: Header register for Extended Capability. + * @mode_addr: Miscellaneous 3xPORT operation mode configuration register. + * @mode_2: 3x Port Control Register 2. + */ +struct cdnsp_3xport_cap { + __le32 ext_cap; + __le32 mode_addr; + __le32 reserved[52]; + __le32 mode_2; +}; + +/* Extended Capability Header for 3XPort Configuration Registers. */ +#define D_XEC_CFG_3XPORT_CAP 0xC0 +#define CFG_3XPORT_SSP_SUPPORT BIT(31) +#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0) + +/* Revision Extended Capability ID */ +#define RTL_REV_CAP 0xC4 +#define RTL_REV_CAP_RX_BUFF_CMD_SIZE BITMASK(31, 24) +#define RTL_REV_CAP_RX_BUFF_SIZE BITMASK(15, 0) +#define RTL_REV_CAP_TX_BUFF_CMD_SIZE BITMASK(31, 24) +#define RTL_REV_CAP_TX_BUFF_SIZE BITMASK(15, 0) + +#define CDNSP_VER_1 0x00000000 +#define CDNSP_VER_2 0x10000000 + +#define CDNSP_IF_EP_EXIST(pdev, ep_num, dir) \ + (readl(&(pdev)->rev_cap->ep_supported) & \ + (BIT(ep_num) << ((dir) ? 0 : 16))) + +/** + * struct cdnsp_rev_cap - controller capabilities. + * @ext_cap: Header for RTL Revision Extended Capability. + * @rtl_revision: RTL revision. + * @rx_buff_size: Rx buffer sizes. + * @tx_buff_size: Tx buffer sizes. + * @ep_supported: Supported endpoints. + * @ctrl_revision: Controller revision ID. + */ +struct cdnsp_rev_cap { + __le32 ext_cap; + __le32 rtl_revision; + __le32 rx_buff_size; + __le32 tx_buff_size; + __le32 ep_supported; + __le32 ctrl_revision; +}; + +/* USB2.0 Port Peripheral Configuration Registers. */ +#define D_XEC_PRE_REGS_CAP 0xC8 +#define REG_CHICKEN_BITS_2_OFFSET 0x48 +#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) + +/* XBUF Extended Capability ID. 
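+ * The XBUF capability describes the on-chip buffers assigned to each
+ * endpoint; cdnsp_get_ep_buffering() reads the tag masks below to fill in
+ * pep->buffering and pep->buffering_period.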
*/ +#define XBUF_CAP_ID 0xCB +#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C +#define XBUF_RX_TAG_MASK_1_OFFSET 0x24 +#define XBUF_TX_CMD_OFFSET 0x2C + +/** + * struct cdnsp_doorbell_array. + * @cmd_db: Command ring doorbell register. + * @ep_db: Endpoint ring doorbell register. + * Bits 0 - 7: Endpoint target. + * Bits 8 - 15: RsvdZ. + * Bits 16 - 31: Stream ID. + */ +struct cdnsp_doorbell_array { + __le32 cmd_db; + __le32 ep_db; +}; + +#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) +#define DB_VALUE_EP0_OUT(ep, stream) ((ep) & 0xff) +#define DB_VALUE_CMD 0x00000000 + +/** + * struct cdnsp_container_ctx. + * @type: Type of context. Used to calculated offsets to contained contexts. + * @size: Size of the context data. + * @ctx_size: context data structure size - 64 or 32 bits. + * @dma: dma address of the bytes. + * @bytes: The raw context data given to HW. + * + * Represents either a Device or Input context. Holds a pointer to the raw + * memory used for the context (bytes) and dma address of it (dma). + */ +struct cdnsp_container_ctx { + unsigned int type; +#define CDNSP_CTX_TYPE_DEVICE 0x1 +#define CDNSP_CTX_TYPE_INPUT 0x2 + int size; + int ctx_size; + dma_addr_t dma; + u8 *bytes; +}; + +/** + * struct cdnsp_slot_ctx + * @dev_info: Device speed, and last valid endpoint. + * @dev_port: Device port number that is needed to access the USB device. + * @int_target: Interrupter target number. + * @dev_state: Slot state and device address. + * + * Slot Context - This assumes the controller uses 32-byte context + * structures. If the controller uses 64-byte contexts, there is an additional + * 32 bytes reserved at the end of the slot context for controller internal use. + */ +struct cdnsp_slot_ctx { + __le32 dev_info; + __le32 dev_port; + __le32 int_target; + __le32 dev_state; + /* offset 0x10 to 0x1f reserved for controller internal use. */ + __le32 reserved[4]; +}; + +/* Bits 20:23 in the Slot Context are the speed for the device. */ +#define SLOT_SPEED_FS (XDEV_FS << 10) +#define SLOT_SPEED_HS (XDEV_HS << 10) +#define SLOT_SPEED_SS (XDEV_SS << 10) +#define SLOT_SPEED_SSP (XDEV_SSP << 10) + +/* dev_info bitmasks. */ +/* Device speed - values defined by PORTSC Device Speed field - 20:23. */ +#define DEV_SPEED GENMASK(23, 20) +#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20) +/* Index of the last valid endpoint context in this device context - 27:31. */ +#define LAST_CTX_MASK ((unsigned int)GENMASK(31, 27)) +#define LAST_CTX(p) ((p) << 27) +#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1) +#define SLOT_FLAG BIT(0) +#define EP0_FLAG BIT(1) + +/* dev_port bitmasks */ +/* Device port number that is needed to access the USB device. */ +#define DEV_PORT(p) (((p) & 0xff) << 16) + +/* dev_state bitmasks */ +/* USB device address - assigned by the controller. */ +#define DEV_ADDR_MASK GENMASK(7, 0) +/* Slot state */ +#define SLOT_STATE GENMASK(31, 27) +#define GET_SLOT_STATE(p) (((p) & SLOT_STATE) >> 27) + +#define SLOT_STATE_DISABLED 0 +#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED +#define SLOT_STATE_DEFAULT 1 +#define SLOT_STATE_ADDRESSED 2 +#define SLOT_STATE_CONFIGURED 3 + +/** + * struct cdnsp_ep_ctx. + * @ep_info: Endpoint state, streams, mult, and interval information. + * @ep_info2: Information on endpoint type, max packet size, max burst size, + * error count, and whether the controller will force an event for + * all transactions. + * @deq: 64-bit ring dequeue pointer address. If the endpoint only + * defines one stream, this points to the endpoint transfer ring. 
+ * Otherwise, it points to a stream context array, which has a + * ring pointer for each flow. + * @tx_info: Average TRB lengths for the endpoint ring and + * max payload within an Endpoint Service Interval Time (ESIT). + * + * Endpoint Context - This assumes the controller uses 32-byte context + * structures. If the controller uses 64-byte contexts, there is an additional + * 32 bytes reserved at the end of the endpoint context for controller internal + * use. + */ +struct cdnsp_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + /* offset 0x14 - 0x1f reserved for controller internal use. */ + __le32 reserved[3]; +}; + +/* ep_info bitmasks. */ +/* + * Endpoint State - bits 0:2: + * 0 - disabled + * 1 - running + * 2 - halted due to halt condition + * 3 - stopped + * 4 - TRB error + * 5-7 - reserved + */ +#define EP_STATE_MASK GENMASK(3, 0) +#define EP_STATE_DISABLED 0 +#define EP_STATE_RUNNING 1 +#define EP_STATE_HALTED 2 +#define EP_STATE_STOPPED 3 +#define EP_STATE_ERROR 4 +#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK) + +/* Mult - Max number of burst within an interval, in EP companion desc. */ +#define EP_MULT(p) (((p) << 8) & GENMASK(9, 8)) +#define CTX_TO_EP_MULT(p) (((p) & GENMASK(9, 8)) >> 8) +/* bits 10:14 are Max Primary Streams. */ +/* bit 15 is Linear Stream Array. */ +/* Interval - period between requests to an endpoint - 125u increments. */ +#define EP_INTERVAL(p) (((p) << 16) & GENMASK(23, 16)) +#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) & GENMASK(23, 16)) >> 16)) +#define CTX_TO_EP_INTERVAL(p) (((p) & GENMASK(23, 16)) >> 16) +#define EP_MAXPSTREAMS_MASK GENMASK(14, 10) +#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10) +/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ +#define EP_HAS_LSA BIT(15) + +/* ep_info2 bitmasks */ +#define ERROR_COUNT(p) (((p) & 0x3) << 1) +#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7) +#define EP_TYPE(p) ((p) << 3) +#define ISOC_OUT_EP 1 +#define BULK_OUT_EP 2 +#define INT_OUT_EP 3 +#define CTRL_EP 4 +#define ISOC_IN_EP 5 +#define BULK_IN_EP 6 +#define INT_IN_EP 7 +/* bit 6 reserved. */ +/* bit 7 is Device Initiate Disable - for disabling stream selection. */ +#define MAX_BURST(p) (((p) << 8) & GENMASK(15, 8)) +#define CTX_TO_MAX_BURST(p) (((p) & GENMASK(15, 8)) >> 8) +#define MAX_PACKET(p) (((p) << 16) & GENMASK(31, 16)) +#define MAX_PACKET_MASK GENMASK(31, 16) +#define MAX_PACKET_DECODED(p) (((p) & GENMASK(31, 16)) >> 16) + +/* tx_info bitmasks. */ +#define EP_AVG_TRB_LENGTH(p) ((p) & GENMASK(15, 0)) +#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) << 16) & GENMASK(31, 16)) +#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) & GENMASK(23, 16)) >> 16) << 24) +#define CTX_TO_MAX_ESIT_PAYLOAD_LO(p) (((p) & GENMASK(31, 16)) >> 16) +#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) & GENMASK(31, 24)) >> 24) + +/* deq bitmasks. */ +#define EP_CTX_CYCLE_MASK BIT(0) +#define CTX_DEQ_MASK (~0xfL) + +/** + * struct cdnsp_input_control_context + * Input control context; + * + * @drop_context: Set the bit of the endpoint context you want to disable. + * @add_context: Set the bit of the endpoint context you want to enable. + */ +struct cdnsp_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +/** + * Represents everything that is needed to issue a command on the command ring. + * + * @in_ctx: Pointer to input context structure. 
+ * @status: Command Completion Code for last command.
+ * @command_trb: Pointer to command TRB.
+ */
+struct cdnsp_command {
+	/* Input context for changing device state. */
+	struct cdnsp_container_ctx *in_ctx;
+	u32 status;
+	union cdnsp_trb *command_trb;
+};
+
+/**
+ * Stream context structure.
+ *
+ * @stream_ring: 64-bit stream ring address, cycle state, and stream type.
+ * @reserved: offset 0x14 - 0x1f reserved for controller internal use.
+ */
+struct cdnsp_stream_ctx {
+	__le64 stream_ring;
+	__le32 reserved[2];
+};
+
+/* Stream Context Types - bits 3:1 of stream ctx deq ptr. */
+#define SCT_FOR_CTX(p)	(((p) << 1) & GENMASK(3, 1))
+/* Secondary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_SEC_TR	0
+/* Primary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_PRI_TR	1
+
+/**
+ * struct cdnsp_stream_info: Representing everything that is needed to
+ *			     support stream capable endpoints.
+ * @stream_rings: Array of pointers containing Transfer rings for all
+ *		  supported streams.
+ * @num_streams: Number of streams, including stream 0.
+ * @stream_ctx_array: The stream context array may be bigger than the number
+ *		      of streams the driver asked for.
+ * @num_stream_ctxs: Number of streams.
+ * @ctx_array_dma: DMA address of the stream context array.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ *		     stream rings.
+ * @td_count: Number of TDs associated with endpoint.
+ * @first_prime_det: First PRIME packet detected.
+ * @drbls_count: Number of allowed doorbells.
+ */
+struct cdnsp_stream_info {
+	struct cdnsp_ring **stream_rings;
+	unsigned int num_streams;
+	struct cdnsp_stream_ctx *stream_ctx_array;
+	unsigned int num_stream_ctxs;
+	dma_addr_t ctx_array_dma;
+	struct radix_tree_root trb_address_map;
+	int td_count;
+	u8 first_prime_det;
+#define STREAM_DRBL_FIFO_DEPTH 2
+	u8 drbls_count;
+};
+
+#define STREAM_LOG_STREAMS 4
+#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS)
+
+#if STREAM_LOG_STREAMS > 16 || STREAM_LOG_STREAMS < 1
+#error "Not supported stream value"
+#endif
+
+/**
+ * struct cdnsp_ep - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_list: List of requests queuing on transfer ring.
+ * @pdev: Device associated with this endpoint.
+ * @number: Endpoint number (1 - 15).
+ * @idx: The device context index (DCI).
+ * @interval: Interval between packets used for ISOC endpoint.
+ * @name: A human readable name e.g. ep1out.
+ * @direction: Endpoint direction.
+ * @buffering: Number of on-chip buffers related to endpoint.
+ * @buffering_period: Number of on-chip buffers related to periodic endpoint.
+ * @in_ctx: Pointer to input endpoint context structure.
+ * @out_ctx: Pointer to output endpoint context structure.
+ * @ring: Pointer to transfer ring.
+ * @stream_info: Holds stream information.
+ * @ep_state: Current state of endpoint.
+ * @skip: Sometimes the controller cannot process the isochronous endpoint
+ *	  ring quickly enough, and it will miss some isoc tds on the ring
+ *	  and generate a Missed Service Error Event.
+ *	  Set the skip flag when a Missed Service Error Event is received
+ *	  and process the missed tds on the endpoint ring.
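+ *	  This mirrors the Missed Service Error Event handling in the xHCI
+ *	  host driver on which this code is based.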
+ */ +struct cdnsp_ep { + struct usb_ep endpoint; + struct list_head pending_list; + struct cdnsp_device *pdev; + u8 number; + u8 idx; + u32 interval; + char name[20]; + u8 direction; + u8 buffering; + u8 buffering_period; + struct cdnsp_ep_ctx *in_ctx; + struct cdnsp_ep_ctx *out_ctx; + struct cdnsp_ring *ring; + struct cdnsp_stream_info stream_info; + unsigned int ep_state; +#define EP_ENABLED BIT(0) +#define EP_DIS_IN_RROGRESS BIT(1) +#define EP_HALTED BIT(2) +#define EP_STOPPED BIT(3) +#define EP_WEDGE BIT(4) +#define EP0_HALTED_STATUS BIT(5) +#define EP_HAS_STREAMS BIT(6) +#define EP_UNCONFIGURED BIT(7) + + bool skip; +}; + +/** + * struct cdnsp_device_context_array + * @dev_context_ptr: Array of 64-bit DMA addresses for device contexts. + * @dma: DMA address for device contexts structure. + */ +struct cdnsp_device_context_array { + __le64 dev_context_ptrs[CDNSP_DEV_MAX_SLOTS + 1]; + dma_addr_t dma; +}; + +/** + * struct cdnsp_transfer_event. + * @buffer: 64-bit buffer address, or immediate data. + * @transfer_len: Data length transferred. + * @flags: Field is interpreted differently based on the type of TRB. + */ +struct cdnsp_transfer_event { + __le64 buffer; + __le32 transfer_len; + __le32 flags; +}; + +/* Invalidate event after disabling endpoint. */ +#define TRB_EVENT_INVALIDATE 8 + +/* Transfer event TRB length bit mask. */ +/* bits 0:23 */ +#define EVENT_TRB_LEN(p) ((p) & GENMASK(23, 0)) +/* Completion Code - only applicable for some types of TRBs */ +#define COMP_CODE_MASK (0xff << 24) +#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24) +#define COMP_INVALID 0 +#define COMP_SUCCESS 1 +#define COMP_DATA_BUFFER_ERROR 2 +#define COMP_BABBLE_DETECTED_ERROR 3 +#define COMP_TRB_ERROR 5 +#define COMP_RESOURCE_ERROR 7 +#define COMP_NO_SLOTS_AVAILABLE_ERROR 9 +#define COMP_INVALID_STREAM_TYPE_ERROR 10 +#define COMP_SLOT_NOT_ENABLED_ERROR 11 +#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12 +#define COMP_SHORT_PACKET 13 +#define COMP_RING_UNDERRUN 14 +#define COMP_RING_OVERRUN 15 +#define COMP_VF_EVENT_RING_FULL_ERROR 16 +#define COMP_PARAMETER_ERROR 17 +#define COMP_CONTEXT_STATE_ERROR 19 +#define COMP_EVENT_RING_FULL_ERROR 21 +#define COMP_INCOMPATIBLE_DEVICE_ERROR 22 +#define COMP_MISSED_SERVICE_ERROR 23 +#define COMP_COMMAND_RING_STOPPED 24 +#define COMP_COMMAND_ABORTED 25 +#define COMP_STOPPED 26 +#define COMP_STOPPED_LENGTH_INVALID 27 +#define COMP_STOPPED_SHORT_PACKET 28 +#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29 +#define COMP_ISOCH_BUFFER_OVERRUN 31 +#define COMP_EVENT_LOST_ERROR 32 +#define COMP_UNDEFINED_ERROR 33 +#define COMP_INVALID_STREAM_ID_ERROR 34 + +/*Transfer Event NRDY bit fields */ +#define TRB_TO_DEV_STREAM(p) ((p) & GENMASK(16, 0)) +#define TRB_TO_HOST_STREAM(p) ((p) & GENMASK(16, 0)) +#define STREAM_PRIME_ACK 0xFFFE +#define STREAM_REJECTED 0xFFFF + +/** Transfer Event bit fields **/ +#define TRB_TO_EP_ID(p) (((p) & GENMASK(20, 16)) >> 16) + +/** + * struct cdnsp_link_trb + * @segment_ptr: 64-bit segment pointer. + * @intr_target: Interrupter target. + * @control: Flags. + */ +struct cdnsp_link_trb { + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +/* control bitfields */ +#define LINK_TOGGLE BIT(1) + +/** + * struct cdnsp_event_cmd - Command completion event TRB. + * cmd_trb: Pointer to command TRB, or the value passed by the event data trb + * status: Command completion parameters and error code. + * flags: Flags. 
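+ *
+ * The driver keeps a single struct cdnsp_command pending (pdev->cmd), so a
+ * completion event is matched back to it through the cmd_trb field.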
+ */
+struct cdnsp_event_cmd {
+	__le64 cmd_trb;
+	__le32 status;
+	__le32 flags;
+};
+
+/* flags bitmasks */
+
+/* Address device - disable SetAddress. */
+#define TRB_BSR		BIT(9)
+
+/* Configure Endpoint - Deconfigure. */
+#define TRB_DC		BIT(9)
+
+/* Force Header */
+#define TRB_FH_TO_PACKET_TYPE(p)	((p) & GENMASK(4, 0))
+#define TRB_FH_TR_PACKET		0x4
+#define TRB_FH_TO_DEVICE_ADDRESS(p)	(((p) << 25) & GENMASK(31, 25))
+#define TRB_FH_TR_PACKET_DEV_NOT	0x6
+#define TRB_FH_TO_NOT_TYPE(p)		(((p) << 4) & GENMASK(7, 4))
+#define TRB_FH_TR_PACKET_FUNCTION_WAKE	0x1
+#define TRB_FH_TO_INTERFACE(p)		(((p) << 8) & GENMASK(15, 8))
+
+enum cdnsp_setup_dev {
+	SETUP_CONTEXT_ONLY,
+	SETUP_CONTEXT_ADDRESS,
+};
+
+/* bits 24:31 are the slot ID. */
+#define TRB_TO_SLOT_ID(p)	(((p) & GENMASK(31, 24)) >> 24)
+#define SLOT_ID_FOR_TRB(p)	(((p) << 24) & GENMASK(31, 24))
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB. */
+#define TRB_TO_EP_INDEX(p)	(((p) >> 16) & 0x1f)
+
+#define EP_ID_FOR_TRB(p)	((((p) + 1) << 16) & GENMASK(20, 16))
+
+#define SUSPEND_PORT_FOR_TRB(p)	(((p) & 1) << 23)
+#define TRB_TO_SUSPEND_PORT(p)	(((p) >> 23) & 0x1)
+#define LAST_EP_INDEX		30
+
+/* Set TR Dequeue Pointer command TRB fields. */
+#define TRB_TO_STREAM_ID(p)	((((p) & GENMASK(31, 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p)	((((p)) << 16) & GENMASK(31, 16))
+#define SCT_FOR_TRB(p)		(((p) << 1) & 0x7)
+
+/* Link TRB specific fields. */
+#define TRB_TC			BIT(1)
+
+/* Port Status Change Event TRB fields. */
+/* Port ID - bits 31:24. */
+#define GET_PORT_ID(p)		(((p) & GENMASK(31, 24)) >> 24)
+#define SET_PORT_ID(p)		(((p) << 24) & GENMASK(31, 24))
+#define EVENT_DATA		BIT(2)
+
+/* Normal TRB fields. */
+/* transfer_len bitmasks - bits 0:16. */
+#define TRB_LEN(p)		((p) & GENMASK(16, 0))
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31). */
+#define TRB_TD_SIZE(p)		(min((p), (u32)31) << 17)
+#define GET_TD_SIZE(p)		(((p) & GENMASK(21, 17)) >> 17)
+/*
+ * Controller uses the TD_SIZE field for TBC if Extended TBC
+ * is enabled (ETE).
+ */
+#define TRB_TD_SIZE_TBC(p)	(min((p), (u32)31) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at. */
+#define TRB_INTR_TARGET(p)	(((p) << 22) & GENMASK(31, 22))
+#define GET_INTR_TARGET(p)	(((p) & GENMASK(31, 22)) >> 22)
+/*
+ * Total burst count field, Rsvdz on controller with Extended TBC
+ * enabled (ETE).
+ */
+#define TRB_TBC(p)		(((p) & 0x3) << 7)
+#define TRB_TLBPC(p)		(((p) & 0xf) << 16)
+
+/* Cycle bit - indicates TRB ownership by the controller or the driver. */
+#define TRB_CYCLE		BIT(0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT			BIT(1)
+/* Interrupt on short packet. */
+#define TRB_ISP			BIT(2)
+/* Set PCIe no snoop attribute. */
+#define TRB_NO_SNOOP		BIT(3)
+/* Chain multiple TRBs into a TD. */
+#define TRB_CHAIN		BIT(4)
+/* Interrupt on completion. */
+#define TRB_IOC			BIT(5)
+/* The buffer pointer contains immediate data. */
+#define TRB_IDT			BIT(6)
+/* 0 - NRDY during data stage, 1 - NRDY during status stage (only control). */
+#define TRB_STAT		BIT(7)
+/* Block Event Interrupt. */
+#define TRB_BEI			BIT(9)
+
+/* Control transfer TRB specific fields.
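+ * TRB_DIR_IN marks an IN data stage; the SETUPID and SETUPSTAT fields tie
+ * a data or status stage TRB back to the SETUP packet it answers.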
*/ +#define TRB_DIR_IN BIT(16) + +/* TRB bit mask in Data Stage TRB */ +#define TRB_SETUPID_BITMASK GENMASK(9, 8) +#define TRB_SETUPID(p) ((p) << 8) +#define TRB_SETUPID_TO_TYPE(p) (((p) & TRB_SETUPID_BITMASK) >> 8) + +#define TRB_SETUP_SPEEDID_USB3 0x1 +#define TRB_SETUP_SPEEDID_USB2 0x0 +#define TRB_SETUP_SPEEDID(p) ((p) & (1 << 7)) + +#define TRB_SETUPSTAT_ACK 0x1 +#define TRB_SETUPSTAT_STALL 0x0 +#define TRB_SETUPSTAT(p) ((p) << 6) + +/* Isochronous TRB specific fields */ +#define TRB_SIA BIT(31) +#define TRB_FRAME_ID(p) (((p) << 20) & GENMASK(30, 20)) + +struct cdnsp_generic_trb { + __le32 field[4]; +}; + +union cdnsp_trb { + struct cdnsp_link_trb link; + struct cdnsp_transfer_event trans_event; + struct cdnsp_event_cmd event_cmd; + struct cdnsp_generic_trb generic; +}; + +/* TRB bit mask. */ +#define TRB_TYPE_BITMASK GENMASK(15, 10) +#define TRB_TYPE(p) ((p) << 10) +#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10) + +/* TRB type IDs. */ +/* bulk, interrupt, isoc scatter/gather, and control data stage. */ +#define TRB_NORMAL 1 +/* Setup Stage for control transfers. */ +#define TRB_SETUP 2 +/* Data Stage for control transfers. */ +#define TRB_DATA 3 +/* Status Stage for control transfers. */ +#define TRB_STATUS 4 +/* ISOC transfers. */ +#define TRB_ISOC 5 +/* TRB for linking ring segments. */ +#define TRB_LINK 6 +#define TRB_EVENT_DATA 7 +/* Transfer Ring No-op (not for the command ring). */ +#define TRB_TR_NOOP 8 + +/* Command TRBs */ +/* Enable Slot Command. */ +#define TRB_ENABLE_SLOT 9 +/* Disable Slot Command. */ +#define TRB_DISABLE_SLOT 10 +/* Address Device Command. */ +#define TRB_ADDR_DEV 11 +/* Configure Endpoint Command. */ +#define TRB_CONFIG_EP 12 +/* Evaluate Context Command. */ +#define TRB_EVAL_CONTEXT 13 +/* Reset Endpoint Command. */ +#define TRB_RESET_EP 14 +/* Stop Transfer Ring Command. */ +#define TRB_STOP_RING 15 +/* Set Transfer Ring Dequeue Pointer Command. */ +#define TRB_SET_DEQ 16 +/* Reset Device Command. */ +#define TRB_RESET_DEV 17 +/* Force Event Command (opt). */ +#define TRB_FORCE_EVENT 18 +/* Force Header Command - generate a transaction or link management packet. */ +#define TRB_FORCE_HEADER 22 +/* No-op Command - not for transfer rings. */ +#define TRB_CMD_NOOP 23 +/* TRB IDs 24-31 reserved. */ + +/* Event TRBS. */ +/* Transfer Event. */ +#define TRB_TRANSFER 32 +/* Command Completion Event. */ +#define TRB_COMPLETION 33 +/* Port Status Change Event. */ +#define TRB_PORT_STATUS 34 +/* Device Controller Event. */ +#define TRB_HC_EVENT 37 +/* MFINDEX Wrap Event - microframe counter wrapped. */ +#define TRB_MFINDEX_WRAP 39 +/* TRB IDs 40-47 reserved. */ +/* Endpoint Not Ready Event. */ +#define TRB_ENDPOINT_NRDY 48 +/* TRB IDs 49-53 reserved. */ +/* Halt Endpoint Command. */ +#define TRB_HALT_ENDPOINT 54 +/* Doorbell Overflow Event. */ +#define TRB_DRB_OVERFLOW 57 +/* Flush Endpoint Command. */ +#define TRB_FLUSH_ENDPOINT 58 + +#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) +#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_TR_NOOP))) + +/* + * TRBS_PER_SEGMENT must be a multiple of 4. + * The command ring is 64-byte aligned, so it must also be greater than 16. 
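+ * Each TRB is 16 bytes, so a 256-TRB segment (TRB_SEGMENT_SIZE) occupies
+ * exactly one 4K page.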
+ */ +#define TRBS_PER_SEGMENT 256 +#define TRBS_PER_EVENT_SEGMENT 256 +#define TRBS_PER_EV_DEQ_UPDATE 100 +#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16) +#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE)) +/* TRB buffer pointers can't cross 64KB boundaries. */ +#define TRB_MAX_BUFF_SHIFT 16 +#define TRB_MAX_BUFF_SIZE BIT(TRB_MAX_BUFF_SHIFT) +/* How much data is left before the 64KB boundary? */ +#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \ + ((addr) & (TRB_MAX_BUFF_SIZE - 1))) + +/** + * struct cdnsp_segment - segment related data. + * @trbs: Array of Transfer Request Blocks. + * @next: Pointer to the next segment. + * @dma: DMA address of current segment. + * @bounce_dma: Bounce buffer DMA address . + * @bounce_buf: Bounce buffer virtual address. + * bounce_offs: Bounce buffer offset. + * bounce_len: Bounce buffer length. + */ +struct cdnsp_segment { + union cdnsp_trb *trbs; + struct cdnsp_segment *next; + dma_addr_t dma; + /* Max packet sized bounce buffer for td-fragmant alignment */ + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +/** + * struct cdnsp_td - Transfer Descriptor object. + * @td_list: Used for binding TD with ep_ring->td_list. + * @preq: Request associated with this TD + * @start_seg: Segment containing the first_trb in TD. + * @first_trb: First TRB for this TD. + * @last_trb: Last TRB related with TD. + * @bounce_seg: Bounce segment for this TD. + * @request_length_set: actual_length of the request has already been set. + * @drbl - TD has been added to HW scheduler - only for stream capable + * endpoints. + */ +struct cdnsp_td { + struct list_head td_list; + struct cdnsp_request *preq; + struct cdnsp_segment *start_seg; + union cdnsp_trb *first_trb; + union cdnsp_trb *last_trb; + struct cdnsp_segment *bounce_seg; + bool request_length_set; + bool drbl; +}; + +/** + * struct cdnsp_dequeue_state - New dequeue pointer for Transfer Ring. + * @new_deq_seg: New dequeue segment. + * @new_deq_ptr: New dequeue pointer. + * @new_cycle_state: New cycle state. + * @stream_id: stream id for which new dequeue pointer has been selected. + */ +struct cdnsp_dequeue_state { + struct cdnsp_segment *new_deq_seg; + union cdnsp_trb *new_deq_ptr; + int new_cycle_state; + unsigned int stream_id; +}; + +enum cdnsp_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC, + TYPE_BULK, + TYPE_INTR, + TYPE_STREAM, + TYPE_COMMAND, + TYPE_EVENT, +}; + +/** + * struct cdnsp_ring - information describing transfer, command or event ring. + * @first_seg: First segment on transfer ring. + * @last_seg: Last segment on transfer ring. + * @enqueue: SW enqueue pointer address. + * @enq_seg: SW enqueue segment address. + * @dequeue: SW dequeue pointer address. + * @deq_seg: SW dequeue segment address. + * @td_list: transfer descriptor list associated with this ring. + * @cycle_state: Current cycle bit. Write the cycle state into the TRB cycle + * field to give ownership of the TRB to the device controller + * (if we are the producer) or to check if we own the TRB + * (if we are the consumer). + * @stream_id: Stream id + * @stream_active: Stream is active - PRIME packet has been detected. + * @stream_rejected: This ring has been rejected by host. + * @num_tds: Number of TDs associated with ring. + * @num_segs: Number of segments. + * @num_trbs_free: Number of free TRBs on the ring. + * @bounce_buf_len: Length of bounce buffer. + * @type: Ring type - event, transfer, or command ring. + * @last_td_was_short - TD is short TD. 
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ *		     stream rings.
+ */
+struct cdnsp_ring {
+	struct cdnsp_segment *first_seg;
+	struct cdnsp_segment *last_seg;
+	union cdnsp_trb *enqueue;
+	struct cdnsp_segment *enq_seg;
+	union cdnsp_trb *dequeue;
+	struct cdnsp_segment *deq_seg;
+	struct list_head td_list;
+	u32 cycle_state;
+	unsigned int stream_id;
+	unsigned int stream_active;
+	unsigned int stream_rejected;
+	int num_tds;
+	unsigned int num_segs;
+	unsigned int num_trbs_free;
+	unsigned int bounce_buf_len;
+	enum cdnsp_ring_type type;
+	bool last_td_was_short;
+	struct radix_tree_root *trb_address_map;
+};
+
+/**
+ * struct cdnsp_erst_entry - event ring segment table entry object.
+ * @seg_addr: 64-bit event ring segment address.
+ * @seg_size: Number of TRBs in segment.
+ */
+struct cdnsp_erst_entry {
+	__le64 seg_addr;
+	__le32 seg_size;
+	/* Set to zero */
+	__le32 rsvd;
+};
+
+/**
+ * struct cdnsp_erst - event ring segment table for event ring.
+ * @entries: Array of event ring segments.
+ * @num_entries: Number of segments in entries array.
+ * @erst_dma_addr: DMA address for entries array.
+ */
+struct cdnsp_erst {
+	struct cdnsp_erst_entry *entries;
+	unsigned int num_entries;
+	dma_addr_t erst_dma_addr;
+};
+
+/**
+ * struct cdnsp_request - extended device side representation of usb_request
+ *                        object.
+ * @td: Transfer descriptor associated with this request.
+ * @request: Generic usb_request object describing single I/O request.
+ * @list: Used to add the request to the endpoint's pending_list.
+ * @pep: Extended representation of usb_ep object.
+ * @epnum: Endpoint number associated with usb request.
+ * @direction: Endpoint direction for usb request.
+ */
+struct cdnsp_request {
+	struct cdnsp_td td;
+	struct usb_request request;
+	struct list_head list;
+	struct cdnsp_ep *pep;
+	u8 epnum;
+	unsigned direction:1;
+};
+
+#define ERST_NUM_SEGS	1
+
+/* Stages used during enumeration process. */
+enum cdnsp_ep0_stage {
+	CDNSP_SETUP_STAGE,
+	CDNSP_DATA_STAGE,
+	CDNSP_STATUS_STAGE,
+};
+
+/**
+ * struct cdnsp_port - holds information about detected ports.
+ * @regs: Base address of the port register block.
+ * @port_num: Port number.
+ * @exist: Indicates if the port exists.
+ * @maj_rev: Major revision.
+ * @min_rev: Minor revision.
+ */
+struct cdnsp_port {
+	struct cdnsp_port_regs __iomem *regs;
+	u8 port_num;
+	u8 exist;
+	u8 maj_rev;
+	u8 min_rev;
+};
+
+#define CDNSP_EXT_PORT_MAJOR(x)	(((x) >> 24) & 0xff)
+#define CDNSP_EXT_PORT_MINOR(x)	(((x) >> 16) & 0xff)
+#define CDNSP_EXT_PORT_OFF(x)	((x) & 0xff)
+#define CDNSP_EXT_PORT_COUNT(x)	(((x) >> 8) & 0xff)
+
+/**
+ * struct cdnsp_device - represents the USB device.
+ * @dev: Pointer to device structure associated with this controller.
+ * @gadget: Device side representation of the peripheral controller.
+ * @gadget_driver: Pointer to the gadget driver.
+ * @irq: IRQ line number used by device side.
+ * @regs: IO device memory.
+ * @cap_regs: Capability registers.
+ * @op_regs: Operational registers.
+ * @run_regs: Runtime registers.
+ * @dba: Doorbell array registers.
+ * @ir_set: Current interrupter register set.
+ * @port20_regs: Port 2.0 Peripheral Configuration Registers.
+ * @port3x_regs: USB3.x Port Peripheral Configuration Registers.
+ * @rev_cap: Controller Capabilities Registers.
+ * @hcs_params1: Cached register copy of read-only HCSPARAMS1.
+ * @hcc_params: Cached register copy of read-only HCCPARAMS.
+ * @setup: Temporary buffer for setup packet.
+ * @ep0_preq: Internal allocated request used during enumeration.
+ * @ep0_stage: ep0 stage during enumeration process. + * @three_stage_setup: Three state or two state setup. + * @ep0_expect_in: Data IN expected for control transfer. + * @setup_id: Setup identifier. + * @setup_speed - Speed detected for current SETUP packet. + * @setup_buf: Buffer for SETUP packet. + * @device_address: Current device address. + * @may_wakeup: remote wakeup enabled/disabled. + * @lock: Lock used in interrupt thread context. + * @hci_version: device controller version. + * @dcbaa: Device context base address array. + * @cmd_ring: Command ring. + * @cmd: Represent all what is needed to issue command on Command Ring. + * @event_ring: Event ring. + * @erst: Event Ring Segment table + * @slot_id: Current Slot ID. Should be 0 or 1. + * @out_ctx: Output context. + * @in_ctx: Input context. + * @eps: array of endpoints object associated with device. + * @usb2_hw_lpm_capable: hardware lpm is enabled; + * @u1_allowed: Allow device transition to U1 state. + * @u2_allowed: Allow device transition to U2 state + * @device_pool: DMA pool for allocating input and output context. + * @segment_pool: DMA pool for allocating new segments. + * @cdnsp_state: Current state of controller. + * @link_state: Current link state. + * @usb2_port - Port USB 2.0. + * @usb3_port - Port USB 3.0. + * @active_port - Current selected Port. + * @test_mode: selected Test Mode. + */ +struct cdnsp_device { + struct device *dev; + struct usb_gadget gadget; + struct usb_gadget_driver *gadget_driver; + unsigned int irq; + void __iomem *regs; + + /* Registers map */ + struct cdnsp_cap_regs __iomem *cap_regs; + struct cdnsp_op_regs __iomem *op_regs; + struct cdnsp_run_regs __iomem *run_regs; + struct cdnsp_doorbell_array __iomem *dba; + struct cdnsp_intr_reg __iomem *ir_set; + struct cdnsp_20port_cap __iomem *port20_regs; + struct cdnsp_3xport_cap __iomem *port3x_regs; + struct cdnsp_rev_cap __iomem *rev_cap; + + /* Cached register copies of read-only CDNSP data */ + __u32 hcs_params1; + __u32 hcs_params3; + __u32 hcc_params; + /* Lock used in interrupt thread context. */ + spinlock_t lock; + struct usb_ctrlrequest setup; + struct cdnsp_request ep0_preq; + enum cdnsp_ep0_stage ep0_stage; + u8 three_stage_setup; + u8 ep0_expect_in; + u8 setup_id; + u8 setup_speed; + void *setup_buf; + u8 device_address; + int may_wakeup; + u16 hci_version; + + /* data structures */ + struct cdnsp_device_context_array *dcbaa; + struct cdnsp_ring *cmd_ring; + struct cdnsp_command cmd; + struct cdnsp_ring *event_ring; + struct cdnsp_erst erst; + int slot_id; + + /* + * Commands to the hardware are passed an "input context" that + * tells the hardware what to change in its data structures. + * The hardware will return changes in an "output context" that + * software must allocate for the hardware. . + */ + struct cdnsp_container_ctx out_ctx; + struct cdnsp_container_ctx in_ctx; + struct cdnsp_ep eps[CDNSP_ENDPOINTS_NUM]; + u8 usb2_hw_lpm_capable:1; + u8 u1_allowed:1; + u8 u2_allowed:1; + + /* DMA pools */ + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + +#define CDNSP_STATE_HALTED BIT(1) +#define CDNSP_STATE_DYING BIT(2) +#define CDNSP_STATE_DISCONNECT_PENDING BIT(3) +#define CDNSP_WAKEUP_PENDING BIT(4) + unsigned int cdnsp_state; + unsigned int link_state; + + struct cdnsp_port usb2_port; + struct cdnsp_port usb3_port; + struct cdnsp_port *active_port; + u16 test_mode; +}; + +/* + * Registers should always be accessed with double word or quad word accesses. 
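+ * Byte and word accesses are outside of the programming model and may not
+ * behave as expected.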
+ *
+ * Registers with 64-bit address pointers should be written to with
+ * dword accesses by writing the low dword first (ptr[0]), then the high dword
+ * (ptr[1]) second. Controller implementations that do not support 64-bit
+ * address pointers will ignore the high dword, and write order is irrelevant.
+ */
+static inline u64 cdnsp_read_64(__le64 __iomem *regs)
+{
+	return lo_hi_readq(regs);
+}
+
+static inline void cdnsp_write_64(const u64 val, __le64 __iomem *regs)
+{
+	lo_hi_writeq(val, regs);
+}
+
+/* CDNSP memory management functions. */
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev);
+int cdnsp_mem_init(struct cdnsp_device *pdev);
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev);
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev);
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *ep);
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+			struct cdnsp_ep *pep,
+			gfp_t mem_flags);
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+			 struct cdnsp_ring *ring,
+			 unsigned int num_trbs, gfp_t flags);
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *ep, u64 address);
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+			    struct cdnsp_ep *pep,
+			    unsigned int num_stream_ctxs,
+			    unsigned int num_streams);
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+
+/* Device controller glue. */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id);
+int cdnsp_halt(struct cdnsp_device *pdev);
+void cdnsp_died(struct cdnsp_device *pdev);
+int cdnsp_reset(struct cdnsp_device *pdev);
+irqreturn_t cdnsp_irq_handler(int irq, void *priv);
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup);
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *usbsssp_data,
+				 struct usb_request *req, int enable);
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data);
+
+/* Ring, segment, TRB, and TD functions.
*/ +dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg, + union cdnsp_trb *trb); +bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb); +bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring, + struct cdnsp_segment *seg, + union cdnsp_trb *trb); +int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev); +void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev, + union cdnsp_trb *event_ring_deq, + u8 clear_ehb); +void cdnsp_initialize_ring_info(struct cdnsp_ring *ring); +void cdnsp_ring_cmd_db(struct cdnsp_device *pdev); +void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type); +void cdnsp_queue_address_device(struct cdnsp_device *pdev, + dma_addr_t in_ctx_ptr, + enum cdnsp_setup_dev setup); +void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq); +int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq); +int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev, + struct cdnsp_request *preq); +void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev, + dma_addr_t in_ctx_ptr); +void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index); +void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num); +void cdnsp_queue_reset_device(struct cdnsp_device *pdev); +void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + struct cdnsp_dequeue_state *deq_state); +void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev, + struct cdnsp_ep *pep); +void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring); +void cdnsp_set_link_state(struct cdnsp_device *pdev, + __le32 __iomem *port_regs, u32 link_state); +u32 cdnsp_port_state_to_neutral(u32 state); + +/* CDNSP device controller contexts. */ +int cdnsp_enable_slot(struct cdnsp_device *pdev); +int cdnsp_disable_slot(struct cdnsp_device *pdev); +struct cdnsp_input_control_ctx + *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx); +struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx); +struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx, + unsigned int ep_index); +/* CDNSP gadget interface. */ +void cdnsp_suspend_gadget(struct cdnsp_device *pdev); +void cdnsp_resume_gadget(struct cdnsp_device *pdev); +void cdnsp_disconnect_gadget(struct cdnsp_device *pdev); +void cdnsp_gadget_giveback(struct cdnsp_ep *pep, struct cdnsp_request *preq, + int status); +int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq); +int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq); +unsigned int cdnsp_port_speed(unsigned int port_status); +void cdnsp_irq_reset(struct cdnsp_device *pdev); +int cdnsp_halt_endpoint(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, int value); +int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep); +int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep); +void cdnsp_setup_analyze(struct cdnsp_device *pdev); +int cdnsp_status_stage(struct cdnsp_device *pdev); +int cdnsp_reset_device(struct cdnsp_device *pdev); + +/** + * next_request - gets the next request on the given list + * @list: the request list to operate on + * + * Caller should take care of locking. 
This function returns NULL or the first
+ * request available on the list.
+ */
+static inline struct cdnsp_request *next_request(struct list_head *list)
+{
+	return list_first_entry_or_null(list, struct cdnsp_request, list);
+}
+
+#define to_cdnsp_ep(ep) (container_of(ep, struct cdnsp_ep, endpoint))
+#define gadget_to_cdnsp(g) (container_of(g, struct cdnsp_device, gadget))
+#define request_to_cdnsp_request(r) (container_of(r, struct cdnsp_request, \
+					request))
+#define to_cdnsp_request(r) (container_of(r, struct cdnsp_request, request))
+int cdnsp_remove_request(struct cdnsp_device *pdev, struct cdnsp_request *preq,
+			 struct cdnsp_ep *pep);
+
+#endif /* __LINUX_CDNSP_GADGET_H */
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
new file mode 100644
index 000000000..97866bfb2
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-mem.c
@@ -0,0 +1,1337 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+				   struct cdnsp_ep *pep);
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
+						 unsigned int cycle_state,
+						 unsigned int max_packet,
+						 gfp_t flags)
+{
+	struct cdnsp_segment *seg;
+	dma_addr_t dma;
+	int i;
+
+	seg = kzalloc(sizeof(*seg), flags);
+	if (!seg)
+		return NULL;
+
+	seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
+	if (!seg->trbs) {
+		kfree(seg);
+		return NULL;
+	}
+
+	if (max_packet) {
+		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		if (!seg->bounce_buf)
+			goto free_dma;
+	}
+
+	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
+	if (cycle_state == 0) {
+		for (i = 0; i < TRBS_PER_SEGMENT; i++)
+			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+	}
+	seg->dma = dma;
+	seg->next = NULL;
+
+	return seg;
+
+free_dma:
+	dma_pool_free(pdev->segment_pool, seg->trbs, dma);
+	kfree(seg);
+
+	return NULL;
+}
+
+static void cdnsp_segment_free(struct cdnsp_device *pdev,
+			       struct cdnsp_segment *seg)
+{
+	if (seg->trbs)
+		dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
+
+	kfree(seg->bounce_buf);
+	kfree(seg);
+}
+
+static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
+					 struct cdnsp_segment *first)
+{
+	struct cdnsp_segment *seg;
+
+	seg = first->next;
+
+	while (seg != first) {
+		struct cdnsp_segment *next = seg->next;
+
+		cdnsp_segment_free(pdev, seg);
+		seg = next;
+	}
+
+	cdnsp_segment_free(pdev, first);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
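+ *
+ * For example, the ring allocator below relies on this: after linking the
+ * segments into a circle it ORs LINK_TOGGLE into the last segment's link
+ * TRB itself, roughly:
+ *
+ *	ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ *		cpu_to_le32(LINK_TOGGLE);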
+ */ +static void cdnsp_link_segments(struct cdnsp_device *pdev, + struct cdnsp_segment *prev, + struct cdnsp_segment *next, + enum cdnsp_ring_type type) +{ + struct cdnsp_link_trb *link; + u32 val; + + if (!prev || !next) + return; + + prev->next = next; + if (type != TYPE_EVENT) { + link = &prev->trbs[TRBS_PER_SEGMENT - 1].link; + link->segment_ptr = cpu_to_le64(next->dma); + + /* + * Set the last TRB in the segment to have a TRB type ID + * of Link TRB + */ + val = le32_to_cpu(link->control); + val &= ~TRB_TYPE_BITMASK; + val |= TRB_TYPE(TRB_LINK); + link->control = cpu_to_le32(val); + } +} + +/* + * Link the ring to the new segments. + * Set Toggle Cycle for the new ring if needed. + */ +static void cdnsp_link_rings(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + struct cdnsp_segment *first, + struct cdnsp_segment *last, + unsigned int num_segs) +{ + struct cdnsp_segment *next; + + if (!ring || !first || !last) + return; + + next = ring->enq_seg->next; + cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type); + cdnsp_link_segments(pdev, last, next, ring->type); + ring->num_segs += num_segs; + ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; + + if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= + ~cpu_to_le32(LINK_TOGGLE); + last->trbs[TRBS_PER_SEGMENT - 1].link.control |= + cpu_to_le32(LINK_TOGGLE); + ring->last_seg = last; + } +} + +/* + * We need a radix tree for mapping physical addresses of TRBs to which stream + * ID they belong to. We need to do this because the device controller won't + * tell us which stream ring the TRB came from. We could store the stream ID + * in an event data TRB, but that doesn't help us for the cancellation case, + * since the endpoint may stop before it reaches that event data TRB. + * + * The radix tree maps the upper portion of the TRB DMA address to a ring + * segment that has the same upper portion of DMA addresses. For example, + * say I have segments of size 1KB, that are always 1KB aligned. A segment may + * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the + * key to the stream ID is 0x43244. I can use the DMA address of the TRB to + * pass the radix tree a key to get the right stream ID: + * + * 0x10c90fff >> 10 = 0x43243 + * 0x10c912c0 >> 10 = 0x43244 + * 0x10c91400 >> 10 = 0x43245 + * + * Obviously, only those TRBs with DMA addresses that are within the segment + * will make the radix tree return the stream ID for that ring. + * + * Caveats for the radix tree: + * + * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an + * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be + * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the + * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit + * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit + * extended systems (where the DMA address can be bigger than 32-bits), + * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. + */ +static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map, + struct cdnsp_ring *ring, + struct cdnsp_segment *seg, + gfp_t mem_flags) +{ + unsigned long key; + int ret; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + + /* Skip any segments that were already added. 
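+	 * This can legitimately happen when an existing stream ring is
+	 * remapped, e.g. cdnsp_update_stream_mapping() re-walks segments
+	 * that are already present in the tree.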
*/ + if (radix_tree_lookup(trb_address_map, key)) + return 0; + + ret = radix_tree_maybe_preload(mem_flags); + if (ret) + return ret; + + ret = radix_tree_insert(trb_address_map, key, ring); + radix_tree_preload_end(); + + return ret; +} + +static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map, + struct cdnsp_segment *seg) +{ + unsigned long key; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + if (radix_tree_lookup(trb_address_map, key)) + radix_tree_delete(trb_address_map, key); +} + +static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map, + struct cdnsp_ring *ring, + struct cdnsp_segment *first_seg, + struct cdnsp_segment *last_seg, + gfp_t mem_flags) +{ + struct cdnsp_segment *failed_seg; + struct cdnsp_segment *seg; + int ret; + + seg = first_seg; + do { + ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg, + mem_flags); + if (ret) + goto remove_streams; + if (seg == last_seg) + return 0; + seg = seg->next; + } while (seg != first_seg); + + return 0; + +remove_streams: + failed_seg = seg; + seg = first_seg; + do { + cdnsp_remove_segment_mapping(trb_address_map, seg); + if (seg == failed_seg) + return ret; + seg = seg->next; + } while (seg != first_seg); + + return ret; +} + +static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring) +{ + struct cdnsp_segment *seg; + + seg = ring->first_seg; + do { + cdnsp_remove_segment_mapping(ring->trb_address_map, seg); + seg = seg->next; + } while (seg != ring->first_seg); +} + +static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring) +{ + return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring, + ring->first_seg, ring->last_seg, GFP_ATOMIC); +} + +static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring) +{ + if (!ring) + return; + + trace_cdnsp_ring_free(ring); + + if (ring->first_seg) { + if (ring->type == TYPE_STREAM) + cdnsp_remove_stream_mapping(ring); + + cdnsp_free_segments_for_ring(pdev, ring->first_seg); + } + + kfree(ring); +} + +void cdnsp_initialize_ring_info(struct cdnsp_ring *ring) +{ + ring->enqueue = ring->first_seg->trbs; + ring->enq_seg = ring->first_seg; + ring->dequeue = ring->enqueue; + ring->deq_seg = ring->first_seg; + + /* + * The ring is initialized to 0. The producer must write 1 to the cycle + * bit to handover ownership of the TRB, so PCS = 1. The consumer must + * compare CCS to the cycle bit to check ownership, so CCS = 1. + * + * New rings are initialized with cycle state equal to 1; if we are + * handling ring expansion, set the cycle state equal to the old ring. + */ + ring->cycle_state = 1; + + /* + * Each segment has a link TRB, and leave an extra TRB for SW + * accounting purpose + */ + ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; +} + +/* Allocate segments and link them for a ring. */ +static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev, + struct cdnsp_segment **first, + struct cdnsp_segment **last, + unsigned int num_segs, + unsigned int cycle_state, + enum cdnsp_ring_type type, + unsigned int max_packet, + gfp_t flags) +{ + struct cdnsp_segment *prev; + + /* Allocate first segment. */ + prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags); + if (!prev) + return -ENOMEM; + + num_segs--; + *first = prev; + + /* Allocate all other segments. 
*/ + while (num_segs > 0) { + struct cdnsp_segment *next; + + next = cdnsp_segment_alloc(pdev, cycle_state, + max_packet, flags); + if (!next) { + cdnsp_free_segments_for_ring(pdev, *first); + return -ENOMEM; + } + + cdnsp_link_segments(pdev, prev, next, type); + + prev = next; + num_segs--; + } + + cdnsp_link_segments(pdev, prev, *first, type); + *last = prev; + + return 0; +} + +/* + * Create a new ring with zero or more segments. + * + * Link each segment together into a ring. + * Set the end flag and the cycle toggle bit on the last segment. + */ +static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev, + unsigned int num_segs, + enum cdnsp_ring_type type, + unsigned int max_packet, + gfp_t flags) +{ + struct cdnsp_ring *ring; + int ret; + + ring = kzalloc(sizeof *(ring), flags); + if (!ring) + return NULL; + + ring->num_segs = num_segs; + ring->bounce_buf_len = max_packet; + INIT_LIST_HEAD(&ring->td_list); + ring->type = type; + + if (num_segs == 0) + return ring; + + ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg, + &ring->last_seg, num_segs, + 1, type, max_packet, flags); + if (ret) + goto fail; + + /* Only event ring does not use link TRB. */ + if (type != TYPE_EVENT) + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= + cpu_to_le32(LINK_TOGGLE); + + cdnsp_initialize_ring_info(ring); + trace_cdnsp_ring_alloc(ring); + return ring; +fail: + kfree(ring); + return NULL; +} + +void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +{ + cdnsp_ring_free(pdev, pep->ring); + pep->ring = NULL; + cdnsp_free_stream_info(pdev, pep); +} + +/* + * Expand an existing ring. + * Allocate a new ring which has same segment numbers and link the two rings. + */ +int cdnsp_ring_expansion(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + unsigned int num_trbs, + gfp_t flags) +{ + unsigned int num_segs_needed; + struct cdnsp_segment *first; + struct cdnsp_segment *last; + unsigned int num_segs; + int ret; + + num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / + (TRBS_PER_SEGMENT - 1); + + /* Allocate number of segments we needed, or double the ring size. */ + num_segs = max(ring->num_segs, num_segs_needed); + + ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs, + ring->cycle_state, ring->type, + ring->bounce_buf_len, flags); + if (ret) + return -ENOMEM; + + if (ring->type == TYPE_STREAM) + ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map, + ring, first, + last, flags); + + if (ret) { + cdnsp_free_segments_for_ring(pdev, first); + + return ret; + } + + cdnsp_link_rings(pdev, ring, first, last, num_segs); + trace_cdnsp_ring_expansion(ring); + + return 0; +} + +static int cdnsp_init_device_ctx(struct cdnsp_device *pdev) +{ + int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 
2048 : 1024;
+
+	pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
+	pdev->out_ctx.size = size;
+	pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
+	pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+					      &pdev->out_ctx.dma);
+
+	if (!pdev->out_ctx.bytes)
+		return -ENOMEM;
+
+	pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
+	pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
+	pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
+	pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+					     &pdev->in_ctx.dma);
+
+	if (!pdev->in_ctx.bytes) {
+		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+			      pdev->out_ctx.dma);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+struct cdnsp_input_control_ctx
+	*cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
+{
+	if (ctx->type != CDNSP_CTX_TYPE_INPUT)
+		return NULL;
+
+	return (struct cdnsp_input_control_ctx *)ctx->bytes;
+}
+
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
+{
+	if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
+		return (struct cdnsp_slot_ctx *)ctx->bytes;
+
+	return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
+}
+
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+				      unsigned int ep_index)
+{
+	/* Increment ep index by offset of start of ep ctx array. */
+	ep_index++;
+	if (ctx->type == CDNSP_CTX_TYPE_INPUT)
+		ep_index++;
+
+	return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
+}
+
+static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
+				  struct cdnsp_ep *pep)
+{
+	dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
+		      pep->stream_info.ctx_array_dma);
+}
+
+/* The stream context array must be a power of 2. */
+static struct cdnsp_stream_ctx
+	*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+	size_t size = sizeof(struct cdnsp_stream_ctx) *
+		      pep->stream_info.num_stream_ctxs;
+
+	if (size > CDNSP_CTX_SIZE)
+		return NULL;
+
+	/*
+	 * The driver intentionally uses the device_pool to allocate the
+	 * stream context array. The device pool is 2048 bytes in size,
+	 * which gives us 128 entries.
+	 */
+	return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
+			       &pep->stream_info.ctx_array_dma);
+}
+
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
+{
+	if (pep->ep_state & EP_HAS_STREAMS)
+		return radix_tree_lookup(&pep->stream_info.trb_address_map,
+					 address >> TRB_SEGMENT_SHIFT);
+
+	return pep->ring;
+}
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.
+ * The number of requested streams includes stream 0, which cannot be used by
+ * the driver.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ */
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+			    struct cdnsp_ep *pep,
+			    unsigned int num_stream_ctxs,
+			    unsigned int num_streams)
+{
+	struct cdnsp_stream_info *stream_info;
+	struct cdnsp_ring *cur_ring;
+	u32 cur_stream;
+	u64 addr;
+	int ret;
+	int mps;
+
+	stream_info = &pep->stream_info;
+	stream_info->num_streams = num_streams;
+	stream_info->num_stream_ctxs = num_stream_ctxs;
+
+	/* Initialize the array of virtual pointers to stream rings. */
+	stream_info->stream_rings = kcalloc(num_streams,
+					    sizeof(struct cdnsp_ring *),
+					    GFP_ATOMIC);
+	if (!stream_info->stream_rings)
+		return -ENOMEM;
+
+	/* Initialize the array of DMA addresses for stream rings for the HW.
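+	 * Each entry will later be filled with the ring's first-segment DMA
+	 * address together with the stream context type and cycle bit, see
+	 * the SCT_FOR_CTX(SCT_PRI_TR) usage below.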
*/ + stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep); + if (!stream_info->stream_ctx_array) + goto cleanup_stream_rings; + + memset(stream_info->stream_ctx_array, 0, + sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs); + INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); + mps = usb_endpoint_maxp(pep->endpoint.desc); + + /* + * Allocate rings for all the streams that the driver will use, + * and add their segment DMA addresses to the radix tree. + * Stream 0 is reserved. + */ + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps, + GFP_ATOMIC); + stream_info->stream_rings[cur_stream] = cur_ring; + + if (!cur_ring) + goto cleanup_rings; + + cur_ring->stream_id = cur_stream; + cur_ring->trb_address_map = &stream_info->trb_address_map; + + /* Set deq ptr, cycle bit, and stream context type. */ + addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) | + cur_ring->cycle_state; + + stream_info->stream_ctx_array[cur_stream].stream_ring = + cpu_to_le64(addr); + + trace_cdnsp_set_stream_ring(cur_ring); + + ret = cdnsp_update_stream_mapping(cur_ring); + if (ret) + goto cleanup_rings; + } + + return 0; + +cleanup_rings: + for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + cdnsp_ring_free(pdev, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + +cleanup_stream_rings: + kfree(pep->stream_info.stream_rings); + + return -ENOMEM; +} + +/* Frees all stream contexts associated with the endpoint. */ +static void cdnsp_free_stream_info(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_stream_info *stream_info = &pep->stream_info; + struct cdnsp_ring *cur_ring; + int cur_stream; + + if (!(pep->ep_state & EP_HAS_STREAMS)) + return; + + for (cur_stream = 1; cur_stream < stream_info->num_streams; + cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + cdnsp_ring_free(pdev, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + + if (stream_info->stream_ctx_array) + cdnsp_free_stream_ctx(pdev, pep); + + kfree(stream_info->stream_rings); + pep->ep_state &= ~EP_HAS_STREAMS; +} + +/* All the cdnsp_tds in the ring's TD list should be freed at this point.*/ +static void cdnsp_free_priv_device(struct cdnsp_device *pdev) +{ + pdev->dcbaa->dev_context_ptrs[1] = 0; + + cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]); + + if (pdev->in_ctx.bytes) + dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes, + pdev->in_ctx.dma); + + if (pdev->out_ctx.bytes) + dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes, + pdev->out_ctx.dma); + + pdev->in_ctx.bytes = NULL; + pdev->out_ctx.bytes = NULL; +} + +static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev) +{ + int ret; + + ret = cdnsp_init_device_ctx(pdev); + if (ret) + return ret; + + /* Allocate endpoint 0 ring. */ + pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC); + if (!pdev->eps[0].ring) + goto fail; + + /* Point to output device context in dcbaa. 
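+	 * Index 1 is used because this is a single-device peripheral
+	 * controller whose slot ID is expected to be 0 or 1 (see the
+	 * slot_id documentation in cdnsp-gadget.h).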
	 */
+	pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
+	pdev->cmd.in_ctx = &pdev->in_ctx;
+
+	trace_cdnsp_alloc_priv_device(pdev);
+	return 0;
+fail:
+	dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+		      pdev->out_ctx.dma);
+	dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+		      pdev->in_ctx.dma);
+
+	return ret;
+}
+
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
+{
+	struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
+	struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
+	dma_addr_t dma;
+
+	dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
+	ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
+}
+
+/* Set up a controller private device for a Set Address command. */
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
+{
+	struct cdnsp_slot_ctx *slot_ctx;
+	struct cdnsp_ep_ctx *ep0_ctx;
+	u32 max_packets, port;
+
+	ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
+	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+
+	/* Only the control endpoint is valid - one endpoint context. */
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+
+	switch (pdev->gadget.speed) {
+	case USB_SPEED_SUPER_PLUS:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+		max_packets = MAX_PACKET(512);
+		break;
+	case USB_SPEED_SUPER:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+		max_packets = MAX_PACKET(512);
+		break;
+	case USB_SPEED_HIGH:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+		max_packets = MAX_PACKET(64);
+		break;
+	case USB_SPEED_FULL:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+		max_packets = MAX_PACKET(64);
+		break;
+	default:
+		/* Speed was not set, this shouldn't happen. */
+		return -EINVAL;
+	}
+
+	port = DEV_PORT(pdev->active_port->port_num);
+	slot_ctx->dev_port |= cpu_to_le32(port);
+	slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
+					   DEV_ADDR_MASK));
+	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+					 max_packets);
+
+	ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
+				   pdev->eps[0].ring->cycle_state);
+
+	trace_cdnsp_setup_addressable_priv_device(pdev);
+
+	return 0;
+}
+
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ */
+static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
+						  struct cdnsp_ep *pep)
+{
+	unsigned int interval;
+
+	interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
+	if (interval != pep->endpoint.desc->bInterval - 1)
+		dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
+			 pep->name, 1 << interval,
+			 g->speed == USB_SPEED_FULL ? "" : "micro");
+
+	/*
+	 * Full speed isoc endpoints specify interval in frames,
+	 * not microframes. We are using microframes everywhere,
+	 * so adjust accordingly.
+	 */
+	if (g->speed == USB_SPEED_FULL)
+		interval += 3;	/* 1 frame = 2^3 uframes */
+
+	/* Controller handles only up to 512ms (2^12). */
+	if (interval > 12)
+		interval = 12;
+
+	return interval;
+}
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to the nearest power of 2.
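+ *
+ * For example, a desc_interval of 9 microframes gives fls(9) - 1 = 3,
+ * i.e. 2^3 = 8 microframes, which is then clamped to the
+ * [min_exponent, max_exponent] range.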
+ */
+static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
+						  struct cdnsp_ep *pep,
+						  unsigned int desc_interval,
+						  unsigned int min_exponent,
+						  unsigned int max_exponent)
+{
+	unsigned int interval;
+
+	interval = fls(desc_interval) - 1;
+	return clamp_val(interval, min_exponent, max_exponent);
+}
+
+/*
+ * Return the polling interval.
+ *
+ * The polling interval is expressed in "microframes". If the controller's
+ * Interval field is set to N, it will service the endpoint every
+ * 2^(Interval)*125us.
+ */
+static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
+						struct cdnsp_ep *pep)
+{
+	unsigned int interval = 0;
+
+	switch (g->speed) {
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER_PLUS:
+	case USB_SPEED_SUPER:
+		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
+		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
+			interval = cdnsp_parse_exponent_interval(g, pep);
+		break;
+	case USB_SPEED_FULL:
+		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
+			interval = cdnsp_parse_exponent_interval(g, pep);
+		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
+			interval = pep->endpoint.desc->bInterval << 3;
+			interval = cdnsp_microframes_to_exponent(g, pep,
+								 interval,
+								 3, 10);
+		}
+
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return interval;
+}
+
+/*
+ * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
+{
+	if (g->speed < USB_SPEED_SUPER ||
+	    !usb_endpoint_xfer_isoc(pep->endpoint.desc))
+		return 0;
+
+	return pep->endpoint.comp_desc->bmAttributes;
+}
+
+static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
+					struct cdnsp_ep *pep)
+{
+	/* SuperSpeed and SuperSpeedPlus have max burst in ep companion desc. */
+	if (g->speed >= USB_SPEED_SUPER)
+		return pep->endpoint.comp_desc->bMaxBurst;
+
+	if (g->speed == USB_SPEED_HIGH &&
+	    (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
+	     usb_endpoint_xfer_int(pep->endpoint.desc)))
+		return usb_endpoint_maxp_mult(pep->endpoint.desc) - 1;
+
+	return 0;
+}
+
+static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
+{
+	int in;
+
+	in = usb_endpoint_dir_in(desc);
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		return CTRL_EP;
+	case USB_ENDPOINT_XFER_BULK:
+		return in ? BULK_IN_EP : BULK_OUT_EP;
+	case USB_ENDPOINT_XFER_ISOC:
+		return in ? ISOC_IN_EP : ISOC_OUT_EP;
+	case USB_ENDPOINT_XFER_INT:
+		return in ? INT_IN_EP : INT_OUT_EP;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
+				      struct cdnsp_ep *pep)
+{
+	int max_packet;
+	int max_burst;
+
+	/* Only applies to interrupt or isochronous endpoints. */
+	if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
+	    usb_endpoint_xfer_bulk(pep->endpoint.desc))
+		return 0;
+
+	/* SuperSpeedPlus Isoc ep sending over 48k per ESIT.
*/ + if (g->speed >= USB_SPEED_SUPER_PLUS && + USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes)) + return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); + /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */ + else if (g->speed >= USB_SPEED_SUPER) + return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); + + max_packet = usb_endpoint_maxp(pep->endpoint.desc); + max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc); + + /* A 0 in max burst means 1 transfer per ESIT */ + return max_packet * max_burst; +} + +int cdnsp_endpoint_init(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + gfp_t mem_flags) +{ + enum cdnsp_ring_type ring_type; + struct cdnsp_ep_ctx *ep_ctx; + unsigned int err_count = 0; + unsigned int avg_trb_len; + unsigned int max_packet; + unsigned int max_burst; + unsigned int interval; + u32 max_esit_payload; + unsigned int mult; + u32 endpoint_type; + int ret; + + ep_ctx = pep->in_ctx; + + endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc); + if (!endpoint_type) + return -EINVAL; + + ring_type = usb_endpoint_type(pep->endpoint.desc); + + /* + * Get values to fill the endpoint context, mostly from ep descriptor. + * The average TRB buffer length for bulk endpoints is unclear as we + * have no clue on scatter gather list entry size. For Isoc and Int, + * set it to max available. + */ + max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep); + interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep); + mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep); + max_packet = usb_endpoint_maxp(pep->endpoint.desc); + max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep); + avg_trb_len = max_esit_payload; + + /* Allow 3 retries for everything but isoc, set CErr = 3. */ + if (!usb_endpoint_xfer_isoc(pep->endpoint.desc)) + err_count = 3; + if (usb_endpoint_xfer_bulk(pep->endpoint.desc) && + pdev->gadget.speed == USB_SPEED_HIGH) + max_packet = 512; + /* Controller spec indicates that ctrl ep avg TRB Length should be 8. */ + if (usb_endpoint_xfer_control(pep->endpoint.desc)) + avg_trb_len = 8; + + /* Set up the endpoint ring. 
*/ + pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags); + if (!pep->ring) + return -ENOMEM; + + pep->skip = false; + + /* Fill the endpoint context */ + ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) | + EP_INTERVAL(interval) | EP_MULT(mult)); + ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) | + MAX_PACKET(max_packet) | MAX_BURST(max_burst) | + ERROR_COUNT(err_count)); + ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma | + pep->ring->cycle_state); + + ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) | + EP_AVG_TRB_LENGTH(avg_trb_len)); + + if (usb_endpoint_xfer_bulk(pep->endpoint.desc) && + pdev->gadget.speed > USB_SPEED_HIGH) { + ret = cdnsp_alloc_streams(pdev, pep); + if (ret < 0) + return ret; + } + + return 0; +} + +void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +{ + pep->in_ctx->ep_info = 0; + pep->in_ctx->ep_info2 = 0; + pep->in_ctx->deq = 0; + pep->in_ctx->tx_info = 0; +} + +static int cdnsp_alloc_erst(struct cdnsp_device *pdev, + struct cdnsp_ring *evt_ring, + struct cdnsp_erst *erst) +{ + struct cdnsp_erst_entry *entry; + struct cdnsp_segment *seg; + unsigned int val; + size_t size; + + size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs; + erst->entries = dma_alloc_coherent(pdev->dev, size, + &erst->erst_dma_addr, GFP_KERNEL); + if (!erst->entries) + return -ENOMEM; + + erst->num_entries = evt_ring->num_segs; + + seg = evt_ring->first_seg; + for (val = 0; val < evt_ring->num_segs; val++) { + entry = &erst->entries[val]; + entry->seg_addr = cpu_to_le64(seg->dma); + entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); + entry->rsvd = 0; + seg = seg->next; + } + + return 0; +} + +static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst) +{ + size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries); + struct device *dev = pdev->dev; + + if (erst->entries) + dma_free_coherent(dev, size, erst->entries, + erst->erst_dma_addr); + + erst->entries = NULL; +} + +void cdnsp_mem_cleanup(struct cdnsp_device *pdev) +{ + struct device *dev = pdev->dev; + + cdnsp_free_priv_device(pdev); + cdnsp_free_erst(pdev, &pdev->erst); + + if (pdev->event_ring) + cdnsp_ring_free(pdev, pdev->event_ring); + + pdev->event_ring = NULL; + + if (pdev->cmd_ring) + cdnsp_ring_free(pdev, pdev->cmd_ring); + + pdev->cmd_ring = NULL; + + dma_pool_destroy(pdev->segment_pool); + pdev->segment_pool = NULL; + dma_pool_destroy(pdev->device_pool); + pdev->device_pool = NULL; + + dma_free_coherent(dev, sizeof(*pdev->dcbaa), + pdev->dcbaa, pdev->dcbaa->dma); + + pdev->dcbaa = NULL; + + pdev->usb2_port.exist = 0; + pdev->usb3_port.exist = 0; + pdev->usb2_port.port_num = 0; + pdev->usb3_port.port_num = 0; + pdev->active_port = NULL; +} + +static void cdnsp_set_event_deq(struct cdnsp_device *pdev) +{ + dma_addr_t deq; + u64 temp; + + deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg, + pdev->event_ring->dequeue); + + /* Update controller event ring dequeue pointer */ + temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue); + temp &= ERST_PTR_MASK; + + /* + * Don't clear the EHB bit (which is RW1C) because + * there might be more events to service. 
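+	 * Since writing 1 to a RW1C bit would clear it, the bit is masked
+	 * out of the value written back (temp &= ~ERST_EHB below keeps it
+	 * at zero).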
+ */ + temp &= ~ERST_EHB; + + cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp, + &pdev->ir_set->erst_dequeue); +} + +static void cdnsp_add_in_port(struct cdnsp_device *pdev, + struct cdnsp_port *port, + __le32 __iomem *addr) +{ + u32 temp, port_offset, port_count; + + temp = readl(addr); + port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp); + port->min_rev = CDNSP_EXT_PORT_MINOR(temp); + + /* Port offset and count in the third dword.*/ + temp = readl(addr + 2); + port_offset = CDNSP_EXT_PORT_OFF(temp); + port_count = CDNSP_EXT_PORT_COUNT(temp); + + trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev); + + port->port_num = port_offset; + port->exist = 1; +} + +/* + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that + * specify what speeds each port is supposed to be. + */ +static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev) +{ + void __iomem *base; + u32 offset; + int i; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, 0, + EXT_CAP_CFG_DEV_20PORT_CAP_ID); + pdev->port20_regs = base + offset; + + offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP); + pdev->port3x_regs = base + offset; + + offset = 0; + base = &pdev->cap_regs->hc_capbase; + + /* Driver expects max 2 extended protocol capability. */ + for (i = 0; i < 2; i++) { + u32 temp; + + offset = cdnsp_find_next_ext_cap(base, offset, + EXT_CAPS_PROTOCOL); + temp = readl(base + offset); + + if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 && + !pdev->usb3_port.port_num) + cdnsp_add_in_port(pdev, &pdev->usb3_port, + base + offset); + + if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 && + !pdev->usb2_port.port_num) + cdnsp_add_in_port(pdev, &pdev->usb2_port, + base + offset); + } + + if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) { + dev_err(pdev->dev, "Error: Only one port detected\n"); + return -ENODEV; + } + + trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports."); + + pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *) + (&pdev->op_regs->port_reg_base + NUM_PORT_REGS * + (pdev->usb2_port.port_num - 1)); + + pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *) + (&pdev->op_regs->port_reg_base + NUM_PORT_REGS * + (pdev->usb3_port.port_num - 1)); + + return 0; +} + +/* + * Initialize memory for CDNSP (one-time init). + * + * Program the PAGESIZE register, initialize the device context array, create + * device contexts, set up a command ring segment, create event + * ring (one for now). + */ +int cdnsp_mem_init(struct cdnsp_device *pdev) +{ + struct device *dev = pdev->dev; + int ret = -ENOMEM; + unsigned int val; + dma_addr_t dma; + u32 page_size; + u64 val_64; + + /* + * Use 4K pages, since that's common and the minimum the + * controller supports + */ + page_size = 1 << 12; + + val = readl(&pdev->op_regs->config_reg); + val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E; + writel(val, &pdev->op_regs->config_reg); + + /* + * Doorbell array must be physically contiguous + * and 64-byte (cache line) aligned. + */ + pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa), + &dma, GFP_KERNEL); + if (!pdev->dcbaa) + return -ENOMEM; + + pdev->dcbaa->dma = dma; + + cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr); + + /* + * Initialize the ring segment pool. The ring must be a contiguous + * structure comprised of TRBs. 
The TRBs must be 16 byte aligned,
+	 * however, the command ring segment needs 64-byte aligned segments
+	 * and our use of dma addresses in the trb_address_map radix tree needs
+	 * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
+	 * alignment needed.
+	 */
+	pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
+					     TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
+					     page_size);
+	if (!pdev->segment_pool)
+		goto release_dcbaa;
+
+	pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
+					    CDNSP_CTX_SIZE, 64, page_size);
+	if (!pdev->device_pool)
+		goto destroy_segment_pool;
+
+	/* Set up the command ring to have one segment for now. */
+	pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
+	if (!pdev->cmd_ring)
+		goto destroy_device_pool;
+
+	/* Set the address in the Command Ring Control register. */
+	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+		 pdev->cmd_ring->cycle_state;
+	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+
+	val = readl(&pdev->cap_regs->db_off);
+	val &= DBOFF_MASK;
+	pdev->dba = (void __iomem *)pdev->cap_regs + val;
+
+	/* Set ir_set to interrupt register set 0. */
+	pdev->ir_set = &pdev->run_regs->ir_set[0];
+
+	/*
+	 * Event ring setup: Allocate a normal ring, but also setup
+	 * the event ring segment table (ERST).
+	 */
+	pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
+					    0, GFP_KERNEL);
+	if (!pdev->event_ring)
+		goto free_cmd_ring;
+
+	ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
+	if (ret)
+		goto free_event_ring;
+
+	/* Set ERST count with the number of entries in the segment table. */
+	val = readl(&pdev->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	writel(val, &pdev->ir_set->erst_size);
+
+	/* Set the segment table base address. */
+	val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
+	cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
+
+	/* Set the event ring dequeue address. */
+	cdnsp_set_event_deq(pdev);
+
+	ret = cdnsp_setup_port_arrays(pdev);
+	if (ret)
+		goto free_erst;
+
+	ret = cdnsp_alloc_priv_device(pdev);
+	if (ret) {
+		dev_err(pdev->dev,
+			"Could not allocate cdnsp_device data structures\n");
+		goto free_erst;
+	}
+
+	return 0;
+
+free_erst:
+	cdnsp_free_erst(pdev, &pdev->erst);
+free_event_ring:
+	cdnsp_ring_free(pdev, pdev->event_ring);
+free_cmd_ring:
+	cdnsp_ring_free(pdev, pdev->cmd_ring);
+destroy_device_pool:
+	dma_pool_destroy(pdev->device_pool);
+destroy_segment_pool:
+	dma_pool_destroy(pdev->segment_pool);
+release_dcbaa:
+	dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
+			  pdev->dcbaa->dma);
+
+	cdnsp_reset(pdev);
+
+	return ret;
+}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
new file mode 100644
index 000000000..a85db23fa
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCI Glue driver.
+ *
+ * Copyright (C) 2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+#define PCI_BAR_HOST		0
+#define PCI_BAR_OTG		0
+#define PCI_BAR_DEV		2
+
+#define PCI_DEV_FN_HOST_DEVICE	0
+#define PCI_DEV_FN_OTG		1
+
+#define PCI_DRIVER_NAME		"cdns-pci-usbssp"
+#define PLAT_DRIVER_NAME	"cdns-usbssp"
+
+#define CDNS_VENDOR_ID		0x17cd
+#define CDNS_DEVICE_ID		0x0200
+#define CDNS_DRD_ID		0x0100
+#define CDNS_DRD_IF		(PCI_CLASS_SERIAL_USB << 8 | 0x80)
+
+static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+{
+	/*
+	 * Gets the second function.
+	 * The platform has two functions. The first keeps resources for
+	 * Host/Device while the second keeps resources for DRD/OTG.
+	 */
+	if (pdev->device == CDNS_DEVICE_ID)
+		return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
+	else if (pdev->device == CDNS_DRD_ID)
+		return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
+
+	return NULL;
+}
+
+static int cdnsp_pci_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	struct pci_dev *func;
+	struct resource *res;
+	struct cdns *cdnsp;
+	int ret;
+
+	/*
+	 * For GADGET/HOST PCI the (devfn) function number is 0,
+	 * for OTG PCI the (devfn) function number is 1.
+	 */
+	if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+		    pdev->devfn != PCI_DEV_FN_OTG))
+		return -EINVAL;
+
+	func = cdnsp_get_second_fun(pdev);
+	if (!func)
+		return -EINVAL;
+
+	if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
+	    pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
+		ret = -EINVAL;
+		goto put_pci;
+	}
+
+	ret = pcim_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
+		goto put_pci;
+	}
+
+	pci_set_master(pdev);
+	if (pci_is_enabled(func)) {
+		cdnsp = pci_get_drvdata(func);
+	} else {
+		cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
+		if (!cdnsp) {
+			ret = -ENOMEM;
+			goto disable_pci;
+		}
+	}
+
+	/* For GADGET the device function number is 0. */
+	if (pdev->devfn == 0) {
+		resource_size_t rsrc_start, rsrc_len;
+
+		/* Function 0: host(BAR_0) + device(BAR_1). */
+		dev_dbg(dev, "Initialize resources\n");
+		rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
+		rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
+		res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
+		if (!res) {
+			dev_dbg(dev, "controller already in use\n");
+			ret = -EBUSY;
+			goto free_cdnsp;
+		}
+
+		cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len);
+		if (!cdnsp->dev_regs) {
+			dev_dbg(dev, "error mapping memory\n");
+			ret = -EFAULT;
+			goto free_cdnsp;
+		}
+
+		cdnsp->dev_irq = pdev->irq;
+		dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
+			&rsrc_start);
+
+		res = &cdnsp->xhci_res[0];
+		res->start = pci_resource_start(pdev, PCI_BAR_HOST);
+		res->end = pci_resource_end(pdev, PCI_BAR_HOST);
+		res->name = "xhci";
+		res->flags = IORESOURCE_MEM;
+		dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n",
+			&res->start);
+
+		/* Interrupt for XHCI. */
+		res = &cdnsp->xhci_res[1];
+		res->start = pdev->irq;
+		res->name = "host";
+		res->flags = IORESOURCE_IRQ;
+	} else {
+		res = &cdnsp->otg_res;
+		res->start = pci_resource_start(pdev, PCI_BAR_OTG);
+		res->end = pci_resource_end(pdev, PCI_BAR_OTG);
+		res->name = "otg";
+		res->flags = IORESOURCE_MEM;
+		dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n",
+			&res->start);
+
+		/* Interrupt for OTG/DRD.
*/ + cdnsp->otg_irq = pdev->irq; + } + + if (pci_is_enabled(func)) { + cdnsp->dev = dev; + cdnsp->gadget_init = cdnsp_gadget_init; + + ret = cdns_init(cdnsp); + if (ret) + goto free_cdnsp; + } + + pci_set_drvdata(pdev, cdnsp); + + device_wakeup_enable(&pdev->dev); + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +free_cdnsp: + if (!pci_is_enabled(func)) + kfree(cdnsp); + +disable_pci: + pci_disable_device(pdev); + +put_pci: + pci_dev_put(func); + + return ret; +} + +static void cdnsp_pci_remove(struct pci_dev *pdev) +{ + struct cdns *cdnsp; + struct pci_dev *func; + + func = cdnsp_get_second_fun(pdev); + cdnsp = (struct cdns *)pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + if (!pci_is_enabled(func)) { + kfree(cdnsp); + goto pci_put; + } + + cdns_remove(cdnsp); + +pci_put: + pci_dev_put(func); +} + +static int __maybe_unused cdnsp_pci_suspend(struct device *dev) +{ + struct cdns *cdns = dev_get_drvdata(dev); + + return cdns_suspend(cdns); +} + +static int __maybe_unused cdnsp_pci_resume(struct device *dev) +{ + struct cdns *cdns = dev_get_drvdata(dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&cdns->lock, flags); + ret = cdns_resume(cdns); + spin_unlock_irqrestore(&cdns->lock, flags); + cdns_set_active(cdns, 1); + + return ret; +} + +static const struct dev_pm_ops cdnsp_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume) +}; + +static const struct pci_device_id cdnsp_pci_ids[] = { + { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID }, + { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + CDNS_DRD_IF, PCI_ANY_ID }, + { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID, + CDNS_DRD_IF, PCI_ANY_ID }, + { 0, } +}; + +static struct pci_driver cdnsp_pci_driver = { + .name = "cdnsp-pci", + .id_table = &cdnsp_pci_ids[0], + .probe = cdnsp_pci_probe, + .remove = cdnsp_pci_remove, + .driver = { + .pm = &cdnsp_pci_pm_ops, + } +}; + +module_pci_driver(cdnsp_pci_driver); +MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids); + +MODULE_ALIAS("pci:cdnsp"); +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence CDNSP PCI driver"); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c new file mode 100644 index 000000000..8a2cc0405 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -0,0 +1,2484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + * + * Code based on Linux XHCI driver. + * Origin: Copyright (C) 2008 Intel Corp + */ + +/* + * Ring initialization rules: + * 1. Each segment is initialized to zero, except for link TRBs. + * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or + * Consumer Cycle State (CCS), depending on ring function. + * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment. + * + * Ring behavior rules: + * 1. A ring is empty if enqueue == dequeue. This means there will always be at + * least one free TRB in the ring. This is useful if you want to turn that + * into a link TRB and expand the ring. + * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a + * link TRB, then load the pointer with the address in the link TRB. If the + * link TRB had its toggle bit set, you may need to update the ring cycle + * state (see cycle bit rules). 
You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify consumer. If SW is producer, it rings the doorbell for command
+ *    and endpoint rings. If controller is the producer for the event ring,
+ *    it generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ *    updates event ring dequeue pointer. Controller is the consumer for the
+ *    command and endpoint rings; it generates events on the event ring
+ *    for these.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include "cdnsp-trace.h"
+#include "cdnsp-gadget.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+				 union cdnsp_trb *trb)
+{
+	unsigned long segment_offset = trb - seg->trbs;
+
+	if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
+		return 0;
+
+	return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
+{
+	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
+static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
+{
+	return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
+{
+	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+}
+
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+			    struct cdnsp_segment *seg,
+			    union cdnsp_trb *trb)
+{
+	return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
+{
+	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
+{
+	if (cdnsp_trb_is_link(trb)) {
+		/* Unchain chained link TRBs. */
+		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+	} else {
+		trb->generic.field[0] = 0;
+		trb->generic.field[1] = 0;
+		trb->generic.field[2] = 0;
+		/* Preserve only the cycle bit of this TRB. */
+		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+	}
+}
+
+/*
+ * Updates trb to point to the next TRB in the ring, and updates seg if the
+ * next TRB is in a new segment. This does not skip over link TRBs, and it does
+ * not affect the ring dequeue or enqueue pointers.
+ */
+static void cdnsp_next_trb(struct cdnsp_device *pdev,
+			   struct cdnsp_ring *ring,
+			   struct cdnsp_segment **seg,
+			   union cdnsp_trb **trb)
+{
+	if (cdnsp_trb_is_link(*trb)) {
+		*seg = (*seg)->next;
+		*trb = ((*seg)->trbs);
+	} else {
+		(*trb)++;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+	/* Event ring doesn't have link TRBs, check for last TRB. */
+	if (ring->type == TYPE_EVENT) {
+		if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+			ring->dequeue++;
+			goto out;
+		}
+
+		if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+			ring->cycle_state ^= 1;
+
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		goto out;
+	}
+
+	/* All other rings have link TRBs. */
+	if (!cdnsp_trb_is_link(ring->dequeue)) {
+		ring->dequeue++;
+		ring->num_trbs_free++;
+	}
+	while (cdnsp_trb_is_link(ring->dequeue)) {
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+	}
+out:
+	trace_cdnsp_inc_deq(ring);
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
+ */
+static void cdnsp_inc_enq(struct cdnsp_device *pdev,
+			  struct cdnsp_ring *ring,
+			  bool more_trbs_coming)
+{
+	union cdnsp_trb *next;
+	u32 chain;
+
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+	/* If this is not an event ring, there is one less usable TRB. */
+	if (!cdnsp_trb_is_link(ring->enqueue))
+		ring->num_trbs_free--;
+	next = ++(ring->enqueue);
+
+	/* Advance the enqueue pointer past any link TRBs. */
+	while (cdnsp_trb_is_link(next)) {
+		/*
+		 * If the caller doesn't plan on enqueuing more TDs before
+		 * ringing the doorbell, then we don't want to give the link TRB
+		 * to the hardware just yet. We'll give the link TRB back in
+		 * cdnsp_prepare_ring() just before we enqueue the TD at the
+		 * top of the ring.
+		 */
+		if (!chain && !more_trbs_coming)
+			break;
+
+		next->link.control &= cpu_to_le32(~TRB_CHAIN);
+		next->link.control |= cpu_to_le32(chain);
+
+		/* Give this link TRB to the hardware. */
+		wmb();
+		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (cdnsp_link_trb_toggles_cycle(next))
+			ring->cycle_state ^= 1;
+
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+
+	trace_cdnsp_inc_enq(ring);
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * the enqueue pointer will not advance into the dequeue segment.
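+ *
+ * As a worked example (hypothetical numbers): with num_trbs_free = 10 and a
+ * dequeue pointer sitting 5 TRBs into its segment, a request for 8 TRBs on a
+ * transfer ring fails the second check below (10 < 8 + 5) even though it
+ * passes the first, because the enqueue pointer would cross into the
+ * dequeue segment.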
+ */ +static bool cdnsp_room_on_ring(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + unsigned int num_trbs) +{ + int num_trbs_in_deq_seg; + + if (ring->num_trbs_free < num_trbs) + return false; + + if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) { + num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs; + + if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg) + return false; + } + + return true; +} + +/* + * Workaround for L1: controller has issue with resuming from L1 after + * setting doorbell for endpoint during L1 state. This function forces + * resume signal in such case. + */ +static void cdnsp_force_l0_go(struct cdnsp_device *pdev) +{ + if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable) + cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0); +} + +/* Ring the doorbell after placing a command on the ring. */ +void cdnsp_ring_cmd_db(struct cdnsp_device *pdev) +{ + writel(DB_VALUE_CMD, &pdev->dba->cmd_db); +} + +/* + * Ring the doorbell after placing a transfer on the ring. + * Returns true if doorbell was set, otherwise false. + */ +static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + unsigned int stream_id) +{ + __le32 __iomem *reg_addr = &pdev->dba->ep_db; + unsigned int ep_state = pep->ep_state; + unsigned int db_value; + + /* + * Don't ring the doorbell for this endpoint if endpoint is halted or + * disabled. + */ + if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED)) + return false; + + /* For stream capable endpoints driver can ring doorbell only twice. */ + if (pep->ep_state & EP_HAS_STREAMS) { + if (pep->stream_info.drbls_count >= 2) + return false; + + pep->stream_info.drbls_count++; + } + + pep->ep_state &= ~EP_STOPPED; + + if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE && + !pdev->ep0_expect_in) + db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id); + else + db_value = DB_VALUE(pep->idx, stream_id); + + trace_cdnsp_tr_drbl(pep, stream_id); + + writel(db_value, reg_addr); + + cdnsp_force_l0_go(pdev); + + /* Doorbell was set. */ + return true; +} + +/* + * Get the right ring for the given pep and stream_id. + * If the endpoint supports streams, boundary check the USB request's stream ID. + * If the endpoint doesn't support streams, return the singular endpoint ring. + */ +static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + unsigned int stream_id) +{ + if (!(pep->ep_state & EP_HAS_STREAMS)) + return pep->ring; + + if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) { + dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n", + pep->name, stream_id); + return NULL; + } + + return pep->stream_info.stream_rings[stream_id]; +} + +static struct cdnsp_ring * + cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev, + struct cdnsp_request *preq) +{ + return cdnsp_get_transfer_ring(pdev, preq->pep, + preq->request.stream_id); +} + +/* Ring the doorbell for any rings with pending requests. */ +void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev, + struct cdnsp_ep *pep) +{ + struct cdnsp_stream_info *stream_info; + unsigned int stream_id; + int ret; + + if (pep->ep_state & EP_DIS_IN_RROGRESS) + return; + + /* A ring has pending Request if its TD list is not empty. 
*/ + if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) { + if (pep->ring && !list_empty(&pep->ring->td_list)) + cdnsp_ring_ep_doorbell(pdev, pep, 0); + return; + } + + stream_info = &pep->stream_info; + + for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) { + struct cdnsp_td *td, *td_temp; + struct cdnsp_ring *ep_ring; + + if (stream_info->drbls_count >= 2) + return; + + ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id); + if (!ep_ring) + continue; + + if (!ep_ring->stream_active || ep_ring->stream_rejected) + continue; + + list_for_each_entry_safe(td, td_temp, &ep_ring->td_list, + td_list) { + if (td->drbl) + continue; + + ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id); + if (ret) + td->drbl = 1; + } + } +} + +/* + * Get the hw dequeue pointer controller stopped on, either directly from the + * endpoint context, or if streams are in use from the stream context. + * The returned hw_dequeue contains the lowest four bits with cycle state + * and possible stream context type. + */ +static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev, + unsigned int ep_index, + unsigned int stream_id) +{ + struct cdnsp_stream_ctx *st_ctx; + struct cdnsp_ep *pep; + + pep = &pdev->eps[stream_id]; + + if (pep->ep_state & EP_HAS_STREAMS) { + st_ctx = &pep->stream_info.stream_ctx_array[stream_id]; + return le64_to_cpu(st_ctx->stream_ring); + } + + return le64_to_cpu(pep->out_ctx->deq); +} + +/* + * Move the controller endpoint ring dequeue pointer past cur_td. + * Record the new state of the controller endpoint ring dequeue segment, + * dequeue pointer, and new consumer cycle state in state. + * Update internal representation of the ring's dequeue pointer. + * + * We do this in three jumps: + * - First we update our new ring state to be the same as when the + * controller stopped. + * - Then we traverse the ring to find the segment that contains + * the last TRB in the TD. We toggle the controller new cycle state + * when we pass any link TRBs with the toggle cycle bit set. + * - Finally we move the dequeue state one TRB further, toggling the cycle bit + * if we've moved it past a link TRB with the toggle cycle bit set. + */ +static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + unsigned int stream_id, + struct cdnsp_td *cur_td, + struct cdnsp_dequeue_state *state) +{ + bool td_last_trb_found = false; + struct cdnsp_segment *new_seg; + struct cdnsp_ring *ep_ring; + union cdnsp_trb *new_deq; + bool cycle_found = false; + u64 hw_dequeue; + + ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id); + if (!ep_ring) + return; + + /* + * Dig out the cycle state saved by the controller during the + * stop endpoint command. + */ + hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id); + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + state->new_cycle_state = hw_dequeue & 0x1; + state->stream_id = stream_id; + + /* + * We want to find the pointer, segment and cycle state of the new trb + * (the one after current TD's last_trb). We know the cycle state at + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are + * found. 
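+ *
+ * The walk below tracks two flags; as a sketch of the possible states:
+ *
+ *	cycle_found  td_last_trb_found  meaning
+ *	false        false              neither point reached yet, keep going
+ *	true         false              cycle known, TD end still ahead
+ *	false        true               TD end seen, hw_dequeue still ahead
+ *	true         true               done: new_deq/new_seg are the answer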
+ */ + do { + if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq) + == (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + + if (td_last_trb_found) + break; + } + + if (new_deq == cur_td->last_trb) + td_last_trb_found = true; + + if (cycle_found && cdnsp_trb_is_link(new_deq) && + cdnsp_link_trb_toggles_cycle(new_deq)) + state->new_cycle_state ^= 0x1; + + cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq); + + /* Search wrapped around, bail out. */ + if (new_deq == pep->ring->dequeue) { + dev_err(pdev->dev, + "Error: Failed finding new dequeue state\n"); + state->new_deq_seg = NULL; + state->new_deq_ptr = NULL; + return; + } + + } while (!cycle_found || !td_last_trb_found); + + state->new_deq_seg = new_seg; + state->new_deq_ptr = new_deq; + + trace_cdnsp_new_deq_state(state); +} + +/* + * flip_cycle means flip the cycle bit of all but the first and last TRB. + * (The last TRB actually points to the ring enqueue pointer, which is not part + * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. + */ +static void cdnsp_td_to_noop(struct cdnsp_device *pdev, + struct cdnsp_ring *ep_ring, + struct cdnsp_td *td, + bool flip_cycle) +{ + struct cdnsp_segment *seg = td->start_seg; + union cdnsp_trb *trb = td->first_trb; + + while (1) { + cdnsp_trb_to_noop(trb, TRB_TR_NOOP); + + /* flip cycle if asked to */ + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); + + if (trb == td->last_trb) + break; + + cdnsp_next_trb(pdev, ep_ring, &seg, &trb); + } +} + +/* + * This TD is defined by the TRBs starting at start_trb in start_seg and ending + * at end_trb, which may be in another segment. If the suspect DMA address is a + * TRB in this TD, this function returns that TRB's segment. Otherwise it + * returns 0. 
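+ *
+ * For example (illustrative addresses, 4 KB segment at DMA 0x1000): for a
+ * TD that wraps around the segment end, with start_trb at 0x1200 and
+ * end_trb at 0x1040, a suspect_dma of 0x1230 (tail part) or 0x1010 (head
+ * part) is a hit, while 0x1100 lies in the gap between them and is not
+ * part of the TD.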
+ */ +static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev, + struct cdnsp_segment *start_seg, + union cdnsp_trb *start_trb, + union cdnsp_trb *end_trb, + dma_addr_t suspect_dma) +{ + struct cdnsp_segment *cur_seg; + union cdnsp_trb *temp_trb; + dma_addr_t end_seg_dma; + dma_addr_t end_trb_dma; + dma_addr_t start_dma; + + start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb); + cur_seg = start_seg; + + do { + if (start_dma == 0) + return NULL; + + temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1]; + /* We may get an event for a Link TRB in the middle of a TD */ + end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb); + /* If the end TRB isn't in this segment, this is set to 0 */ + end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb); + + trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma, + end_trb_dma, cur_seg->dma, + end_seg_dma); + + if (end_trb_dma > 0) { + /* + * The end TRB is in this segment, so suspect should + * be here + */ + if (start_dma <= end_trb_dma) { + if (suspect_dma >= start_dma && + suspect_dma <= end_trb_dma) { + return cur_seg; + } + } else { + /* + * Case for one segment with a + * TD wrapped around to the top + */ + if ((suspect_dma >= start_dma && + suspect_dma <= end_seg_dma) || + (suspect_dma >= cur_seg->dma && + suspect_dma <= end_trb_dma)) { + return cur_seg; + } + } + + return NULL; + } + + /* Might still be somewhere in this segment */ + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) + return cur_seg; + + cur_seg = cur_seg->next; + start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); + } while (cur_seg != start_seg); + + return NULL; +} + +static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev, + struct cdnsp_ring *ring, + struct cdnsp_td *td) +{ + struct cdnsp_segment *seg = td->bounce_seg; + struct cdnsp_request *preq; + size_t len; + + if (!seg) + return; + + preq = td->preq; + + trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs, + seg->bounce_dma, 0); + + if (!preq->direction) { + dma_unmap_single(pdev->dev, seg->bounce_dma, + ring->bounce_buf_len, DMA_TO_DEVICE); + return; + } + + dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + + /* For in transfers we need to copy the data from bounce to sg */ + len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs, + seg->bounce_buf, seg->bounce_len, + seg->bounce_offs); + if (len != seg->bounce_len) + dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); + + seg->bounce_len = 0; + seg->bounce_offs = 0; +} + +static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + struct cdnsp_dequeue_state *deq_state) +{ + struct cdnsp_ring *ep_ring; + int ret; + + if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) { + cdnsp_ring_doorbell_for_active_rings(pdev, pep); + return 0; + } + + cdnsp_queue_new_dequeue_state(pdev, pep, deq_state); + cdnsp_ring_cmd_db(pdev); + ret = cdnsp_wait_for_cmd_compl(pdev); + + trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx)); + trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx); + + /* + * Update the ring's dequeue segment and dequeue pointer + * to reflect the new position. 
+ */ + ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id); + + if (cdnsp_trb_is_link(ep_ring->dequeue)) { + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + + while (ep_ring->dequeue != deq_state->new_deq_ptr) { + ep_ring->num_trbs_free++; + ep_ring->dequeue++; + + if (cdnsp_trb_is_link(ep_ring->dequeue)) { + if (ep_ring->dequeue == deq_state->new_deq_ptr) + break; + + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + } + + /* + * Probably there was TIMEOUT during handling Set Dequeue Pointer + * command. It's critical error and controller will be stopped. + */ + if (ret) + return -ESHUTDOWN; + + /* Restart any rings with pending requests */ + cdnsp_ring_doorbell_for_active_rings(pdev, pep); + + return 0; +} + +int cdnsp_remove_request(struct cdnsp_device *pdev, + struct cdnsp_request *preq, + struct cdnsp_ep *pep) +{ + struct cdnsp_dequeue_state deq_state; + struct cdnsp_td *cur_td = NULL; + struct cdnsp_ring *ep_ring; + struct cdnsp_segment *seg; + int status = -ECONNRESET; + int ret = 0; + u64 hw_deq; + + memset(&deq_state, 0, sizeof(deq_state)); + + trace_cdnsp_remove_request(pep->out_ctx); + trace_cdnsp_remove_request_td(preq); + + cur_td = &preq->td; + ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); + + /* + * If we stopped on the TD we need to cancel, then we have to + * move the controller endpoint ring dequeue pointer past + * this TD. + */ + hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id); + hw_deq &= ~0xf; + + seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, + cur_td->last_trb, hw_deq); + + if (seg && (pep->ep_state & EP_ENABLED)) + cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id, + cur_td, &deq_state); + else + cdnsp_td_to_noop(pdev, ep_ring, cur_td, false); + + /* + * The event handler won't see a completion for this TD anymore, + * so remove it from the endpoint ring's TD list. + */ + list_del_init(&cur_td->td_list); + ep_ring->num_tds--; + pep->stream_info.td_count--; + + /* + * During disconnecting all endpoint will be disabled so we don't + * have to worry about updating dequeue pointer. 
+ */ + if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) { + status = -ESHUTDOWN; + ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state); + } + + cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td); + cdnsp_gadget_giveback(pep, cur_td->preq, status); + + return ret; +} + +static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id) +{ + struct cdnsp_port *port = pdev->active_port; + u8 old_port = 0; + + if (port && port->port_num == port_id) + return 0; + + if (port) + old_port = port->port_num; + + if (port_id == pdev->usb2_port.port_num) { + port = &pdev->usb2_port; + } else if (port_id == pdev->usb3_port.port_num) { + port = &pdev->usb3_port; + } else { + dev_err(pdev->dev, "Port event with invalid port ID %d\n", + port_id); + return -EINVAL; + } + + if (port_id != old_port) { + cdnsp_disable_slot(pdev); + pdev->active_port = port; + cdnsp_enable_slot(pdev); + } + + if (port_id == pdev->usb2_port.port_num) + cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1); + else + writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1), + &pdev->usb3_port.regs->portpmsc); + + return 0; +} + +static void cdnsp_handle_port_status(struct cdnsp_device *pdev, + union cdnsp_trb *event) +{ + struct cdnsp_port_regs __iomem *port_regs; + u32 portsc, cmd_regs; + bool port2 = false; + u32 link_state; + u32 port_id; + + /* Port status change events always have a successful completion code */ + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + dev_err(pdev->dev, "ERR: incorrect PSC event\n"); + + port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); + + if (cdnsp_update_port_id(pdev, port_id)) + goto cleanup; + + port_regs = pdev->active_port->regs; + + if (port_id == pdev->usb2_port.port_num) + port2 = true; + +new_event: + portsc = readl(&port_regs->portsc); + writel(cdnsp_port_state_to_neutral(portsc) | + (portsc & PORT_CHANGE_BITS), &port_regs->portsc); + + trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc); + + pdev->gadget.speed = cdnsp_port_speed(portsc); + link_state = portsc & PORT_PLS_MASK; + + /* Port Link State change detected. */ + if ((portsc & PORT_PLC)) { + if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) && + link_state == XDEV_RESUME) { + cmd_regs = readl(&pdev->op_regs->command); + if (!(cmd_regs & CMD_R_S)) + goto cleanup; + + if (DEV_SUPERSPEED_ANY(portsc)) { + cdnsp_set_link_state(pdev, &port_regs->portsc, + XDEV_U0); + + cdnsp_resume_gadget(pdev); + } + } + + if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) && + link_state == XDEV_U0) { + pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING; + + cdnsp_force_header_wakeup(pdev, 1); + cdnsp_ring_cmd_db(pdev); + cdnsp_wait_for_cmd_compl(pdev); + } + + if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 && + !DEV_SUPERSPEED_ANY(portsc)) + cdnsp_resume_gadget(pdev); + + if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3) + cdnsp_suspend_gadget(pdev); + + pdev->link_state = link_state; + } + + if (portsc & PORT_CSC) { + /* Detach device. */ + if (pdev->gadget.connected && !(portsc & PORT_CONNECT)) + cdnsp_disconnect_gadget(pdev); + + /* Attach device. */ + if (portsc & PORT_CONNECT) { + if (!port2) + cdnsp_irq_reset(pdev); + + usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED); + } + } + + /* Port reset. 
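+ * Like the other change bits handled in this loop, PORT_RC and PORT_WRC
+ * are write-1-to-clear; re-writing the just-read change bits, as done at
+ * the top of the loop (excerpt):
+ *
+ *	writel(cdnsp_port_state_to_neutral(portsc) |
+ *	       (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
+ *
+ * acknowledges them, so the readl() check below can detect new events.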
+ */
+ if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
+ cdnsp_irq_reset(pdev);
+ pdev->u1_allowed = 0;
+ pdev->u2_allowed = 0;
+ pdev->may_wakeup = 0;
+ }
+
+ /* Over-current change; PORT_OCC assumed defined with the other PORT_* change bits (original tested PORT_CEC twice). */
+ if (portsc & PORT_OCC)
+ dev_err(pdev->dev, "Port Over Current detected\n");
+
+ if (portsc & PORT_CEC)
+ dev_err(pdev->dev, "Port Configure Error detected\n");
+
+ if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
+ goto new_event;
+
+cleanup:
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+}
+
+static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_ring *ep_ring,
+ int *status)
+{
+ struct cdnsp_request *preq = td->preq;
+
+ /* If a bounce buffer was used to align this TD, unmap it. */
+ cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
+
+ /*
+ * If the controller said we transferred more data than the buffer
+ * length, play it safe and say we didn't transfer anything.
+ */
+ if (preq->request.actual > preq->request.length) {
+ preq->request.actual = 0;
+ *status = 0;
+ }
+
+ list_del_init(&td->td_list);
+ ep_ring->num_tds--;
+ preq->pep->stream_info.td_count--;
+
+ cdnsp_gadget_giveback(preq->pep, preq, *status);
+}
+
+static void cdnsp_finish_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *ep,
+ int *status)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
+ /*
+ * The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ return;
+ }
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_td_cleanup(pdev, td, ep_ring, status);
+}
+
+/* Sum TRB lengths from the ring dequeue up to stop_trb, _excluding_ stop_trb. */
+static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ union cdnsp_trb *stop_trb)
+{
+ struct cdnsp_segment *seg = ring->deq_seg;
+ union cdnsp_trb *trb = ring->dequeue;
+ u32 sum;
+
+ for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
+ if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
+ sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+ }
+ return sum;
+}
+
+static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id,
+ int start_cycle,
+ struct cdnsp_generic_trb *start_trb)
+{
+ /*
+ * Pass all the TRBs to the hardware at once and make sure this write
+ * isn't reordered.
+ */
+ wmb();
+
+ if (start_cycle)
+ start_trb->field[3] |= cpu_to_le32(start_cycle);
+ else
+ start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+ if ((pep->ep_state & EP_HAS_STREAMS) &&
+ !pep->stream_info.first_prime_det) {
+ trace_cdnsp_wait_for_prime(pep, stream_id);
+ return 0;
+ }
+
+ return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+}
+
+/*
+ * Process control TDs, update the USB request status and actual_length.
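+ *
+ * For the data stage, actual_length follows directly from the event's
+ * residue; e.g. (illustrative numbers) a 512-byte data stage reported
+ * with remaining = 112 completes with actual = 512 - 112 = 400 bytes.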
+ */ +static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + union cdnsp_trb *event_trb, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *pep, + int *status) +{ + struct cdnsp_ring *ep_ring; + u32 remaining; + u32 trb_type; + + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3])); + ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + /* + * if on data stage then update the actual_length of the USB + * request and flag it as set, so it won't be overwritten in the event + * for the last TRB. + */ + if (trb_type == TRB_DATA) { + td->request_length_set = true; + td->preq->request.actual = td->preq->request.length - remaining; + } + + /* at status stage */ + if (!td->request_length_set) + td->preq->request.actual = td->preq->request.length; + + if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 && + pdev->three_stage_setup) { + td = list_entry(ep_ring->td_list.next, struct cdnsp_td, + td_list); + pdev->ep0_stage = CDNSP_STATUS_STAGE; + + cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state, + &td->last_trb->generic); + return; + } + + *status = 0; + + cdnsp_finish_td(pdev, td, event, pep, status); +} + +/* + * Process isochronous tds, update usb request status and actual_length. + */ +static void cdnsp_process_isoc_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + union cdnsp_trb *ep_trb, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *pep, + int status) +{ + struct cdnsp_request *preq = td->preq; + u32 remaining, requested, ep_trb_len; + bool sum_trbs_for_length = false; + struct cdnsp_ring *ep_ring; + u32 trb_comp_code; + u32 td_length; + + ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + + requested = preq->request.length; + + /* handle completion code */ + switch (trb_comp_code) { + case COMP_SUCCESS: + preq->request.status = 0; + break; + case COMP_SHORT_PACKET: + preq->request.status = 0; + sum_trbs_for_length = true; + break; + case COMP_ISOCH_BUFFER_OVERRUN: + case COMP_BABBLE_DETECTED_ERROR: + preq->request.status = -EOVERFLOW; + break; + case COMP_STOPPED: + sum_trbs_for_length = true; + break; + case COMP_STOPPED_SHORT_PACKET: + /* field normally containing residue now contains transferred */ + preq->request.status = 0; + requested = remaining; + break; + case COMP_STOPPED_LENGTH_INVALID: + requested = 0; + remaining = 0; + break; + default: + sum_trbs_for_length = true; + preq->request.status = -1; + break; + } + + if (sum_trbs_for_length) { + td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb); + td_length += ep_trb_len - remaining; + } else { + td_length = requested; + } + + td->preq->request.actual += td_length; + + cdnsp_finish_td(pdev, td, event, pep, &status); +} + +static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *pep, + int status) +{ + struct cdnsp_ring *ep_ring; + + ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + td->preq->request.status = -EXDEV; + td->preq->request.actual = 0; + + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + cdnsp_inc_deq(pdev, ep_ring); + + cdnsp_inc_deq(pdev, ep_ring); + + cdnsp_td_cleanup(pdev, td, ep_ring, &status); +} 
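+
+/*
+ * Worked example of the length accounting shared by the TD handlers here
+ * (illustrative numbers only): for a TD queued as three 1024-byte TRBs
+ * that completes short on the second TRB with remaining = 300, the
+ * transferred length is
+ *
+ *	cdnsp_sum_trb_lengths()    ->   1024  (TRBs before the event TRB)
+ *	+ ep_trb_len - remaining   -> + 1024 - 300
+ *	                           =    1748 bytes
+ *
+ * which is the value the bulk/interrupt and isoc handlers store in
+ * request.actual.
+ */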
+ +/* + * Process bulk and interrupt tds, update usb request status and actual_length. + */ +static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev, + struct cdnsp_td *td, + union cdnsp_trb *ep_trb, + struct cdnsp_transfer_event *event, + struct cdnsp_ep *ep, + int *status) +{ + u32 remaining, requested, ep_trb_len; + struct cdnsp_ring *ep_ring; + u32 trb_comp_code; + + ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + requested = td->preq->request.length; + + switch (trb_comp_code) { + case COMP_SUCCESS: + case COMP_SHORT_PACKET: + *status = 0; + break; + case COMP_STOPPED_SHORT_PACKET: + td->preq->request.actual = remaining; + goto finish_td; + case COMP_STOPPED_LENGTH_INVALID: + /* Stopped on ep trb with invalid length, exclude it. */ + ep_trb_len = 0; + remaining = 0; + break; + } + + if (ep_trb == td->last_trb) + ep_trb_len = requested - remaining; + else + ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) + + ep_trb_len - remaining; + td->preq->request.actual = ep_trb_len; + +finish_td: + ep->stream_info.drbls_count--; + + cdnsp_finish_td(pdev, td, event, ep, status); +} + +static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev, + struct cdnsp_transfer_event *event) +{ + struct cdnsp_generic_trb *generic; + struct cdnsp_ring *ep_ring; + struct cdnsp_ep *pep; + int cur_stream; + int ep_index; + int host_sid; + int dev_sid; + + generic = (struct cdnsp_generic_trb *)event; + ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0])); + host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2])); + + pep = &pdev->eps[ep_index]; + + if (!(pep->ep_state & EP_HAS_STREAMS)) + return; + + if (host_sid == STREAM_PRIME_ACK) { + pep->stream_info.first_prime_det = 1; + for (cur_stream = 1; cur_stream < pep->stream_info.num_streams; + cur_stream++) { + ep_ring = pep->stream_info.stream_rings[cur_stream]; + ep_ring->stream_active = 1; + ep_ring->stream_rejected = 0; + } + } + + if (host_sid == STREAM_REJECTED) { + struct cdnsp_td *td, *td_temp; + + pep->stream_info.drbls_count--; + ep_ring = pep->stream_info.stream_rings[dev_sid]; + ep_ring->stream_active = 0; + ep_ring->stream_rejected = 1; + + list_for_each_entry_safe(td, td_temp, &ep_ring->td_list, + td_list) { + td->drbl = 0; + } + } + + cdnsp_ring_doorbell_for_active_rings(pdev, pep); +} + +/* + * If this function returns an error condition, it means it got a Transfer + * event with a corrupted TRB DMA address or endpoint is disabled. 
+ */
+static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
+ struct cdnsp_transfer_event *event)
+{
+ const struct usb_endpoint_descriptor *desc;
+ bool handling_skipped_tds = false;
+ struct cdnsp_segment *ep_seg;
+ struct cdnsp_ring *ep_ring;
+ int status = -EINPROGRESS;
+ union cdnsp_trb *ep_trb;
+ dma_addr_t ep_trb_dma;
+ struct cdnsp_ep *pep;
+ struct cdnsp_td *td;
+ u32 trb_comp_code;
+ int invalidate;
+ int ep_index;
+
+ invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ ep_trb_dma = le64_to_cpu(event->buffer);
+
+ pep = &pdev->eps[ep_index];
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+
+ /*
+ * If the device is disconnected then all requests will be dequeued
+ * by the upper layers as part of the disconnect sequence.
+ * We don't want to handle such an event, to avoid racing.
+ */
+ if (invalidate || !pdev->gadget.connected)
+ goto cleanup;
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
+ trace_cdnsp_ep_disabled(pep->out_ctx);
+ goto err_out;
+ }
+
+ /* Some transfer events don't always point to a TRB. */
+ if (!ep_ring) {
+ switch (trb_comp_code) {
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ case COMP_INVALID_STREAM_ID_ERROR:
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ goto cleanup;
+ default:
+ dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
+ pep->name);
+ goto err_out;
+ }
+ }
+
+ /* Look for some error cases that need special treatment. */
+ switch (trb_comp_code) {
+ case COMP_BABBLE_DETECTED_ERROR:
+ status = -EOVERFLOW;
+ break;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ /*
+ * When the Isoch ring is empty, the controller will generate
+ * a Ring Overrun Event for an IN Isoch endpoint or a Ring
+ * Underrun Event for an OUT Isoch endpoint.
+ */
+ goto cleanup;
+ case COMP_MISSED_SERVICE_ERROR:
+ /*
+ * On a missed service error, one or more isoc TDs may have
+ * been missed by the controller.
+ * Set the skip flag of the ep_ring; the missed TDs will be
+ * completed as short transfers the next time the ep_ring is
+ * processed.
+ */
+ pep->skip = true;
+ break;
+ }
+
+ do {
+ /*
+ * This TRB should be in the TD at the head of this ring's TD
+ * list.
+ */
+ if (list_empty(&ep_ring->td_list)) {
+ /*
+ * Don't print warnings if it's due to a stopped
+ * endpoint generating an extra completion event, or
+ * an event for the last TRB of a short TD we already
+ * got a short event for.
+ * The short TD is already removed from the TD list.
+ */
+ if (!(trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ ep_ring->last_td_was_short))
+ trace_cdnsp_trb_without_td(ep_ring,
+ (struct cdnsp_generic_trb *)event);
+
+ if (pep->skip) {
+ pep->skip = false;
+ trace_cdnsp_ep_list_empty_with_skip(pep, 0);
+ }
+
+ goto cleanup;
+ }
+
+ td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+ td_list);
+
+ /* Is this a TRB in the currently executing TD? */
+ ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
+ ep_ring->dequeue, td->last_trb,
+ ep_trb_dma);
+
+ /*
+ * Skip the Force Stopped Event. The event_trb (ep_trb_dma)
+ * of the FSE is not in the current TD pointed to by
+ * ep_ring->dequeue because the hardware dequeue pointer is
+ * still at the previous TRB of the current TD. That previous
+ * TRB may be a link TRB or the last TRB of the previous TD.
+ * The command completion handler will take care of the rest.
+ */ + if (!ep_seg && (trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { + pep->skip = false; + goto cleanup; + } + + desc = td->preq->pep->endpoint.desc; + if (!ep_seg) { + if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) { + /* Something is busted, give up! */ + dev_err(pdev->dev, + "ERROR Transfer event TRB DMA ptr not " + "part of current TD ep_index %d " + "comp_code %u\n", ep_index, + trb_comp_code); + return -EINVAL; + } + + cdnsp_skip_isoc_td(pdev, td, event, pep, status); + goto cleanup; + } + + if (trb_comp_code == COMP_SHORT_PACKET) + ep_ring->last_td_was_short = true; + else + ep_ring->last_td_was_short = false; + + if (pep->skip) { + pep->skip = false; + cdnsp_skip_isoc_td(pdev, td, event, pep, status); + goto cleanup; + } + + ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) + / sizeof(*ep_trb)]; + + trace_cdnsp_handle_transfer(ep_ring, + (struct cdnsp_generic_trb *)ep_trb); + + if (cdnsp_trb_is_noop(ep_trb)) + goto cleanup; + + if (usb_endpoint_xfer_control(desc)) + cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep, + &status); + else if (usb_endpoint_xfer_isoc(desc)) + cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep, + status); + else + cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep, + &status); +cleanup: + handling_skipped_tds = pep->skip; + + /* + * Do not update event ring dequeue pointer if we're in a loop + * processing missed tds. + */ + if (!handling_skipped_tds) + cdnsp_inc_deq(pdev, pdev->event_ring); + + /* + * If ep->skip is set, it means there are missed tds on the + * endpoint ring need to take care of. + * Process them as short transfer until reach the td pointed by + * the event. + */ + } while (handling_skipped_tds); + return 0; + +err_out: + dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n", + (unsigned long long) + cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg, + pdev->event_ring->dequeue), + lower_32_bits(le64_to_cpu(event->buffer)), + upper_32_bits(le64_to_cpu(event->buffer)), + le32_to_cpu(event->transfer_len), + le32_to_cpu(event->flags)); + return -EINVAL; +} + +/* + * This function handles all events on the event ring. + * Returns true for "possibly more events to process" (caller should call + * again), otherwise false if done. + */ +static bool cdnsp_handle_event(struct cdnsp_device *pdev) +{ + unsigned int comp_code; + union cdnsp_trb *event; + bool update_ptrs = true; + u32 cycle_bit; + int ret = 0; + u32 flags; + + event = pdev->event_ring->dequeue; + flags = le32_to_cpu(event->event_cmd.flags); + cycle_bit = (flags & TRB_CYCLE); + + /* Does the controller or driver own the TRB? */ + if (cycle_bit != pdev->event_ring->cycle_state) + return false; + + trace_cdnsp_handle_event(pdev->event_ring, &event->generic); + + /* + * Barrier between reading the TRB_CYCLE (valid) flag above and any + * reads of the event's flags/data below. + */ + rmb(); + + switch (flags & TRB_TYPE_BITMASK) { + case TRB_TYPE(TRB_COMPLETION): + /* + * Command can't be handled in interrupt context so just + * increment command ring dequeue pointer. 
+ */ + cdnsp_inc_deq(pdev, pdev->cmd_ring); + break; + case TRB_TYPE(TRB_PORT_STATUS): + cdnsp_handle_port_status(pdev, event); + update_ptrs = false; + break; + case TRB_TYPE(TRB_TRANSFER): + ret = cdnsp_handle_tx_event(pdev, &event->trans_event); + if (ret >= 0) + update_ptrs = false; + break; + case TRB_TYPE(TRB_SETUP): + pdev->ep0_stage = CDNSP_SETUP_STAGE; + pdev->setup_id = TRB_SETUPID_TO_TYPE(flags); + pdev->setup_speed = TRB_SETUP_SPEEDID(flags); + pdev->setup = *((struct usb_ctrlrequest *) + &event->trans_event.buffer); + + cdnsp_setup_analyze(pdev); + break; + case TRB_TYPE(TRB_ENDPOINT_NRDY): + cdnsp_handle_tx_nrdy(pdev, &event->trans_event); + break; + case TRB_TYPE(TRB_HC_EVENT): { + comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2])); + + switch (comp_code) { + case COMP_EVENT_RING_FULL_ERROR: + dev_err(pdev->dev, "Event Ring Full\n"); + break; + default: + dev_err(pdev->dev, "Controller error code 0x%02x\n", + comp_code); + } + + break; + } + case TRB_TYPE(TRB_MFINDEX_WRAP): + case TRB_TYPE(TRB_DRB_OVERFLOW): + break; + default: + dev_warn(pdev->dev, "ERROR unknown event type %ld\n", + TRB_FIELD_TO_TYPE(flags)); + } + + if (update_ptrs) + /* Update SW event ring dequeue pointer. */ + cdnsp_inc_deq(pdev, pdev->event_ring); + + /* + * Caller will call us again to check if there are more items + * on the event ring. + */ + return true; +} + +irqreturn_t cdnsp_thread_irq_handler(int irq, void *data) +{ + struct cdnsp_device *pdev = (struct cdnsp_device *)data; + union cdnsp_trb *event_ring_deq; + unsigned long flags; + int counter = 0; + + local_bh_disable(); + spin_lock_irqsave(&pdev->lock, flags); + + if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) { + /* + * While removing or stopping driver there may still be deferred + * not handled interrupt which should not be treated as error. + * Driver should simply ignore it. + */ + if (pdev->gadget_driver) + cdnsp_died(pdev); + + spin_unlock_irqrestore(&pdev->lock, flags); + local_bh_enable(); + return IRQ_HANDLED; + } + + event_ring_deq = pdev->event_ring->dequeue; + + while (cdnsp_handle_event(pdev)) { + if (++counter >= TRBS_PER_EV_DEQ_UPDATE) { + cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0); + event_ring_deq = pdev->event_ring->dequeue; + counter = 0; + } + } + + cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1); + + spin_unlock_irqrestore(&pdev->lock, flags); + local_bh_enable(); + + return IRQ_HANDLED; +} + +irqreturn_t cdnsp_irq_handler(int irq, void *priv) +{ + struct cdnsp_device *pdev = (struct cdnsp_device *)priv; + u32 irq_pending; + u32 status; + + status = readl(&pdev->op_regs->status); + + if (status == ~(u32)0) { + cdnsp_died(pdev); + return IRQ_HANDLED; + } + + if (!(status & STS_EINT)) + return IRQ_NONE; + + writel(status | STS_EINT, &pdev->op_regs->status); + irq_pending = readl(&pdev->ir_set->irq_pending); + irq_pending |= IMAN_IP; + writel(irq_pending, &pdev->ir_set->irq_pending); + + if (status & STS_FATAL) { + cdnsp_died(pdev); + return IRQ_HANDLED; + } + + return IRQ_WAKE_THREAD; +} + +/* + * Generic function for queuing a TRB on a ring. + * The caller must have checked to make sure there's room on the ring. + * + * @more_trbs_coming: Will you enqueue more TRBs before setting doorbell? 
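+ *
+ * The four fields map one-to-one onto the TRB dwords (byte-swapped here
+ * via cpu_to_le32), so a normal transfer TRB is typically queued as
+ * (illustrative, mirroring the callers below):
+ *
+ *	cdnsp_queue_trb(pdev, ring, true,
+ *			lower_32_bits(dma), upper_32_bits(dma),
+ *			TRB_LEN(len) | TRB_INTR_TARGET(0),
+ *			TRB_TYPE(TRB_NORMAL) | ring->cycle_state);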
+ */ +static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring, + bool more_trbs_coming, u32 field1, u32 field2, + u32 field3, u32 field4) +{ + struct cdnsp_generic_trb *trb; + + trb = &ring->enqueue->generic; + + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = cpu_to_le32(field3); + trb->field[3] = cpu_to_le32(field4); + + trace_cdnsp_queue_trb(ring, trb); + cdnsp_inc_enq(pdev, ring, more_trbs_coming); +} + +/* + * Does various checks on the endpoint ring, and makes it ready to + * queue num_trbs. + */ +static int cdnsp_prepare_ring(struct cdnsp_device *pdev, + struct cdnsp_ring *ep_ring, + u32 ep_state, unsigned + int num_trbs, + gfp_t mem_flags) +{ + unsigned int num_trbs_needed; + + /* Make sure the endpoint has been added to controller schedule. */ + switch (ep_state) { + case EP_STATE_STOPPED: + case EP_STATE_RUNNING: + case EP_STATE_HALTED: + break; + default: + dev_err(pdev->dev, "ERROR: incorrect endpoint state\n"); + return -EINVAL; + } + + while (1) { + if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs)) + break; + + trace_cdnsp_no_room_on_ring("try ring expansion"); + + num_trbs_needed = num_trbs - ep_ring->num_trbs_free; + if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed, + mem_flags)) { + dev_err(pdev->dev, "Ring expansion failed\n"); + return -ENOMEM; + } + } + + while (cdnsp_trb_is_link(ep_ring->enqueue)) { + ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN); + /* The cycle bit must be set as the last operation. */ + wmb(); + ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); + + /* Toggle the cycle bit after the last ring segment. */ + if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue)) + ep_ring->cycle_state ^= 1; + ep_ring->enq_seg = ep_ring->enq_seg->next; + ep_ring->enqueue = ep_ring->enq_seg->trbs; + } + return 0; +} + +static int cdnsp_prepare_transfer(struct cdnsp_device *pdev, + struct cdnsp_request *preq, + unsigned int num_trbs) +{ + struct cdnsp_ring *ep_ring; + int ret; + + ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep, + preq->request.stream_id); + if (!ep_ring) + return -EINVAL; + + ret = cdnsp_prepare_ring(pdev, ep_ring, + GET_EP_CTX_STATE(preq->pep->out_ctx), + num_trbs, GFP_ATOMIC); + if (ret) + return ret; + + INIT_LIST_HEAD(&preq->td.td_list); + preq->td.preq = preq; + + /* Add this TD to the tail of the endpoint ring's TD list. 
+ */
+ list_add_tail(&preq->td.td_list, &ep_ring->td_list);
+ ep_ring->num_tds++;
+ preq->pep->stream_info.td_count++;
+
+ preq->td.start_seg = ep_ring->enq_seg;
+ preq->td.first_trb = ep_ring->enqueue;
+
+ return 0;
+}
+
+static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
+{
+ unsigned int num_trbs;
+
+ num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+ TRB_MAX_BUFF_SIZE);
+ if (num_trbs == 0)
+ num_trbs++;
+
+ return num_trbs;
+}
+
+static unsigned int count_trbs_needed(struct cdnsp_request *preq)
+{
+ return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
+{
+ unsigned int i, len, full_len, num_trbs = 0;
+ struct scatterlist *sg;
+
+ full_len = preq->request.length;
+
+ for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
+ len = sg_dma_len(sg);
+ num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
+ len = min(len, full_len);
+ full_len -= len;
+ if (full_len == 0)
+ break;
+ }
+
+ return num_trbs;
+}
+
+static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
+{
+ return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
+{
+ if (running_total != preq->request.length)
+ dev_err(preq->pep->pdev->dev,
+ "%s - Miscalculated tx length, "
+ "queued %#x, asked for %#x (%d)\n",
+ preq->pep->name, running_total,
+ preq->request.length, preq->request.actual);
+}
+
+/*
+ * TD size is the number of max-packet-sized packets remaining in the TD
+ * (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro.
+ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
+ int transferred,
+ int trb_buff_len,
+ unsigned int td_total_len,
+ struct cdnsp_request *preq,
+ bool more_trbs_coming,
+ bool zlp)
+{
+ u32 maxp, total_packet_count;
+
+ /* Before a ZLP the driver needs to set TD_SIZE = 1. */
+ if (zlp)
+ return 1;
+
+ /* One TRB with a zero-length data packet. */
+ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+ trb_buff_len == td_total_len)
+ return 0;
+
+ maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+ /* Queuing functions don't count the current TRB into transferred. */
+ return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
+
+static int cdnsp_align_td(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq, u32 enqd_len,
+ u32 *trb_buff_len, struct cdnsp_segment *seg)
+{
+ struct device *dev = pdev->dev;
+ unsigned int unalign;
+ unsigned int max_pkt;
+ u32 new_buff_len;
+
+ max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+ /* We got lucky, last normal TRB data on segment is packet aligned. */
+ if (unalign == 0)
+ return 0;
+
+ /* Is the last normal TRB alignable by splitting it?
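+ * For example (illustrative numbers, max_pkt = 512): with enqd_len = 700
+ * and trb_buff_len = 900, unalign = (700 + 900) % 512 = 64. Trimming the
+ * TRB to 900 - 64 = 836 bytes makes 700 + 836 = 1536 a multiple of 512,
+ * so no bounce buffer is needed.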
+ */
+ if (*trb_buff_len > unalign) {
+ *trb_buff_len -= unalign;
+ trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
+ enqd_len, 0, unalign);
+ return 0;
+ }
+
+ /*
+ * We want enqd_len + trb_buff_len to sum up to a number which is
+ * divisible by the endpoint's wMaxPacketSize. IOW:
+ * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+ */
+ new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+ if (new_buff_len > (preq->request.length - enqd_len))
+ new_buff_len = (preq->request.length - enqd_len);
+
+ /* Create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB. */
+ if (preq->direction) {
+ sg_pcopy_to_buffer(preq->request.sg,
+ preq->request.num_mapped_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_FROM_DEVICE);
+ }
+
+ if (dma_mapping_error(dev, seg->bounce_dma)) {
+ /* Try without aligning. */
+ dev_warn(pdev->dev,
+ "Failed mapping bounce buffer, not aligning\n");
+ return 0;
+ }
+
+ *trb_buff_len = new_buff_len;
+ seg->bounce_len = new_buff_len;
+ seg->bounce_offs = enqd_len;
+
+ trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
+ unalign);
+
+ /*
+ * The bounce buffer was successfully aligned; seg->bounce_dma will be
+ * used in the transfer TRB as the new transfer buffer address.
+ */
+ return 1;
+}
+
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+ unsigned int enqd_len, block_len, trb_buff_len, full_len;
+ unsigned int start_cycle, num_sgs = 0;
+ struct cdnsp_generic_trb *start_trb;
+ u32 field, length_field, remainder;
+ struct scatterlist *sg = NULL;
+ bool more_trbs_coming = true;
+ bool need_zero_pkt = false;
+ bool zero_len_trb = false;
+ struct cdnsp_ring *ring;
+ bool first_trb = true;
+ unsigned int num_trbs;
+ struct cdnsp_ep *pep;
+ u64 addr, send_addr;
+ int sent_len, ret;
+
+ ring = cdnsp_request_to_transfer_ring(pdev, preq);
+ if (!ring)
+ return -EINVAL;
+
+ full_len = preq->request.length;
+
+ if (preq->request.num_sgs) {
+ num_sgs = preq->request.num_sgs;
+ sg = preq->request.sg;
+ addr = (u64)sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
+ num_trbs = count_sg_trbs_needed(preq);
+ } else {
+ num_trbs = count_trbs_needed(preq);
+ addr = (u64)preq->request.dma;
+ block_len = full_len;
+ }
+
+ pep = preq->pep;
+
+ /* Deal with request.zero - we need one more TD/TRB. */
+ if (preq->request.zero && preq->request.length &&
+ IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
+ need_zero_pkt = true;
+ num_trbs++;
+ }
+
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ if (ret)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
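+ *
+ * The actual handoff happens in cdnsp_giveback_first_trb(): after a wmb()
+ * the saved start_cycle is applied to start_trb->field[3], flipping
+ * ownership of the whole TD to the hardware in one step (excerpt):
+ *
+ *	wmb();
+ *	if (start_cycle)
+ *		start_trb->field[3] |= cpu_to_le32(start_cycle);
+ *	else
+ *		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);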
+ */ + start_trb = &ring->enqueue->generic; + start_cycle = ring->cycle_state; + send_addr = addr; + + /* Queue the TRBs, even if they are zero-length */ + for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len; + enqd_len += trb_buff_len) { + field = TRB_TYPE(TRB_NORMAL); + + /* TRB buffer should not cross 64KB boundaries */ + trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); + trb_buff_len = min(trb_buff_len, block_len); + if (enqd_len + trb_buff_len > full_len) + trb_buff_len = full_len - enqd_len; + + /* Don't change the cycle bit of the first TRB until later */ + if (first_trb) { + first_trb = false; + if (start_cycle == 0) + field |= TRB_CYCLE; + } else { + field |= ring->cycle_state; + } + + /* + * Chain all the TRBs together; clear the chain bit in the last + * TRB to indicate it's the last TRB in the chain. + */ + if (enqd_len + trb_buff_len < full_len || need_zero_pkt) { + field |= TRB_CHAIN; + if (cdnsp_trb_is_link(ring->enqueue + 1)) { + if (cdnsp_align_td(pdev, preq, enqd_len, + &trb_buff_len, + ring->enq_seg)) { + send_addr = ring->enq_seg->bounce_dma; + /* Assuming TD won't span 2 segs */ + preq->td.bounce_seg = ring->enq_seg; + } + } + } + + if (enqd_len + trb_buff_len >= full_len) { + if (need_zero_pkt && !zero_len_trb) { + zero_len_trb = true; + } else { + zero_len_trb = false; + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + need_zero_pkt = false; + preq->td.last_trb = ring->enqueue; + } + } + + /* Only set interrupt on short packet for OUT endpoints. */ + if (!preq->direction) + field |= TRB_ISP; + + /* Set the TRB length, TD size, and interrupter fields. */ + remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len, + full_len, preq, + more_trbs_coming, + zero_len_trb); + + length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | + TRB_INTR_TARGET(0); + + cdnsp_queue_trb(pdev, ring, more_trbs_coming, + lower_32_bits(send_addr), + upper_32_bits(send_addr), + length_field, + field); + + addr += trb_buff_len; + sent_len = trb_buff_len; + while (sg && sent_len >= block_len) { + /* New sg entry */ + --num_sgs; + sent_len -= block_len; + if (num_sgs != 0) { + sg = sg_next(sg); + block_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + addr += sent_len; + } + } + block_len -= sent_len; + send_addr = addr; + } + + cdnsp_check_trb_math(preq, enqd_len); + ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id, + start_cycle, start_trb); + + if (ret) + preq->td.drbl = 1; + + return 0; +} + +int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) +{ + u32 field, length_field, zlp = 0; + struct cdnsp_ep *pep = preq->pep; + struct cdnsp_ring *ep_ring; + int num_trbs; + u32 maxp; + int ret; + + ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); + if (!ep_ring) + return -EINVAL; + + /* 1 TRB for data, 1 for status */ + num_trbs = (pdev->three_stage_setup) ? 2 : 1; + + maxp = usb_endpoint_maxp(pep->endpoint.desc); + + if (preq->request.zero && preq->request.length && + (preq->request.length % maxp == 0)) { + num_trbs++; + zlp = 1; + } + + ret = cdnsp_prepare_transfer(pdev, preq, num_trbs); + if (ret) + return ret; + + /* If there's data, queue data TRBs */ + if (preq->request.length > 0) { + field = TRB_TYPE(TRB_DATA); + + if (zlp) + field |= TRB_CHAIN; + else + field |= TRB_IOC | (pdev->ep0_expect_in ? 
0 : TRB_ISP);
+
+ if (pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
+ length_field = TRB_LEN(preq->request.length) |
+ TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0);
+
+ cdnsp_queue_trb(pdev, ep_ring, true,
+ lower_32_bits(preq->request.dma),
+ upper_32_bits(preq->request.dma), length_field,
+ field | ep_ring->cycle_state |
+ TRB_SETUPID(pdev->setup_id) |
+ pdev->setup_speed);
+
+ if (zlp) {
+ field = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
+
+ if (!pdev->ep0_expect_in)
+ field |= TRB_ISP;
+
+ cdnsp_queue_trb(pdev, ep_ring, true,
+ lower_32_bits(preq->request.dma),
+ upper_32_bits(preq->request.dma), 0,
+ field | ep_ring->cycle_state |
+ TRB_SETUPID(pdev->setup_id) |
+ pdev->setup_speed);
+ }
+
+ pdev->ep0_stage = CDNSP_DATA_STAGE;
+ }
+
+ /* Save the pointer to the last TRB in the TD. */
+ preq->td.last_trb = ep_ring->enqueue;
+
+ /* Queue status TRB. */
+ if (preq->request.length == 0)
+ field = ep_ring->cycle_state;
+ else
+ field = (ep_ring->cycle_state ^ 1);
+
+ if (preq->request.length > 0 && pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
+ if (pep->ep_state & EP0_HALTED_STATUS) {
+ pep->ep_state &= ~EP0_HALTED_STATUS;
+ field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
+ } else {
+ field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
+ }
+
+ cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
+ field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
+ TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
+
+ cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
+
+ return 0;
+}
+
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
+ int ret = 0;
+
+ if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED ||
+ ep_state == EP_STATE_HALTED) {
+ trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
+ goto ep_stopped;
+ }
+
+ cdnsp_queue_stop_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
+
+ep_stopped:
+ pep->ep_state |= EP_STOPPED;
+ return ret;
+}
+
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ int ret;
+
+ cdnsp_queue_flush_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
+
+ return ret;
+}
+
+/*
+ * The transfer burst count field of the isochronous TRB defines the number of
+ * bursts that are required to move all packets in this TD. Only SuperSpeed
+ * devices can burst up to bMaxBurst number of packets per service interval.
+ * This field is zero based, meaning a value of zero in the field means one
+ * burst. Basically, for everything but SuperSpeed devices, this field will be
+ * zero.
+ */
+static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+
+ if (pdev->gadget.speed < USB_SPEED_SUPER)
+ return 0;
+
+ max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+}
+
+/*
+ * Returns the number of packets in the last "burst" of packets. This field is
+ * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
+ * the last burst packet count is equal to the total number of packets in the
+ * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
+ * must contain (bMaxBurst + 1) number of packets, but the last burst can
+ * contain 1 to (bMaxBurst + 1) packets.
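+ *
+ * For example (illustrative numbers): with bMaxBurst = 3 (bursts of four
+ * packets) and total_packet_count = 7, residue = 7 % 4 = 3 and the field
+ * is 3 - 1 = 2, i.e. three packets in the last burst; with
+ * total_packet_count = 8 the residue is 0 and the field is bMaxBurst = 3.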
+ */ +static unsigned int + cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev, + struct cdnsp_request *preq, + unsigned int total_packet_count) +{ + unsigned int max_burst; + unsigned int residue; + + if (pdev->gadget.speed >= USB_SPEED_SUPER) { + /* bMaxBurst is zero based: 0 means 1 packet per burst. */ + max_burst = preq->pep->endpoint.comp_desc->bMaxBurst; + residue = total_packet_count % (max_burst + 1); + + /* + * If residue is zero, the last burst contains (max_burst + 1) + * number of packets, but the TLBPC field is zero-based. + */ + if (residue == 0) + return max_burst; + + return residue - 1; + } + if (total_packet_count == 0) + return 0; + + return total_packet_count - 1; +} + +/* Queue function isoc transfer */ +static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev, + struct cdnsp_request *preq) +{ + int trb_buff_len, td_len, td_remain_len, ret; + unsigned int burst_count, last_burst_pkt; + unsigned int total_pkt_count, max_pkt; + struct cdnsp_generic_trb *start_trb; + bool more_trbs_coming = true; + struct cdnsp_ring *ep_ring; + int running_total = 0; + u32 field, length_field; + int start_cycle; + int trbs_per_td; + u64 addr; + int i; + + ep_ring = preq->pep->ring; + start_trb = &ep_ring->enqueue->generic; + start_cycle = ep_ring->cycle_state; + td_len = preq->request.length; + addr = (u64)preq->request.dma; + td_remain_len = td_len; + + max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc); + total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); + + /* A zero-length transfer still involves at least one packet. */ + if (total_pkt_count == 0) + total_pkt_count++; + + burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count); + last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq, + total_pkt_count); + trbs_per_td = count_isoc_trbs_needed(preq); + + ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td); + if (ret) + goto cleanup; + + /* + * Set isoc specific data for the first TRB in a TD. + * Prevent HW from getting the TRBs by keeping the cycle state + * inverted in the first TDs isoc TRB. + */ + field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) | + TRB_SIA | TRB_TBC(burst_count); + + if (!start_cycle) + field |= TRB_CYCLE; + + /* Fill the rest of the TRB fields, and remaining normal TRBs. */ + for (i = 0; i < trbs_per_td; i++) { + u32 remainder; + + /* Calculate TRB length. */ + trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); + if (trb_buff_len > td_remain_len) + trb_buff_len = td_remain_len; + + /* Set the TRB length, TD size, & interrupter fields. */ + remainder = cdnsp_td_remainder(pdev, running_total, + trb_buff_len, td_len, preq, + more_trbs_coming, 0); + + length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0); + + /* Only first TRB is isoc, overwrite otherwise. */ + if (i) { + field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state; + length_field |= TRB_TD_SIZE(remainder); + } else { + length_field |= TRB_TD_SIZE_TBC(burst_count); + } + + /* Only set interrupt on short packet for OUT EPs. */ + if (usb_endpoint_dir_out(preq->pep->endpoint.desc)) + field |= TRB_ISP; + + /* Set the chain bit for all except the last TRB. 
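+ * For a TD built from three TRBs this yields (illustrative):
+ *
+ *	TRB 0: TRB_ISOC   | TRB_CHAIN
+ *	TRB 1: TRB_NORMAL | TRB_CHAIN
+ *	TRB 2: TRB_NORMAL | TRB_IOC    (last_trb, chain cleared)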
+ */
+ if (i < trbs_per_td - 1) {
+ more_trbs_coming = true;
+ field |= TRB_CHAIN;
+ } else {
+ more_trbs_coming = false;
+ preq->td.last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+
+ cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
+ lower_32_bits(addr), upper_32_bits(addr),
+ length_field, field);
+
+ running_total += trb_buff_len;
+ addr += trb_buff_len;
+ td_remain_len -= trb_buff_len;
+ }
+
+ /* Check TD length */
+ if (running_total != td_len) {
+ dev_err(pdev->dev, "ISOC TD length mismatch\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
+ start_cycle, start_trb);
+
+ return 0;
+
+cleanup:
+ /* Clean up a partially enqueued isoc transfer. */
+ list_del_init(&preq->td.td_list);
+ ep_ring->num_tds--;
+
+ /*
+ * Use the first TD as a temporary variable to turn the TDs we've
+ * queued into No-ops with a software-owned cycle bit.
+ * That way the hardware won't accidentally start executing bogus TDs
+ * when we partially overwrite them.
+ * td->first_trb and td->start_seg are already set.
+ */
+ preq->td.last_trb = ep_ring->enqueue;
+ /* Every TRB except the first & last will have its cycle bit flipped. */
+ cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
+
+ /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+ ep_ring->enqueue = preq->td.first_trb;
+ ep_ring->enq_seg = preq->td.start_seg;
+ ep_ring->cycle_state = start_cycle;
+ return ret;
+}
+
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 ep_state;
+ int num_trbs;
+ int ret;
+
+ ep_ring = preq->pep->ring;
+ ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
+ num_trbs = count_isoc_trbs_needed(preq);
+
+ /*
+ * Check the ring to guarantee there is enough room for the whole
+ * request. Do not insert any TD of the USB request into the ring if
+ * the check fails.
+ */
+ ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ return cdnsp_queue_isoc_tx(pdev, preq);
+}
+
+/**** Command Ring Operations ****/
+/*
+ * Generic function for queuing a command TRB on the command ring.
+ * The driver queues only one command on the ring at a time.
+ */
+static void cdnsp_queue_command(struct cdnsp_device *pdev,
+ u32 field1,
+ u32 field2,
+ u32 field3,
+ u32 field4)
+{
+ cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
+ GFP_ATOMIC);
+
+ pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
+
+ cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
+ field3, field4 | pdev->cmd_ring->cycle_state);
+}
+
+/* Queue a slot enable or disable request on the command ring */
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue an address device command TRB */
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr,
+ enum cdnsp_setup_dev setup)
+{
+ cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_ADDR_DEV) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ (setup == SETUP_CONTEXT_ONLY ?
TRB_BSR : 0)); +} + +/* Queue a reset device command TRB */ +void cdnsp_queue_reset_device(struct cdnsp_device *pdev) +{ + cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) | + SLOT_ID_FOR_TRB(pdev->slot_id)); +} + +/* Queue a configure endpoint command TRB */ +void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev, + dma_addr_t in_ctx_ptr) +{ + cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_CONFIG_EP) | + SLOT_ID_FOR_TRB(pdev->slot_id)); +} + +/* + * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop + * activity on an endpoint that is about to be suspended. + */ +void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index) +{ + cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) | + EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING)); +} + +/* Set Transfer Ring Dequeue Pointer command. */ +void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, + struct cdnsp_dequeue_state *deq_state) +{ + u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id); + u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id); + u32 type = TRB_TYPE(TRB_SET_DEQ); + u32 trb_sct = 0; + dma_addr_t addr; + + addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg, + deq_state->new_deq_ptr); + + if (deq_state->stream_id) + trb_sct = SCT_FOR_TRB(SCT_PRI_TR); + + cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct | + deq_state->new_cycle_state, upper_32_bits(addr), + trb_stream_id, trb_slot_id | + EP_ID_FOR_TRB(pep->idx) | type); +} + +void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index) +{ + return cdnsp_queue_command(pdev, 0, 0, 0, + SLOT_ID_FOR_TRB(pdev->slot_id) | + EP_ID_FOR_TRB(ep_index) | + TRB_TYPE(TRB_RESET_EP)); +} + +/* + * Queue a halt endpoint request on the command ring. + */ +void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index) +{ + cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) | + SLOT_ID_FOR_TRB(pdev->slot_id) | + EP_ID_FOR_TRB(ep_index)); +} + +/* + * Queue a flush endpoint request on the command ring. + */ +void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index) +{ + cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) | + SLOT_ID_FOR_TRB(pdev->slot_id) | + EP_ID_FOR_TRB(ep_index)); +} + +void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num) +{ + u32 lo, mid; + + lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) | + TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address); + mid = TRB_FH_TR_PACKET_DEV_NOT | + TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) | + TRB_FH_TO_INTERFACE(intf_num); + + cdnsp_queue_command(pdev, lo, mid, 0, + TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2)); +} diff --git a/drivers/usb/cdns3/cdnsp-trace.c b/drivers/usb/cdns3/cdnsp-trace.c new file mode 100644 index 000000000..e50ab799a --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence CDNSP DRD Driver. + * + * Copyright (C) 2020 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + * + */ + +#define CREATE_TRACE_POINTS +#include "cdnsp-trace.h" diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h new file mode 100644 index 000000000..5983dfb99 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-trace.h @@ -0,0 +1,830 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence CDNSP DRD Driver. + * Trace support header file + * + * Copyright (C) 2020 Cadence. 
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdnsp-dev
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR cdnsp_dev
+
+#if !defined(__CDNSP_DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __CDNSP_DEV_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "cdnsp-gadget.h"
+#include "cdnsp-debug.h"
+
+/*
+ * There is a limitation on the single-buffer size in the tracepoint
+ * subsystem. By default TRACE_BUF_SIZE is 1024, so not all data will
+ * be logged. To show more data, this must be increased; in most cases
+ * the default value is sufficient.
+ */
+#define CDNSP_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(unsigned int, state)
+ __field(u32, stream_id)
+ __field(u8, enabled)
+ __field(unsigned int, num_streams)
+ __field(int, td_count)
+ __field(u8, first_prime_det)
+ __field(u8, drbls_count)
+ ),
+ TP_fast_assign(
+ __assign_str(name, pep->name);
+ __entry->state = pep->ep_state;
+ __entry->stream_id = stream_id;
+ __entry->enabled = pep->ep_state & EP_HAS_STREAMS;
+ __entry->num_streams = pep->stream_info.num_streams;
+ __entry->td_count = pep->stream_info.td_count;
+ __entry->first_prime_det = pep->stream_info.first_prime_det;
+ __entry->drbls_count = pep->stream_info.drbls_count;
+ ),
+ TP_printk("%s: SID: %08x, ep state: %x, stream: enabled: %d num %d "
+ "tds %d, first prime: %d drbls %d",
+ __get_str(name), __entry->stream_id, __entry->state,
+ __entry->enabled, __entry->num_streams, __entry->td_count,
+ __entry->first_prime_det, __entry->drbls_count)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_tr_drbl,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_wait_for_prime,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_list_empty_with_skip,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_enable_end,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_disable_end,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_busy_try_halt_again,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_enable_disable,
+ TP_PROTO(int set),
+ TP_ARGS(set),
+ TP_STRUCT__entry(
+ __field(int, set)
+ ),
+ TP_fast_assign(
+ __entry->set = set;
+ ),
+ TP_printk("%s", __entry->set ?
"enabled" : "disabled") +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_pullup, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u1, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u2, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_lpm, + TP_PROTO(int set), + TP_ARGS(set) +); + +DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_may_wakeup, + TP_PROTO(int set), + TP_ARGS(set) +); + +DECLARE_EVENT_CLASS(cdnsp_log_simple, + TP_PROTO(char *msg), + TP_ARGS(msg), + TP_STRUCT__entry( + __string(text, msg) + ), + TP_fast_assign( + __assign_str(text, msg); + ), + TP_printk("%s", __get_str(text)) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_exit, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_init, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_slot_id, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_no_room_on_ring, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_status_stage, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_request, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_set_config, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_halted, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep_halt, + TP_PROTO(char *msg), + TP_ARGS(msg) +); + +TRACE_EVENT(cdnsp_looking_trb_in_td, + TP_PROTO(dma_addr_t suspect, dma_addr_t trb_start, dma_addr_t trb_end, + dma_addr_t curr_seg, dma_addr_t end_seg), + TP_ARGS(suspect, trb_start, trb_end, curr_seg, end_seg), + TP_STRUCT__entry( + __field(dma_addr_t, suspect) + __field(dma_addr_t, trb_start) + __field(dma_addr_t, trb_end) + __field(dma_addr_t, curr_seg) + __field(dma_addr_t, end_seg) + ), + TP_fast_assign( + __entry->suspect = suspect; + __entry->trb_start = trb_start; + __entry->trb_end = trb_end; + __entry->curr_seg = curr_seg; + __entry->end_seg = end_seg; + ), + TP_printk("DMA: suspect event: %pad, trb-start: %pad, trb-end %pad, " + "seg-start %pad, seg-end %pad", + &__entry->suspect, &__entry->trb_start, &__entry->trb_end, + &__entry->curr_seg, &__entry->end_seg) +); + +TRACE_EVENT(cdnsp_port_info, + TP_PROTO(__le32 __iomem *addr, u32 offset, u32 count, u32 rev), + TP_ARGS(addr, offset, count, rev), + TP_STRUCT__entry( + __field(__le32 __iomem *, addr) + __field(u32, offset) + __field(u32, count) + __field(u32, rev) + ), + TP_fast_assign( + __entry->addr = addr; + __entry->offset = offset; + __entry->count = count; + __entry->rev = rev; + ), + TP_printk("Ext Cap %p, port offset = %u, count = %u, rev = 0x%x", + __entry->addr, __entry->offset, __entry->count, __entry->rev) +); + +DECLARE_EVENT_CLASS(cdnsp_log_deq_state, + TP_PROTO(struct cdnsp_dequeue_state *state), + TP_ARGS(state), + TP_STRUCT__entry( + __field(int, new_cycle_state) + __field(struct cdnsp_segment *, new_deq_seg) + __field(dma_addr_t, deq_seg_dma) + __field(union cdnsp_trb *, new_deq_ptr) + __field(dma_addr_t, deq_ptr_dma) + ), + TP_fast_assign( + __entry->new_cycle_state = state->new_cycle_state; + __entry->new_deq_seg = state->new_deq_seg; + __entry->deq_seg_dma = state->new_deq_seg->dma; + __entry->new_deq_ptr = state->new_deq_ptr, + __entry->deq_ptr_dma = cdnsp_trb_virt_to_dma(state->new_deq_seg, + state->new_deq_ptr); + ), + TP_printk("New cycle state = 0x%x, New dequeue 
segment = %p (0x%pad dma), " + "New dequeue pointer = %p (0x%pad dma)", + __entry->new_cycle_state, __entry->new_deq_seg, + &__entry->deq_seg_dma, __entry->new_deq_ptr, + &__entry->deq_ptr_dma + ) +); + +DEFINE_EVENT(cdnsp_log_deq_state, cdnsp_new_deq_state, + TP_PROTO(struct cdnsp_dequeue_state *state), + TP_ARGS(state) +); + +DECLARE_EVENT_CLASS(cdnsp_log_ctrl, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl), + TP_STRUCT__entry( + __field(u8, bRequestType) + __field(u8, bRequest) + __field(u16, wValue) + __field(u16, wIndex) + __field(u16, wLength) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->bRequestType = ctrl->bRequestType; + __entry->bRequest = ctrl->bRequest; + __entry->wValue = le16_to_cpu(ctrl->wValue); + __entry->wIndex = le16_to_cpu(ctrl->wIndex); + __entry->wLength = le16_to_cpu(ctrl->wLength); + ), + TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNSP_MSG_MAX, + __entry->bRequestType, + __entry->bRequest, __entry->wValue, + __entry->wIndex, __entry->wLength) + ) +); + +DEFINE_EVENT(cdnsp_log_ctrl, cdnsp_ctrl_req, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl) +); + +DECLARE_EVENT_CLASS(cdnsp_log_bounce, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign), + TP_STRUCT__entry( + __string(name, preq->pep->name) + __field(u32, new_buf_len) + __field(u32, offset) + __field(dma_addr_t, dma) + __field(unsigned int, unalign) + ), + TP_fast_assign( + __assign_str(name, preq->pep->name); + __entry->new_buf_len = new_buf_len; + __entry->offset = offset; + __entry->dma = dma; + __entry->unalign = unalign; + ), + TP_printk("%s buf len %d, offset %d, dma %pad, unalign %d", + __get_str(name), __entry->new_buf_len, + __entry->offset, &__entry->dma, __entry->unalign + ) +); + +DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_align_td_split, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign) +); + +DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_map, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign) +); + +DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_unmap, + TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset, + dma_addr_t dma, unsigned int unalign), + TP_ARGS(preq, new_buf_len, offset, dma, unalign) +); + +DECLARE_EVENT_CLASS(cdnsp_log_trb, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb), + TP_STRUCT__entry( + __field(u32, type) + __field(u32, field0) + __field(u32, field1) + __field(u32, field2) + __field(u32, field3) + __field(union cdnsp_trb *, trb) + __field(dma_addr_t, trb_dma) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->type = ring->type; + __entry->field0 = le32_to_cpu(trb->field[0]); + __entry->field1 = le32_to_cpu(trb->field[1]); + __entry->field2 = le32_to_cpu(trb->field[2]); + __entry->field3 = le32_to_cpu(trb->field[3]); + __entry->trb = (union cdnsp_trb *)trb; + __entry->trb_dma = cdnsp_trb_virt_to_dma(ring->deq_seg, + (union cdnsp_trb *)trb); + + ), + TP_printk("%s: %s trb: %p(%pad)", cdnsp_ring_type_string(__entry->type), + cdnsp_decode_trb(__get_str(str), CDNSP_MSG_MAX, + __entry->field0, __entry->field1, + __entry->field2, __entry->field3), + __entry->trb, &__entry->trb_dma + ) +); + +DEFINE_EVENT(cdnsp_log_trb, 
cdnsp_handle_event, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_trb_without_td, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_command, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_transfer, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_queue_trb, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_wait_for_compl, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_timeout, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(cdnsp_log_trb, cdnsp_defered_event, + TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DECLARE_EVENT_CLASS(cdnsp_log_pdev, + TP_PROTO(struct cdnsp_device *pdev), + TP_ARGS(pdev), + TP_STRUCT__entry( + __field(struct cdnsp_device *, pdev) + __field(struct usb_gadget *, gadget) + __field(dma_addr_t, out_ctx) + __field(dma_addr_t, in_ctx) + __field(u8, port_num) + ), + TP_fast_assign( + __entry->pdev = pdev; + __entry->gadget = &pdev->gadget; + __entry->in_ctx = pdev->in_ctx.dma; + __entry->out_ctx = pdev->out_ctx.dma; + __entry->port_num = pdev->active_port ? + pdev->active_port->port_num : 0xFF; + ), + TP_printk("pdev %p gadget %p ctx %pad | %pad, port %d ", + __entry->pdev, __entry->gadget, &__entry->in_ctx, + &__entry->out_ctx, __entry->port_num + ) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_alloc_priv_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_free_priv_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_addressable_priv_device, + TP_PROTO(struct cdnsp_device *vdev), + TP_ARGS(vdev) +); + +DECLARE_EVENT_CLASS(cdnsp_log_request, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __string(name, req->pep->name) + __field(struct usb_request *, request) + __field(struct cdnsp_request *, preq) + __field(void *, buf) + __field(unsigned int, actual) + __field(unsigned int, length) + __field(int, status) + __field(dma_addr_t, dma) + __field(unsigned int, stream_id) + __field(unsigned int, zero) + __field(unsigned int, short_not_ok) + __field(unsigned int, no_interrupt) + __field(struct scatterlist*, sg) + __field(unsigned int, num_sgs) + __field(unsigned int, num_mapped_sgs) + + ), + TP_fast_assign( + __assign_str(name, req->pep->name); + __entry->request = &req->request; + __entry->preq = req; + __entry->buf = req->request.buf; + __entry->actual = req->request.actual; + __entry->length = req->request.length; + __entry->status = req->request.status; + __entry->dma = req->request.dma; + __entry->stream_id = req->request.stream_id; + __entry->zero = req->request.zero; + __entry->short_not_ok = req->request.short_not_ok; + __entry->no_interrupt = req->request.no_interrupt; + __entry->sg = req->request.sg; + __entry->num_sgs = req->request.num_sgs; + __entry->num_mapped_sgs = req->request.num_mapped_sgs; + ), + 
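+ /*
+ * In the flag letters printed below, an upper-case letter means the
+ * flag is set: Z/z = zero, S/s = short_not_ok, I/i = no_interrupt.
+ */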
TP_printk("%s; req U:%p/P:%p, req buf %p, length %u/%u, status %d, " + "buf dma (%pad), SID %u, %s%s%s, sg %p, num_sg %d," + " num_m_sg %d", + __get_str(name), __entry->request, __entry->preq, + __entry->buf, __entry->actual, __entry->length, + __entry->status, &__entry->dma, + __entry->stream_id, __entry->zero ? "Z" : "z", + __entry->short_not_ok ? "S" : "s", + __entry->no_interrupt ? "I" : "i", + __entry->sg, __entry->num_sgs, __entry->num_mapped_sgs + ) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_busy, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_error, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_dequeue, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_request_giveback, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_alloc_request, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdnsp_log_request, cdnsp_free_request, + TP_PROTO(struct cdnsp_request *req), + TP_ARGS(req) +); + +DECLARE_EVENT_CLASS(cdnsp_log_ep_ctx, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx), + TP_STRUCT__entry( + __field(u32, info) + __field(u32, info2) + __field(u64, deq) + __field(u32, tx_info) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->info = le32_to_cpu(ctx->ep_info); + __entry->info2 = le32_to_cpu(ctx->ep_info2); + __entry->deq = le64_to_cpu(ctx->deq); + __entry->tx_info = le32_to_cpu(ctx->tx_info); + ), + TP_printk("%s", cdnsp_decode_ep_context(__get_str(str), CDNSP_MSG_MAX, + __entry->info, __entry->info2, + __entry->deq, __entry->tx_info) + ) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_disabled, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_stopped_or_disabled, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_remove_request, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_stop_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_flush_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_set_deq_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_reset_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_config_ep, + TP_PROTO(struct cdnsp_ep_ctx *ctx), + TP_ARGS(ctx) +); + +DECLARE_EVENT_CLASS(cdnsp_log_slot_ctx, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx), + TP_STRUCT__entry( + __field(u32, info) + __field(u32, info2) + __field(u32, int_target) + __field(u32, state) + ), + TP_fast_assign( + __entry->info = le32_to_cpu(ctx->dev_info); + __entry->info2 = le32_to_cpu(ctx->dev_port); + __entry->int_target = le32_to_cpu(ctx->int_target); + __entry->state = le32_to_cpu(ctx->dev_state); + ), + TP_printk("%s", cdnsp_decode_slot_context(__entry->info, + __entry->info2, + __entry->int_target, + __entry->state) + ) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_slot_already_in_default, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, 
cdnsp_handle_cmd_enable_slot, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_disable_slot, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_reset_device, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_setup_device_slot, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_addr_dev, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_reset_dev, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_set_deq, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_configure_endpoint, + TP_PROTO(struct cdnsp_slot_ctx *ctx), + TP_ARGS(ctx) +); + +DECLARE_EVENT_CLASS(cdnsp_log_td_info, + TP_PROTO(struct cdnsp_request *preq), + TP_ARGS(preq), + TP_STRUCT__entry( + __string(name, preq->pep->name) + __field(struct usb_request *, request) + __field(struct cdnsp_request *, preq) + __field(union cdnsp_trb *, first_trb) + __field(union cdnsp_trb *, last_trb) + __field(dma_addr_t, trb_dma) + ), + TP_fast_assign( + __assign_str(name, preq->pep->name); + __entry->request = &preq->request; + __entry->preq = preq; + __entry->first_trb = preq->td.first_trb; + __entry->last_trb = preq->td.last_trb; + __entry->trb_dma = cdnsp_trb_virt_to_dma(preq->td.start_seg, + preq->td.first_trb) + ), + TP_printk("%s req/preq: %p/%p, first trb %p[vir]/%pad(dma), last trb %p", + __get_str(name), __entry->request, __entry->preq, + __entry->first_trb, &__entry->trb_dma, + __entry->last_trb + ) +); + +DEFINE_EVENT(cdnsp_log_td_info, cdnsp_remove_request_td, + TP_PROTO(struct cdnsp_request *preq), + TP_ARGS(preq) +); + +DECLARE_EVENT_CLASS(cdnsp_log_ring, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring), + TP_STRUCT__entry( + __field(u32, type) + __field(void *, ring) + __field(dma_addr_t, enq) + __field(dma_addr_t, deq) + __field(dma_addr_t, enq_seg) + __field(dma_addr_t, deq_seg) + __field(unsigned int, num_segs) + __field(unsigned int, stream_id) + __field(unsigned int, cycle_state) + __field(unsigned int, num_trbs_free) + __field(unsigned int, bounce_buf_len) + ), + TP_fast_assign( + __entry->ring = ring; + __entry->type = ring->type; + __entry->num_segs = ring->num_segs; + __entry->stream_id = ring->stream_id; + __entry->enq_seg = ring->enq_seg->dma; + __entry->deq_seg = ring->deq_seg->dma; + __entry->cycle_state = ring->cycle_state; + __entry->num_trbs_free = ring->num_trbs_free; + __entry->bounce_buf_len = ring->bounce_buf_len; + __entry->enq = cdnsp_trb_virt_to_dma(ring->enq_seg, + ring->enqueue); + __entry->deq = cdnsp_trb_virt_to_dma(ring->deq_seg, + ring->dequeue); + ), + TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d" + " free_trbs %d bounce %d cycle %d", + cdnsp_ring_type_string(__entry->type), __entry->ring, + &__entry->enq, &__entry->enq_seg, + &__entry->deq, &__entry->deq_seg, + __entry->num_segs, + __entry->stream_id, + __entry->num_trbs_free, + __entry->bounce_buf_len, + __entry->cycle_state + ) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_alloc, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_free, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_set_stream_ring, + TP_PROTO(struct cdnsp_ring *ring), + 
TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_expansion, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_enq, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_deq, + TP_PROTO(struct cdnsp_ring *ring), + TP_ARGS(ring) +); + +DECLARE_EVENT_CLASS(cdnsp_log_portsc, + TP_PROTO(u32 portnum, u32 portsc), + TP_ARGS(portnum, portsc), + TP_STRUCT__entry( + __field(u32, portnum) + __field(u32, portsc) + __dynamic_array(char, str, CDNSP_MSG_MAX) + ), + TP_fast_assign( + __entry->portnum = portnum; + __entry->portsc = portsc; + ), + TP_printk("port-%d: %s", + __entry->portnum, + cdnsp_decode_portsc(__get_str(str), CDNSP_MSG_MAX, + __entry->portsc) + ) +); + +DEFINE_EVENT(cdnsp_log_portsc, cdnsp_handle_port_status, + TP_PROTO(u32 portnum, u32 portsc), + TP_ARGS(portnum, portsc) +); + +DEFINE_EVENT(cdnsp_log_portsc, cdnsp_link_state_changed, + TP_PROTO(u32 portnum, u32 portsc), + TP_ARGS(portnum, portsc) +); + +TRACE_EVENT(cdnsp_stream_number, + TP_PROTO(struct cdnsp_ep *pep, int num_stream_ctxs, int num_streams), + TP_ARGS(pep, num_stream_ctxs, num_streams), + TP_STRUCT__entry( + __string(name, pep->name) + __field(int, num_stream_ctxs) + __field(int, num_streams) + ), + TP_fast_assign( + __entry->num_stream_ctxs = num_stream_ctxs; + __entry->num_streams = num_streams; + ), + TP_printk("%s Need %u stream ctx entries for %u stream IDs.", + __get_str(name), __entry->num_stream_ctxs, + __entry->num_streams) +); + +#endif /* __CDNSP_TRACE_H */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cdnsp-trace + +#include <trace/define_trace.h> diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c new file mode 100644 index 000000000..7b20d2d5c --- /dev/null +++ b/drivers/usb/cdns3/core.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS and USBSSP DRD Driver. + * + * Copyright (C) 2018-2019 Cadence. 
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ * Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+#include "host-export.h"
+#include "drd.h"
+
+static int cdns_idle_init(struct cdns *cdns);
+
+static int cdns_role_start(struct cdns *cdns, enum usb_role role)
+{
+ int ret;
+
+ if (WARN_ON(role > USB_ROLE_DEVICE))
+ return 0;
+
+ mutex_lock(&cdns->mutex);
+ cdns->role = role;
+ mutex_unlock(&cdns->mutex);
+
+ if (!cdns->roles[role])
+ return -ENXIO;
+
+ if (cdns->roles[role]->state == CDNS_ROLE_STATE_ACTIVE)
+ return 0;
+
+ mutex_lock(&cdns->mutex);
+ ret = cdns->roles[role]->start(cdns);
+ if (!ret)
+ cdns->roles[role]->state = CDNS_ROLE_STATE_ACTIVE;
+ mutex_unlock(&cdns->mutex);
+
+ return ret;
+}
+
+static void cdns_role_stop(struct cdns *cdns)
+{
+ enum usb_role role = cdns->role;
+
+ if (WARN_ON(role > USB_ROLE_DEVICE))
+ return;
+
+ if (cdns->roles[role]->state == CDNS_ROLE_STATE_INACTIVE)
+ return;
+
+ mutex_lock(&cdns->mutex);
+ cdns->roles[role]->stop(cdns);
+ cdns->roles[role]->state = CDNS_ROLE_STATE_INACTIVE;
+ mutex_unlock(&cdns->mutex);
+}
+
+static void cdns_exit_roles(struct cdns *cdns)
+{
+ cdns_role_stop(cdns);
+ cdns_drd_exit(cdns);
+}
+
+/**
+ * cdns_core_init_role - initialize role of operation
+ * @cdns: Pointer to cdns structure
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns_core_init_role(struct cdns *cdns)
+{
+ struct device *dev = cdns->dev;
+ enum usb_dr_mode best_dr_mode;
+ enum usb_dr_mode dr_mode;
+ int ret;
+
+ dr_mode = usb_get_dr_mode(dev);
+ cdns->role = USB_ROLE_NONE;
+
+ /*
+ * If the driver can't read the mode using the usb_get_dr_mode()
+ * function, it chooses the mode according to the kernel
+ * configuration. This setting can be restricted later depending
+ * on the strap pin configuration.
+ */
+ if (dr_mode == USB_DR_MODE_UNKNOWN) {
+ if (cdns->version == CDNSP_CONTROLLER_V2) {
+ if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ } else {
+ if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
+ }
+
+ /*
+ * At this point cdns->dr_mode contains strap configuration.
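+ * (it was read from the controller strap pins by cdns_drd_init()
+ * during probe).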
+ * The driver tries to update this setting, taking the kernel
+ * configuration into account.
+ */
+ best_dr_mode = cdns->dr_mode;
+
+ ret = cdns_idle_init(cdns);
+ if (ret)
+ return ret;
+
+ if (dr_mode == USB_DR_MODE_OTG) {
+ best_dr_mode = cdns->dr_mode;
+ } else if (cdns->dr_mode == USB_DR_MODE_OTG) {
+ best_dr_mode = dr_mode;
+ } else if (cdns->dr_mode != dr_mode) {
+ dev_err(dev, "Incorrect DRD configuration\n");
+ return -EINVAL;
+ }
+
+ dr_mode = best_dr_mode;
+
+ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ IS_ENABLED(CONFIG_USB_CDNSP_HOST)) ||
+ (cdns->version < CDNSP_CONTROLLER_V2 &&
+ IS_ENABLED(CONFIG_USB_CDNS3_HOST)))
+ ret = cdns_host_init(cdns);
+ else
+ ret = -ENXIO;
+
+ if (ret) {
+ dev_err(dev, "Host initialization failed with %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
+ if (cdns->gadget_init)
+ ret = cdns->gadget_init(cdns);
+ else
+ ret = -ENXIO;
+
+ if (ret) {
+ dev_err(dev, "Device initialization failed with %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ cdns->dr_mode = dr_mode;
+
+ ret = cdns_drd_update_mode(cdns);
+ if (ret)
+ goto err;
+
+ /* Initialize idle role to start with */
+ ret = cdns_role_start(cdns, USB_ROLE_NONE);
+ if (ret)
+ goto err;
+
+ switch (cdns->dr_mode) {
+ case USB_DR_MODE_OTG:
+ ret = cdns_hw_role_switch(cdns);
+ if (ret)
+ goto err;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ ret = cdns_role_start(cdns, USB_ROLE_DEVICE);
+ if (ret)
+ goto err;
+ break;
+ case USB_DR_MODE_HOST:
+ ret = cdns_role_start(cdns, USB_ROLE_HOST);
+ if (ret)
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ cdns_exit_roles(cdns);
+ return ret;
+}
+
+/**
+ * cdns_hw_role_state_machine - role switch state machine based on hw events.
+ * @cdns: Pointer to controller structure.
+ *
+ * Returns next role to be entered based on hw events.
+ */
+static enum usb_role cdns_hw_role_state_machine(struct cdns *cdns)
+{
+ enum usb_role role = USB_ROLE_NONE;
+ int id, vbus;
+
+ if (cdns->dr_mode != USB_DR_MODE_OTG) {
+ if (cdns_is_host(cdns))
+ role = USB_ROLE_HOST;
+ if (cdns_is_device(cdns))
+ role = USB_ROLE_DEVICE;
+
+ return role;
+ }
+
+ id = cdns_get_id(cdns);
+ vbus = cdns_get_vbus(cdns);
+
+ /*
+ * Role change state machine
+ * Inputs: ID, VBUS
+ * Previous state: cdns->role
+ * Next state: role
+ */
+ role = cdns->role;
+
+ switch (role) {
+ case USB_ROLE_NONE:
+ /*
+ * The driver treats USB_ROLE_NONE as synonymous with the
+ * IDLE state from the controller specification.
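+ * Leaving IDLE, a grounded ID pin selects the host role and a
+ * valid VBUS selects the device role, as the checks below
+ * implement.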
+ */ + if (!id) + role = USB_ROLE_HOST; + else if (vbus) + role = USB_ROLE_DEVICE; + break; + case USB_ROLE_HOST: /* from HOST, we can only change to NONE */ + if (id) + role = USB_ROLE_NONE; + break; + case USB_ROLE_DEVICE: /* from GADGET, we can only change to NONE*/ + if (!vbus) + role = USB_ROLE_NONE; + break; + } + + dev_dbg(cdns->dev, "role %d -> %d\n", cdns->role, role); + + return role; +} + +static int cdns_idle_role_start(struct cdns *cdns) +{ + return 0; +} + +static void cdns_idle_role_stop(struct cdns *cdns) +{ + /* Program Lane swap and bring PHY out of RESET */ + phy_reset(cdns->usb3_phy); +} + +static int cdns_idle_init(struct cdns *cdns) +{ + struct cdns_role_driver *rdrv; + + rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = cdns_idle_role_start; + rdrv->stop = cdns_idle_role_stop; + rdrv->state = CDNS_ROLE_STATE_INACTIVE; + rdrv->suspend = NULL; + rdrv->resume = NULL; + rdrv->name = "idle"; + + cdns->roles[USB_ROLE_NONE] = rdrv; + + return 0; +} + +/** + * cdns_hw_role_switch - switch roles based on HW state + * @cdns: controller + */ +int cdns_hw_role_switch(struct cdns *cdns) +{ + enum usb_role real_role, current_role; + int ret = 0; + + /* Depends on role switch class */ + if (cdns->role_sw) + return 0; + + pm_runtime_get_sync(cdns->dev); + + current_role = cdns->role; + real_role = cdns_hw_role_state_machine(cdns); + + /* Do nothing if nothing changed */ + if (current_role == real_role) + goto exit; + + cdns_role_stop(cdns); + + dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role); + + ret = cdns_role_start(cdns, real_role); + if (ret) { + /* Back to current role */ + dev_err(cdns->dev, "set %d has failed, back to %d\n", + real_role, current_role); + ret = cdns_role_start(cdns, current_role); + if (ret) + dev_err(cdns->dev, "back to %d failed too\n", + current_role); + } +exit: + pm_runtime_put_sync(cdns->dev); + return ret; +} + +/** + * cdns_role_get - get current role of controller. + * + * @sw: pointer to USB role switch structure + * + * Returns role + */ +static enum usb_role cdns_role_get(struct usb_role_switch *sw) +{ + struct cdns *cdns = usb_role_switch_get_drvdata(sw); + + return cdns->role; +} + +/** + * cdns_role_set - set current role of controller. 
+ *
+ * @sw: pointer to USB role switch structure
+ * @role: the new role to switch to
+ * Handles the following events:
+ * - Role switch for dual-role devices
+ * - USB_ROLE_DEVICE <--> USB_ROLE_NONE for peripheral-only devices
+ */
+static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
+ int ret = 0;
+
+ pm_runtime_get_sync(cdns->dev);
+
+ if (cdns->role == role)
+ goto pm_put;
+
+ if (cdns->dr_mode == USB_DR_MODE_HOST) {
+ switch (role) {
+ case USB_ROLE_NONE:
+ case USB_ROLE_HOST:
+ break;
+ default:
+ goto pm_put;
+ }
+ }
+
+ if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ switch (role) {
+ case USB_ROLE_NONE:
+ case USB_ROLE_DEVICE:
+ break;
+ default:
+ goto pm_put;
+ }
+ }
+
+ cdns_role_stop(cdns);
+ ret = cdns_role_start(cdns, role);
+ if (ret)
+ dev_err(cdns->dev, "set role %d failed\n", role);
+
+pm_put:
+ pm_runtime_put_sync(cdns->dev);
+ return ret;
+}
+
+
+/**
+ * cdns_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3/cdnsp core device
+ * @data: structure of cdns
+ *
+ * Returns IRQ_HANDLED or IRQ_NONE
+ */
+static irqreturn_t cdns_wakeup_irq(int irq, void *data)
+{
+ struct cdns *cdns = data;
+
+ if (cdns->in_lpm) {
+ disable_irq_nosync(irq);
+ cdns->wakeup_pending = true;
+ if ((cdns->role == USB_ROLE_HOST) && cdns->host_dev)
+ pm_request_resume(&cdns->host_dev->dev);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/**
+ * cdns_init - probe for cdns3/cdnsp core device
+ * @cdns: Pointer to cdns structure.
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+int cdns_init(struct cdns *cdns)
+{
+ struct device *dev = cdns->dev;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "error setting dma mask: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&cdns->mutex);
+
+ if (device_property_read_bool(dev, "usb-role-switch")) {
+ struct usb_role_switch_desc sw_desc = { };
+
+ sw_desc.set = cdns_role_set;
+ sw_desc.get = cdns_role_get;
+ sw_desc.allow_userspace_control = true;
+ sw_desc.driver_data = cdns;
+ sw_desc.fwnode = dev->fwnode;
+
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
+ if (IS_ERR(cdns->role_sw)) {
+ dev_warn(dev, "Unable to register Role Switch\n");
+ return PTR_ERR(cdns->role_sw);
+ }
+ }
+
+ if (cdns->wakeup_irq) {
+ ret = devm_request_irq(cdns->dev, cdns->wakeup_irq,
+ cdns_wakeup_irq,
+ IRQF_SHARED,
+ dev_name(cdns->dev), cdns);
+
+ if (ret) {
+ dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
+ goto role_switch_unregister;
+ }
+ }
+
+ ret = cdns_drd_init(cdns);
+ if (ret)
+ goto init_failed;
+
+ ret = cdns_core_init_role(cdns);
+ if (ret)
+ goto init_failed;
+
+ spin_lock_init(&cdns->lock);
+
+ dev_dbg(dev, "Cadence USB3 core: probe succeeded\n");
+
+ return 0;
+init_failed:
+ cdns_drd_exit(cdns);
+role_switch_unregister:
+ if (cdns->role_sw)
+ usb_role_switch_unregister(cdns->role_sw);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_init);
+
+/**
+ * cdns_remove - unbind drd driver and clean up
+ * @cdns: Pointer to cdns structure.
+ * + * Returns 0 on success otherwise negative errno + */ +int cdns_remove(struct cdns *cdns) +{ + cdns_exit_roles(cdns); + usb_role_switch_unregister(cdns->role_sw); + + return 0; +} +EXPORT_SYMBOL_GPL(cdns_remove); + +#ifdef CONFIG_PM_SLEEP +int cdns_suspend(struct cdns *cdns) +{ + struct device *dev = cdns->dev; + unsigned long flags; + + if (pm_runtime_status_suspended(dev)) + pm_runtime_resume(dev); + + if (cdns->roles[cdns->role]->suspend) { + spin_lock_irqsave(&cdns->lock, flags); + cdns->roles[cdns->role]->suspend(cdns, false); + spin_unlock_irqrestore(&cdns->lock, flags); + } + + return 0; +} +EXPORT_SYMBOL_GPL(cdns_suspend); + +int cdns_resume(struct cdns *cdns) +{ + enum usb_role real_role; + bool role_changed = false; + int ret = 0; + + if (cdns_power_is_lost(cdns)) { + if (cdns->role_sw) { + cdns->role = cdns_role_get(cdns->role_sw); + } else { + real_role = cdns_hw_role_state_machine(cdns); + if (real_role != cdns->role) { + ret = cdns_hw_role_switch(cdns); + if (ret) + return ret; + role_changed = true; + } + } + + if (!role_changed) { + if (cdns->role == USB_ROLE_HOST) + ret = cdns_drd_host_on(cdns); + else if (cdns->role == USB_ROLE_DEVICE) + ret = cdns_drd_gadget_on(cdns); + + if (ret) + return ret; + } + } + + if (cdns->roles[cdns->role]->resume) + cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns)); + + return 0; +} +EXPORT_SYMBOL_GPL(cdns_resume); + +void cdns_set_active(struct cdns *cdns, u8 set_active) +{ + struct device *dev = cdns->dev; + + if (set_active) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return; +} +EXPORT_SYMBOL_GPL(cdns_set_active); +#endif /* CONFIG_PM_SLEEP */ + +MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>"); +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_DESCRIPTION("Cadence USBSS and USBSSP DRD Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h new file mode 100644 index 000000000..81a9c9d6b --- /dev/null +++ b/drivers/usb/cdns3/core.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence USBSS and USBSSP DRD Header File. + * + * Copyright (C) 2017-2018 NXP + * Copyright (C) 2018-2019 Cadence. + * + * Authors: Peter Chen <peter.chen@nxp.com> + * Pawel Laszczak <pawell@cadence.com> + */ +#ifndef __LINUX_CDNS3_CORE_H +#define __LINUX_CDNS3_CORE_H + +#include <linux/usb/otg.h> +#include <linux/usb/role.h> + +struct cdns; + +/** + * struct cdns_role_driver - host/gadget role driver + * @start: start this role + * @stop: stop this role + * @suspend: suspend callback for this role + * @resume: resume callback for this role + * @irq: irq handler for this role + * @name: role name string (host/gadget) + * @state: current state + */ +struct cdns_role_driver { + int (*start)(struct cdns *cdns); + void (*stop)(struct cdns *cdns); + int (*suspend)(struct cdns *cdns, bool do_wakeup); + int (*resume)(struct cdns *cdns, bool hibernated); + const char *name; +#define CDNS_ROLE_STATE_INACTIVE 0 +#define CDNS_ROLE_STATE_ACTIVE 1 + int state; +}; + +#define CDNS_XHCI_RESOURCES_NUM 2 + +struct cdns3_platform_data { + int (*platform_suspend)(struct device *dev, + bool suspend, bool wakeup); + unsigned long quirks; +#define CDNS3_DEFAULT_PM_RUNTIME_ALLOW BIT(0) +}; + +/** + * struct cdns - Representation of Cadence USB3 DRD controller. 
+ * @dev: pointer to Cadence device struct
+ * @xhci_regs: pointer to base of xhci registers
+ * @xhci_res: the resource for xhci
+ * @dev_regs: pointer to base of dev registers
+ * @otg_res: the resource for otg
+ * @otg_v0_regs: pointer to base of v0 otg registers
+ * @otg_v1_regs: pointer to base of v1 otg registers
+ * @otg_cdnsp_regs: pointer to base of CDNSP otg registers
+ * @otg_regs: pointer to base of otg registers
+ * @otg_irq_regs: pointer to interrupt registers
+ * @otg_irq: irq number for otg controller
+ * @dev_irq: irq number for device controller
+ * @wakeup_irq: irq number for wakeup event; it is optional
+ * @roles: array of supported roles for this controller
+ * @role: current role
+ * @host_dev: the child host device pointer for cdns core
+ * @gadget_dev: the child gadget device pointer
+ * @usb2_phy: pointer to USB2 PHY
+ * @usb3_phy: pointer to USB3 PHY
+ * @mutex: the mutex for concurrent code in the driver
+ * @dr_mode: supported mode of operation; it can be Host-only, Device-only
+ * or OTG mode that allows switching between Device and Host modes.
+ * This field is based on the firmware setting, kernel configuration
+ * and hardware configuration.
+ * @role_sw: pointer to role switch object.
+ * @in_lpm: indicates the controller is in low power mode
+ * @wakeup_pending: wakeup interrupt pending
+ * @pdata: platform data from glue layer
+ * @lock: spinlock structure
+ * @xhci_plat_data: xhci private data structure pointer
+ * @gadget_init: pointer to gadget initialization function
+ */
+struct cdns {
+ struct device *dev;
+ void __iomem *xhci_regs;
+ struct resource xhci_res[CDNS_XHCI_RESOURCES_NUM];
+ struct cdns3_usb_regs __iomem *dev_regs;
+
+ struct resource otg_res;
+ struct cdns3_otg_legacy_regs __iomem *otg_v0_regs;
+ struct cdns3_otg_regs __iomem *otg_v1_regs;
+ struct cdnsp_otg_regs __iomem *otg_cdnsp_regs;
+ struct cdns_otg_common_regs __iomem *otg_regs;
+ struct cdns_otg_irq_regs __iomem *otg_irq_regs;
+#define CDNS3_CONTROLLER_V0 0
+#define CDNS3_CONTROLLER_V1 1
+#define CDNSP_CONTROLLER_V2 2
+ u32 version;
+ bool phyrst_a_enable;
+
+ int otg_irq;
+ int dev_irq;
+ int wakeup_irq;
+ struct cdns_role_driver *roles[USB_ROLE_DEVICE + 1];
+ enum usb_role role;
+ struct platform_device *host_dev;
+ void *gadget_dev;
+ struct phy *usb2_phy;
+ struct phy *usb3_phy;
+ /* mutex used in workqueue */
+ struct mutex mutex;
+ enum usb_dr_mode dr_mode;
+ struct usb_role_switch *role_sw;
+ bool in_lpm;
+ bool wakeup_pending;
+ struct cdns3_platform_data *pdata;
+ spinlock_t lock;
+ struct xhci_plat_priv *xhci_plat_data;
+
+ int (*gadget_init)(struct cdns *cdns);
+};
+
+int cdns_hw_role_switch(struct cdns *cdns);
+int cdns_init(struct cdns *cdns);
+int cdns_remove(struct cdns *cdns);
+
+#ifdef CONFIG_PM_SLEEP
+int cdns_resume(struct cdns *cdns);
+int cdns_suspend(struct cdns *cdns);
+void cdns_set_active(struct cdns *cdns, u8 set_active);
+#else /* CONFIG_PM_SLEEP */
+static inline int cdns_resume(struct cdns *cdns)
+{ return 0; }
+static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { }
+static inline int cdns_suspend(struct cdns *cdns)
+{ return 0; }
+#endif /* CONFIG_PM_SLEEP */
+#endif /* __LINUX_CDNS3_CORE_H */
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
new file mode 100644
index 000000000..d00ff98df
--- /dev/null
+++ b/drivers/usb/cdns3/drd.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS and USBSSP DRD Driver.
+ *
+ * Copyright (C) 2018-2020 Cadence.
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/usb/otg.h>
+
+#include "drd.h"
+#include "core.h"
+
+/**
+ * cdns_set_mode - change mode of OTG Core
+ * @cdns: pointer to context structure
+ * @mode: selected mode from cdns_role
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns_set_mode(struct cdns *cdns, enum usb_dr_mode mode)
+{
+ void __iomem *override_reg;
+ u32 reg;
+
+ switch (mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ break;
+ case USB_DR_MODE_HOST:
+ break;
+ case USB_DR_MODE_OTG:
+ dev_dbg(cdns->dev, "Set controller to OTG mode\n");
+
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ override_reg = &cdns->otg_cdnsp_regs->override;
+ else if (cdns->version == CDNS3_CONTROLLER_V1)
+ override_reg = &cdns->otg_v1_regs->override;
+ else
+ override_reg = &cdns->otg_v0_regs->ctrl1;
+
+ reg = readl(override_reg);
+
+ if (cdns->version != CDNS3_CONTROLLER_V0)
+ reg |= OVERRIDE_IDPULLUP;
+ else
+ reg |= OVERRIDE_IDPULLUP_V0;
+
+ writel(reg, override_reg);
+
+ if (cdns->version == CDNS3_CONTROLLER_V1) {
+ /*
+ * Enable the workaround feature built into the
+ * controller to address an issue with the RX
+ * Sensitivity Test (EL_17) for the USB2 PHY. The
+ * issue only occurs for the 0x0002450D controller
+ * version.
+ */
+ if (cdns->phyrst_a_enable) {
+ reg = readl(&cdns->otg_v1_regs->phyrst_cfg);
+ reg |= PHYRST_CFG_PHYRST_A_ENABLE;
+ writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
+ }
+ }
+
+ /*
+ * Hardware specification says: "ID_VALUE must be valid within
+ * 50ms after idpullup is set to '1'", so the driver must wait
+ * 50ms before reading this pin.
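+ * The usleep_range() below provides that delay with a small
+ * margin.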
+ */ + usleep_range(50000, 60000); + break; + default: + dev_err(cdns->dev, "Unsupported mode of operation %d\n", mode); + return -EINVAL; + } + + return 0; +} + +int cdns_get_id(struct cdns *cdns) +{ + int id; + + id = readl(&cdns->otg_regs->sts) & OTGSTS_ID_VALUE; + dev_dbg(cdns->dev, "OTG ID: %d", id); + + return id; +} + +int cdns_get_vbus(struct cdns *cdns) +{ + int vbus; + + vbus = !!(readl(&cdns->otg_regs->sts) & OTGSTS_VBUS_VALID); + dev_dbg(cdns->dev, "OTG VBUS: %d", vbus); + + return vbus; +} + +void cdns_clear_vbus(struct cdns *cdns) +{ + u32 reg; + + if (cdns->version != CDNSP_CONTROLLER_V2) + return; + + reg = readl(&cdns->otg_cdnsp_regs->override); + reg |= OVERRIDE_SESS_VLD_SEL; + writel(reg, &cdns->otg_cdnsp_regs->override); +} +EXPORT_SYMBOL_GPL(cdns_clear_vbus); + +void cdns_set_vbus(struct cdns *cdns) +{ + u32 reg; + + if (cdns->version != CDNSP_CONTROLLER_V2) + return; + + reg = readl(&cdns->otg_cdnsp_regs->override); + reg &= ~OVERRIDE_SESS_VLD_SEL; + writel(reg, &cdns->otg_cdnsp_regs->override); +} +EXPORT_SYMBOL_GPL(cdns_set_vbus); + +bool cdns_is_host(struct cdns *cdns) +{ + if (cdns->dr_mode == USB_DR_MODE_HOST) + return true; + else if (cdns_get_id(cdns) == CDNS3_ID_HOST) + return true; + + return false; +} + +bool cdns_is_device(struct cdns *cdns) +{ + if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) + return true; + else if (cdns->dr_mode == USB_DR_MODE_OTG) + if (cdns_get_id(cdns) == CDNS3_ID_PERIPHERAL) + return true; + + return false; +} + +/** + * cdns_otg_disable_irq - Disable all OTG interrupts + * @cdns: Pointer to controller context structure + */ +static void cdns_otg_disable_irq(struct cdns *cdns) +{ + writel(0, &cdns->otg_irq_regs->ien); +} + +/** + * cdns_otg_enable_irq - enable id and sess_valid interrupts + * @cdns: Pointer to controller context structure + */ +static void cdns_otg_enable_irq(struct cdns *cdns) +{ + writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT | + OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien); +} + +/** + * cdns_drd_host_on - start host. + * @cdns: Pointer to controller context structure. + * + * Returns 0 on success otherwise negative errno. + */ +int cdns_drd_host_on(struct cdns *cdns) +{ + u32 val, ready_bit; + int ret; + + /* Enable host mode. */ + writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS, + &cdns->otg_regs->cmd); + + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_XHCI_READY; + else + ready_bit = OTGSTS_CDNS3_XHCI_READY; + + dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n"); + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, + val & ready_bit, 1, 100000); + + if (ret) + dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); + + phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST); + return ret; +} + +/** + * cdns_drd_host_off - stop host. + * @cdns: Pointer to controller context structure. + */ +void cdns_drd_host_off(struct cdns *cdns) +{ + u32 val; + + writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP | + OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF, + &cdns->otg_regs->cmd); + + /* Waiting till H_IDLE state.*/ + readl_poll_timeout_atomic(&cdns->otg_regs->state, val, + !(val & OTGSTATE_HOST_STATE_MASK), + 1, 2000000); + phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); +} + +/** + * cdns_drd_gadget_on - start gadget. + * @cdns: Pointer to controller context structure. 
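+ *
+ * Requests the bus for device mode and waits for the controller to set
+ * its DEV_READY status bit before switching the PHY to device mode.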
+ * + * Returns 0 on success otherwise negative errno + */ +int cdns_drd_gadget_on(struct cdns *cdns) +{ + u32 reg = OTGCMD_OTG_DIS; + u32 ready_bit; + int ret, val; + + /* switch OTG core */ + writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd); + + dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n"); + + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_DEV_READY; + else + ready_bit = OTGSTS_CDNS3_DEV_READY; + + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, + val & ready_bit, 1, 100000); + if (ret) { + dev_err(cdns->dev, "timeout waiting for dev_ready\n"); + return ret; + } + + phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE); + return 0; +} +EXPORT_SYMBOL_GPL(cdns_drd_gadget_on); + +/** + * cdns_drd_gadget_off - stop gadget. + * @cdns: Pointer to controller context structure. + */ +void cdns_drd_gadget_off(struct cdns *cdns) +{ + u32 val; + + /* + * Driver should wait at least 10us after disabling Device + * before turning-off Device (DEV_BUS_DROP). + */ + usleep_range(20, 30); + writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP | + OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF, + &cdns->otg_regs->cmd); + /* Waiting till DEV_IDLE state.*/ + readl_poll_timeout_atomic(&cdns->otg_regs->state, val, + !(val & OTGSTATE_DEV_STATE_MASK), + 1, 2000000); + phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); +} +EXPORT_SYMBOL_GPL(cdns_drd_gadget_off); + +/** + * cdns_init_otg_mode - initialize drd controller + * @cdns: Pointer to controller context structure + * + * Returns 0 on success otherwise negative errno + */ +static int cdns_init_otg_mode(struct cdns *cdns) +{ + int ret; + + cdns_otg_disable_irq(cdns); + /* clear all interrupts */ + writel(~0, &cdns->otg_irq_regs->ivect); + + ret = cdns_set_mode(cdns, USB_DR_MODE_OTG); + if (ret) + return ret; + + cdns_otg_enable_irq(cdns); + + return 0; +} + +/** + * cdns_drd_update_mode - initialize mode of operation + * @cdns: Pointer to controller context structure + * + * Returns 0 on success otherwise negative errno + */ +int cdns_drd_update_mode(struct cdns *cdns) +{ + int ret; + + switch (cdns->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + ret = cdns_set_mode(cdns, USB_DR_MODE_PERIPHERAL); + break; + case USB_DR_MODE_HOST: + ret = cdns_set_mode(cdns, USB_DR_MODE_HOST); + break; + case USB_DR_MODE_OTG: + ret = cdns_init_otg_mode(cdns); + break; + default: + dev_err(cdns->dev, "Unsupported mode of operation %d\n", + cdns->dr_mode); + return -EINVAL; + } + + return ret; +} + +static irqreturn_t cdns_drd_thread_irq(int irq, void *data) +{ + struct cdns *cdns = data; + + cdns_hw_role_switch(cdns); + + return IRQ_HANDLED; +} + +/** + * cdns_drd_irq - interrupt handler for OTG events + * + * @irq: irq number for cdns core device + * @data: structure of cdns + * + * Returns IRQ_HANDLED or IRQ_NONE + */ +static irqreturn_t cdns_drd_irq(int irq, void *data) +{ + irqreturn_t ret = IRQ_NONE; + struct cdns *cdns = data; + u32 reg; + + if (cdns->dr_mode != USB_DR_MODE_OTG) + return IRQ_NONE; + + if (cdns->in_lpm) + return ret; + + reg = readl(&cdns->otg_irq_regs->ivect); + + if (!reg) + return IRQ_NONE; + + if (reg & OTGIEN_ID_CHANGE_INT) { + dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n", + cdns_get_id(cdns)); + + ret = IRQ_WAKE_THREAD; + } + + if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) { + dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n", + cdns_get_vbus(cdns)); + + ret = IRQ_WAKE_THREAD; + } + + writel(~0, &cdns->otg_irq_regs->ivect); + return ret; +} + +int cdns_drd_init(struct cdns *cdns) +{ + void 
__iomem *regs;
+ u32 state;
+ int ret;
+
+ regs = devm_ioremap_resource(cdns->dev, &cdns->otg_res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /* Detection of DRD version. The controller has been released
+ * in three versions. All are very similar and software compatible,
+ * but they have some changes in the register maps.
+ * The first register in the oldest version is the command register
+ * and it is read-only, so the driver reads 0 from it. In v1 and v2,
+ * on the other hand, the first register contains the device ID
+ * number, which is never 0. The driver uses this fact to detect the
+ * proper version of the controller.
+ */
+ cdns->otg_v0_regs = regs;
+ if (!readl(&cdns->otg_v0_regs->cmd)) {
+ cdns->version = CDNS3_CONTROLLER_V0;
+ cdns->otg_v1_regs = NULL;
+ cdns->otg_cdnsp_regs = NULL;
+ cdns->otg_regs = regs;
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v0_regs->ien;
+ writel(1, &cdns->otg_v0_regs->simulate);
+ dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
+ readl(&cdns->otg_v0_regs->version));
+ } else {
+ cdns->otg_v0_regs = NULL;
+ cdns->otg_v1_regs = regs;
+ cdns->otg_cdnsp_regs = regs;
+
+ cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
+
+ if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_cdnsp_regs->ien;
+ cdns->version = CDNSP_CONTROLLER_V2;
+ } else {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v1_regs->ien;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->version = CDNS3_CONTROLLER_V1;
+ }
+
+ dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+ readl(&cdns->otg_v1_regs->did),
+ readl(&cdns->otg_v1_regs->rid));
+ }
+
+ state = OTGSTS_STRAP(readl(&cdns->otg_regs->sts));
+
+ /* Update dr_mode according to STRAP configuration. */
+ cdns->dr_mode = USB_DR_MODE_OTG;
+
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_HOST) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_HOST)) {
+ dev_dbg(cdns->dev, "Controller strapped to HOST\n");
+ cdns->dr_mode = USB_DR_MODE_HOST;
+ } else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_GADGET) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_GADGET)) {
+ dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
+ cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
+
+ ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq,
+ cdns_drd_irq,
+ cdns_drd_thread_irq,
+ IRQF_SHARED,
+ dev_name(cdns->dev), cdns);
+ if (ret) {
+ dev_err(cdns->dev, "couldn't get otg_irq\n");
+ return ret;
+ }
+
+ state = readl(&cdns->otg_regs->sts);
+ if (OTGSTS_OTG_NRDY(state)) {
+ dev_err(cdns->dev, "Cadence USB3 OTG device not ready\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int cdns_drd_exit(struct cdns *cdns)
+{
+ cdns_otg_disable_irq(cdns);
+
+ return 0;
+}
+
+
+/* Indicate whether the cdns3 core lost power before */
+bool cdns_power_is_lost(struct cdns *cdns)
+{
+ if (cdns->version == CDNS3_CONTROLLER_V0) {
+ if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
+ return true;
+ } else {
+ if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(cdns_power_is_lost);
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
new file mode 100644
index 000000000..cbdf94f73
--- /dev/null
+++ b/drivers/usb/cdns3/drd.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USB3 and USBSSP DRD header file.
+ *
+ * Copyright (C) 2018-2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+#ifndef __LINUX_CDNS3_DRD
+#define __LINUX_CDNS3_DRD
+
+#include <linux/usb/otg.h>
+#include "core.h"
+
+/* DRD register interface for version v1 of cdns3 driver. */
+struct cdns3_otg_regs {
+ __le32 did;
+ __le32 rid;
+ __le32 capabilities;
+ __le32 reserved1;
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 reserved2;
+ __le32 ien;
+ __le32 ivect;
+ __le32 refclk;
+ __le32 tmr;
+ __le32 reserved3[4];
+ __le32 simulate;
+ __le32 override;
+ __le32 susp_ctrl;
+ __le32 phyrst_cfg;
+ __le32 anasts;
+ __le32 adp_ramp_time;
+ __le32 ctrl1;
+ __le32 ctrl2;
+};
+
+/* DRD register interface for version v0 of cdns3 driver. */
+struct cdns3_otg_legacy_regs {
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 refclk;
+ __le32 ien;
+ __le32 ivect;
+ __le32 reserved1[3];
+ __le32 tmr;
+ __le32 reserved2[2];
+ __le32 version;
+ __le32 capabilities;
+ __le32 reserved3[2];
+ __le32 simulate;
+ __le32 reserved4[5];
+ __le32 ctrl1;
+};
+
+/* DRD register interface for cdnsp driver */
+struct cdnsp_otg_regs {
+ __le32 did;
+ __le32 rid;
+ __le32 cfgs1;
+ __le32 cfgs2;
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 ien;
+ __le32 ivect;
+ __le32 tmr;
+ __le32 simulate;
+ __le32 adpbc_sts;
+ __le32 adp_ramp_time;
+ __le32 adpbc_ctrl1;
+ __le32 adpbc_ctrl2;
+ __le32 override;
+ __le32 vbusvalid_dbnc_cfg;
+ __le32 sessvalid_dbnc_cfg;
+ __le32 susp_timing_ctrl;
+};
+
+#define OTG_CDNSP_DID 0x0004034E
+
+/*
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
+ */
+struct cdns_otg_common_regs {
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+};
+
+/*
+ * Interrupt related registers. These registers are mapped at a different
+ * location in the CDNSP controller.
+ */
+struct cdns_otg_irq_regs {
+ __le32 ien;
+ __le32 ivect;
+};
+
+/* CDNS_RID - bitmasks */
+#define CDNS_RID(p) ((p) & GENMASK(15, 0))
+
+/* CDNS_DID - bitmasks */
+#define CDNS_DID(p) ((p) & GENMASK(31, 0))
+
+/* OTGCMD - bitmasks */
+/* Request the bus for Device mode. */
+#define OTGCMD_DEV_BUS_REQ BIT(0)
+/* Request the bus for Host mode. */
+#define OTGCMD_HOST_BUS_REQ BIT(1)
+/* Enable OTG mode. */
+#define OTGCMD_OTG_EN BIT(2)
+/* Disable OTG mode. */
+#define OTGCMD_OTG_DIS BIT(3)
+/* Configure OTG as A-Device. */
+#define OTGCMD_A_DEV_EN BIT(4)
+/* De-configure OTG as A-Device. */
+#define OTGCMD_A_DEV_DIS BIT(5)
+/* Drop the bus for Device mode. */
+#define OTGCMD_DEV_BUS_DROP BIT(8)
+/* Drop the bus for Host mode. */
+#define OTGCMD_HOST_BUS_DROP BIT(9)
+/* Power Down USBSS-DEV - only for CDNS3. */
+#define OTGCMD_DEV_POWER_OFF BIT(11)
+/* Power Down CDNSXHCI - only for CDNS3. */
+#define OTGCMD_HOST_POWER_OFF BIT(12)
+
+/* OTGIEN - bitmasks */
+/* ID change interrupt enable */
+#define OTGIEN_ID_CHANGE_INT BIT(0)
+/* Vbusvalid rise detected interrupt enable. */
+#define OTGIEN_VBUSVALID_RISE_INT BIT(4)
+/* Vbusvalid fall detected interrupt enable. */
+#define OTGIEN_VBUSVALID_FALL_INT BIT(5)
+
+/* OTGSTS - bitmasks */
+/*
+ * Current value of the ID pin. It is only valid when idpullup in
+ * OTGCTRL1_TYPE register is set to '1'.
+ */
+#define OTGSTS_ID_VALUE BIT(0)
+/* Current value of the vbus_valid */
+#define OTGSTS_VBUS_VALID BIT(1)
+/* Current value of the b_sess_vld */
+#define OTGSTS_SESSION_VALID BIT(2)
+/* Device mode is active. */
+#define OTGSTS_DEV_ACTIVE BIT(3)
+/* Host mode is active. */
+#define OTGSTS_HOST_ACTIVE BIT(4)
+/* OTG Controller not ready.
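+ * If this bit is still set when the driver probes, cdns_drd_init()
+ * fails with -ENODEV.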
+#define OTGSTS_OTG_NRDY_MASK		BIT(11)
+#define OTGSTS_OTG_NRDY(p)		((p) & OTGSTS_OTG_NRDY_MASK)
+
+/*
+ * Value of the strap pins for:
+ * CDNS3:
+ * 000 - no default configuration
+ * 010 - Controller initially configured as Host
+ * 100 - Controller initially configured as Device
+ * CDNSP:
+ * 000 - No default configuration.
+ * 001 - Controller initially configured as Host.
+ * 010 - Controller initially configured as Device.
+ */
+#define OTGSTS_STRAP(p)			(((p) & GENMASK(14, 12)) >> 12)
+#define OTGSTS_STRAP_NO_DEFAULT_CFG	0x00
+#define OTGSTS_STRAP_HOST_OTG		0x01
+#define OTGSTS_STRAP_HOST		0x02
+#define OTGSTS_STRAP_GADGET		0x04
+#define OTGSTS_CDNSP_STRAP_HOST		0x01
+#define OTGSTS_CDNSP_STRAP_GADGET	0x02
+
+/* Host mode is turned on. */
+#define OTGSTS_CDNS3_XHCI_READY		BIT(26)
+#define OTGSTS_CDNSP_XHCI_READY		BIT(27)
+
+/* Device mode is turned on. */
+#define OTGSTS_CDNS3_DEV_READY		BIT(27)
+#define OTGSTS_CDNSP_DEV_READY		BIT(26)
+
+/* OTGSTATE - bitmasks */
+#define OTGSTATE_DEV_STATE_MASK		GENMASK(2, 0)
+#define OTGSTATE_HOST_STATE_MASK	GENMASK(5, 3)
+#define OTGSTATE_HOST_STATE_IDLE	0x0
+#define OTGSTATE_HOST_STATE_VBUS_FALL	0x7
+#define OTGSTATE_HOST_STATE(p)		(((p) & OTGSTATE_HOST_STATE_MASK) >> 3)
+
+/* OTGREFCLK - bitmasks */
+#define OTGREFCLK_STB_CLK_SWITCH_EN	BIT(31)
+
+/* OVERRIDE - bitmasks */
+#define OVERRIDE_IDPULLUP		BIT(0)
+/* Only for CDNS3_CONTROLLER_V0 version. */
+#define OVERRIDE_IDPULLUP_V0		BIT(24)
+/* Vbusvalid/Sesvalid override select. */
+#define OVERRIDE_SESS_VLD_SEL		BIT(10)
+
+/* PHYRST_CFG - bitmasks */
+#define PHYRST_CFG_PHYRST_A_ENABLE	BIT(0)
+
+#define CDNS3_ID_PERIPHERAL		1
+#define CDNS3_ID_HOST			0
+
+bool cdns_is_host(struct cdns *cdns);
+bool cdns_is_device(struct cdns *cdns);
+int cdns_get_id(struct cdns *cdns);
+int cdns_get_vbus(struct cdns *cdns);
+void cdns_clear_vbus(struct cdns *cdns);
+void cdns_set_vbus(struct cdns *cdns);
+int cdns_drd_init(struct cdns *cdns);
+int cdns_drd_exit(struct cdns *cdns);
+int cdns_drd_update_mode(struct cdns *cdns);
+int cdns_drd_gadget_on(struct cdns *cdns);
+void cdns_drd_gadget_off(struct cdns *cdns);
+int cdns_drd_host_on(struct cdns *cdns);
+void cdns_drd_host_off(struct cdns *cdns);
+bool cdns_power_is_lost(struct cdns *cdns);
+#endif /* __LINUX_CDNS3_DRD */
diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h
new file mode 100644
index 000000000..c37b6269b
--- /dev/null
+++ b/drivers/usb/cdns3/gadget-export.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS and USBSSP DRD Driver - Gadget Export APIs.
+ *
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ */
+#ifndef __LINUX_CDNS3_GADGET_EXPORT
+#define __LINUX_CDNS3_GADGET_EXPORT
+
+#if IS_ENABLED(CONFIG_USB_CDNSP_GADGET)
+
+int cdnsp_gadget_init(struct cdns *cdns);
+#else
+
+static inline int cdnsp_gadget_init(struct cdns *cdns)
+{
+	return -ENXIO;
+}
+
+#endif /* CONFIG_USB_CDNSP_GADGET */
+
+#if IS_ENABLED(CONFIG_USB_CDNS3_GADGET)
+
+int cdns3_gadget_init(struct cdns *cdns);
+#else
+
+static inline int cdns3_gadget_init(struct cdns *cdns)
+{
+	return -ENXIO;
+}
+
+#endif /* CONFIG_USB_CDNS3_GADGET */
+
+#endif /* __LINUX_CDNS3_GADGET_EXPORT */
diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h
new file mode 100644
index 000000000..cf92173ec
--- /dev/null
+++ b/drivers/usb/cdns3/host-export.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS and USBSSP DRD Driver - Host Export APIs.
+ *
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ */
+#ifndef __LINUX_CDNS3_HOST_EXPORT
+#define __LINUX_CDNS3_HOST_EXPORT
+
+#if IS_ENABLED(CONFIG_USB_CDNS_HOST)
+
+int cdns_host_init(struct cdns *cdns);
+
+#else
+
+static inline int cdns_host_init(struct cdns *cdns)
+{
+	return -ENXIO;
+}
+
+static inline void cdns_host_exit(struct cdns *cdns) { }
+
+#endif /* CONFIG_USB_CDNS_HOST */
+
+#endif /* __LINUX_CDNS3_HOST_EXPORT */
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
new file mode 100644
index 000000000..6164fc4c9
--- /dev/null
+++ b/drivers/usb/cdns3/host.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS and USBSSP DRD Driver - host side.
+ *
+ * Copyright (C) 2018-2019 Cadence Design Systems.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ *          Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "drd.h"
+#include "host-export.h"
+#include <linux/usb/hcd.h>
+#include "../host/xhci.h"
+#include "../host/xhci-plat.h"
+
+#define XECP_PORT_CAP_REG	0x8000
+#define XECP_AUX_CTRL_REG1	0x8120
+
+#define CFG_RXDET_P3_EN		BIT(15)
+#define LPM_2_STB_SWITCH_EN	BIT(25)
+
+static void xhci_cdns3_plat_start(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 value;
+
+	/* set usbcmd.EU3S */
+	value = readl(&xhci->op_regs->command);
+	value |= CMD_PM_INDEX;
+	writel(value, &xhci->op_regs->command);
+
+	if (hcd->regs) {
+		value = readl(hcd->regs + XECP_AUX_CTRL_REG1);
+		value |= CFG_RXDET_P3_EN;
+		writel(value, hcd->regs + XECP_AUX_CTRL_REG1);
+
+		value = readl(hcd->regs + XECP_PORT_CAP_REG);
+		value |= LPM_2_STB_SWITCH_EN;
+		writel(value, hcd->regs + XECP_PORT_CAP_REG);
+	}
+}
+
+static int xhci_cdns3_resume_quirk(struct usb_hcd *hcd)
+{
+	xhci_cdns3_plat_start(hcd);
+	return 0;
+}
+
+static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
+	.quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
+	.plat_start = xhci_cdns3_plat_start,
+	.resume_quirk = xhci_cdns3_resume_quirk,
+};
+
+static int __cdns_host_init(struct cdns *cdns)
+{
+	struct platform_device *xhci;
+	int ret;
+	struct usb_hcd *hcd;
+
+	cdns_drd_host_on(cdns);
+
+	xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
+	if (!xhci) {
+		dev_err(cdns->dev, "couldn't allocate xHCI device\n");
+		return -ENOMEM;
+	}
+
+	xhci->dev.parent = cdns->dev;
+	cdns->host_dev = xhci;
+
+	ret = platform_device_add_resources(xhci, cdns->xhci_res,
+					    CDNS_XHCI_RESOURCES_NUM);
+	if (ret) {
		dev_err(cdns->dev, "couldn't add resources to xHCI device\n");
+		goto err1;
+	}
+
+	cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+				       sizeof(struct xhci_plat_priv), GFP_KERNEL);
+	if (!cdns->xhci_plat_data) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	if (cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW))
+		cdns->xhci_plat_data->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
+	ret = platform_device_add_data(xhci, cdns->xhci_plat_data,
+				       sizeof(struct xhci_plat_priv));
+	if (ret)
+		goto free_memory;
+
+	ret = platform_device_add(xhci);
+	if (ret) {
+		dev_err(cdns->dev, "failed to register xHCI device\n");
+		goto free_memory;
+	}
+
+	/* The glue layer needs access to the xHCI registers for power management. */
+	hcd = platform_get_drvdata(xhci);
+	if (hcd)
+		cdns->xhci_regs = hcd->regs;
+
+	return 0;
+
+free_memory:
+	kfree(cdns->xhci_plat_data);
+err1:
+	platform_device_put(xhci);
+	return ret;
+}
+
+static void cdns_host_exit(struct cdns *cdns)
+{
+	kfree(cdns->xhci_plat_data);
+	platform_device_unregister(cdns->host_dev);
+	cdns->host_dev = NULL;
+	cdns_drd_host_off(cdns);
+}
+
+int cdns_host_init(struct cdns *cdns)
+{
+	struct cdns_role_driver *rdrv;
+
+	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+	if (!rdrv)
+		return -ENOMEM;
+
+	rdrv->start = __cdns_host_init;
+	rdrv->stop = cdns_host_exit;
+	rdrv->state = CDNS_ROLE_STATE_INACTIVE;
+	rdrv->name = "host";
+
+	cdns->roles[USB_ROLE_HOST] = rdrv;
+
+	return 0;
+}
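
Editor's note: the dr_mode selection in cdns_drd_init() above reduces to a small pure function of the OTGSTS strap field and the controller version. The sketch below restates that decision table as a self-contained, hedged example; the OTGSTS_* values are copied from drd.h, while strap_to_dr_mode() and the simplified dr_mode enum are hypothetical names introduced here for illustration, not part of the patch.

#include <stdbool.h>
#include <stdint.h>

/* Values copied from drd.h above; the enum stands in for usb_dr_mode. */
#define OTGSTS_STRAP(p)			(((p) & (0x7u << 12)) >> 12)
#define OTGSTS_STRAP_HOST		0x02
#define OTGSTS_STRAP_GADGET		0x04
#define OTGSTS_CDNSP_STRAP_HOST		0x01
#define OTGSTS_CDNSP_STRAP_GADGET	0x02

enum dr_mode { DR_MODE_OTG, DR_MODE_HOST, DR_MODE_PERIPHERAL };

/*
 * Mirrors the strap handling in cdns_drd_init(): CDNSP (v2) and
 * CDNS3 (v0/v1) encode the Host/Device straps differently, and any
 * other value (including "no default configuration") leaves the
 * controller in OTG mode.
 */
static enum dr_mode strap_to_dr_mode(uint32_t otgsts, bool is_cdnsp)
{
	uint32_t strap = OTGSTS_STRAP(otgsts);
	uint32_t host = is_cdnsp ? OTGSTS_CDNSP_STRAP_HOST : OTGSTS_STRAP_HOST;
	uint32_t gadget = is_cdnsp ? OTGSTS_CDNSP_STRAP_GADGET : OTGSTS_STRAP_GADGET;

	if (strap == host)
		return DR_MODE_HOST;
	if (strap == gadget)
		return DR_MODE_PERIPHERAL;
	return DR_MODE_OTG;
}

For example, an OTGSTS value of 0x2000 (strap field 010) maps to DR_MODE_HOST on CDNS3 but DR_MODE_PERIPHERAL on CDNSP, which is exactly why cdns_drd_init() checks cdns->version before comparing the strap value.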