author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/usb/musb
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/usb/musb')
-rw-r--r--  drivers/usb/musb/Kconfig            |  184
-rw-r--r--  drivers/usb/musb/Makefile           |   39
-rw-r--r--  drivers/usb/musb/am35x.c            |  610
-rw-r--r--  drivers/usb/musb/cppi_dma.c         | 1547
-rw-r--r--  drivers/usb/musb/cppi_dma.h         |  147
-rw-r--r--  drivers/usb/musb/da8xx.c            |  638
-rw-r--r--  drivers/usb/musb/davinci.c          |  606
-rw-r--r--  drivers/usb/musb/davinci.h          |  103
-rw-r--r--  drivers/usb/musb/jz4740.c           |  294
-rw-r--r--  drivers/usb/musb/mediatek.c         |  543
-rw-r--r--  drivers/usb/musb/mpfs.c             |  269
-rw-r--r--  drivers/usb/musb/musb_core.c        | 2966
-rw-r--r--  drivers/usb/musb/musb_core.h        |  601
-rw-r--r--  drivers/usb/musb/musb_cppi41.c      |  811
-rw-r--r--  drivers/usb/musb/musb_debug.h       |   34
-rw-r--r--  drivers/usb/musb/musb_debugfs.c     |  341
-rw-r--r--  drivers/usb/musb/musb_dma.h         |  220
-rw-r--r--  drivers/usb/musb/musb_dsps.c        | 1052
-rw-r--r--  drivers/usb/musb/musb_gadget.c      | 2093
-rw-r--r--  drivers/usb/musb/musb_gadget.h      |  116
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c  | 1058
-rw-r--r--  drivers/usb/musb/musb_host.c        | 2759
-rw-r--r--  drivers/usb/musb/musb_host.h        |  124
-rw-r--r--  drivers/usb/musb/musb_io.h          |   49
-rw-r--r--  drivers/usb/musb/musb_regs.h        |  362
-rw-r--r--  drivers/usb/musb/musb_trace.c       |   25
-rw-r--r--  drivers/usb/musb/musb_trace.h       |  389
-rw-r--r--  drivers/usb/musb/musb_virthub.c     |  440
-rw-r--r--  drivers/usb/musb/musbhsdma.c        |  455
-rw-r--r--  drivers/usb/musb/omap2430.c         |  625
-rw-r--r--  drivers/usb/musb/omap2430.h         |   48
-rw-r--r--  drivers/usb/musb/sunxi.c            |  834
-rw-r--r--  drivers/usb/musb/tusb6010.c         | 1282
-rw-r--r--  drivers/usb/musb/tusb6010.h         |  215
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c    |  642
-rw-r--r--  drivers/usb/musb/ux500.c            |  371
-rw-r--r--  drivers/usb/musb/ux500_dma.c        |  396
37 files changed, 23288 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 000000000..6c8f7763e
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+ tristate 'Inventra Highspeed Dual Role Controller'
+ depends on (USB || USB_GADGET)
+ depends on HAS_IOMEM
+ help
+ Say Y here if your system has a dual role high speed USB
+ controller based on the Mentor Graphics silicon IP. Then
+ configure options to match your silicon and the board
+ it's being used with, including the USB peripheral role,
+ or the USB host role, or both.
+
+ Texas Instruments families using this IP include DaVinci
+ (35x, 644x ...), OMAP 243x, OMAP 3, and TUSB 6010.
+
+ Allwinner SoCs using this IP include A10, A13, A20, ...
+
+ If you do not know what this is, please say N.
+
+ To compile this driver as a module, choose M here; the
+ module will be called "musb-hdrc".
+
+if USB_MUSB_HDRC
+
+choice
+ bool "MUSB Mode Selection"
+ default USB_MUSB_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_MUSB_HOST if (USB && !USB_GADGET)
+ default USB_MUSB_GADGET if (!USB && USB_GADGET)
+
+config USB_MUSB_HOST
+ bool "Host only mode"
+ depends on USB=y || USB=USB_MUSB_HDRC
+ help
+ Select this when you want to use MUSB in host mode only;
+ the gadget feature will then be unavailable.
+
+config USB_MUSB_GADGET
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC
+ depends on HAS_DMA
+ help
+ Select this when you want to use MUSB in gadget mode only;
+ the host feature will then be unavailable.
+
+config USB_MUSB_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on ((USB=y || USB=USB_MUSB_HDRC) && (USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC))
+ depends on HAS_DMA
+ help
+ This is the default operating mode of the MUSB controller,
+ with both host and gadget features enabled.
+
+endchoice
+
+comment "Platform Glue Layer"
+
+config USB_MUSB_SUNXI
+ tristate "Allwinner (sunxi)"
+ depends on ARCH_SUNXI
+ depends on NOP_USB_XCEIV
+ depends on PHY_SUN4I_USB
+ depends on EXTCON
+ select GENERIC_PHY
+ select SUNXI_SRAM
+
+config USB_MUSB_DAVINCI
+ tristate "DaVinci"
+ depends on ARCH_DAVINCI_DMx
+ depends on NOP_USB_XCEIV
+ depends on BROKEN
+
+config USB_MUSB_DA8XX
+ tristate "DA8xx/OMAP-L1x"
+ depends on ARCH_DAVINCI_DA8XX
+ depends on NOP_USB_XCEIV
+ select PHY_DA8XX_USB
+
+config USB_MUSB_TUSB6010
+ tristate "TUSB6010"
+ depends on HAS_IOMEM
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on NOP_USB_XCEIV!=m || USB_MUSB_HDRC=m
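+ # i.e. a modular NOP_USB_XCEIV transceiver requires the MUSB core
+ # to be modular too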
+
+config USB_MUSB_OMAP2PLUS
+ tristate "OMAP2430 and onwards"
+ depends on ARCH_OMAP2PLUS && USB
+ depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY
+ select GENERIC_PHY
+
+config USB_MUSB_AM35X
+ tristate "AM35x"
+ depends on ARCH_OMAP
+ depends on NOP_USB_XCEIV
+
+config USB_MUSB_DSPS
+ tristate "TI DSPS platforms"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on OF_IRQ
+
+config USB_MUSB_UX500
+ tristate "Ux500 platforms"
+ depends on ARCH_U8500 || COMPILE_TEST
+
+config USB_MUSB_JZ4740
+ tristate "JZ4740"
+ depends on OF
+ depends on MIPS || COMPILE_TEST
+ depends on USB_MUSB_GADGET
+ depends on USB=n || USB_OTG_DISABLE_EXTERNAL_HUB
+ select USB_ROLE_SWITCH
+
+config USB_MUSB_MEDIATEK
+ tristate "MediaTek platforms"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on NOP_USB_XCEIV
+ select GENERIC_PHY
+ select USB_ROLE_SWITCH
+
+config USB_MUSB_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC platforms"
+ depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on NOP_USB_XCEIV
+ select USB_MUSB_DUAL_ROLE
+ help
+ Say Y here to enable support for USB on Microchip's PolarFire SoC.
+
+ This support is also available as a module. If so, the module
+ will be called mpfs.
+
+comment "MUSB DMA mode"
+
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not choose this unless DMA support for your SoC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+if !MUSB_PIO_ONLY
+
+config USB_UX500_DMA
+ bool 'ST Ericsson Ux500'
+ depends on USB_MUSB_UX500
+ help
+ Enable DMA transfers on UX500 platforms.
+
+config USB_INVENTRA_DMA
+ bool 'Inventra'
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK || USB_MUSB_JZ4740 || USB_MUSB_POLARFIRE_SOC
+ help
+ Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+ bool 'TI CPPI (Davinci)'
+ depends on USB_MUSB_DAVINCI
+ help
+ Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TI_CPPI41_DMA
+ bool 'TI CPPI 4.1'
+ depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX) && DMADEVICES
+ select TI_CPPI41
+
+config USB_TUSB_OMAP_DMA
+ bool 'TUSB 6010'
+ depends on USB_MUSB_TUSB6010 = USB_MUSB_HDRC # both built-in or both modules
+ depends on ARCH_OMAP
+ help
+ Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+endif # !MUSB_PIO_ONLY
+
+endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 000000000..51dd54a8d
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+# define_trace.h needs to know how to find our header
+CFLAGS_musb_trace.o := -I$(src)
+
+obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
+
+musb_hdrc-y := musb_core.o musb_trace.o
+
+musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o
+musb_hdrc-$(CONFIG_USB_MUSB_GADGET)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_gadget_ep0.o musb_gadget.o
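+
+# Note: the pasted-together $(CONFIG_A)$(CONFIG_B) suffix works because
+# the mode options form a Kconfig choice, so at most one expands to "y"
+# and the others to nothing. The objects thus join musb_hdrc-y for both
+# the single-role and dual-role builds, and land on the ignored
+# "musb_hdrc-" list otherwise.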
+musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o
+
+# Hardware Glue Layer
+obj-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o
+obj-$(CONFIG_USB_MUSB_AM35X) += am35x.o
+obj-$(CONFIG_USB_MUSB_DSPS) += musb_dsps.o
+obj-$(CONFIG_USB_MUSB_TUSB6010) += tusb6010.o
+obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o
+obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
+obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
+obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
+obj-$(CONFIG_USB_MUSB_SUNXI) += sunxi.o
+obj-$(CONFIG_USB_MUSB_MEDIATEK) += mediatek.o
+obj-$(CONFIG_USB_MUSB_POLARFIRE_SOC) += mpfs.o
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO only, or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o
+musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o
+musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI41_DMA) += musb_cppi41.o
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
new file mode 100644
index 000000000..bf2c0fa6c
--- /dev/null
+++ b/drivers/usb/musb/am35x.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Texas Instruments AM35x "glue layer"
+ *
+ * Copyright (c) 2010, by Texas Instruments
+ *
+ * Based on the DA8xx "glue layer" code.
+ * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/platform_data/usb-omap.h>
+
+#include "musb_core.h"
+
+/*
+ * AM35x specific definitions
+ */
+/* USB 2.0 OTG module registers */
+#define USB_REVISION_REG 0x00
+#define USB_CTRL_REG 0x04
+#define USB_STAT_REG 0x08
+#define USB_EMULATION_REG 0x0c
+/* 0x10 Reserved */
+#define USB_AUTOREQ_REG 0x14
+#define USB_SRP_FIX_TIME_REG 0x18
+#define USB_TEARDOWN_REG 0x1c
+#define EP_INTR_SRC_REG 0x20
+#define EP_INTR_SRC_SET_REG 0x24
+#define EP_INTR_SRC_CLEAR_REG 0x28
+#define EP_INTR_MASK_REG 0x2c
+#define EP_INTR_MASK_SET_REG 0x30
+#define EP_INTR_MASK_CLEAR_REG 0x34
+#define EP_INTR_SRC_MASKED_REG 0x38
+#define CORE_INTR_SRC_REG 0x40
+#define CORE_INTR_SRC_SET_REG 0x44
+#define CORE_INTR_SRC_CLEAR_REG 0x48
+#define CORE_INTR_MASK_REG 0x4c
+#define CORE_INTR_MASK_SET_REG 0x50
+#define CORE_INTR_MASK_CLEAR_REG 0x54
+#define CORE_INTR_SRC_MASKED_REG 0x58
+/* 0x5c Reserved */
+#define USB_END_OF_INTR_REG 0x60
+
+/* Control register bits */
+#define AM35X_SOFT_RESET_MASK 1
+
+/* USB interrupt register bits */
+#define AM35X_INTR_USB_SHIFT 16
+#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT)
+#define AM35X_INTR_DRVVBUS 0x100
+#define AM35X_INTR_RX_SHIFT 16
+#define AM35X_INTR_TX_SHIFT 0
+#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
+#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */
+#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
+#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)
+
+#define USB_MENTOR_CORE_OFFSET 0x400
+
+struct am35x_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+ struct clk *phy_clk;
+ struct clk *clk;
+};
+
+/*
+ * am35x_musb_enable - enable interrupts
+ */
+static void am35x_musb_enable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 epmask;
+
+ /* Workaround: setup IRQs through both register sets. */
+ epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
+ ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);
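+ /* i.e. TX endpoint bits land in [15:0] and RX bits in [31:17]
+ * (there is no RX EP0), matching the EP_INTR_* register layout
+ * defined above.
+ */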
+
+ musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
+ musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);
+
+ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
+ musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
+ AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
+}
+
+/*
+ * am35x_musb_disable - disable HDRC and flush interrupts
+ */
+static void am35x_musb_disable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
+ musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
+ AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
+ musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
+}
+
+#define portstate(stmt) stmt
+
+static void am35x_musb_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+}
+
+#define POLL_SECONDS 2
+
+static void otg_timer(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /*
+ * We poll because the AM35x won't otherwise expose several
+ * OTG-critical status change events (from the transceiver).
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
+ MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ del_timer(&musb->dev_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb->dev_timer, timeout);
+}
+
+static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
+{
+ struct musb *musb = hci;
+ void __iomem *reg_base = musb->ctrl_base;
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u32 epintr, usbintr;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Get endpoint interrupts */
+ epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
+
+ if (epintr) {
+ musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);
+
+ musb->int_rx =
+ (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
+ musb->int_tx =
+ (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
+ }
+
+ /* Get usb core interrupts */
+ usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
+ if (!usbintr && !epintr)
+ goto eoi;
+
+ if (usbintr) {
+ musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);
+
+ musb->int_usb =
+ (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
+ }
+ /*
+ * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
+ * AM35x's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires that we know its
+ * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
+ int drvvbus = musb_readl(reg_base, USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err;
+
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
+ if (err) {
+ /*
+ * The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"-starting sessions
+ * without waiting for VBUS to stop registering in
+ * devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&musb->dev_timer);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ /* NOTE: this must complete power-on within 100 ms. */
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ err ? " ERROR" : "",
+ devctl);
+ ret = IRQ_HANDLED;
+ }
+
+ /* Drop spurious RX and TX if device is disconnected */
+ if (musb->int_usb & MUSB_INTR_DISCONNECT) {
+ musb->int_tx = 0;
+ musb->int_rx = 0;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ ret |= musb_interrupt(musb);
+
+eoi:
+ /* EOI needs to be written for the IRQ to be re-asserted. */
+ if (ret == IRQ_HANDLED || epintr || usbintr) {
+ /* clear level interrupt */
+ if (data->clear_irq)
+ data->clear_irq();
+ /* write EOI */
+ musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
+ }
+
+ /* Poll for ID change */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
+{
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ int retval = 0;
+
+ if (data->set_mode)
+ data->set_mode(musb_mode);
+ else
+ retval = -EIO;
+
+ return retval;
+}
+
+static int am35x_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 rev;
+
+ musb->mregs += USB_MENTOR_CORE_OFFSET;
+
+ /* Returns zero if e.g. not clocked */
+ rev = musb_readl(reg_base, USB_REVISION_REG);
+ if (!rev)
+ return -ENODEV;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv))
+ return -EPROBE_DEFER;
+
+ timer_setup(&musb->dev_timer, otg_timer, 0);
+
+ /* Reset the musb */
+ if (data->reset)
+ data->reset();
+
+ /* Reset the controller */
+ musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);
+
+ /* Start the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(1);
+
+ msleep(5);
+
+ musb->isr = am35x_musb_interrupt;
+
+ /* clear level interrupt */
+ if (data->clear_irq)
+ data->clear_irq();
+
+ return 0;
+}
+
+static int am35x_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+
+ del_timer_sync(&musb->dev_timer);
+
+ /* Shutdown the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(0);
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+/* AM35x supports only 32bit read operation */
+static void am35x_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+ u32 val;
+ int i;
+
+ /* Read for 32bit-aligned destination address */
+ if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
+ readsl(fifo, dst, len >> 2);
+ dst += len & ~0x03;
+ len &= 0x03;
+ }
+ /*
+ * Now read the remaining 1 to 3 bytes, or the complete length if
+ * the destination address is unaligned.
+ */
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ *(u32 *) dst = musb_readl(fifo, 0);
+ dst += 4;
+ }
+ len &= 0x03;
+ }
+ if (len > 0) {
+ val = musb_readl(fifo, 0);
+ memcpy(dst, &val, len);
+ }
+}
+
+static const struct musb_platform_ops am35x_ops = {
+ .quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP,
+ .init = am35x_musb_init,
+ .exit = am35x_musb_exit,
+
+ .read_fifo = am35x_read_fifo,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .enable = am35x_musb_enable,
+ .disable = am35x_musb_disable,
+
+ .set_mode = am35x_musb_set_mode,
+ .try_idle = am35x_musb_try_idle,
+
+ .set_vbus = am35x_musb_set_vbus,
+};
+
+static const struct platform_device_info am35x_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int am35x_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct am35x_glue *glue;
+ struct platform_device_info pinfo;
+ struct clk *phy_clk;
+ struct clk *clk;
+
+ int ret = -ENOMEM;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ goto err0;
+
+ phy_clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(phy_clk)) {
+ dev_err(&pdev->dev, "failed to get PHY clock\n");
+ ret = PTR_ERR(phy_clk);
+ goto err3;
+ }
+
+ clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err4;
+ }
+
+ ret = clk_enable(phy_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PHY clock\n");
+ goto err5;
+ }
+
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err6;
+ }
+
+ glue->dev = &pdev->dev;
+ glue->phy_clk = phy_clk;
+ glue->clk = clk;
+
+ pdata->platform_ops = &am35x_ops;
+
+ glue->phy = usb_phy_generic_register();
+ if (IS_ERR(glue->phy)) {
+ ret = PTR_ERR(glue->phy);
+ goto err7;
+ }
+ platform_set_drvdata(pdev, glue);
+
+ pinfo = am35x_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ goto err8;
+ }
+
+ return 0;
+
+err8:
+ usb_phy_generic_unregister(glue->phy);
+
+err7:
+ clk_disable(clk);
+
+err6:
+ clk_disable(phy_clk);
+
+err5:
+ clk_put(clk);
+
+err4:
+ clk_put(phy_clk);
+
+err3:
+ kfree(glue);
+
+err0:
+ return ret;
+}
+
+static int am35x_remove(struct platform_device *pdev)
+{
+ struct am35x_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->phy);
+ clk_disable(glue->clk);
+ clk_disable(glue->phy_clk);
+ clk_put(glue->clk);
+ clk_put(glue->phy_clk);
+ kfree(glue);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int am35x_suspend(struct device *dev)
+{
+ struct am35x_glue *glue = dev_get_drvdata(dev);
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+
+ /* Shutdown the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(0);
+
+ clk_disable(glue->phy_clk);
+ clk_disable(glue->clk);
+
+ return 0;
+}
+
+static int am35x_resume(struct device *dev)
+{
+ struct am35x_glue *glue = dev_get_drvdata(dev);
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ int ret;
+
+ /* Start the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(1);
+
+ ret = clk_enable(glue->phy_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable PHY clock\n");
+ return ret;
+ }
+
+ ret = clk_enable(glue->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(am35x_pm_ops, am35x_suspend, am35x_resume);
+
+static struct platform_driver am35x_driver = {
+ .probe = am35x_probe,
+ .remove = am35x_remove,
+ .driver = {
+ .name = "musb-am35x",
+ .pm = &am35x_pm_ops,
+ },
+};
+
+MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
+MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(am35x_driver);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 000000000..edb5b63d7
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1547 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "musb_core.h"
+#include "musb_debug.h"
+#include "cppi_dma.h"
+#include "davinci.h"
+
+
+/* CPPI DMA status 7-mar-2006:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ * which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug-2006 (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ * evidently after the 1 byte packet was received and acked, the queue
+ * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
+ * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
+ * of its next (512 byte) packet. IRQ issues?
+ *
+ * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD 64
+#define NUM_RXCHAN_BD 64
+
+static inline void cpu_drain_writebuffer(void)
+{
+ wmb();
+#ifdef CONFIG_CPU_ARM926T
+ /* REVISIT this "should not be needed",
+ * but lack of it sure seemed to hurt ...
+ */
+ asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
+{
+ struct cppi_descriptor *bd = c->freelist;
+
+ if (bd)
+ c->freelist = bd->next;
+ return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+ if (!bd)
+ return;
+ bd->next = c->freelist;
+ c->freelist = bd;
+}
+
+/*
+ * Start DMA controller
+ *
+ * Initialize the DMA controller as necessary.
+ */
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
+{
+ musb_writel(&rx->rx_skipbytes, 0, 0);
+ musb_writel(&rx->rx_head, 0, 0);
+ musb_writel(&rx->rx_sop, 0, 0);
+ musb_writel(&rx->rx_current, 0, 0);
+ musb_writel(&rx->rx_buf_current, 0, 0);
+ musb_writel(&rx->rx_len_len, 0, 0);
+ musb_writel(&rx->rx_cnt_cnt, 0, 0);
+}
+
+/* zero out entire tx state RAM entry for the channel */
+static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+{
+ musb_writel(&tx->tx_head, 0, 0);
+ musb_writel(&tx->tx_buf, 0, 0);
+ musb_writel(&tx->tx_current, 0, 0);
+ musb_writel(&tx->tx_buf_current, 0, 0);
+ musb_writel(&tx->tx_info, 0, 0);
+ musb_writel(&tx->tx_rem_len, 0, 0);
+ /* musb_writel(&tx->tx_dummy, 0, 0); */
+ musb_writel(&tx->tx_complete, 0, ptr);
+}
+
+static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+ int j;
+
+ /* initialize channel fields */
+ c->head = NULL;
+ c->tail = NULL;
+ c->last_processed = NULL;
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = cppi;
+ c->is_rndis = 0;
+ c->freelist = NULL;
+
+ /* build the BD Free list for the channel */
+ for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+ struct cppi_descriptor *bd;
+ dma_addr_t dma;
+
+ bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
+ bd->dma = dma;
+ cppi_bd_free(c, bd);
+ }
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+ struct cppi *cppi = c->controller;
+ struct cppi_descriptor *bd;
+
+ (void) cppi_channel_abort(&c->channel);
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = NULL;
+
+ /* free all its bds */
+ bd = c->last_processed;
+ do {
+ if (bd)
+ dma_pool_free(cppi->pool, bd, bd->dma);
+ bd = cppi_bd_alloc(c);
+ } while (bd);
+ c->last_processed = NULL;
+}
+
+static void cppi_controller_start(struct cppi *controller)
+{
+ void __iomem *tibase;
+ int i;
+
+ /* do whatever is necessary to start controller */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ controller->tx[i].transmit = true;
+ controller->tx[i].index = i;
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ controller->rx[i].transmit = false;
+ controller->rx[i].index = i;
+ }
+
+ /* setup BD list on a per channel basis */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
+ cppi_pool_init(controller, controller->tx + i);
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_init(controller, controller->rx + i);
+
+ tibase = controller->tibase;
+ INIT_LIST_HEAD(&controller->tx_complete);
+
+ /* initialise tx/rx channel head pointers to zero */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ struct cppi_channel *tx_ch = controller->tx + i;
+ struct cppi_tx_stateram __iomem *tx;
+
+ INIT_LIST_HEAD(&tx_ch->tx_complete);
+
+ tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+ tx_ch->state_ram = tx;
+ cppi_reset_tx(tx, 0);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ struct cppi_channel *rx_ch = controller->rx + i;
+ struct cppi_rx_stateram __iomem *rx;
+
+ INIT_LIST_HEAD(&rx_ch->tx_complete);
+
+ rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+ rx_ch->state_ram = rx;
+ cppi_reset_rx(rx);
+ }
+
+ /* enable individual cppi channels */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ /* enable tx/rx CPPI control */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+ /* disable RNDIS mode, also host rx RNDIS autorequest */
+ musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
+}
+
+/*
+ * Stop DMA controller
+ *
+ * De-Init the DMA controller as necessary.
+ */
+
+static void cppi_controller_stop(struct cppi *controller)
+{
+ void __iomem *tibase;
+ int i;
+ struct musb *musb;
+
+ musb = controller->controller.musb;
+
+ tibase = controller->tibase;
+ /* DISABLE INDIVIDUAL CHANNEL Interrupts */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ musb_dbg(musb, "Tearing down RX and TX Channels");
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ /* FIXME restructure of txdma to use bds like rxdma */
+ controller->tx[i].last_processed = NULL;
+ cppi_pool_free(controller->tx + i);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_free(controller->rx + i);
+
+ /* Proper teardown is supported only in the TX case; we resort to
+ * disabling TX/RX CPPI after cleanup of the TX channels, since TX
+ * CPPI cannot be disabled before TX teardown is complete.
+ */
+ /* disable tx/rx cppi */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+}
+
+/* While dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where dma done != fifo empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side. We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *ep, u8 transmit)
+{
+ struct cppi *controller;
+ u8 index;
+ struct cppi_channel *cppi_ch;
+ void __iomem *tibase;
+ struct musb *musb;
+
+ controller = container_of(c, struct cppi, controller);
+ tibase = controller->tibase;
+ musb = c->musb;
+
+ /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
+ index = ep->epnum - 1;
+
+ /* return the corresponding CPPI Channel Handle, and
+ * probably disable the non-CPPI irq until we need it.
+ */
+ if (transmit) {
+ if (index >= ARRAY_SIZE(controller->tx)) {
+ musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
+ return NULL;
+ }
+ cppi_ch = controller->tx + index;
+ } else {
+ if (index >= ARRAY_SIZE(controller->rx)) {
+ musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
+ return NULL;
+ }
+ cppi_ch = controller->rx + index;
+ core_rxirq_disable(tibase, ep->epnum);
+ }
+
+ /* REVISIT make this an error later once the same driver code works
+ * with the other DMA engine too
+ */
+ if (cppi_ch->hw_ep)
+ musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
+ index, transmit ? 'T' : 'R', cppi_ch);
+ cppi_ch->hw_ep = ep;
+ cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->channel.max_len = 0x7fffffff;
+
+ musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
+ return &cppi_ch->channel;
+}
+
+/* Release a CPPI Channel. */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+ struct cppi_channel *c;
+ void __iomem *tibase;
+
+ /* REVISIT: for paranoia, check state and abort if needed... */
+
+ c = container_of(channel, struct cppi_channel, channel);
+ tibase = c->controller->tibase;
+ if (!c->hw_ep)
+ musb_dbg(c->controller->controller.musb,
+ "releasing idle DMA channel %p", c);
+ else if (!c->transmit)
+ core_rxirq_enable(tibase, c->index + 1);
+
+ /* for now, leave its cppi IRQ enabled (we won't trigger it) */
+ c->hw_ep = NULL;
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_rx_stateram __iomem *rx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ musb_dbg(c->controller->controller.musb,
+ "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x",
+ c->index, tag,
+ musb_readl(c->controller->tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
+ musb_readw(c->hw_ep->regs, MUSB_RXCSR),
+
+ musb_readl(&rx->rx_skipbytes, 0),
+ musb_readl(&rx->rx_head, 0),
+ musb_readl(&rx->rx_sop, 0),
+ musb_readl(&rx->rx_current, 0),
+
+ musb_readl(&rx->rx_buf_current, 0),
+ musb_readl(&rx->rx_len_len, 0),
+ musb_readl(&rx->rx_cnt_cnt, 0),
+ musb_readl(&rx->rx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_tx_stateram __iomem *tx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ musb_dbg(c->controller->controller.musb,
+ "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x",
+ c->index, tag,
+ musb_readw(c->hw_ep->regs, MUSB_TXCSR),
+
+ musb_readl(&tx->tx_head, 0),
+ musb_readl(&tx->tx_buf, 0),
+ musb_readl(&tx->tx_current, 0),
+ musb_readl(&tx->tx_buf_current, 0),
+
+ musb_readl(&tx->tx_info, 0),
+ musb_readl(&tx->tx_rem_len, 0),
+ /* dummy/unused word 6 */
+ musb_readl(&tx->tx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+ void __iomem *tibase, int is_rndis)
+{
+ /* we may need to change the rndis flag for this cppi channel */
+ if (c->is_rndis != is_rndis) {
+ u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
+ u32 temp = 1 << (c->index);
+
+ if (is_rx)
+ temp <<= 16;
+ if (is_rndis)
+ value |= temp;
+ else
+ value &= ~temp;
+ musb_writel(tibase, DAVINCI_RNDIS_REG, value);
+ c->is_rndis = is_rndis;
+ }
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+ pr_debug("RXBD/%s %08x: "
+ "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+ tag, bd->dma,
+ bd->hw_next, bd->hw_bufp, bd->hw_off_len,
+ bd->hw_options);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+ struct cppi_descriptor *bd;
+
+ cppi_dump_rx(level, rx, tag);
+ if (rx->last_processed)
+ cppi_dump_rxbd("last", rx->last_processed);
+ for (bd = rx->head; bd; bd = bd->next)
+ cppi_dump_rxbd("active", bd);
+}
+
+
+/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+ void __iomem *tibase, int onepacket, unsigned n_bds)
+{
+ u32 val;
+
+#ifdef RNDIS_RX_IS_USABLE
+ u32 tmp;
+ /* assert(is_host_active(musb)) */
+
+ /* start from "AutoReq never" */
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ val = tmp & ~((0x3) << (rx->index * 2));
+
+ /* HCD arranged reqpkt for packet #1. we arrange int
+ * for all but the last one, maybe in two segments.
+ */
+ if (!onepacket) {
+#if 0
+ /* use two segments, autoreq "all" then the last "never" */
+ val |= ((0x3) << (rx->index * 2));
+ n_bds--;
+#else
+ /* one segment, autoreq "all-but-last" */
+ val |= ((0x1) << (rx->index * 2));
+#endif
+ }
+
+ if (val != tmp) {
+ int n = 100;
+
+ /* make sure that autoreq is updated before continuing */
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+ do {
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ if (tmp == val)
+ break;
+ cpu_relax();
+ } while (n-- > 0);
+ }
+#endif
+
+ /* REQPKT is turned off after each segment */
+ if (n_bds && rx->channel.actual_len) {
+ void __iomem *regs = rx->hw_ep->regs;
+
+ val = musb_readw(regs, MUSB_RXCSR);
+ if (!(val & MUSB_RXCSR_H_REQPKT)) {
+ val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, val);
+ /* flush writebuffer */
+ val = musb_readw(regs, MUSB_RXCSR);
+ }
+ }
+ return n_bds;
+}
+
+
+/* Buffer enqueuing Logic:
+ *
+ * - RX builds new queues each time, to help handle routine "early
+ * termination" cases (faults, including errors and short reads)
+ * more correctly.
+ *
+ * - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma). Unfortunately that model seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths. (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI-friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often. RNDIS mode seems to behave well too
+ * (except how it handles the exactly-N-packets case). Building a
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length. It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+ unsigned maxpacket = tx->maxpacket;
+ dma_addr_t addr = tx->buf_dma + tx->offset;
+ size_t length = tx->buf_len - tx->offset;
+ struct cppi_descriptor *bd;
+ unsigned n_bds;
+ unsigned i;
+ struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
+ int rndis;
+
+ /* TX can use the CPPI "rndis" mode, where we can probably fit this
+ * transfer in one BD and one IRQ. The only time we would NOT want
+ * to use it is when hardware constraints prevent it, or if we'd
+ * trigger the "send a ZLP?" confusion.
+ */
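+ /* Decoding the test below: maxpacket must be a multiple of 64
+ * (the rndis length constraint mentioned above), the transfer must
+ * span more than one packet, fit in the 16-bit BD length field, and
+ * not be an exact multiple of maxpacket (the ZLP hiccup).
+ */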
+ rndis = (maxpacket & 0x3f) == 0
+ && length > maxpacket
+ && length < 0xffff
+ && (length % maxpacket) != 0;
+
+ if (rndis) {
+ maxpacket = length;
+ n_bds = 1;
+ } else {
+ if (length)
+ n_bds = DIV_ROUND_UP(length, maxpacket);
+ else
+ n_bds = 1;
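+ /* clamp to the preallocated BD pool; any remainder of the
+ * buffer goes out later as a follow-on segment
+ */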
+ n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+ length = min(n_bds * maxpacket, length);
+ }
+
+ musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
+ tx->index,
+ maxpacket,
+ rndis ? "rndis" : "transparent",
+ n_bds,
+ (unsigned long long)addr, length);
+
+ cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+ /* assuming here that channel_program is called during
+ * transfer initiation ... current code maintains state
+ * for one outstanding request only (no queues, not even
+ * the implicit ones of an iso urb).
+ */
+
+ bd = tx->freelist;
+ tx->head = bd;
+ tx->last_processed = NULL;
+
+ /* FIXME use BD pool like RX side does, and just queue
+ * the minimum number for this request.
+ */
+
+ /* Prepare queue of BDs first, then hand it to hardware.
+ * All BDs except maybe the last should be of full packet
+ * size; for RNDIS there _is_ only that last packet.
+ */
+ for (i = 0; i < n_bds; ) {
+ if (++i < n_bds && bd->next)
+ bd->hw_next = bd->next->dma;
+ else
+ bd->hw_next = 0;
+
+ bd->hw_bufp = tx->buf_dma + tx->offset;
+
+ /* FIXME set EOP only on the last packet,
+ * SOP only on the first ... avoid IRQs
+ */
+ if ((tx->offset + maxpacket) <= tx->buf_len) {
+ tx->offset += maxpacket;
+ bd->hw_off_len = maxpacket;
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | maxpacket;
+ } else {
+ /* only this one may be a partial USB Packet */
+ u32 partial_len;
+
+ partial_len = tx->buf_len - tx->offset;
+ tx->offset = tx->buf_len;
+ bd->hw_off_len = partial_len;
+
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | partial_len;
+ if (partial_len == 0)
+ bd->hw_options |= CPPI_ZERO_SET;
+ }
+
+ musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ /* update the last BD enqueued to the list */
+ tx->tail = bd;
+ bd = bd->next;
+ }
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* Write to the HeadPtr in state RAM to trigger */
+ musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
+
+ cppi_dump_tx(5, tx, "/S");
+}
+
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes. How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
+ * (b) loses **BADLY** because nothing (!) happens when that second packet
+ * fills the buffer, much less when a third one arrives. (Which makes this
+ * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
+ * is optional, and it's fine if peripherals -- not hosts! -- pad messages
+ * out to end-of-buffer. Standard PCI host controller DMA descriptors
+ * implement that mode by default ... which is no accident.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
+ * ignores SOP/EOP markings and processes both of those BDs; so both packets
+ * are loaded into the buffer (with a 212 byte gap between them), and the next
+ * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ * are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ * reliably make both cases work, with software handling both cases correctly
+ * and at the significant penalty of needing an IRQ per packet. (The lack of
+ * I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include RNDIS host drivers, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
+ * that applies best on the peripheral side (and which could fail rudely).
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class. Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+
+/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
+ *
+ * IFF
+ * (a) peripheral mode ... since rndis peripherals could pad their
+ * writes to hosts, causing i/o failure; or we'd have to cope with
+ * a largely unknowable variety of host side protocol variants
+ * (b) and short reads are NOT errors ... since full reads would
+ * cause those same i/o failures
+ * (c) and read length is
+ * - less than 64KB (max per cppi descriptor)
+ * - not a multiple of 4096 (g_zero default, full reads typical)
+ * - N (>1) packets long, ditto (full reads not EXPECTED)
+ * THEN
+ * try rx rndis mode
+ *
+ * Cost of heuristic failing: RXDMA wedges at the end of transfers that
+ * fill out the whole buffer. Buggy host side usb network drivers could
+ * trigger that, but "in the field" such bugs seem to be all but unknown.
+ *
+ * So this module parameter lets the heuristic be disabled. When using
+ * gadgetfs, the heuristic will probably need to be disabled.
+ */
+static bool cppi_rx_rndis = true;
+
+module_param(cppi_rx_rndis, bool, 0);
+MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
+
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ * performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode) with complete safety.
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+ unsigned maxpacket = rx->maxpacket;
+ dma_addr_t addr = rx->buf_dma + rx->offset;
+ size_t length = rx->buf_len - rx->offset;
+ struct cppi_descriptor *bd, *tail;
+ unsigned n_bds;
+ unsigned i;
+ void __iomem *tibase = musb->ctrl_base;
+ int is_rndis = 0;
+ struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
+ struct cppi_descriptor *d;
+
+ if (onepacket) {
+ /* almost every USB driver, host or peripheral side */
+ n_bds = 1;
+
+ /* maybe apply the heuristic above */
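+ /* bit tests decoded: fits the 16-bit BD length field, is not
+ * a multiple of 4096 (per (c) above), and is a whole number of
+ * packets -- the last test assumes maxpacket is a power of two,
+ * as bulk packet sizes are.
+ */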
+ if (cppi_rx_rndis
+ && is_peripheral_active(musb)
+ && length > maxpacket
+ && (length & ~0xffff) == 0
+ && (length & 0x0fff) != 0
+ && (length & (maxpacket - 1)) == 0) {
+ maxpacket = length;
+ is_rndis = 1;
+ }
+ } else {
+ /* virtually nothing except mass storage class */
+ if (length > 0xffff) {
+ n_bds = 0xffff / maxpacket;
+ length = n_bds * maxpacket;
+ } else {
+ n_bds = DIV_ROUND_UP(length, maxpacket);
+ }
+ if (n_bds == 1)
+ onepacket = 1;
+ else
+ n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+ }
+
+ /* In host mode, autorequest logic can generate some IN tokens; it's
+ * tricky since we can't leave REQPKT set in RXCSR after the transfer
+ * finishes. So: multipacket transfers involve two or more segments.
+ * And always at least two IRQs ... RNDIS mode is not an option.
+ */
+ if (is_host_active(musb))
+ n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+ cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
+
+ length = min(n_bds * maxpacket, length);
+
+ musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%llx len %u %u/%u",
+ rx->index, maxpacket,
+ onepacket
+ ? (is_rndis ? "rndis" : "onepacket")
+ : "multipacket",
+ n_bds,
+ musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff,
+ (unsigned long long)addr, length,
+ rx->channel.actual_len, rx->buf_len);
+
+ /* only queue one segment at a time, since the hardware prevents
+ * correct queue shutdown after unexpected short packets
+ */
+ bd = cppi_bd_alloc(rx);
+ rx->head = bd;
+
+ /* Build BDs for all packets in this segment */
+ for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+ u32 bd_len;
+
+ if (i) {
+ bd = cppi_bd_alloc(rx);
+ if (!bd)
+ break;
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+ bd->hw_next = 0;
+
+ /* all but the last packet will be maxpacket size */
+ if (maxpacket < length)
+ bd_len = maxpacket;
+ else
+ bd_len = length;
+
+ bd->hw_bufp = addr;
+ addr += bd_len;
+ rx->offset += bd_len;
+
+ bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
+ bd->buflen = bd_len;
+
+ bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
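+ /* only the SOP descriptor records the segment length;
+ * the others carry just the ownership bit
+ */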
+ length -= bd_len;
+ }
+
+ /* we always expect at least one reusable BD! */
+ if (!tail) {
+ WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
+ return;
+ } else if (i < n_bds)
+ WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
+
+ tail->next = NULL;
+ tail->hw_next = 0;
+
+ bd = rx->head;
+ rx->tail = tail;
+
+ /* short reads and other faults should terminate this entire
+ * dma segment. we want one "dma packet" per dma segment, not
+ * one per USB packet, terminating the whole queue at once...
+ * NOTE that current hardware seems to ignore SOP and EOP.
+ */
+ bd->hw_options |= CPPI_SOP_SET;
+ tail->hw_options |= CPPI_EOP_SET;
+
+ for (d = rx->head; d; d = d->next)
+ cppi_dump_rxbd("S", d);
+
+ /* in case the preceding transfer left some state... */
+ tail = rx->last_processed;
+ if (tail) {
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+
+ core_rxirq_enable(tibase, rx->index + 1);
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* REVISIT specs say to write this AFTER the BUFCNT register
+ * below ... but that loses badly.
+ */
+ musb_writel(&rx_ram->rx_head, 0, bd->dma);
+
+ /* bufferCount must be at least 3, and zeroes on completion
+ * unless it underflows below zero, or stops at two, or keeps
+ * growing ... grr.
+ */
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+
+ if (!i)
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ else if (n_bds > (i - 3))
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds - (i - 3));
+
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+ if (i < (2 + n_bds)) {
+ musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
+ rx->index, i, n_bds);
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ }
+
+ cppi_dump_rx(4, rx, "/S");
+}
+
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @ch: the channel
+ * @maxpacket: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ * all short reads as errors and kick in high level fault recovery.
+ * For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @len: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *ch,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ struct musb *musb;
+
+ cppi_ch = container_of(ch, struct cppi_channel, channel);
+ controller = cppi_ch->controller;
+ musb = controller->controller.musb;
+
+ switch (ch->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* fault irq handler should have handled cleanup */
+ WARNING("%cX DMA%d not cleaned up after abort!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_BUSY:
+ WARNING("program active channel? %cX DMA%d\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ musb_dbg(musb, "%cX DMA%d not allocated!",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ fallthrough;
+ case MUSB_DMA_STATUS_FREE:
+ break;
+ }
+
+ ch->status = MUSB_DMA_STATUS_BUSY;
+
+ /* set transfer parameters, then queue up its first segment */
+ cppi_ch->buf_dma = dma_addr;
+ cppi_ch->offset = 0;
+ cppi_ch->maxpacket = maxpacket;
+ cppi_ch->buf_len = len;
+ cppi_ch->channel.actual_len = 0;
+
+ /* TX channel? or RX? */
+ if (cppi_ch->transmit)
+ cppi_next_tx_segment(musb, cppi_ch);
+ else
+ cppi_next_rx_segment(musb, cppi_ch, mode);
+
+ return true;
+}
+
+static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+ struct cppi_channel *rx = &cppi->rx[ch];
+ struct cppi_rx_stateram __iomem *state = rx->state_ram;
+ struct cppi_descriptor *bd;
+ struct cppi_descriptor *last = rx->last_processed;
+ bool completed = false;
+ bool acked = false;
+ int i;
+ dma_addr_t safe2ack;
+ void __iomem *regs = rx->hw_ep->regs;
+ struct musb *musb = cppi->controller.musb;
+
+ cppi_dump_rx(6, rx, "/K");
+
+ bd = last ? last->next : rx->head;
+ if (!bd)
+ return false;
+
+ /* run through all completed BDs */
+ for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
+ (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (!completed && (bd->hw_options & CPPI_OWN_SET))
+ break;
+
+ musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)",
+ (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options,
+ rx->channel.actual_len);
+
+ /* actual packet received length */
+ if ((bd->hw_options & CPPI_SOP_SET) && !completed)
+ len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
+ else
+ len = 0;
+
+ if (bd->hw_options & CPPI_EOQ_MASK)
+ completed = true;
+
+ if (!completed && len < bd->buflen) {
+ /* NOTE: when we get a short packet, RXCSR_H_REQPKT
+ * must have been cleared, and no more DMA packets may
+ * be active in the queue... TI docs didn't say, but
+ * CPPI ignores those BDs even though OWN is still set.
+ */
+ completed = true;
+ musb_dbg(musb, "rx short %d/%d (%d)",
+ len, bd->buflen,
+ rx->channel.actual_len);
+ }
+
+ /* If we got here, we expect to ack at least one BD; meanwhile
+ * CPPI may be completing other BDs while we scan this list...
+ *
+ * RACE: we can notice OWN cleared before CPPI raises the
+ * matching irq by writing that BD as the completion pointer.
+ * In such cases, stop scanning and wait for the irq, avoiding
+ * lost acks and states where BD ownership is unclear.
+ */
+ if (bd->dma == safe2ack) {
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ safe2ack = musb_readl(&state->rx_complete, 0);
+ acked = true;
+ if (bd->dma == safe2ack)
+ safe2ack = 0;
+ }
+
+ rx->channel.actual_len += len;
+
+ cppi_bd_free(rx, last);
+ last = bd;
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+ rx->last_processed = last;
+
+ /* dma abort, lost ack, or ... */
+ if (!acked && last) {
+ int csr;
+
+ if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ if (safe2ack == 0) {
+ cppi_bd_free(rx, last);
+ rx->last_processed = NULL;
+
+ /* if we land here on the host side, H_REQPKT will
+ * be clear and we need to restart the queue...
+ */
+ WARN_ON(rx->head);
+ }
+ musb_ep_select(cppi->mregs, rx->index + 1);
+ csr = musb_readw(regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_DMAENAB) {
+ musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
+ rx->index,
+ rx->head, rx->tail,
+ rx->last_processed
+ ? (unsigned long long)
+ rx->last_processed->dma
+ : 0,
+ completed ? ", completed" : "",
+ csr);
+ cppi_dump_rxq(4, "/what?", rx);
+ }
+ }
+ if (!completed) {
+ int csr;
+
+ rx->head = bd;
+
+ /* REVISIT seems like "autoreq all but EOP" doesn't...
+ * setting it here "should" be racy, but seems to work
+ */
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ if (is_host_active(cppi->controller.musb)
+ && bd
+ && !(csr & MUSB_RXCSR_H_REQPKT)) {
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(regs, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | csr);
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ }
+ } else {
+ rx->head = NULL;
+ rx->tail = NULL;
+ }
+
+ cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+ return completed;
+}
+
+irqreturn_t cppi_interrupt(int irq, void *dev_id)
+{
+ struct musb *musb = dev_id;
+ struct cppi *cppi;
+ void __iomem *tibase;
+ struct musb_hw_ep *hw_ep = NULL;
+ u32 rx, tx;
+ int i, index;
+ unsigned long flags;
+
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+ if (cppi->irq)
+ spin_lock_irqsave(&musb->lock, flags);
+
+ tibase = musb->ctrl_base;
+
+ tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (!tx && !rx) {
+ if (cppi->irq)
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return IRQ_NONE;
+ }
+
+ musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);
+
+ /* process TX channels */
+ for (index = 0; tx; tx = tx >> 1, index++) {
+ struct cppi_channel *tx_ch;
+ struct cppi_tx_stateram __iomem *tx_ram;
+ bool completed = false;
+ struct cppi_descriptor *bd;
+
+ if (!(tx & 1))
+ continue;
+
+ tx_ch = cppi->tx + index;
+ tx_ram = tx_ch->state_ram;
+
+ /* FIXME need a cppi_tx_scan() routine, which
+ * can also be called from abort code
+ */
+
+ cppi_dump_tx(5, tx_ch, "/E");
+
+ bd = tx_ch->head;
+
+ /*
+ * If head is NULL, this could mean an abort interrupt
+ * is pending and needs to be acknowledged.
+ */
+ if (NULL == bd) {
+ musb_dbg(musb, "null BD");
+ musb_writel(&tx_ram->tx_complete, 0, 0);
+ continue;
+ }
+
+ /* run through all completed BDs */
+ for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (bd->hw_options & CPPI_OWN_SET)
+ break;
+
+ musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
+ tx_ch->channel.actual_len += len;
+
+ tx_ch->last_processed = bd;
+
+ /* write completion register to acknowledge
+ * processing of completed BDs, and possibly
+ * release the IRQ; EOQ might not be set ...
+ *
+ * REVISIT use the same ack strategy as rx
+ *
+ * REVISIT have observed bit 18 set; huh??
+ */
+ /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
+ musb_writel(&tx_ram->tx_complete, 0, bd->dma);
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+
+ /* on end of segment, maybe go to next one */
+ if (completed) {
+ /* cppi_dump_tx(4, tx_ch, "/complete"); */
+
+ /* transfer more, or report completion */
+ if (tx_ch->offset >= tx_ch->buf_len) {
+ tx_ch->head = NULL;
+ tx_ch->tail = NULL;
+ tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = tx_ch->hw_ep;
+
+ musb_dma_completion(musb, index + 1, 1);
+
+ } else {
+ /* Bigger transfer than we could fit in
+ * that first batch of descriptors...
+ */
+ cppi_next_tx_segment(musb, tx_ch);
+ }
+ } else
+ tx_ch->head = bd;
+ }
+
+ /* Start processing the RX block */
+ for (index = 0; rx; rx = rx >> 1, index++) {
+
+ if (rx & 1) {
+ struct cppi_channel *rx_ch;
+
+ rx_ch = cppi->rx + index;
+
+ /* let incomplete dma segments finish */
+ if (!cppi_rx_scan(cppi, index))
+ continue;
+
+ /* start another dma segment if needed */
+ if (rx_ch->channel.actual_len != rx_ch->buf_len
+ && rx_ch->channel.actual_len
+ == rx_ch->offset) {
+ cppi_next_rx_segment(musb, rx_ch, 1);
+ continue;
+ }
+
+ /* all segments completed! */
+ rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = rx_ch->hw_ep;
+
+ core_rxirq_disable(tibase, index + 1);
+ musb_dma_completion(musb, index + 1, 0);
+ }
+ }
+
+ /* write to CPPI EOI register to re-enable interrupts */
+ musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+
+ if (cppi->irq)
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(cppi_interrupt);
+
+/* Instantiate a software object representing a DMA controller. */
+struct dma_controller *
+cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
+{
+ struct cppi *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq_byname(pdev, "dma");
+
+ controller = kzalloc(sizeof *controller, GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->mregs = mregs;
+ controller->tibase = mregs - DAVINCI_BASE_OFFSET;
+
+ controller->controller.musb = musb;
+ controller->controller.channel_alloc = cppi_channel_allocate;
+ controller->controller.channel_release = cppi_channel_release;
+ controller->controller.channel_program = cppi_channel_program;
+ controller->controller.channel_abort = cppi_channel_abort;
+
+ /* NOTE: allocating from on-chip SRAM would give the least
+ * contention for memory access, if that ever matters here.
+ */
+
+ /* setup BufferPool */
+ controller->pool = dma_pool_create("cppi",
+ controller->controller.musb->controller,
+ sizeof(struct cppi_descriptor),
+ CPPI_DESCRIPTOR_ALIGN, 0);
+ if (!controller->pool) {
+ kfree(controller);
+ return NULL;
+ }
+
+ if (irq > 0) {
+ if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ musb_dma_controller_destroy(&controller->controller);
+ return NULL;
+ }
+ controller->irq = irq;
+ }
+
+ cppi_controller_start(controller);
+ return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(cppi_dma_controller_create);
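+
+/* For reference, a glue layer selects this engine through its
+ * musb_platform_ops, as davinci.c does later in this patch:
+ *
+ * .dma_init = cppi_dma_controller_create,
+ * .dma_exit = cppi_dma_controller_destroy,
+ */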
+
+/*
+ * Destroy a previously-instantiated DMA controller.
+ */
+void cppi_dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi *cppi;
+
+ cppi = container_of(c, struct cppi, controller);
+
+ cppi_controller_stop(cppi);
+
+ if (cppi->irq)
+ free_irq(cppi->irq, cppi->controller.musb);
+
+ /* assert: caller stopped the controller first */
+ dma_pool_destroy(cppi->pool);
+
+ kfree(cppi);
+}
+EXPORT_SYMBOL_GPL(cppi_dma_controller_destroy);
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ void __iomem *mbase;
+ void __iomem *tibase;
+ void __iomem *regs;
+ u32 value;
+ struct cppi_descriptor *queue;
+
+ cppi_ch = container_of(channel, struct cppi_channel, channel);
+
+ controller = cppi_ch->controller;
+
+ switch (channel->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* from RX or TX fault irq handler */
+ case MUSB_DMA_STATUS_BUSY:
+ /* the hardware needs shutting down */
+ regs = cppi_ch->hw_ep->regs;
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ case MUSB_DMA_STATUS_FREE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (!cppi_ch->transmit && cppi_ch->head)
+ cppi_dump_rxq(3, "/abort", cppi_ch);
+
+ mbase = controller->mregs;
+ tibase = controller->tibase;
+
+ queue = cppi_ch->head;
+ cppi_ch->head = NULL;
+ cppi_ch->tail = NULL;
+
+ /* REVISIT should rely on caller having done this,
+ * and caller should rely on us not changing it.
+ * peripheral code is safe ... check host too.
+ */
+ musb_ep_select(mbase, cppi_ch->index + 1);
+
+ if (cppi_ch->transmit) {
+ struct cppi_tx_stateram __iomem *tx_ram;
+ /* REVISIT put timeouts on these controller handshakes */
+
+ cppi_dump_tx(6, cppi_ch, " (teardown)");
+
+ /* teardown DMA engine then usb core */
+ do {
+ value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
+ } while (!(value & CPPI_TEAR_READY));
+ musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
+
+ tx_ram = cppi_ch->state_ram;
+ do {
+ value = musb_readl(&tx_ram->tx_complete, 0);
+ } while (0xFFFFFFFC != value);
+
+ /* FIXME clean up the transfer state ... here?
+ * the completion routine should get called with
+ * an appropriate status code.
+ */
+
+ value = musb_readw(regs, MUSB_TXCSR);
+ value &= ~MUSB_TXCSR_DMAENAB;
+ value |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(regs, MUSB_TXCSR, value);
+ musb_writew(regs, MUSB_TXCSR, value);
+
+ /*
+ * 1. Write completion pointer value 0x1 (bit 0 set)
+ * (write back mode).
+ * 2. Wait for the abort interrupt, then put the channel in
+ * compare mode by writing 1 to the tx_complete register.
+ */
+ cppi_reset_tx(tx_ram, 1);
+ cppi_ch->head = NULL;
+ musb_writel(&tx_ram->tx_complete, 0, 1);
+ cppi_dump_tx(5, cppi_ch, " (done teardown)");
+
+ /* REVISIT tx side _should_ clean up the same way
+ * as the RX side ... this does no cleanup at all!
+ */
+
+ } else /* RX */ {
+ u16 csr;
+
+ /* NOTE: docs don't guarantee any of this works ... we
+ * expect that if the usb core stops telling the cppi core
+ * to pull more data from it, then it'll be safe to flush
+ * current RX DMA state iff any pending fifo transfer is done.
+ */
+
+ core_rxirq_disable(tibase, cppi_ch->index + 1);
+
+ /* for host, ensure ReqPkt is never set again */
+ if (is_host_active(cppi_ch->controller->controller.musb)) {
+ value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ value &= ~((0x3) << (cppi_ch->index * 2));
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
+ }
+
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* for host, clear (just) ReqPkt at end of current packet(s) */
+ if (is_host_active(cppi_ch->controller->controller.musb)) {
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ csr &= ~MUSB_RXCSR_H_REQPKT;
+ } else
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+
+ /* clear dma enable */
+ csr &= ~(MUSB_RXCSR_DMAENAB);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* Quiesce: wait for current dma to finish (if not cleanup).
+ * We can't use bit zero of stateram->rx_sop, since that
+ * refers to an entire "DMA packet" not just emptying the
+ * current fifo. Most segments need multiple usb packets.
+ */
+ if (channel->status == MUSB_DMA_STATUS_BUSY)
+ udelay(50);
+
+ /* scan the current list, reporting any data that was
+ * transferred and acking any IRQ
+ */
+ cppi_rx_scan(controller, cppi_ch->index);
+
+ /* clobber the existing state once it's idle
+ *
+ * NOTE: arguably, we should also wait for all the other
+ * RX channels to quiesce (how??) and then temporarily
+ * disable RXCPPI_CTRL_REG ... but it seems that we can
+ * rely on the controller restarting from state ram, with
+ * only RXCPPI_BUFCNT state being bogus. BUFCNT will
+ * correct itself after the next DMA transfer though.
+ *
+ * REVISIT does using rndis mode change that?
+ */
+ cppi_reset_rx(cppi_ch->state_ram);
+
+ /* next DMA request _should_ load cppi head ptr */
+
+ /* ... we don't "free" that list, only mutate it in place. */
+ cppi_dump_rx(5, cppi_ch, " (done abort)");
+
+ /* clean up previously pending bds */
+ cppi_bd_free(cppi_ch, cppi_ch->last_processed);
+ cppi_ch->last_processed = NULL;
+
+ while (queue) {
+ struct cppi_descriptor *tmp = queue->next;
+
+ cppi_bd_free(cppi_ch, queue);
+ queue = tmp;
+ }
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->buf_dma = 0;
+ cppi_ch->offset = 0;
+ cppi_ch->buf_len = 0;
+ cppi_ch->maxpacket = 0;
+ return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram? Clocking is presumably shared with usb core.
+ */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 000000000..16dd1ed44
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+
+#include "musb_core.h"
+#include "musb_dma.h"
+
+/* CPPI RX/TX state RAM */
+
+struct cppi_tx_stateram {
+ u32 tx_head; /* "DMA packet" head descriptor */
+ u32 tx_buf;
+ u32 tx_current; /* current descriptor */
+ u32 tx_buf_current;
+ u32 tx_info; /* flags, remaining buflen */
+ u32 tx_rem_len;
+ u32 tx_dummy; /* unused */
+ u32 tx_complete;
+};
+
+struct cppi_rx_stateram {
+ u32 rx_skipbytes;
+ u32 rx_head;
+ u32 rx_sop; /* "DMA packet" head descriptor */
+ u32 rx_current; /* current descriptor */
+ u32 rx_buf_current;
+ u32 rx_len_len;
+ u32 rx_cnt_cnt;
+ u32 rx_complete;
+};
+
+/* hw_options bits in CPPI buffer descriptors */
+#define CPPI_SOP_SET ((u32)(1 << 31))
+#define CPPI_EOP_SET ((u32)(1 << 30))
+#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
+#define CPPI_EOQ_MASK ((u32)(1 << 28))
+#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
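+
+/* Illustrative only (the TX queueing code earlier in cppi_dma.c is the
+ * authority): a single-buffer packet handed to the engine typically
+ * carries SOP, EOP and OWN plus the byte count, e.g.
+ *
+ * bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | CPPI_OWN_SET | len;
+ *
+ * and the hardware clears OWN (and may set EOQ) on completion.
+ */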
+
+/* CPPI data structure definitions */
+
+#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */
+
+struct cppi_descriptor {
+ /* hardware overlay */
+ u32 hw_next; /* next buffer descriptor Pointer */
+ u32 hw_bufp; /* i/o buffer pointer */
+ u32 hw_off_len; /* buffer_offset16, buffer_length16 */
+ u32 hw_options; /* flags: SOP, EOP etc*/
+
+ struct cppi_descriptor *next;
+ dma_addr_t dma; /* address of this descriptor */
+ u32 buflen; /* for RX: original buffer length */
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
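+
+/* The first four fields mirror the hardware layout; the rest is driver
+ * bookkeeping. Chains are therefore linked twice, e.g. (sketch):
+ *
+ * prev->hw_next = (u32)bd->dma; - the link the engine follows
+ * prev->next = bd; - the link the driver walks
+ */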
+
+
+struct cppi;
+
+/* CPPI Channel Control structure */
+struct cppi_channel {
+ struct dma_channel channel;
+
+ /* back pointer to the DMA controller structure */
+ struct cppi *controller;
+
+ /* which direction of which endpoint? */
+ struct musb_hw_ep *hw_ep;
+ bool transmit;
+ u8 index;
+
+ /* DMA modes: RNDIS or "transparent" */
+ u8 is_rndis;
+
+ /* book keeping for current transfer request */
+ dma_addr_t buf_dma;
+ u32 buf_len;
+ u32 maxpacket;
+ u32 offset; /* dma requested */
+
+ void __iomem *state_ram; /* CPPI state */
+
+ struct cppi_descriptor *freelist;
+
+ /* BD management fields */
+ struct cppi_descriptor *head;
+ struct cppi_descriptor *tail;
+ struct cppi_descriptor *last_processed;
+
+ /* use tx_complete in host role to track endpoints waiting for
+ * FIFONOTEMPTY to clear.
+ */
+ struct list_head tx_complete;
+};
+
+/* CPPI DMA controller object */
+struct cppi {
+ struct dma_controller controller;
+ void __iomem *mregs; /* Mentor regs */
+ void __iomem *tibase; /* TI/CPPI regs */
+
+ int irq;
+
+ struct cppi_channel tx[4];
+ struct cppi_channel rx[4];
+
+ struct dma_pool *pool;
+
+ struct list_head tx_complete;
+};
+
+/* CPPI IRQ handler */
+extern irqreturn_t cppi_interrupt(int, void *);
+
+struct cppi41_dma_channel {
+ struct dma_channel channel;
+ struct cppi41_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dc;
+ dma_cookie_t cookie;
+ u8 port_num;
+ u8 is_tx;
+ u8 is_allocated;
+ u8 usb_toggle;
+
+ dma_addr_t buf_addr;
+ u32 total_len;
+ u32 prog_len;
+ u32 transferred;
+ u32 packet_sz;
+ struct list_head tx_check;
+ int tx_zlp;
+};
+
+#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
new file mode 100644
index 000000000..a4e55b0c5
--- /dev/null
+++ b/drivers/usb/musb/da8xx.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments DA8xx/OMAP-L1x "glue layer"
+ *
+ * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Based on the DaVinci "glue layer" code.
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * DT support
+ * Copyright (c) 2016 Petr Kulhavy <petr@barix.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+
+#include "musb_core.h"
+
+/*
+ * DA8XX specific definitions
+ */
+
+/* USB 2.0 OTG module registers */
+#define DA8XX_USB_REVISION_REG 0x00
+#define DA8XX_USB_CTRL_REG 0x04
+#define DA8XX_USB_STAT_REG 0x08
+#define DA8XX_USB_EMULATION_REG 0x0c
+#define DA8XX_USB_SRP_FIX_TIME_REG 0x18
+#define DA8XX_USB_INTR_SRC_REG 0x20
+#define DA8XX_USB_INTR_SRC_SET_REG 0x24
+#define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
+#define DA8XX_USB_INTR_MASK_REG 0x2c
+#define DA8XX_USB_INTR_MASK_SET_REG 0x30
+#define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34
+#define DA8XX_USB_INTR_SRC_MASKED_REG 0x38
+#define DA8XX_USB_END_OF_INTR_REG 0x3c
+#define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2))
+
+/* Control register bits */
+#define DA8XX_SOFT_RESET_MASK 1
+
+#define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */
+#define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */
+
+/* USB interrupt register bits */
+#define DA8XX_INTR_USB_SHIFT 16
+#define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */
+ /* interrupts and DRVVBUS interrupt */
+#define DA8XX_INTR_DRVVBUS 0x100
+#define DA8XX_INTR_RX_SHIFT 8
+#define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT)
+#define DA8XX_INTR_TX_SHIFT 0
+#define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT)
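+
+/*
+ * A masked status word thus decodes per field, exactly as
+ * da8xx_musb_interrupt() below does:
+ *
+ * tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT;
+ * rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT;
+ * usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT;
+ */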
+
+#define DA8XX_MENTOR_CORE_OFFSET 0x400
+
+struct da8xx_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *usb_phy;
+ struct clk *clk;
+ struct phy *phy;
+};
+
+/*
+ * Because we don't set CTRL.UINT, it's "important" to:
+ * - not read/write INTRUSB/INTRUSBE (except during
+ * initial setup, as a workaround);
+ * - use INTSET/INTCLR instead.
+ */
+
+/**
+ * da8xx_musb_enable - enable interrupts
+ * @musb: musb controller instance
+ */
+static void da8xx_musb_enable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 mask;
+
+ /* Workaround: setup IRQs through both register sets. */
+ mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) |
+ ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) |
+ DA8XX_INTR_USB_MASK;
+ musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask);
+
+ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
+ musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG,
+ DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT);
+}
+
+/**
+ * da8xx_musb_disable - disable HDRC and flush interrupts
+ * @musb: musb controller instance
+ */
+static void da8xx_musb_disable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG,
+ DA8XX_INTR_USB_MASK |
+ DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK);
+ musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
+}
+
+#define portstate(stmt) stmt
+
+static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+}
+
+#define POLL_SECONDS 2
+
+static void otg_timer(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /*
+ * We poll because DaVinci parts won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /*
+ * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3
+ * RTL seems to mis-handle session "start" otherwise (or in
+ * our case "recover"), in routine "VBUS was valid by the time
+ * VBUSERR got reported during enumeration" cases.
+ */
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG,
+ MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ /*
+ * There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.Session bit).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE: setting the session flag is _supposed_ to trigger
+ * SRP but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or as host when the VBUS timeout is unset */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ del_timer(&musb->dev_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb->dev_timer, timeout);
+}
+
+static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
+{
+ struct musb *musb = hci;
+ void __iomem *reg_base = musb->ctrl_base;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /*
+ * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through
+ * the Mentor registers (except for setup), use the TI ones and EOI.
+ */
+
+ /* Acknowledge and handle non-CPPI interrupts */
+ status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG);
+ if (!status)
+ goto eoi;
+
+ musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status);
+ dev_dbg(musb->controller, "USB IRQ %08x\n", status);
+
+ musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT;
+ musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT;
+ musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT;
+
+ /*
+ * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
+ * DA8xx's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.Session per Mentor docs requires that we know its
+ * value, but DEVCTL.BDevice is invalid without DEVCTL.Session set.
+ * Also, DRVVBUS pulses for SRP (but not at 5 V)...
+ */
+ if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) {
+ int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err;
+
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
+ if (err) {
+ /*
+ * The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"-starting sessions
+ * without waiting for VBUS to stop registering in
+ * devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&musb->dev_timer);
+ } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+ /*
+ * When a babble condition happens, a DRVVBUS interrupt
+ * is also generated. Ignore this DRVVBUS interrupt
+ * and let the babble interrupt handler recover the
+ * controller; otherwise, the host-mode flag is lost
+ * due to the MUSB_DEV_MODE() call below and babble
+ * recovery logic will not be called.
+ */
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ err ? " ERROR" : "",
+ devctl);
+ ret = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ ret |= musb_interrupt(musb);
+
+ eoi:
+ /* EOI needs to be written for the IRQ to be re-asserted. */
+ if (ret == IRQ_HANDLED || status)
+ musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
+
+ /* Poll for ID change */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
+{
+ struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
+ enum phy_mode phy_mode;
+
+ /*
+ * The PHY has some issues when it is forced into device or host mode.
+ * Unless the user requests another mode, configure the PHY in OTG mode.
+ */
+ if (!musb->is_initialized)
+ return phy_set_mode(glue->phy, PHY_MODE_USB_OTG);
+
+ switch (musb_mode) {
+ case MUSB_HOST: /* Force VBUS valid, ID = 0 */
+ phy_mode = PHY_MODE_USB_HOST;
+ break;
+ case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
+ phy_mode = PHY_MODE_USB_DEVICE;
+ break;
+ case MUSB_OTG: /* Don't override the VBUS/ID comparators */
+ phy_mode = PHY_MODE_USB_OTG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_set_mode(glue->phy, phy_mode);
+}
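+
+/* Example (hedged; the exact plumbing lives in the musb core): a board
+ * whose DT sets dr_mode = "host" ends up here with MUSB_HOST, forcing
+ * VBUS valid and ID low at the PHY via PHY_MODE_USB_HOST.
+ */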
+
+static int da8xx_musb_init(struct musb *musb)
+{
+ struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 rev;
+ int ret = -ENODEV;
+
+ musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
+
+ ret = clk_prepare_enable(glue->clk);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ /* Returns zero if e.g. not clocked */
+ rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
+ if (!rev)
+ goto fail;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+
+ timer_setup(&musb->dev_timer, otg_timer, 0);
+
+ /* Reset the controller */
+ musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
+
+ /* Start the on-chip PHY and its PLL. */
+ ret = phy_init(glue->phy);
+ if (ret) {
+ dev_err(glue->dev, "Failed to init phy.\n");
+ goto fail;
+ }
+
+ ret = phy_power_on(glue->phy);
+ if (ret) {
+ dev_err(glue->dev, "Failed to power on phy.\n");
+ goto err_phy_power_on;
+ }
+
+ msleep(5);
+
+ /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */
+ pr_debug("DA8xx OTG revision %08x, control %02x\n", rev,
+ musb_readb(reg_base, DA8XX_USB_CTRL_REG));
+
+ musb->isr = da8xx_musb_interrupt;
+ return 0;
+
+err_phy_power_on:
+ phy_exit(glue->phy);
+fail:
+ clk_disable_unprepare(glue->clk);
+ return ret;
+}
+
+static int da8xx_musb_exit(struct musb *musb)
+{
+ struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ del_timer_sync(&musb->dev_timer);
+
+ phy_power_off(glue->phy);
+ phy_exit(glue->phy);
+ clk_disable_unprepare(glue->clk);
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static inline u8 get_vbus_power(struct device *dev)
+{
+ struct regulator *vbus_supply;
+ int current_uA;
+
+ vbus_supply = regulator_get_optional(dev, "vbus");
+ if (IS_ERR(vbus_supply))
+ return 255;
+ current_uA = regulator_get_current_limit(vbus_supply);
+ regulator_put(vbus_supply);
+ if (current_uA <= 0 || current_uA > 510000)
+ return 255;
+ return current_uA / 1000 / 2;
+}
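+
+/* Worked example: a 500000 uA regulator limit maps to
+ * 500000 / 1000 / 2 = 250, i.e. 500 mA in the 2 mA units that
+ * pdata->power uses; limits above 510000 uA saturate at 255.
+ */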
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+static void da8xx_dma_controller_callback(struct dma_controller *c)
+{
+ struct musb *musb = c->musb;
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
+}
+
+static struct dma_controller *
+da8xx_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct dma_controller *controller;
+
+ controller = cppi41_dma_controller_create(musb, base);
+ if (IS_ERR_OR_NULL(controller))
+ return controller;
+
+ controller->dma_callback = da8xx_dma_controller_callback;
+
+ return controller;
+}
+#endif
+
+static const struct musb_platform_ops da8xx_ops = {
+ .quirks = MUSB_INDEXED_EP | MUSB_PRESERVE_SESSION |
+ MUSB_DMA_CPPI41 | MUSB_DA8XX,
+ .init = da8xx_musb_init,
+ .exit = da8xx_musb_exit,
+
+ .fifo_mode = 2,
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+ .dma_init = da8xx_dma_controller_create,
+ .dma_exit = cppi41_dma_controller_destroy,
+#endif
+ .enable = da8xx_musb_enable,
+ .disable = da8xx_musb_disable,
+
+ .set_mode = da8xx_musb_set_mode,
+ .try_idle = da8xx_musb_try_idle,
+
+ .set_vbus = da8xx_musb_set_vbus,
+};
+
+static const struct platform_device_info da8xx_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static const struct musb_hdrc_config da8xx_config = {
+ .ram_bits = 10,
+ .num_eps = 5,
+ .multipoint = 1,
+};
+
+static struct of_dev_auxdata da8xx_auxdata_lookup[] = {
+ OF_DEV_AUXDATA("ti,da830-cppi41", 0x01e01000, "cppi41-dmaengine",
+ NULL),
+ {}
+};
+
+static int da8xx_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct da8xx_glue *glue;
+ struct platform_device_info pinfo;
+ struct clk *clk;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
+ "failed to get phy\n");
+
+ glue->dev = &pdev->dev;
+ glue->clk = clk;
+
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->config = &da8xx_config;
+ pdata->mode = musb_get_mode(&pdev->dev);
+ pdata->power = get_vbus_power(&pdev->dev);
+ }
+
+ pdata->platform_ops = &da8xx_ops;
+
+ glue->usb_phy = usb_phy_generic_register();
+ ret = PTR_ERR_OR_ZERO(glue->usb_phy);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register usb_phy\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, glue);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL,
+ da8xx_auxdata_lookup, &pdev->dev);
+ if (ret)
+ return ret;
+
+ pinfo = da8xx_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(np);
+ pinfo.of_node_reused = true;
+
+ glue->musb = platform_device_register_full(&pinfo);
+ ret = PTR_ERR_OR_ZERO(glue->musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ usb_phy_generic_unregister(glue->usb_phy);
+ }
+
+ return ret;
+}
+
+static int da8xx_remove(struct platform_device *pdev)
+{
+ struct da8xx_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->usb_phy);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da8xx_suspend(struct device *dev)
+{
+ int ret;
+ struct da8xx_glue *glue = dev_get_drvdata(dev);
+
+ ret = phy_power_off(glue->phy);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+static int da8xx_resume(struct device *dev)
+{
+ int ret;
+ struct da8xx_glue *glue = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(glue->clk);
+ if (ret)
+ return ret;
+ return phy_power_on(glue->phy);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da8xx_pm_ops, da8xx_suspend, da8xx_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_id_table[] = {
+ {
+ .compatible = "ti,da830-musb",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, da8xx_id_table);
+#endif
+
+static struct platform_driver da8xx_driver = {
+ .probe = da8xx_probe,
+ .remove = da8xx_remove,
+ .driver = {
+ .name = "musb-da8xx",
+ .pm = &da8xx_pm_ops,
+ .of_match_table = of_match_ptr(da8xx_id_table),
+ },
+};
+
+MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer");
+MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(da8xx_driver);
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 000000000..704435526
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+
+#include <mach/cputype.h>
+#include <mach/hardware.h>
+
+#include <asm/mach-types.h>
+
+#include "musb_core.h"
+
+#include "davinci.h"
+#include "cppi_dma.h"
+
+
+#define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR)
+#define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR)
+
+struct davinci_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+ bool vbus_state;
+ struct gpio_desc *vbus;
+ struct work_struct vbus_work;
+};
+
+/* REVISIT (PM) we should be able to keep the PHY in low power mode most
+ * of the time (24 MHz oscillator and PLL off, etc) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHYPLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ /* power everything up; start the on-chip PHY and its PLL */
+ phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN);
+ phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+
+ /* wait for PLL to lock before proceeding */
+ while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0)
+ cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ /* powerdown the on-chip PHY, its PLL, and the OTG block */
+ phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON);
+ phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+}
+
+static int dma_off = 1;
+
+static void davinci_musb_enable(struct musb *musb)
+{
+ u32 tmp, old, val;
+
+ /* workaround: setup irqs through both register sets */
+ tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
+ << DAVINCI_USB_TXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ old = tmp;
+ tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+ << DAVINCI_USB_RXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ tmp |= old;
+
+ val = ~MUSB_INTR_SOF;
+ tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+ if (is_dma_capable() && !dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 0;
+
+ /* force a DRVVBUS irq so we can start polling for ID change */
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+static void davinci_musb_disable(struct musb *musb)
+{
+ /* Because we don't set CTRLR.UINT, it's "important" to:
+ * - not read/write INTRUSB/INTRUSBE
+ * - (except during initial setup, as a workaround)
+ * - use INTSETR/INTCLRR instead
+ */
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+ DAVINCI_USB_USBINT_MASK
+ | DAVINCI_USB_TXINT_MASK
+ | DAVINCI_USB_RXINT_MASK);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+ if (is_dma_capable() && !dma_off)
+ WARNING("dma still active\n");
+}
+
+
+#define portstate(stmt) stmt
+
+/*
+ * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM,
+ * which doesn't wire DRVVBUS to the FET that switches it. Unclear
+ * if that's a problem with the DM6446 chip or just with that board.
+ *
+ * In either case, the DM355 EVM automates DRVVBUS the normal way,
+ * when J10 is out, and TI documents it as handling OTG.
+ */
+
+/* I2C operations are always synchronous, and require a task context.
+ * On unloaded systems, using the shared workqueue seems to suffice
+ * to satisfy the 100 msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(struct work_struct *work)
+{
+ struct davinci_glue *glue = container_of(work, struct davinci_glue,
+ vbus_work);
+
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
+ glue->vbus_state = !glue->vbus_state;
+}
+
+static void davinci_musb_source_power(struct musb *musb, int is_on,
+ int immediate)
+{
+ struct davinci_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /* This GPIO handling is entirely optional */
+ if (!glue->vbus)
+ return;
+
+ if (is_on)
+ is_on = 1;
+
+ if (glue->vbus_state == is_on)
+ return;
+ /* 0/1 vs "-1 == unknown/init" */
+ glue->vbus_state = !is_on;
+
+ if (machine_is_davinci_evm()) {
+ if (immediate)
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
+ else
+ schedule_work(&glue->vbus_work);
+ }
+ if (immediate)
+ glue->vbus_state = is_on;
+}
+
+static void davinci_musb_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+ davinci_musb_source_power(musb, is_on, 0);
+}
+
+
+#define POLL_SECONDS 2
+
+static void otg_timer(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /* We poll because DaVinci parts won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_VFALL:
+ /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
+ * seems to mis-handle session "start" otherwise (or in our
+ * case "recover"), in routine "VBUS was valid by the time
+ * VBUSERR got reported during enumeration" cases.
+ */
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ /*
+ * There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.SESSION flag).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE setting the session flag is _supposed_ to trigger
+ * SRP, but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL,
+ devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+ struct usb_otg *otg = musb->xceiv->otg;
+ void __iomem *tibase = musb->ctrl_base;
+ struct cppi *cppi;
+ u32 tmp;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
+ * the Mentor registers (except for setup), use the TI ones and EOI.
+ *
+ * Docs describe irq "vector" registers associated with the CPPI and
+ * USB EOI registers. These hold a bitmask corresponding to the
+ * current IRQ, not an irq handler address. Would using those bits
+ * resolve some of the races observed in this dispatch code??
+ */
+
+ /* CPPI interrupts share the same IRQ line, but have their own
+ * mask, state, "vector", and EOI registers.
+ */
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+ if (is_cppi_enabled(musb) && musb->dma_controller && !cppi->irq)
+ retval = cppi_interrupt(irq, __hci);
+
+ /* ack and handle non-CPPI interrupts */
+ tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+ musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+ dev_dbg(musb->controller, "IRQ %08x\n", tmp);
+
+ musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+ >> DAVINCI_USB_RXINT_SHIFT;
+ musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+ >> DAVINCI_USB_TXINT_SHIFT;
+ musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+ >> DAVINCI_USB_USBINT_SHIFT;
+
+ /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
+ * DaVinci's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires we know its
+ * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
+ int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err = musb->int_usb & MUSB_INTR_VBUSERROR;
+
+ if (err) {
+ /* The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"starting sessions
+ * without waiting (on EVM, a **long** time) for VBUS
+ * to stop registering in devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&musb->dev_timer);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ /* NOTE: this must complete poweron within 100 msec
+ * (OTG_TIME_A_WAIT_VRISE) but we don't check for that.
+ */
+ davinci_musb_source_power(musb, drvvbus, 0);
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ err ? " ERROR" : "",
+ devctl);
+ retval = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ retval |= musb_interrupt(musb);
+
+ /* irq stays asserted until EOI is written */
+ musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+ /* poll for ID change */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static int davinci_musb_set_mode(struct musb *musb, u8 mode)
+{
+ /* EVM can't do this (right?) */
+ return -EIO;
+}
+
+static int davinci_musb_init(struct musb *musb)
+{
+ void __iomem *tibase = musb->ctrl_base;
+ u32 revision;
+ int ret = -ENODEV;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ ret = -EPROBE_DEFER;
+ goto unregister;
+ }
+
+ musb->mregs += DAVINCI_BASE_OFFSET;
+
+ /* returns zero if e.g. not clocked */
+ revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+ if (revision == 0)
+ goto fail;
+
+ timer_setup(&musb->dev_timer, otg_timer, 0);
+
+ davinci_musb_source_power(musb, 0, 1);
+
+ /* dm355 EVM swaps D+/D- for signal integrity, and
+ * is clocked from the main 24 MHz crystal.
+ */
+ if (machine_is_davinci_dm355_evm()) {
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ phy_ctrl &= ~(3 << 9);
+ phy_ctrl |= USBPHY_DATAPOL;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+ }
+
+ /* On dm355, the default-A state machine needs DRVVBUS control.
+ * If we won't be a host, there's no need to turn it on.
+ */
+ if (cpu_is_davinci_dm355()) {
+ u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
+
+ deepsleep &= ~DRVVBUS_FORCE;
+ __raw_writel(deepsleep, DM355_DEEPSLEEP);
+ }
+
+ /* reset the controller */
+ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+ /* start the on-chip PHY and its PLL */
+ phy_on();
+
+ msleep(5);
+
+ /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
+ pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+ revision, __raw_readl(USB_PHY_CTRL),
+ musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+ musb->isr = davinci_musb_interrupt;
+ return 0;
+
+fail:
+ usb_put_phy(musb->xceiv);
+unregister:
+ usb_phy_generic_unregister();
+ return ret;
+}
+
+static int davinci_musb_exit(struct musb *musb)
+{
+ int maxdelay = 30;
+ u8 devctl, warn = 0;
+
+ del_timer_sync(&musb->dev_timer);
+
+ /* force VBUS off */
+ if (cpu_is_davinci_dm355()) {
+ u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
+
+ deepsleep &= ~DRVVBUS_FORCE;
+ deepsleep |= DRVVBUS_OVERRIDE;
+ __raw_writel(deepsleep, DM355_DEEPSLEEP);
+ }
+
+ davinci_musb_source_power(musb, 0 /*off*/, 1);
+
+ /*
+ * Delay, to avoid problems with module reload. If there's no
+ * peripheral connected, this can take a long time to fall,
+ * especially on the EVM with its huge C133.
+ */
+ do {
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (!(devctl & MUSB_DEVCTL_VBUS))
+ break;
+ if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
+ warn = devctl & MUSB_DEVCTL_VBUS;
+ dev_dbg(musb->controller, "VBUS %d\n",
+ warn >> MUSB_DEVCTL_VBUS_SHIFT);
+ }
+ msleep(1000);
+ maxdelay--;
+ } while (maxdelay > 0);
+
+ /* in OTG mode, another host might be connected */
+ if (devctl & MUSB_DEVCTL_VBUS)
+ dev_dbg(musb->controller, "VBUS off timeout (devctl %02x)\n", devctl);
+
+ phy_off();
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static const struct musb_platform_ops davinci_ops = {
+ .quirks = MUSB_DMA_CPPI,
+ .init = davinci_musb_init,
+ .exit = davinci_musb_exit,
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+ .dma_init = cppi_dma_controller_create,
+ .dma_exit = cppi_dma_controller_destroy,
+#endif
+ .enable = davinci_musb_enable,
+ .disable = davinci_musb_disable,
+
+ .set_mode = davinci_musb_set_mode,
+
+ .set_vbus = davinci_musb_set_vbus,
+};
+
+static const struct platform_device_info davinci_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int davinci_probe(struct platform_device *pdev)
+{
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct davinci_glue *glue;
+ struct platform_device_info pinfo;
+ struct clk *clk;
+
+ int ret = -ENOMEM;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ goto err0;
+
+ clk = devm_clk_get(&pdev->dev, "usb");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err0;
+ }
+
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err0;
+ }
+
+ glue->dev = &pdev->dev;
+ glue->clk = clk;
+
+ pdata->platform_ops = &davinci_ops;
+
+ glue->vbus = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(glue->vbus)) {
+ ret = PTR_ERR(glue->vbus);
+ goto err0;
+ } else {
+ glue->vbus_state = -1;
+ INIT_WORK(&glue->vbus_work, evm_deferred_drvvbus);
+ }
+
+ usb_phy_generic_register();
+ platform_set_drvdata(pdev, glue);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ /*
+ * For DM6467, three resources are passed. A placeholder for the 3rd
+ * resource is always there, so it's safe to always copy it...
+ */
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
+ pinfo = davinci_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ clk_disable(clk);
+
+err0:
+ return ret;
+}
+
+static int davinci_remove(struct platform_device *pdev)
+{
+ struct davinci_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister();
+ clk_disable(glue->clk);
+
+ return 0;
+}
+
+static struct platform_driver davinci_driver = {
+ .probe = davinci_probe,
+ .remove = davinci_remove,
+ .driver = {
+ .name = "musb-davinci",
+ },
+};
+
+MODULE_DESCRIPTION("DaVinci MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(davinci_driver);
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 000000000..c8e67d15b
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define USBPHY_CTL_PADDR 0x01c40034
+#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
+#define USBPHY_PHYCLKGD BIT(8)
+#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN BIT(6) /* v(bus) comparator */
+#define USBPHY_VBUSSENS BIT(5) /* (dm355,ro) is vbus > 0.5V */
+#define USBPHY_PHYPLLON BIT(4) /* override pll suspend */
+#define USBPHY_CLKO1SEL BIT(3)
+#define USBPHY_OSCPDWN BIT(2)
+#define USBPHY_OTGPDWN BIT(1)
+#define USBPHY_PHYPDWN BIT(0)
+
+#define DM355_DEEPSLEEP_PADDR 0x01c40048
+#define DRVVBUS_FORCE BIT(2)
+#define DRVVBUS_OVERRIDE BIT(1)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG 0x00
+#define DAVINCI_USB_CTRL_REG 0x04
+#define DAVINCI_USB_STAT_REG 0x08
+#define DAVINCI_RNDIS_REG 0x10
+#define DAVINCI_AUTOREQ_REG 0x14
+#define DAVINCI_USB_INT_SOURCE_REG 0x20
+#define DAVINCI_USB_INT_SET_REG 0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG 0x28
+#define DAVINCI_USB_INT_MASK_REG 0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG 0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG 0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
+#define DAVINCI_USB_EOI_REG 0x3c
+#define DAVINCI_USB_EOI_INTVEC 0x40
+
+/* BEGIN CPPI-generic (?) */
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG 0x80
+#define DAVINCI_TXCPPI_TEAR_REG 0x84
+#define DAVINCI_CPPI_EOI_REG 0x88
+#define DAVINCI_CPPI_INTVEC_REG 0x8c
+#define DAVINCI_TXCPPI_MASKED_REG 0x90
+#define DAVINCI_TXCPPI_RAW_REG 0x94
+#define DAVINCI_TXCPPI_INTENAB_REG 0x98
+#define DAVINCI_TXCPPI_INTCLR_REG 0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG 0xC0
+#define DAVINCI_RXCPPI_MASKED_REG 0xD0
+#define DAVINCI_RXCPPI_RAW_REG 0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG 0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG 0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
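+
+/* Worked example of the layout above: channel 2 keeps its TX state at
+ * 0x100 + 2 * 0x40 = 0x180 and its RX state 0x20 beyond, at 0x1a0;
+ * each channel owns one 0x40-byte slot, TX half then RX half.
+ */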
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE 1
+#define DAVINCI_DMA_CTRL_DISABLE 0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* END CPPI-generic (?) */
+
+#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT 16
+#define DAVINCI_USB_TXINT_SHIFT 0
+#define DAVINCI_USB_RXINT_SHIFT 8
+
+#define DAVINCI_INTR_DRVVBUS 0x0100
+
+#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+ (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+ (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
+
+#define DAVINCI_BASE_OFFSET 0x400
+
+#endif /* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
new file mode 100644
index 000000000..d1e4e0deb
--- /dev/null
+++ b/drivers/usb/musb/jz4740.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ingenic JZ4740 "glue layer"
+ *
+ * Copyright (C) 2013, Apelete Seketeli <apelete@seketeli.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include <linux/usb/usb_phy_generic.h>
+
+#include "musb_core.h"
+
+struct jz4740_glue {
+ struct platform_device *pdev;
+ struct musb *musb;
+ struct clk *clk;
+ struct usb_role_switch *role_sw;
+};
+
+static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller)
+ retval_dma = dma_controller_irq(irq, musb->dma_controller);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+	/*
+	 * The controller is gadget only; the state of the host mode IRQ bits
+	 * is undefined. Mask them to make sure that the musb driver core will
+	 * never see them set.
+	 */
+ musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
+ MUSB_INTR_RESET | MUSB_INTR_SOF;
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (retval == IRQ_HANDLED || retval_dma == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+};
+
+static const struct musb_hdrc_config jz4740_musb_config = {
+ /* Silicon does not implement USB OTG. */
+ .multipoint = 0,
+	/* Max EPs scanned; the driver will decide which EPs can be used. */
+ .num_eps = 4,
+ /* RAMbits needed to configure EPs from table */
+ .ram_bits = 9,
+ .fifo_cfg = jz4740_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
+};
+
+static int jz4740_musb_role_switch_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct jz4740_glue *glue = usb_role_switch_get_drvdata(sw);
+ struct usb_phy *phy = glue->musb->xceiv;
+
+ switch (role) {
+ case USB_ROLE_NONE:
+ atomic_notifier_call_chain(&phy->notifier, USB_EVENT_NONE, phy);
+ break;
+ case USB_ROLE_DEVICE:
+ atomic_notifier_call_chain(&phy->notifier, USB_EVENT_VBUS, phy);
+ break;
+ case USB_ROLE_HOST:
+ atomic_notifier_call_chain(&phy->notifier, USB_EVENT_ID, phy);
+ break;
+ }
+
+ return 0;
+}
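+
+/*
+ * Note (illustrative): the glue has no role-detection logic of its own; it
+ * simply forwards role-switch requests as generic PHY events, mapping
+ * USB_ROLE_DEVICE to a VBUS event and USB_ROLE_HOST to an ID event on the
+ * transceiver's notifier chain.
+ */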
+
+static int jz4740_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller->parent;
+ struct jz4740_glue *glue = dev_get_drvdata(dev);
+ struct usb_role_switch_desc role_sw_desc = {
+ .set = jz4740_musb_role_switch_set,
+ .driver_data = glue,
+ .fwnode = dev_fwnode(dev),
+ };
+
+ glue->musb = musb;
+
+ if (dev->of_node)
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ else
+ musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(musb->xceiv))
+ return dev_err_probe(dev, PTR_ERR(musb->xceiv),
+ "No transceiver configured\n");
+
+ glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
+ if (IS_ERR(glue->role_sw)) {
+ dev_err(dev, "Failed to register USB role switch\n");
+ return PTR_ERR(glue->role_sw);
+ }
+
+ /*
+ * Silicon does not implement ConfigData register.
+ * Set dyn_fifo to avoid reading EP config from hardware.
+ */
+ musb->dyn_fifo = true;
+
+ musb->isr = jz4740_musb_interrupt;
+
+ return 0;
+}
+
+static int jz4740_musb_exit(struct musb *musb)
+{
+ struct jz4740_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ usb_role_switch_unregister(glue->role_sw);
+
+ return 0;
+}
+
+static const struct musb_platform_ops jz4740_musb_ops = {
+ .quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP,
+ .fifo_mode = 2,
+ .init = jz4740_musb_init,
+ .exit = jz4740_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create_noirq,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+};
+
+static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
+ .mode = MUSB_PERIPHERAL,
+ .config = &jz4740_musb_config,
+ .platform_ops = &jz4740_musb_ops,
+};
+
+static struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+};
+
+static struct musb_hdrc_config jz4770_musb_config = {
+ .multipoint = 1,
+ .num_eps = 11,
+ .ram_bits = 11,
+ .fifo_cfg = jz4770_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4770_musb_fifo_cfg),
+};
+
+static const struct musb_hdrc_platform_data jz4770_musb_pdata = {
+ .mode = MUSB_PERIPHERAL, /* TODO: support OTG */
+ .config = &jz4770_musb_config,
+ .platform_ops = &jz4740_musb_ops,
+};
+
+static int jz4740_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct musb_hdrc_platform_data *pdata;
+ struct platform_device *musb;
+ struct jz4740_glue *glue;
+ struct clk *clk;
+ int ret;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ pdata = of_device_get_match_data(dev);
+ if (!pdata) {
+ dev_err(dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(dev, "udc");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err_platform_device_put;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ goto err_platform_device_put;
+ }
+
+ musb->dev.parent = dev;
+ musb->dev.dma_mask = &musb->dev.coherent_dma_mask;
+ musb->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ device_set_of_node_from_dev(&musb->dev, dev);
+
+ glue->pdev = musb;
+ glue->clk = clk;
+
+ platform_set_drvdata(pdev, glue);
+
+ ret = platform_device_add_resources(musb, pdev->resource,
+ pdev->num_resources);
+ if (ret) {
+ dev_err(dev, "failed to add resources\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(dev, "failed to add platform_data\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(dev, "failed to register musb device\n");
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_platform_device_put:
+ platform_device_put(musb);
+ return ret;
+}
+
+static int jz4740_remove(struct platform_device *pdev)
+{
+ struct jz4740_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->pdev);
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+static const struct of_device_id jz4740_musb_of_match[] = {
+ { .compatible = "ingenic,jz4740-musb", .data = &jz4740_musb_pdata },
+ { .compatible = "ingenic,jz4770-musb", .data = &jz4770_musb_pdata },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
+
+static struct platform_driver jz4740_driver = {
+ .probe = jz4740_probe,
+ .remove = jz4740_remove,
+ .driver = {
+ .name = "musb-jz4740",
+ .of_match_table = jz4740_musb_of_match,
+ },
+};
+
+MODULE_DESCRIPTION("JZ4740 MUSB Glue Layer");
+MODULE_AUTHOR("Apelete Seketeli <apelete@seketeli.net>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(jz4740_driver);
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
new file mode 100644
index 000000000..27b9bd258
--- /dev/null
+++ b/drivers/usb/musb/mediatek.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author:
+ * Min Guo <min.guo@mediatek.com>
+ * Yonglong Wu <yonglong.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define USB_L1INTS 0x00a0
+#define USB_L1INTM 0x00a4
+#define MTK_MUSB_TXFUNCADDR 0x0480
+
+/* MediaTek controller toggle enable and status reg */
+#define MUSB_RXTOG 0x80
+#define MUSB_RXTOGEN 0x82
+#define MUSB_TXTOG 0x84
+#define MUSB_TXTOGEN 0x86
+#define MTK_TOGGLE_EN GENMASK(15, 0)
+
+#define TX_INT_STATUS BIT(0)
+#define RX_INT_STATUS BIT(1)
+#define USBCOM_INT_STATUS BIT(2)
+#define DMA_INT_STATUS BIT(3)
+
+#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
+#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
+
+#define MTK_MUSB_CLKS_NUM 3
+
+struct mtk_glue {
+ struct device *dev;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
+ struct platform_device *usb_phy;
+ struct phy *phy;
+ struct usb_phy *xceiv;
+ enum phy_mode phy_mode;
+ struct clk_bulk_data clks[MTK_MUSB_CLKS_NUM];
+ enum usb_role role;
+ struct usb_role_switch *role_sw;
+};
+
+static int mtk_musb_clks_get(struct mtk_glue *glue)
+{
+ struct device *dev = glue->dev;
+
+ glue->clks[0].id = "main";
+ glue->clks[1].id = "mcu";
+ glue->clks[2].id = "univpll";
+
+ return devm_clk_bulk_get(dev, MTK_MUSB_CLKS_NUM, glue->clks);
+}
+
+static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role)
+{
+ struct musb *musb = glue->musb;
+ u8 devctl = readb(musb->mregs + MUSB_DEVCTL);
+ enum usb_role new_role;
+
+ if (role == glue->role)
+ return 0;
+
+ switch (role) {
+ case USB_ROLE_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ MUSB_HST_MODE(musb);
+ break;
+ case USB_ROLE_DEVICE:
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ MUSB_DEV_MODE(musb);
+ break;
+ case USB_ROLE_NONE:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role != USB_ROLE_NONE)
+ phy_power_off(glue->phy);
+
+ break;
+ default:
+		dev_err(glue->dev, "Invalid state\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+ return 0;
+}
+
+static int musb_usb_role_sx_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ return mtk_otg_switch_set(usb_role_switch_get_drvdata(sw), role);
+}
+
+static enum usb_role musb_usb_role_sx_get(struct usb_role_switch *sw)
+{
+ struct mtk_glue *glue = usb_role_switch_get_drvdata(sw);
+
+ return glue->role;
+}
+
+static int mtk_otg_switch_init(struct mtk_glue *glue)
+{
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+
+ role_sx_desc.set = musb_usb_role_sx_set;
+ role_sx_desc.get = musb_usb_role_sx_get;
+ role_sx_desc.allow_userspace_control = true;
+ role_sx_desc.fwnode = dev_fwnode(glue->dev);
+ role_sx_desc.driver_data = glue;
+ glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
+
+ return PTR_ERR_OR_ZERO(glue->role_sw);
+}
+
+static void mtk_otg_switch_exit(struct mtk_glue *glue)
+{
+ return usb_role_switch_unregister(glue->role_sw);
+}
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+ musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+ musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+ if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
+ /* ep0 FADDR must be 0 when (re)entering peripheral mode */
+ musb_ep_select(musb->mregs, 0);
+ musb_writeb(musb->mregs, MUSB_FADDR, 0);
+ }
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = (struct musb *)dev_id;
+ u32 l1_ints;
+
+ l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
+ musb_readl(musb->mregs, USB_L1INTM);
+
+ if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
+ retval = generic_interrupt(irq, musb);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ if (l1_ints & DMA_INT_STATUS)
+ retval = dma_controller_irq(irq, musb->dma_controller);
+#endif
+ return retval;
+}
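+
+/*
+ * Note (illustrative): USB_L1INTS is qualified by USB_L1INTM above, so only
+ * sources that are both pending and unmasked get dispatched: TX/RX/common
+ * USB events go to the generic MUSB handler, while DMA completions go to
+ * the Inventra DMA controller when CONFIG_USB_INVENTRA_DMA is enabled.
+ */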
+
+static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
+{
+ return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
+}
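+
+/*
+ * Worked example (illustrative): each endpoint's bus-control block is 8
+ * bytes wide starting at MTK_MUSB_TXFUNCADDR (0x0480), so endpoint 1's
+ * registers live at 0x0488 + offset, endpoint 2's at 0x0490 + offset, etc.
+ */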
+
+static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
+{
+ u8 data;
+
+ /* W1C */
+ data = musb_readb(addr, offset);
+ musb_writeb(addr, offset, data);
+ return data;
+}
+
+static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
+{
+ u16 data;
+
+ /* W1C */
+ data = musb_readw(addr, offset);
+ musb_writew(addr, offset, data);
+ return data;
+}
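+
+/*
+ * Note on the W1C (write-1-to-clear) helpers above: the interrupt status
+ * registers clear on writing back the bits that were read, so sampling the
+ * register and writing the same value back both reads and acknowledges all
+ * pending events in one pass.
+ */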
+
+static int mtk_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ enum phy_mode new_mode;
+ enum usb_role new_role;
+
+ switch (mode) {
+ case MUSB_HOST:
+ new_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ break;
+ case MUSB_PERIPHERAL:
+ new_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ break;
+ case MUSB_OTG:
+ new_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(glue->dev, "Invalid mode request\n");
+ return -EINVAL;
+ }
+
+ if (glue->phy_mode == new_mode)
+ return 0;
+
+ if (musb->port_mode != MUSB_OTG) {
+ dev_err(glue->dev, "Does not support changing modes\n");
+ return -EINVAL;
+ }
+
+ mtk_otg_switch_set(glue, new_role);
+ return 0;
+}
+
+static int mtk_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ int ret;
+
+ glue->musb = musb;
+ musb->phy = glue->phy;
+ musb->xceiv = glue->xceiv;
+ musb->is_host = false;
+ musb->isr = mtk_musb_interrupt;
+
+ /* Set TX/RX toggle enable */
+ musb_writew(musb->mregs, MUSB_TXTOGEN, MTK_TOGGLE_EN);
+ musb_writew(musb->mregs, MUSB_RXTOGEN, MTK_TOGGLE_EN);
+
+ if (musb->port_mode == MUSB_OTG) {
+ ret = mtk_otg_switch_init(glue);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_init(glue->phy);
+ if (ret)
+ goto err_phy_init;
+
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ goto err_phy_power_on;
+
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ musb_writel(musb->mregs, MUSB_HSDMA_INTR,
+ DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
+#endif
+ musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
+ USBCOM_INT_STATUS | DMA_INT_STATUS);
+ return 0;
+
+err_phy_power_on:
+ phy_exit(glue->phy);
+err_phy_init:
+ if (musb->port_mode == MUSB_OTG)
+ mtk_otg_switch_exit(glue);
+ return ret;
+}
+
+static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 toggle;
+
+ toggle = musb_readw(musb->mregs, is_out ? MUSB_TXTOG : MUSB_RXTOG);
+ return toggle & (1 << epnum);
+}
+
+static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 value, toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out) {
+ value = musb_readw(musb->mregs, MUSB_TXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_TXTOG, value);
+ } else {
+ value = musb_readw(musb->mregs, MUSB_RXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_RXTOG, value);
+ }
+
+ return 0;
+}
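+
+/*
+ * Worked example (illustrative): MUSB_TXTOG/MUSB_RXTOG hold one data-toggle
+ * bit per hardware endpoint, so with epnum == 3 and toggle == 1 the setter
+ * above ORs in bit 3 (DATA1 for endpoint 3), and mtk_musb_get_toggle()
+ * reads the same bit back with "toggle & (1 << epnum)".
+ */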
+
+static int mtk_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+
+ mtk_otg_switch_exit(glue);
+ phy_power_off(glue->phy);
+ phy_exit(glue->phy);
+ clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return 0;
+}
+
+static const struct musb_platform_ops mtk_musb_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+ .init = mtk_musb_init,
+ .get_toggle = mtk_musb_get_toggle,
+ .set_toggle = mtk_musb_set_toggle,
+ .exit = mtk_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create_noirq,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .clearb = mtk_musb_clearb,
+ .clearw = mtk_musb_clearw,
+ .busctl_offset = mtk_musb_busctl_offset,
+ .set_mode = mtk_musb_set_mode,
+};
+
+#define MTK_MUSB_MAX_EP_NUM 8
+#define MTK_MUSB_RAM_BITS 11
+
+static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
+ { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
+ { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
+};
+
+static const struct musb_hdrc_config mtk_musb_hdrc_config = {
+ .fifo_cfg = mtk_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = MTK_MUSB_MAX_EP_NUM,
+ .ram_bits = MTK_MUSB_RAM_BITS,
+};
+
+static const struct platform_device_info mtk_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int mtk_musb_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata;
+ struct mtk_glue *glue;
+ struct platform_device_info pinfo;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = dev;
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create child devices at %p\n", np);
+ return ret;
+ }
+
+ ret = mtk_musb_clks_get(glue);
+ if (ret)
+ return ret;
+
+ pdata->config = &mtk_musb_hdrc_config;
+ pdata->platform_ops = &mtk_musb_ops;
+ pdata->mode = usb_get_dr_mode(dev);
+
+ if (IS_ENABLED(CONFIG_USB_MUSB_HOST))
+ pdata->mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_MUSB_GADGET))
+ pdata->mode = USB_DR_MODE_PERIPHERAL;
+
+ switch (pdata->mode) {
+ case USB_DR_MODE_HOST:
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ glue->role = USB_ROLE_HOST;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ glue->role = USB_ROLE_DEVICE;
+ break;
+ case USB_DR_MODE_OTG:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ glue->role = USB_ROLE_NONE;
+ break;
+ default:
+		dev_err(&pdev->dev, "Invalid 'dr_mode' property\n");
+ return -EINVAL;
+ }
+
+ glue->phy = devm_of_phy_get_by_index(dev, np, 0);
+ if (IS_ERR(glue->phy)) {
+		dev_err(dev, "failed to get phy: %ld\n",
+ PTR_ERR(glue->phy));
+ return PTR_ERR(glue->phy);
+ }
+
+ glue->usb_phy = usb_phy_generic_register();
+ if (IS_ERR(glue->usb_phy)) {
+		dev_err(dev, "failed to register usb-phy: %ld\n",
+ PTR_ERR(glue->usb_phy));
+ return PTR_ERR(glue->usb_phy);
+ }
+
+ glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(glue->xceiv)) {
+ ret = PTR_ERR(glue->xceiv);
+		dev_err(dev, "failed to get usb-phy: %d\n", ret);
+ goto err_unregister_usb_phy;
+ }
+
+ platform_set_drvdata(pdev, glue);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ ret = clk_bulk_prepare_enable(MTK_MUSB_CLKS_NUM, glue->clks);
+ if (ret)
+ goto err_enable_clk;
+
+ pinfo = mtk_dev_info;
+ pinfo.parent = dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(np);
+ pinfo.of_node_reused = true;
+
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
+ dev_err(dev, "failed to register musb device: %d\n", ret);
+ goto err_device_register;
+ }
+
+ return 0;
+
+err_device_register:
+ clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
+err_enable_clk:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+err_unregister_usb_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+}
+
+static int mtk_musb_remove(struct platform_device *pdev)
+{
+ struct mtk_glue *glue = platform_get_drvdata(pdev);
+ struct platform_device *usb_phy = glue->usb_phy;
+
+ platform_device_unregister(glue->musb_pdev);
+ usb_phy_generic_unregister(usb_phy);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_musb_match[] = {
+ {.compatible = "mediatek,mtk-musb",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_musb_match);
+#endif
+
+static struct platform_driver mtk_musb_driver = {
+ .probe = mtk_musb_probe,
+ .remove = mtk_musb_remove,
+ .driver = {
+ .name = "musb-mtk",
+ .of_match_table = of_match_ptr(mtk_musb_match),
+ },
+};
+
+module_platform_driver(mtk_musb_driver);
+
+MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
+MODULE_AUTHOR("Min Guo <min.guo@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
new file mode 100644
index 000000000..cea2e8108
--- /dev/null
+++ b/drivers/usb/musb/mpfs.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PolarFire SoC (MPFS) MUSB Glue Layer
+ *
+ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ * Based on {omap2430,tusb6010,ux500}.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define MPFS_MUSB_MAX_EP_NUM 8
+#define MPFS_MUSB_RAM_BITS 12
+
+struct mpfs_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+ struct clk *clk;
+};
+
+static struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 1024, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 4096, },
+};
+
+static const struct musb_hdrc_config mpfs_musb_hdrc_config = {
+ .fifo_cfg = mpfs_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(mpfs_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = MPFS_MUSB_MAX_EP_NUM,
+ .ram_bits = MPFS_MUSB_RAM_BITS,
+};
+
+static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx) {
+ musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
+ musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
+ musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
+ ret = musb_interrupt(musb);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
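+
+/*
+ * Note (illustrative): this handler writes the sampled values back to
+ * MUSB_INTRUSB/MUSB_INTRTX/MUSB_INTRRX to acknowledge the pending events
+ * before dispatching them to musb_interrupt().
+ */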
+
+static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+
+ /*
+ * HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ musb->is_active = 1;
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+
+ /*
+ * NOTE: skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+ musb->xceiv->otg->default_a = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static int mpfs_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+
+ musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(musb->xceiv)) {
+ dev_err(dev, "HS UDC: no transceiver configured\n");
+ return PTR_ERR(musb->xceiv);
+ }
+
+ musb->dyn_fifo = true;
+ musb->isr = mpfs_musb_interrupt;
+
+ musb_platform_set_vbus(musb, 1);
+
+ return 0;
+}
+
+static const struct musb_platform_ops mpfs_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+ .init = mpfs_musb_init,
+ .fifo_mode = 2,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .set_vbus = mpfs_musb_set_vbus
+};
+
+static int mpfs_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mpfs_glue *glue;
+ struct platform_device *musb_pdev;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ musb_pdev = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb_pdev) {
+ dev_err(dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err_phy_release;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err_phy_release;
+ }
+
+ musb_pdev->dev.parent = dev;
+ musb_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(39);
+ musb_pdev->dev.dma_mask = &musb_pdev->dev.coherent_dma_mask;
+ device_set_of_node_from_dev(&musb_pdev->dev, dev);
+
+ glue->dev = dev;
+ glue->musb = musb_pdev;
+ glue->clk = clk;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ pdata->config = &mpfs_musb_hdrc_config;
+ pdata->platform_ops = &mpfs_ops;
+
+ pdata->mode = usb_get_dr_mode(dev);
+ if (pdata->mode == USB_DR_MODE_UNKNOWN) {
+ dev_info(dev, "No dr_mode property found, defaulting to otg\n");
+ pdata->mode = USB_DR_MODE_OTG;
+ }
+
+ glue->phy = usb_phy_generic_register();
+ if (IS_ERR(glue->phy)) {
+ dev_err(dev, "failed to register usb-phy %ld\n",
+ PTR_ERR(glue->phy));
+ ret = PTR_ERR(glue->phy);
+ goto err_clk_disable;
+ }
+
+ platform_set_drvdata(pdev, glue);
+
+ ret = platform_device_add_resources(musb_pdev, pdev->resource, pdev->num_resources);
+ if (ret) {
+ dev_err(dev, "failed to add resources\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add_data(musb_pdev, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(dev, "failed to add platform_data\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add(musb_pdev);
+ if (ret) {
+ dev_err(dev, "failed to register musb device\n");
+ goto err_clk_disable;
+ }
+
+ dev_info(&pdev->dev, "Registered MPFS MUSB driver\n");
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+
+err_phy_release:
+ usb_phy_generic_unregister(glue->phy);
+ platform_device_put(musb_pdev);
+ return ret;
+}
+
+static int mpfs_remove(struct platform_device *pdev)
+{
+ struct mpfs_glue *glue = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(glue->clk);
+ platform_device_unregister(glue->musb);
+	usb_phy_generic_unregister(glue->phy);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mpfs_id_table[] = {
+ { .compatible = "microchip,mpfs-musb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mpfs_id_table);
+#endif
+
+static struct platform_driver mpfs_musb_driver = {
+ .probe = mpfs_probe,
+ .remove = mpfs_remove,
+ .driver = {
+ .name = "mpfs-musb",
+ .of_match_table = of_match_ptr(mpfs_id_table)
+ },
+};
+
+module_platform_driver(mpfs_musb_driver);
+
+MODULE_DESCRIPTION("PolarFire SoC MUSB Glue Layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 000000000..03027c6fa
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2966 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works. These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments. Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE: the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version. This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ * - Lack of host-side transaction scheduling, for all transfer types.
+ * The hardware doesn't do it; instead, software must.
+ *
+ * This is not an issue for OTG devices that don't support external
+ * hubs, but for more "normal" USB hosts it's a user issue that the
+ * "multipoint" support doesn't scale in the expected ways. That
+ * includes DaVinci EVM in a common non-OTG mode.
+ *
+ * * Control and bulk use dedicated endpoints, and there's as
+ * yet no mechanism to either (a) reclaim the hardware when
+ * peripherals are NAKing, which gets complicated with bulk
+ * endpoints, or (b) use more than a single bulk endpoint in
+ * each direction.
+ *
+ * RESULT: one device may be perceived as blocking another one.
+ *
+ * * Interrupt and isochronous will dynamically allocate endpoint
+ * hardware, but (a) there's no record keeping for bandwidth;
+ * (b) in the common case that few endpoints are available, there
+ * is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ *      RESULT: At one extreme, bandwidth can be overcommitted in
+ *      some hardware configurations, and no faults will be reported.
+ * At the other extreme, the bandwidth capabilities which do
+ * exist tend to be severely undercommitted. You can't yet hook
+ * up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ * - Kconfig for everything user-configurable
+ * - platform_device for addressing, irq, and platform_data
+ * - platform_data is mostly for board-specific information
+ * (plus recently, SoC or family details)
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/prefetch.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+#include <linux/usb/of.h>
+
+#include "musb_core.h"
+#include "musb_trace.h"
+
+#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
+
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION "6.0"
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+#define MUSB_DRIVER_NAME "musb-hdrc"
+const char musb_driver_name[] = MUSB_DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+enum musb_mode musb_get_mode(struct device *dev)
+{
+ enum usb_dr_mode mode;
+
+ mode = usb_get_dr_mode(dev);
+ switch (mode) {
+ case USB_DR_MODE_HOST:
+ return MUSB_HOST;
+ case USB_DR_MODE_PERIPHERAL:
+ return MUSB_PERIPHERAL;
+ case USB_DR_MODE_OTG:
+ case USB_DR_MODE_UNKNOWN:
+ default:
+ return MUSB_OTG;
+ }
+}
+EXPORT_SYMBOL_GPL(musb_get_mode);
+
+/*-------------------------------------------------------------------------*/
+
+static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
+{
+ void __iomem *addr = phy->io_priv;
+ int i = 0;
+ u8 r;
+ u8 power;
+ int ret;
+
+ pm_runtime_get_sync(phy->io_dev);
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
+ * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
+ */
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
+ MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ }
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+ ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
+
+out:
+ pm_runtime_put(phy->io_dev);
+
+ return ret;
+}
+
+static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+{
+ void __iomem *addr = phy->io_priv;
+ int i = 0;
+ u8 r = 0;
+ u8 power;
+ int ret = 0;
+
+ pm_runtime_get_sync(phy->io_dev);
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+ musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ }
+
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+out:
+ pm_runtime_put(phy->io_dev);
+
+ return ret;
+}
+
+static struct usb_phy_io_ops musb_ulpi_access = {
+ .read = musb_ulpi_read,
+ .write = musb_ulpi_write,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static u32 musb_default_fifo_offset(u8 epnum)
+{
+ return 0x20 + (epnum * 4);
+}
+
+/* "flat" mapping: each endpoint has its own i/o address */
+static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
+{
+}
+
+static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
+{
+ return 0x100 + (0x10 * epnum) + offset;
+}
+
+/* "indexed" mapping: INDEX register controls register bank select */
+static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
+{
+ musb_writeb(mbase, MUSB_INDEX, epnum);
+}
+
+static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
+{
+ return 0x10 + offset;
+}
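+
+/*
+ * Worked example (illustrative): to reach a register of endpoint 2, the
+ * "flat" scheme computes 0x100 + 0x20 + offset directly, while the
+ * "indexed" scheme first writes 2 to MUSB_INDEX and then uses the shared
+ * 0x10 + offset window for whichever endpoint is currently selected.
+ */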
+
+static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
+{
+ return 0x80 + (0x08 * epnum) + offset;
+}
+
+static u8 musb_default_readb(void __iomem *addr, u32 offset)
+{
+ u8 data = __raw_readb(addr + offset);
+
+ trace_musb_readb(__builtin_return_address(0), addr, offset, data);
+ return data;
+}
+
+static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
+{
+ trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
+ __raw_writeb(data, addr + offset);
+}
+
+static u16 musb_default_readw(void __iomem *addr, u32 offset)
+{
+ u16 data = __raw_readw(addr + offset);
+
+ trace_musb_readw(__builtin_return_address(0), addr, offset, data);
+ return data;
+}
+
+static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
+{
+ trace_musb_writew(__builtin_return_address(0), addr, offset, data);
+ __raw_writew(data, addr + offset);
+}
+
+static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
+{
+ void __iomem *epio = qh->hw_ep->regs;
+ u16 csr;
+
+ if (is_out)
+ csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
+
+ return csr;
+}
+
+static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
+ struct urb *urb)
+{
+ u16 csr;
+ u16 toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out)
+ csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE)
+ : MUSB_TXCSR_CLRDATATOG;
+ else
+ csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE) : 0;
+
+ return csr;
+}
+
+/*
+ * Load an endpoint's FIFO
+ */
+static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
+ const u8 *src)
+{
+ struct musb *musb = hw_ep->musb;
+ void __iomem *fifo = hw_ep->fifo;
+
+ if (unlikely(len == 0))
+ return;
+
+ prefetch((u8 *)src);
+
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', hw_ep->epnum, fifo, len, src);
+
+ /* we can't assume unaligned reads work */
+ if (likely((0x01 & (unsigned long) src) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned source address */
+ if ((0x02 & (unsigned long) src) == 0) {
+ if (len >= 4) {
+ iowrite32_rep(fifo, src + index, len >> 2);
+ index += len & ~0x03;
+ }
+ if (len & 0x02) {
+ __raw_writew(*(u16 *)&src[index], fifo);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ iowrite16_rep(fifo, src + index, len >> 1);
+ index += len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ __raw_writeb(src[index], fifo);
+ } else {
+ /* byte aligned */
+ iowrite8_rep(fifo, src, len);
+ }
+}
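+
+/*
+ * Worked example (illustrative): for a 32-bit-aligned source and len == 7,
+ * the path above issues one 4-byte write (len >> 2 == 1), one 2-byte write
+ * for the 0x02 remainder, and a final single-byte write for the odd byte;
+ * an odd-aligned source falls back to byte-at-a-time writes throughout.
+ */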
+
+/*
+ * Unload an endpoint's FIFO
+ */
+static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ struct musb *musb = hw_ep->musb;
+ void __iomem *fifo = hw_ep->fifo;
+
+ if (unlikely(len == 0))
+ return;
+
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
+
+ /* we can't assume unaligned writes work */
+ if (likely((0x01 & (unsigned long) dst) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) dst) == 0) {
+ if (len >= 4) {
+ ioread32_rep(fifo, dst, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x02) {
+ *(u16 *)&dst[index] = __raw_readw(fifo);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ ioread16_rep(fifo, dst, len >> 1);
+ index = len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ dst[index] = __raw_readb(fifo);
+ } else {
+ /* byte aligned */
+ ioread8_rep(fifo, dst, len);
+ }
+}
+
+/*
+ * Old style IO functions
+ */
+u8 (*musb_readb)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_readb);
+
+void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
+EXPORT_SYMBOL_GPL(musb_writeb);
+
+u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearb);
+
+u16 (*musb_readw)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_readw);
+
+void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
+EXPORT_SYMBOL_GPL(musb_writew);
+
+u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearw);
+
+u32 musb_readl(void __iomem *addr, u32 offset)
+{
+ u32 data = __raw_readl(addr + offset);
+
+ trace_musb_readl(__builtin_return_address(0), addr, offset, data);
+ return data;
+}
+EXPORT_SYMBOL_GPL(musb_readl);
+
+void musb_writel(void __iomem *addr, u32 offset, u32 data)
+{
+ trace_musb_writel(__builtin_return_address(0), addr, offset, data);
+ __raw_writel(data, addr + offset);
+}
+EXPORT_SYMBOL_GPL(musb_writel);
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+struct dma_controller *
+(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
+EXPORT_SYMBOL(musb_dma_controller_create);
+
+void (*musb_dma_controller_destroy)(struct dma_controller *c);
+EXPORT_SYMBOL(musb_dma_controller_destroy);
+#endif
+
+/*
+ * New style IO functions
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
+}
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+ return hw_ep->musb->io.write_fifo(hw_ep, len, src);
+}
+
+static u8 musb_read_devctl(struct musb *musb)
+{
+ return musb_readb(musb->mregs, MUSB_DEVCTL);
+}
+
+/**
+ * musb_set_host - set and initialize host mode
+ * @musb: musb controller driver data
+ *
+ * At least some musb revisions need to enable devctl session bit in
+ * peripheral mode to switch to host mode. Initializes things to host
+ * mode and sets A_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ *
+ * Note that the SoC glue code may need to wait for musb to settle
+ * on enable before calling this to avoid babble.
+ */
+int musb_set_host(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
+ trace_musb_state(musb, devctl, "Already in host mode");
+ goto init_data;
+ }
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ !(devctl & MUSB_DEVCTL_BDEVICE), 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set host: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+ devctl = musb_read_devctl(musb);
+ trace_musb_state(musb, devctl, "Host mode set");
+
+init_data:
+ musb->is_active = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_host);
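+
+/*
+ * Usage sketch (hypothetical glue code, not part of this file): a SoC glue
+ * driver that has just observed VBUS from its PHY might switch roles with:
+ *
+ *	error = musb_set_host(glue->musb);
+ *	if (error)
+ *		dev_err(dev, "failed to switch to host: %d\n", error);
+ *
+ * and then, per the kernel-doc above, advance the OTG state machine based
+ * on the PHY-reported VBUS state.
+ */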
+
+/**
+ * musb_set_peripheral - set and initialize peripheral mode
+ * @musb: musb controller driver data
+ *
+ * Clears devctl session bit and initializes things for peripheral
+ * mode and sets B_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ */
+int musb_set_peripheral(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ trace_musb_state(musb, devctl, "Already in peripheral mode");
+ goto init_data;
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ devctl & MUSB_DEVCTL_BDEVICE, 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+ devctl = musb_read_devctl(musb);
+ trace_musb_state(musb, devctl, "Peripheral mode set");
+
+init_data:
+ musb->is_active = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_peripheral);
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+ /* implicit CRC16 then EOP to end */
+};
+
+void musb_load_testpacket(struct musb *musb)
+{
+ void __iomem *regs = musb->endpoints[0].regs;
+
+ musb_ep_select(musb->mregs, 0);
+ musb_write_fifo(musb->control_ep,
+ sizeof(musb_test_packet), musb_test_packet);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Handles OTG HNP timeouts, such as b_ase0_brst
+ */
+static void musb_otg_timer_func(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, otg_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_WAIT_ACON:
+ musb_dbg(musb,
+ "HNP: b_wait_acon timeout; back to b_peripheral");
+ musb_g_disconnect(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_SUSPEND:
+ case OTG_STATE_A_WAIT_BCON:
+ musb_dbg(musb, "HNP: %s timeout",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ musb_platform_set_vbus(musb, 0);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ default:
+ musb_dbg(musb, "HNP: Unhandled mode %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Stops the HNP transition. Caller must take care of locking.
+ */
+void musb_hnp_stop(struct musb *musb)
+{
+ struct usb_hcd *hcd = musb->hcd;
+ void __iomem *mbase = musb->mregs;
+ u8 reg;
+
+ musb_dbg(musb, "HNP: stop from %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_PERIPHERAL:
+ musb_g_disconnect(musb);
+ musb_dbg(musb, "HNP: back to %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ break;
+ case OTG_STATE_B_HOST:
+ musb_dbg(musb, "HNP: Disabling HR");
+ if (hcd)
+ hcd->self.is_b_host = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ reg = musb_readb(mbase, MUSB_POWER);
+ reg |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ /* REVISIT: Start SESSION_REQUEST here? */
+ break;
+ default:
+ musb_dbg(musb, "HNP: Stopping in unknown state %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+
+ /*
+ * When returning to A state after HNP, avoid hub_port_rebounce(),
+	 * which causes occasional OPT A "Did not receive reset after connect"
+ * errors.
+ */
+ musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
+}
+
+static void musb_recover_from_babble(struct musb *musb);
+
+static void musb_handle_intr_resume(struct musb *musb, u8 devctl)
+{
+ musb_dbg(musb, "RESUME (%s)",
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ if (devctl & MUSB_DEVCTL_HM) {
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_SUSPEND:
+ /* remote wakeup? */
+ musb->port1_status |=
+ (USB_PORT_STAT_C_SUSPEND << 16)
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+ musb->is_active = 1;
+ musb_host_resume_root_hub(musb);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 1;
+ MUSB_DEV_MODE(musb);
+ break;
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "host",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+ } else {
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_SUSPEND:
+ /* possibly DISCONNECT is upcoming */
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+ musb_host_resume_root_hub(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ /* disconnect while suspended? we may
+ * not get a disconnect irq...
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+ ) {
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+ }
+ musb_g_resume(musb);
+ break;
+ case OTG_STATE_B_IDLE:
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "peripheral",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+ }
+}
+
+/* return IRQ_HANDLED to tell the caller to return immediately */
+static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl)
+{
+ void __iomem *mbase = musb->mregs;
+
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
+ && (devctl & MUSB_DEVCTL_BDEVICE)) {
+ musb_dbg(musb, "SessReq while on B state");
+ return IRQ_HANDLED;
+ }
+
+ musb_dbg(musb, "SESSION_REQUEST (%s)",
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ /* IRQ arrives from ID pin sense or (later, if VBUS power
+	 * is removed) SRP. Responses are time critical:
+ * - turn on VBUS (with silicon-specific mechanism)
+ * - go through A_WAIT_VRISE
+ * - ... to A_WAIT_BCON.
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+ */
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb->ep0_stage = MUSB_EP0_START;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb_platform_set_vbus(musb, 1);
+
+ return IRQ_NONE;
+}
+
+static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl)
+{
+ int ignore = 0;
+
+	/* During connection as an A-Device, we may see short
+	 * current spikes causing voltage drops, because of cable
+ * and peripheral capacitance combined with vbus draw.
+ * (So: less common with truly self-powered devices, where
+ * vbus doesn't act like a power supply.)
+ *
+ * Such spikes are short; usually less than ~500 usec, max
+ * of ~2 msec. That is, they're not sustained overcurrent
+ * errors, though they're reported using VBUSERROR irqs.
+ *
+ * Workarounds: (a) hardware: use self powered devices.
+ * (b) software: ignore non-repeated VBUS errors.
+ *
+ * REVISIT: do delays from lots of DEBUG_KERNEL checks
+ * make trouble here, keeping VBUS < 4.4V ?
+ */
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_HOST:
+ /* recovery is dicey once we've gotten past the
+ * initial stages of enumeration, but if VBUS
+ * stayed ok at the other end of the link, and
+ * another reset is due (at least for high speed,
+ * to redo the chirp etc), it might work OK...
+ */
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_WAIT_VRISE:
+ if (musb->vbuserr_retry) {
+ void __iomem *mbase = musb->mregs;
+
+ musb->vbuserr_retry--;
+ ignore = 1;
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mbase, MUSB_DEVCTL, devctl);
+ } else {
+ musb->port1_status |=
+ USB_PORT_STAT_OVERCURRENT
+ | (USB_PORT_STAT_C_OVERCURRENT << 16);
+ }
+ break;
+ default:
+ break;
+ }
+
+ dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
+ "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ devctl,
+ ({ char *s;
+ switch (devctl & MUSB_DEVCTL_VBUS) {
+ case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<SessEnd"; break;
+ case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<AValid"; break;
+ case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<VBusValid"; break;
+ /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+ default:
+ s = "VALID"; break;
+ } s; }),
+ VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+ musb->port1_status);
+
+ /* go through A_WAIT_VFALL then start a new session */
+ if (!ignore)
+ musb_platform_set_vbus(musb, 0);
+}
+
+static void musb_handle_intr_suspend(struct musb *musb, u8 devctl)
+{
+ musb_dbg(musb, "SUSPEND (%s) devctl %02x",
+ usb_otg_state_string(musb->xceiv->otg->state), devctl);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_PERIPHERAL:
+ /* We also come here if the cable is removed, since
+ * this silicon doesn't report ID-no-longer-grounded.
+ *
+ * We depend on T(a_wait_bcon) to shut us down, and
+ * hope users don't do anything dicey during this
+ * undesired detour through A_WAIT_BCON.
+ */
+ musb_hnp_stop(musb);
+ musb_host_resume_root_hub(musb);
+ musb_root_disconnect(musb);
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon
+ ? : OTG_TIME_A_WAIT_BCON));
+
+ break;
+ case OTG_STATE_B_IDLE:
+ if (!musb->is_active)
+ break;
+ fallthrough;
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = musb->g.b_hnp_enable;
+ if (musb->is_active) {
+ musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
+ musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_B_ASE0_BRST));
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = musb->hcd->self.b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+}
+
+static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb)
+{
+ struct usb_hcd *hcd = musb->hcd;
+
+ musb->is_active = 1;
+ musb->ep0_stage = MUSB_EP0_START;
+
+ musb->intrtxe = musb->epmask;
+ musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
+ musb->intrrxe = musb->epmask & 0xfffe;
+ musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
+ musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+ |USB_PORT_STAT_HIGH_SPEED
+ |USB_PORT_STAT_ENABLE
+ );
+ musb->port1_status |= USB_PORT_STAT_CONNECTION
+ |(USB_PORT_STAT_C_CONNECTION << 16);
+
+ /* high vs full speed is just a guess until after reset */
+ if (devctl & MUSB_DEVCTL_LSDEV)
+ musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+ /* indicate new connection to OTG machine */
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_PERIPHERAL:
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
+ int_usb &= ~MUSB_INTR_SUSPEND;
+ goto b_host;
+ } else
+ musb_dbg(musb, "CONNECT as b_peripheral???");
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb_dbg(musb, "HNP: CONNECT, now b_host");
+b_host:
+ musb->xceiv->otg->state = OTG_STATE_B_HOST;
+ if (musb->hcd)
+ musb->hcd->self.is_b_host = 1;
+ del_timer(&musb->otg_timer);
+ break;
+ default:
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+ if (hcd)
+ hcd->self.is_b_host = 0;
+ }
+ break;
+ }
+
+ musb_host_poke_root_hub(musb);
+
+ musb_dbg(musb, "CONNECT (%s) devctl %02x",
+ usb_otg_state_string(musb->xceiv->otg->state), devctl);
+}
+
+static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl)
+{
+ musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ MUSB_MODE(musb), devctl);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb_host_resume_root_hub(musb);
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_B_HOST:
+ /* REVISIT this behaves for "real disconnect"
+		 * cases; make sure the other transitions
+		 * from B_HOST act right too. The B_HOST code
+ * in hnp_stop() is currently not used...
+ */
+ musb_root_disconnect(musb);
+ if (musb->hcd)
+ musb->hcd->self.is_b_host = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ musb_g_disconnect(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ fallthrough;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ break;
+ }
+}
+
+/*
+ * mentor saves a bit: bus reset and babble share the same irq.
+ * only host sees babble; only peripheral sees bus reset.
+ */
+static void musb_handle_intr_reset(struct musb *musb)
+{
+ if (is_host_active(musb)) {
+ /*
+		 * When BABBLE happens, what we can do depends on which
+		 * platform MUSB is running on, because some platforms
+		 * implement proprietary means for 'recovering' from
+		 * babble conditions. One such platform is AM335x. In
+ * most cases, however, the only thing we can do is
+ * drop the session.
+ */
+ dev_err(musb->controller, "Babble\n");
+ musb_recover_from_babble(musb);
+ } else {
+ musb_dbg(musb, "BUS RESET as %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_SUSPEND:
+ musb_g_reset(musb);
+ fallthrough;
+ case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
+ /* never use invalid T(a_wait_bcon) */
+ musb_dbg(musb, "HNP: in %s, %d msec timeout",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ TA_WAIT_BCON(musb));
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(TA_WAIT_BCON(musb)));
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ del_timer(&musb->otg_timer);
+ musb_g_reset(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ musb_g_reset(musb);
+ break;
+ case OTG_STATE_B_IDLE:
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ fallthrough;
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_reset(musb);
+ break;
+ default:
+ musb_dbg(musb, "Unhandled BUS RESET as %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+ }
+}
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl DEVCTL register contents
+ */
+
+static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ u8 devctl)
+{
+ irqreturn_t handled = IRQ_NONE;
+
+ musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
+
+ /* in host mode, the peripheral may issue remote wakeup.
+ * in peripheral mode, the host may resume the link.
+ * spurious RESUME irqs happen too, paired with SUSPEND.
+ */
+ if (int_usb & MUSB_INTR_RESUME) {
+ musb_handle_intr_resume(musb, devctl);
+ handled = IRQ_HANDLED;
+ }
+
+ /* see manual for the order of the tests */
+ if (int_usb & MUSB_INTR_SESSREQ) {
+ if (musb_handle_intr_sessreq(musb, devctl))
+ return IRQ_HANDLED;
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_VBUSERROR) {
+ musb_handle_intr_vbuserr(musb, devctl);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ musb_handle_intr_suspend(musb, devctl);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_CONNECT) {
+ musb_handle_intr_connect(musb, devctl, int_usb);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_DISCONNECT) {
+ musb_handle_intr_disconnect(musb, devctl);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_RESET) {
+ musb_handle_intr_reset(musb);
+ handled = IRQ_HANDLED;
+ }
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+ if (int_usb & MUSB_INTR_SOF) {
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *ep;
+ u8 epnum;
+ u16 frame;
+
+ dev_dbg(musb->controller, "START_OF_FRAME\n");
+ handled = IRQ_HANDLED;
+
+ /* start any periodic Tx transfers waiting for current frame */
+ frame = musb_readw(mbase, MUSB_FRAME);
+ ep = musb->endpoints;
+ for (epnum = 1; (epnum < musb->nr_endpoints)
+ && (musb->epmask >= (1 << epnum));
+ epnum++, ep++) {
+ /*
+ * FIXME handle framecounter wraps (12 bits)
+ * eliminate duplicated StartUrb logic
+ */
+ if (ep->dwWaitFrame >= frame) {
+ ep->dwWaitFrame = 0;
+ pr_debug("SOF --> periodic TX%s on %d\n",
+ ep->tx_channel ? " DMA" : "",
+ epnum);
+ if (!ep->tx_channel)
+ musb_h_tx_start(musb, epnum);
+ else
+ cppi_hostdma_start(musb, epnum);
+ }
+ } /* end of for loop */
+ }
+#endif
+
+ schedule_delayed_work(&musb->irq_work, 0);
+
+ return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void musb_disable_interrupts(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+
+ /* disable interrupts */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0);
+ musb->intrtxe = 0;
+ musb_writew(mbase, MUSB_INTRTXE, 0);
+ musb->intrrxe = 0;
+ musb_writew(mbase, MUSB_INTRRXE, 0);
+
+ /* flush pending interrupts */
+ musb_clearb(mbase, MUSB_INTRUSB);
+ musb_clearw(mbase, MUSB_INTRTX);
+ musb_clearw(mbase, MUSB_INTRRX);
+}
+
+static void musb_enable_interrupts(struct musb *musb)
+{
+ void __iomem *regs = musb->mregs;
+
+ /* Set INT enable registers, enable interrupts */
+ musb->intrtxe = musb->epmask;
+ musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+ musb->intrrxe = musb->epmask & 0xfffe;
+ musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
+ musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+}
+
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+ void __iomem *regs = musb->mregs;
+ u8 devctl = musb_readb(regs, MUSB_DEVCTL);
+ u8 power;
+
+ musb_dbg(musb, "<== devctl %02x", devctl);
+
+ musb_enable_interrupts(musb);
+ musb_writeb(regs, MUSB_TESTMODE, 0);
+
+ power = MUSB_POWER_ISOUPDATE;
+ /*
+ * treating UNKNOWN as unspecified maximum speed, in which case
+ * we will default to high-speed.
+ */
+ if (musb->config->maximum_speed == USB_SPEED_HIGH ||
+ musb->config->maximum_speed == USB_SPEED_UNKNOWN)
+ power |= MUSB_POWER_HSENAB;
+ musb_writeb(regs, MUSB_POWER, power);
+
+ musb->is_active = 0;
+ devctl = musb_readb(regs, MUSB_DEVCTL);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ /* session started after:
+ * (a) ID-grounded irq, host mode;
+ * (b) vbus present/connect IRQ, peripheral mode;
+ * (c) peripheral initiates, using SRP
+ */
+ if (musb->port_mode != MUSB_HOST &&
+ musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
+ (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+ musb->is_active = 1;
+ } else {
+ devctl |= MUSB_DEVCTL_SESSION;
+ }
+
+ musb_platform_enable(musb);
+ musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb *musb)
+{
+ /* stop IRQs, timers, ... */
+ musb_platform_disable(musb);
+ musb_disable_interrupts(musb);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+ /* FIXME
+ * - mark host and/or peripheral drivers unusable/inactive
+ * - disable DMA (and enable it in HdrcStart)
+ * - make sure we can musb_start() after musb_stop(); with
+ * OTG mode, gadget driver module rmmod/modprobe cycles that
+ * - ...
+ */
+ musb_platform_try_idle(musb, 0);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing. The driver has support for both, though at this
+ * writing only the dynamic sizing is very well tested. Since we switched
+ * away from compile-time hardware parameters, we can no longer rely on
+ * dead code elimination to leave only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
+static ushort fifo_mode;
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+/*
+ * tables defining fifo_mode values. define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static struct musb_fifo_cfg mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static struct musb_fifo_cfg mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static struct musb_fifo_cfg mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+/* mode 3 - fits in 4KB */
+static struct musb_fifo_cfg mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 4 - fits in 16KB */
+static struct musb_fifo_cfg mode_4_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+/* mode 5 - fits in 8KB */
+static struct musb_fifo_cfg mode_5_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+static int
+fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ const struct musb_fifo_cfg *cfg, u16 offset)
+{
+ void __iomem *mbase = musb->mregs;
+ int size = 0;
+ u16 maxpacket = cfg->maxpacket;
+ u16 c_off = offset >> 3;
+ u8 c_size;
+
+ /* expect hw_ep has already been zero-initialized */
+
+ size = ffs(max(maxpacket, (u16) 8)) - 1;
+ maxpacket = 1 << size;
+
+ c_size = size - 3;
+ if (cfg->mode == BUF_DOUBLE) {
+ if ((offset + (maxpacket << 1)) >
+ (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ c_size |= MUSB_FIFOSZ_DPB;
+ } else {
+ if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ }
+
+ /* configure the FIFO */
+ musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
+
+ /* EP0 reserved endpoint for control, bidirectional;
+ * EP1 reserved for bulk, two unidirectional halves.
+ */
+ if (hw_ep->epnum == 1)
+ musb->bulk_ep = hw_ep;
+ /* REVISIT error check: be sure ep0 can both rx and tx ... */
+ switch (cfg->style) {
+ case FIFO_TX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_tx = maxpacket;
+ break;
+ case FIFO_RX:
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+ break;
+ case FIFO_RXTX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+ hw_ep->max_packet_sz_tx = maxpacket;
+
+ hw_ep->is_shared_fifo = true;
+ break;
+ }
+
+ /* NOTE rx and tx endpoint irqs aren't managed separately,
+ * which happens to be ok
+ */
+ musb->epmask |= (1 << hw_ep->epnum);
+
+ return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+}
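+
+/*
+ * Worked example of the encoding above, assuming maxpacket 512 with
+ * BUF_DOUBLE: ffs(512) - 1 = 9, so c_size = (9 - 3) | MUSB_FIFOSZ_DPB,
+ * the start address register takes offset >> 3 (8-byte units), and the
+ * next free offset is offset + 1024. A hypothetical helper spelling out
+ * the same size math (illustration only, not used by the driver):
+ */
+static inline u16 example_fifo_bytes(u16 maxpacket, bool double_buffered)
+{
+	/* round up to a power of two, minimum 8 bytes */
+	u16 rounded = 1 << (ffs(max_t(u16, maxpacket, 8)) - 1);
+
+	/* a double-buffered FIFO occupies twice the packet size */
+	return rounded << (double_buffered ? 1 : 0);
+}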
+
+static struct musb_fifo_cfg ep0_cfg = {
+ .style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int ep_config_from_table(struct musb *musb)
+{
+ const struct musb_fifo_cfg *cfg;
+ unsigned i, n;
+ int offset;
+ struct musb_hw_ep *hw_ep = musb->endpoints;
+
+ if (musb->config->fifo_cfg) {
+ cfg = musb->config->fifo_cfg;
+ n = musb->config->fifo_cfg_size;
+ goto done;
+ }
+
+ switch (fifo_mode) {
+ default:
+ fifo_mode = 0;
+ fallthrough;
+ case 0:
+ cfg = mode_0_cfg;
+ n = ARRAY_SIZE(mode_0_cfg);
+ break;
+ case 1:
+ cfg = mode_1_cfg;
+ n = ARRAY_SIZE(mode_1_cfg);
+ break;
+ case 2:
+ cfg = mode_2_cfg;
+ n = ARRAY_SIZE(mode_2_cfg);
+ break;
+ case 3:
+ cfg = mode_3_cfg;
+ n = ARRAY_SIZE(mode_3_cfg);
+ break;
+ case 4:
+ cfg = mode_4_cfg;
+ n = ARRAY_SIZE(mode_4_cfg);
+ break;
+ case 5:
+ cfg = mode_5_cfg;
+ n = ARRAY_SIZE(mode_5_cfg);
+ break;
+ }
+
+ pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
+
+done:
+ offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+ /* assert(offset > 0) */
+
+ /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
+ * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
+ */
+
+ for (i = 0; i < n; i++) {
+ u8 epn = cfg->hw_ep_num;
+
+ if (epn >= musb->config->num_eps) {
+ pr_debug("%s: invalid ep %d\n",
+ musb_driver_name, epn);
+ return -EINVAL;
+ }
+ offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+ if (offset < 0) {
+ pr_debug("%s: mem overrun, ep %d\n",
+ musb_driver_name, epn);
+ return offset;
+ }
+ epn++;
+ musb->nr_endpoints = max(epn, musb->nr_endpoints);
+ }
+
+ pr_debug("%s: %d/%d max ep, %d/%d memory\n",
+ musb_driver_name,
+ n + 1, musb->config->num_eps * 2 - 1,
+ offset, (1 << (musb->config->ram_bits + 2)));
+
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
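+
+/*
+ * Example: instead of one of the fifo_mode tables above, a glue layer
+ * or board file may supply its own table through struct
+ * musb_hdrc_config; ep_config_from_table() then uses it directly. A
+ * minimal sketch with illustrative values:
+ */
+static struct musb_fifo_cfg example_fifo_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+};
+
+static struct musb_hdrc_config example_config = {
+	.fifo_cfg	= example_fifo_cfg,
+	.fifo_cfg_size	= ARRAY_SIZE(example_fifo_cfg),
+	.multipoint	= true,
+	.num_eps	= 16,
+	.ram_bits	= 12,
+};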
+
+
+/*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
+static int ep_config_from_hw(struct musb *musb)
+{
+ u8 epnum = 0;
+ struct musb_hw_ep *hw_ep;
+ void __iomem *mbase = musb->mregs;
+ int ret = 0;
+
+ musb_dbg(musb, "<== static silicon ep config");
+
+ /* FIXME pick up ep0 maxpacket size */
+
+ for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
+ musb_ep_select(mbase, epnum);
+ hw_ep = musb->endpoints + epnum;
+
+ ret = musb_read_fifosize(musb, hw_ep, epnum);
+ if (ret < 0)
+ break;
+
+ /* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+ /* pick an RX/TX endpoint for bulk */
+ if (hw_ep->max_packet_sz_tx < 512
+ || hw_ep->max_packet_sz_rx < 512)
+ continue;
+
+ /* REVISIT: this algorithm is lazy, we should at least
+ * try to pick a double buffered endpoint.
+ */
+ if (musb->bulk_ep)
+ continue;
+ musb->bulk_ep = hw_ep;
+ }
+
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int musb_core_init(u16 musb_type, struct musb *musb)
+{
+ u8 reg;
+ char *type;
+ char aInfo[90];
+ void __iomem *mbase = musb->mregs;
+ int status = 0;
+ int i;
+
+ /* log core options (read using indexed model) */
+ reg = musb_read_configdata(mbase);
+
+ strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
+ strcat(aInfo, ", dyn FIFOs");
+ musb->dyn_fifo = true;
+ }
+ if (reg & MUSB_CONFIGDATA_MPRXE) {
+ strcat(aInfo, ", bulk combine");
+ musb->bulk_combine = true;
+ }
+ if (reg & MUSB_CONFIGDATA_MPTXE) {
+ strcat(aInfo, ", bulk split");
+ musb->bulk_split = true;
+ }
+ if (reg & MUSB_CONFIGDATA_HBRXE) {
+ strcat(aInfo, ", HB-ISO Rx");
+ musb->hb_iso_rx = true;
+ }
+ if (reg & MUSB_CONFIGDATA_HBTXE) {
+ strcat(aInfo, ", HB-ISO Tx");
+ musb->hb_iso_tx = true;
+ }
+ if (reg & MUSB_CONFIGDATA_SOFTCONE)
+ strcat(aInfo, ", SoftConn");
+
+ pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
+
+ if (MUSB_CONTROLLER_MHDRC == musb_type) {
+ musb->is_multipoint = 1;
+ type = "M";
+ } else {
+ musb->is_multipoint = 0;
+ type = "";
+ if (IS_ENABLED(CONFIG_USB) &&
+ !IS_ENABLED(CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB)) {
+ pr_err("%s: kernel must disable external hubs, please fix the configuration\n",
+ musb_driver_name);
+ }
+ }
+
+ /* log release info */
+ musb->hwvers = musb_readw(mbase, MUSB_HWVERS);
+ pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
+ musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
+ MUSB_HWVERS_MINOR(musb->hwvers),
+ (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
+
+ /* configure ep0 */
+ musb_configure_ep0(musb);
+
+ /* discover endpoint configuration */
+ musb->nr_endpoints = 1;
+ musb->epmask = 1;
+
+ if (musb->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else
+ status = ep_config_from_hw(musb);
+
+ if (status < 0)
+ return status;
+
+ /* finish init, and print endpoint config */
+ for (i = 0; i < musb->nr_endpoints; i++) {
+ struct musb_hw_ep *hw_ep = musb->endpoints + i;
+
+ hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
+ if (musb->ops->quirks & MUSB_IN_TUSB) {
+ hw_ep->fifo_async = musb->async + 0x400 +
+ musb->io.fifo_offset(i);
+ hw_ep->fifo_sync = musb->sync + 0x400 +
+ musb->io.fifo_offset(i);
+ hw_ep->fifo_sync_va =
+ musb->sync_va + 0x400 + musb->io.fifo_offset(i);
+
+ if (i == 0)
+ hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+ else
+ hw_ep->conf = mbase + 0x400 +
+ (((i - 1) & 0xf) << 2);
+ }
+#endif
+
+ hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
+ hw_ep->rx_reinit = 1;
+ hw_ep->tx_reinit = 1;
+
+ if (hw_ep->max_packet_sz_tx) {
+ musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
+ musb_driver_name, i,
+ hw_ep->is_shared_fifo ? "shared" : "tx",
+ hw_ep->tx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_tx);
+ }
+ if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+ musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
+ musb_driver_name, i,
+ "rx",
+ hw_ep->rx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_rx);
+ }
+ if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+ musb_dbg(musb, "hw_ep %d not configured", i);
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect: other
+ * irq sources (phy, dma, etc) will be handled first, musb->int_* values
+ * will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+ irqreturn_t retval = IRQ_NONE;
+ unsigned long status;
+ unsigned long epnum;
+ u8 devctl;
+
+ if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
+ return IRQ_NONE;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ trace_musb_isr(musb);
+
+	/*
+ * According to Mentor Graphics' documentation, flowchart on page 98,
+ * IRQ should be handled as follows:
+ *
+ * . Resume IRQ
+ * . Session Request IRQ
+ * . VBUS Error IRQ
+ * . Suspend IRQ
+ * . Connect IRQ
+ * . Disconnect IRQ
+ * . Reset/Babble IRQ
+ * . SOF IRQ (we're not using this one)
+ * . Endpoint 0 IRQ
+ * . TX Endpoints
+ * . RX Endpoints
+ *
+	 * We follow that flowchart in order to avoid any problems that
+	 * might arise with the internal finite state machine.
+ */
+
+ if (musb->int_usb)
+ retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
+
+ if (musb->int_tx & 1) {
+ if (is_host_active(musb))
+ retval |= musb_h_ep0_irq(musb);
+ else
+ retval |= musb_g_ep0_irq(musb);
+
+ /* we have just handled endpoint 0 IRQ, clear it */
+ musb->int_tx &= ~BIT(0);
+ }
+
+ status = musb->int_tx;
+
+ for_each_set_bit(epnum, &status, 16) {
+ retval = IRQ_HANDLED;
+ if (is_host_active(musb))
+ musb_host_tx(musb, epnum);
+ else
+ musb_g_tx(musb, epnum);
+ }
+
+ status = musb->int_rx;
+
+ for_each_set_bit(epnum, &status, 16) {
+ retval = IRQ_HANDLED;
+ if (is_host_active(musb))
+ musb_host_rx(musb, epnum);
+ else
+ musb_g_rx(musb, epnum);
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(musb_interrupt);
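+
+/*
+ * Example: a glue layer ISR latches the interrupt status registers into
+ * musb->int_usb/int_tx/int_rx and calls musb_interrupt() with the
+ * controller lock held. A minimal sketch, assuming the generic register
+ * layout (platform-specific wrapper registers may differ):
+ */
+static irqreturn_t example_glue_interrupt(int irq, void *__hci)
+{
+	struct musb *musb = __hci;
+	unsigned long flags;
+	irqreturn_t retval = IRQ_NONE;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	/* musb_clearb/musb_clearw also ack the interrupts on most cores */
+	musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+	musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+	musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+	if (musb->int_usb || musb->int_tx || musb->int_rx)
+		retval = musb_interrupt(musb);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return retval;
+}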
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static bool use_dma = true;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+{
+ /* called with controller lock already held */
+
+ if (!epnum) {
+ if (!is_cppi_enabled(musb)) {
+ /* endpoint 0 */
+ if (is_host_active(musb))
+ musb_h_ep0_irq(musb);
+ else
+ musb_g_ep0_irq(musb);
+ }
+ } else {
+ /* endpoints 1..15 */
+ if (transmit) {
+ if (is_host_active(musb))
+ musb_host_tx(musb, epnum);
+ else
+ musb_g_tx(musb, epnum);
+ } else {
+ /* receive */
+ if (is_host_active(musb))
+ musb_host_rx(musb, epnum);
+ else
+ musb_g_rx(musb, epnum);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(musb_dma_completion);
+
+#else
+#define use_dma 0
+#endif
+
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
+
+/*
+ * musb_mailbox - optional phy notifier function
+ * @status: phy state change
+ *
+ * Optionally gets called from the USB PHY. Note that the USB PHY must be
+ * disabled at the point the phy_callback is registered or unregistered.
+ */
+int musb_mailbox(enum musb_vbus_id_status status)
+{
+ if (musb_phy_callback)
+ return musb_phy_callback(status);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(musb_mailbox);
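+
+/*
+ * Example: a PHY driver reports VBUS/ID changes through musb_mailbox().
+ * A minimal sketch, assuming the caller has already classified the new
+ * state (enum values from <linux/usb/musb.h>):
+ */
+static void example_phy_notify(bool id_grounded, bool vbus_valid)
+{
+	enum musb_vbus_id_status status;
+
+	if (id_grounded)
+		status = MUSB_ID_GROUND;
+	else if (vbus_valid)
+		status = MUSB_VBUS_VALID;
+	else
+		status = MUSB_VBUS_OFF;
+
+	/* returns -ENODEV until a glue layer registers its callback */
+	musb_mailbox(status);
+}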
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static ssize_t
+mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ if (sysfs_streq(buf, "host"))
+ status = musb_platform_set_mode(musb, MUSB_HOST);
+ else if (sysfs_streq(buf, "peripheral"))
+ status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ else if (sysfs_streq(buf, "otg"))
+ status = musb_platform_set_mode(musb, MUSB_OTG);
+ else
+ status = -EINVAL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return (status == 0) ? n : status;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t
+vbus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+
+ if (sscanf(buf, "%lu", &val) < 1) {
+ dev_err(dev, "Invalid VBUS timeout ms value\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
+	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
+ if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+
+static ssize_t
+vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+ int vbus;
+ u8 devctl;
+
+ pm_runtime_get_sync(dev);
+ spin_lock_irqsave(&musb->lock, flags);
+ val = musb->a_wait_bcon;
+ vbus = musb_platform_get_vbus_status(musb);
+ if (vbus < 0) {
+ /* Use default MUSB method by means of DEVCTL register */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ == (3 << MUSB_DEVCTL_VBUS_SHIFT))
+ vbus = 1;
+ else
+ vbus = 0;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_put_sync(dev);
+
+ return sprintf(buf, "Vbus %s, timeout %lu msec\n",
+ vbus ? "on" : "off", val);
+}
+static DEVICE_ATTR_RW(vbus);
+
+/* Gadget drivers can't know that a host is connected, so they can't
+ * decide to start SRP themselves; users can, though. This attribute
+ * lets userspace trigger SRP.
+ */
+static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned short srp;
+
+ if (sscanf(buf, "%hu", &srp) != 1
+ || (srp != 1)) {
+ dev_err(dev, "SRP: Value must be 1\n");
+ return -EINVAL;
+ }
+
+	musb_g_wakeup(musb);
+
+ return n;
+}
+static DEVICE_ATTR_WO(srp);
+
+static struct attribute *musb_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_vbus.attr,
+ &dev_attr_srp.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(musb);
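+
+/*
+ * Example: with the group registered through the driver's dev_groups
+ * below, userspace can inspect the OTG state and switch roles, e.g.
+ * (the exact sysfs path depends on how the platform device is named):
+ *
+ *	cat /sys/bus/platform/devices/musb-hdrc.0/mode
+ *	echo host > /sys/bus/platform/devices/musb-hdrc.0/mode
+ *	echo 1 > /sys/bus/platform/devices/musb-hdrc.0/srp
+ */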
+
+#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
+ (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
+ (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
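+
+/*
+ * The numeric suffixes are the raw DEVCTL values these masks match:
+ * with MUSB_DEVCTL_BDEVICE = 0x80, MUSB_DEVCTL_VBUS_SHIFT = 3 and
+ * MUSB_DEVCTL_SESSION = 0x01 (see musb_regs.h), spelled out:
+ *
+ *	0x91 = 0x80 | (2 << 3) | 0x01	(B-device, VBUS not yet valid)
+ *	0x99 = 0x80 | (3 << 3) | 0x01	(B-device, VBUS valid)
+ *	0x19 =        (3 << 3) | 0x01	(A-device, VBUS valid)
+ */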
+
+static bool musb_state_needs_recheck(struct musb *musb, u8 devctl,
+ const char *desc)
+{
+ if (musb->quirk_retries && !musb->flush_irq_work) {
+ trace_musb_state(musb, devctl, desc);
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ musb->quirk_retries--;
+
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check the musb devctl session bit to determine if we want to
+ * allow PM runtime for the device. In general, we want to keep things
+ * active when the session bit is set except after host disconnect.
+ *
+ * Only called from musb_irq_work. If this ever needs to get called
+ * elsewhere, proper locking must be implemented for musb->session.
+ */
+static void musb_pm_runtime_check_session(struct musb *musb)
+{
+ u8 devctl, s;
+ int error;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ /* Handle session status quirks first */
+ s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
+ MUSB_DEVCTL_HR;
+ switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+ musb_state_needs_recheck(musb, devctl,
+ "Poll devctl in case of suspend after disconnect");
+ break;
+ case MUSB_QUIRK_B_INVALID_VBUS_91:
+ if (musb_state_needs_recheck(musb, devctl,
+ "Poll devctl on invalid vbus, assume no session"))
+ return;
+ fallthrough;
+ case MUSB_QUIRK_A_DISCONNECT_19:
+ if (musb_state_needs_recheck(musb, devctl,
+ "Poll devctl on possible host mode disconnect"))
+ return;
+ if (!musb->session)
+ break;
+ trace_musb_state(musb, devctl, "Allow PM on possible host mode disconnect");
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ musb->session = false;
+ return;
+ default:
+ break;
+ }
+
+ /* No need to do anything if session has not changed */
+ s = devctl & MUSB_DEVCTL_SESSION;
+ if (s == musb->session)
+ return;
+
+ /* Block PM or allow PM? */
+ if (s) {
+ trace_musb_state(musb, devctl, "Block PM on active session");
+ error = pm_runtime_get_sync(musb->controller);
+ if (error < 0)
+ dev_err(musb->controller, "Could not enable: %i\n",
+ error);
+ musb->quirk_retries = 3;
+
+ /*
+ * We can get a spurious MUSB_INTR_SESSREQ interrupt on start-up
+ * in B-peripheral mode with nothing connected and the session
+ * bit clears silently. Check status again in 3 seconds.
+ */
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(3000));
+ } else {
+ trace_musb_state(musb, devctl, "Allow PM with no session");
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+
+ musb->session = s;
+}
+
+/* Only used to provide driver mode change events */
+static void musb_irq_work(struct work_struct *data)
+{
+ struct musb *musb = container_of(data, struct musb, irq_work.work);
+ int error;
+
+ error = pm_runtime_resume_and_get(musb->controller);
+ if (error < 0) {
+ dev_err(musb->controller, "Could not enable: %i\n", error);
+
+ return;
+ }
+
+ musb_pm_runtime_check_session(musb);
+
+ if (musb->xceiv->otg->state != musb->xceiv_old_state) {
+ musb->xceiv_old_state = musb->xceiv->otg->state;
+ sysfs_notify(&musb->controller->kobj, NULL, "mode");
+ }
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+}
+
+static void musb_recover_from_babble(struct musb *musb)
+{
+ int ret;
+ u8 devctl;
+
+ musb_disable_interrupts(musb);
+
+ /*
+	 * wait at least 320 cycles of the 60 MHz clock. That's 5.3 us; we
+	 * give it some slack and wait 10 us.
+ */
+ udelay(10);
+
+ ret = musb_platform_recover(musb);
+ if (ret) {
+ musb_enable_interrupts(musb);
+ return;
+ }
+
+ /* drop session bit */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ /* tell usbcore about it */
+ musb_root_disconnect(musb);
+
+ /*
+ * When a babble condition occurs, the musb controller
+ * removes the session bit and the endpoint config is lost.
+ */
+ if (musb->dyn_fifo)
+ ret = ep_config_from_table(musb);
+ else
+ ret = ep_config_from_hw(musb);
+
+ /* restart session */
+ if (ret == 0)
+ musb_start(musb);
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *allocate_instance(struct device *dev,
+ const struct musb_hdrc_config *config, void __iomem *mbase)
+{
+ struct musb *musb;
+ struct musb_hw_ep *ep;
+ int epnum;
+ int ret;
+
+ musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
+ if (!musb)
+ return NULL;
+
+ INIT_LIST_HEAD(&musb->control);
+ INIT_LIST_HEAD(&musb->in_bulk);
+ INIT_LIST_HEAD(&musb->out_bulk);
+ INIT_LIST_HEAD(&musb->pending_list);
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
+ musb->mregs = mbase;
+ musb->ctrl_base = mbase;
+ musb->nIrq = -ENODEV;
+ musb->config = config;
+ BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
+ for (epnum = 0, ep = musb->endpoints;
+ epnum < musb->config->num_eps;
+ epnum++, ep++) {
+ ep->musb = musb;
+ ep->epnum = epnum;
+ }
+
+ musb->controller = dev;
+
+ ret = musb_host_alloc(musb);
+ if (ret < 0)
+ goto err_free;
+
+ dev_set_drvdata(dev, musb);
+
+ return musb;
+
+err_free:
+ return NULL;
+}
+
+static void musb_free(struct musb *musb)
+{
+ /* this has multiple entry modes. it handles fault cleanup after
+ * probe(), where things may be partially set up, as well as rmmod
+ * cleanup after everything's been de-activated.
+ */
+
+ if (musb->nIrq >= 0) {
+ if (musb->irq_wake)
+ disable_irq_wake(musb->nIrq);
+ free_irq(musb->nIrq, musb);
+ }
+
+ musb_host_free(musb);
+}
+
+struct musb_pending_work {
+ int (*callback)(struct musb *musb, void *data);
+ void *data;
+ struct list_head node;
+};
+
+#ifdef CONFIG_PM
+/*
+ * Called from musb_runtime_resume(), musb_resume(), and
+ * musb_queue_resume_work(). Callers must take musb->lock.
+ */
+static int musb_run_resume_work(struct musb *musb)
+{
+ struct musb_pending_work *w, *_w;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&musb->list_lock, flags);
+ list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
+ if (w->callback) {
+ error = w->callback(musb, w->data);
+ if (error < 0) {
+ dev_err(musb->controller,
+ "resume callback %p failed: %i\n",
+ w->callback, error);
+ }
+ }
+ list_del(&w->node);
+ devm_kfree(musb->controller, w);
+ }
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
+ return error;
+}
+#endif
+
+/*
+ * Called to run work if device is active or else queue the work to happen
+ * on resume. Caller must take musb->lock and must hold an RPM reference.
+ *
+ * If the device is not runtime-suspended, the callback is invoked
+ * immediately rather than queued.
+ */
+int musb_queue_resume_work(struct musb *musb,
+ int (*callback)(struct musb *musb, void *data),
+ void *data)
+{
+ struct musb_pending_work *w;
+ unsigned long flags;
+ bool is_suspended;
+ int error;
+
+ if (WARN_ON(!callback))
+ return -EINVAL;
+
+ spin_lock_irqsave(&musb->list_lock, flags);
+ is_suspended = musb->is_runtime_suspended;
+
+ if (is_suspended) {
+ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+ if (!w) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
+
+ w->callback = callback;
+ w->data = data;
+
+ list_add_tail(&w->node, &musb->pending_list);
+ error = 0;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
+ if (!is_suspended)
+ error = callback(musb, data);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_queue_resume_work);
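+
+/*
+ * Example of a resume-work callback: the caller of
+ * musb_queue_resume_work() holds musb->lock and an RPM reference as
+ * required above, and the callback runs either immediately or later
+ * from musb_run_resume_work(). A hypothetical sketch (illustration
+ * only, not part of this driver):
+ */
+static int example_resume_work(struct musb *musb, void *data)
+{
+	u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+	/* needs the hardware clocked, hence the deferral machinery */
+	musb_writeb(musb->mregs, MUSB_DEVCTL,
+		    devctl | MUSB_DEVCTL_SESSION);
+
+	return 0;
+}
+
+/* typical call: musb_queue_resume_work(musb, example_resume_work, NULL); */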
+
+static void musb_deassert_reset(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+
+ musb = container_of(work, struct musb, deassert_reset_work.work);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->port1_status & USB_PORT_STAT_RESET)
+ musb_port_reset(musb, false);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: IRQ number
+ * @ctrl: virtual address of controller registers,
+ * not yet corrected for platform-specific offsets
+ */
+static int
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+ int status;
+ struct musb *musb;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+
+ /* The driver might handle more features than the board; OK.
+ * Fail when the board needs a feature that's not enabled.
+ */
+ if (!plat) {
+ dev_err(dev, "no platform_data?\n");
+ status = -ENODEV;
+ goto fail0;
+ }
+
+ /* allocate */
+ musb = allocate_instance(dev, plat->config, ctrl);
+ if (!musb) {
+ status = -ENOMEM;
+ goto fail0;
+ }
+
+ spin_lock_init(&musb->lock);
+ spin_lock_init(&musb->list_lock);
+ musb->board_set_power = plat->set_power;
+ musb->min_power = plat->min_power;
+ musb->ops = plat->platform_ops;
+ musb->port_mode = plat->mode;
+
+ /*
+ * Initialize the default IO functions. At least omap2430 needs
+ * these early. We initialize the platform specific IO functions
+ * later on.
+ */
+ musb_readb = musb_default_readb;
+ musb_writeb = musb_default_writeb;
+ musb_readw = musb_default_readw;
+ musb_writew = musb_default_writew;
+
+ /* The musb_platform_init() call:
+ * - adjusts musb->mregs
+ * - sets the musb->isr
+ * - may initialize an integrated transceiver
+ * - initializes musb->xceiv, usually by otg_get_phy()
+ * - stops powering VBUS
+ *
+ * There are various transceiver configurations.
+ * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
+ * external/discrete ones in various flavors (twl4030 family,
+ * isp1504, non-OTG, etc) mostly hooking up through ULPI.
+ */
+ status = musb_platform_init(musb);
+ if (status < 0)
+ goto fail1;
+
+ if (!musb->isr) {
+ status = -ENODEV;
+ goto fail2;
+ }
+
+ /* Most devices use indexed offset or flat offset */
+ if (musb->ops->quirks & MUSB_INDEXED_EP) {
+ musb->io.ep_offset = musb_indexed_ep_offset;
+ musb->io.ep_select = musb_indexed_ep_select;
+ } else {
+ musb->io.ep_offset = musb_flat_ep_offset;
+ musb->io.ep_select = musb_flat_ep_select;
+ }
+
+ if (musb->ops->quirks & MUSB_G_NO_SKB_RESERVE)
+ musb->g.quirk_avoids_skb_reserve = 1;
+
+ /* At least tusb6010 has its own offsets */
+ if (musb->ops->ep_offset)
+ musb->io.ep_offset = musb->ops->ep_offset;
+ if (musb->ops->ep_select)
+ musb->io.ep_select = musb->ops->ep_select;
+
+ if (musb->ops->fifo_mode)
+ fifo_mode = musb->ops->fifo_mode;
+ else
+ fifo_mode = 4;
+
+ if (musb->ops->fifo_offset)
+ musb->io.fifo_offset = musb->ops->fifo_offset;
+ else
+ musb->io.fifo_offset = musb_default_fifo_offset;
+
+ if (musb->ops->busctl_offset)
+ musb->io.busctl_offset = musb->ops->busctl_offset;
+ else
+ musb->io.busctl_offset = musb_default_busctl_offset;
+
+ if (musb->ops->readb)
+ musb_readb = musb->ops->readb;
+ if (musb->ops->writeb)
+ musb_writeb = musb->ops->writeb;
+ if (musb->ops->clearb)
+ musb_clearb = musb->ops->clearb;
+ else
+ musb_clearb = musb_readb;
+
+ if (musb->ops->readw)
+ musb_readw = musb->ops->readw;
+ if (musb->ops->writew)
+ musb_writew = musb->ops->writew;
+ if (musb->ops->clearw)
+ musb_clearw = musb->ops->clearw;
+ else
+ musb_clearw = musb_readw;
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (!musb->ops->dma_init || !musb->ops->dma_exit) {
+ dev_err(dev, "DMA controller not set\n");
+ status = -ENODEV;
+ goto fail2;
+ }
+ musb_dma_controller_create = musb->ops->dma_init;
+ musb_dma_controller_destroy = musb->ops->dma_exit;
+#endif
+
+ if (musb->ops->read_fifo)
+ musb->io.read_fifo = musb->ops->read_fifo;
+ else
+ musb->io.read_fifo = musb_default_read_fifo;
+
+ if (musb->ops->write_fifo)
+ musb->io.write_fifo = musb->ops->write_fifo;
+ else
+ musb->io.write_fifo = musb_default_write_fifo;
+
+ if (musb->ops->get_toggle)
+ musb->io.get_toggle = musb->ops->get_toggle;
+ else
+ musb->io.get_toggle = musb_default_get_toggle;
+
+ if (musb->ops->set_toggle)
+ musb->io.set_toggle = musb->ops->set_toggle;
+ else
+ musb->io.set_toggle = musb_default_set_toggle;
+
+ if (!musb->xceiv->io_ops) {
+ musb->xceiv->io_dev = musb->controller;
+ musb->xceiv->io_priv = musb->mregs;
+ musb->xceiv->io_ops = &musb_ulpi_access;
+ }
+
+ if (musb->ops->phy_callback)
+ musb_phy_callback = musb->ops->phy_callback;
+
+ /*
+ * We need musb_read/write functions initialized for PM.
+ * Note that at least 2430 glue needs autosuspend delay
+ * somewhere above 300 ms for the hardware to idle properly
+ * after disconnecting the cable in host mode. Let's use
+ * 500 ms for some margin.
+ */
+ pm_runtime_use_autosuspend(musb->controller);
+ pm_runtime_set_autosuspend_delay(musb->controller, 500);
+ pm_runtime_enable(musb->controller);
+ pm_runtime_get_sync(musb->controller);
+
+ status = usb_phy_init(musb->xceiv);
+ if (status < 0)
+ goto err_usb_phy_init;
+
+ if (use_dma && dev->dma_mask) {
+ musb->dma_controller =
+ musb_dma_controller_create(musb, musb->mregs);
+ if (IS_ERR(musb->dma_controller)) {
+ status = PTR_ERR(musb->dma_controller);
+ goto fail2_5;
+ }
+ }
+
+ /* be sure interrupts are disabled before connecting ISR */
+ musb_platform_disable(musb);
+ musb_disable_interrupts(musb);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+ /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+ musb_writeb(musb->mregs, MUSB_POWER, 0);
+
+ /* Init IRQ workqueue before request_irq */
+ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
+ INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
+ INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
+
+ /* setup musb parts of the core (especially endpoints) */
+ status = musb_core_init(plat->config->multipoint
+ ? MUSB_CONTROLLER_MHDRC
+ : MUSB_CONTROLLER_HDRC, musb);
+ if (status < 0)
+ goto fail3;
+
+ timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
+
+ /* attach to the IRQ */
+ if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
+ dev_err(dev, "request_irq %d failed!\n", nIrq);
+ status = -ENODEV;
+ goto fail3;
+ }
+ musb->nIrq = nIrq;
+ /* FIXME this handles wakeup irqs wrong */
+ if (enable_irq_wake(nIrq) == 0) {
+ musb->irq_wake = 1;
+ device_init_wakeup(dev, 1);
+ } else {
+ musb->irq_wake = 0;
+ }
+
+ /* program PHY to use external vBus if required */
+ if (plat->extvbus) {
+ u8 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
+ busctl |= MUSB_ULPI_USE_EXTVBUS;
+ musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
+ }
+
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+
+ switch (musb->port_mode) {
+ case MUSB_HOST:
+ status = musb_host_setup(musb, plat->power);
+ if (status < 0)
+ goto fail3;
+ status = musb_platform_set_mode(musb, MUSB_HOST);
+ break;
+ case MUSB_PERIPHERAL:
+ status = musb_gadget_setup(musb);
+ if (status < 0)
+ goto fail3;
+ status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ break;
+ case MUSB_OTG:
+ status = musb_host_setup(musb, plat->power);
+ if (status < 0)
+ goto fail3;
+ status = musb_gadget_setup(musb);
+ if (status) {
+ musb_host_cleanup(musb);
+ goto fail3;
+ }
+ status = musb_platform_set_mode(musb, MUSB_OTG);
+ break;
+ default:
+ dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
+ break;
+ }
+
+ if (status < 0)
+ goto fail3;
+
+ musb_init_debugfs(musb);
+
+ musb->is_initialized = 1;
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
+ return 0;
+
+fail3:
+ cancel_delayed_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->finish_resume_work);
+ cancel_delayed_work_sync(&musb->deassert_reset_work);
+ if (musb->dma_controller)
+ musb_dma_controller_destroy(musb->dma_controller);
+
+fail2_5:
+ usb_phy_shutdown(musb->xceiv);
+
+err_usb_phy_init:
+ pm_runtime_dont_use_autosuspend(musb->controller);
+ pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
+
+fail2:
+ if (musb->irq_wake)
+ device_init_wakeup(dev, 0);
+ musb_platform_exit(musb);
+
+fail1:
+ dev_err_probe(musb->controller, status, "%s failed\n", __func__);
+
+ musb_free(musb);
+
+fail0:
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+static int musb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq_byname(pdev, "mc");
+ void __iomem *base;
+
+ if (irq <= 0)
+ return -ENODEV;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return musb_init_controller(dev, irq, base);
+}
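+
+/*
+ * Example: a glue layer creates the "musb-hdrc" child device that this
+ * driver probes; note the IRQ resource must be named "mc" for
+ * musb_probe() above. A minimal sketch, assuming pdata and the resource
+ * array are already set up (error unwinding condensed):
+ */
+static int example_glue_create_musb(struct platform_device *parent,
+				    struct musb_hdrc_platform_data *pdata,
+				    struct resource *res, int num_res)
+{
+	struct platform_device *musb;
+	int ret;
+
+	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+	if (!musb)
+		return -ENOMEM;
+
+	musb->dev.parent = &parent->dev;
+
+	ret = platform_device_add_resources(musb, res, num_res);
+	if (!ret)
+		ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (!ret)
+		ret = platform_device_add(musb);
+	if (ret)
+		platform_device_put(musb);
+
+	return ret;
+}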
+
+static int musb_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+
+ /* this gets called on rmmod.
+ * - Host mode: host may still be active
+ * - Peripheral mode: peripheral is deactivated (or never-activated)
+ * - OTG mode: both roles are deactivated (or never-activated)
+ */
+ musb_exit_debugfs(musb);
+
+ cancel_delayed_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->finish_resume_work);
+ cancel_delayed_work_sync(&musb->deassert_reset_work);
+ pm_runtime_get_sync(musb->controller);
+ musb_host_cleanup(musb);
+ musb_gadget_cleanup(musb);
+
+ musb_platform_disable(musb);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_disable_interrupts(musb);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ musb_platform_exit(musb);
+
+ pm_runtime_dont_use_autosuspend(musb->controller);
+ pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
+ musb_phy_callback = NULL;
+ if (musb->dma_controller)
+ musb_dma_controller_destroy(musb->dma_controller);
+ usb_phy_shutdown(musb->xceiv);
+ musb_free(musb);
+ device_init_wakeup(dev, 0);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static void musb_save_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *epio;
+
+ musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
+ musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ musb->context.busctl = musb_readb(musb_base, MUSB_ULPI_BUSCONTROL);
+ musb->context.power = musb_readb(musb_base, MUSB_POWER);
+ musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ musb->context.index = musb_readb(musb_base, MUSB_INDEX);
+ musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ epio = musb->endpoints[i].regs;
+ if (!epio)
+ continue;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb->context.index_regs[i].txmaxp =
+ musb_readw(epio, MUSB_TXMAXP);
+ musb->context.index_regs[i].txcsr =
+ musb_readw(epio, MUSB_TXCSR);
+ musb->context.index_regs[i].rxmaxp =
+ musb_readw(epio, MUSB_RXMAXP);
+ musb->context.index_regs[i].rxcsr =
+ musb_readw(epio, MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ musb->context.index_regs[i].txfifoadd =
+ musb_readw(musb_base, MUSB_TXFIFOADD);
+ musb->context.index_regs[i].rxfifoadd =
+ musb_readw(musb_base, MUSB_RXFIFOADD);
+ musb->context.index_regs[i].txfifosz =
+ musb_readb(musb_base, MUSB_TXFIFOSZ);
+ musb->context.index_regs[i].rxfifosz =
+ musb_readb(musb_base, MUSB_RXFIFOSZ);
+ }
+
+ musb->context.index_regs[i].txtype =
+ musb_readb(epio, MUSB_TXTYPE);
+ musb->context.index_regs[i].txinterval =
+ musb_readb(epio, MUSB_TXINTERVAL);
+ musb->context.index_regs[i].rxtype =
+ musb_readb(epio, MUSB_RXTYPE);
+ musb->context.index_regs[i].rxinterval =
+ musb_readb(epio, MUSB_RXINTERVAL);
+
+ musb->context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb, i);
+ musb->context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb, i);
+ musb->context.index_regs[i].txhubport =
+ musb_read_txhubport(musb, i);
+
+ musb->context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb, i);
+ musb->context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb, i);
+ musb->context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb, i);
+ }
+}
+
+static void musb_restore_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *epio;
+ u8 power;
+
+ musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
+ musb_writeb(musb_base, MUSB_ULPI_BUSCONTROL, musb->context.busctl);
+
+ /* Don't affect SUSPENDM/RESUME bits in POWER reg */
+ power = musb_readb(musb_base, MUSB_POWER);
+ power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
+ musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
+ power |= musb->context.power;
+ musb_writeb(musb_base, MUSB_POWER, power);
+
+ musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
+ if (musb->context.devctl & MUSB_DEVCTL_SESSION)
+ musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ epio = musb->endpoints[i].regs;
+ if (!epio)
+ continue;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_writew(epio, MUSB_TXMAXP,
+ musb->context.index_regs[i].txmaxp);
+ musb_writew(epio, MUSB_TXCSR,
+ musb->context.index_regs[i].txcsr);
+ musb_writew(epio, MUSB_RXMAXP,
+ musb->context.index_regs[i].rxmaxp);
+ musb_writew(epio, MUSB_RXCSR,
+ musb->context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_writeb(musb_base, MUSB_TXFIFOSZ,
+ musb->context.index_regs[i].txfifosz);
+ musb_writeb(musb_base, MUSB_RXFIFOSZ,
+ musb->context.index_regs[i].rxfifosz);
+ musb_writew(musb_base, MUSB_TXFIFOADD,
+ musb->context.index_regs[i].txfifoadd);
+ musb_writew(musb_base, MUSB_RXFIFOADD,
+ musb->context.index_regs[i].rxfifoadd);
+ }
+
+ musb_writeb(epio, MUSB_TXTYPE,
+ musb->context.index_regs[i].txtype);
+ musb_writeb(epio, MUSB_TXINTERVAL,
+ musb->context.index_regs[i].txinterval);
+ musb_writeb(epio, MUSB_RXTYPE,
+ musb->context.index_regs[i].rxtype);
+		musb_writeb(epio, MUSB_RXINTERVAL,
+				musb->context.index_regs[i].rxinterval);
+
+ musb_write_txfunaddr(musb, i,
+ musb->context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb, i,
+ musb->context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb, i,
+ musb->context.index_regs[i].txhubport);
+
+ musb_write_rxfunaddr(musb, i,
+ musb->context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(musb, i,
+ musb->context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(musb, i,
+ musb->context.index_regs[i].rxhubport);
+ }
+ musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
+}
+
+static int musb_suspend(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ musb_platform_disable(musb);
+ musb_disable_interrupts(musb);
+
+ musb->flush_irq_work = true;
+ while (flush_delayed_work(&musb->irq_work))
+ ;
+ musb->flush_irq_work = false;
+
+ if (!(musb->ops->quirks & MUSB_PRESERVE_SESSION))
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+ WARN_ON(!list_empty(&musb->pending_list));
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (is_peripheral_active(musb)) {
+ /* FIXME force disconnect unless we know USB will wake
+ * the system up quickly enough to respond ...
+ */
+ } else if (is_host_active(musb)) {
+ /* we know all the children are suspended; sometimes
+ * they will even be wakeup-enabled.
+ */
+ }
+
+ musb_save_context(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static int musb_resume(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int error;
+ u8 devctl;
+ u8 mask;
+
+ /*
+	 * For static CMOS like DaVinci, register values were preserved
+	 * unless for some reason the whole SoC powered down or the USB
+ * module got reset through the PSC (vs just being disabled).
+ *
+ * For the DSPS glue layer though, a full register restore has to
+ * be done. As it shouldn't harm other platforms, we do it
+ * unconditionally.
+ */
+
+ musb_restore_context(musb);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
+ if ((devctl & mask) != (musb->context.devctl & mask))
+ musb->port1_status = 0;
+
+ musb_enable_interrupts(musb);
+ musb_platform_enable(musb);
+
+ /* session might be disabled in suspend */
+ if (musb->port_mode == MUSB_HOST &&
+ !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) {
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+ dev_err(musb->controller, "resume work failed with %i\n",
+ error);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int musb_runtime_suspend(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+
+ musb_save_context(musb);
+ musb->is_runtime_suspended = 1;
+
+ return 0;
+}
+
+static int musb_runtime_resume(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int error;
+
+ /*
+	 * When pm_runtime_get_sync() is called for the first time during
+	 * driver init, some of the structures used by the restore function
+	 * are not yet initialized. The clock still needs to be enabled
+	 * before any register access, so pm_runtime_get_sync() has to be
+	 * called anyway. Also, a context restore without a prior save
+	 * makes no sense.
+ */
+ if (!musb->is_initialized)
+ return 0;
+
+ musb_restore_context(musb);
+
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+ dev_err(musb->controller, "resume work failed with %i\n",
+ error);
+ musb->is_runtime_suspended = 0;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops musb_dev_pm_ops = {
+ .suspend = musb_suspend,
+ .resume = musb_resume,
+ .runtime_suspend = musb_runtime_suspend,
+ .runtime_resume = musb_runtime_resume,
+};
+
+#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
+#else
+#define MUSB_DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver musb_driver = {
+ .driver = {
+ .name = musb_driver_name,
+ .bus = &platform_bus_type,
+ .pm = MUSB_DEV_PM_OPS,
+ .dev_groups = musb_groups,
+ },
+ .probe = musb_probe,
+ .remove = musb_remove,
+};
+
+module_platform_driver(musb_driver);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 000000000..a8a65effe
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef __MUSB_CORE_H__
+#define __MUSB_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/musb.h>
+#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+struct musb_qh;
+
+/* Helper defines for struct musb->hwvers */
+#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
+#define MUSB_HWVERS_RC 0x8000
+#define MUSB_HWVERS_1300 0x52C
+#define MUSB_HWVERS_1400 0x590
+#define MUSB_HWVERS_1800 0x720
+#define MUSB_HWVERS_1900 0x784
+#define MUSB_HWVERS_2000 0x800
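+
+/*
+ * Worked example: MUSB_HWVERS_1400 (0x590) decodes as major
+ * (0x590 >> 10) & 0x1f = 1 and minor 0x590 & 0x3ff = 400, i.e.
+ * RTL version 1.400; MUSB_HWVERS_RC marks a release candidate.
+ */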
+
+#include "musb_debug.h"
+#include "musb_dma.h"
+
+#include "musb_io.h"
+
+#include "musb_gadget.h"
+#include <linux/usb/hcd.h>
+#include "musb_host.h"
+
+/* NOTE: otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m) (!(m)->is_host)
+#define is_host_active(m) ((m)->is_host)
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musb_h_ep0_state {
+ MUSB_EP0_IDLE,
+ MUSB_EP0_START, /* expect ack of setup */
+ MUSB_EP0_IN, /* expect IN DATA */
+ MUSB_EP0_OUT, /* expect ack of OUT DATA */
+ MUSB_EP0_STATUS, /* expect ack of STATUS */
+} __attribute__ ((packed));
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+ MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */
+ MUSB_EP0_STAGE_SETUP, /* received SETUP */
+ MUSB_EP0_STAGE_TX, /* IN data */
+ MUSB_EP0_STAGE_RX, /* OUT data */
+ MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
+ MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
+ MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/*
+ * OTG protocol constants. See USB OTG 1.3 spec,
+ * sections 5.5 "Device Timings" and 6.6.5 "Timers".
+ */
+#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
+#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
+#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
+#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_musb)\
+ { (_musb)->is_host = true; }
+#define MUSB_DEV_MODE(_musb) \
+ { (_musb)->is_host = false; }
+
+#define test_devctl_hst_mode(_x) \
+ (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
+
+#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
+
+/******************************** TYPES *************************************/
+
+struct musb_io;
+
+/**
+ * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
+ * @quirks: flags for platform specific quirks
+ * @enable: enable device
+ * @disable: disable device
+ * @ep_offset: returns the end point offset
+ * @ep_select: selects the specified end point
+ * @fifo_mode: sets the fifo mode
+ * @fifo_offset: returns the fifo offset
+ * @readb: read 8 bits
+ * @writeb: write 8 bits
+ * @clearb: could be clear-on-readb or W1C
+ * @readw: read 16 bits
+ * @writew: write 16 bits
+ * @clearw: could be clear-on-readw or W1C
+ * @read_fifo: reads the fifo
+ * @write_fifo: writes to fifo
+ * @get_toggle: platform specific get toggle function
+ * @set_toggle: platform specific set toggle function
+ * @dma_init: platform specific dma init function
+ * @dma_exit: platform specific dma exit function
+ * @init: turns on clocks, sets up platform-specific registers, etc
+ * @exit: undoes @init
+ * @set_mode: forcefully changes operating mode
+ * @try_idle: tries to idle the IP
+ * @recover: platform-specific babble recovery
+ * @vbus_status: returns vbus status if possible
+ * @set_vbus: forces vbus status
+ * @pre_root_reset_end: called before the root usb port reset flag gets cleared
+ * @post_root_reset_end: called after the root usb port reset flag gets cleared
+ * @phy_callback: optional callback function for the phy to call
+ */
+struct musb_platform_ops {
+
+#define MUSB_G_NO_SKB_RESERVE BIT(9)
+#define MUSB_DA8XX BIT(8)
+#define MUSB_PRESERVE_SESSION BIT(7)
+#define MUSB_DMA_UX500 BIT(6)
+#define MUSB_DMA_CPPI41 BIT(5)
+#define MUSB_DMA_CPPI BIT(4)
+#define MUSB_DMA_TUSB_OMAP BIT(3)
+#define MUSB_DMA_INVENTRA BIT(2)
+#define MUSB_IN_TUSB BIT(1)
+#define MUSB_INDEXED_EP BIT(0)
+ u32 quirks;
+
+ int (*init)(struct musb *musb);
+ int (*exit)(struct musb *musb);
+
+ void (*enable)(struct musb *musb);
+ void (*disable)(struct musb *musb);
+
+ u32 (*ep_offset)(u8 epnum, u16 offset);
+ void (*ep_select)(void __iomem *mbase, u8 epnum);
+ u16 fifo_mode;
+ u32 (*fifo_offset)(u8 epnum);
+ u32 (*busctl_offset)(u8 epnum, u16 offset);
+ u8 (*readb)(void __iomem *addr, u32 offset);
+ void (*writeb)(void __iomem *addr, u32 offset, u8 data);
+ u8 (*clearb)(void __iomem *addr, u32 offset);
+ u16 (*readw)(void __iomem *addr, u32 offset);
+ void (*writew)(void __iomem *addr, u32 offset, u16 data);
+ u16 (*clearw)(void __iomem *addr, u32 offset);
+ void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
+ void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
+ struct dma_controller *
+ (*dma_init) (struct musb *musb, void __iomem *base);
+ void (*dma_exit)(struct dma_controller *c);
+ int (*set_mode)(struct musb *musb, u8 mode);
+ void (*try_idle)(struct musb *musb, unsigned long timeout);
+ int (*recover)(struct musb *musb);
+
+ int (*vbus_status)(struct musb *musb);
+ void (*set_vbus)(struct musb *musb, int on);
+
+ void (*pre_root_reset_end)(struct musb *musb);
+ void (*post_root_reset_end)(struct musb *musb);
+ int (*phy_callback)(enum musb_vbus_id_status status);
+ void (*clear_ep_rxintr)(struct musb *musb, int epnum);
+};
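+
+/*
+ * Minimal sketch of a glue layer filling in these ops; the xyz_* names
+ * are placeholders, not a real driver. Note that only @init and @exit
+ * are mandatory (musb_platform_init()/musb_platform_exit() below return
+ * -EINVAL when they are missing); all other hooks are optional.
+ *
+ *	static const struct musb_platform_ops xyz_musb_ops = {
+ *		.quirks		= MUSB_INDEXED_EP | MUSB_DMA_INVENTRA,
+ *		.init		= xyz_musb_init,
+ *		.exit		= xyz_musb_exit,
+ *		.enable		= xyz_musb_enable,
+ *		.disable	= xyz_musb_disable,
+ *		.set_vbus	= xyz_musb_set_vbus,
+ *	};
+ */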
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+ struct musb *musb;
+ void __iomem *fifo;
+ void __iomem *regs;
+
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
+ void __iomem *conf;
+#endif
+
+ /* index in musb->endpoints[] */
+ u8 epnum;
+
+ /* hardware configuration, possibly dynamic */
+ bool is_shared_fifo;
+ bool tx_double_buffered;
+ bool rx_double_buffered;
+ u16 max_packet_sz_tx;
+ u16 max_packet_sz_rx;
+
+ struct dma_channel *tx_channel;
+ struct dma_channel *rx_channel;
+
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
+ /* TUSB has "asynchronous" and "synchronous" dma modes */
+ dma_addr_t fifo_async;
+ dma_addr_t fifo_sync;
+ void __iomem *fifo_sync_va;
+#endif
+
+ /* currently scheduled peripheral endpoint */
+ struct musb_qh *in_qh;
+ struct musb_qh *out_qh;
+
+ u8 rx_reinit;
+ u8 tx_reinit;
+
+ /* peripheral side */
+ struct musb_ep ep_in; /* TX */
+ struct musb_ep ep_out; /* RX */
+};
+
+static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+ return next_request(&hw_ep->ep_in);
+}
+
+static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+ return next_request(&hw_ep->ep_out);
+}
+
+struct musb_csr_regs {
+ /* FIFO registers */
+ u16 txmaxp, txcsr, rxmaxp, rxcsr;
+ u16 rxfifoadd, txfifoadd;
+ u8 txtype, txinterval, rxtype, rxinterval;
+ u8 rxfifosz, txfifosz;
+ u8 txfunaddr, txhubaddr, txhubport;
+ u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musb_context_registers {
+
+ u8 power;
+ u8 intrusbe;
+ u16 frame;
+ u8 index, testmode;
+
+ u8 devctl, busctl, misc;
+ u32 otg_interfsel;
+
+ struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+};
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+ /* device lock */
+ spinlock_t lock;
+ spinlock_t list_lock; /* resume work list lock */
+
+ struct musb_io io;
+ const struct musb_platform_ops *ops;
+ struct musb_context_registers context;
+
+ irqreturn_t (*isr)(int, void *);
+ struct delayed_work irq_work;
+ struct delayed_work deassert_reset_work;
+ struct delayed_work finish_resume_work;
+ struct delayed_work gadget_work;
+ u16 hwvers;
+
+ u16 intrrxe;
+ u16 intrtxe;
+/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSB_PORT_STAT_RESUME (1 << 31)
+
+ u32 port1_status;
+
+ unsigned long rh_timer;
+
+ enum musb_h_ep0_state ep0_stage;
+
+ /* bulk traffic normally dedicates endpoint hardware, and each
+ * direction has its own ring of host side endpoints.
+ * we try to progress the transfer at the head of each endpoint's
+ * queue until it completes or NAKs too much; then we try the next
+ * endpoint.
+ */
+ struct musb_hw_ep *bulk_ep;
+
+ struct list_head control; /* of musb_qh */
+ struct list_head in_bulk; /* of musb_qh */
+ struct list_head out_bulk; /* of musb_qh */
+ struct list_head pending_list; /* pending work list */
+
+ struct timer_list otg_timer;
+ struct timer_list dev_timer;
+ struct notifier_block nb;
+
+ struct dma_controller *dma_controller;
+
+ struct device *controller;
+ void __iomem *ctrl_base;
+ void __iomem *mregs;
+
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
+ dma_addr_t async;
+ dma_addr_t sync;
+ void __iomem *sync_va;
+ u8 tusb_revision;
+#endif
+
+ /* passed down from chip/board specific irq handlers */
+ u8 int_usb;
+ u16 int_rx;
+ u16 int_tx;
+
+ struct usb_phy *xceiv;
+ struct phy *phy;
+
+ int nIrq;
+ unsigned irq_wake:1;
+
+ struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
+#define control_ep endpoints
+
+#define VBUSERR_RETRY_COUNT 3
+ u16 vbuserr_retry;
+ u16 epmask;
+ u8 nr_endpoints;
+
+ int (*board_set_power)(int state);
+
+ u8 min_power; /* vbus for periph, in mA/2 */
+
+ enum musb_mode port_mode;
+ bool session;
+ unsigned long quirk_retries;
+ bool is_host;
+
+ int a_wait_bcon; /* VBUS timeout in msecs */
+ unsigned long idle_timeout; /* Next timeout in jiffies */
+
+ unsigned is_initialized:1;
+ unsigned is_runtime_suspended:1;
+
+ /* active means connected and not suspended */
+ unsigned is_active:1;
+
+ unsigned is_multipoint:1;
+
+ unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
+ unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
+ unsigned dyn_fifo:1; /* dynamic FIFO supported? */
+
+ unsigned bulk_split:1;
+#define can_bulk_split(musb, type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+
+ unsigned bulk_combine:1;
+#define can_bulk_combine(musb, type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+
+ /* is_suspended means USB B_PERIPHERAL suspend */
+ unsigned is_suspended:1;
+
+ /* may_wakeup means remote wakeup is enabled */
+ unsigned may_wakeup:1;
+
+ /* is_self_powered is reported in device status and the
+ * config descriptor. is_bus_powered means B_PERIPHERAL
+ * draws some VBUS current; both can be true.
+ */
+ unsigned is_self_powered:1;
+ unsigned is_bus_powered:1;
+
+ unsigned set_address:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
+
+ unsigned flush_irq_work:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+ struct usb_hcd *hcd; /* the usb hcd */
+
+ const struct musb_hdrc_config *config;
+
+ int xceiv_old_state;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+};
+
+/* This must be included after struct musb is defined */
+#include "musb_regs.h"
+
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+ return container_of(g, struct musb, g);
+}
+
+static inline char *musb_ep_xfertype_string(u8 type)
+{
+ char *s;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ s = "ctrl";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ s = "iso";
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ s = "bulk";
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ s = "int";
+ break;
+ default:
+ s = "";
+ break;
+ }
+ return s;
+}
+
+static inline int musb_read_fifosize(struct musb *musb,
+ struct musb_hw_ep *hw_ep, u8 epnum)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 reg = 0;
+
+ /* read from core using indexed model */
+ reg = musb_readb(mbase, musb->io.ep_offset(epnum, MUSB_FIFOSIZE));
+	/* zero is returned when there are no more endpoints */
+ if (!reg)
+ return -ENODEV;
+
+ musb->nr_endpoints++;
+ musb->epmask |= (1 << epnum);
+
+ hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+ /* shared TX/RX FIFO? */
+ if ((reg & 0xf0) == 0xf0) {
+ hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+ hw_ep->is_shared_fifo = true;
+ return 0;
+ } else {
+ hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+ hw_ep->is_shared_fifo = false;
+ }
+
+ return 0;
+}
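+
+/*
+ * Worked example of the FIFOSIZE decoding above (illustrative register
+ * value): reg == 0x96 yields max_packet_sz_tx = 1 << 0x6 = 64 bytes and
+ * max_packet_sz_rx = 1 << 0x9 = 512 bytes; reg == 0xf3 (high nibble all
+ * ones) marks a shared 8-byte TX/RX FIFO.
+ */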
+
+static inline void musb_configure_ep0(struct musb *musb)
+{
+ musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].is_shared_fifo = true;
+}
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_stop(struct musb *musb);
+extern void musb_start(struct musb *musb);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+
+extern int musb_set_host(struct musb *musb);
+extern int musb_set_peripheral(struct musb *musb);
+
+extern void musb_load_testpacket(struct musb *);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_hnp_stop(struct musb *musb);
+
+int musb_queue_resume_work(struct musb *musb,
+ int (*callback)(struct musb *musb, void *data),
+ void *data);
+
+static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
+{
+ if (musb->ops->set_vbus)
+ musb->ops->set_vbus(musb, is_on);
+}
+
+static inline void musb_platform_enable(struct musb *musb)
+{
+ if (musb->ops->enable)
+ musb->ops->enable(musb);
+}
+
+static inline void musb_platform_disable(struct musb *musb)
+{
+ if (musb->ops->disable)
+ musb->ops->disable(musb);
+}
+
+static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
+{
+ if (!musb->ops->set_mode)
+ return 0;
+
+ return musb->ops->set_mode(musb, mode);
+}
+
+static inline void musb_platform_try_idle(struct musb *musb,
+ unsigned long timeout)
+{
+ if (musb->ops->try_idle)
+ musb->ops->try_idle(musb, timeout);
+}
+
+static inline int musb_platform_recover(struct musb *musb)
+{
+ if (!musb->ops->recover)
+ return 0;
+
+ return musb->ops->recover(musb);
+}
+
+static inline int musb_platform_get_vbus_status(struct musb *musb)
+{
+ if (!musb->ops->vbus_status)
+ return -EINVAL;
+
+ return musb->ops->vbus_status(musb);
+}
+
+static inline int musb_platform_init(struct musb *musb)
+{
+ if (!musb->ops->init)
+ return -EINVAL;
+
+ return musb->ops->init(musb);
+}
+
+static inline int musb_platform_exit(struct musb *musb)
+{
+ if (!musb->ops->exit)
+ return -EINVAL;
+
+ return musb->ops->exit(musb);
+}
+
+static inline void musb_platform_pre_root_reset_end(struct musb *musb)
+{
+ if (musb->ops->pre_root_reset_end)
+ musb->ops->pre_root_reset_end(musb);
+}
+
+static inline void musb_platform_post_root_reset_end(struct musb *musb)
+{
+ if (musb->ops->post_root_reset_end)
+ musb->ops->post_root_reset_end(musb);
+}
+
+static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+ if (musb->ops->clear_ep_rxintr)
+ musb->ops->clear_ep_rxintr(musb, epnum);
+}
+
+/*
+ * gets the "dr_mode" property from DT and converts it into musb_mode
+ * if the property is not found or not recognized returns MUSB_OTG
+ */
+extern enum musb_mode musb_get_mode(struct device *dev);
+
+#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
new file mode 100644
index 000000000..9589243e8
--- /dev/null
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sizes.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "cppi_dma.h"
+#include "musb_core.h"
+#include "musb_trace.h"
+
+#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
+
+#define EP_MODE_AUTOREQ_NONE 0
+#define EP_MODE_AUTOREQ_ALL_NEOP 1
+#define EP_MODE_AUTOREQ_ALWAYS 3
+
+#define EP_MODE_DMA_TRANSPARENT 0
+#define EP_MODE_DMA_RNDIS 1
+#define EP_MODE_DMA_GEN_RNDIS 3
+
+#define USB_CTRL_TX_MODE 0x70
+#define USB_CTRL_RX_MODE 0x74
+#define USB_CTRL_AUTOREQ 0xd0
+#define USB_TDOWN 0xd8
+
+#define MUSB_DMA_NUM_CHANNELS 15
+
+#define DA8XX_USB_MODE 0x10
+#define DA8XX_USB_AUTOREQ 0x14
+#define DA8XX_USB_TEARDOWN 0x1c
+
+#define DA8XX_DMA_NUM_CHANNELS 4
+
+struct cppi41_dma_controller {
+ struct dma_controller controller;
+ struct cppi41_dma_channel *rx_channel;
+ struct cppi41_dma_channel *tx_channel;
+ struct hrtimer early_tx;
+ struct list_head early_tx_list;
+ u32 rx_mode;
+ u32 tx_mode;
+ u32 auto_req;
+
+ u32 tdown_reg;
+ u32 autoreq_reg;
+
+ void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode);
+ u8 num_channels;
+};
+
+static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+{
+ u16 csr;
+ u8 toggle;
+
+ if (cppi41_channel->is_tx)
+ return;
+ if (!is_host_active(cppi41_channel->controller->controller.musb))
+ return;
+
+ csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
+ toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
+
+ cppi41_channel->usb_toggle = toggle;
+}
+
+static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+{
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ u16 csr;
+ u8 toggle;
+
+ if (cppi41_channel->is_tx)
+ return;
+ if (!is_host_active(musb))
+ return;
+
+ musb_ep_select(musb->mregs, hw_ep->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
+
+ /*
+	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
+	 * the data toggle may reset from DATA1 to DATA0 while receiving data
+	 * from more than one endpoint.
+ */
+ if (!toggle && toggle == cppi41_channel->usb_toggle) {
+ csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
+ musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
+ musb_dbg(musb, "Restoring DATA1 toggle.");
+ }
+
+ cppi41_channel->usb_toggle = toggle;
+}
+
+static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
+{
+ u8 epnum = hw_ep->epnum;
+ struct musb *musb = hw_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ u16 csr;
+
+ musb_ep_select(musb->mregs, hw_ep->epnum);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ return false;
+ return true;
+}
+
+static void cppi41_dma_callback(void *private_data,
+ const struct dmaengine_result *result);
+
+static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
+{
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ void __iomem *epio = hw_ep->regs;
+ u16 csr;
+
+ if (!cppi41_channel->prog_len ||
+ (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
+
+ /* done, complete */
+ cppi41_channel->channel.actual_len =
+ cppi41_channel->transferred;
+ cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ cppi41_channel->channel.rx_packet_done = true;
+
+ /*
+		 * transmit a ZLP using PIO mode for transfers whose size is a
+		 * multiple of the EP packet size.
+ */
+ if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
+ cppi41_channel->packet_sz) == 0) {
+ musb_ep_select(musb->mregs, hw_ep->epnum);
+ csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ trace_musb_cppi41_done(cppi41_channel);
+ musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
+ } else {
+ /* next iteration, reload */
+ struct dma_chan *dc = cppi41_channel->dc;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ u32 remain_bytes;
+
+ cppi41_channel->buf_addr += cppi41_channel->packet_sz;
+
+ remain_bytes = cppi41_channel->total_len;
+ remain_bytes -= cppi41_channel->transferred;
+ remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
+ cppi41_channel->prog_len = remain_bytes;
+
+ direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
+ : DMA_DEV_TO_MEM;
+ dma_desc = dmaengine_prep_slave_single(dc,
+ cppi41_channel->buf_addr,
+ remain_bytes,
+ direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (WARN_ON(!dma_desc))
+ return;
+
+ dma_desc->callback_result = cppi41_dma_callback;
+ dma_desc->callback_param = &cppi41_channel->channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ trace_musb_cppi41_cont(cppi41_channel);
+ dma_async_issue_pending(dc);
+
+ if (!cppi41_channel->is_tx) {
+ musb_ep_select(musb->mregs, hw_ep->epnum);
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+}
+
+static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+{
+ struct cppi41_dma_controller *controller;
+ struct cppi41_dma_channel *cppi41_channel, *n;
+ struct musb *musb;
+ unsigned long flags;
+ enum hrtimer_restart ret = HRTIMER_NORESTART;
+
+ controller = container_of(timer, struct cppi41_dma_controller,
+ early_tx);
+ musb = controller->controller.musb;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
+ tx_check) {
+ bool empty;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ list_del_init(&cppi41_channel->tx_check);
+ cppi41_trans_done(cppi41_channel);
+ }
+ }
+
+ if (!list_empty(&controller->early_tx_list) &&
+ !hrtimer_is_queued(&controller->early_tx)) {
+ ret = HRTIMER_RESTART;
+ hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+static void cppi41_dma_callback(void *private_data,
+ const struct dmaengine_result *result)
+{
+ struct dma_channel *channel = private_data;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct cppi41_dma_controller *controller;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+ struct dma_tx_state txstate;
+ u32 transferred;
+ int is_hs = 0;
+ bool empty;
+
+ controller = cppi41_channel->controller;
+ if (controller->controller.dma_callback)
+ controller->controller.dma_callback(&controller->controller);
+
+ if (result->result == DMA_TRANS_ABORTED)
+ return;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
+ &txstate);
+ transferred = cppi41_channel->prog_len - txstate.residue;
+ cppi41_channel->transferred += transferred;
+
+ trace_musb_cppi41_gb(cppi41_channel);
+ update_rx_toggle(cppi41_channel);
+
+ if (cppi41_channel->transferred == cppi41_channel->total_len ||
+ transferred < cppi41_channel->packet_sz)
+ cppi41_channel->prog_len = 0;
+
+ if (cppi41_channel->is_tx) {
+ u8 type;
+
+ if (is_host_active(musb))
+ type = hw_ep->out_qh->type;
+ else
+ type = hw_ep->ep_in.type;
+
+ if (type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * Don't use the early-TX-interrupt workaround below
+			 * for isochronous transfers. Since isochronous
+			 * transfers are periodic, by the time the next
+			 * transfer is scheduled, the current one should
+			 * already be done.
+			 *
+			 * This avoids audio playback underruns.
+ */
+ empty = true;
+ else
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ }
+
+ if (!cppi41_channel->is_tx || empty) {
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+
+ /*
+ * On AM335x it has been observed that the TX interrupt fires
+	 * too early, meaning the TXFIFO is not yet empty but the DMA
+	 * engine says that it is done with the transfer. We don't
+	 * receive a FIFO-empty interrupt, so the only thing we can do
+	 * is poll for the bit. On HS it usually takes 2us, on FS around
+	 * 110us - 150us depending on the transfer size.
+	 * We spin on HS (no longer than 25us) and set up a timer on
+ * FS to check for the bit and complete the transfer.
+ */
+ if (is_host_active(musb)) {
+ if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
+ is_hs = 1;
+ } else {
+ if (musb->g.speed == USB_SPEED_HIGH)
+ is_hs = 1;
+ }
+ if (is_hs) {
+ unsigned wait = 25;
+
+ do {
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+ wait--;
+ if (!wait)
+ break;
+ cpu_relax();
+ } while (1);
+ }
+ list_add_tail(&cppi41_channel->tx_check,
+ &controller->early_tx_list);
+ if (!hrtimer_is_queued(&controller->early_tx)) {
+ unsigned long usecs = cppi41_channel->total_len / 10;
+
+ hrtimer_start_range_ns(&controller->early_tx,
+ usecs * NSEC_PER_USEC,
+ 20 * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
+ }
+
+out:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
+{
+ unsigned shift;
+
+ shift = (ep - 1) * 2;
+ old &= ~(3 << shift);
+ old |= mode << shift;
+ return old;
+}
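+
+/*
+ * Example of the 2-bits-per-endpoint packing done above (illustrative
+ * values): for ep == 2, mode == EP_MODE_DMA_GEN_RNDIS (3) and old == 0,
+ * the shift is (2 - 1) * 2 == 2 and the result is 0xc, i.e. bits 3:2
+ * now hold the mode for endpoint 2.
+ */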
+
+static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ if (cppi41_channel->is_tx)
+ old_mode = controller->tx_mode;
+ else
+ old_mode = controller->rx_mode;
+ port = cppi41_channel->port_num;
+ new_mode = update_ep_mode(port, mode, old_mode);
+
+ if (new_mode == old_mode)
+ return;
+ if (cppi41_channel->is_tx) {
+ controller->tx_mode = new_mode;
+ musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
+ } else {
+ controller->rx_mode = new_mode;
+ musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
+ }
+}
+
+static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+ unsigned int shift;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->tx_mode;
+ port = cppi41_channel->port_num;
+
+ shift = (port - 1) * 4;
+ if (!cppi41_channel->is_tx)
+ shift += 16;
+ new_mode = old_mode & ~(3 << shift);
+ new_mode |= mode << shift;
+
+ if (new_mode == old_mode)
+ return;
+ controller->tx_mode = new_mode;
+ musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
+}
+
+
+static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->auto_req;
+ port = cppi41_channel->port_num;
+ new_mode = update_ep_mode(port, mode, old_mode);
+
+ if (new_mode == old_mode)
+ return;
+ controller->auto_req = new_mode;
+ musb_writel(controller->controller.musb->ctrl_base,
+ controller->autoreq_reg, new_mode);
+}
+
+static bool cppi41_configure_channel(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct dma_chan *dc = cppi41_channel->dc;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ struct musb *musb = cppi41_channel->controller->controller.musb;
+ unsigned use_gen_rndis = 0;
+
+ cppi41_channel->buf_addr = dma_addr;
+ cppi41_channel->total_len = len;
+ cppi41_channel->transferred = 0;
+ cppi41_channel->packet_sz = packet_sz;
+ cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;
+
+ /*
+	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer
+	 * more than the max packet size at a time.
+ */
+ if (cppi41_channel->is_tx)
+ use_gen_rndis = 1;
+
+ if (use_gen_rndis) {
+ /* RNDIS mode */
+ if (len > packet_sz) {
+ musb_writel(musb->ctrl_base,
+ RNDIS_REG(cppi41_channel->port_num), len);
+ /* gen rndis */
+ controller->set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_GEN_RNDIS);
+
+ /* auto req */
+ cppi41_set_autoreq_mode(cppi41_channel,
+ EP_MODE_AUTOREQ_ALL_NEOP);
+ } else {
+ musb_writel(musb->ctrl_base,
+ RNDIS_REG(cppi41_channel->port_num), 0);
+ controller->set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
+ cppi41_set_autoreq_mode(cppi41_channel,
+ EP_MODE_AUTOREQ_NONE);
+ }
+ } else {
+ /* fallback mode */
+ controller->set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
+ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+ len = min_t(u32, packet_sz, len);
+ }
+ cppi41_channel->prog_len = len;
+ direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ return false;
+
+ dma_desc->callback_result = cppi41_dma_callback;
+ dma_desc->callback_param = channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ cppi41_channel->channel.rx_packet_done = false;
+
+ trace_musb_cppi41_config(cppi41_channel);
+
+ save_rx_toggle(cppi41_channel);
+ dma_async_issue_pending(dc);
+ return true;
+}
+
+static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 is_tx)
+{
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+ struct cppi41_dma_channel *cppi41_channel = NULL;
+ u8 ch_num = hw_ep->epnum - 1;
+
+ if (ch_num >= controller->num_channels)
+ return NULL;
+
+ if (is_tx)
+ cppi41_channel = &controller->tx_channel[ch_num];
+ else
+ cppi41_channel = &controller->rx_channel[ch_num];
+
+ if (!cppi41_channel->dc)
+ return NULL;
+
+ if (cppi41_channel->is_allocated)
+ return NULL;
+
+ cppi41_channel->hw_ep = hw_ep;
+ cppi41_channel->is_allocated = 1;
+
+ trace_musb_cppi41_alloc(cppi41_channel);
+ return &cppi41_channel->channel;
+}
+
+static void cppi41_dma_channel_release(struct dma_channel *channel)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+
+ trace_musb_cppi41_free(cppi41_channel);
+ if (cppi41_channel->is_allocated) {
+ cppi41_channel->is_allocated = 0;
+ channel->status = MUSB_DMA_STATUS_FREE;
+ channel->actual_len = 0;
+ }
+}
+
+static int cppi41_dma_channel_program(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ int ret;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ int hb_mult = 0;
+
+ BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ channel->status == MUSB_DMA_STATUS_BUSY);
+
+ if (is_host_active(cppi41_channel->controller->controller.musb)) {
+ if (cppi41_channel->is_tx)
+ hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
+ else
+ hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
+ }
+
+ channel->status = MUSB_DMA_STATUS_BUSY;
+ channel->actual_len = 0;
+
+ if (hb_mult)
+ packet_sz = hb_mult * (packet_sz & 0x7FF);
+
+ ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
+ if (!ret)
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return ret;
+}
+
+static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
+ void *buf, u32 length)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+
+ if (is_host_active(musb)) {
+ WARN_ON(1);
+ return 1;
+ }
+ if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
+ return 0;
+ if (cppi41_channel->is_tx)
+ return 1;
+ /* AM335x Advisory 1.0.13. No workaround for device RX mode */
+ return 0;
+}
+
+static int cppi41_dma_channel_abort(struct dma_channel *channel)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+ void __iomem *epio = cppi41_channel->hw_ep->regs;
+ int tdbit;
+ int ret;
+ unsigned is_tx;
+ u16 csr;
+
+ is_tx = cppi41_channel->is_tx;
+ trace_musb_cppi41_abort(cppi41_channel);
+
+ if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
+ return 0;
+
+ list_del_init(&cppi41_channel->tx_check);
+ if (is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+
+		/* delay to let the cppi dma pipeline drain for isoch */
+ udelay(250);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+		/* wait for the cppi dma pipeline to drain */
+ udelay(50);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ csr |= MUSB_RXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
+	/* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
+ if (musb->ops->quirks & MUSB_DA8XX)
+ mdelay(250);
+
+ tdbit = 1 << cppi41_channel->port_num;
+ if (is_tx)
+ tdbit <<= 16;
+
+ do {
+ if (is_tx)
+ musb_writel(musb->ctrl_base, controller->tdown_reg,
+ tdbit);
+ ret = dmaengine_terminate_all(cppi41_channel->dc);
+ } while (ret == -EAGAIN);
+
+ if (is_tx) {
+ musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+ cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ return 0;
+}
+
+static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
+{
+ struct dma_chan *dc;
+ int i;
+
+ for (i = 0; i < ctrl->num_channels; i++) {
+ dc = ctrl->tx_channel[i].dc;
+ if (dc)
+ dma_release_channel(dc);
+ dc = ctrl->rx_channel[i].dc;
+ if (dc)
+ dma_release_channel(dc);
+ }
+}
+
+static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
+{
+ cppi41_release_all_dma_chans(controller);
+}
+
+static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+{
+ struct musb *musb = controller->controller.musb;
+ struct device *dev = musb->controller;
+ struct device_node *np = dev->parent->of_node;
+ struct cppi41_dma_channel *cppi41_channel;
+ int count;
+ int i;
+ int ret;
+
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ struct dma_chan *dc;
+ struct dma_channel *musb_dma;
+ const char *str;
+ unsigned is_tx;
+ unsigned int port;
+
+ ret = of_property_read_string_index(np, "dma-names", i, &str);
+ if (ret)
+ goto err;
+ if (strstarts(str, "tx"))
+ is_tx = 1;
+ else if (strstarts(str, "rx"))
+ is_tx = 0;
+ else {
+ dev_err(dev, "Wrong dmatype %s\n", str);
+ goto err;
+ }
+ ret = kstrtouint(str + 2, 0, &port);
+ if (ret)
+ goto err;
+
+ ret = -EINVAL;
+ if (port > controller->num_channels || !port)
+ goto err;
+ if (is_tx)
+ cppi41_channel = &controller->tx_channel[port - 1];
+ else
+ cppi41_channel = &controller->rx_channel[port - 1];
+
+ cppi41_channel->controller = controller;
+ cppi41_channel->port_num = port;
+ cppi41_channel->is_tx = is_tx;
+ INIT_LIST_HEAD(&cppi41_channel->tx_check);
+
+ musb_dma = &cppi41_channel->channel;
+ musb_dma->private_data = cppi41_channel;
+ musb_dma->status = MUSB_DMA_STATUS_FREE;
+ musb_dma->max_len = SZ_4M;
+
+ dc = dma_request_chan(dev->parent, str);
+ if (IS_ERR(dc)) {
+ ret = dev_err_probe(dev, PTR_ERR(dc),
+ "Failed to request %s.\n", str);
+ goto err;
+ }
+
+ cppi41_channel->dc = dc;
+ }
+ return 0;
+err:
+ cppi41_release_all_dma_chans(controller);
+ return ret;
+}
+
+void cppi41_dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+
+ hrtimer_cancel(&controller->early_tx);
+ cppi41_dma_controller_stop(controller);
+ kfree(controller->rx_channel);
+ kfree(controller->tx_channel);
+ kfree(controller);
+}
+EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
+
+struct dma_controller *
+cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct cppi41_dma_controller *controller;
+ int channel_size;
+ int ret = 0;
+
+ if (!musb->controller->parent->of_node) {
+ dev_err(musb->controller, "Need DT for the DMA engine.\n");
+ return NULL;
+ }
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ goto kzalloc_fail;
+
+ hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ controller->early_tx.function = cppi41_recheck_tx_req;
+ INIT_LIST_HEAD(&controller->early_tx_list);
+
+ controller->controller.channel_alloc = cppi41_dma_channel_allocate;
+ controller->controller.channel_release = cppi41_dma_channel_release;
+ controller->controller.channel_program = cppi41_dma_channel_program;
+ controller->controller.channel_abort = cppi41_dma_channel_abort;
+ controller->controller.is_compatible = cppi41_is_compatible;
+ controller->controller.musb = musb;
+
+ if (musb->ops->quirks & MUSB_DA8XX) {
+ controller->tdown_reg = DA8XX_USB_TEARDOWN;
+ controller->autoreq_reg = DA8XX_USB_AUTOREQ;
+ controller->set_dma_mode = da8xx_set_dma_mode;
+ controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
+ } else {
+ controller->tdown_reg = USB_TDOWN;
+ controller->autoreq_reg = USB_CTRL_AUTOREQ;
+ controller->set_dma_mode = cppi41_set_dma_mode;
+ controller->num_channels = MUSB_DMA_NUM_CHANNELS;
+ }
+
+ channel_size = controller->num_channels *
+ sizeof(struct cppi41_dma_channel);
+ controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->rx_channel)
+ goto rx_channel_alloc_fail;
+ controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->tx_channel)
+ goto tx_channel_alloc_fail;
+
+ ret = cppi41_dma_controller_start(controller);
+ if (ret)
+ goto plat_get_fail;
+ return &controller->controller;
+
+plat_get_fail:
+ kfree(controller->tx_channel);
+tx_channel_alloc_fail:
+ kfree(controller->rx_channel);
+rx_channel_alloc_fail:
+ kfree(controller);
+kzalloc_fail:
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 000000000..e5b3506c7
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+ do { printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); } while (0)
+#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+void musb_dbg(struct musb *musb, const char *fmt, ...);
+
+#ifdef CONFIG_DEBUG_FS
+void musb_init_debugfs(struct musb *musb);
+void musb_exit_debugfs(struct musb *musb);
+#else
+static inline void musb_init_debugfs(struct musb *musb)
+{
+}
+static inline void musb_exit_debugfs(struct musb *musb)
+{
+}
+#endif
+
+#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
new file mode 100644
index 000000000..5401ae668
--- /dev/null
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG driver debugfs support
+ *
+ * Copyright 2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/uaccess.h>
+
+#include "musb_core.h"
+#include "musb_debug.h"
+
+struct musb_register_map {
+ char *name;
+ unsigned offset;
+ unsigned size;
+};
+
+static const struct musb_register_map musb_regmap[] = {
+ { "FAddr", MUSB_FADDR, 8 },
+ { "Power", MUSB_POWER, 8 },
+ { "Frame", MUSB_FRAME, 16 },
+ { "Index", MUSB_INDEX, 8 },
+ { "Testmode", MUSB_TESTMODE, 8 },
+ { "TxMaxPp", MUSB_TXMAXP, 16 },
+ { "TxCSRp", MUSB_TXCSR, 16 },
+ { "RxMaxPp", MUSB_RXMAXP, 16 },
+ { "RxCSR", MUSB_RXCSR, 16 },
+ { "RxCount", MUSB_RXCOUNT, 16 },
+ { "IntrRxE", MUSB_INTRRXE, 16 },
+ { "IntrTxE", MUSB_INTRTXE, 16 },
+ { "IntrUsbE", MUSB_INTRUSBE, 8 },
+ { "DevCtl", MUSB_DEVCTL, 8 },
+ { "VControl", 0x68, 32 },
+ { "HWVers", MUSB_HWVERS, 16 },
+ { "LinkInfo", MUSB_LINKINFO, 8 },
+ { "VPLen", MUSB_VPLEN, 8 },
+ { "HS_EOF1", MUSB_HS_EOF1, 8 },
+ { "FS_EOF1", MUSB_FS_EOF1, 8 },
+ { "LS_EOF1", MUSB_LS_EOF1, 8 },
+ { "SOFT_RST", 0x7F, 8 },
+ { "DMA_CNTLch0", 0x204, 16 },
+ { "DMA_ADDRch0", 0x208, 32 },
+ { "DMA_COUNTch0", 0x20C, 32 },
+ { "DMA_CNTLch1", 0x214, 16 },
+ { "DMA_ADDRch1", 0x218, 32 },
+ { "DMA_COUNTch1", 0x21C, 32 },
+ { "DMA_CNTLch2", 0x224, 16 },
+ { "DMA_ADDRch2", 0x228, 32 },
+ { "DMA_COUNTch2", 0x22C, 32 },
+ { "DMA_CNTLch3", 0x234, 16 },
+ { "DMA_ADDRch3", 0x238, 32 },
+ { "DMA_COUNTch3", 0x23C, 32 },
+ { "DMA_CNTLch4", 0x244, 16 },
+ { "DMA_ADDRch4", 0x248, 32 },
+ { "DMA_COUNTch4", 0x24C, 32 },
+ { "DMA_CNTLch5", 0x254, 16 },
+ { "DMA_ADDRch5", 0x258, 32 },
+ { "DMA_COUNTch5", 0x25C, 32 },
+ { "DMA_CNTLch6", 0x264, 16 },
+ { "DMA_ADDRch6", 0x268, 32 },
+ { "DMA_COUNTch6", 0x26C, 32 },
+ { "DMA_CNTLch7", 0x274, 16 },
+ { "DMA_ADDRch7", 0x278, 32 },
+ { "DMA_COUNTch7", 0x27C, 32 },
+	{ "ConfigData",	MUSB_CONFIGDATA, 8 },
+	{ "BabbleCtl",	MUSB_BABBLE_CTL, 8 },
+ { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
+ { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
+ { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
+ { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
+ { "EPInfo", MUSB_EPINFO, 8 },
+ { "RAMInfo", MUSB_RAMINFO, 8 },
+ { } /* Terminating Entry */
+};
+
+static int musb_regdump_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned i;
+
+ seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+ pm_runtime_get_sync(musb->controller);
+
+ for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
+ switch (musb_regmap[i].size) {
+ case 8:
+ seq_printf(s, "%-12s: %02x\n", musb_regmap[i].name,
+ musb_readb(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 16:
+ seq_printf(s, "%-12s: %04x\n", musb_regmap[i].name,
+ musb_readw(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 32:
+ seq_printf(s, "%-12s: %08x\n", musb_regmap[i].name,
+ musb_readl(musb->mregs, musb_regmap[i].offset));
+ break;
+ }
+ }
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(musb_regdump);
+
+static int musb_test_mode_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned test;
+
+ pm_runtime_get_sync(musb->controller);
+ test = musb_readb(musb->mregs, MUSB_TESTMODE);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
+ if (test == (MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS))
+ seq_printf(s, "force host full-speed\n");
+
+ else if (test == (MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_HS))
+ seq_printf(s, "force host high-speed\n");
+
+ else if (test == MUSB_TEST_FORCE_HOST)
+ seq_printf(s, "force host\n");
+
+ else if (test == MUSB_TEST_FIFO_ACCESS)
+ seq_printf(s, "fifo access\n");
+
+ else if (test == MUSB_TEST_FORCE_FS)
+ seq_printf(s, "force full-speed\n");
+
+ else if (test == MUSB_TEST_FORCE_HS)
+ seq_printf(s, "force high-speed\n");
+
+ else if (test == MUSB_TEST_PACKET)
+ seq_printf(s, "test packet\n");
+
+ else if (test == MUSB_TEST_K)
+ seq_printf(s, "test K\n");
+
+ else if (test == MUSB_TEST_J)
+ seq_printf(s, "test J\n");
+
+ else if (test == MUSB_TEST_SE0_NAK)
+ seq_printf(s, "test SE0 NAK\n");
+
+ return 0;
+}
+
+static int musb_test_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, musb_test_mode_show, inode->i_private);
+}
+
+static ssize_t musb_test_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct musb *musb = s->private;
+ u8 test;
+ char buf[24];
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ pm_runtime_get_sync(musb->controller);
+ test = musb_readb(musb->mregs, MUSB_TESTMODE);
+ if (test) {
+ dev_err(musb->controller, "Error: test mode is already set. "
+ "Please do USB Bus Reset to start a new test.\n");
+ goto ret;
+ }
+
+ if (strstarts(buf, "force host full-speed"))
+ test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS;
+
+ else if (strstarts(buf, "force host high-speed"))
+ test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_HS;
+
+ else if (strstarts(buf, "force host"))
+ test = MUSB_TEST_FORCE_HOST;
+
+ else if (strstarts(buf, "fifo access"))
+ test = MUSB_TEST_FIFO_ACCESS;
+
+ else if (strstarts(buf, "force full-speed"))
+ test = MUSB_TEST_FORCE_FS;
+
+ else if (strstarts(buf, "force high-speed"))
+ test = MUSB_TEST_FORCE_HS;
+
+ else if (strstarts(buf, "test packet")) {
+ test = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ }
+
+ else if (strstarts(buf, "test K"))
+ test = MUSB_TEST_K;
+
+ else if (strstarts(buf, "test J"))
+ test = MUSB_TEST_J;
+
+ else if (strstarts(buf, "test SE0 NAK"))
+ test = MUSB_TEST_SE0_NAK;
+
+ musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+
+ret:
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ return count;
+}
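+
+/*
+ * Usage sketch; the directory name below depends on dev_name() of the
+ * controller, so treat the path as an example only:
+ *
+ *	# cat /sys/kernel/debug/usb/musb-hdrc.0/testmode
+ *	# echo "test packet" > /sys/kernel/debug/usb/musb-hdrc.0/testmode
+ *
+ * A write is only honored while no test mode is currently set; a USB
+ * bus reset is needed before starting a new test.
+ */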
+
+static const struct file_operations musb_test_mode_fops = {
+ .open = musb_test_mode_open,
+ .write = musb_test_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int musb_softconnect_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ u8 reg;
+ int connect;
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_WAIT_BCON:
+ pm_runtime_get_sync(musb->controller);
+
+ reg = musb_readb(musb->mregs, MUSB_DEVCTL);
+ connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ break;
+ default:
+ connect = -1;
+ }
+
+ seq_printf(s, "%d\n", connect);
+
+ return 0;
+}
+
+static int musb_softconnect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, musb_softconnect_show, inode->i_private);
+}
+
+static ssize_t musb_softconnect_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct musb *musb = s->private;
+ char buf[2];
+ u8 reg;
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ pm_runtime_get_sync(musb->controller);
+ if (!strncmp(buf, "0", 1)) {
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_HOST:
+ musb_root_disconnect(musb);
+ reg = musb_readb(musb->mregs, MUSB_DEVCTL);
+ reg &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, reg);
+ break;
+ default:
+ break;
+ }
+ } else if (!strncmp(buf, "1", 1)) {
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ /*
+ * musb_save_context() called in musb_runtime_suspend()
+ * might cache devctl with SESSION bit cleared during
+ * soft-disconnect, so specifically set SESSION bit
+ * here to preserve it for musb_runtime_resume().
+ */
+ musb->context.devctl |= MUSB_DEVCTL_SESSION;
+ reg = musb_readb(musb->mregs, MUSB_DEVCTL);
+ reg |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, reg);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ return count;
+}
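+
+/*
+ * Usage sketch (the path is illustrative, see the testmode example
+ * above): writing "0" in A_HOST state drops the session, writing "1"
+ * in A_WAIT_BCON starts one:
+ *
+ *	# echo 0 > /sys/kernel/debug/usb/musb-hdrc.0/softconnect
+ *	# echo 1 > /sys/kernel/debug/usb/musb-hdrc.0/softconnect
+ */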
+
+/*
+ * In host mode, connect/disconnect the bus without physically
+ * removing the devices.
+ */
+static const struct file_operations musb_softconnect_fops = {
+ .open = musb_softconnect_open,
+ .write = musb_softconnect_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void musb_init_debugfs(struct musb *musb)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(dev_name(musb->controller), usb_debug_root);
+ musb->debugfs_root = root;
+
+ debugfs_create_file("regdump", S_IRUGO, root, musb, &musb_regdump_fops);
+ debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, musb,
+ &musb_test_mode_fops);
+ debugfs_create_file("softconnect", S_IRUGO | S_IWUSR, root, musb,
+ &musb_softconnect_fops);
+}
+
+void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb)
+{
+ debugfs_remove_recursive(musb->debugfs_root);
+}
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 000000000..7d67b69df
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores. On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does. Responsibilities
+ * of a DMA controller driver include:
+ *
+ * - Handling the details of moving multiple USB packets
+ * in cooperation with the Inventra USB core, including especially
+ * the correct RX side treatment of short packets and buffer-full
+ * states (both of which terminate transfers).
+ *
+ * - Knowing the correlation between dma channels and the
+ * Inventra core's local endpoint resources and data direction.
+ *
+ * - Maintaining a list of allocated/available channels.
+ *
+ * - Updating channel status on interrupts,
+ * whether shared with the Inventra core or separate.
+ */
+
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#ifdef CONFIG_MUSB_PIO_ONLY
+#define is_dma_capable() (0)
+#else
+#define is_dma_capable() (1)
+#endif
+
+#ifdef CONFIG_USB_UX500_DMA
+#define musb_dma_ux500(musb) (musb->ops->quirks & MUSB_DMA_UX500)
+#else
+#define musb_dma_ux500(musb) 0
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+#define musb_dma_cppi41(musb) (musb->ops->quirks & MUSB_DMA_CPPI41)
+#else
+#define musb_dma_cppi41(musb) 0
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#define musb_dma_cppi(musb) (musb->ops->quirks & MUSB_DMA_CPPI)
+#else
+#define musb_dma_cppi(musb) 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap(musb) (musb->ops->quirks & MUSB_DMA_TUSB_OMAP)
+#else
+#define tusb_dma_omap(musb) 0
+#endif
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+#define musb_dma_inventra(musb) (musb->ops->quirks & MUSB_DMA_INVENTRA)
+#else
+#define musb_dma_inventra(musb) 0
+#endif
+
+#if defined(CONFIG_USB_TI_CPPI_DMA) || defined(CONFIG_USB_TI_CPPI41_DMA)
+#define is_cppi_enabled(musb) \
+ (musb_dma_cppi(musb) || musb_dma_cppi41(musb))
+#else
+#define is_cppi_enabled(musb) 0
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+ /* unallocated */
+ MUSB_DMA_STATUS_UNKNOWN,
+ /* allocated ... but not busy, no errors */
+ MUSB_DMA_STATUS_FREE,
+ /* busy ... transactions are active */
+ MUSB_DMA_STATUS_BUSY,
+ /* transaction(s) aborted due to ... dma or memory bus error */
+ MUSB_DMA_STATUS_BUS_ABORT,
+ /* transaction(s) aborted due to ... core error or USB fault */
+ MUSB_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ * transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+ void *private_data;
+ /* FIXME not void* private_data, but a dma_controller * */
+ size_t max_len;
+ size_t actual_len;
+ enum dma_channel_status status;
+ bool desired_mode;
+ bool rx_packet_done;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status. If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+ return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
+}
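+
+/*
+ * Typical polling pattern (a sketch, not a required idiom): callers
+ * check for MUSB_DMA_STATUS_BUSY before reprogramming a channel, e.g.
+ *
+ *	if (dma_channel_status(channel) == MUSB_DMA_STATUS_BUSY)
+ *		return;
+ */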
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @musb: the usb controller
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ * returning it to FREE (but allocated) state
+ * @dma_callback: invoked on DMA completion, useful to run platform
+ * code such as IRQ acknowledgment.
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+ struct musb *musb;
+ struct dma_channel *(*channel_alloc)(struct dma_controller *,
+ struct musb_hw_ep *, u8 is_tx);
+ void (*channel_release)(struct dma_channel *);
+ int (*channel_program)(struct dma_channel *channel,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr,
+ u32 length);
+ int (*channel_abort)(struct dma_channel *);
+ int (*is_compatible)(struct dma_channel *channel,
+ u16 maxpacket,
+ void *buf, u32 length);
+ void (*dma_callback)(struct dma_controller *);
+};
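+
+/*
+ * Sketch of the call flow between musb core and a controller
+ * implementation, derived from the hooks above (not additional API):
+ *
+ *	channel = c->channel_alloc(c, hw_ep, is_tx);
+ *	c->channel_program(channel, maxpacket, mode, dma_addr, length);
+ *	... musb_dma_completion() runs once the transfer finishes ...
+ *	c->channel_release(channel);
+ *
+ * channel_abort() may be used instead of waiting for completion and
+ * returns the channel to the FREE (but still allocated) state.
+ */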
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+#ifdef CONFIG_MUSB_PIO_ONLY
+static inline struct dma_controller *
+musb_dma_controller_create(struct musb *m, void __iomem *io)
+{
+ return NULL;
+}
+
+static inline void musb_dma_controller_destroy(struct dma_controller *d) { }
+
+#else
+
+extern struct dma_controller *
+(*musb_dma_controller_create)(struct musb *, void __iomem *);
+
+extern void (*musb_dma_controller_destroy)(struct dma_controller *);
+#endif
+
+/* Platform specific DMA functions */
+extern struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base);
+extern void musbhs_dma_controller_destroy(struct dma_controller *c);
+extern struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base);
+extern irqreturn_t dma_controller_irq(int irq, void *private_data);
+
+extern struct dma_controller *
+tusb_dma_controller_create(struct musb *musb, void __iomem *base);
+extern void tusb_dma_controller_destroy(struct dma_controller *c);
+
+extern struct dma_controller *
+cppi_dma_controller_create(struct musb *musb, void __iomem *base);
+extern void cppi_dma_controller_destroy(struct dma_controller *c);
+
+extern struct dma_controller *
+cppi41_dma_controller_create(struct musb *musb, void __iomem *base);
+extern void cppi41_dma_controller_destroy(struct dma_controller *c);
+
+extern struct dma_controller *
+ux500_dma_controller_create(struct musb *musb, void __iomem *base);
+extern void ux500_dma_controller_destroy(struct dma_controller *c);
+
+#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
new file mode 100644
index 000000000..f75cde0f2
--- /dev/null
+++ b/drivers/usb/musb/musb_dsps.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments DSPS platforms "glue layer"
+ *
+ * Copyright (C) 2012, by Texas Instruments
+ *
+ * Based on the am35x "glue layer" code.
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * musb_dsps.c is intended to be a common file for all the TI DSPS
+ * platforms such as dm64x, dm36x, dm35x, da8x, am35x and ti81x.
+ * For now only ti81x uses it; in the future davinci.c, am35x.c and
+ * da8xx.c may be merged into this file after testing.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/platform_data/usb-omap.h>
+#include <linux/sizes.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/usb/of.h>
+
+#include <linux/debugfs.h>
+
+#include "musb_core.h"
+
+static const struct of_device_id musb_dsps_of_match[];
+
+/*
+ * DSPS musb wrapper register offset.
+ * FIXME: This should be expanded to have all the wrapper registers from TI DSPS
+ * musb ips.
+ */
+struct dsps_musb_wrapper {
+ u16 revision;
+ u16 control;
+ u16 status;
+ u16 epintr_set;
+ u16 epintr_clear;
+ u16 epintr_status;
+ u16 coreintr_set;
+ u16 coreintr_clear;
+ u16 coreintr_status;
+ u16 phy_utmi;
+ u16 mode;
+ u16 tx_mode;
+ u16 rx_mode;
+
+ /* bit positions for control */
+ unsigned reset:5;
+
+ /* bit positions for interrupt */
+ unsigned usb_shift:5;
+ u32 usb_mask;
+ u32 usb_bitmap;
+ unsigned drvvbus:5;
+
+ unsigned txep_shift:5;
+ u32 txep_mask;
+ u32 txep_bitmap;
+
+ unsigned rxep_shift:5;
+ u32 rxep_mask;
+ u32 rxep_bitmap;
+
+ /* bit positions for phy_utmi */
+ unsigned otg_disable:5;
+
+ /* bit positions for mode */
+ unsigned iddig:5;
+ unsigned iddig_mux:5;
+ /* miscellaneous stuff */
+ unsigned poll_timeout;
+};
+
+/*
+ * register shadow for suspend
+ */
+struct dsps_context {
+ u32 control;
+ u32 epintr;
+ u32 coreintr;
+ u32 phy_utmi;
+ u32 mode;
+ u32 tx_mode;
+ u32 rx_mode;
+};
+
+/*
+ * DSPS glue structure.
+ */
+struct dsps_glue {
+ struct device *dev;
+ struct platform_device *musb; /* child musb pdev */
+ const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
+ int vbus_irq; /* optional vbus irq */
+ unsigned long last_timer; /* last timer data for each instance */
+ bool sw_babble_enabled;
+ void __iomem *usbss_base;
+
+ struct dsps_context context;
+ struct debugfs_regset32 regset;
+ struct dentry *dbgfs_root;
+};
+
+static const struct debugfs_reg32 dsps_musb_regs[] = {
+ { "revision", 0x00 },
+ { "control", 0x14 },
+ { "status", 0x18 },
+ { "eoi", 0x24 },
+ { "intr0_stat", 0x30 },
+ { "intr1_stat", 0x34 },
+ { "intr0_set", 0x38 },
+ { "intr1_set", 0x3c },
+ { "txmode", 0x70 },
+ { "rxmode", 0x74 },
+ { "autoreq", 0xd0 },
+ { "srpfixtime", 0xd4 },
+ { "tdown", 0xd8 },
+ { "phy_utmi", 0xe0 },
+ { "mode", 0xe8 },
+};
+
+static void dsps_mod_timer(struct dsps_glue *glue, int wait_ms)
+{
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ int wait;
+
+ if (wait_ms < 0)
+ wait = msecs_to_jiffies(glue->wrp->poll_timeout);
+ else
+ wait = msecs_to_jiffies(wait_ms);
+
+ mod_timer(&musb->dev_timer, jiffies + wait);
+}
+
+/*
+ * If no vbus irq from the PMIC is configured, we need to poll VBUS status.
+ */
+static void dsps_mod_timer_optional(struct dsps_glue *glue)
+{
+ if (glue->vbus_irq)
+ return;
+
+ dsps_mod_timer(glue, -1);
+}
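dsps_mod_timer() treats a negative wait_ms as "use the per-SoC default poll_timeout" (2000 ms on am33xx). A standalone sketch of that delay selection, with a hypothetical HZ value standing in for the kernel's msecs_to_jiffies():

#include <stdio.h>

#define HZ 250	/* assumed tick rate, for illustration only */

static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;	/* round up */
}

static unsigned long pick_delay(int wait_ms, unsigned int poll_timeout)
{
	return msecs_to_jiffies_sketch(wait_ms < 0 ? poll_timeout
						   : (unsigned int)wait_ms);
}

int main(void)
{
	printf("default poll: %lu jiffies\n", pick_delay(-1, 2000));	/* 500 */
	printf("immediate:    %lu jiffies\n", pick_delay(0, 2000));	/* 0 */
	return 0;
}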
+
+/* USBSS / USB AM335x */
+#define USBSS_IRQ_STATUS 0x28
+#define USBSS_IRQ_ENABLER 0x2c
+#define USBSS_IRQ_CLEARR 0x30
+
+#define USBSS_IRQ_PD_COMP (1 << 2)
+
+/*
+ * dsps_musb_enable - enable interrupts
+ */
+static void dsps_musb_enable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 epmask, coremask;
+
+ /* Workaround: setup IRQs through both register sets. */
+ epmask = ((musb->epmask & wrp->txep_mask) << wrp->txep_shift) |
+ ((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
+ coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
+
+ musb_writel(reg_base, wrp->epintr_set, epmask);
+ musb_writel(reg_base, wrp->coreintr_set, coremask);
+ /*
+ * start polling for runtime PM active and idle,
+ * and for ID change in dual-role idle mode.
+ */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ dsps_mod_timer(glue, -1);
+}
+
+/*
+ * dsps_musb_disable - disable HDRC and flush interrupts
+ */
+static void dsps_musb_disable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
+ musb_writel(reg_base, wrp->epintr_clear,
+ wrp->txep_bitmap | wrp->rxep_bitmap);
+ del_timer_sync(&musb->dev_timer);
+}
+
+/* Caller must take musb->lock */
+static int dsps_check_status(struct musb *musb, void *unused)
+{
+ void __iomem *mregs = musb->mregs;
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ u8 devctl;
+ int skip_session = 0;
+
+ if (glue->vbus_irq)
+ del_timer(&musb->dev_timer);
+
+ /*
+	 * We poll because DSPS IPs don't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ if (musb->port_mode == MUSB_HOST) {
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+ dsps_mod_timer_optional(glue);
+ break;
+ }
+ fallthrough;
+
+ case OTG_STATE_A_WAIT_BCON:
+ /* keep VBUS on for host-only mode */
+ if (musb->port_mode == MUSB_HOST) {
+ dsps_mod_timer_optional(glue);
+ break;
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ skip_session = 1;
+ fallthrough;
+
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_B_IDLE:
+ if (!glue->vbus_irq) {
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+
+ if (musb->port_mode == MUSB_PERIPHERAL)
+ skip_session = 1;
+
+ if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
+ musb_writeb(mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
+ }
+ dsps_mod_timer_optional(glue);
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, wrp->coreintr_set,
+ MUSB_INTR_VBUSERROR << wrp->usb_shift);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void otg_timer(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ struct device *dev = musb->controller;
+ unsigned long flags;
+ int err;
+
+ err = pm_runtime_get(dev);
+ if ((err != -EINPROGRESS) && err < 0) {
+ dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
+ pm_runtime_put_noidle(dev);
+
+ return;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ err = musb_queue_resume_work(musb, dsps_check_status, NULL);
+ if (err < 0)
+ dev_err(dev, "%s resume work: %i\n", __func__, err);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+ u32 epintr;
+ struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+
+	/* musb->lock might already be held */
+ epintr = (1 << epnum) << wrp->rxep_shift;
+ musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
+}
+
+static irqreturn_t dsps_interrupt(int irq, void *hci)
+{
+ struct musb *musb = hci;
+ void __iomem *reg_base = musb->ctrl_base;
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u32 epintr, usbintr;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Get endpoint interrupts */
+ epintr = musb_readl(reg_base, wrp->epintr_status);
+ musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
+ musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
+
+ if (epintr)
+ musb_writel(reg_base, wrp->epintr_status, epintr);
+
+ /* Get usb core interrupts */
+ usbintr = musb_readl(reg_base, wrp->coreintr_status);
+ if (!usbintr && !epintr)
+ goto out;
+
+ musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
+ if (usbintr)
+ musb_writel(reg_base, wrp->coreintr_status, usbintr);
+
+ dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
+ usbintr, epintr);
+
+ if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
+ int drvvbus = musb_readl(reg_base, wrp->status);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err;
+
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
+ if (err) {
+ /*
+ * The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"-starting sessions
+ * without waiting for VBUS to stop registering in
+ * devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
+ dsps_mod_timer_optional(glue);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ dsps_mod_timer_optional(glue);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ }
+
+ /* NOTE: this must complete power-on within 100 ms. */
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ err ? " ERROR" : "",
+ devctl);
+ ret = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ ret |= musb_interrupt(musb);
+
+ /* Poll for ID change and connect */
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_A_WAIT_BCON:
+ dsps_mod_timer_optional(glue);
+ break;
+ default:
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
+{
+ struct dentry *root;
+ char buf[128];
+
+ sprintf(buf, "%s.dsps", dev_name(musb->controller));
+ root = debugfs_create_dir(buf, usb_debug_root);
+ glue->dbgfs_root = root;
+
+ glue->regset.regs = dsps_musb_regs;
+ glue->regset.nregs = ARRAY_SIZE(dsps_musb_regs);
+ glue->regset.base = musb->ctrl_base;
+
+ debugfs_create_regset32("regdump", S_IRUGO, root, &glue->regset);
+ return 0;
+}
+
+static int dsps_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ struct platform_device *parent = to_platform_device(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *reg_base;
+ struct resource *r;
+ u32 rev, val;
+ int ret;
+
+ r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
+ reg_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+ musb->ctrl_base = reg_base;
+
+ /* NOP driver needs change if supporting dual instance */
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent, "phys", 0);
+ if (IS_ERR(musb->xceiv))
+ return PTR_ERR(musb->xceiv);
+
+ musb->phy = devm_phy_get(dev->parent, "usb2-phy");
+
+ /* Returns zero if e.g. not clocked */
+ rev = musb_readl(reg_base, wrp->revision);
+ if (!rev)
+ return -ENODEV;
+
+ if (IS_ERR(musb->phy)) {
+ musb->phy = NULL;
+ } else {
+ ret = phy_init(musb->phy);
+ if (ret < 0)
+ return ret;
+ ret = phy_power_on(musb->phy);
+ if (ret) {
+ phy_exit(musb->phy);
+ return ret;
+ }
+ }
+
+ timer_setup(&musb->dev_timer, otg_timer, 0);
+
+ /* Reset the musb */
+ musb_writel(reg_base, wrp->control, (1 << wrp->reset));
+
+ musb->isr = dsps_interrupt;
+
+ /* reset the otgdisable bit, needed for host mode to work */
+ val = musb_readl(reg_base, wrp->phy_utmi);
+ val &= ~(1 << wrp->otg_disable);
+ musb_writel(musb->ctrl_base, wrp->phy_utmi, val);
+
+	/*
+	 * Check whether this DSPS version has babble control enabled.
+	 * The latest silicon revisions have the babble control logic;
+	 * if reading MUSB_BABBLE_CTL returns 0x4, the logic is present.
+	 */
+ val = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
+ if (val & MUSB_BABBLE_RCV_DISABLE) {
+ glue->sw_babble_enabled = true;
+ val |= MUSB_BABBLE_SW_SESSION_CTRL;
+ musb_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
+ }
+
+ dsps_mod_timer(glue, -1);
+
+ return dsps_musb_dbg_init(musb, glue);
+}
+
+static int dsps_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+
+ del_timer_sync(&musb->dev_timer);
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ debugfs_remove_recursive(glue->dbgfs_root);
+
+ return 0;
+}
+
+static int dsps_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *ctrl_base = musb->ctrl_base;
+ u32 reg;
+
+ reg = musb_readl(ctrl_base, wrp->mode);
+
+ switch (mode) {
+ case MUSB_HOST:
+ reg &= ~(1 << wrp->iddig);
+
+ /*
+ * if we're setting mode to host-only or device-only, we're
+ * going to ignore whatever the PHY sends us and just force
+ * ID pin status by SW
+ */
+ reg |= (1 << wrp->iddig_mux);
+
+ musb_writel(ctrl_base, wrp->mode, reg);
+ musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ break;
+ case MUSB_PERIPHERAL:
+ reg |= (1 << wrp->iddig);
+
+ /*
+ * if we're setting mode to host-only or device-only, we're
+ * going to ignore whatever the PHY sends us and just force
+ * ID pin status by SW
+ */
+ reg |= (1 << wrp->iddig_mux);
+
+ musb_writel(ctrl_base, wrp->mode, reg);
+ break;
+ case MUSB_OTG:
+ musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ break;
+ default:
+ dev_err(glue->dev, "unsupported mode %d\n", mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
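To make the iddig/iddig_mux handling concrete, here is a standalone sketch (not kernel code) of the mode register math with the am33xx bit positions (iddig = 8, iddig_mux = 7) defined later in this file; a starting register value of zero is assumed for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int iddig = 8, iddig_mux = 7;	/* am33xx positions */
	uint32_t reg = 0;	/* assumed initial wrapper mode register */

	/* force host: drive ID low in SW, mux SW over the PHY's ID pin */
	reg &= ~(1u << iddig);
	reg |= 1u << iddig_mux;
	printf("host reg = %#x\n", reg);		/* 0x80 */

	/* force peripheral: drive ID high in SW instead */
	reg |= (1u << iddig) | (1u << iddig_mux);
	printf("peripheral reg = %#x\n", reg);	/* 0x180 */
	return 0;
}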
+
+static bool dsps_sw_babble_control(struct musb *musb)
+{
+ u8 babble_ctl;
+ bool session_restart = false;
+
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
+ dev_dbg(musb->controller, "babble: MUSB_BABBLE_CTL value %x\n",
+ babble_ctl);
+	/*
+	 * Check the line monitor flag to see whether the babble is
+	 * due to noise.
+	 */
+ dev_dbg(musb->controller, "STUCK_J is %s\n",
+ babble_ctl & MUSB_BABBLE_STUCK_J ? "set" : "reset");
+
+ if (babble_ctl & MUSB_BABBLE_STUCK_J) {
+ int timeout = 10;
+
+		/*
+		 * The babble is due to noise; set the transmit idle bit
+		 * (D7) to resume normal operation.
+		 */
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl |= MUSB_BABBLE_FORCE_TXIDLE;
+ musb_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
+
+		/* wait until the line monitor flag clears */
+ dev_dbg(musb->controller, "Set TXIDLE, wait J to clear\n");
+ do {
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
+ udelay(1);
+ } while ((babble_ctl & MUSB_BABBLE_STUCK_J) && timeout--);
+
+		/* check whether the stuck_at_j bit has cleared */
+		if (babble_ctl & MUSB_BABBLE_STUCK_J) {
+			/*
+			 * A real babble condition has occurred;
+			 * restart the controller to start the
+			 * session again.
+			 */
+ dev_dbg(musb->controller, "J not cleared, misc (%x)\n",
+ babble_ctl);
+ session_restart = true;
+ }
+ } else {
+ session_restart = true;
+ }
+
+ return session_restart;
+}
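The STUCK_J recovery above is a bounded busy-wait: poll the flag for at most `timeout` iterations, then decide whether recovery worked. A generic standalone sketch of the idiom (the driver additionally inserts a udelay(1) per iteration):

#include <stdbool.h>
#include <stdio.h>

static bool flag_cleared(volatile unsigned char *reg, unsigned char flag,
			 int timeout)
{
	unsigned char v;

	do {
		v = *reg;	/* re-read the status register */
	} while ((v & flag) && timeout--);

	return !(v & flag);
}

int main(void)
{
	unsigned char reg = 0x10;	/* hypothetical stuck flag, set */

	printf("cleared: %d\n", flag_cleared(&reg, 0x10, 10));	/* 0 */
	reg = 0;
	printf("cleared: %d\n", flag_cleared(&reg, 0x10, 10));	/* 1 */
	return 0;
}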
+
+static int dsps_musb_recover(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ int session_restart = 0;
+
+ if (glue->sw_babble_enabled)
+ session_restart = dsps_sw_babble_control(musb);
+ else
+ session_restart = 1;
+
+ return session_restart ? 0 : -EPIPE;
+}
+
+/* Similar to am35x, dm81xx supports only 32-bit read operations */
+static void dsps_read_fifo32(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ if (len >= 4) {
+ ioread32_rep(fifo, dst, len >> 2);
+ dst += len & ~0x03;
+ len &= 0x03;
+ }
+
+ /* Read any remaining 1 to 3 bytes */
+ if (len > 0) {
+ u32 val = musb_readl(fifo, 0);
+ memcpy(dst, &val, len);
+ }
+}
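A standalone model of the word-plus-tail split above, with an ordinary array standing in for the repeated 32-bit FIFO register reads (ioread32_rep() and musb_readl() in the real driver); the printed byte order assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void read_fifo32_sketch(const uint32_t *fifo, uint16_t len, uint8_t *dst)
{
	uint16_t words = len >> 2;

	for (uint16_t i = 0; i < words; i++)	/* ioread32_rep() stand-in */
		memcpy(dst + 4 * i, &fifo[i], 4);
	dst += len & ~0x03;
	len &= 0x03;

	if (len > 0) {				/* remaining 1 to 3 bytes */
		uint32_t val = fifo[words];

		memcpy(dst, &val, len);
	}
}

int main(void)
{
	const uint32_t fifo[2] = { 0x03020100, 0x07060504 };
	uint8_t out[7];

	read_fifo32_sketch(fifo, sizeof(out), out);
	for (int i = 0; i < (int)sizeof(out); i++)
		printf("%02x ", out[i]);	/* 00 01 02 03 04 05 06 */
	printf("\n");
	return 0;
}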
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+static void dsps_dma_controller_callback(struct dma_controller *c)
+{
+ struct musb *musb = c->musb;
+ struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+ void __iomem *usbss_base = glue->usbss_base;
+ u32 status;
+
+ status = musb_readl(usbss_base, USBSS_IRQ_STATUS);
+ if (status & USBSS_IRQ_PD_COMP)
+ musb_writel(usbss_base, USBSS_IRQ_STATUS, USBSS_IRQ_PD_COMP);
+}
+
+static struct dma_controller *
+dsps_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct dma_controller *controller;
+ struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+ void __iomem *usbss_base = glue->usbss_base;
+
+ controller = cppi41_dma_controller_create(musb, base);
+ if (IS_ERR_OR_NULL(controller))
+ return controller;
+
+ musb_writel(usbss_base, USBSS_IRQ_ENABLER, USBSS_IRQ_PD_COMP);
+ controller->dma_callback = dsps_dma_controller_callback;
+
+ return controller;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void dsps_dma_controller_suspend(struct dsps_glue *glue)
+{
+ void __iomem *usbss_base = glue->usbss_base;
+
+ musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
+}
+
+static void dsps_dma_controller_resume(struct dsps_glue *glue)
+{
+ void __iomem *usbss_base = glue->usbss_base;
+
+ musb_writel(usbss_base, USBSS_IRQ_ENABLER, USBSS_IRQ_PD_COMP);
+}
+#endif
+#else /* CONFIG_USB_TI_CPPI41_DMA */
+#ifdef CONFIG_PM_SLEEP
+static void dsps_dma_controller_suspend(struct dsps_glue *glue) {}
+static void dsps_dma_controller_resume(struct dsps_glue *glue) {}
+#endif
+#endif /* CONFIG_USB_TI_CPPI41_DMA */
+
+static struct musb_platform_ops dsps_ops = {
+ .quirks = MUSB_DMA_CPPI41 | MUSB_INDEXED_EP,
+ .init = dsps_musb_init,
+ .exit = dsps_musb_exit,
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+ .dma_init = dsps_dma_controller_create,
+ .dma_exit = cppi41_dma_controller_destroy,
+#endif
+ .enable = dsps_musb_enable,
+ .disable = dsps_musb_disable,
+
+ .set_mode = dsps_musb_set_mode,
+ .recover = dsps_musb_recover,
+ .clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
+};
+
+static u64 musb_dmamask = DMA_BIT_MASK(32);
+
+static int get_int_prop(struct device_node *dn, const char *s)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(dn, s, &val);
+ if (ret)
+ return 0;
+ return val;
+}
+
+static int dsps_create_musb_pdev(struct dsps_glue *glue,
+ struct platform_device *parent)
+{
+ struct musb_hdrc_platform_data pdata;
+ struct resource resources[2];
+ struct resource *res;
+ struct device *dev = &parent->dev;
+ struct musb_hdrc_config *config;
+ struct platform_device *musb;
+ struct device_node *dn = parent->dev.of_node;
+ int ret, val;
+
+ memset(resources, 0, sizeof(resources));
+ res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
+ if (!res) {
+ dev_err(dev, "failed to get memory.\n");
+ return -EINVAL;
+ }
+ resources[0] = *res;
+
+ ret = platform_get_irq_byname(parent, "mc");
+ if (ret < 0)
+ return ret;
+
+ resources[1].start = ret;
+ resources[1].end = ret;
+ resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
+ resources[1].name = "mc";
+
+ /* allocate the child platform device */
+ musb = platform_device_alloc("musb-hdrc",
+ (resources[0].start & 0xFFF) == 0x400 ? 0 : 1);
+ if (!musb) {
+ dev_err(dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ musb->dev.parent = dev;
+ musb->dev.dma_mask = &musb_dmamask;
+ musb->dev.coherent_dma_mask = musb_dmamask;
+ device_set_of_node_from_dev(&musb->dev, &parent->dev);
+
+ glue->musb = musb;
+
+ ret = platform_device_add_resources(musb, resources,
+ ARRAY_SIZE(resources));
+ if (ret) {
+ dev_err(dev, "failed to add resources\n");
+ goto err;
+ }
+
+ config = devm_kzalloc(&parent->dev, sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ pdata.config = config;
+ pdata.platform_ops = &dsps_ops;
+
+ config->num_eps = get_int_prop(dn, "mentor,num-eps");
+ config->ram_bits = get_int_prop(dn, "mentor,ram-bits");
+ config->host_port_deassert_reset_at_resume = 1;
+ pdata.mode = musb_get_mode(dev);
+	/* DT keeps this entry in mA; musb expects 2 mA units (USB spec) */
+ pdata.power = get_int_prop(dn, "mentor,power") / 2;
+
+ ret = of_property_read_u32(dn, "mentor,multipoint", &val);
+ if (!ret && val)
+ config->multipoint = true;
+
+ config->maximum_speed = usb_get_maximum_speed(&parent->dev);
+ switch (config->maximum_speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ break;
+ case USB_SPEED_SUPER:
+		dev_warn(dev, "ignoring incorrect maximum_speed "
+				"(super-speed) setting in dts");
+ fallthrough;
+ default:
+ config->maximum_speed = USB_SPEED_HIGH;
+ }
+
+ ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
+ if (ret) {
+ dev_err(dev, "failed to add platform_data\n");
+ goto err;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(dev, "failed to register musb device\n");
+ goto err;
+ }
+ return 0;
+
+err:
+ platform_device_put(musb);
+ return ret;
+}
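One detail above merits a worked example: the "mentor,power" DT property is in mA, while pdata.power counts 2 mA units (the USB spec's bMaxPower convention), hence the divide by 2. A standalone sketch with a hypothetical 500 mA board value:

#include <stdio.h>

int main(void)
{
	unsigned int dt_power_ma = 500;	/* hypothetical "mentor,power" */
	unsigned int pdata_power = dt_power_ma / 2;	/* 2 mA units */

	printf("%u mA -> pdata.power = %u\n", dt_power_ma, pdata_power);
	return 0;	/* 500 mA -> 250 */
}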
+
+static irqreturn_t dsps_vbus_threaded_irq(int irq, void *priv)
+{
+ struct dsps_glue *glue = priv;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+
+ if (!musb)
+ return IRQ_NONE;
+
+ dev_dbg(glue->dev, "VBUS interrupt\n");
+ dsps_mod_timer(glue, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int dsps_setup_optional_vbus_irq(struct platform_device *pdev,
+ struct dsps_glue *glue)
+{
+ int error;
+
+ glue->vbus_irq = platform_get_irq_byname(pdev, "vbus");
+ if (glue->vbus_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (glue->vbus_irq <= 0) {
+ glue->vbus_irq = 0;
+ return 0;
+ }
+
+ error = devm_request_threaded_irq(glue->dev, glue->vbus_irq,
+ NULL, dsps_vbus_threaded_irq,
+ IRQF_ONESHOT,
+ "vbus", glue);
+ if (error) {
+ glue->vbus_irq = 0;
+ return error;
+ }
+ dev_dbg(glue->dev, "VBUS irq %i configured\n", glue->vbus_irq);
+
+ return 0;
+}
+
+static int dsps_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct dsps_musb_wrapper *wrp;
+ struct dsps_glue *glue;
+ int ret;
+
+ if (!strcmp(pdev->name, "musb-hdrc"))
+ return -ENODEV;
+
+ match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "fail to get matching of_match struct\n");
+ return -EINVAL;
+ }
+ wrp = match->data;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "ti,musb-dm816"))
+ dsps_ops.read_fifo = dsps_read_fifo32;
+
+ /* allocate glue */
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = &pdev->dev;
+ glue->wrp = wrp;
+ glue->usbss_base = of_iomap(pdev->dev.parent->of_node, 0);
+ if (!glue->usbss_base)
+ return -ENXIO;
+
+ platform_set_drvdata(pdev, glue);
+ pm_runtime_enable(&pdev->dev);
+ ret = dsps_create_musb_pdev(glue, pdev);
+ if (ret)
+ goto err;
+
+ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
+ ret = dsps_setup_optional_vbus_irq(pdev, glue);
+ if (ret)
+ goto unregister_pdev;
+ }
+
+ return 0;
+
+unregister_pdev:
+ platform_device_unregister(glue->musb);
+err:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(glue->usbss_base);
+ return ret;
+}
+
+static int dsps_remove(struct platform_device *pdev)
+{
+ struct dsps_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+
+ pm_runtime_disable(&pdev->dev);
+ iounmap(glue->usbss_base);
+
+ return 0;
+}
+
+static const struct dsps_musb_wrapper am33xx_driver_data = {
+ .revision = 0x00,
+ .control = 0x14,
+ .status = 0x18,
+ .epintr_set = 0x38,
+ .epintr_clear = 0x40,
+ .epintr_status = 0x30,
+ .coreintr_set = 0x3c,
+ .coreintr_clear = 0x44,
+ .coreintr_status = 0x34,
+ .phy_utmi = 0xe0,
+ .mode = 0xe8,
+ .tx_mode = 0x70,
+ .rx_mode = 0x74,
+ .reset = 0,
+ .otg_disable = 21,
+ .iddig = 8,
+ .iddig_mux = 7,
+ .usb_shift = 0,
+ .usb_mask = 0x1ff,
+ .usb_bitmap = (0x1ff << 0),
+ .drvvbus = 8,
+ .txep_shift = 0,
+ .txep_mask = 0xffff,
+ .txep_bitmap = (0xffff << 0),
+ .rxep_shift = 16,
+ .rxep_mask = 0xfffe,
+ .rxep_bitmap = (0xfffe << 16),
+ .poll_timeout = 2000, /* ms */
+};
+
+static const struct of_device_id musb_dsps_of_match[] = {
+ { .compatible = "ti,musb-am33xx",
+ .data = &am33xx_driver_data, },
+ { .compatible = "ti,musb-dm816",
+ .data = &am33xx_driver_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, musb_dsps_of_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int dsps_suspend(struct device *dev)
+{
+ struct dsps_glue *glue = dev_get_drvdata(dev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ void __iomem *mbase;
+ int ret;
+
+ if (!musb)
+		/* This can happen if the musb device probe was deferred */
+ return 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ del_timer_sync(&musb->dev_timer);
+
+ mbase = musb->ctrl_base;
+ glue->context.control = musb_readl(mbase, wrp->control);
+ glue->context.epintr = musb_readl(mbase, wrp->epintr_set);
+ glue->context.coreintr = musb_readl(mbase, wrp->coreintr_set);
+ glue->context.phy_utmi = musb_readl(mbase, wrp->phy_utmi);
+ glue->context.mode = musb_readl(mbase, wrp->mode);
+ glue->context.tx_mode = musb_readl(mbase, wrp->tx_mode);
+ glue->context.rx_mode = musb_readl(mbase, wrp->rx_mode);
+
+ dsps_dma_controller_suspend(glue);
+
+ return 0;
+}
+
+static int dsps_resume(struct device *dev)
+{
+ struct dsps_glue *glue = dev_get_drvdata(dev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ void __iomem *mbase;
+
+ if (!musb)
+ return 0;
+
+ dsps_dma_controller_resume(glue);
+
+ mbase = musb->ctrl_base;
+ musb_writel(mbase, wrp->control, glue->context.control);
+ musb_writel(mbase, wrp->epintr_set, glue->context.epintr);
+ musb_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
+ musb_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
+ musb_writel(mbase, wrp->mode, glue->context.mode);
+ musb_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ musb_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+ musb->port_mode == MUSB_OTG)
+ dsps_mod_timer(glue, -1);
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume);
+
+static struct platform_driver dsps_usbss_driver = {
+ .probe = dsps_probe,
+ .remove = dsps_remove,
+ .driver = {
+ .name = "musb-dsps",
+ .pm = &dsps_pm_ops,
+ .of_match_table = musb_dsps_of_match,
+ },
+};
+
+MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer");
+MODULE_AUTHOR("Ravi B <ravibabu@ti.com>");
+MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(dsps_usbss_driver);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 000000000..ba20272d2
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2093 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG driver peripheral support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "musb_core.h"
+#include "musb_trace.h"
+
+
+/* ----------------------------------------------------------------------- */
+
+#define is_buffer_mapped(req) (is_dma_capable() && \
+ (req->map_state != UN_MAPPED))
+
+/* Map the request's buffer for DMA */
+
+static inline void map_dma_buffer(struct musb_request *request,
+ struct musb *musb, struct musb_ep *musb_ep)
+{
+ int compatible = true;
+ struct dma_controller *dma = musb->dma_controller;
+
+ request->map_state = UN_MAPPED;
+
+ if (!is_dma_capable() || !musb_ep->dma)
+ return;
+
+ /* Check if DMA engine can handle this request.
+ * DMA code must reject the USB request explicitly.
+ * Default behaviour is to map the request.
+ */
+ if (dma->is_compatible)
+ compatible = dma->is_compatible(musb_ep->dma,
+ musb_ep->packet_sz, request->request.buf,
+ request->request.length);
+ if (!compatible)
+ return;
+
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(
+ musb->controller,
+ request->request.buf,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ ret = dma_mapping_error(musb->controller, dma_addr);
+ if (ret)
+ return;
+
+ request->request.dma = dma_addr;
+ request->map_state = MUSB_MAPPED;
+ } else {
+ dma_sync_single_for_device(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->map_state = PRE_MAPPED;
+ }
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+ struct musb *musb)
+{
+ struct musb_ep *musb_ep = request->ep;
+
+ if (!is_buffer_mapped(request) || !musb_ep->dma)
+ return;
+
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ dev_vdbg(musb->controller,
+ "not unmapping a never mapped buffer\n");
+ return;
+ }
+ if (request->map_state == MUSB_MAPPED) {
+ dma_unmap_single(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->request.dma = DMA_ADDR_INVALID;
+ } else { /* PRE_MAPPED */
+ dma_sync_single_for_cpu(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ }
+ request->map_state = UN_MAPPED;
+}
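The two helpers above implement a small per-request state machine. A standalone sketch of which completion action each state requires (names mirror the driver's map_state values; the dma_* calls appear only as strings):

#include <stdio.h>

enum map_state { UN_MAPPED, PRE_MAPPED, MUSB_MAPPED };

static const char *completion_action(enum map_state s)
{
	switch (s) {
	case MUSB_MAPPED:	/* this driver mapped it: unmap */
		return "dma_unmap_single()";
	case PRE_MAPPED:	/* gadget supplied a DMA address: sync only */
		return "dma_sync_single_for_cpu()";
	default:		/* PIO or mapping failed: nothing to undo */
		return "nothing";
	}
}

int main(void)
{
	printf("MUSB_MAPPED -> %s\n", completion_action(MUSB_MAPPED));
	printf("PRE_MAPPED  -> %s\n", completion_action(PRE_MAPPED));
	printf("UN_MAPPED   -> %s\n", completion_action(UN_MAPPED));
	return 0;
}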
+
+/*
+ * Immediately complete a request.
+ *
+ * @param request the request to complete
+ * @param status the status to complete the request with
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+ struct musb_ep *ep,
+ struct usb_request *request,
+ int status)
+__releases(ep->musb->lock)
+__acquires(ep->musb->lock)
+{
+ struct musb_request *req;
+ struct musb *musb;
+ int busy = ep->busy;
+
+ req = to_musb_request(request);
+
+ list_del(&req->list);
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+ musb = req->musb;
+
+ ep->busy = 1;
+ spin_unlock(&musb->lock);
+
+ if (!dma_mapping_error(&musb->g.dev, request->dma))
+ unmap_dma_buffer(req, musb);
+
+ trace_musb_req_gb(req);
+ usb_gadget_giveback_request(&req->ep->end_point, &req->request);
+ spin_lock(&musb->lock);
+ ep->busy = busy;
+}
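An illustrative pthread model (not kernel code) of musb_g_giveback()'s unlock-callback-relock pattern: the completion callback may re-enter the driver, e.g. to queue the next request, so it must run without the lock held while the saved busy flag suppresses re-entrant I/O kicks:

#include <pthread.h>
#include <stdio.h>

struct fake_ep {
	pthread_mutex_t lock;
	int busy;
};

static void complete_cb(void)
{
	printf("completion runs unlocked\n");
}

static void giveback(struct fake_ep *ep, void (*complete)(void))
{
	int busy = ep->busy;		/* remember caller's busy state */

	ep->busy = 1;			/* block re-entrant request starts */
	pthread_mutex_unlock(&ep->lock);
	complete();			/* may call back into the "driver" */
	pthread_mutex_lock(&ep->lock);
	ep->busy = busy;		/* restore */
}

int main(void)
{
	struct fake_ep ep = { PTHREAD_MUTEX_INITIALIZER, 0 };

	pthread_mutex_lock(&ep.lock);
	giveback(&ep, complete_cb);
	pthread_mutex_unlock(&ep.lock);
	return 0;
}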
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint using the status. Synchronous.
+ * caller locked controller and blocked irqs, and selected this ep.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+ struct musb *musb = ep->musb;
+ struct musb_request *req = NULL;
+ void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
+
+ ep->busy = 1;
+
+ if (is_dma_capable() && ep->dma) {
+ struct dma_controller *c = ep->musb->dma_controller;
+ int value;
+
+ if (ep->is_in) {
+ /*
+ * The programming guide says that we must not clear
+ * the DMAMODE bit before DMAENAB, so we only
+ * clear it in the second write...
+ */
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ } else {
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ }
+
+ value = c->channel_abort(ep->dma);
+ musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
+ c->channel_release(ep->dma);
+ ep->dma = NULL;
+ }
+
+ while (!list_empty(&ep->req_list)) {
+ req = list_first_entry(&ep->req_list, struct musb_request, list);
+ musb_g_giveback(ep, &req->request, status);
+ }
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Data transfers - pure PIO, pure DMA, or mixed mode */
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+{
+ if (can_bulk_split(musb, ep->type))
+ return ep->hw_ep->max_packet_sz_tx;
+ else
+ return ep->packet_sz;
+}
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *musb, struct musb_request *req)
+{
+ u8 epnum = req->epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct usb_request *request;
+ u16 fifo_count = 0, csr;
+ int use_dma = 0;
+
+ musb_ep = req->ep;
+
+ /* Check if EP is disabled */
+ if (!musb_ep->desc) {
+ musb_dbg(musb, "ep:%s disabled - ignore request",
+ musb_ep->end_point.name);
+ return;
+ }
+
+ /* we shouldn't get here while DMA is active ... but we do ... */
+ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+ musb_dbg(musb, "dma pending...");
+ return;
+ }
+
+ /* read TXCSR before */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ request = &req->request;
+ fifo_count = min(max_ep_writesize(musb, musb_ep),
+ (int)(request->length - request->actual));
+
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+		musb_dbg(musb, "%s old packet still ready, txcsr %03x",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (csr & MUSB_TXCSR_P_SENDSTALL) {
+ musb_dbg(musb, "%s stalling, txcsr %03x",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
+ epnum, musb_ep->packet_sz, fifo_count,
+ csr);
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (is_buffer_mapped(req)) {
+ struct dma_controller *c = musb->dma_controller;
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min_t(size_t, request->length - request->actual,
+ musb_ep->dma->max_len);
+
+ use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
+
+ /* MUSB_TXCSR_P_ISO is still set correctly */
+
+ if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
+ if (request_size < musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ musb_ep->dma->desired_mode,
+ request->dma + request->actual, request_size);
+ if (use_dma) {
+ if (musb_ep->dma->desired_mode == 0) {
+ /*
+ * We must not clear the DMAMODE bit
+ * before the DMAENAB bit -- and the
+ * latter doesn't always get cleared
+ * before we get here...
+ */
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB);
+ musb_writew(epio, MUSB_TXCSR, csr
+ | MUSB_TXCSR_P_WZC_BITS);
+ csr &= ~MUSB_TXCSR_DMAMODE;
+ csr |= (MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_MODE);
+ /* against programming guide */
+ } else {
+ csr |= (MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_MODE);
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 0 Yes(Normal)
+ * 0 >0 No(High BW ISO)
+ * 1 0 Yes(HS bulk)
+ * 1 >0 Yes(FS bulk)
+ */
+ if (!musb_ep->hb_mult ||
+ can_bulk_split(musb,
+ musb_ep->type))
+ csr |= MUSB_TXCSR_AUTOSET;
+ }
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+ if (is_cppi_enabled(musb)) {
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+ MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
+ ~MUSB_TXCSR_P_UNDERRUN) | csr);
+
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /*
+ * NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and
+ * Mentor fifos) just tells CPPI it could start. Data
+ * only moves to the USB TX fifo when both fifos are
+ * ready.
+ */
+ /*
+ * "mode" is irrelevant here; handle terminating ZLPs
+ * like PIO does, since the hardware RNDIS mode seems
+ * unreliable except for the
+ * last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma + request->actual,
+ request_size);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+			/* invariant: request->buf is non-null */
+ }
+ } else if (tusb_dma_omap(musb))
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma + request->actual,
+ request_size);
+ }
+#endif
+
+ if (!use_dma) {
+ /*
+ * Unmap the dma buffer back to cpu if dma channel
+ * programming fails
+ */
+ unmap_dma_buffer(req, musb);
+
+ musb_write_fifo(musb_ep->hw_ep, fifo_count,
+ (u8 *) (request->buf + request->actual));
+ request->actual += fifo_count;
+ csr |= MUSB_TXCSR_TXPKTRDY;
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* host may already have the data when this message shows... */
+ musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
+ musb_ep->end_point.name, use_dma ? "dma" : "pio",
+ request->actual, request->length,
+ musb_readw(epio, MUSB_TXCSR),
+ fifo_count,
+ musb_readw(epio, MUSB_TXMAXP));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ, with controller locked.
+ */
+void musb_g_tx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct musb_request *req;
+ struct usb_request *request;
+ u8 __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+ req = next_request(musb_ep);
+ request = &req->request;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
+
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+ /*
+ * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error.
+ */
+ if (csr & MUSB_TXCSR_P_SENTSTALL) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~MUSB_TXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ return;
+ }
+
+ if (csr & MUSB_TXCSR_P_UNDERRUN) {
+ /* We NAKed, no big deal... little reason to care. */
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
+ epnum, request);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /*
+ * SHOULD NOT HAPPEN... has with CPPI though, after
+ * changing SENDSTALL (and other cases); harmless?
+ */
+ musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
+ return;
+ }
+
+ if (req) {
+
+ trace_musb_req_tx(req);
+
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
+ MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* Ensure writebuffer is empty. */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ request->actual += musb_ep->dma->actual_len;
+ musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
+ epnum, csr, musb_ep->dma->actual_len, request);
+ }
+
+ /*
+ * First, maybe a terminating short packet. Some DMA
+ * engines might handle this by themselves.
+ */
+ if ((request->zero && request->length)
+ && (request->length % musb_ep->packet_sz == 0)
+ && (request->actual == request->length)) {
+
+ /*
+ * On DMA completion, FIFO may not be
+ * available yet...
+ */
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ return;
+
+ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
+ | MUSB_TXCSR_TXPKTRDY);
+ request->zero = 0;
+ }
+
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
+ /*
+			 * In the giveback function the MUSB lock is
+			 * released and re-acquired after some time. During
+			 * that window the INDEX register could get changed
+			 * by the gadget queue function, especially on SMP
+			 * systems. Reselect INDEX to be sure we are
+			 * reading/modifying the right registers.
+ */
+ musb_ep_select(mbase, epnum);
+ req = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!req) {
+ musb_dbg(musb, "%s idle now",
+ musb_ep->end_point.name);
+ return;
+ }
+ }
+
+ txstate(musb, req);
+ }
+}
+
+/* ------------------------------------------------------------ */
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *musb, struct musb_request *req)
+{
+ const u8 epnum = req->epnum;
+ struct usb_request *request = &req->request;
+ struct musb_ep *musb_ep;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ unsigned len = 0;
+ u16 fifo_count;
+ u16 csr = musb_readw(epio, MUSB_RXCSR);
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+ u8 use_mode_1;
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
+
+ fifo_count = musb_ep->packet_sz;
+
+ /* Check if EP is disabled */
+ if (!musb_ep->desc) {
+ musb_dbg(musb, "ep:%s disabled - ignore request",
+ musb_ep->end_point.name);
+ return;
+ }
+
+ /* We shouldn't get here while DMA is active, but we do... */
+ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+ musb_dbg(musb, "DMA pending...");
+ return;
+ }
+
+ if (csr & MUSB_RXCSR_P_SENDSTALL) {
+ musb_dbg(musb, "%s stalling, RXCSR %04x",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+
+ /* NOTE: CPPI won't actually stop advancing the DMA
+ * queue after short packet transfers, so this is almost
+ * always going to run as IRQ-per-packet DMA so that
+ * faults will be handled correctly.
+ */
+ if (c->channel_program(channel,
+ musb_ep->packet_sz,
+ !request->short_not_ok,
+ request->dma + request->actual,
+ request->length - request->actual)) {
+
+ /* make sure that if an rxpkt arrived after the irq,
+ * the cppi engine will be ready to take it as soon
+ * as DMA is enabled
+ */
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAMODE);
+ csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ return;
+ }
+ }
+
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ fifo_count = musb_readw(epio, MUSB_RXCOUNT);
+
+ /*
+		 * Enable Mode 1 on RX transfers only when the short_not_ok
+		 * flag is set. Currently the short_not_ok flag is set only
+		 * by the file_storage and f_mass_storage drivers.
+ */
+
+ if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
+ use_mode_1 = 1;
+ else
+ use_mode_1 = 0;
+
+ if (request->actual < request->length) {
+ if (!is_buffer_mapped(req))
+ goto buffer_aint_mapped;
+
+ if (musb_dma_inventra(musb)) {
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ int use_dma = 0;
+ unsigned int transfer_size;
+
+ c = musb->dma_controller;
+ channel = musb_ep->dma;
+
+ /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion. We only get interrupts from DMA controller.
+ *
+ * We could operate in DMA mode 1 if we knew the size of the transfer
+ * in advance. For mass storage class, request->length = what the host
+ * sends, so that'd work. But for pretty much everything else,
+				 * request->length is routinely more than what the host sends. For
+				 * most of these gadgets, end of transfer is signified either by a
+				 * short packet, or by filling the last byte of the buffer. (Sending
+				 * extra data in that last packet should trigger an overflow fault.)
+				 * But in mode 1,
+ * we don't get DMA completion interrupt for short packets.
+ *
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
+ /* Experimental: Mode1 works with mass storage use cases */
+ if (use_mode_1) {
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ /*
+ * this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ transfer_size = min_t(unsigned int,
+ request->length -
+ request->actual,
+ channel->max_len);
+ musb_ep->dma->desired_mode = 1;
+ } else {
+ if (!musb_ep->hb_mult &&
+ musb_ep->hw_ep->rx_double_buffered)
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ transfer_size = min(request->length - request->actual,
+ (unsigned)fifo_count);
+ musb_ep->dma->desired_mode = 0;
+ }
+
+ use_dma = c->channel_program(
+ channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size);
+
+ if (use_dma)
+ return;
+ }
+
+ if ((musb_dma_ux500(musb)) &&
+ (request->actual < request->length)) {
+
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ unsigned int transfer_size = 0;
+
+ c = musb->dma_controller;
+ channel = musb_ep->dma;
+
+ /* In case first packet is short */
+ if (fifo_count < musb_ep->packet_sz)
+ transfer_size = fifo_count;
+ else if (request->short_not_ok)
+ transfer_size = min_t(unsigned int,
+ request->length -
+ request->actual,
+ channel->max_len);
+ else
+ transfer_size = min_t(unsigned int,
+ request->length -
+ request->actual,
+ (unsigned)fifo_count);
+
+ csr &= ~MUSB_RXCSR_DMAMODE;
+ csr |= (MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_AUTOCLEAR);
+
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (transfer_size <= musb_ep->packet_sz) {
+ musb_ep->dma->desired_mode = 0;
+ } else {
+ musb_ep->dma->desired_mode = 1;
+ /* Mode must be set after DMAENAB */
+ csr |= MUSB_RXCSR_DMAMODE;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ if (c->channel_program(channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size))
+
+ return;
+ }
+
+ len = request->length - request->actual;
+ musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
+ musb_ep->end_point.name,
+ fifo_count, len,
+ musb_ep->packet_sz);
+
+ fifo_count = min_t(unsigned, len, fifo_count);
+
+ if (tusb_dma_omap(musb)) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+ u32 dma_addr = request->dma + request->actual;
+ int ret;
+
+ ret = c->channel_program(channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ dma_addr,
+ fifo_count);
+ if (ret)
+ return;
+ }
+
+ /*
+ * Unmap the dma buffer back to cpu if dma channel
+ * programming fails. This buffer is mapped if the
+ * channel allocation is successful
+ */
+ unmap_dma_buffer(req, musb);
+
+ /*
+ * Clear DMAENAB and AUTOCLEAR for the
+ * PIO mode transfer
+ */
+ csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+buffer_aint_mapped:
+ fifo_count = min_t(unsigned int,
+ request->length - request->actual,
+ (unsigned int)fifo_count);
+ musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+ (request->buf + request->actual));
+ request->actual += fifo_count;
+
+ /* REVISIT if we left anything in the fifo, flush
+ * it and report -EOVERFLOW
+ */
+
+ /* ack the read! */
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
+	/* reached the end, or a short packet was detected */
+ if (request->actual == request->length ||
+ fifo_count < musb_ep->packet_sz)
+ musb_g_giveback(musb_ep, request, 0);
+}
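The ux500 branch above derives both the transfer size and the DMA mode from the same inputs. A standalone restatement of that decision (a sketch; the function name is illustrative):

#include <stdio.h>

static unsigned int pick_transfer_size(unsigned int remaining,
				       unsigned int fifo_count,
				       unsigned int packet_sz,
				       unsigned int max_len,
				       int short_not_ok, int *mode)
{
	unsigned int size;

	if (fifo_count < packet_sz)		/* first packet already short */
		size = fifo_count;
	else if (short_not_ok)			/* whole transfer in one go */
		size = remaining < max_len ? remaining : max_len;
	else					/* a packet at a time */
		size = remaining < fifo_count ? remaining : fifo_count;

	*mode = size > packet_sz;		/* mode 1 only if multi-packet */
	return size;
}

int main(void)
{
	int mode;
	unsigned int size;

	size = pick_transfer_size(4096, 512, 512, 65536, 1, &mode);
	printf("size=%u mode=%d\n", size, mode);	/* size=4096 mode=1 */

	size = pick_transfer_size(4096, 64, 512, 65536, 0, &mode);
	printf("size=%u mode=%d\n", size, mode);	/* size=64 mode=0 */
	return 0;
}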
+
+/*
+ * Data ready for a request; called from IRQ
+ */
+void musb_g_rx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct musb_request *req;
+ struct usb_request *request;
+ void __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
+
+ musb_ep_select(mbase, epnum);
+
+ req = next_request(musb_ep);
+ if (!req)
+ return;
+
+ trace_musb_req_rx(req);
+ request = &req->request;
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+ musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
+ csr, dma ? " (dma)" : "", request);
+
+ if (csr & MUSB_RXCSR_P_SENTSTALL) {
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ return;
+ }
+
+ if (csr & MUSB_RXCSR_P_OVERRUN) {
+ /* csr |= MUSB_RXCSR_P_WZC_BITS; */
+ csr &= ~MUSB_RXCSR_P_OVERRUN;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
+ if (request->status == -EINPROGRESS)
+ request->status = -EOVERFLOW;
+ }
+ if (csr & MUSB_RXCSR_INCOMPRX) {
+ /* REVISIT not necessarily an error */
+ musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* "should not happen"; likely RXPKTRDY pending for DMA */
+ musb_dbg(musb, "%s busy, csr %04x",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_P_WZC_BITS | csr);
+
+ request->actual += musb_ep->dma->actual_len;
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
+ defined(CONFIG_USB_UX500_DMA)
+ /* Autoclear doesn't clear RxPktRdy for short packets */
+ if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
+ || (dma->actual_len
+ & (musb_ep->packet_sz - 1))) {
+ /* ack the read! */
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* incomplete, and not short? wait for next IN packet */
+ if ((request->actual < request->length)
+ && (musb_ep->dma->actual_len
+ == musb_ep->packet_sz)) {
+			/* In the double-buffered case, continue to unload
+			 * the FIFO if another RX packet is present.
+			 */
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if ((csr & MUSB_RXCSR_RXPKTRDY) &&
+ hw_ep->rx_double_buffered)
+ goto exit;
+ return;
+ }
+#endif
+ musb_g_giveback(musb_ep, request, 0);
+ /*
+		 * In the giveback function the MUSB lock is
+		 * released and re-acquired after some time. During
+		 * that window the INDEX register could get changed
+		 * by the gadget queue function, especially on SMP
+		 * systems. Reselect INDEX to be sure we are
+		 * reading/modifying the right registers.
+ */
+ musb_ep_select(mbase, epnum);
+
+ req = next_request(musb_ep);
+ if (!req)
+ return;
+ }
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
+ defined(CONFIG_USB_UX500_DMA)
+exit:
+#endif
+ /* Analyze request */
+ rxstate(musb, req);
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *hw_ep;
+ void __iomem *regs;
+ struct musb *musb;
+ void __iomem *mbase;
+ u8 epnum;
+ u16 csr;
+ unsigned tmp;
+ int status = -EINVAL;
+
+ if (!ep || !desc)
+ return -EINVAL;
+
+ musb_ep = to_musb_ep(ep);
+ hw_ep = musb_ep->hw_ep;
+ regs = hw_ep->regs;
+ musb = musb_ep->musb;
+ mbase = musb->mregs;
+ epnum = musb_ep->current_epnum;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_ep->desc) {
+ status = -EBUSY;
+ goto fail;
+ }
+ musb_ep->type = usb_endpoint_type(desc);
+
+ /* check direction and (later) maxpacket size against endpoint */
+ if (usb_endpoint_num(desc) != epnum)
+ goto fail;
+
+ /* REVISIT this rules out high bandwidth periodic transfers */
+ tmp = usb_endpoint_maxp_mult(desc) - 1;
+ if (tmp) {
+ int ok;
+
+ if (usb_endpoint_dir_in(desc))
+ ok = musb->hb_iso_tx;
+ else
+ ok = musb->hb_iso_rx;
+
+ if (!ok) {
+ musb_dbg(musb, "no support for high bandwidth ISO");
+ goto fail;
+ }
+ musb_ep->hb_mult = tmp;
+ } else {
+ musb_ep->hb_mult = 0;
+ }
+
+ musb_ep->packet_sz = usb_endpoint_maxp(desc);
+ tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
+
+ /* enable the interrupts for the endpoint, set the endpoint
+ * packet size (or fail), set the mode, clear the fifo
+ */
+ musb_ep_select(mbase, epnum);
+ if (usb_endpoint_dir_in(desc)) {
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 1;
+ if (!musb_ep->is_in)
+ goto fail;
+
+ if (tmp > hw_ep->max_packet_sz_tx) {
+ musb_dbg(musb, "packet size beyond hardware FIFO size");
+ goto fail;
+ }
+
+ musb->intrtxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
+
+ /* REVISIT if can_bulk_split(), use by updating "tmp";
+ * likewise high bandwidth periodic tx
+ */
+ /* Set TXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (can_bulk_split(musb, musb_ep->type))
+ musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
+ musb_ep->packet_sz) - 1;
+ musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+ | (musb_ep->hb_mult << 11));
+
+ csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
+ if (musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_TXCSR_P_ISO;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(regs, MUSB_TXCSR, csr);
+
+ } else {
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 0;
+ if (musb_ep->is_in)
+ goto fail;
+
+ if (tmp > hw_ep->max_packet_sz_rx) {
+ musb_dbg(musb, "packet size beyond hardware FIFO size");
+ goto fail;
+ }
+
+ musb->intrrxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
+
+ /* REVISIT if can_bulk_combine() use by updating "tmp"
+ * likewise high bandwidth periodic rx
+ */
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+ | (musb_ep->hb_mult << 11));
+
+ /* force shared fifo to OUT-only mode */
+ if (hw_ep->is_shared_fifo) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ }
+
+ csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_RXCSR_P_ISO;
+ else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_RXCSR, csr);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* NOTE: all the I/O code _should_ work fine without DMA, in case
+ * for some reason you run out of channels here.
+ */
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep->dma = c->channel_alloc(c, hw_ep,
+ (desc->bEndpointAddress & USB_DIR_IN));
+ } else
+ musb_ep->dma = NULL;
+
+ musb_ep->desc = desc;
+ musb_ep->busy = 0;
+ musb_ep->wedged = 0;
+ status = 0;
+
+ pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+ musb_driver_name, musb_ep->end_point.name,
+ musb_ep_xfertype_string(musb_ep->type),
+ musb_ep->is_in ? "IN" : "OUT",
+ musb_ep->dma ? "dma, " : "",
+ musb_ep->packet_sz);
+
+ schedule_delayed_work(&musb->irq_work, 0);
+
+fail:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
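The maxpacket handling above follows the USB 2.0 wMaxPacketSize layout: bits 10:0 hold the packet size and bits 12:11 the number of additional transactions per microframe. A standalone sketch decoding a hypothetical high-bandwidth ISO value and re-packing it the way TXMAXP/RXMAXP are written above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wMaxPacketSize = 0x1400;	/* hypothetical: 1024 bytes, */
						/* 2 extra transactions     */
	unsigned int packet_sz = wMaxPacketSize & 0x7ff;
	unsigned int mult = ((wMaxPacketSize >> 11) & 3) + 1;
	unsigned int hb_mult = mult - 1;
	unsigned int fifo_needed = packet_sz * (hb_mult + 1);
	uint16_t maxp_reg = packet_sz | (hb_mult << 11);

	printf("maxp=%u mult=%u fifo=%u reg=%#x\n",
	       packet_sz, mult, fifo_needed, maxp_reg);
	/* maxp=1024 mult=3 fifo=3072 reg=0x1400 */
	return 0;
}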
+
+/*
+ * Disable an endpoint flushing all requests queued.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+ unsigned long flags;
+ struct musb *musb;
+ u8 epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+ epnum = musb_ep->current_epnum;
+ epio = musb->endpoints[epnum].regs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(musb->mregs, epnum);
+
+ /* zero the endpoint sizes */
+ if (musb_ep->is_in) {
+ musb->intrtxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
+ musb_writew(epio, MUSB_TXMAXP, 0);
+ } else {
+ musb->intrrxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
+ musb_writew(epio, MUSB_RXMAXP, 0);
+ }
+
+ /* abort all pending DMA and requests */
+ nuke(musb_ep, -ESHUTDOWN);
+
+ musb_ep->desc = NULL;
+ musb_ep->end_point.desc = NULL;
+
+ schedule_delayed_work(&musb->irq_work, 0);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+ musb_dbg(musb, "%s", musb_ep->end_point.name);
+
+ return 0;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb_request *request = NULL;
+
+ request = kzalloc(sizeof *request, gfp_flags);
+ if (!request)
+ return NULL;
+
+ request->request.dma = DMA_ADDR_INVALID;
+ request->epnum = musb_ep->current_epnum;
+ request->ep = musb_ep;
+
+ trace_musb_req_alloc(request);
+ return &request->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ struct musb_request *request = to_musb_request(req);
+
+ trace_musb_req_free(request);
+ kfree(request);
+}
+
+static LIST_HEAD(buffers);
+
+struct free_record {
+ struct list_head list;
+ struct device *dev;
+ unsigned bytes;
+ dma_addr_t dma;
+};
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
+{
+ trace_musb_req_start(req);
+ musb_ep_select(musb->mregs, req->epnum);
+ if (req->tx)
+ txstate(musb, req);
+ else
+ rxstate(musb, req);
+}
+
+static int musb_ep_restart_resume_work(struct musb *musb, void *data)
+{
+ struct musb_request *req = data;
+
+ musb_ep_restart(musb, req);
+
+ return 0;
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep;
+ struct musb_request *request;
+ struct musb *musb;
+ int status;
+ unsigned long lockflags;
+
+ if (!ep || !req)
+ return -EINVAL;
+ if (!req->buf)
+ return -ENODATA;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+
+ request = to_musb_request(req);
+ request->musb = musb;
+
+ if (request->ep != musb_ep)
+ return -EINVAL;
+
+ status = pm_runtime_get(musb->controller);
+ if ((status != -EINPROGRESS) && status < 0) {
+ dev_err(musb->controller,
+ "pm runtime get failed in %s\n",
+ __func__);
+ pm_runtime_put_noidle(musb->controller);
+
+ return status;
+ }
+ status = 0;
+
+ trace_musb_req_enq(request);
+
+ /* request is mine now... */
+ request->request.actual = 0;
+ request->request.status = -EINPROGRESS;
+ request->epnum = musb_ep->current_epnum;
+ request->tx = musb_ep->is_in;
+
+ map_dma_buffer(request, musb, musb_ep);
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ /* don't queue if the ep is down */
+ if (!musb_ep->desc) {
+ musb_dbg(musb, "req %p queued to %s while ep %s",
+ req, ep->name, "disabled");
+ status = -ESHUTDOWN;
+ unmap_dma_buffer(request, musb);
+ goto unlock;
+ }
+
+ /* add request to the list */
+ list_add_tail(&request->list, &musb_ep->req_list);
+
+	/* if this is the head of the queue, start i/o ... */
+ if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
+ status = musb_queue_resume_work(musb,
+ musb_ep_restart_resume_work,
+ request);
+ if (status < 0) {
+ dev_err(musb->controller, "%s resume work: %i\n",
+ __func__, status);
+ list_del(&request->list);
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
+ return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb_request *req = to_musb_request(request);
+ struct musb_request *r;
+ unsigned long flags;
+ int status = 0;
+ struct musb *musb = musb_ep->musb;
+
+ if (!ep || !request || req->ep != musb_ep)
+ return -EINVAL;
+
+ trace_musb_req_deq(req);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ list_for_each_entry(r, &musb_ep->req_list, list) {
+ if (r == req)
+ break;
+ }
+ if (r != req) {
+ dev_err(musb->controller, "request %p not queued to %s\n",
+ request, ep->name);
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* if the hardware doesn't have the request, easy ... */
+ if (musb_ep->req_list.next != &req->list || musb_ep->busy)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+
+ /* ... else abort the dma transfer ... */
+ else if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep_select(musb->mregs, musb_ep->current_epnum);
+ if (c->channel_abort)
+ status = c->channel_abort(musb_ep->dma);
+ else
+ status = -EBUSY;
+ if (status == 0)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ } else {
+ /* NOTE: by sticking to easily tested hardware/driver states,
+ * we leave counting of in-flight packets imprecise.
+ */
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ }
+
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't transmit
+ * or receive any data, but new requests may still be queued to it.
+ *
+ * exported to ep0 code
+ */
+static int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ u8 epnum = musb_ep->current_epnum;
+ struct musb *musb = musb_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr;
+ struct musb_request *request;
+ int status = 0;
+
+ if (!ep)
+ return -EINVAL;
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
+ status = -EINVAL;
+ goto done;
+ }
+
+ musb_ep_select(mbase, epnum);
+
+ request = next_request(musb_ep);
+ if (value) {
+ if (request) {
+ musb_dbg(musb, "request in progress, cannot halt %s",
+ ep->name);
+ status = -EAGAIN;
+ goto done;
+ }
+ /* Cannot portably stall with non-empty FIFO */
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ musb_dbg(musb, "FIFO busy, cannot halt %s",
+ ep->name);
+ status = -EAGAIN;
+ goto done;
+ }
+ }
+ } else
+ musb_ep->wedged = 0;
+
+ /* set/clear the stall and toggle bits */
+ musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_P_WZC_BITS
+ | MUSB_TXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_TXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_P_SENTSTALL);
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_WZC_BITS
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_RXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* maybe start the first request in the queue */
+ if (!musb_ep->busy && !value && request) {
+ musb_dbg(musb, "restarting the request");
+ musb_ep_restart(musb, request);
+ }
+
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Set the halt feature such that host clear-halt requests are ignored
+ * until the function driver clears the wedge.
+ */
+static int musb_gadget_set_wedge(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+
+ if (!ep)
+ return -EINVAL;
+
+ musb_ep->wedged = 1;
+
+ return usb_ep_set_halt(ep);
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ void __iomem *epio = musb_ep->hw_ep->regs;
+ int retval = -EINVAL;
+
+ if (musb_ep->desc && !musb_ep->is_in) {
+ struct musb *musb = musb_ep->musb;
+ int epnum = musb_ep->current_epnum;
+ void __iomem *mbase = musb->mregs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb_ep_select(mbase, epnum);
+ /* FIXME return zero unless RXPKTRDY is set */
+ retval = musb_readw(epio, MUSB_RXCOUNT);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb *musb = musb_ep->musb;
+ u8 epnum = musb_ep->current_epnum;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr;
+
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(mbase, (u8) epnum);
+
+ /* disable interrupts */
+ musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
+
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ /*
+			 * Setting both TXPKTRDY and FLUSHFIFO makes the
+			 * controller interrupt the current FIFO loading,
+			 * but not flush the packets already loaded.
+ */
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
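+		/*
+		 * Write twice: with double-buffered FIFOs, the second
+		 * write presumably flushes the second queued packet.
+		 */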
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static const struct usb_ep_ops musb_ep_ops = {
+ .enable = musb_gadget_enable,
+ .disable = musb_gadget_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_gadget_queue,
+ .dequeue = musb_gadget_dequeue,
+ .set_halt = musb_gadget_set_halt,
+ .set_wedge = musb_gadget_set_wedge,
+ .fifo_status = musb_gadget_fifo_status,
+ .fifo_flush = musb_gadget_fifo_flush
+};
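+
+/*
+ * Illustration only (not part of this driver): gadget function drivers
+ * reach these ops through the usb_ep API rather than calling them
+ * directly, e.g.:
+ *
+ *	usb_ep_enable(ep);			calls ->enable()
+ *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *						calls ->alloc_request()
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);	calls ->queue()
+ *	usb_ep_dequeue(ep, req);		calls ->dequeue()
+ */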
+
+/* ----------------------------------------------------------------------- */
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return (int)musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ void __iomem *mregs = musb->mregs;
+ unsigned long flags;
+ int status = -EINVAL;
+ u8 power, devctl;
+ int retries;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_PERIPHERAL:
+ /* NOTE: OTG state machine doesn't include B_SUSPENDED;
+ * that's part of the standard usb 1.1 state machine, and
+ * doesn't affect OTG transitions.
+ */
+ if (musb->may_wakeup && musb->is_suspended)
+ break;
+ goto done;
+ case OTG_STATE_B_IDLE:
+ /* Start SRP ... OTG not required. */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mregs, MUSB_DEVCTL, devctl);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
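+		/* wait for the controller to latch the session request ... */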
+ retries = 100;
+ while (!(devctl & MUSB_DEVCTL_SESSION)) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
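+		/* ... then for SRP to finish and the session bit to clear */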
+ retries = 10000;
+ while (devctl & MUSB_DEVCTL_SESSION) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ otg_start_srp(musb->xceiv->otg);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Block idling for at least 1s */
+ musb_platform_try_idle(musb,
+				jiffies + msecs_to_jiffies(1000));
+
+ status = 0;
+ goto done;
+ default:
+ musb_dbg(musb, "Unhandled wake: %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ goto done;
+ }
+
+ status = 0;
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+ musb_dbg(musb, "issue wakeup");
+
+ /* FIXME do this next chunk in a timer callback, no udelay */
+ mdelay(2);
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ gadget->is_selfpowered = !!is_selfpowered;
+ return 0;
+}
+
+static void musb_pullup(struct musb *musb, int is_on)
+{
+ u8 power;
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ if (is_on)
+ power |= MUSB_POWER_SOFTCONN;
+ else
+ power &= ~MUSB_POWER_SOFTCONN;
+
+ /* FIXME if on, HdrcStart; if off, HdrcStop */
+
+ musb_dbg(musb, "gadget D+ pullup %s",
+ is_on ? "on" : "off");
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct musb *musb = gadget_to_musb(gadget);
+
+	musb_dbg(musb, "<= %s =>", __func__);
+
+ /*
+ * FIXME iff driver's softconnect flag is set (as it is during probe,
+ * though that can clear it), just musb_pullup().
+ */
+
+ return -EINVAL;
+}
+#endif
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return usb_phy_set_power(musb->xceiv, mA);
+}
+
+static void musb_gadget_work(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+
+ musb = container_of(work, struct musb, gadget_work.work);
+ pm_runtime_get_sync(musb->controller);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_pullup(musb, musb->softconnect);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+}
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ unsigned long flags;
+
+ is_on = !!is_on;
+
+ /* NOTE: this assumes we are sensing vbus; we'd rather
+ * not pullup unless the B-session is active.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (is_on != musb->softconnect) {
+ musb->softconnect = is_on;
+ schedule_delayed_work(&musb->gadget_work, 0);
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return 0;
+}
+
+static int musb_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int musb_gadget_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+ .get_frame = musb_gadget_get_frame,
+ .wakeup = musb_gadget_wakeup,
+ .set_selfpowered = musb_gadget_set_self_powered,
+ /* .vbus_session = musb_gadget_vbus_session, */
+ .vbus_draw = musb_gadget_vbus_draw,
+ .pullup = musb_gadget_pullup,
+ .udc_start = musb_gadget_start,
+ .udc_stop = musb_gadget_stop,
+};
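+
+/*
+ * Illustration only: the UDC core invokes these through the usb_gadget
+ * API; e.g. usb_gadget_connect() ends up in musb_gadget_pullup(..., 1)
+ * and usb_gadget_wakeup() in musb_gadget_wakeup().
+ */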
+
+/* ----------------------------------------------------------------------- */
+
+/* Registration */
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port. It assumes
+ * all peripheral ports are external...
+ */
+
+static void
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+{
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+
+ memset(ep, 0, sizeof *ep);
+
+ ep->current_epnum = epnum;
+ ep->musb = musb;
+ ep->hw_ep = hw_ep;
+ ep->is_in = is_in;
+
+ INIT_LIST_HEAD(&ep->req_list);
+
+ sprintf(ep->name, "ep%d%s", epnum,
+ (!epnum || hw_ep->is_shared_fifo) ? "" : (
+ is_in ? "in" : "out"));
+ ep->end_point.name = ep->name;
+ INIT_LIST_HEAD(&ep->end_point.ep_list);
+ if (!epnum) {
+ usb_ep_set_maxpacket_limit(&ep->end_point, 64);
+ ep->end_point.caps.type_control = true;
+ ep->end_point.ops = &musb_g_ep0_ops;
+ musb->g.ep0 = &ep->end_point;
+ } else {
+ if (is_in)
+ usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
+ else
+ usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
+ ep->end_point.caps.type_iso = true;
+ ep->end_point.caps.type_bulk = true;
+ ep->end_point.caps.type_int = true;
+ ep->end_point.ops = &musb_ep_ops;
+ list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+ }
+
+ if (!epnum || hw_ep->is_shared_fifo) {
+ ep->end_point.caps.dir_in = true;
+ ep->end_point.caps.dir_out = true;
+ } else if (is_in)
+ ep->end_point.caps.dir_in = true;
+ else
+ ep->end_point.caps.dir_out = true;
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void musb_g_init_endpoints(struct musb *musb)
+{
+ u8 epnum;
+ struct musb_hw_ep *hw_ep;
+ unsigned count = 0;
+
+ /* initialize endpoint list just once */
+ INIT_LIST_HEAD(&(musb->g.ep_list));
+
+ for (epnum = 0, hw_ep = musb->endpoints;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
+ count++;
+ } else {
+ if (hw_ep->max_packet_sz_tx) {
+ init_peripheral_ep(musb, &hw_ep->ep_in,
+ epnum, 1);
+ count++;
+ }
+ if (hw_ep->max_packet_sz_rx) {
+ init_peripheral_ep(musb, &hw_ep->ep_out,
+ epnum, 0);
+ count++;
+ }
+ }
+ }
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int musb_gadget_setup(struct musb *musb)
+{
+ int status;
+
+ /* REVISIT minor race: if (erroneously) setting up two
+ * musb peripherals at the same time, only the bus lock
+ * is probably held.
+ */
+
+ musb->g.ops = &musb_gadget_operations;
+ musb->g.max_speed = USB_SPEED_HIGH;
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+
+ /* this "gadget" abstracts/virtualizes the controller */
+ musb->g.name = musb_driver_name;
+ /* don't support otg protocols */
+ musb->g.is_otg = 0;
+ INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
+ musb_g_init_endpoints(musb);
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+
+ status = usb_add_gadget_udc(musb->controller, &musb->g);
+ if (status)
+ goto err;
+
+ return 0;
+err:
+ musb->g.dev.parent = NULL;
+ device_unregister(&musb->g.dev);
+ return status;
+}
+
+void musb_gadget_cleanup(struct musb *musb)
+{
+ if (musb->port_mode == MUSB_HOST)
+ return;
+
+ cancel_delayed_work_sync(&musb->gadget_work);
+ usb_del_gadget_udc(&musb->g);
+}
+
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * Returns 0 on success, or a negative errno:
+ *	-EINVAL	something went wrong (not driver)
+ *	-EBUSY	another gadget is already using the controller
+ *	-ENOMEM	no memory to perform the operation
+ */
+static int musb_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct musb *musb = gadget_to_musb(g);
+ struct usb_otg *otg = musb->xceiv->otg;
+ unsigned long flags;
+ int retval = 0;
+
+ if (driver->max_speed < USB_SPEED_HIGH) {
+ retval = -EINVAL;
+ goto err;
+ }
+
+ pm_runtime_get_sync(musb->controller);
+
+ musb->softconnect = 0;
+ musb->gadget_driver = driver;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->is_active = 1;
+
+ otg_set_peripheral(otg, &musb->g);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ musb_start(musb);
+
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ if (musb->xceiv->last_event == USB_EVENT_ID)
+ musb_platform_set_vbus(musb, 1);
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
+ return 0;
+
+err:
+ return retval;
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ */
+static int musb_gadget_stop(struct usb_gadget *g)
+{
+ struct musb *musb = gadget_to_musb(g);
+ unsigned long flags;
+
+ pm_runtime_get_sync(musb->controller);
+
+ /*
+ * REVISIT always use otg_set_peripheral() here too;
+ * this needs to shut down the OTG engine.
+ */
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb_hnp_stop(musb);
+
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
+ musb_stop(musb);
+ otg_set_peripheral(musb->xceiv->otg, NULL);
+
+ musb->is_active = 0;
+ musb->gadget_driver = NULL;
+ musb_platform_try_idle(musb, 0);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /*
+ * FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
+
+ /* Force check of devctl register for PM runtime */
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *musb)
+{
+ musb->is_suspended = 0;
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_IDLE:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_active = 1;
+ if (musb->gadget_driver && musb->gadget_driver->resume) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->resume(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ WARNING("unhandled RESUME transition (%s)\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *musb)
+{
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_IDLE:
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_suspended = 1;
+ if (musb->gadget_driver && musb->gadget_driver->suspend) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->suspend(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+ * A_PERIPHERAL may need care too
+ */
+ WARNING("unhandled SUSPEND transition (%s)",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+}
+
+/* Called during SRP */
+void musb_g_wakeup(struct musb *musb)
+{
+ musb_gadget_wakeup(&musb->g);
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *musb)
+{
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+
+ musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
+
+ /* clear HR */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
+
+ /* don't draw vbus until new b-default session */
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->g.speed = USB_SPEED_UNKNOWN;
+ if (musb->gadget_driver && musb->gadget_driver->disconnect) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+
+ switch (musb->xceiv->otg->state) {
+ default:
+ musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+ MUSB_HST_MODE(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ break;
+ }
+
+ musb->is_active = 0;
+}
+
+void musb_g_reset(struct musb *musb)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
+ u8 power;
+
+ musb_dbg(musb, "<== %s driver '%s'",
+ (devctl & MUSB_DEVCTL_BDEVICE)
+ ? "B-Device" : "A-Device",
+ musb->gadget_driver
+ ? musb->gadget_driver->driver.name
+ : NULL
+ );
+
+ /* report reset, if we didn't already (flushing EP state) */
+ if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
+ spin_unlock(&musb->lock);
+ usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
+ spin_lock(&musb->lock);
+ }
+
+ /* clear HR */
+ else if (devctl & MUSB_DEVCTL_HR)
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+
+
+ /* what speed did we negotiate? */
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ /* start in USB_STATE_DEFAULT */
+ musb->is_active = 1;
+ musb->is_suspended = 0;
+ MUSB_DEV_MODE(musb);
+ musb->address = 0;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+
+ musb->may_wakeup = 0;
+ musb->g.b_hnp_enable = 0;
+ musb->g.a_alt_hnp_support = 0;
+ musb->g.a_hnp_support = 0;
+ musb->g.quirk_zlp_not_supp = 1;
+
+ /* Normal reset, as B-Device;
+ * or else after HNP, as A-Device
+ */
+ if (!musb->g.is_otg) {
+ /* USB device controllers that are not OTG compatible
+ * may not have DEVCTL register in silicon.
+ * In that case, do not rely on devctl for setting
+ * peripheral mode.
+ */
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else {
+ musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ }
+
+ /* start with default limits on VBUS power draw */
+ (void) musb_gadget_vbus_draw(&musb->g, 8);
+}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 000000000..f49f25b3b
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver peripheral defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+#include <linux/list.h>
+
+#if IS_ENABLED(CONFIG_USB_MUSB_GADGET) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+extern int musb_gadget_setup(struct musb *);
+
+#else
+static inline irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+ return 0;
+}
+
+static inline void musb_g_tx(struct musb *musb, u8 epnum) {}
+static inline void musb_g_rx(struct musb *musb, u8 epnum) {}
+static inline void musb_g_reset(struct musb *musb) {}
+static inline void musb_g_suspend(struct musb *musb) {}
+static inline void musb_g_resume(struct musb *musb) {}
+static inline void musb_g_wakeup(struct musb *musb) {}
+static inline void musb_g_disconnect(struct musb *musb) {}
+static inline void musb_gadget_cleanup(struct musb *musb) {}
+static inline int musb_gadget_setup(struct musb *musb)
+{
+ return 0;
+}
+#endif
+
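+/*
+ * DMA mapping state of a request buffer: UN_MAPPED (not DMA-mapped),
+ * PRE_MAPPED (the caller supplied an already-mapped DMA address), or
+ * MUSB_MAPPED (this driver mapped the buffer and must unmap it again).
+ */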
+enum buffer_map_state {
+ UN_MAPPED = 0,
+ PRE_MAPPED,
+ MUSB_MAPPED
+};
+
+struct musb_request {
+ struct usb_request request;
+ struct list_head list;
+ struct musb_ep *ep;
+ struct musb *musb;
+ u8 tx; /* endpoint direction */
+ u8 epnum;
+ enum buffer_map_state map_state;
+};
+
+#define to_musb_request(r) container_of((r), struct musb_request, request)
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
+
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+ /* stuff towards the head is basically write-once. */
+ struct usb_ep end_point;
+ char name[12];
+ struct musb_hw_ep *hw_ep;
+ struct musb *musb;
+ u8 current_epnum;
+
+ /* ... when enabled/disabled ... */
+ u8 type;
+ u8 is_in;
+ u16 packet_sz;
+ const struct usb_endpoint_descriptor *desc;
+ struct dma_channel *dma;
+
+ /* later things are modified based on usage */
+ struct list_head req_list;
+
+ u8 wedged;
+
+ /* true if lock must be dropped but req_list may not be advanced */
+ u8 busy;
+
+ u8 hb_mult;
+};
+
+#define to_musb_ep(ep) container_of((ep), struct musb_ep, end_point)
+
+static inline struct musb_request *next_request(struct musb_ep *ep)
+{
+ struct list_head *queue = &ep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+ return container_of(queue->next, struct musb_request, list);
+}
+
+extern const struct usb_ep_ops musb_g_ep0_ops;
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
+#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 000000000..6d7336727
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG peripheral driver ep0 handling
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musb_core.h"
+
+/* ep0 is always musb->endpoints[0].ep_in */
+#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
+
+/*
+ * locking note: we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers. And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
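+
+/*
+ * The drop/re-acquire pattern looks like this (see forward_to_driver()
+ * below):
+ *
+ *	spin_unlock(&musb->lock);
+ *	status = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ *	spin_lock(&musb->lock);
+ */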
+
+static char *decode_ep0stage(u8 stage)
+{
+ switch (stage) {
+ case MUSB_EP0_STAGE_IDLE: return "idle";
+ case MUSB_EP0_STAGE_SETUP: return "setup";
+ case MUSB_EP0_STAGE_TX: return "in";
+ case MUSB_EP0_STAGE_RX: return "out";
+ case MUSB_EP0_STAGE_ACKWAIT: return "wait";
+ case MUSB_EP0_STAGE_STATUSIN: return "in/status";
+ case MUSB_EP0_STAGE_STATUSOUT: return "out/status";
+ default: return "?";
+ }
+}
+
+/* handle a standard GET_STATUS request
+ * Context: caller holds controller lock
+ */
+static int service_tx_status_request(
+ struct musb *musb,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ void __iomem *mbase = musb->mregs;
+ int handled = 1;
+ u8 result[2], epnum = 0;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ result[1] = 0;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ result[0] = musb->g.is_selfpowered << USB_DEVICE_SELF_POWERED;
+ result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ if (musb->g.is_otg) {
+ result[0] |= musb->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ result[0] |= musb->g.a_alt_hnp_support
+ << USB_DEVICE_A_ALT_HNP_SUPPORT;
+ result[0] |= musb->g.a_hnp_support
+ << USB_DEVICE_A_HNP_SUPPORT;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ result[0] = 0;
+ break;
+
+ case USB_RECIP_ENDPOINT: {
+ int is_in;
+ struct musb_ep *ep;
+ u16 tmp;
+ void __iomem *regs;
+
+ epnum = (u8) ctrlrequest->wIndex;
+ if (!epnum) {
+ result[0] = 0;
+ break;
+ }
+
+ is_in = epnum & USB_DIR_IN;
+ epnum &= 0x0f;
+ if (epnum >= MUSB_C_NUM_EPS) {
+ handled = -EINVAL;
+ break;
+ }
+
+ if (is_in)
+ ep = &musb->endpoints[epnum].ep_in;
+ else
+ ep = &musb->endpoints[epnum].ep_out;
+ regs = musb->endpoints[epnum].regs;
+
+ if (!ep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+
+ musb_ep_select(mbase, epnum);
+ if (is_in)
+ tmp = musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_P_SENDSTALL;
+ else
+ tmp = musb_readw(regs, MUSB_RXCSR)
+ & MUSB_RXCSR_P_SENDSTALL;
+ musb_ep_select(mbase, 0);
+
+ result[0] = tmp ? 1 : 0;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ /* fill up the fifo; caller updates csr0 */
+ if (handled > 0) {
+ u16 len = le16_to_cpu(ctrlrequest->wLength);
+
+ if (len > 2)
+ len = 2;
+ musb_write_fifo(&musb->endpoints[0], len, result);
+ }
+
+ return handled;
+}
+
+/*
+ * Handle a control-IN request: the ep0 buffer contains the current request,
+ * which is expected to be a standard control request. Assumes the FIFO is
+ * at least 2 bytes long.
+ *
+ * @return 0 if the request was NOT HANDLED,
+ * < 0 when error
+ * > 0 when the request is processed
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+{
+ int handled = 0; /* not handled */
+
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_GET_STATUS:
+ handled = service_tx_status_request(musb,
+ ctrlrequest);
+ break;
+
+ /* case USB_REQ_SYNC_FRAME: */
+
+ default:
+ break;
+ }
+ }
+ return handled;
+}
+
+/*
+ * Context: caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
+{
+ musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+}
+
+/*
+ * Tries to start B-device HNP negotiation if enabled via sysfs
+ */
+static inline void musb_try_b_hnp_enable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl;
+
+ musb_dbg(musb, "HNP: Setting HR");
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+ musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
+}
+
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ * always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ * always handled here, except for class/vendor/... features
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *musb,
+ struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int handled = -EINVAL;
+ void __iomem *mbase = musb->mregs;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ /* the gadget driver handles everything except what we MUST handle */
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ musb->set_address = true;
+ musb->address = (u8) (ctrlrequest->wValue & 0x7f);
+ handled = 1;
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ if (ctrlrequest->wValue
+ != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+ musb->may_wakeup = 0;
+ handled = 1;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:{
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ struct musb_request *request;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
+
+ if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+ ctrlrequest->wValue != USB_ENDPOINT_HALT)
+ break;
+
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
+ else
+ musb_ep = &ep->ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ handled = 1;
+ /* Ignore request if endpoint is wedged */
+ if (musb_ep->wedged)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_CLRDATATOG |
+ MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL |
+ MUSB_TXCSR_P_SENTSTALL |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_CLRDATATOG |
+ MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL |
+ MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* Maybe start the first request in the queue */
+ request = next_request(musb_ep);
+ if (!musb_ep->busy && request) {
+ musb_dbg(musb, "restarting the request");
+ musb_ep_restart(musb, request);
+ }
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ } break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ handled = 1;
+ switch (ctrlrequest->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ musb->may_wakeup = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (musb->g.speed != USB_SPEED_HIGH)
+ goto stall;
+ if (ctrlrequest->wIndex & 0xff)
+ goto stall;
+
+ switch (ctrlrequest->wIndex >> 8) {
+ case USB_TEST_J:
+ pr_debug("USB_TEST_J\n");
+ musb->test_mode_nr =
+ MUSB_TEST_J;
+ break;
+ case USB_TEST_K:
+ pr_debug("USB_TEST_K\n");
+ musb->test_mode_nr =
+ MUSB_TEST_K;
+ break;
+ case USB_TEST_SE0_NAK:
+ pr_debug("USB_TEST_SE0_NAK\n");
+ musb->test_mode_nr =
+ MUSB_TEST_SE0_NAK;
+ break;
+ case USB_TEST_PACKET:
+ pr_debug("USB_TEST_PACKET\n");
+ musb->test_mode_nr =
+ MUSB_TEST_PACKET;
+ break;
+
+ case 0xc0:
+ /* TEST_FORCE_HS */
+ pr_debug("TEST_FORCE_HS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HS;
+ break;
+ case 0xc1:
+ /* TEST_FORCE_FS */
+ pr_debug("TEST_FORCE_FS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_FS;
+ break;
+ case 0xc2:
+ /* TEST_FIFO_ACCESS */
+ pr_debug("TEST_FIFO_ACCESS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FIFO_ACCESS;
+ break;
+ case 0xc3:
+ /* TEST_FORCE_HOST */
+ pr_debug("TEST_FORCE_HOST\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HOST;
+ break;
+ default:
+ goto stall;
+ }
+
+ /* enter test mode after irq */
+ if (handled > 0)
+ musb->test_mode = true;
+ break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.b_hnp_enable = 1;
+ musb_try_b_hnp_enable(musb);
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_alt_hnp_support = 1;
+ break;
+ case USB_DEVICE_DEBUG_MODE:
+ handled = 0;
+ break;
+stall:
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_ENDPOINT:{
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
+
+ if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+ ctrlrequest->wValue != USB_ENDPOINT_HALT)
+ break;
+
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
+ else
+ musb_ep = &ep->ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_CLRDATATOG
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG
+ | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+ default:
+ /* delegate SET_CONFIGURATION, etc */
+ handled = 0;
+ }
+ } else
+ handled = 0;
+ return handled;
+}
+
+/* we have an ep0out data packet
+ * Context: caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct musb_request *request;
+ struct usb_request *req;
+ u16 count, csr;
+
+ request = next_ep0_request(musb);
+ req = &request->request;
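+	/* note: 'request' may be NULL; &request->request is then NULL too,
+	 * since the usb_request is the first member of struct musb_request
+	 */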
+
+ /* read packet and ack; or stall because of gadget driver bug:
+ * should have provided the rx buffer before setup() returned.
+ */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned len = req->length - req->actual;
+
+ /* read the buffer */
+ count = musb_readb(regs, MUSB_COUNT0);
+ if (count > len) {
+ req->status = -EOVERFLOW;
+ count = len;
+ }
+ if (count > 0) {
+ musb_read_fifo(&musb->endpoints[0], count, buf);
+ req->actual += count;
+ }
+ csr = MUSB_CSR0_P_SVDRXPKTRDY;
+ if (count < 64 || req->actual == req->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ csr |= MUSB_CSR0_P_DATAEND;
+ } else
+ req = NULL;
+ } else
+ csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+
+
+ /* Completion handler may choose to stall, e.g. because the
+ * message just received holds invalid data.
+ */
+ if (req) {
+ musb->ackpend = csr;
+ musb_g_ep0_giveback(musb, req);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+ musb_ep_select(musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * transmitting to the host (IN), this code might be called from IRQ
+ * and from kernel thread.
+ *
+ * Context: caller holds controller lock
+ */
+static void ep0_txstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct musb_request *req = next_ep0_request(musb);
+ struct usb_request *request;
+ u16 csr = MUSB_CSR0_TXPKTRDY;
+ u8 *fifo_src;
+ u8 fifo_count;
+
+ if (!req) {
+ /* WARN_ON(1); */
+ musb_dbg(musb, "odd; csr0 %04x", musb_readw(regs, MUSB_CSR0));
+ return;
+ }
+
+ request = &req->request;
+
+ /* load the data */
+ fifo_src = (u8 *) request->buf + request->actual;
+ fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+ request->length - request->actual);
+ musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+ request->actual += fifo_count;
+
+ /* update the flags */
+ if (fifo_count < MUSB_MAX_END0_PACKET
+ || (request->actual == request->length
+ && !request->zero)) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ csr |= MUSB_CSR0_P_DATAEND;
+ } else
+ request = NULL;
+
+ /* report completions as soon as the fifo's loaded; there's no
+ * win in waiting till this last packet gets acked. (other than
+ * very precise fault reporting, needed by USB TMC; possible with
+ * this hardware, but not usable from portable gadget drivers.)
+ */
+ if (request) {
+ musb->ackpend = csr;
+ musb_g_ep0_giveback(musb, request);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ musb_ep_select(musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context: caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+{
+ struct musb_request *r;
+ void __iomem *regs = musb->control_ep->regs;
+
+ musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+ /* NOTE: earlier 2.6 versions changed setup packets to host
+ * order, but now USB packets always stay in USB byte order.
+ */
+ musb_dbg(musb, "SETUP req%02x.%02x v%04x i%04x l%d",
+ req->bRequestType,
+ req->bRequest,
+ le16_to_cpu(req->wValue),
+ le16_to_cpu(req->wIndex),
+ le16_to_cpu(req->wLength));
+
+ /* clean up any leftover transfers */
+ r = next_ep0_request(musb);
+ if (r)
+ musb_g_ep0_giveback(musb, &r->request);
+
+ /* For zero-data requests we want to delay the STATUS stage to
+ * avoid SETUPEND errors. If we read data (OUT), delay accepting
+ * packets until there's a buffer to store them in.
+ *
+ * If we write data, the controller acts happier if we enable
+ * the TX FIFO right away, and give the controller a moment
+ * to switch modes...
+ */
+ musb->set_address = false;
+ musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
+ if (req->wLength == 0) {
+ if (req->bRequestType & USB_DIR_IN)
+ musb->ackpend |= MUSB_CSR0_TXPKTRDY;
+ musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
+ } else if (req->bRequestType & USB_DIR_IN) {
+ musb->ep0_state = MUSB_EP0_STAGE_TX;
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
+ while ((musb_readw(regs, MUSB_CSR0)
+ & MUSB_CSR0_RXPKTRDY) != 0)
+ cpu_relax();
+ musb->ackpend = 0;
+ } else
+ musb->ep0_state = MUSB_EP0_STAGE_RX;
+}
+
+static int
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int retval;
+ if (!musb->gadget_driver)
+ return -EOPNOTSUPP;
+ spin_unlock(&musb->lock);
+ retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ spin_lock(&musb->lock);
+ return retval;
+}
+
+/*
+ * Handle peripheral ep0 interrupt
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+ u16 csr;
+ u16 len;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *regs = musb->endpoints[0].regs;
+ irqreturn_t retval = IRQ_NONE;
+
+ musb_ep_select(mbase, 0); /* select ep0 */
+ csr = musb_readw(regs, MUSB_CSR0);
+ len = musb_readb(regs, MUSB_COUNT0);
+
+ musb_dbg(musb, "csr %04x, count %d, ep0stage %s",
+ csr, len, decode_ep0stage(musb->ep0_state));
+
+ if (csr & MUSB_CSR0_P_DATAEND) {
+ /*
+		 * If DATAEND is set, the status stage is not yet
+		 * complete, so we must not call the callback.
+ */
+ return IRQ_HANDLED;
+ }
+
+	/* we sent a stall; acknowledge it now */
+ if (csr & MUSB_CSR0_P_SENTSTALL) {
+ musb_writew(regs, MUSB_CSR0,
+ csr & ~MUSB_CSR0_P_SENTSTALL);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+ csr = musb_readw(regs, MUSB_CSR0);
+ }
+
+ /* request ended "early" */
+ if (csr & MUSB_CSR0_P_SETUPEND) {
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
+ retval = IRQ_HANDLED;
+ /* Transition into the early status phase */
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_TX:
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ break;
+ case MUSB_EP0_STAGE_RX:
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ break;
+ default:
+			ERR("SetupEnd came in wrong ep0stage %s\n",
+ decode_ep0stage(musb->ep0_state));
+ }
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* NOTE: request may need completion */
+ }
+
+ /* docs from Mentor only describe tx, rx, and idle/setup states.
+ * we need to handle nuances around status stages, and also the
+ * case where status and setup stages come back-to-back ...
+ */
+ switch (musb->ep0_state) {
+
+ case MUSB_EP0_STAGE_TX:
+ /* irq on clearing txpktrdy */
+ if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
+ ep0_txstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_RX:
+ /* irq on set rxpktrdy */
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ ep0_rxstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_STATUSIN:
+ /* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+ /* update address (if needed) only @ the end of the
+ * status phase per usb spec, which also guarantees
+ * we get 10 msec to receive this irq... until this
+ * is done we won't see the next packet.
+ */
+ if (musb->set_address) {
+ musb->set_address = false;
+ musb_writeb(mbase, MUSB_FADDR, musb->address);
+ }
+
+ /* enter test mode if needed (exit by reset) */
+ else if (musb->test_mode) {
+ musb_dbg(musb, "entering TESTMODE");
+
+ if (MUSB_TEST_PACKET == musb->test_mode_nr)
+ musb_load_testpacket(musb);
+
+ musb_writeb(mbase, MUSB_TESTMODE,
+ musb->test_mode_nr);
+ }
+ fallthrough;
+
+ case MUSB_EP0_STAGE_STATUSOUT:
+ /* end of sequence #1: write to host (TX state) */
+ {
+ struct musb_request *req;
+
+ req = next_ep0_request(musb);
+ if (req)
+ musb_g_ep0_giveback(musb, &req->request);
+ }
+
+ /*
+		 * In case several interrupts get coalesced,
+ * check to see if we've already received a SETUP packet...
+ */
+ if (csr & MUSB_CSR0_RXPKTRDY)
+ goto setup;
+
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+ break;
+
+ case MUSB_EP0_STAGE_IDLE:
+ /*
+		 * This state is typically (but not always) indiscernible
+		 * from the status states, since the corresponding interrupts
+		 * tend to happen too close together (with only a zero-length
+		 * packet in between) and so get coalesced...
+ */
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ fallthrough;
+
+ case MUSB_EP0_STAGE_SETUP:
+setup:
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ struct usb_ctrlrequest setup;
+ int handled = 0;
+
+ if (len != 8) {
+ ERR("SETUP packet len %d != 8 ?\n", len);
+ break;
+ }
+ musb_read_setup(musb, &setup);
+ retval = IRQ_HANDLED;
+
+ /* sometimes the RESET won't be reported */
+ if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
+ u8 power;
+
+ printk(KERN_NOTICE "%s: peripheral reset "
+ "irq lost!\n",
+ musb_driver_name);
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ }
+
+ switch (musb->ep0_state) {
+
+ /* sequence #3 (no data stage), includes requests
+ * we can't forward (notably SET_ADDRESS and the
+ * device/endpoint feature set/clear operations)
+ * plus SET_CONFIGURATION and others we must
+ */
+ case MUSB_EP0_STAGE_ACKWAIT:
+ handled = service_zero_data_request(
+ musb, &setup);
+
+ /*
+ * We're expecting no data in any case, so
+ * always set the DATAEND bit -- doing this
+ * here helps avoid SetupEnd interrupt coming
+ * in the idle stage when we're stalling...
+ */
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
+
+ /* status stage might be immediate */
+ if (handled > 0)
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSIN;
+ break;
+
+ /* sequence #1 (IN to host), includes GET_STATUS
+ * requests that we can't forward, GET_DESCRIPTOR
+ * and others that we must
+ */
+ case MUSB_EP0_STAGE_TX:
+ handled = service_in_request(musb, &setup);
+ if (handled > 0) {
+ musb->ackpend = MUSB_CSR0_TXPKTRDY
+ | MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSOUT;
+ }
+ break;
+
+ /* sequence #2 (OUT from host), always forward */
+ default: /* MUSB_EP0_STAGE_RX */
+ break;
+ }
+
+ musb_dbg(musb, "handled %d, csr %04x, ep0stage %s",
+ handled, csr,
+ decode_ep0stage(musb->ep0_state));
+
+ /* unless we need to delegate this to the gadget
+ * driver, we know how to wrap this up: csr0 has
+ * not yet been written.
+ */
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(musb, &setup);
+ if (handled < 0) {
+ musb_ep_select(mbase, 0);
+stall:
+ musb_dbg(musb, "stall (%d)", handled);
+ musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+finish:
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend);
+ musb->ackpend = 0;
+ }
+ }
+ break;
+
+ case MUSB_EP0_STAGE_ACKWAIT:
+ /* This should not happen. But happens with tusb6010 with
+ * g_file_storage and high speed. Do nothing.
+ */
+ retval = IRQ_HANDLED;
+ break;
+
+ default:
+ /* "can't happen" */
+ WARN_ON(1);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+ break;
+ }
+
+ return retval;
+}
+
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+ struct musb_ep *ep;
+ struct musb_request *req;
+ struct musb *musb;
+ int status;
+ unsigned long lockflags;
+ void __iomem *regs;
+
+ if (!e || !r)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ regs = musb->control_ep->regs;
+
+ req = to_musb_request(r);
+ req->musb = musb;
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->tx = ep->is_in;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */
+ status = 0;
+ break;
+ default:
+ musb_dbg(musb, "ep0 request queued in state %d",
+ musb->ep0_state);
+ status = -EINVAL;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&req->list, &ep->req_list);
+
+ musb_dbg(musb, "queue to %s (%s), length=%d",
+ ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+ req->request.length);
+
+ musb_ep_select(musb->mregs, 0);
+
+ /* sequence #1, IN ... start writing the data */
+ if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+ ep0_txstate(musb);
+
+ /* sequence #3, no-data ... issue IN status */
+ else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
+ if (req->request.length)
+ status = -EINVAL;
+ else {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend | MUSB_CSR0_P_DATAEND);
+ musb->ackpend = 0;
+ musb_g_ep0_giveback(ep->musb, r);
+ }
+
+ /* else for sequence #2 (OUT), caller provides a buffer
+ * before the next packet arrives. deferred responses
+ * (after SETUP is acked) are racey.
+ */
+ } else if (musb->ackpend) {
+ musb_writew(regs, MUSB_CSR0, musb->ackpend);
+ musb->ackpend = 0;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+ struct musb_ep *ep;
+ struct musb *musb;
+ void __iomem *base, *regs;
+ unsigned long flags;
+ int status;
+ u16 csr;
+
+ if (!e || !value)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ base = musb->mregs;
+ regs = musb->control_ep->regs;
+ status = 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ musb_ep_select(base, 0);
+ csr = musb->ackpend;
+
+ switch (musb->ep0_state) {
+
+ /* Stalls are usually issued after parsing SETUP packet, either
+ * directly in irq context from setup() or else later.
+ */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ csr = musb_readw(regs, MUSB_CSR0);
+ fallthrough;
+
+ /* It's also OK to issue stalls during callbacks when a non-empty
+ * DATA stage buffer has been read (or even written).
+ */
+ case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */
+ case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */
+
+ csr |= MUSB_CSR0_P_SENDSTALL;
+ musb_writew(regs, MUSB_CSR0, csr);
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+ musb->ackpend = 0;
+ break;
+ default:
+ musb_dbg(musb, "ep0 can't halt in state %d", musb->ep0_state);
+ status = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+const struct usb_ep_ops musb_g_ep0_ops = {
+ .enable = musb_g_ep0_enable,
+ .disable = musb_g_ep0_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_g_ep0_queue,
+ .dequeue = musb_g_ep0_dequeue,
+ .set_halt = musb_g_ep0_halt,
+};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 000000000..ef0b1589b
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2759 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+#include "musb_trace.h"
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ * they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ * + including ep0, with all usbtest cases 9, 10
+ * + usbtest 14 (ep0out) doesn't seem to run at all
+ * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ * configurations, but otherwise double buffering passes basic tests.
+ * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ * + about 1/15 the speed of typical EHCI implementations (PCI)
+ * + RX, all too often reqpkt seems to misbehave after tx
+ * + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ * starvation ... nothing yet for TX, interrupt, or bulk.
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ * extra endpoint for periodic use enabling hub + keybd + mouse. That
+ * mostly works, except that with "usbnet" it's easy to trigger cases
+ * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ * although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return *(struct musb **) hcd->hcd_priv;
+}
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, int is_out,
+ u8 *buf, u32 offset, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+ struct musb *musb = ep->musb;
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 1000;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /*
+ * FIXME: sometimes the tx fifo flush failed, it has been
+ * observed during device disconnect on AM335x.
+ *
+ * To reproduce the issue, ensure tx urb(s) are queued when
+ * unplug the usb device which is connected to AM335x usb
+ * host port.
+ *
+ * I found using a usb-ethernet device and running iperf
+ * (client on AM335x) has very high chance to trigger it.
+ *
+ * Better to turn on musb_dbg() in musb_cleanup_urb() with
+ * CPPI enabled to see the issue when aborting the tx channel.
+ */
+ if (dev_WARN_ONCE(musb->controller, retries-- < 1,
+ "Could not flush host TX%d fifo: csr: %04x\n",
+ ep->epnum, csr))
+ return;
+ mdelay(1);
+ }
+}
+
+static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
+{
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 5;
+
+ /* scrub any data left in the fifo */
+ do {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
+ break;
+ musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ udelay(10);
+ } while (--retries);
+
+ WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
+ ep->epnum, csr);
+
+ /* and reset for the next transfer */
+ musb_writew(epio, MUSB_TXCSR, 0);
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ if (ep->epnum) {
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+ } else {
+ txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+ musb_writew(ep->regs, MUSB_CSR0, txcsr);
+ }
+
+}
+
+static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+ if (is_cppi_enabled(ep->musb))
+ txcsr |= MUSB_TXCSR_DMAMODE;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+{
+ if (is_in != 0 || ep->is_shared_fifo)
+ ep->in_qh = qh;
+ if (is_in == 0 || ep->is_shared_fifo)
+ ep->out_qh = qh;
+}
+
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+{
+ return is_in ? ep->in_qh : ep->out_qh;
+}
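+
+/*
+ * With a shared FIFO only one direction can be active at a time, so
+ * musb_ep_set_qh() points both in_qh and out_qh at the same qh; with
+ * separate FIFOs the two directions are tracked independently.
+ */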
+
+/*
+ * Start the URB at the front of an endpoint's queue; the endpoint
+ * must already be claimed by the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+ u32 len;
+ void __iomem *mbase = musb->mregs;
+ struct urb *urb = next_urb(qh);
+ void *buf = urb->transfer_buffer;
+ u32 offset = 0;
+ struct musb_hw_ep *hw_ep = qh->hw_ep;
+ int epnum = hw_ep->epnum;
+
+ /* initialize software qh state */
+ qh->offset = 0;
+ qh->segsize = 0;
+
+ /* gather right source of data */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfers always start with SETUP */
+ is_in = 0;
+ musb->ep0_stage = MUSB_EP0_START;
+ buf = urb->setup_packet;
+ len = 8;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ qh->iso_idx = 0;
+ qh->frame = 0;
+ offset = urb->iso_frame_desc[0].offset;
+ len = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+ /* actual_length may be nonzero on retry paths */
+ buf = urb->transfer_buffer + urb->actual_length;
+ len = urb->transfer_buffer_length - urb->actual_length;
+ }
+
+ trace_musb_urb_start(musb, urb);
+
+ /* Configure endpoint */
+ musb_ep_set_qh(hw_ep, is_in, qh);
+ musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
+
+ /* transmit may have more work: start it when it is time */
+ if (is_in)
+ return;
+
+ /* determine if the time is right for a periodic transfer */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ musb_dbg(musb, "check whether there's still time for periodic Tx");
+ /* FIXME this doesn't implement that scheduling policy ...
+ * or handle framecounter wrapping
+ */
+ if (1) { /* Always assume URB_ISO_ASAP */
+ /* REVISIT the SOF irq handler shouldn't duplicate
+ * this code; and we don't init urb->start_frame...
+ */
+ qh->frame = 0;
+ goto start;
+ } else {
+ qh->frame = urb->start_frame;
+ /* enable SOF interrupt so we can count down */
+ musb_dbg(musb, "SOF for %d", epnum);
+#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+ }
+ break;
+ default:
+start:
+ musb_dbg(musb, "Start TX%d %s", epnum,
+ hw_ep->tx_channel ? "dma" : "pio");
+
+ if (!hw_ep->tx_channel)
+ musb_h_tx_start(hw_ep);
+ else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
+ musb_h_tx_dma_start(hw_ep);
+ }
+}
+
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ trace_musb_urb_gb(musb, urb);
+
+ usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
+ spin_unlock(&musb->lock);
+ usb_hcd_giveback_urb(musb->hcd, urb, status);
+ spin_lock(&musb->lock);
+}
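+
+/*
+ * NOTE: URB completion handlers may resubmit from inside
+ * usb_hcd_giveback_urb(), re-entering this driver's enqueue path;
+ * that is why the lock is dropped here and why callers must re-read
+ * any qh pointers afterwards. A minimal sketch of the caller pattern
+ * (taken from musb_advance_schedule() below):
+ *
+ *	qh->is_ready = 0;
+ *	musb_giveback(musb, urb, status);
+ *	qh->is_ready = ready;
+ *	qh = musb_ep_get_qh(hw_ep, is_in);	/* qh may have been freed */
+ */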
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
+{
+ struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
+ struct musb_hw_ep *ep = qh->hw_ep;
+ int ready = qh->is_ready;
+ int status;
+ u16 toggle;
+
+ status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
+
+ /* save toggle eagerly, for paranoia */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ toggle = musb->io.get_toggle(qh, !is_in);
+ usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (status == 0 && urb->error_count)
+ status = -EXDEV;
+ break;
+ }
+
+ qh->is_ready = 0;
+ musb_giveback(musb, urb, status);
+ qh->is_ready = ready;
+
+ /*
+ * musb->lock had been unlocked in musb_giveback, so qh may
+ * be freed, need to get it again
+ */
+ qh = musb_ep_get_qh(hw_ep, is_in);
+
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (qh && list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
+ struct dma_controller *dma = musb->dma_controller;
+
+ if (is_in) {
+ ep->rx_reinit = 1;
+ if (ep->rx_channel) {
+ dma->channel_release(ep->rx_channel);
+ ep->rx_channel = NULL;
+ }
+ } else {
+ ep->tx_reinit = 1;
+ if (ep->tx_channel) {
+ dma->channel_release(ep->tx_channel);
+ ep->tx_channel = NULL;
+ }
+ }
+
+ /* Clobber old pointers to this qh */
+ musb_ep_set_qh(ep, is_in, NULL);
+ qh->hep->hcpriv = NULL;
+
+ switch (qh->type) {
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ /* fifo policy for these lists, except that NAKing
+ * should rotate a qh to the end (for fairness).
+ */
+ if (qh->mux == 1) {
+ head = qh->ring.prev;
+ list_del(&qh->ring);
+ kfree(qh);
+ qh = first_qh(head);
+ break;
+ }
+ fallthrough;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ /* this is where periodic bandwidth should be
+ * de-allocated if it's tracked and allocated;
+ * and where we'd update the schedule tree...
+ */
+ kfree(qh);
+ qh = NULL;
+ break;
+ }
+ }
+
+ if (qh != NULL && qh->is_ready) {
+ musb_dbg(musb, "... next ep%d %cX urb %p",
+ hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
+ musb_start_urb(musb, is_in, qh);
+ }
+}
+
+static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+ /* we don't want fifo to fill itself again;
+ * ignore dma (various models),
+ * leave toggle alone (may not have been saved yet)
+ */
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+ csr &= ~(MUSB_RXCSR_H_REQPKT
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+
+ /* write 2x to allow double buffering */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+ /* flush writebuffer */
+ return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+ u16 rx_count;
+ u8 *buf;
+ u16 csr;
+ bool done = false;
+ u32 length;
+ int do_flush = 0;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ int pipe = urb->pipe;
+ void *buffer = urb->transfer_buffer;
+
+ /* musb_ep_select(mbase, epnum); */
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
+ urb->transfer_buffer, qh->offset,
+ urb->transfer_buffer_length);
+
+ /* unload FIFO */
+ if (usb_pipeisoc(pipe)) {
+ int status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ if (iso_err) {
+ status = -EILSEQ;
+ urb->error_count++;
+ }
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ buf = buffer + d->offset;
+ length = d->length;
+ if (rx_count > length) {
+ if (status == 0) {
+ status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ d->actual_length = length;
+
+ d->status = status;
+
+ /* see if we are done */
+ done = (++qh->iso_idx >= urb->number_of_packets);
+ } else {
+ /* non-isoch */
+ buf = buffer + qh->offset;
+ length = urb->transfer_buffer_length - qh->offset;
+ if (rx_count > length) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = -EOVERFLOW;
+ musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ qh->offset += length;
+
+ /* see if we are done */
+ done = (urb->actual_length == urb->transfer_buffer_length)
+ || (rx_count < qh->maxpacket)
+ || (urb->status != -EINPROGRESS);
+ if (done
+ && (urb->status == -EINPROGRESS)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->actual_length
+ < urb->transfer_buffer_length))
+ urb->status = -EREMOTEIO;
+ }
+
+ musb_read_fifo(hw_ep, length, buf);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ if (unlikely(do_flush))
+ musb_h_flush_rxfifo(hw_ep, csr);
+ else {
+ /* REVISIT this assumes AUTOCLEAR is never set */
+ csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+ if (!done)
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
+{
+ struct musb_hw_ep *ep = musb->endpoints + epnum;
+ u16 csr;
+
+ /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
+ * That always uses tx_reinit since ep0 repurposes TX register
+ * offsets; the initial SETUP packet is also a kind of OUT.
+ */
+
+ /* if programmed for Tx, put it in RX mode */
+ if (ep->is_shared_fifo) {
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_MODE) {
+ musb_h_tx_flush_fifo(ep);
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
+ musb_writew(ep->regs, MUSB_TXCSR,
+ csr | MUSB_TXCSR_FRCDATATOG);
+ }
+
+ /*
+ * Clear the MODE bit (and everything else) to enable Rx.
+ * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
+ */
+ if (csr & MUSB_TXCSR_DMAMODE)
+ musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
+ musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+ /* scrub all previous state, clearing toggle */
+ }
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
+ musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
+ musb_write_rxhubport(musb, epnum, qh->h_port_reg);
+ } else
+ musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint, interval/NAKlimit, i/o size */
+ musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+ musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+ /* NOTE: bulk combining rewrites high bits of maxpacket */
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffer mode.
+ */
+ musb_writew(ep->regs, MUSB_RXMAXP,
+ qh->maxpacket | ((qh->hb_mult - 1) << 11));
+
+ ep->rx_reinit = 0;
+}
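+
+/*
+ * RXMAXP/TXMAXP packing, worked example: a high-bandwidth ISO endpoint
+ * with maxpacket 1024 and hb_mult 3 programs 1024 | ((3 - 1) << 11) =
+ * 0x1400; the low 11 bits carry the packet size, and bits 11-12 the
+ * count of additional transactions per microframe.
+ */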
+
+static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ u32 *length, u8 *mode)
+{
+ struct dma_channel *channel = hw_ep->tx_channel;
+ void __iomem *epio = hw_ep->regs;
+ u16 pkt_size = qh->maxpacket;
+ u16 csr;
+
+ if (*length > channel->max_len)
+ *length = channel->max_len;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (*length > pkt_size) {
+ *mode = 1;
+ csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+ /*
+ * Enable Autoset according to the table below; note that
+ * autoset must not be set for high-bandwidth ISO:
+ * bulk_split hb_mult Autoset_Enable
+ * 0 1 Yes (normal)
+ * 0 >1 No (high-bandwidth ISO)
+ * 1 1 Yes (HS bulk)
+ * 1 >1 Yes (FS bulk)
+ */
+ if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
+ can_bulk_split(hw_ep->musb, qh->type)))
+ csr |= MUSB_TXCSR_AUTOSET;
+ } else {
+ *mode = 0;
+ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
+ csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
+ }
+ channel->desired_mode = *mode;
+ musb_writew(epio, MUSB_TXCSR, csr);
+}
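+
+/*
+ * Worked example for the mode choice above: a 3072-byte bulk OUT with
+ * 512-byte maxpacket uses mode 1 (multi-packet DMA); a final 100-byte
+ * short packet uses mode 0, where TXPKTRDY still has to be set by the
+ * driver (see the Mentor DMA notes above musb_host_tx() below).
+ */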
+
+static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
+ struct urb *urb,
+ u8 *mode)
+{
+ struct dma_channel *channel = hw_ep->tx_channel;
+
+ channel->actual_len = 0;
+
+ /*
+ * TX uses "RNDIS" mode automatically but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
+}
+
+static bool musb_tx_dma_program(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep, struct musb_qh *qh,
+ struct urb *urb, u32 offset, u32 length)
+{
+ struct dma_channel *channel = hw_ep->tx_channel;
+ u16 pkt_size = qh->maxpacket;
+ u8 mode;
+
+ if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
+ musb_tx_dma_set_mode_mentor(hw_ep, qh,
+ &length, &mode);
+ else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+ musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
+ else
+ return false;
+
+ qh->segsize = length;
+
+ /*
+ * Ensure the data reaches main memory before starting the
+ * DMA transfer
+ */
+ wmb();
+
+ if (!dma->channel_program(channel, pkt_size, mode,
+ urb->transfer_dma + offset, length)) {
+ void __iomem *epio = hw_ep->regs;
+ u16 csr;
+
+ dma->channel_release(channel);
+ hw_ep->tx_channel = NULL;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
+ musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
+ return false;
+ }
+ return true;
+}
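+
+/*
+ * Typical caller pattern (see musb_ep_program() below): when
+ * musb_tx_dma_program() returns false, the caller falls back to PIO,
+ * loading the FIFO with musb_write_fifo(); TXPKTRDY is then set from
+ * musb_h_tx_start().
+ */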
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, int is_out,
+ u8 *buf, u32 offset, u32 len)
+{
+ struct dma_controller *dma_controller;
+ struct dma_channel *dma_channel;
+ u8 dma_ok;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
+ u16 packet_sz = qh->maxpacket;
+ u8 use_dma = 1;
+ u16 csr;
+
+ musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d",
+ is_out ? "-->" : "<--",
+ epnum, urb, urb->dev->speed,
+ qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+ qh->h_addr_reg, qh->h_port_reg,
+ len);
+
+ musb_ep_select(mbase, epnum);
+
+ if (is_out && !len) {
+ use_dma = 0;
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ hw_ep->tx_channel = NULL;
+ }
+
+ /* candidate for DMA? */
+ dma_controller = musb->dma_controller;
+ if (use_dma && is_dma_capable() && epnum && dma_controller) {
+ dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+ if (!dma_channel) {
+ dma_channel = dma_controller->channel_alloc(
+ dma_controller, hw_ep, is_out);
+ if (is_out)
+ hw_ep->tx_channel = dma_channel;
+ else
+ hw_ep->rx_channel = dma_channel;
+ }
+ } else
+ dma_channel = NULL;
+
+ /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+ /* OUT/transmit/EP0 or IN/receive? */
+ if (is_out) {
+ u16 csr;
+ u16 int_txe;
+ u16 load_count;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* disable interrupt in case we flush */
+ int_txe = musb->intrtxe;
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ /* general endpoint setup */
+ if (epnum) {
+ /* flush all old state, set default; skip the flush in the
+ * double-buffering case, since we could be flushing valid
+ * packets
+ */
+ if (!hw_ep->tx_double_buffered)
+ musb_h_tx_flush_fifo(hw_ep);
+
+ /*
+ * We must not clear the DMAMODE bit before or in
+ * the same cycle with the DMAENAB bit, so we clear
+ * the latter first...
+ */
+ csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_FRCDATATOG
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY
+ );
+ csr |= MUSB_TXCSR_MODE;
+
+ if (!hw_ep->tx_double_buffered)
+ csr |= musb->io.set_toggle(qh, is_out, urb);
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ csr &= ~MUSB_TXCSR_DMAMODE;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ /* endpoint 0: just flush */
+ musb_h_ep0_flush_fifo(hw_ep);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_write_txfunaddr(musb, epnum, qh->addr_reg);
+ musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
+ musb_write_txhubport(musb, epnum, qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+ } else
+ musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint/interval/NAKlimit */
+ if (epnum) {
+ musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+ if (can_bulk_split(musb, qh->type)) {
+ qh->hb_mult = hw_ep->max_packet_sz_tx
+ / packet_sz;
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((qh->hb_mult) - 1) << 11);
+ } else {
+ musb_writew(epio, MUSB_TXMAXP,
+ qh->maxpacket |
+ ((qh->hb_mult - 1) << 11));
+ }
+ musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+ } else {
+ musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+ if (musb->is_multipoint)
+ musb_writeb(epio, MUSB_TYPE0,
+ qh->type_reg);
+ }
+
+ if (can_bulk_split(musb, qh->type))
+ load_count = min((u32) hw_ep->max_packet_sz_tx,
+ len);
+ else
+ load_count = min((u32) packet_sz, len);
+
+ if (dma_channel && musb_tx_dma_program(dma_controller,
+ hw_ep, qh, urb, offset, len))
+ load_count = 0;
+
+ if (load_count) {
+ /* PIO to load FIFO */
+ qh->segsize = load_count;
+ if (!buf) {
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ SG_MITER_ATOMIC
+ | SG_MITER_FROM_SG);
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller,
+ "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ goto finish;
+ }
+ buf = qh->sg_miter.addr + urb->sg->offset +
+ urb->actual_length;
+ load_count = min_t(u32, load_count,
+ qh->sg_miter.length);
+ musb_write_fifo(hw_ep, load_count, buf);
+ qh->sg_miter.consumed = load_count;
+ sg_miter_stop(&qh->sg_miter);
+ } else
+ musb_write_fifo(hw_ep, load_count, buf);
+ }
+finish:
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* IN/receive */
+ } else {
+ u16 csr = 0;
+
+ if (hw_ep->rx_reinit) {
+ musb_rx_reinit(musb, qh, epnum);
+ csr |= musb->io.set_toggle(qh, is_out, urb);
+
+ if (qh->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ if (csr & (MUSB_RXCSR_RXPKTRDY
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ hw_ep->epnum, csr);
+
+ /* scrub any stale state, leaving toggle alone */
+ csr &= MUSB_RXCSR_DISNYET;
+ }
+
+ /* kick things off */
+
+ if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
+ /* Candidate for DMA */
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ /*
+ * Unless caller treats short RX transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(dma_channel,
+ packet_sz, !(urb->transfer_flags &
+ URB_SHORT_NOT_OK),
+ urb->transfer_dma + offset,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->rx_channel = dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
+ }
+
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ }
+}
+
+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ int is_in)
+{
+ struct dma_channel *dma;
+ struct urb *urb;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *cur_qh, *next_qh;
+ u16 rx_csr, tx_csr;
+ u16 toggle;
+
+ musb_ep_select(mbase, ep->epnum);
+ if (is_in) {
+ dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+ /*
+ * Need to stop the transaction by clearing REQPKT first,
+ * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0
+ * HIGH-SPEED DUAL-ROLE CONTROLLER Programmer's Guide,
+ * section 9.2.2.
+ */
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ cur_qh = first_qh(&musb->in_bulk);
+ } else {
+ dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+ /* clear nak timeout bit */
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+ tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+ cur_qh = first_qh(&musb->out_bulk);
+ }
+ if (cur_qh) {
+ urb = next_urb(cur_qh);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ urb->actual_length += dma->actual_len;
+ dma->actual_len = 0L;
+ }
+ toggle = musb->io.get_toggle(cur_qh, !is_in);
+ usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
+
+ if (is_in) {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+ /* get the next qh from musb->in_bulk */
+ next_qh = first_qh(&musb->in_bulk);
+
+ /* set rx_reinit and schedule the next qh */
+ ep->rx_reinit = 1;
+ } else {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+ /* get the next qh from musb->out_bulk */
+ next_qh = first_qh(&musb->out_bulk);
+
+ /* set tx_reinit and schedule the next qh */
+ ep->tx_reinit = 1;
+ }
+
+ if (next_qh)
+ musb_start_urb(musb, is_in, next_qh);
+ }
+}
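+
+/*
+ * Example: two bulk IN endpoints multiplexed on the reserved bulk
+ * hw_ep (qh->mux == 1). If the first device NAKs past its NAK limit,
+ * its qh is rotated to the tail of musb->in_bulk and the other qh is
+ * started at once, so one slow reader cannot starve the other.
+ */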
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
+ */
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+ bool more = false;
+ u8 *fifo_dest = NULL;
+ u16 fifo_count = 0;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ struct musb_qh *qh = hw_ep->in_qh;
+ struct usb_ctrlrequest *request;
+
+ switch (musb->ep0_stage) {
+ case MUSB_EP0_IN:
+ fifo_dest = urb->transfer_buffer + urb->actual_length;
+ fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
+ urb->actual_length);
+ if (fifo_count < len)
+ urb->status = -EOVERFLOW;
+
+ musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ if (len < qh->maxpacket) {
+ /* always terminate on short read; it's
+ * rarely reported as an error.
+ */
+ } else if (urb->actual_length <
+ urb->transfer_buffer_length)
+ more = true;
+ break;
+ case MUSB_EP0_START:
+ request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+ if (!request->wLength) {
+ musb_dbg(musb, "start no-DATA");
+ break;
+ } else if (request->bRequestType & USB_DIR_IN) {
+ musb_dbg(musb, "start IN-DATA");
+ musb->ep0_stage = MUSB_EP0_IN;
+ more = true;
+ break;
+ } else {
+ musb_dbg(musb, "start OUT-DATA");
+ musb->ep0_stage = MUSB_EP0_OUT;
+ more = true;
+ }
+ fallthrough;
+ case MUSB_EP0_OUT:
+ fifo_count = min_t(size_t, qh->maxpacket,
+ urb->transfer_buffer_length -
+ urb->actual_length);
+ if (fifo_count) {
+ fifo_dest = (u8 *) (urb->transfer_buffer
+ + urb->actual_length);
+ musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
+ fifo_count,
+ (fifo_count == 1) ? "" : "s",
+ fifo_dest);
+ musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ more = true;
+ }
+ break;
+ default:
+ ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+ break;
+ }
+
+ return more;
+}
+
+/*
+ * Handle default endpoint interrupt as host. Only called in IRQ time
+ * from musb_interrupt().
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ struct urb *urb;
+ u16 csr, len;
+ int status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ bool complete = false;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* ep0 only has one queue, "in" */
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, 0);
+ csr = musb_readw(epio, MUSB_CSR0);
+ len = (csr & MUSB_CSR0_RXPKTRDY)
+ ? musb_readb(epio, MUSB_COUNT0)
+ : 0;
+
+ musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
+ csr, qh, len, urb, musb->ep0_stage);
+
+ /* if we just did status stage, we are done */
+ if (MUSB_EP0_STATUS == musb->ep0_stage) {
+ retval = IRQ_HANDLED;
+ complete = true;
+ }
+
+ /* prepare status */
+ if (csr & MUSB_CSR0_H_RXSTALL) {
+ musb_dbg(musb, "STALLING ENDPOINT");
+ status = -EPIPE;
+
+ } else if (csr & MUSB_CSR0_H_ERROR) {
+ musb_dbg(musb, "no response, csr0 %04x", csr);
+ status = -EPROTO;
+
+ } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+ musb_dbg(musb, "control NAK timeout");
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+ * ep0 is more likely to stay busy. That's already done
+ * for bulk RX transfers.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_writew(epio, MUSB_CSR0, 0);
+ retval = IRQ_HANDLED;
+ }
+
+ if (status) {
+ musb_dbg(musb, "aborting");
+ retval = IRQ_HANDLED;
+ if (urb)
+ urb->status = status;
+ complete = true;
+
+ /* use the proper sequence to abort the transfer */
+ if (csr & MUSB_CSR0_H_REQPKT) {
+ csr &= ~MUSB_CSR0_H_REQPKT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ } else {
+ musb_h_ep0_flush_fifo(hw_ep);
+ }
+
+ musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+ /* clear it */
+ musb_writew(epio, MUSB_CSR0, 0);
+ }
+
+ if (unlikely(!urb)) {
+ /* stop endpoint since we have no place for its data; this
+ * SHOULD NEVER HAPPEN! */
+ ERR("no URB for end 0\n");
+
+ musb_h_ep0_flush_fifo(hw_ep);
+ goto done;
+ }
+
+ if (!complete) {
+ /* call common logic and prepare response */
+ if (musb_h_ep0_continue(musb, len, urb)) {
+ /* more packets required */
+ csr = (MUSB_EP0_IN == musb->ep0_stage)
+ ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+ } else {
+ /* data transfer complete; perform status phase */
+ if (usb_pipeout(urb->pipe)
+ || !urb->transfer_buffer_length)
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_H_REQPKT;
+ else
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_TXPKTRDY;
+
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
+ /* flag status stage */
+ musb->ep0_stage = MUSB_EP0_STATUS;
+
+ musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
+
+ }
+ musb_writew(epio, MUSB_CSR0, csr);
+ retval = IRQ_HANDLED;
+ } else
+ musb->ep0_stage = MUSB_EP0_IDLE;
+
+ /* call completion handler if done */
+ if (complete)
+ musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+ return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/*
+ * Host side TX (OUT) using Mentor DMA works as follows:
+ * submit_urb ->
+ * - if queue was empty, Program Endpoint
+ * - ... which starts DMA to fifo in mode 1 or 0
+ *
+ * DMA Isr (transfer complete) -> TxAvail()
+ * - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
+ * only in musb_cleanup_urb)
+ * - TxPktRdy has to be set in mode 0 or for
+ * short packets in mode 1.
+ */
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+ int pipe;
+ bool done = false;
+ u16 tx_csr;
+ size_t length = 0;
+ size_t offset = 0;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->out_qh;
+ struct urb *urb = next_urb(qh);
+ u32 status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct dma_channel *dma;
+ bool transfer_pending = false;
+
+ musb_ep_select(mbase, epnum);
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* with CPPI, DMA sometimes triggers "extra" irqs */
+ if (!urb) {
+ musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
+ return;
+ }
+
+ pipe = urb->pipe;
+ dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+ trace_musb_urb_tx(musb, urb);
+ musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
+ dma ? ", dma" : "");
+
+ /* check for errors */
+ if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
+ /* dma was disabled, fifo flushed */
+ musb_dbg(musb, "TX end %d stall", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
+ /* (NON-ISO) dma was disabled, fifo flushed */
+ musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
+
+ status = -ETIMEDOUT;
+
+ } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+ if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+ && !list_is_singular(&musb->out_bulk)) {
+ musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
+ musb_bulk_nak_timeout(musb, hw_ep, 0);
+ } else {
+ musb_dbg(musb, "TX ep%d device not responding", epnum);
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ * That's already done for bulk RX transfers.
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ }
+ return;
+ }
+
+done:
+ if (status) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ }
+
+ /* do the proper sequence to abort the transfer in the
+ * usb core; the dma engine should already be stopped.
+ */
+ musb_h_tx_flush_fifo(hw_ep);
+ tx_csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ );
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ musb_writeb(epio, MUSB_TXINTERVAL, 0);
+
+ done = true;
+ }
+
+ /* second cppi case */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
+ return;
+ }
+
+ if (is_dma_capable() && dma && !status) {
+ /*
+ * DMA has completed. But if we're using DMA mode 1 (multi
+ * packet DMA), we need a terminal TXPKTRDY interrupt before
+ * we can consider this transfer completed, lest we trash
+ * its last packet when writing the next URB's data. So we
+ * switch back to mode 0 to get that interrupt; we'll come
+ * back here once it happens.
+ */
+ if (tx_csr & MUSB_TXCSR_DMAMODE) {
+ /*
+ * We shouldn't clear DMAMODE with DMAENAB set; so
+ * clear them in a safe order. That should be OK
+ * once TXPKTRDY has been set (and I've never seen
+ * it being 0 at this moment -- DMA interrupt latency
+ * is significant) but if it hasn't been then we have
+ * no choice but to stop being polite and ignore the
+ * programmer's guide... :-)
+ *
+ * Note that we must write TXCSR with TXPKTRDY cleared
+ * in order not to re-trigger the packet send (this bit
+ * can't be cleared by CPU), and there's another caveat:
+ * TXPKTRDY may be set shortly and then cleared in the
+ * double-buffered FIFO mode, so we do an extra TXCSR
+ * read for debouncing...
+ */
+ tx_csr &= musb_readw(epio, MUSB_TXCSR);
+ if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
+ tx_csr &= ~(MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR,
+ tx_csr | MUSB_TXCSR_H_WZC_BITS);
+ }
+ tx_csr &= ~(MUSB_TXCSR_DMAMODE |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR,
+ tx_csr | MUSB_TXCSR_H_WZC_BITS);
+
+ /*
+ * There is no guarantee that we'll get an interrupt
+ * after clearing DMAMODE as we might have done this
+ * too late (after TXPKTRDY was cleared by controller).
+ * Re-read TXCSR as we have spoiled its previous value.
+ */
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ }
+
+ /*
+ * We may get here from a DMA completion or TXPKTRDY interrupt.
+ * In any case, we must check the FIFO status here and bail out
+ * only if the FIFO still has data -- that should prevent the
+ * "missed" TXPKTRDY interrupts and deal with double-buffered
+ * FIFO mode too...
+ */
+ if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
+ musb_dbg(musb,
+ "DMA complete but FIFO not empty, CSR %04x",
+ tx_csr);
+ return;
+ }
+ }
+
+ if (!status || dma || usb_pipeisoc(pipe)) {
+ if (dma)
+ length = dma->actual_len;
+ else
+ length = qh->segsize;
+ qh->offset += length;
+
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = length;
+ d->status = status;
+ if (++qh->iso_idx >= urb->number_of_packets) {
+ done = true;
+ } else {
+ d++;
+ offset = d->offset;
+ length = d->length;
+ }
+ } else if (dma && urb->transfer_buffer_length == qh->offset) {
+ done = true;
+ } else {
+ /* see if we need to send more data, or ZLP */
+ if (qh->segsize < qh->maxpacket)
+ done = true;
+ else if (qh->offset == urb->transfer_buffer_length
+ && !(urb->transfer_flags
+ & URB_ZERO_PACKET))
+ done = true;
+ if (!done) {
+ offset = qh->offset;
+ length = urb->transfer_buffer_length - offset;
+ transfer_pending = true;
+ }
+ }
+ }
+
+ /* urb->status != -EINPROGRESS means request has been faulted,
+ * so we must abort this transfer after cleanup
+ */
+ if (urb->status != -EINPROGRESS) {
+ done = true;
+ if (status == 0)
+ status = urb->status;
+ }
+
+ if (done) {
+ /* set status */
+ urb->status = status;
+ urb->actual_length = qh->offset;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+ return;
+ } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
+ if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
+ offset, length)) {
+ if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
+ musb_h_tx_dma_start(hw_ep);
+ return;
+ }
+ } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
+ musb_dbg(musb, "not complete, but DMA enabled?");
+ return;
+ }
+
+ /*
+ * PIO: start next packet in this URB.
+ *
+ * REVISIT: some docs say that when hw_ep->tx_double_buffered,
+ * (and presumably, FIFO is not half-full) we should write *two*
+ * packets before updating TXCSR; other docs disagree...
+ */
+ if (length > qh->maxpacket)
+ length = qh->maxpacket;
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer) {
+ /* sg_miter_start is already done in musb_ep_program */
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ goto done;
+ }
+ length = min_t(u32, length, qh->sg_miter.length);
+ musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
+ qh->sg_miter.consumed = length;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+ }
+
+ qh->segsize = length;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+}
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
+static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ struct dma_channel *channel = hw_ep->rx_channel;
+ void __iomem *epio = hw_ep->regs;
+ dma_addr_t *buf;
+ u32 length;
+ u16 val;
+
+ buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
+ (u32)urb->transfer_dma;
+
+ length = urb->iso_frame_desc[qh->iso_idx].length;
+
+ val = musb_readw(epio, MUSB_RXCSR);
+ val |= MUSB_RXCSR_DMAENAB;
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ return dma->channel_program(channel, qh->maxpacket, 0,
+ (u32)buf, length);
+}
+#else
+static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ return false;
+}
+#endif
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
+/* Host side RX (IN) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, ProgramEndpoint
+ - first IN token is sent out (by setting ReqPkt)
+ LinuxIsr -> RxReady()
+ /\ => first packet is received
+ | - Set in mode 0 (DmaEnab, ~ReqPkt)
+ | -> DMA Isr (transfer complete) -> RxReady()
+ | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+ | - if urb not complete, send next IN token (ReqPkt)
+ | | else complete urb.
+ | |
+ ---------------------------
+ *
+ * Nuances of mode 1:
+ * For short packets, no ack (+RxPktRdy) is sent automatically
+ * (even if AutoClear is ON)
+ * For full packets, the ack (~RxPktRdy) and next IN token (+ReqPkt) are
+ * sent automatically => major problem, as collecting the next packet
+ * becomes difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ * All we care about at this driver level is that
+ * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ * (b) termination conditions are: short RX, or buffer full;
+ * (c) fault modes include
+ * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ * (and that endpoint's dma queue stops immediately)
+ * - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ * thus be a great candidate for using mode 1 ... for all but the
+ * last packet of one URB's transfer.
+ */
+static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ struct dma_channel *channel = hw_ep->rx_channel;
+ void __iomem *epio = hw_ep->regs;
+ u16 val;
+ int pipe;
+ bool done;
+
+ pipe = urb->pipe;
+
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = len;
+
+ /* even if there was an error, we did the dma
+ * for iso_frame_desc->length
+ */
+ if (d->status != -EILSEQ && d->status != -EOVERFLOW)
+ d->status = 0;
+
+ if (++qh->iso_idx >= urb->number_of_packets) {
+ done = true;
+ } else {
+ /* REVISIT: Why ignore return value here? */
+ if (musb_dma_cppi41(hw_ep->musb))
+ done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
+ urb, len);
+ done = false;
+ }
+
+ } else {
+ /* done if urb buffer is full or a short packet is received */
+ done = (urb->actual_length + len >=
+ urb->transfer_buffer_length
+ || channel->actual_len < qh->maxpacket
+ || channel->rx_packet_done);
+ }
+
+ /* send IN token for next packet, without AUTOREQ */
+ if (!done) {
+ val = musb_readw(epio, MUSB_RXCSR);
+ val |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
+ }
+
+ return done;
+}
+
+/* Disadvantage of using mode 1:
+ * It's basically usable only for mass storage class; essentially all
+ * other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ * An extra IN token is sent at the end of the transfer (due to AUTOREQ).
+ * If you try to use mode 1 for (transfer_buffer_length - 512), and try
+ * to use the extra IN token to grab the last packet using mode 0, then
+ * the problem is that you cannot be sure when the device will send the
+ * last packet and set RxPktRdy. Sometimes the packet is received too
+ * soon, such that it gets lost when RxCSR is re-set at the end of the
+ * mode 1 transfer, while sometimes it is received just a little late,
+ * so that if you try to configure for mode 0 soon after the mode 1
+ * transfer is completed, you will find rxcount 0. Okay, so you might
+ * think: why not wait for an interrupt when the packet is received?
+ * Well, you won't get any!
+ */
+static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len,
+ u8 iso_err)
+{
+ struct musb *musb = hw_ep->musb;
+ void __iomem *epio = hw_ep->regs;
+ struct dma_channel *channel = hw_ep->rx_channel;
+ u16 rx_count, val;
+ int length, pipe, done;
+ dma_addr_t buf;
+
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ pipe = urb->pipe;
+
+ if (usb_pipeisoc(pipe)) {
+ int d_status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+
+ if (iso_err) {
+ d_status = -EILSEQ;
+ urb->error_count++;
+ }
+ if (rx_count > d->length) {
+ if (d_status == 0) {
+ d_status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ musb_dbg(musb, "** OVERFLOW %d into %d",
+ rx_count, d->length);
+
+ length = d->length;
+ } else
+ length = rx_count;
+ d->status = d_status;
+ buf = urb->transfer_dma + d->offset;
+ } else {
+ length = rx_count;
+ buf = urb->transfer_dma + urb->actual_length;
+ }
+
+ channel->desired_mode = 0;
+#ifdef USE_MODE1
+ /* because of the issue below, mode 1 will
+ * only rarely behave with correct semantics.
+ */
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->transfer_buffer_length - urb->actual_length)
+ > qh->maxpacket)
+ channel->desired_mode = 1;
+ if (rx_count < hw_ep->max_packet_sz_rx) {
+ length = rx_count;
+ channel->desired_mode = 0;
+ } else {
+ length = urb->transfer_buffer_length;
+ }
+#endif
+
+ /* See comments above on disadvantages of using mode 1 */
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+
+ if (channel->desired_mode == 0)
+ val &= ~MUSB_RXCSR_H_AUTOREQ;
+ else
+ val |= MUSB_RXCSR_H_AUTOREQ;
+ val |= MUSB_RXCSR_DMAENAB;
+
+ /* autoclear shouldn't be set in high bandwidth */
+ if (qh->hb_mult == 1)
+ val |= MUSB_RXCSR_AUTOCLEAR;
+
+ musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
+
+ /* REVISIT if when actual_length != 0,
+ * transfer_buffer_length needs to be
+ * adjusted first...
+ */
+ done = dma->channel_program(channel, qh->maxpacket,
+ channel->desired_mode,
+ buf, length);
+
+ if (!done) {
+ dma->channel_release(channel);
+ hw_ep->rx_channel = NULL;
+ channel = NULL;
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, val);
+ }
+
+ return done;
+}
+#else
+static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ return false;
+}
+
+static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len,
+ u8 iso_err)
+{
+ return false;
+}
+#endif
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *musb, u8 epnum)
+{
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ struct dma_controller *c = musb->dma_controller;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ size_t xfer_len;
+ void __iomem *mbase = musb->mregs;
+ u16 rx_csr, val;
+ bool iso_err = false;
+ bool done = false;
+ u32 status;
+ struct dma_channel *dma;
+ unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+
+ musb_ep_select(mbase, epnum);
+
+ urb = next_urb(qh);
+ dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+ status = 0;
+ xfer_len = 0;
+
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ val = rx_csr;
+
+ if (unlikely(!urb)) {
+ /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+ * usbtest #11 (unlinks) triggers it regularly, sometimes
+ * with fifo full. (Only with DMA??)
+ */
+ musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
+ epnum, val, musb_readw(epio, MUSB_RXCOUNT));
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ return;
+ }
+
+ trace_musb_urb_rx(musb, urb);
+
+ /* check for errors, concurrent stall & unlink is not really
+ * handled yet! */
+ if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
+ musb_dbg(musb, "RX end %d STALL", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
+ dev_err(musb->controller, "ep%d RX three-strikes error\n", epnum);
+
+ /*
+ * The three-strikes error could only happen when the USB
+ * device is not accessible, for example detached or powered
+ * off. So return the fatal error -ESHUTDOWN so hopefully the
+ * USB device drivers won't immediately resubmit the same URB.
+ */
+ status = -ESHUTDOWN;
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+
+ rx_csr &= ~MUSB_RXCSR_H_ERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+ if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+ musb_dbg(musb, "RX end %d NAK timeout", epnum);
+
+ /* NOTE: NAKing is *NOT* an error, so we want to
+ * continue. Except ... if there's a request for
+ * another QH, use that instead of starving it.
+ *
+ * Devices like Ethernet and serial adapters keep
+ * reads posted at all times, which will starve
+ * other devices without this logic.
+ */
+ if (usb_pipebulk(urb->pipe)
+ && qh->mux == 1
+ && !list_is_singular(&musb->in_bulk)) {
+ musb_bulk_nak_timeout(musb, hw_ep, 1);
+ return;
+ }
+ musb_ep_select(mbase, epnum);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ goto finish;
+ } else {
+ musb_dbg(musb, "RX end %d ISO data error", epnum);
+ /* packet error reported later */
+ iso_err = true;
+ }
+ } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
+ musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
+ epnum);
+ status = -EPROTO;
+ }
+
+ /* faults abort the transfer */
+ if (status) {
+ /* clean up dma and collect transfer count */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ }
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ done = true;
+ goto finish;
+ }
+
+ if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
+ /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+ ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+ goto finish;
+ }
+
+ /* thorough shutdown for now ... given more precise fault handling
+ * and better queueing support, we might keep a DMA pipeline going
+ * while processing this irq for earlier completions.
+ */
+
+ /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+ if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
+ (rx_csr & MUSB_RXCSR_H_REQPKT)) {
+ /* REVISIT this happened for a while on some short reads...
+ * the cleanup still needs investigation... looks bad...
+ * and also duplicates dma cleanup code above ... plus,
+ * shouldn't this be the "half full" double buffer case?
+ */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ done = true;
+ }
+
+ musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
+ xfer_len, dma ? ", dma" : "");
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | rx_csr);
+ }
+
+ if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+ xfer_len = dma->actual_len;
+
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_RXPKTRDY);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
+ musb_dma_cppi41(musb)) {
+ done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
+ musb_dbg(hw_ep->musb,
+ "ep %d dma %s, rxcsr %04x, rxcount %d",
+ epnum, done ? "off" : "reset",
+ musb_readw(epio, MUSB_RXCSR),
+ musb_readw(epio, MUSB_RXCOUNT));
+ } else {
+ done = true;
+ }
+
+ } else if (urb->status == -EINPROGRESS) {
+ /* if no errors, be sure a packet is ready for unloading */
+ if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+ status = -EPROTO;
+ ERR("Rx interrupt with no errors or packet!\n");
+
+ /* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+/* SCRUB (RX) */
+ /* do the proper sequence to abort the transfer */
+ musb_ep_select(mbase, epnum);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, val);
+ goto finish;
+ }
+
+ /* we are expecting IN packets */
+ if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
+ musb_dma_cppi41(musb)) && dma) {
+ musb_dbg(hw_ep->musb,
+ "RX%d count %d, buffer 0x%llx len %d/%d",
+ epnum, musb_readw(epio, MUSB_RXCOUNT),
+ (unsigned long long) urb->transfer_dma
+ + urb->actual_length,
+ qh->offset,
+ urb->transfer_buffer_length);
+
+ if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
+ xfer_len, iso_err))
+ goto finish;
+ else
+ dev_err(musb->controller, "error: rx_dma failed\n");
+ }
+
+ if (!dma) {
+ unsigned int received_len;
+
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer) {
+ qh->use_sg = true;
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ sg_flags);
+ }
+
+ if (qh->use_sg) {
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ done = true;
+ goto finish;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ received_len = urb->actual_length;
+ qh->offset = 0x0;
+ done = musb_host_packet_rx(musb, urb, epnum,
+ iso_err);
+ /* Calculate the number of bytes received */
+ received_len = urb->actual_length -
+ received_len;
+ qh->sg_miter.consumed = received_len;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ }
+ musb_dbg(musb, "read %spacket", done ? "last " : "");
+ }
+ }
+
+finish:
+ urb->actual_length += xfer_len;
+ qh->offset += xfer_len;
+ if (done) {
+ if (qh->use_sg) {
+ qh->use_sg = false;
+ urb->transfer_buffer = NULL;
+ }
+
+ if (urb->status == -EINPROGRESS)
+ urb->status = status;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+ }
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+ struct musb *musb,
+ struct musb_qh *qh,
+ int is_in)
+{
+ int idle = 0;
+ int best_diff;
+ int best_end, epnum;
+ struct musb_hw_ep *hw_ep = NULL;
+ struct list_head *head = NULL;
+ u8 toggle;
+ u8 txtype;
+ struct urb *urb = next_urb(qh);
+
+ /* use fixed hardware for control and bulk */
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+ head = &musb->control;
+ hw_ep = musb->control_ep;
+ goto success;
+ }
+
+ /* else, periodic transfers get muxed to other endpoints */
+
+ /*
+ * We know this qh hasn't been scheduled, so all we need to do
+ * is choose which hardware endpoint to put it on ...
+ *
+ * REVISIT what we really want here is a regular schedule tree
+ * like e.g. OHCI uses.
+ */
+ best_diff = 4096;
+ best_end = -1;
+
+ for (epnum = 1, hw_ep = musb->endpoints + 1;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
+ int diff;
+
+ if (musb_ep_get_qh(hw_ep, is_in) != NULL)
+ continue;
+
+ if (hw_ep == musb->bulk_ep)
+ continue;
+
+ if (is_in)
+ diff = hw_ep->max_packet_sz_rx;
+ else
+ diff = hw_ep->max_packet_sz_tx;
+ diff -= (qh->maxpacket * qh->hb_mult);
+
+ if (diff >= 0 && best_diff > diff) {
+
+ /*
+ * Mentor controller has a bug in that if we schedule
+ * a BULK Tx transfer on an endpoint that had earlier
+ * handled ISOC then the BULK transfer has to start on
+ * a zero toggle. If the BULK transfer starts on a 1
+ * toggle then this transfer will fail as the mentor
+ * controller starts the Bulk transfer on a 0 toggle
+ * irrespective of the programming of the toggle bits
+ * in the TXCSR register. Check for this condition
+ * while allocating the EP for a Tx Bulk transfer. If
+ * so skip this EP.
+ */
+ hw_ep = musb->endpoints + epnum;
+ toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
+ txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
+ >> 4) & 0x3;
+ if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
+ toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
+ continue;
+
+ best_diff = diff;
+ best_end = epnum;
+ }
+ }
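+ /*
+ * At this point best_end, if non-negative, is the hw_ep whose
+ * FIFO exceeds maxpacket * hb_mult by the least -- e.g. for a
+ * 64-byte interrupt endpoint, a 64-byte FIFO (diff 0) wins over
+ * a 512-byte FIFO (diff 448).
+ */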
+ /* use bulk reserved ep1 if no other ep is free */
+ if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
+ hw_ep = musb->bulk_ep;
+ if (is_in)
+ head = &musb->in_bulk;
+ else
+ head = &musb->out_bulk;
+
+ /* Enable the bulk RX/TX NAK timeout scheme when bulk requests are
+ * multiplexed. This scheme does not work in the high speed to full
+ * speed scenario, as NAK interrupts are not delivered for a full
+ * speed device connected through a high speed hub.
+ * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
+ * 4 (8 frames or 8 ms) for FS devices.
+ */
+ if (qh->dev)
+ qh->intv_reg =
+ (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
+ goto success;
+ } else if (best_end < 0) {
+ dev_err(musb->controller,
+ "%s hwep alloc failed for %dx%d\n",
+ musb_ep_xfertype_string(qh->type),
+ qh->hb_mult, qh->maxpacket);
+ return -ENOSPC;
+ }
+
+ idle = 1;
+ qh->mux = 0;
+ hw_ep = musb->endpoints + best_end;
+ musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
+success:
+ if (head) {
+ idle = list_empty(head);
+ list_add_tail(&qh->ring, head);
+ qh->mux = 1;
+ }
+ qh->hw_ep = hw_ep;
+ qh->hep->hcpriv = qh;
+ if (idle)
+ musb_start_urb(musb, is_in, qh);
+ return 0;
+}
+
+static int musb_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ struct usb_host_endpoint *hep = urb->ep;
+ struct musb_qh *qh;
+ struct usb_endpoint_descriptor *epd = &hep->desc;
+ int ret;
+ unsigned type_reg;
+ unsigned interval;
+
+ /* host role must be active */
+ if (!is_host_active(musb) || !musb->is_active)
+ return -ENODEV;
+
+ trace_musb_urb_enq(musb, urb);
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ qh = ret ? NULL : hep->hcpriv;
+ if (qh)
+ urb->hcpriv = qh;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* DMA mapping was already done, if needed, and this urb is on
+ * hep->urb_list now ... so we're done, unless hep wasn't yet
+ * scheduled onto a live qh.
+ *
+ * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+ * disabled, testing for empty qh->ring and avoiding qh setup costs
+ * except for the first urb queued after a config change.
+ */
+ if (qh || ret)
+ return ret;
+
+ /* Allocate and initialize qh, minimizing the work done each time
+ * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
+ *
+ * REVISIT consider a dedicated qh kmem_cache, so it's harder
+ * for bugs in other kernel code to break this driver...
+ */
+ qh = kzalloc(sizeof *qh, mem_flags);
+ if (!qh) {
+ spin_lock_irqsave(&musb->lock, flags);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -ENOMEM;
+ }
+
+ qh->hep = hep;
+ qh->dev = urb->dev;
+ INIT_LIST_HEAD(&qh->ring);
+ qh->is_ready = 1;
+
+ qh->maxpacket = usb_endpoint_maxp(epd);
+ qh->type = usb_endpoint_type(epd);
+
+ /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+ * Some musb cores don't support high bandwidth ISO transfers; and
+ * we don't (yet!) support high bandwidth interrupt transfers.
+ */
+ qh->hb_mult = usb_endpoint_maxp_mult(epd);
+ if (qh->hb_mult > 1) {
+ int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+ if (ok)
+ ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
+ || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
+ if (!ok) {
+ dev_err(musb->controller,
+ "high bandwidth %s (%dx%d) not supported\n",
+ musb_ep_xfertype_string(qh->type),
+ qh->hb_mult, qh->maxpacket & 0x7ff);
+ ret = -EMSGSIZE;
+ goto done;
+ }
+ qh->maxpacket &= 0x7ff;
+ }
+
+ qh->epnum = usb_endpoint_num(epd);
+
+ /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+ qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+ /* precompute rxtype/txtype/type0 register */
+ type_reg = (qh->type << 4) | qh->epnum;
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ type_reg |= 0xc0;
+ break;
+ case USB_SPEED_FULL:
+ type_reg |= 0x80;
+ break;
+ default:
+ type_reg |= 0x40;
+ }
+ qh->type_reg = type_reg;
+
+ /* Precompute RXINTERVAL/TXINTERVAL register */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ /*
+ * Full/low speeds use the linear encoding,
+ * high speed uses the logarithmic encoding.
+ */
+ if (urb->dev->speed <= USB_SPEED_FULL) {
+ interval = max_t(u8, epd->bInterval, 1);
+ break;
+ }
+ fallthrough;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* ISO always uses logarithmic encoding */
+ interval = min_t(u8, epd->bInterval, 16);
+ break;
+ default:
+ /* REVISIT we actually want to use NAK limits, hinting to the
+ * transfer scheduling logic to try some other qh, e.g. try
+ * for 2 msec first:
+ *
+ * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+ *
+ * The downside of disabling this is that transfer scheduling
+ * gets VERY unfair for nonperiodic transfers; a misbehaving
+ * peripheral could make that hurt. That's perfectly normal
+ * for reads from network or serial adapters ... so we have
+ * partial NAKlimit support for bulk RX.
+ *
+ * The upside of disabling it is simpler transfer scheduling.
+ */
+ interval = 0;
+ }
+ qh->intv_reg = interval;
+
+ /* precompute addressing for external hub/tt ports */
+ if (musb->is_multipoint) {
+ struct usb_device *parent = urb->dev->parent;
+
+ if (parent != hcd->self.root_hub) {
+ qh->h_addr_reg = (u8) parent->devnum;
+
+ /* set up tt info if needed */
+ if (urb->dev->tt) {
+ qh->h_port_reg = (u8) urb->dev->ttport;
+ if (urb->dev->tt->hub)
+ qh->h_addr_reg =
+ (u8) urb->dev->tt->hub->devnum;
+ if (urb->dev->tt->multi)
+ qh->h_addr_reg |= 0x80;
+ }
+ }
+ }
+
+ /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+ * until we get real dma queues (with an entry for each urb/buffer),
+ * we only have work to do in the former case.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (hep->hcpriv || !next_urb(qh)) {
+ /* some concurrent activity submitted another urb to hep...
+ * odd, rare, error prone, but legal.
+ */
+ kfree(qh);
+ qh = NULL;
+ ret = 0;
+ } else
+ ret = musb_schedule(musb, qh,
+ epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+ if (ret == 0) {
+ urb->hcpriv = qh;
+ /* FIXME set urb->start_frame for iso/intr, it's tested in
+ * musb_start_urb(), but otherwise only konicawc cares ...
+ */
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+done:
+ if (ret != 0) {
+ spin_lock_irqsave(&musb->lock, flags);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ kfree(qh);
+ }
+ return ret;
+}
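+
+/*
+ * Illustrative sketch (not part of the driver): how the wMaxPacketSize
+ * fields consumed above decode for a high-bandwidth ISO endpoint.
+ * usb_endpoint_maxp() returns the packet size (bits 10:0) and
+ * usb_endpoint_maxp_mult() the per-microframe transfer multiplier;
+ * the helper name below is hypothetical.
+ */
+#if 0 /* example only */
+static unsigned int example_hb_bytes_per_uframe(
+		const struct usb_endpoint_descriptor *epd)
+{
+	unsigned int maxp = usb_endpoint_maxp(epd);	/* up to 1024 */
+	unsigned int mult = usb_endpoint_maxp_mult(epd);	/* 1..3 */
+
+	return maxp * mult;	/* e.g. 1024 x 3 = 3072 bytes per microframe */
+}
+#endif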
+
+
+/*
+ * Abort a transfer that's at the head of a hardware queue.
+ * Called with controller locked, irqs blocked.
+ * That hardware queue advances to the next transfer, unless prevented.
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
+{
+ struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
+ void __iomem *epio = ep->regs;
+ unsigned hw_end = ep->epnum;
+ void __iomem *regs = ep->musb->mregs;
+ int is_in = usb_pipein(urb->pipe);
+ int status = 0;
+ u16 csr;
+ struct dma_channel *dma = NULL;
+
+ musb_ep_select(regs, hw_end);
+
+ if (is_dma_capable()) {
+ dma = is_in ? ep->rx_channel : ep->tx_channel;
+ if (dma) {
+ status = ep->musb->dma_controller->channel_abort(dma);
+ musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
+ is_in ? 'R' : 'T', ep->epnum,
+ urb, status);
+ urb->actual_length += dma->actual_len;
+ }
+ }
+
+ /* turn off DMA requests, discard state, stop polling ... */
+ if (ep->epnum && is_in) {
+ /* giveback saves bulk toggle */
+ csr = musb_h_flush_rxfifo(ep, 0);
+
+ /* clear the endpoint's irq status here to avoid bogus irqs */
+ if (is_dma_capable() && dma)
+ musb_platform_clear_ep_rxintr(musb, ep->epnum);
+ } else if (ep->epnum) {
+ musb_h_tx_flush_fifo(ep);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* flush cpu writebuffer */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ musb_h_ep0_flush_fifo(ep);
+ }
+ if (status == 0)
+ musb_advance_schedule(ep->musb, urb, ep, is_in);
+ return status;
+}
+
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh;
+ unsigned long flags;
+ int is_in = usb_pipein(urb->pipe);
+ int ret;
+
+ trace_musb_urb_deq(musb, urb);
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto done;
+
+ qh = urb->hcpriv;
+ if (!qh)
+ goto done;
+
+ /*
+ * Any URB not actively programmed into endpoint hardware can be
+ * immediately given back; that's any URB not at the head of an
+ * endpoint queue, unless someday we get real DMA queues. And even
+ * if it's at the head, it might not be known to the hardware...
+ *
+ * Otherwise abort current transfer, pending DMA, etc.; urb->status
+ * has already been updated. This is a synchronous abort; it'd be
+ * OK to hold off until after some IRQ, though.
+ *
+ * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
+ */
+ if (!qh->is_ready
+ || urb->urb_list.prev != &qh->hep->urb_list
+ || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
+ int ready = qh->is_ready;
+
+ qh->is_ready = 0;
+ musb_giveback(musb, urb, 0);
+ qh->is_ready = ready;
+
+ /* If nothing else (usually musb_giveback) is using it
+ * and its URB list has emptied, recycle this qh.
+ */
+ if (ready && list_empty(&qh->hep->urb_list)) {
+ musb_ep_set_qh(qh->hw_ep, is_in, NULL);
+ qh->hep->hcpriv = NULL;
+ list_del(&qh->ring);
+ kfree(qh);
+ }
+ } else
+ ret = musb_cleanup_urb(urb, qh);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh;
+ struct urb *urb;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ qh = hep->hcpriv;
+ if (qh == NULL)
+ goto exit;
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+
+ /* Kick the first URB off the hardware, if needed */
+ qh->is_ready = 0;
+ if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
+ urb = next_urb(qh);
+
+ /* make software (then hardware) stop ASAP */
+ if (!urb->unlinked)
+ urb->status = -ESHUTDOWN;
+
+ /* cleanup */
+ musb_cleanup_urb(urb, qh);
+
+ /* Then nuke all the others ... and advance the
+ * queue on hw_ep (e.g. bulk ring) when we're done.
+ */
+ while (!list_empty(&hep->urb_list)) {
+ urb = next_urb(qh);
+ urb->status = -ESHUTDOWN;
+ musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
+ }
+ } else {
+ /* Just empty the queue; the hardware is busy with
+ * other transfers, and since !qh->is_ready nothing
+ * will activate any of these as it advances.
+ */
+ while (!list_empty(&hep->urb_list))
+ musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+
+ hep->hcpriv = NULL;
+ list_del(&qh->ring);
+ kfree(qh);
+ }
+exit:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ return musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ /* NOTE: musb_start() is called when the hub driver turns
+ * on port power, or when (OTG) peripheral starts.
+ */
+ hcd->state = HC_STATE_RUNNING;
+ musb->port1_status = 0;
+ return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+ musb_stop(hcd_to_musb(hcd));
+ hcd->state = HC_STATE_HALT;
+}
+
+static int musb_bus_suspend(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ u8 devctl;
+ int ret;
+
+ ret = musb_port_suspend(musb, true);
+ if (ret)
+ return ret;
+
+ if (!is_host_active(musb))
+ return 0;
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_SUSPEND:
+ return 0;
+ case OTG_STATE_A_WAIT_VRISE:
+ /* ID could be grounded even if there's no device
+ * on the other end of the cable. NOTE that the
+ * A_WAIT_VRISE timers are messy with MUSB...
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+ break;
+ default:
+ break;
+ }
+
+ if (musb->is_active) {
+ WARNING("trying to suspend as %s while active\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ return -EBUSY;
+ } else
+ return 0;
+}
+
+static int musb_bus_resume(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->config &&
+ musb->config->host_port_deassert_reset_at_resume)
+ musb_port_reset(musb, false);
+
+ return 0;
+}
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+
+#define MUSB_USB_DMA_ALIGN 4
+
+struct musb_temp_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[];
+};
+
+static void musb_free_temp_buffer(struct urb *urb)
+{
+ enum dma_data_direction dir;
+ struct musb_temp_buffer *temp;
+ size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
+ data);
+
+ if (dir == DMA_FROM_DEVICE) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(temp->old_xfer_buffer, temp->data, length);
+ }
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ enum dma_data_direction dir;
+ struct musb_temp_buffer *temp;
+ void *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct musb_temp_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
+
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (dir == DMA_TO_DEVICE)
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
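+
+/*
+ * Illustrative sketch (not part of the driver): why kmalloc_size above
+ * reserves MUSB_USB_DMA_ALIGN - 1 extra bytes. PTR_ALIGN() rounds the
+ * allocation up to the next 4-byte boundary, so up to 3 leading bytes
+ * may be skipped; the padding guarantees the payload still fits.
+ */
+#if 0 /* example only, fragment */
+	void *raw = kmalloc(len + sizeof(struct musb_temp_buffer)
+			+ MUSB_USB_DMA_ALIGN - 1, GFP_KERNEL);
+	struct musb_temp_buffer *temp = PTR_ALIGN(raw, MUSB_USB_DMA_ALIGN);
+	/* temp->data is now 4-byte aligned; raw <= (void *)temp < raw + 4 */
+#endif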
+
+static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int ret;
+
+ /*
+ * The DMA engine in RTL1.8 and above cannot handle
+ * DMA addresses that are not aligned to a 4 byte boundary.
+	 * For such engines we implement the (un)map_urb_for_dma hooks here.
+	 * Do not use these hooks for RTL < 1.8.
+ */
+ if (musb->hwvers < MUSB_HWVERS_1800)
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+
+ ret = musb_alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ musb_free_temp_buffer(urb);
+
+ return ret;
+}
+
+static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+
+ /* Do not use this hook for RTL<1.8 (see description above) */
+ if (musb->hwvers < MUSB_HWVERS_1800)
+ return;
+
+ musb_free_temp_buffer(urb);
+}
+#endif /* !CONFIG_MUSB_PIO_ONLY */
+
+static const struct hc_driver musb_hc_driver = {
+ .description = "musb-hcd",
+ .product_desc = "MUSB HDRC host driver",
+ .hcd_priv_size = sizeof(struct musb *),
+ .flags = HCD_USB2 | HCD_DMA | HCD_MEMORY,
+
+ /* not using irq handler or reset hooks from usbcore, since
+ * those must be shared with peripheral code for OTG configs
+ */
+
+ .start = musb_h_start,
+ .stop = musb_h_stop,
+
+ .get_frame_number = musb_h_get_frame_number,
+
+ .urb_enqueue = musb_urb_enqueue,
+ .urb_dequeue = musb_urb_dequeue,
+ .endpoint_disable = musb_h_disable,
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ .map_urb_for_dma = musb_map_urb_for_dma,
+ .unmap_urb_for_dma = musb_unmap_urb_for_dma,
+#endif
+
+ .hub_status_data = musb_hub_status_data,
+ .hub_control = musb_hub_control,
+ .bus_suspend = musb_bus_suspend,
+ .bus_resume = musb_bus_resume,
+ /* .start_port_reset = NULL, */
+ /* .hub_irq_enable = NULL, */
+};
+
+int musb_host_alloc(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+ musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
+ if (!musb->hcd)
+ return -EINVAL;
+
+ *musb->hcd->hcd_priv = (unsigned long) musb;
+ musb->hcd->self.uses_pio_for_control = 1;
+ musb->hcd->uses_new_polling = 1;
+ musb->hcd->has_tt = 1;
+
+ return 0;
+}
+
+void musb_host_cleanup(struct musb *musb)
+{
+ if (musb->port_mode == MUSB_PERIPHERAL)
+ return;
+ usb_remove_hcd(musb->hcd);
+}
+
+void musb_host_free(struct musb *musb)
+{
+ usb_put_hcd(musb->hcd);
+}
+
+int musb_host_setup(struct musb *musb, int power_budget)
+{
+ int ret;
+ struct usb_hcd *hcd = musb->hcd;
+
+ if (musb->port_mode == MUSB_HOST) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ }
+ otg_set_host(musb->xceiv->otg, &hcd->self);
+ /* don't support otg protocols */
+ hcd->self.otg_port = 0;
+ musb->xceiv->otg->host = &hcd->self;
+ hcd->power_budget = 2 * (power_budget ? : 250);
+ hcd->skip_phy_initialization = 1;
+
+ ret = usb_add_hcd(hcd, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+}
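+
+/*
+ * Illustrative note (not part of the driver): the power_budget argument
+ * comes from the platform's min_power field, documented in units of
+ * 2 mA, while usbcore accounts hcd->power_budget in mA, hence the
+ * doubling above; "x ? : y" is the GCC shorthand for "x ? x : y".
+ */
+#if 0 /* example only, fragment */
+	hcd->power_budget = 2 * (power_budget ? : 250);
+	/* power_budget == 0 -> 500 mA (default); == 50 -> 100 mA */
+#endif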
+
+void musb_host_resume_root_hub(struct musb *musb)
+{
+ usb_hcd_resume_root_hub(musb->hcd);
+}
+
+void musb_host_poke_root_hub(struct musb *musb)
+{
+ MUSB_HST_MODE(musb);
+ if (musb->hcd->status_urb)
+ usb_hcd_poll_rh_status(musb->hcd);
+ else
+ usb_hcd_resume_root_hub(musb->hcd);
+}
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 000000000..63298bd23
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
+#include <linux/scatterlist.h>
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+ struct usb_host_endpoint *hep; /* usbcore info */
+ struct usb_device *dev;
+ struct musb_hw_ep *hw_ep; /* current binding */
+
+ struct list_head ring; /* of musb_qh */
+ /* struct musb_qh *next; */ /* for periodic tree */
+ u8 mux; /* qh multiplexed to hw_ep */
+
+ unsigned offset; /* in urb->transfer_buffer */
+ unsigned segsize; /* current xfer fragment */
+
+ u8 type_reg; /* {rx,tx} type register */
+ u8 intv_reg; /* {rx,tx} interval register */
+ u8 addr_reg; /* device address register */
+ u8 h_addr_reg; /* hub address register */
+ u8 h_port_reg; /* hub port register */
+
+ u8 is_ready; /* safe to modify hw_ep */
+ u8 type; /* XFERTYPE_* */
+ u8 epnum;
+ u8 hb_mult; /* high bandwidth pkts per uf */
+ u16 maxpacket;
+ u16 frame; /* for periodic schedule */
+ unsigned iso_idx; /* in urb->iso_frame_desc[] */
+ struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */
+ bool use_sg; /* to track urb using sglist */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+ if (list_empty(q))
+ return NULL;
+ return list_entry(q->next, struct musb_qh, ring);
+}
+
+
+#if IS_ENABLED(CONFIG_USB_MUSB_HOST) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+extern struct musb *hcd_to_musb(struct usb_hcd *);
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern int musb_host_alloc(struct musb *);
+extern int musb_host_setup(struct musb *, int);
+extern void musb_host_cleanup(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+extern void musb_root_disconnect(struct musb *musb);
+extern void musb_host_free(struct musb *);
+extern void musb_host_resume_root_hub(struct musb *musb);
+extern void musb_host_poke_root_hub(struct musb *musb);
+extern int musb_port_suspend(struct musb *musb, bool do_suspend);
+extern void musb_port_reset(struct musb *musb, bool do_reset);
+extern void musb_host_finish_resume(struct work_struct *work);
+#else
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return NULL;
+}
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ return 0;
+}
+
+static inline int musb_host_alloc(struct musb *musb)
+{
+ return 0;
+}
+
+static inline int musb_host_setup(struct musb *musb, int power_budget)
+{
+ return 0;
+}
+
+static inline void musb_host_cleanup(struct musb *musb) {}
+static inline void musb_host_free(struct musb *musb) {}
+static inline void musb_host_tx(struct musb *musb, u8 epnum) {}
+static inline void musb_host_rx(struct musb *musb, u8 epnum) {}
+static inline void musb_root_disconnect(struct musb *musb) {}
+static inline void musb_host_resume_root_hub(struct musb *musb) {}
+static inline void musb_host_poke_root_hub(struct musb *musb) {}
+static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ return 0;
+}
+static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
+static inline void musb_host_finish_resume(struct work_struct *work) {}
+#endif
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+ struct list_head *queue;
+
+ if (!qh)
+ return NULL;
+ queue = &qh->hep->urb_list;
+ if (list_empty(queue))
+ return NULL;
+ return list_entry(queue->next, struct urb, urb_list);
+}
+
+#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 000000000..12874d3b2
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+
+#define musb_ep_select(_mbase, _epnum) musb->io.ep_select((_mbase), (_epnum))
+
+/**
+ * struct musb_io - IO functions for MUSB
+ * @ep_offset: platform specific function to get end point offset
+ * @ep_select: platform specific function to select end point
+ * @fifo_offset: platform specific function to get fifo offset
+ * @read_fifo: platform specific function to read fifo
+ * @write_fifo: platform specific function to write fifo
+ * @busctl_offset: platform specific function to get busctl offset
+ * @get_toggle: platform specific function to get toggle
+ * @set_toggle: platform specific function to set toggle
+ */
+struct musb_io {
+ u32 (*ep_offset)(u8 epnum, u16 offset);
+ void (*ep_select)(void __iomem *mbase, u8 epnum);
+ u32 (*fifo_offset)(u8 epnum);
+ void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
+ void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+ u32 (*busctl_offset)(u8 epnum, u16 offset);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
+};
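+
+/*
+ * Illustrative sketch (not part of the header): how a glue layer with a
+ * flat register layout might fill in these ops. The names below are
+ * hypothetical; real glue drivers install their own implementations.
+ */
+#if 0 /* example only */
+static u32 example_flat_ep_offset(u8 epnum, u16 offset)
+{
+	/* one 0x10-byte register window per endpoint after the common block */
+	return 0x100 + (0x10 * epnum) + offset;
+}
+
+static void example_flat_ep_select(void __iomem *mbase, u8 epnum)
+{
+	/* flat address space: nothing to select */
+}
+#endif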
+
+/* Do not add new entries here; add them to struct musb_io instead */
+extern u8 (*musb_readb)(void __iomem *addr, u32 offset);
+extern void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
+extern u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+extern u16 (*musb_readw)(void __iomem *addr, u32 offset);
+extern void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
+extern u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+extern u32 musb_readl(void __iomem *addr, u32 offset);
+extern void musb_writel(void __iomem *addr, u32 offset, u32 data);
+
+#endif
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 000000000..5fa110978
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#ifndef __MUSB_REGS_H__
+#define __MUSB_REGS_H__
+
+#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSB_POWER_ISOUPDATE 0x80
+#define MUSB_POWER_SOFTCONN 0x40
+#define MUSB_POWER_HSENAB 0x20
+#define MUSB_POWER_HSMODE 0x10
+#define MUSB_POWER_RESET 0x08
+#define MUSB_POWER_RESUME 0x04
+#define MUSB_POWER_SUSPENDM 0x02
+#define MUSB_POWER_ENSUSPEND 0x01
+
+/* INTRUSB */
+#define MUSB_INTR_SUSPEND 0x01
+#define MUSB_INTR_RESUME 0x02
+#define MUSB_INTR_RESET 0x04 /* Peripheral mode only */
+#define MUSB_INTR_BABBLE 0x04 /* Host mode only; shares bit 2 with RESET */
+#define MUSB_INTR_SOF 0x08
+#define MUSB_INTR_CONNECT 0x10
+#define MUSB_INTR_DISCONNECT 0x20
+#define MUSB_INTR_SESSREQ 0x40
+#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */
+
+/* DEVCTL */
+#define MUSB_DEVCTL_BDEVICE 0x80
+#define MUSB_DEVCTL_FSDEV 0x40
+#define MUSB_DEVCTL_LSDEV 0x20
+#define MUSB_DEVCTL_VBUS 0x18
+#define MUSB_DEVCTL_VBUS_SHIFT 3
+#define MUSB_DEVCTL_HM 0x04
+#define MUSB_DEVCTL_HR 0x02
+#define MUSB_DEVCTL_SESSION 0x01
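+
+/*
+ * Illustrative sketch (not part of the header): reading the 2-bit VBUS
+ * level out of DEVCTL with the mask/shift pair above; 0x3 means VBUS is
+ * above the VBus Valid threshold, as tested in musb_bus_suspend().
+ */
+#if 0 /* example only, fragment */
+	u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+	u8 vbus = (devctl & MUSB_DEVCTL_VBUS) >> MUSB_DEVCTL_VBUS_SHIFT;
+	/* 0: below SessEnd, 1: above SessEnd, 2: above AValid, 3: above VBusValid */
+#endif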
+
+/* BABBLE_CTL */
+#define MUSB_BABBLE_FORCE_TXIDLE 0x80
+#define MUSB_BABBLE_SW_SESSION_CTRL 0x40
+#define MUSB_BABBLE_STUCK_J 0x20
+#define MUSB_BABBLE_RCV_DISABLE 0x04
+
+/* MUSB ULPI VBUSCONTROL */
+#define MUSB_ULPI_USE_EXTVBUS 0x01
+#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+/* ULPI_REG_CONTROL */
+#define MUSB_ULPI_REG_REQ (1 << 0)
+#define MUSB_ULPI_REG_CMPLT (1 << 1)
+#define MUSB_ULPI_RDN_WR (1 << 2)
+
+/* TESTMODE */
+#define MUSB_TEST_FORCE_HOST 0x80
+#define MUSB_TEST_FIFO_ACCESS 0x40
+#define MUSB_TEST_FORCE_FS 0x20
+#define MUSB_TEST_FORCE_HS 0x10
+#define MUSB_TEST_PACKET 0x08
+#define MUSB_TEST_K 0x04
+#define MUSB_TEST_J 0x02
+#define MUSB_TEST_SE0_NAK 0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSB_FIFOSZ_DPB 0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSB_FIFOSZ_SIZE 0x0f
+
+/* CSR0 */
+#define MUSB_CSR0_FLUSHFIFO 0x0100
+#define MUSB_CSR0_TXPKTRDY 0x0002
+#define MUSB_CSR0_RXPKTRDY 0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSB_CSR0_P_SVDSETUPEND 0x0080
+#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040
+#define MUSB_CSR0_P_SENDSTALL 0x0020
+#define MUSB_CSR0_P_SETUPEND 0x0010
+#define MUSB_CSR0_P_DATAEND 0x0008
+#define MUSB_CSR0_P_SENTSTALL 0x0004
+
+/* CSR0 in Host mode */
+#define MUSB_CSR0_H_DIS_PING 0x0800
+#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */
+#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */
+#define MUSB_CSR0_H_NAKTIMEOUT 0x0080
+#define MUSB_CSR0_H_STATUSPKT 0x0040
+#define MUSB_CSR0_H_REQPKT 0x0020
+#define MUSB_CSR0_H_ERROR 0x0010
+#define MUSB_CSR0_H_SETUPPKT 0x0008
+#define MUSB_CSR0_H_RXSTALL 0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_CSR0_P_WZC_BITS \
+ (MUSB_CSR0_P_SENTSTALL)
+#define MUSB_CSR0_H_WZC_BITS \
+ (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
+ | MUSB_CSR0_RXPKTRDY)
+
+/* TxType/RxType */
+#define MUSB_TYPE_SPEED 0xc0
+#define MUSB_TYPE_SPEED_SHIFT 6
+#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */
+#define MUSB_TYPE_PROTO_SHIFT 4
+#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */
+#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */
+#define MUSB_CONFIGDATA_BIGENDIAN 0x20
+#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
+#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
+#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */
+#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
+#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSB_TXCSR_AUTOSET 0x8000
+#define MUSB_TXCSR_DMAENAB 0x1000
+#define MUSB_TXCSR_FRCDATATOG 0x0800
+#define MUSB_TXCSR_DMAMODE 0x0400
+#define MUSB_TXCSR_CLRDATATOG 0x0040
+#define MUSB_TXCSR_FLUSHFIFO 0x0008
+#define MUSB_TXCSR_FIFONOTEMPTY 0x0002
+#define MUSB_TXCSR_TXPKTRDY 0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSB_TXCSR_P_ISO 0x4000
+#define MUSB_TXCSR_P_INCOMPTX 0x0080
+#define MUSB_TXCSR_P_SENTSTALL 0x0020
+#define MUSB_TXCSR_P_SENDSTALL 0x0010
+#define MUSB_TXCSR_P_UNDERRUN 0x0004
+
+/* TXCSR in Host mode */
+#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200
+#define MUSB_TXCSR_H_DATATOGGLE 0x0100
+#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080
+#define MUSB_TXCSR_H_RXSTALL 0x0020
+#define MUSB_TXCSR_H_ERROR 0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_TXCSR_P_WZC_BITS \
+ (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
+ | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
+#define MUSB_TXCSR_H_WZC_BITS \
+ (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
+ | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
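+
+/*
+ * Illustrative sketch (not part of the header): the WZC masks are OR'ed
+ * into a CSR before a read-modify-write so that write-zero-to-clear
+ * status bits are not cleared by accident.
+ */
+#if 0 /* example only, fragment */
+	u16 csr = musb_readw(epio, MUSB_TXCSR);
+	csr |= MUSB_TXCSR_H_WZC_BITS;	/* preserve w0c status bits... */
+	csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;	/* ...except the one we mean to clear */
+	musb_writew(epio, MUSB_TXCSR, csr);
+#endif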
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSB_RXCSR_AUTOCLEAR 0x8000
+#define MUSB_RXCSR_DMAENAB 0x2000
+#define MUSB_RXCSR_DISNYET 0x1000
+#define MUSB_RXCSR_PID_ERR 0x1000
+#define MUSB_RXCSR_DMAMODE 0x0800
+#define MUSB_RXCSR_INCOMPRX 0x0100
+#define MUSB_RXCSR_CLRDATATOG 0x0080
+#define MUSB_RXCSR_FLUSHFIFO 0x0010
+#define MUSB_RXCSR_DATAERROR 0x0008
+#define MUSB_RXCSR_FIFOFULL 0x0002
+#define MUSB_RXCSR_RXPKTRDY 0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSB_RXCSR_P_ISO 0x4000
+#define MUSB_RXCSR_P_SENTSTALL 0x0040
+#define MUSB_RXCSR_P_SENDSTALL 0x0020
+#define MUSB_RXCSR_P_OVERRUN 0x0004
+
+/* RXCSR in Host mode */
+#define MUSB_RXCSR_H_AUTOREQ 0x4000
+#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400
+#define MUSB_RXCSR_H_DATATOGGLE 0x0200
+#define MUSB_RXCSR_H_RXSTALL 0x0040
+#define MUSB_RXCSR_H_REQPKT 0x0020
+#define MUSB_RXCSR_H_ERROR 0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_RXCSR_P_WZC_BITS \
+ (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
+ | MUSB_RXCSR_RXPKTRDY)
+#define MUSB_RXCSR_H_WZC_BITS \
+ (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
+ | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSB_HUBADDR_MULTI_TT 0x80
+
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR 0x00 /* 8-bit */
+#define MUSB_POWER 0x01 /* 8-bit */
+
+#define MUSB_INTRTX 0x02 /* 16-bit */
+#define MUSB_INTRRX 0x04
+#define MUSB_INTRTXE 0x06
+#define MUSB_INTRRXE 0x08
+#define MUSB_INTRUSB 0x0A /* 8 bit */
+#define MUSB_INTRUSBE 0x0B /* 8 bit */
+#define MUSB_FRAME 0x0C
+#define MUSB_INDEX 0x0E /* 8 bit */
+#define MUSB_TESTMODE 0x0F /* 8 bit */
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL 0x60 /* 8 bit */
+#define MUSB_BABBLE_CTL 0x61 /* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
+#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS 0x6C /* 8 bit */
+#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
+#define MUSB_ULPI_INT_MASK 0x72 /* 8 bit */
+#define MUSB_ULPI_INT_SRC 0x73 /* 8 bit */
+#define MUSB_ULPI_REG_DATA 0x74 /* 8 bit */
+#define MUSB_ULPI_REG_ADDR 0x75 /* 8 bit */
+#define MUSB_ULPI_REG_CONTROL 0x76 /* 8 bit */
+#define MUSB_ULPI_RAW_DATA 0x77 /* 8 bit */
+
+#define MUSB_EPINFO 0x78 /* 8 bit */
+#define MUSB_RAMINFO 0x79 /* 8 bit */
+#define MUSB_LINKINFO 0x7a /* 8 bit */
+#define MUSB_VPLEN 0x7b /* 8 bit */
+#define MUSB_HS_EOF1 0x7c /* 8 bit */
+#define MUSB_FS_EOF1 0x7d /* 8 bit */
+#define MUSB_LS_EOF1 0x7e /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP 0x00
+#define MUSB_TXCSR 0x02
+#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
+#define MUSB_RXMAXP 0x04
+#define MUSB_RXCSR 0x06
+#define MUSB_RXCOUNT 0x08
+#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
+#define MUSB_TXTYPE 0x0A
+#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
+#define MUSB_TXINTERVAL 0x0B
+#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE 0x0C
+#define MUSB_RXINTERVAL 0x0D
+#define MUSB_FIFOSIZE 0x0F
+#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
+
+#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
+
+#define MUSB_TXCSR_MODE 0x2000
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR 0x00
+#define MUSB_TXHUBADDR 0x02
+#define MUSB_TXHUBPORT 0x03
+
+#define MUSB_RXFUNCADDR 0x04
+#define MUSB_RXHUBADDR 0x06
+#define MUSB_RXHUBPORT 0x07
+
+static inline u8 musb_read_configdata(void __iomem *mbase)
+{
+ musb_writeb(mbase, MUSB_INDEX, 0);
+ return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+}
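+
+/*
+ * Illustrative sketch (not part of the header): decoding a CONFIGDATA
+ * value returned by musb_read_configdata() with the masks defined above.
+ */
+#if 0 /* example only, fragment */
+	u8 reg = musb_read_configdata(musb->mregs);
+
+	if (reg & MUSB_CONFIGDATA_DYNFIFO)
+		; /* FIFO sizes are programmed via MUSB_TXFIFOSZ/MUSB_RXFIFOSZ */
+	if (reg & MUSB_CONFIGDATA_UTMIDW)
+		; /* 16-bit UTMI+ data width */
+#endif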
+
+static inline void musb_write_rxfunaddr(struct musb *musb, u8 epnum,
+ u8 qh_addr_reg)
+{
+ musb_writeb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_RXFUNCADDR),
+ qh_addr_reg);
+}
+
+static inline void musb_write_rxhubaddr(struct musb *musb, u8 epnum,
+ u8 qh_h_addr_reg)
+{
+ musb_writeb(musb->mregs, musb->io.busctl_offset(epnum, MUSB_RXHUBADDR),
+ qh_h_addr_reg);
+}
+
+static inline void musb_write_rxhubport(struct musb *musb, u8 epnum,
+ u8 qh_h_port_reg)
+{
+ musb_writeb(musb->mregs, musb->io.busctl_offset(epnum, MUSB_RXHUBPORT),
+ qh_h_port_reg);
+}
+
+static inline void musb_write_txfunaddr(struct musb *musb, u8 epnum,
+ u8 qh_addr_reg)
+{
+ musb_writeb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_TXFUNCADDR),
+ qh_addr_reg);
+}
+
+static inline void musb_write_txhubaddr(struct musb *musb, u8 epnum,
+ u8 qh_addr_reg)
+{
+ musb_writeb(musb->mregs, musb->io.busctl_offset(epnum, MUSB_TXHUBADDR),
+ qh_addr_reg);
+}
+
+static inline void musb_write_txhubport(struct musb *musb, u8 epnum,
+ u8 qh_h_port_reg)
+{
+ musb_writeb(musb->mregs, musb->io.busctl_offset(epnum, MUSB_TXHUBPORT),
+ qh_h_port_reg);
+}
+
+static inline u8 musb_read_rxfunaddr(struct musb *musb, u8 epnum)
+{
+ return musb_readb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_RXFUNCADDR));
+}
+
+static inline u8 musb_read_rxhubaddr(struct musb *musb, u8 epnum)
+{
+ return musb_readb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_RXHUBADDR));
+}
+
+static inline u8 musb_read_rxhubport(struct musb *musb, u8 epnum)
+{
+ return musb_readb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_RXHUBPORT));
+}
+
+static inline u8 musb_read_txfunaddr(struct musb *musb, u8 epnum)
+{
+ return musb_readb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_TXFUNCADDR));
+}
+
+static inline u8 musb_read_txhubaddr(struct musb *musb, u8 epnum)
+{
+ return musb_readb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_TXHUBADDR));
+}
+
+static inline u8 musb_read_txhubport(struct musb *musb, u8 epnum)
+{
+ return musb_readb(musb->mregs,
+ musb->io.busctl_offset(epnum, MUSB_TXHUBPORT));
+}
+
+#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_trace.c b/drivers/usb/musb/musb_trace.c
new file mode 100644
index 000000000..476872adc
--- /dev/null
+++ b/drivers/usb/musb/musb_trace.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * musb_trace.c - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "musb_trace.h"
+
+void musb_dbg(struct musb *musb, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ trace_musb_log(musb, &vaf);
+
+ va_end(args);
+}
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
new file mode 100644
index 000000000..f246b1439
--- /dev/null
+++ b/drivers/usb/musb/musb_trace.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * musb_trace.h - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM musb
+
+#if !defined(__MUSB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MUSB_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+#include "musb_core.h"
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+#include "cppi_dma.h"
+#endif
+
+#define MUSB_MSG_MAX 500
+
+TRACE_EVENT(musb_log,
+ TP_PROTO(struct musb *musb, struct va_format *vaf),
+ TP_ARGS(musb, vaf),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk("%s: %s", __get_str(name), __get_str(msg))
+);
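+
+/*
+ * Usage note (not part of the header): once these tracepoints are built
+ * in, they can typically be enabled at runtime through tracefs, e.g.
+ * (mount point may vary):
+ *
+ *	echo 1 > /sys/kernel/tracing/events/musb/enable
+ *	cat /sys/kernel/tracing/trace
+ */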
+
+TRACE_EVENT(musb_state,
+ TP_PROTO(struct musb *musb, u8 devctl, const char *desc),
+ TP_ARGS(musb, devctl, desc),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(u8, devctl)
+ __string(desc, desc)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->devctl = devctl;
+ __assign_str(desc, desc);
+ ),
+ TP_printk("%s: devctl: %02x %s", __get_str(name), __entry->devctl,
+ __get_str(desc))
+);
+
+DECLARE_EVENT_CLASS(musb_regb,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void __iomem *, addr)
+ __field(unsigned int, offset)
+ __field(u8, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %02x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regb, musb_readb,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regb, musb_writeb,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regw,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void __iomem *, addr)
+ __field(unsigned int, offset)
+ __field(u16, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %04x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regw, musb_readw,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regw, musb_writew,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regl,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void __iomem *, addr)
+ __field(unsigned int, offset)
+ __field(u32, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %08x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regl, musb_readl,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regl, musb_writel,
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+TRACE_EVENT(musb_isr,
+ TP_PROTO(struct musb *musb),
+ TP_ARGS(musb),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(u8, int_usb)
+ __field(u16, int_tx)
+ __field(u16, int_rx)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->int_usb = musb->int_usb;
+ __entry->int_tx = musb->int_tx;
+ __entry->int_rx = musb->int_rx;
+ ),
+ TP_printk("%s: usb %02x, tx %04x, rx %04x",
+ __get_str(name), __entry->int_usb,
+ __entry->int_tx, __entry->int_rx
+ )
+);
+
+DECLARE_EVENT_CLASS(musb_urb,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(struct urb *, urb)
+ __field(unsigned int, pipe)
+ __field(int, status)
+ __field(unsigned int, flag)
+ __field(u32, buf_len)
+ __field(u32, actual_len)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->urb = urb;
+ __entry->pipe = urb->pipe;
+ __entry->status = urb->status;
+ __entry->flag = urb->transfer_flags;
+ __entry->buf_len = urb->transfer_buffer_length;
+ __entry->actual_len = urb->actual_length;
+ ),
+ TP_printk("%s: %p, dev%d ep%d%s, flag 0x%x, len %d/%d, status %d",
+ __get_str(name), __entry->urb,
+ usb_pipedevice(__entry->pipe),
+ usb_pipeendpoint(__entry->pipe),
+ usb_pipein(__entry->pipe) ? "in" : "out",
+ __entry->flag,
+ __entry->actual_len, __entry->buf_len,
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_start,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_gb,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_rx,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_tx,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_enq,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_deq,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DECLARE_EVENT_CLASS(musb_req,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(struct usb_request *, req)
+ __field(u8, is_tx)
+ __field(u8, epnum)
+ __field(int, status)
+ __field(unsigned int, buf_len)
+ __field(unsigned int, actual_len)
+ __field(unsigned int, zero)
+ __field(unsigned int, short_not_ok)
+ __field(unsigned int, no_interrupt)
+ ),
+ TP_fast_assign(
+ __entry->req = &req->request;
+ __entry->is_tx = req->tx;
+ __entry->epnum = req->epnum;
+ __entry->status = req->request.status;
+ __entry->buf_len = req->request.length;
+ __entry->actual_len = req->request.actual;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ ),
+ TP_printk("%p, ep%d %s, %s%s%s, len %d/%d, status %d",
+ __entry->req, __entry->epnum,
+ __entry->is_tx ? "tx/IN" : "rx/OUT",
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->actual_len, __entry->buf_len,
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(musb_req, musb_req_gb,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_tx,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_rx,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_alloc,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_free,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_start,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_enq,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_deq,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+DECLARE_EVENT_CLASS(musb_cppi41,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch),
+ TP_STRUCT__entry(
+ __field(struct cppi41_dma_channel *, ch)
+ __string(name, dev_name(ch->hw_ep->musb->controller))
+ __field(u8, hwep)
+ __field(u8, port)
+ __field(u8, is_tx)
+ __field(u32, len)
+ __field(u32, prog_len)
+ __field(u32, xferred)
+ ),
+ TP_fast_assign(
+ __entry->ch = ch;
+ __assign_str(name, dev_name(ch->hw_ep->musb->controller));
+ __entry->hwep = ch->hw_ep->epnum;
+ __entry->port = ch->port_num;
+ __entry->is_tx = ch->is_tx;
+ __entry->len = ch->total_len;
+ __entry->prog_len = ch->prog_len;
+ __entry->xferred = ch->transferred;
+ ),
+ TP_printk("%s: %p, hwep%d ch%d%s, prog_len %d, len %d/%d",
+ __get_str(name), __entry->ch, __entry->hwep,
+ __entry->port, __entry->is_tx ? "tx" : "rx",
+ __entry->prog_len, __entry->xferred, __entry->len
+ )
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_done,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_gb,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_config,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_cont,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_alloc,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_abort,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_free,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+#endif /* CONFIG_USB_TI_CPPI41_DMA */
+
+#endif /* __MUSB_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE musb_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 000000000..cafc69536
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musb_core.h"
+
+void musb_host_finish_resume(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+ u8 power;
+
+ musb = container_of(work, struct musb, finish_resume_work.work);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ musb_dbg(musb, "root port resume stopped, power %02x", power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+
+ /*
+ * ISSUE: DaVinci (RTL 1.300) disconnects after
+ * resume of high speed peripherals (but not full
+ * speed ones).
+ */
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb->hcd);
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+int musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ struct usb_otg *otg = musb->xceiv->otg;
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+ if (!is_host_active(musb))
+ return 0;
+
+ /* NOTE: this doesn't necessarily put PHY into low power mode,
+ * turning off its clock; that's a function of PHY integration and
+ * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
+ * SE0 changing to connect (J) or wakeup (K) states.
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_suspend) {
+ int retries = 10000;
+
+ if (power & MUSB_POWER_RESUME)
+ return -EBUSY;
+
+ if (!(power & MUSB_POWER_SUSPENDM)) {
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
+ power = musb_readb(mbase, MUSB_POWER);
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
+ }
+
+ musb_dbg(musb, "Root port suspended, power %02x", power);
+
+ musb->port1_status |= USB_PORT_STAT_SUSPEND;
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = otg->host->b_hnp_enable;
+ if (musb->is_active)
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_A_AIDL_BDIS));
+ musb_platform_try_idle(musb, 0);
+ break;
+ case OTG_STATE_B_HOST:
+ musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
+ musb->is_active = otg->host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+ default:
+ musb_dbg(musb, "bogus rh suspend? %s",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+ } else if (power & MUSB_POWER_SUSPENDM) {
+ power &= ~MUSB_POWER_SUSPENDM;
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ musb_dbg(musb, "Root port resuming, power %02x", power);
+
+ musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+ return 0;
+}
+
+void musb_port_reset(struct musb *musb, bool do_reset)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) {
+ musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle");
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ return;
+ }
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: caller guarantees it will turn off the reset when
+ * the appropriate amount of time has passed
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_reset) {
+ /*
+ * If RESUME is set, we must make sure it stays minimum 20 ms.
+ * Then we must clear RESUME and wait a bit to let musb start
+ * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+ * fail with "Error! Did not receive an SOF before suspend
+ * detected".
+ */
+ if (power & MUSB_POWER_RESUME) {
+ long remain = (unsigned long) musb->rh_timer - jiffies;
+
+ if (musb->rh_timer > 0 && remain > 0) {
+ /* take into account the minimum delay after resume */
+ schedule_delayed_work(
+ &musb->deassert_reset_work, remain);
+ return;
+ }
+
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESUME);
+
+ /* Give the core 1 ms to clear MUSB_POWER_RESUME */
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(1));
+ return;
+ }
+
+ power &= 0xf0;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESET);
+
+ musb->port1_status |= USB_PORT_STAT_RESET;
+ musb->port1_status &= ~USB_PORT_STAT_ENABLE;
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(50));
+ } else {
+ musb_dbg(musb, "root port reset stopped");
+ musb_platform_pre_root_reset_end(musb);
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESET);
+ musb_platform_post_root_reset_end(musb);
+
+ power = musb_readb(mbase, MUSB_POWER);
+ if (power & MUSB_POWER_HSMODE) {
+ musb_dbg(musb, "high-speed device connected");
+ musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+ }
+
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ musb->port1_status |= USB_PORT_STAT_ENABLE
+ | (USB_PORT_STAT_C_RESET << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
+ usb_hcd_poll_rh_status(musb->hcd);
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ }
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+ struct usb_otg *otg = musb->xceiv->otg;
+
+ musb->port1_status = USB_PORT_STAT_POWER
+ | (USB_PORT_STAT_C_CONNECTION << 16);
+
+ usb_hcd_poll_rh_status(musb->hcd);
+ musb->is_active = 0;
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_SUSPEND:
+ if (otg->host->b_hnp_enable) {
+ musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ break;
+ }
+ fallthrough;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ musb_dbg(musb, "host disconnect (%s)",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+}
+EXPORT_SYMBOL_GPL(musb_root_disconnect);
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musb->lock */
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int retval = 0;
+
+ /* called in_irq() via usb_hcd_poll_rh_status() */
+ if (musb->port1_status & 0xffff0000) {
+ *buf = 0x02;
+ retval = 1;
+ }
+ return retval;
+}
+
+static int musb_has_gadget(struct musb *musb)
+{
+ /*
+ * In host-only mode we start a connection right away. In OTG mode
+	 * we have to wait until a gadget driver has been loaded. We don't
+	 * really need a gadget if we operate as a host, but we must not
+	 * start a session as a device without a gadget or else we explode.
+ */
+#ifdef CONFIG_USB_MUSB_HOST
+ return 1;
+#else
+ return musb->port_mode == MUSB_HOST;
+#endif
+}
+
+int musb_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ u32 temp;
+ int retval = 0;
+ unsigned long flags;
+ bool start_musb = false;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ /* hub features: always zero, setting is a NOP
+ * port features: reported, sometimes updated when host is active
+ * no indicators
+ */
+ switch (typeReq) {
+ case ClearHubFeature:
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, false);
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (!hcd->self.is_b_host)
+ musb_platform_set_vbus(musb, 0);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_SUSPEND:
+ break;
+ default:
+ goto error;
+ }
+ musb_dbg(musb, "clear feature %d", wValue);
+ musb->port1_status &= ~(1 << wValue);
+ break;
+ case GetHubDescriptor:
+ {
+ struct usb_hub_descriptor *desc = (void *)buf;
+
+ desc->bDescLength = 9;
+ desc->bDescriptorType = USB_DT_HUB;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM /* per-port power switching */
+ | HUB_CHAR_NO_OCPM /* no overcurrent reporting */
+ );
+ desc->bPwrOn2PwrGood = 5; /* msec/2 */
+ desc->bHubContrCurrent = 0;
+
+ /* workaround bogus struct definition */
+ desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->u.hs.DeviceRemovable[1] = 0xff;
+ }
+ break;
+ case GetHubStatus:
+ temp = 0;
+ *(__le32 *) buf = cpu_to_le32(temp);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ goto error;
+
+ put_unaligned(cpu_to_le32(musb->port1_status
+ & ~MUSB_PORT_STAT_RESUME),
+ (__le32 *) buf);
+
+ /* port change status is more interesting */
+ musb_dbg(musb, "port status %08x", musb->port1_status);
+ break;
+ case SetPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /* NOTE: this controller has a strange state machine
+ * that involves "requesting sessions" according to
+ * magic side effects from incompletely-described
+ * rules about startup...
+ *
+ * This call is what really starts the host mode; be
+ * very careful about side effects if you reorder any
+ * initialization logic, e.g. for OTG, or change any
+ * logic relating to VBUS power-up.
+ */
+ if (!hcd->self.is_b_host && musb_has_gadget(musb))
+ start_musb = true;
+ break;
+ case USB_PORT_FEAT_RESET:
+ musb_port_reset(musb, true);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, true);
+ break;
+ case USB_PORT_FEAT_TEST:
+ if (unlikely(is_host_active(musb)))
+ goto error;
+
+ wIndex >>= 8;
+ switch (wIndex) {
+ case USB_TEST_J:
+ pr_debug("USB_TEST_J\n");
+ temp = MUSB_TEST_J;
+ break;
+ case USB_TEST_K:
+ pr_debug("USB_TEST_K\n");
+ temp = MUSB_TEST_K;
+ break;
+ case USB_TEST_SE0_NAK:
+ pr_debug("USB_TEST_SE0_NAK\n");
+ temp = MUSB_TEST_SE0_NAK;
+ break;
+ case USB_TEST_PACKET:
+ pr_debug("USB_TEST_PACKET\n");
+ temp = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ break;
+ case USB_TEST_FORCE_ENABLE:
+ pr_debug("USB_TEST_FORCE_ENABLE\n");
+ temp = MUSB_TEST_FORCE_HOST
+ | MUSB_TEST_FORCE_HS;
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
+ break;
+ case 6:
+ pr_debug("TEST_FIFO_ACCESS\n");
+ temp = MUSB_TEST_FIFO_ACCESS;
+ break;
+ default:
+ goto error;
+ }
+ musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
+ break;
+ default:
+ goto error;
+ }
+ musb_dbg(musb, "set feature %d", wValue);
+ musb->port1_status |= 1 << wValue;
+ break;
+
+ default:
+error:
+ /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (start_musb)
+ musb_start(musb);
+
+ return retval;
+}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 000000000..7acd16358
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
+ (MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
+
+#define musb_read_hsdma_addr(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS))
+
+#define musb_write_hsdma_addr(mbase, bchannel, addr) \
+ musb_writel(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
+ addr)
+
+#define musb_read_hsdma_count(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
+
+#define musb_write_hsdma_count(mbase, bchannel, len) \
+ musb_writel(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
+ len)
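+
+/*
+ * Illustrative note (not part of the driver): each DMA channel owns a
+ * 16-byte register window, so with MUSB_HSDMA_BASE and the per-channel
+ * register offsets from musb_dma.h, e.g. for channel 2:
+ *
+ *	MUSB_HSDMA_CHANNEL_OFFSET(2, MUSB_HSDMA_CONTROL)
+ *		== MUSB_HSDMA_BASE + (2 << 4) + MUSB_HSDMA_CONTROL
+ */
+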
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT 0
+#define MUSB_HSDMA_TRANSMIT_SHIFT 1
+#define MUSB_HSDMA_MODE1_SHIFT 2
+#define MUSB_HSDMA_IRQENABLE_SHIFT 3
+#define MUSB_HSDMA_ENDPOINT_SHIFT 4
+#define MUSB_HSDMA_BUSERROR_SHIFT 8
+#define MUSB_HSDMA_BURSTMODE_SHIFT 9
+#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
+#define MUSB_HSDMA_BURSTMODE_INCR4 1
+#define MUSB_HSDMA_BURSTMODE_INCR8 2
+#define MUSB_HSDMA_BURSTMODE_INCR16 3
+
+#define MUSB_HSDMA_CHANNELS 8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+ struct dma_channel channel;
+ struct musb_dma_controller *controller;
+ u32 start_addr;
+ u32 len;
+ u16 max_packet_sz;
+ u8 idx;
+ u8 epnum;
+ u8 transmit;
+};
+
+struct musb_dma_controller {
+ struct dma_controller controller;
+ struct musb_dma_channel channel[MUSB_HSDMA_CHANNELS];
+ void *private_data;
+ void __iomem *base;
+ u8 channel_count;
+ u8 used_channels;
+ int irq;
+};
+
+static void dma_channel_release(struct dma_channel *channel);
+
+static void dma_controller_stop(struct musb_dma_controller *controller)
+{
+ struct musb *musb = controller->private_data;
+ struct dma_channel *channel;
+ u8 bit;
+
+ if (controller->used_channels != 0) {
+ dev_err(musb->controller,
+ "Stopping DMA controller while channel active\n");
+
+ for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
+ if (controller->used_channels & (1 << bit)) {
+ channel = &controller->channel[bit].channel;
+ dma_channel_release(channel);
+
+ if (!controller->used_channels)
+ break;
+ }
+ }
+ }
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 transmit)
+{
+ struct musb_dma_controller *controller = container_of(c,
+ struct musb_dma_controller, controller);
+ struct musb_dma_channel *musb_channel = NULL;
+ struct dma_channel *channel = NULL;
+ u8 bit;
+
+ for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
+ if (!(controller->used_channels & (1 << bit))) {
+ controller->used_channels |= (1 << bit);
+ musb_channel = &(controller->channel[bit]);
+ musb_channel->controller = controller;
+ musb_channel->idx = bit;
+ musb_channel->epnum = hw_ep->epnum;
+ musb_channel->transmit = transmit;
+ channel = &(musb_channel->channel);
+ channel->private_data = musb_channel;
+ channel->status = MUSB_DMA_STATUS_FREE;
+ channel->max_len = 0x100000;
+ /* Tx => mode 1; Rx => mode 0 */
+ channel->desired_mode = transmit;
+ channel->actual_len = 0;
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static void dma_channel_release(struct dma_channel *channel)
+{
+ struct musb_dma_channel *musb_channel = channel->private_data;
+
+ channel->actual_len = 0;
+ musb_channel->start_addr = 0;
+ musb_channel->len = 0;
+
+ musb_channel->controller->used_channels &=
+ ~(1 << musb_channel->idx);
+
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+static void configure_channel(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *musb_channel = channel->private_data;
+ struct musb_dma_controller *controller = musb_channel->controller;
+ struct musb *musb = controller->private_data;
+ void __iomem *mbase = controller->base;
+ u8 bchannel = musb_channel->idx;
+ u16 csr = 0;
+
+ musb_dbg(musb, "%p, pkt_sz %d, addr %pad, len %d, mode %d",
+ channel, packet_sz, &dma_addr, len, mode);
+
+ if (mode) {
+ csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
+ BUG_ON(len < packet_sz);
+ }
+ csr |= MUSB_HSDMA_BURSTMODE_INCR16
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+
+ csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+ | (1 << MUSB_HSDMA_ENABLE_SHIFT)
+ | (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
+ | (musb_channel->transmit
+ ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+ : 0);
+
+ /* address/count */
+ musb_write_hsdma_addr(mbase, bchannel, dma_addr);
+ musb_write_hsdma_count(mbase, bchannel, len);
+
+ /* control (this should start things) */
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
+ csr);
+}
+
+static int dma_channel_program(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *musb_channel = channel->private_data;
+ struct musb_dma_controller *controller = musb_channel->controller;
+ struct musb *musb = controller->private_data;
+
+ musb_dbg(musb, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d",
+ musb_channel->epnum,
+ musb_channel->transmit ? "Tx" : "Rx",
+ packet_sz, &dma_addr, len, mode);
+
+ BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ channel->status == MUSB_DMA_STATUS_BUSY);
+
+ /*
+ * The DMA engine in RTL1.8 and above cannot handle
+ * DMA addresses that are not aligned to a 4 byte boundary.
+ * It ends up masking the last two bits of the address
+ * programmed in DMA_ADDR.
+ *
+ * Fail such DMA transfers, so that the backup PIO mode
+	 * can carry out the transfer.
+ */
+ if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
+ return false;
+
+ channel->actual_len = 0;
+ musb_channel->start_addr = dma_addr;
+ musb_channel->len = len;
+ musb_channel->max_packet_sz = packet_sz;
+ channel->status = MUSB_DMA_STATUS_BUSY;
+
+ configure_channel(channel, packet_sz, mode, dma_addr, len);
+
+ return true;
+}
+
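+/*
+ * Abort a busy channel: clear the endpoint's DMA bits in the order the
+ * programming guide requires, then zero the channel's control, address
+ * and count registers.
+ */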
+static int dma_channel_abort(struct dma_channel *channel)
+{
+ struct musb_dma_channel *musb_channel = channel->private_data;
+ void __iomem *mbase = musb_channel->controller->base;
+ struct musb *musb = musb_channel->controller->private_data;
+
+ u8 bchannel = musb_channel->idx;
+ int offset;
+ u16 csr;
+
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ if (musb_channel->transmit) {
+ offset = musb->io.ep_offset(musb_channel->epnum,
+ MUSB_TXCSR);
+
+ /*
+ * The programming guide says that we must clear
+ * the DMAENAB bit before the DMAMODE bit...
+ */
+ csr = musb_readw(mbase, offset);
+ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
+ musb_writew(mbase, offset, csr);
+ csr &= ~MUSB_TXCSR_DMAMODE;
+ musb_writew(mbase, offset, csr);
+ } else {
+ offset = musb->io.ep_offset(musb_channel->epnum,
+ MUSB_RXCSR);
+
+ csr = musb_readw(mbase, offset);
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+ MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_DMAMODE);
+ musb_writew(mbase, offset, csr);
+ }
+
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
+ 0);
+ musb_write_hsdma_addr(mbase, bchannel, 0);
+ musb_write_hsdma_count(mbase, bchannel, 0);
+ channel->status = MUSB_DMA_STATUS_FREE;
+ }
+
+ return 0;
+}
+
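+/*
+ * DMA interrupts can apparently be missed: on a seemingly spurious
+ * IRQ, poll every busy channel and treat a transfer count of zero as
+ * a pending completion so the transfer does not stall.
+ */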
+irqreturn_t dma_controller_irq(int irq, void *private_data)
+{
+ struct musb_dma_controller *controller = private_data;
+ struct musb *musb = controller->private_data;
+ struct musb_dma_channel *musb_channel;
+ struct dma_channel *channel;
+
+ void __iomem *mbase = controller->base;
+
+ irqreturn_t retval = IRQ_NONE;
+
+ unsigned long flags;
+
+ u8 bchannel;
+ u8 int_hsdma;
+
+ u32 addr, count;
+ u16 csr;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
+
+ if (!int_hsdma) {
+ musb_dbg(musb, "spurious DMA irq");
+
+ for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+ musb_channel = (struct musb_dma_channel *)
+ &(controller->channel[bchannel]);
+ channel = &musb_channel->channel;
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ count = musb_read_hsdma_count(mbase, bchannel);
+
+ if (count == 0)
+ int_hsdma |= (1 << bchannel);
+ }
+ }
+
+ musb_dbg(musb, "int_hsdma = 0x%x", int_hsdma);
+
+ if (!int_hsdma)
+ goto done;
+ }
+
+ for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+ if (int_hsdma & (1 << bchannel)) {
+ musb_channel = (struct musb_dma_channel *)
+ &(controller->channel[bchannel]);
+ channel = &musb_channel->channel;
+
+ csr = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
+ MUSB_HSDMA_CONTROL));
+
+ if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
+ musb_channel->channel.status =
+ MUSB_DMA_STATUS_BUS_ABORT;
+ } else {
+ addr = musb_read_hsdma_addr(mbase,
+ bchannel);
+ channel->actual_len = addr
+ - musb_channel->start_addr;
+
+ musb_dbg(musb, "ch %p, 0x%x -> 0x%x (%zu / %d) %s",
+ channel, musb_channel->start_addr,
+ addr, channel->actual_len,
+ musb_channel->len,
+ (channel->actual_len
+ < musb_channel->len) ?
+ "=> reconfig 0" : "=> complete");
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ /* completed */
+ if (musb_channel->transmit &&
+ (!channel->desired_mode ||
+ (channel->actual_len %
+ musb_channel->max_packet_sz))) {
+ u8 epnum = musb_channel->epnum;
+ int offset = musb->io.ep_offset(epnum,
+ MUSB_TXCSR);
+ u16 txcsr;
+
+ /*
+ * The programming guide says that we
+ * must clear DMAENAB before DMAMODE.
+ */
+ musb_ep_select(mbase, epnum);
+ txcsr = musb_readw(mbase, offset);
+ if (channel->desired_mode == 1) {
+ txcsr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_AUTOSET);
+ musb_writew(mbase, offset, txcsr);
+ /* Send out the packet */
+ txcsr &= ~MUSB_TXCSR_DMAMODE;
+ txcsr |= MUSB_TXCSR_DMAENAB;
+ }
+ txcsr |= MUSB_TXCSR_TXPKTRDY;
+ musb_writew(mbase, offset, txcsr);
+ }
+ musb_dma_completion(musb, musb_channel->epnum,
+ musb_channel->transmit);
+ }
+ }
+ }
+
+ retval = IRQ_HANDLED;
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dma_controller_irq);
+
+void musbhs_dma_controller_destroy(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller = container_of(c,
+ struct musb_dma_controller, controller);
+
+ dma_controller_stop(controller);
+
+ if (controller->irq)
+ free_irq(controller->irq, c);
+
+ kfree(controller);
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
+
+static struct musb_dma_controller *
+dma_controller_alloc(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->channel_count = MUSB_HSDMA_CHANNELS;
+ controller->private_data = musb;
+ controller->base = base;
+
+ controller->controller.channel_alloc = dma_channel_allocate;
+ controller->controller.channel_release = dma_channel_release;
+ controller->controller.channel_program = dma_channel_program;
+ controller->controller.channel_abort = dma_channel_abort;
+ return controller;
+}
+
+struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq_byname(pdev, "dma");
+
+ if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
+
+ if (request_irq(irq, dma_controller_irq, 0,
+ dev_name(musb->controller), controller)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ musb_dma_controller_destroy(&controller->controller);
+
+ return NULL;
+ }
+
+ controller->irq = irq;
+
+ return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
+
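+/*
+ * Variant for glue layers that dispatch the DMA interrupt themselves;
+ * no IRQ is requested here and the caller is expected to invoke the
+ * exported dma_controller_irq() from its own handler.
+ */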
+struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
+
+ return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 000000000..44a21ec86
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2007 by Texas Instruments
+ * Some code has been taken from tusb6010.c
+ * Copyrights for that are attributable to:
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/usb/musb.h>
+#include <linux/phy/omap_control_phy.h>
+#include <linux/of_platform.h>
+
+#include "musb_core.h"
+#include "omap2430.h"
+
+struct omap2430_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ enum musb_vbus_id_status status;
+ struct work_struct omap_musb_mailbox_work;
+ struct device *control_otghs;
+ unsigned int is_runtime_suspended:1;
+ unsigned int needs_resume:1;
+ unsigned int phy_suspended:1;
+};
+#define glue_to_musb(g) platform_get_drvdata(g->musb)
+
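+/* The mailbox callback gets no context pointer, hence this singleton */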
+static struct omap2430_glue *_glue;
+
+static inline void omap2430_low_level_exit(struct musb *musb)
+{
+ u32 l;
+
+ /* in any role */
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
+ l |= ENABLEFORCE; /* enable MSTANDBY */
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
+}
+
+static inline void omap2430_low_level_init(struct musb *musb)
+{
+ u32 l;
+
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
+ l &= ~ENABLEFORCE; /* disable MSTANDBY */
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
+{
+ struct omap2430_glue *glue = _glue;
+
+ if (!glue) {
+ pr_err("%s: musb core is not yet initialized\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ glue->status = status;
+
+ if (!glue_to_musb(glue)) {
+ pr_err("%s: musb core is not yet ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ schedule_work(&glue->omap_musb_mailbox_work);
+
+ return 0;
+}
+
+/*
+ * HDRC controls CPEN, but beware current surges during device connect.
+ * They can trigger transient overcurrent conditions that must be ignored.
+ *
+ * Note that we're skipping A_WAIT_VFALL -> A_IDLE and jumping right to B_IDLE
+ * as set by musb_set_peripheral().
+ */
+static void omap_musb_set_mailbox(struct omap2430_glue *glue)
+{
+ struct musb *musb = glue_to_musb(glue);
+ int error;
+
+ pm_runtime_get_sync(musb->controller);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
+ switch (glue->status) {
+ case MUSB_ID_GROUND:
+ dev_dbg(musb->controller, "ID GND\n");
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_IDLE:
+ error = musb_set_host(musb);
+ if (error)
+ break;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ fallthrough;
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ /*
+ * On multiple ID ground interrupts just keep enabling
+ * VBUS. At least cpcap VBUS shuts down otherwise.
+ */
+ otg_set_vbus(musb->xceiv->otg, 1);
+ break;
+ default:
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->last_event = USB_EVENT_ID;
+ if (musb->gadget_driver) {
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_HOST);
+ otg_set_vbus(musb->xceiv->otg, 1);
+ }
+ break;
+ }
+ break;
+
+ case MUSB_VBUS_VALID:
+ dev_dbg(musb->controller, "VBUS Connect\n");
+
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ musb->xceiv->last_event = USB_EVENT_VBUS;
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
+ break;
+
+ case MUSB_ID_FLOAT:
+ case MUSB_VBUS_OFF:
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
+
+ musb->xceiv->last_event = USB_EVENT_NONE;
+ musb_set_peripheral(musb);
+ otg_set_vbus(musb->xceiv->otg, 0);
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+ }
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ atomic_notifier_call_chain(&musb->xceiv->notifier,
+ musb->xceiv->last_event, NULL);
+}
+
+static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
+{
+ struct omap2430_glue *glue = container_of(mailbox_work,
+ struct omap2430_glue, omap_musb_mailbox_work);
+
+ omap_musb_set_mailbox(glue);
+}
+
+static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static int omap2430_musb_init(struct musb *musb)
+{
+ u32 l;
+ int status = 0;
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+
+ /* We require some kind of external transceiver, hooked
+ * up through ULPI. TWL4030-family PMICs include one,
+	 * which needs a driver; drivers aren't always needed.
+ */
+ musb->phy = devm_phy_get(dev->parent, "usb2-phy");
+
+ /* We can't totally remove musb->xceiv as of now because
+ * musb core uses xceiv.state and xceiv.otg. Once we have
+ * a separate state machine to handle otg, these can be moved
+ * out of xceiv and then we can start using the generic PHY
+	 * framework.
+ */
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent, "usb-phy", 0);
+
+ if (IS_ERR(musb->xceiv)) {
+ status = PTR_ERR(musb->xceiv);
+
+ if (status == -ENXIO)
+ return status;
+
+ dev_dbg(dev, "HS USB OTG: no transceiver configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (IS_ERR(musb->phy)) {
+ dev_err(dev, "HS USB OTG: no PHY configured\n");
+ return PTR_ERR(musb->phy);
+ }
+ musb->isr = omap2430_musb_interrupt;
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+
+ l = musb_readl(musb->mregs, OTG_INTERFSEL);
+
+ if (data->interface_type == MUSB_INTERFACE_UTMI) {
+ /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
+ l &= ~ULPI_12PIN; /* Disable ULPI */
+ l |= UTMI_8BIT; /* Enable UTMI */
+ } else {
+ l |= ULPI_12PIN;
+ }
+
+ musb_writel(musb->mregs, OTG_INTERFSEL, l);
+
+ dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
+ musb_readl(musb->mregs, OTG_REVISION),
+ musb_readl(musb->mregs, OTG_SYSCONFIG),
+ musb_readl(musb->mregs, OTG_SYSSTATUS),
+ musb_readl(musb->mregs, OTG_INTERFSEL),
+ musb_readl(musb->mregs, OTG_SIMENABLE));
+
+ return 0;
+}
+
+static void omap2430_musb_enable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+
+ if (glue->status == MUSB_UNKNOWN)
+ glue->status = MUSB_VBUS_OFF;
+ omap_musb_set_mailbox(glue);
+}
+
+static void omap2430_musb_disable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+
+ if (glue->status != MUSB_UNKNOWN)
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
+}
+
+static int omap2430_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+
+ omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ musb->phy = NULL;
+ cancel_work_sync(&glue->omap_musb_mailbox_work);
+
+ return 0;
+}
+
+static const struct musb_platform_ops omap2430_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .init = omap2430_musb_init,
+ .exit = omap2430_musb_exit,
+
+ .enable = omap2430_musb_enable,
+ .disable = omap2430_musb_disable,
+
+ .phy_callback = omap2430_musb_mailbox,
+};
+
+static u64 omap2430_dmamask = DMA_BIT_MASK(32);
+
+static int omap2430_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct omap_musb_board_data *data;
+ struct platform_device *musb;
+ struct omap2430_glue *glue;
+ struct device_node *np = pdev->dev.of_node;
+ struct musb_hdrc_config *config;
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+ int ret = -ENOMEM, val;
+ bool populate_irqs = false;
+
+ if (!np)
+ return -ENODEV;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ goto err0;
+
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err0;
+ }
+
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &omap2430_dmamask;
+ musb->dev.coherent_dma_mask = omap2430_dmamask;
+
+ /*
+ * Legacy SoCs using omap_device get confused if node is moved
+ * because of interconnect properties mixed into the node.
+ */
+ if (of_get_property(np, "ti,hwmods", NULL)) {
+ dev_warn(&pdev->dev, "please update to probe with ti-sysc\n");
+ populate_irqs = true;
+ } else {
+ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
+ }
+ of_node_put(np);
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->status = MUSB_UNKNOWN;
+ glue->control_otghs = ERR_PTR(-ENODEV);
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto err2;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto err2;
+
+ config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ goto err2;
+
+ of_property_read_u32(np, "mode", (u32 *)&pdata->mode);
+ of_property_read_u32(np, "interface-type",
+ (u32 *)&data->interface_type);
+ of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
+ of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
+ of_property_read_u32(np, "power", (u32 *)&pdata->power);
+
+ ret = of_property_read_u32(np, "multipoint", &val);
+ if (!ret && val)
+ config->multipoint = true;
+
+ pdata->board_data = data;
+ pdata->config = config;
+
+ control_node = of_parse_phandle(np, "ctrl-module", 0);
+ if (control_node) {
+ control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ ret = -EINVAL;
+ goto err2;
+ }
+ glue->control_otghs = &control_pdev->dev;
+ }
+
+ pdata->platform_ops = &omap2430_ops;
+
+ platform_set_drvdata(pdev, glue);
+
+ /*
+ * REVISIT if we ever have two instances of the wrapper, we will be
+ * in big trouble
+ */
+ _glue = glue;
+
+ INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
+
+ ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err2;
+ }
+
+ if (populate_irqs) {
+ struct resource musb_res[3];
+ struct resource *res;
+ int i = 0;
+
+ memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto err2;
+ }
+
+ musb_res[i].start = res->start;
+ musb_res[i].end = res->end;
+ musb_res[i].flags = res->flags;
+ musb_res[i].name = res->name;
+ i++;
+
+ ret = of_irq_get_byname(np, "mc");
+ if (ret > 0) {
+ musb_res[i].start = ret;
+ musb_res[i].flags = IORESOURCE_IRQ;
+ musb_res[i].name = "mc";
+ i++;
+ }
+
+ ret = of_irq_get_byname(np, "dma");
+ if (ret > 0) {
+ musb_res[i].start = ret;
+ musb_res[i].flags = IORESOURCE_IRQ;
+ musb_res[i].name = "dma";
+ i++;
+ }
+
+ ret = platform_device_add_resources(musb, musb_res, i);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add IRQ resources\n");
+ goto err2;
+ }
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err2;
+ }
+
+ pm_runtime_enable(glue->dev);
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err3;
+ }
+
+ return 0;
+
+err3:
+ pm_runtime_disable(glue->dev);
+
+err2:
+ platform_device_put(musb);
+
+err0:
+ return ret;
+}
+
+static int omap2430_remove(struct platform_device *pdev)
+{
+ struct omap2430_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ pm_runtime_disable(glue->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
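+/*
+ * Save OTG_INTERFSEL across runtime suspend so that
+ * omap2430_runtime_resume() can restore it once the low-level state
+ * has been rebuilt.
+ */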
+static int omap2430_runtime_suspend(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ if (!musb)
+ return 0;
+
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
+ omap2430_low_level_exit(musb);
+
+ if (!glue->phy_suspended) {
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ }
+
+ glue->is_runtime_suspended = 1;
+
+ return 0;
+}
+
+static int omap2430_runtime_resume(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ if (!musb)
+ return 0;
+
+ if (!glue->phy_suspended) {
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+ }
+
+ omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
+
+ /* Wait for musb to get oriented. Otherwise we can get babble */
+ usleep_range(200000, 250000);
+
+ glue->is_runtime_suspended = 0;
+
+ return 0;
+}
+
+/* I2C and SPI PHYs need to be suspended before the glue layer */
+static int omap2430_suspend(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ glue->phy_suspended = 1;
+
+ return 0;
+}
+
+/* Glue layer needs to be suspended after musb_suspend() */
+static int omap2430_suspend_late(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+
+ if (glue->is_runtime_suspended)
+ return 0;
+
+ glue->needs_resume = 1;
+
+ return omap2430_runtime_suspend(dev);
+}
+
+static int omap2430_resume_early(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+
+ if (!glue->needs_resume)
+ return 0;
+
+ glue->needs_resume = 0;
+
+ return omap2430_runtime_resume(dev);
+}
+
+static int omap2430_resume(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+ glue->phy_suspended = 0;
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap2430_pm_ops = {
+ .runtime_suspend = omap2430_runtime_suspend,
+ .runtime_resume = omap2430_runtime_resume,
+ .suspend = omap2430_suspend,
+ .suspend_late = omap2430_suspend_late,
+ .resume_early = omap2430_resume_early,
+ .resume = omap2430_resume,
+};
+
+#define DEV_PM_OPS (&omap2430_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap2430_id_table[] = {
+ {
+ .compatible = "ti,omap4-musb"
+ },
+ {
+ .compatible = "ti,omap3-musb"
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2430_id_table);
+#endif
+
+static struct platform_driver omap2430_driver = {
+ .probe = omap2430_probe,
+ .remove = omap2430_remove,
+ .driver = {
+ .name = "musb-omap2430",
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(omap2430_id_table),
+ },
+};
+
+module_platform_driver(omap2430_driver);
+
+MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 000000000..939a0361a
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#include <linux/platform_data/usb-omap.h>
+
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define OTG_REVISION 0x400
+
+#define OTG_SYSCONFIG 0x404
+# define MIDLEMODE 12 /* bit position */
+# define FORCESTDBY (0 << MIDLEMODE)
+# define NOSTDBY (1 << MIDLEMODE)
+# define SMARTSTDBY (2 << MIDLEMODE)
+
+# define SIDLEMODE 3 /* bit position */
+# define FORCEIDLE (0 << SIDLEMODE)
+# define NOIDLE (1 << SIDLEMODE)
+# define SMARTIDLE (2 << SIDLEMODE)
+
+# define ENABLEWAKEUP (1 << 2)
+# define SOFTRST (1 << 1)
+# define AUTOIDLE (1 << 0)
+
+#define OTG_SYSSTATUS 0x408
+# define RESETDONE (1 << 0)
+
+#define OTG_INTERFSEL 0x40c
+# define EXTCP (1 << 2)
+# define PHYSEL 0 /* bit position */
+# define UTMI_8BIT (0 << PHYSEL)
+# define ULPI_12PIN (1 << PHYSEL)
+# define ULPI_8PIN (2 << PHYSEL)
+
+#define OTG_SIMENABLE 0x410
+# define TM1 (1 << 0)
+
+#define OTG_FORCESTDBY 0x414
+# define ENABLEFORCE (1 << 0)
+
+#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
new file mode 100644
index 000000000..7f9a999cd
--- /dev/null
+++ b/drivers/usb/musb/sunxi.c
@@ -0,0 +1,834 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sun4i MUSB Glue Layer
+ *
+ * Copyright (C) 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy-sun4i-usb.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/usb/musb.h>
+#include <linux/usb/of.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/workqueue.h>
+#include "musb_core.h"
+
+/*
+ * Register offsets. Note that sunxi musb has a different layout than most
+ * musb implementations; we translate the layout in musb_readb & friends.
+ */
+#define SUNXI_MUSB_POWER 0x0040
+#define SUNXI_MUSB_DEVCTL 0x0041
+#define SUNXI_MUSB_INDEX 0x0042
+#define SUNXI_MUSB_VEND0 0x0043
+#define SUNXI_MUSB_INTRTX 0x0044
+#define SUNXI_MUSB_INTRRX 0x0046
+#define SUNXI_MUSB_INTRTXE 0x0048
+#define SUNXI_MUSB_INTRRXE 0x004a
+#define SUNXI_MUSB_INTRUSB 0x004c
+#define SUNXI_MUSB_INTRUSBE 0x0050
+#define SUNXI_MUSB_FRAME 0x0054
+#define SUNXI_MUSB_TXFIFOSZ 0x0090
+#define SUNXI_MUSB_TXFIFOADD 0x0092
+#define SUNXI_MUSB_RXFIFOSZ 0x0094
+#define SUNXI_MUSB_RXFIFOADD 0x0096
+#define SUNXI_MUSB_FADDR 0x0098
+#define SUNXI_MUSB_TXFUNCADDR 0x0098
+#define SUNXI_MUSB_TXHUBADDR 0x009a
+#define SUNXI_MUSB_TXHUBPORT 0x009b
+#define SUNXI_MUSB_RXFUNCADDR 0x009c
+#define SUNXI_MUSB_RXHUBADDR 0x009e
+#define SUNXI_MUSB_RXHUBPORT 0x009f
+#define SUNXI_MUSB_CONFIGDATA 0x00c0
+
+/* VEND0 bits */
+#define SUNXI_MUSB_VEND0_PIO_MODE 0
+
+/* flags */
+#define SUNXI_MUSB_FL_ENABLED 0
+#define SUNXI_MUSB_FL_HOSTMODE 1
+#define SUNXI_MUSB_FL_HOSTMODE_PEND 2
+#define SUNXI_MUSB_FL_VBUS_ON 3
+#define SUNXI_MUSB_FL_PHY_ON 4
+#define SUNXI_MUSB_FL_HAS_SRAM 5
+#define SUNXI_MUSB_FL_HAS_RESET 6
+#define SUNXI_MUSB_FL_NO_CONFIGDATA 7
+#define SUNXI_MUSB_FL_PHY_MODE_PEND 8
+
+/* Our read/write methods need a musb ref but do not get passed one :| */
+static struct musb *sunxi_musb;
+
+struct sunxi_glue {
+ struct device *dev;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct platform_device *usb_phy;
+ struct usb_phy *xceiv;
+ enum phy_mode phy_mode;
+ unsigned long flags;
+ struct work_struct work;
+ struct extcon_dev *extcon;
+ struct notifier_block host_nb;
+};
+
+/* phy_power_on / off may sleep, so we use a workqueue */
+static void sunxi_musb_work(struct work_struct *work)
+{
+ struct sunxi_glue *glue = container_of(work, struct sunxi_glue, work);
+ bool vbus_on, phy_on;
+
+ if (!test_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
+ return;
+
+ if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
+ struct musb *musb = glue->musb;
+ unsigned long flags;
+ u8 devctl;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ devctl = readb(musb->mregs + SUNXI_MUSB_DEVCTL);
+ if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ MUSB_HST_MODE(musb);
+ devctl |= MUSB_DEVCTL_SESSION;
+ } else {
+ clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ }
+ writeb(devctl, musb->mregs + SUNXI_MUSB_DEVCTL);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+
+ vbus_on = test_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ phy_on = test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+
+ if (phy_on != vbus_on) {
+ if (vbus_on) {
+ phy_power_on(glue->phy);
+ set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ } else {
+ phy_power_off(glue->phy);
+ clear_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ }
+ }
+
+ if (test_and_clear_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags))
+ phy_set_mode(glue->phy, glue->phy_mode);
+}
+
+static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ if (is_on) {
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ } else {
+ clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ }
+
+ schedule_work(&glue->work);
+}
+
+static void sunxi_musb_pre_root_reset_end(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ sun4i_usb_phy_set_squelch_detect(glue->phy, false);
+}
+
+static void sunxi_musb_post_root_reset_end(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ sun4i_usb_phy_set_squelch_detect(glue->phy, true);
+}
+
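+/*
+ * The interrupt status registers are cleared by writing the handled
+ * bits back, which must be done before calling the generic
+ * musb_interrupt() handler.
+ */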
+static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
+{
+ struct musb *musb = __hci;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = readb(musb->mregs + SUNXI_MUSB_INTRUSB);
+ if (musb->int_usb)
+ writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
+
+ if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
+ /* ep0 FADDR must be 0 when (re)entering peripheral mode */
+ musb_ep_select(musb->mregs, 0);
+ musb_writeb(musb->mregs, MUSB_FADDR, 0);
+ }
+
+ musb->int_tx = readw(musb->mregs + SUNXI_MUSB_INTRTX);
+ if (musb->int_tx)
+ writew(musb->int_tx, musb->mregs + SUNXI_MUSB_INTRTX);
+
+ musb->int_rx = readw(musb->mregs + SUNXI_MUSB_INTRRX);
+ if (musb->int_rx)
+ writew(musb->int_rx, musb->mregs + SUNXI_MUSB_INTRRX);
+
+ musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_musb_host_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct sunxi_glue *glue = container_of(nb, struct sunxi_glue, host_nb);
+
+ if (event)
+ set_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
+ else
+ clear_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
+
+ set_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return NOTIFY_DONE;
+}
+
+static int sunxi_musb_init(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ int ret;
+
+ sunxi_musb = musb;
+ musb->phy = glue->phy;
+ musb->xceiv = glue->xceiv;
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) {
+ ret = sunxi_sram_claim(musb->controller->parent);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(glue->clk);
+ if (ret)
+ goto error_sram_release;
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
+ ret = reset_control_deassert(glue->rst);
+ if (ret)
+ goto error_clk_disable;
+ }
+
+ writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
+
+ /* Register notifier before calling phy_init() */
+ ret = devm_extcon_register_notifier(glue->dev, glue->extcon,
+ EXTCON_USB_HOST, &glue->host_nb);
+ if (ret)
+ goto error_reset_assert;
+
+ ret = phy_init(glue->phy);
+ if (ret)
+ goto error_reset_assert;
+
+ musb->isr = sunxi_musb_interrupt;
+
+ /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
+ pm_runtime_get(musb->controller);
+
+ return 0;
+
+error_reset_assert:
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
+ reset_control_assert(glue->rst);
+error_clk_disable:
+ clk_disable_unprepare(glue->clk);
+error_sram_release:
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+ sunxi_sram_release(musb->controller->parent);
+ return ret;
+}
+
+static int sunxi_musb_exit(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ pm_runtime_put(musb->controller);
+
+ cancel_work_sync(&glue->work);
+ if (test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags))
+ phy_power_off(glue->phy);
+
+ phy_exit(glue->phy);
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
+ reset_control_assert(glue->rst);
+
+ clk_disable_unprepare(glue->clk);
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+ sunxi_sram_release(musb->controller->parent);
+
+ devm_usb_put_phy(glue->dev, glue->xceiv);
+
+ return 0;
+}
+
+static void sunxi_musb_enable(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ glue->musb = musb;
+
+ /* musb_core does not call us in a balanced manner */
+ if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
+ return;
+
+ schedule_work(&glue->work);
+}
+
+static void sunxi_musb_disable(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
+}
+
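+/*
+ * No MUSB DMA support is wired up for sunxi (VEND0 is set to PIO mode
+ * in sunxi_musb_init()), so provide stub controller hooks and let the
+ * musb core fall back to PIO.
+ */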
+static struct dma_controller *
+sunxi_musb_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ return NULL;
+}
+
+static void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
+{
+}
+
+static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ enum phy_mode new_mode;
+
+ switch (mode) {
+ case MUSB_HOST:
+ new_mode = PHY_MODE_USB_HOST;
+ break;
+ case MUSB_PERIPHERAL:
+ new_mode = PHY_MODE_USB_DEVICE;
+ break;
+ case MUSB_OTG:
+ new_mode = PHY_MODE_USB_OTG;
+ break;
+ default:
+ dev_err(musb->controller->parent,
+ "Error requested mode not supported by this kernel\n");
+ return -EINVAL;
+ }
+
+ if (glue->phy_mode == new_mode)
+ return 0;
+
+ if (musb->port_mode != MUSB_OTG) {
+ dev_err(musb->controller->parent,
+ "Error changing modes is only supported in dual role mode\n");
+ return -EINVAL;
+ }
+
+ if (musb->port1_status & USB_PORT_STAT_ENABLE)
+ musb_root_disconnect(musb);
+
+ /*
+ * phy_set_mode may sleep, and we're called with a spinlock held,
+ * so let sunxi_musb_work deal with it.
+ */
+ glue->phy_mode = new_mode;
+ set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return 0;
+}
+
+static int sunxi_musb_recover(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /*
+ * Schedule a phy_set_mode with the current glue->phy_mode value,
+ * this will force end the current session.
+ */
+ set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return 0;
+}
+
+/*
+ * sunxi musb register layout
+ * 0x00 - 0x17 fifo regs, 1 long per fifo
+ * 0x40 - 0x57 generic control regs (power - frame)
+ * 0x80 - 0x8f ep control regs (addressed through hw_ep->regs, indexed)
+ * 0x90 - 0x97 fifo control regs (indexed)
+ * 0x98 - 0x9f multipoint / busctl regs (indexed)
+ * 0xc0 configdata reg
+ */
+
+static u32 sunxi_musb_fifo_offset(u8 epnum)
+{
+ return (epnum * 4);
+}
+
+static u32 sunxi_musb_ep_offset(u8 epnum, u16 offset)
+{
+ WARN_ONCE(offset != 0,
+ "sunxi_musb_ep_offset called with non 0 offset\n");
+
+ return 0x80; /* indexed, so ignore epnum */
+}
+
+static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
+{
+ return SUNXI_MUSB_TXFUNCADDR + offset;
+}
+
+static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
+{
+ struct sunxi_glue *glue;
+
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_FADDR:
+ return readb(addr + SUNXI_MUSB_FADDR);
+ case MUSB_POWER:
+ return readb(addr + SUNXI_MUSB_POWER);
+ case MUSB_INTRUSB:
+ return readb(addr + SUNXI_MUSB_INTRUSB);
+ case MUSB_INTRUSBE:
+ return readb(addr + SUNXI_MUSB_INTRUSBE);
+ case MUSB_INDEX:
+ return readb(addr + SUNXI_MUSB_INDEX);
+ case MUSB_TESTMODE:
+ return 0; /* No testmode on sunxi */
+ case MUSB_DEVCTL:
+ return readb(addr + SUNXI_MUSB_DEVCTL);
+ case MUSB_TXFIFOSZ:
+ return readb(addr + SUNXI_MUSB_TXFIFOSZ);
+ case MUSB_RXFIFOSZ:
+ return readb(addr + SUNXI_MUSB_RXFIFOSZ);
+ case MUSB_CONFIGDATA + 0x10: /* See musb_read_configdata() */
+ glue = dev_get_drvdata(sunxi_musb->controller->parent);
+ /* A33 saves a reg, and we get to hardcode this */
+ if (test_bit(SUNXI_MUSB_FL_NO_CONFIGDATA,
+ &glue->flags))
+ return 0xde;
+
+ return readb(addr + SUNXI_MUSB_CONFIGDATA);
+ case MUSB_ULPI_BUSCONTROL:
+ dev_warn(sunxi_musb->controller->parent,
+ "sunxi-musb does not have ULPI bus control register\n");
+ return 0;
+ /* Offset for these is fixed by sunxi_musb_busctl_offset() */
+ case SUNXI_MUSB_TXFUNCADDR:
+ case SUNXI_MUSB_TXHUBADDR:
+ case SUNXI_MUSB_TXHUBPORT:
+ case SUNXI_MUSB_RXFUNCADDR:
+ case SUNXI_MUSB_RXHUBADDR:
+ case SUNXI_MUSB_RXHUBPORT:
+ /* multipoint / busctl reg access */
+ return readb(addr + offset);
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readb offset %u\n", offset);
+ return 0;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ /* sunxi has a 2 byte hole before the txtype register */
+ if (offset >= MUSB_TXTYPE)
+ offset += 2;
+ return readb(addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readb at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+ return 0;
+}
+
+static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_FADDR:
+ return writeb(data, addr + SUNXI_MUSB_FADDR);
+ case MUSB_POWER:
+ return writeb(data, addr + SUNXI_MUSB_POWER);
+ case MUSB_INTRUSB:
+ return writeb(data, addr + SUNXI_MUSB_INTRUSB);
+ case MUSB_INTRUSBE:
+ return writeb(data, addr + SUNXI_MUSB_INTRUSBE);
+ case MUSB_INDEX:
+ return writeb(data, addr + SUNXI_MUSB_INDEX);
+ case MUSB_TESTMODE:
+ if (data)
+ dev_warn(sunxi_musb->controller->parent,
+ "sunxi-musb does not have testmode\n");
+ return;
+ case MUSB_DEVCTL:
+ return writeb(data, addr + SUNXI_MUSB_DEVCTL);
+ case MUSB_TXFIFOSZ:
+ return writeb(data, addr + SUNXI_MUSB_TXFIFOSZ);
+ case MUSB_RXFIFOSZ:
+ return writeb(data, addr + SUNXI_MUSB_RXFIFOSZ);
+ case MUSB_ULPI_BUSCONTROL:
+ dev_warn(sunxi_musb->controller->parent,
+ "sunxi-musb does not have ULPI bus control register\n");
+ return;
+ /* Offset for these is fixed by sunxi_musb_busctl_offset() */
+ case SUNXI_MUSB_TXFUNCADDR:
+ case SUNXI_MUSB_TXHUBADDR:
+ case SUNXI_MUSB_TXHUBPORT:
+ case SUNXI_MUSB_RXFUNCADDR:
+ case SUNXI_MUSB_RXHUBADDR:
+ case SUNXI_MUSB_RXHUBPORT:
+ /* multipoint / busctl reg access */
+ return writeb(data, addr + offset);
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writeb offset %u\n", offset);
+ return;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ if (offset >= MUSB_TXTYPE)
+ offset += 2;
+ return writeb(data, addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writeb at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+}
+
+static u16 sunxi_musb_readw(void __iomem *addr, u32 offset)
+{
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_INTRTX:
+ return readw(addr + SUNXI_MUSB_INTRTX);
+ case MUSB_INTRRX:
+ return readw(addr + SUNXI_MUSB_INTRRX);
+ case MUSB_INTRTXE:
+ return readw(addr + SUNXI_MUSB_INTRTXE);
+ case MUSB_INTRRXE:
+ return readw(addr + SUNXI_MUSB_INTRRXE);
+ case MUSB_FRAME:
+ return readw(addr + SUNXI_MUSB_FRAME);
+ case MUSB_TXFIFOADD:
+ return readw(addr + SUNXI_MUSB_TXFIFOADD);
+ case MUSB_RXFIFOADD:
+ return readw(addr + SUNXI_MUSB_RXFIFOADD);
+ case MUSB_HWVERS:
+ return 0; /* sunxi musb version is not known */
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readw offset %u\n", offset);
+ return 0;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ return readw(addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readw at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+ return 0;
+}
+
+static void sunxi_musb_writew(void __iomem *addr, unsigned offset, u16 data)
+{
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_INTRTX:
+ return writew(data, addr + SUNXI_MUSB_INTRTX);
+ case MUSB_INTRRX:
+ return writew(data, addr + SUNXI_MUSB_INTRRX);
+ case MUSB_INTRTXE:
+ return writew(data, addr + SUNXI_MUSB_INTRTXE);
+ case MUSB_INTRRXE:
+ return writew(data, addr + SUNXI_MUSB_INTRRXE);
+ case MUSB_FRAME:
+ return writew(data, addr + SUNXI_MUSB_FRAME);
+ case MUSB_TXFIFOADD:
+ return writew(data, addr + SUNXI_MUSB_TXFIFOADD);
+ case MUSB_RXFIFOADD:
+ return writew(data, addr + SUNXI_MUSB_RXFIFOADD);
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writew offset %u\n", offset);
+ return;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ return writew(data, addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writew at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+}
+
+static const struct musb_platform_ops sunxi_musb_ops = {
+ .quirks = MUSB_INDEXED_EP,
+ .init = sunxi_musb_init,
+ .exit = sunxi_musb_exit,
+ .enable = sunxi_musb_enable,
+ .disable = sunxi_musb_disable,
+ .fifo_offset = sunxi_musb_fifo_offset,
+ .ep_offset = sunxi_musb_ep_offset,
+ .busctl_offset = sunxi_musb_busctl_offset,
+ .readb = sunxi_musb_readb,
+ .writeb = sunxi_musb_writeb,
+ .readw = sunxi_musb_readw,
+ .writew = sunxi_musb_writew,
+ .dma_init = sunxi_musb_dma_controller_create,
+ .dma_exit = sunxi_musb_dma_controller_destroy,
+ .set_mode = sunxi_musb_set_mode,
+ .recover = sunxi_musb_recover,
+ .set_vbus = sunxi_musb_set_vbus,
+ .pre_root_reset_end = sunxi_musb_pre_root_reset_end,
+ .post_root_reset_end = sunxi_musb_post_root_reset_end,
+};
+
+/* Allwinner OTG supports up to 5 endpoints (num_eps counts ep0, hence 6) */
+#define SUNXI_MUSB_MAX_EP_NUM 6
+#define SUNXI_MUSB_RAM_BITS 11
+
+static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
+ MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(2, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(3, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(3, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(4, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(5, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(5, FIFO_RX, 512),
+};
+
+/* H3/V3s OTG supports only 4 endpoints (num_eps counts ep0, hence 5) */
+#define SUNXI_MUSB_MAX_EP_NUM_H3 5
+
+static struct musb_fifo_cfg sunxi_musb_mode_cfg_h3[] = {
+ MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(2, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(3, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(3, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(4, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
+};
+
+static const struct musb_hdrc_config sunxi_musb_hdrc_config = {
+ .fifo_cfg = sunxi_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = SUNXI_MUSB_MAX_EP_NUM,
+ .ram_bits = SUNXI_MUSB_RAM_BITS,
+};
+
+static struct musb_hdrc_config sunxi_musb_hdrc_config_h3 = {
+ .fifo_cfg = sunxi_musb_mode_cfg_h3,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_h3),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = SUNXI_MUSB_MAX_EP_NUM_H3,
+ .ram_bits = SUNXI_MUSB_RAM_BITS,
+};
+
+static int sunxi_musb_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data pdata;
+ struct platform_device_info pinfo;
+ struct sunxi_glue *glue;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Error no device tree node found\n");
+ return -EINVAL;
+ }
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ memset(&pdata, 0, sizeof(pdata));
+ switch (usb_get_dr_mode(&pdev->dev)) {
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
+ case USB_DR_MODE_HOST:
+ pdata.mode = MUSB_HOST;
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ break;
+#endif
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_GADGET
+ case USB_DR_MODE_PERIPHERAL:
+ pdata.mode = MUSB_PERIPHERAL;
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_DUAL_ROLE
+ case USB_DR_MODE_OTG:
+ pdata.mode = MUSB_OTG;
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ break;
+#endif
+ default:
+ dev_err(&pdev->dev, "Invalid or missing 'dr_mode' property\n");
+ return -EINVAL;
+ }
+ pdata.platform_ops = &sunxi_musb_ops;
+ if (!of_device_is_compatible(np, "allwinner,sun8i-h3-musb"))
+ pdata.config = &sunxi_musb_hdrc_config;
+ else
+ pdata.config = &sunxi_musb_hdrc_config_h3;
+
+ glue->dev = &pdev->dev;
+ INIT_WORK(&glue->work, sunxi_musb_work);
+ glue->host_nb.notifier_call = sunxi_musb_host_notifier;
+
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-musb"))
+ set_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags);
+
+ if (of_device_is_compatible(np, "allwinner,sun6i-a31-musb"))
+ set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+
+ if (of_device_is_compatible(np, "allwinner,sun8i-a33-musb") ||
+ of_device_is_compatible(np, "allwinner,sun8i-h3-musb")) {
+ set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+ set_bit(SUNXI_MUSB_FL_NO_CONFIGDATA, &glue->flags);
+ }
+
+ glue->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(glue->clk)) {
+ dev_err(&pdev->dev, "Error getting clock: %ld\n",
+ PTR_ERR(glue->clk));
+ return PTR_ERR(glue->clk);
+ }
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
+ glue->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(glue->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->rst),
+ "Error getting reset\n");
+ }
+
+ glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(glue->extcon))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->extcon),
+ "Invalid or missing extcon\n");
+
+ glue->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
+ "Error getting phy\n");
+
+ glue->usb_phy = usb_phy_generic_register();
+ if (IS_ERR(glue->usb_phy)) {
+ dev_err(&pdev->dev, "Error registering usb-phy %ld\n",
+ PTR_ERR(glue->usb_phy));
+ return PTR_ERR(glue->usb_phy);
+ }
+
+ glue->xceiv = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(glue->xceiv)) {
+ ret = PTR_ERR(glue->xceiv);
+ dev_err(&pdev->dev, "Error getting usb-phy %d\n", ret);
+ goto err_unregister_usb_phy;
+ }
+
+ platform_set_drvdata(pdev, glue);
+
+ memset(&pinfo, 0, sizeof(pinfo));
+ pinfo.name = "musb-hdrc";
+ pinfo.id = PLATFORM_DEVID_AUTO;
+ pinfo.parent = &pdev->dev;
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = &pdata;
+ pinfo.size_data = sizeof(pdata);
+
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
+ dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
+ goto err_unregister_usb_phy;
+ }
+
+ return 0;
+
+err_unregister_usb_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+}
+
+static int sunxi_musb_remove(struct platform_device *pdev)
+{
+ struct sunxi_glue *glue = platform_get_drvdata(pdev);
+ struct platform_device *usb_phy = glue->usb_phy;
+
+ platform_device_unregister(glue->musb_pdev);
+ usb_phy_generic_unregister(usb_phy);
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_musb_match[] = {
+ { .compatible = "allwinner,sun4i-a10-musb", },
+ { .compatible = "allwinner,sun6i-a31-musb", },
+ { .compatible = "allwinner,sun8i-a33-musb", },
+ { .compatible = "allwinner,sun8i-h3-musb", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_musb_match);
+
+static struct platform_driver sunxi_musb_driver = {
+ .probe = sunxi_musb_probe,
+ .remove = sunxi_musb_remove,
+ .driver = {
+ .name = "musb-sunxi",
+ .of_match_table = sunxi_musb_match,
+ },
+};
+module_platform_driver(sunxi_musb_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi MUSB Glue Layer");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 000000000..5609b4e84
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ * configured for NOR FLASH interface instead of VLYNQ serial
+ * interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/prefetch.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+
+#include "musb_core.h"
+
+struct tusb6010_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+};
+
+static void tusb_musb_set_vbus(struct musb *musb, int is_on);
+
+#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
+
+/*
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+static u8 tusb_get_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 die_id;
+ u8 rev;
+
+ rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+ if (TUSB_REV_MAJOR(rev) == 3) {
+ die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
+ TUSB_DIDR1_HI));
+ if (die_id >= TUSB_DIDR1_HI_REV_31)
+ rev |= 1;
+ }
+
+ return rev;
+}
+
+static void tusb_print_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u8 rev;
+
+ rev = musb->tusb_revision;
+
+ pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
+ "prcm",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ "int",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ "gpio",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ "dma",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ "dieid",
+ TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
+ "rev",
+ TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
+}
+
+#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
+ | TUSB_PHY_OTG_CTRL_TESTM0)
+
+/*
+ * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
+ * Disables power detection in PHY for the duration of idle.
+ */
+static void tusb_wbus_quirk(struct musb *musb, int enabled)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ static u32 phy_otg_ctrl, phy_otg_ena;
+ u32 tmp;
+
+ if (enabled) {
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
+ | phy_otg_ena | WBUS_QUIRK_MASK;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
+ tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ dev_dbg(musb->controller, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
+ & TUSB_PHY_OTG_CTRL_TESTM2) {
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ dev_dbg(musb->controller, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ phy_otg_ctrl = 0;
+ phy_otg_ena = 0;
+ }
+}
+
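+/* Each endpoint gets its own 0x20 byte FIFO window, starting at 0x200 */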
+static u32 tusb_fifo_offset(u8 epnum)
+{
+ return 0x200 + (epnum * 0x20);
+}
+
+static u32 tusb_ep_offset(u8 epnum, u16 offset)
+{
+ return 0x10 + offset;
+}
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+static void tusb_ep_select(void __iomem *mbase, u8 epnum)
+{
+ musb_writeb(mbase, MUSB_INDEX, epnum);
+}
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static u8 tusb_readb(void __iomem *addr, u32 offset)
+{
+ u16 tmp;
+ u8 val;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ val = (tmp >> 8);
+ else
+ val = tmp & 0xff;
+
+ return val;
+}
+
+static void tusb_writeb(void __iomem *addr, u32 offset, u8 data)
+{
+ u16 tmp;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ tmp = (data << 8) | (tmp & 0xff);
+ else
+ tmp = (tmp & 0xff00) | data;
+
+ __raw_writew(tmp, addr + (offset & ~1));
+}
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops, so
+ * both loading and unloading FIFOs need explicit byte counts.
+ */
+
+static inline void
+tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ memcpy(&val, buf, 4);
+ musb_writel(fifo, 0, val);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+		/* Write the remaining 1-3 bytes to the FIFO */
+ val = 0;
+ memcpy(&val, buf, len);
+ musb_writel(fifo, 0, val);
+ }
+}
+
+static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+ void *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, 4);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+		/* Read the remaining 1-3 bytes from the FIFO */
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, len);
+ }
+}
+
+static void tusb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+ struct musb *musb = hw_ep->musb;
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ prefetch(buf);
+
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+ TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ iowrite32_rep(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use writesw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = (u32)(*(u16 *)buf);
+ buf += 2;
+ val |= (*(u16 *)buf) << 16;
+ buf += 2;
+ musb_writel(fifo, 0, val);
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_write_unaligned(fifo, buf, len);
+}
+
+static void tusb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+ struct musb *musb = hw_ep->musb;
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ ioread32_rep(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use readsw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ *(u16 *)buf = (u16)(val & 0xffff);
+ buf += 2;
+ *(u16 *)buf = (u16)(val >> 16);
+ buf += 2;
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_read_unaligned(fifo, buf, len);
+}
+
+static struct musb *the_musb;
+
+/* This is used by gadget drivers, and OTG transceiver logic, allowing
+ * at most mA current to be drawn from VBUS during a Default-B session
+ * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
+ * mode), or low power Default-B sessions, something else supplies power.
+ * Caller must take care of locking.
+ */
+static int tusb_draw_power(struct usb_phy *x, unsigned mA)
+{
+ struct musb *musb = the_musb;
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ /* tps65030 seems to consume max 100mA, with maybe 60mA available
+ * (measured on one board) for things other than tps and tusb.
+ *
+ * Boards sharing the CPU clock with CLKIN will need to prevent
+ * certain idle sleep states while the USB link is active.
+ *
+ * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
+ * The actual current usage would be very board-specific. For now,
+ * it's simpler to just use an aggregate (also board-specific).
+ */
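+ /* min_power is in units of 2 mA, as in USB configuration descriptors */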
+ if (x->otg->default_a || mA < (musb->min_power << 1))
+ mA = 0;
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ if (mA) {
+ musb->is_bus_powered = 1;
+ reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
+ } else {
+ musb->is_bus_powered = 0;
+ reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+ }
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ dev_dbg(musb->controller, "draw max %d mA VBUS\n", mA);
+ return 0;
+}
+
+/* workaround for issue 13: change clock during chip idle
+ * (to be fixed in rev3 silicon) ... symptoms include disconnect
+ * or looping suspend/resume cycles
+ */
+static void tusb_set_clock_source(struct musb *musb, unsigned mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ reg = musb_readl(tbase, TUSB_PRCM_CONF);
+ reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
+
+ /* 0 = refclk (clkin, XI)
+ * 1 = PHY 60 MHz (internal PLL)
+ * 2 = not supported
+ * 3 = what?
+ */
+ if (mode > 0)
+ reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
+
+ musb_writel(tbase, TUSB_PRCM_CONF, reg);
+
+ /* FIXME tusb6010_platform_retime(mode == 0); */
+}
+
+/*
+ * Idle TUSB6010 until next wake-up event; NOR access always wakes.
+ * Other code ensures that we idle unless we're connected _and_ the
+ * USB link is not suspended ... and tells us the relevant wakeup
+ * events. SW_EN for voltage is handled separately.
+ */
+static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ if ((wakeup_enables & TUSB_PRCM_WBUS)
+ && (musb->tusb_revision == TUSB_REV_30))
+ tusb_wbus_quirk(musb, 1);
+
+ tusb_set_clock_source(musb, 0);
+
+ wakeup_enables |= TUSB_PRCM_WNORCS;
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
+
+ /* REVISIT writeup of WID implies that if WID set and ID is grounded,
+ * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
+ * Presumably that's mostly to save power, hence WID is immaterial ...
+ */
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
+ if (is_host_active(musb)) {
+ reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ } else {
+ reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ }
+ reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ dev_dbg(musb->controller, "idle, wake on %02x\n", wakeup_enables);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+static int tusb_musb_vbus_status(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, prcm_mngmt;
+ int ret = 0;
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
+
+ /* Temporarily enable VBUS detection if it was disabled for
+ * suspend mode. Unless it's enabled, otg_stat and devctl will
+ * not show the correct VBUS state.
+ */
+ if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+ u32 tmp = prcm_mngmt;
+ tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
+ }
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
+ ret = 1;
+
+ return ret;
+}
+
+static void musb_do_idle(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if ((musb->a_wait_bcon != 0)
+ && (musb->idle_timeout == 0
+ || time_after(jiffies, musb->idle_timeout))) {
+ dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ }
+ fallthrough;
+ case OTG_STATE_A_IDLE:
+ tusb_musb_set_vbus(musb, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (!musb->is_active) {
+ u32 wakeups;
+
+ /* wait until hub_wq handles port change status */
+ if (is_host_active(musb) && (musb->port1_status >> 16))
+ goto done;
+
+ if (!musb->gadget_driver) {
+ wakeups = 0;
+ } else {
+ wakeups = TUSB_PRCM_WHOSTDISCON
+ | TUSB_PRCM_WBUS
+ | TUSB_PRCM_WVBUS;
+ wakeups |= TUSB_PRCM_WID;
+ }
+ tusb_allow_idle(musb, wakeups);
+ }
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Maybe put TUSB6010 into idle mode depending on USB link status,
+ * like "disconnected" or "suspended". We'll be woken out of it by
+ * connect, resume, or disconnect.
+ *
+ * Needs to be called last wherever TUSB6010 registers are accessed,
+ * since any register access wakes up the NOR flash interface.
+ * Caller should own the controller spinlock.
+ *
+ * Delay because peripheral enables D+ pullup 3msec after SE0, and
+ * we don't want to treat that full speed J as a wakeup event.
+ * ... peripherals must draw only suspend current after 10 msec.
+ */
+static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ del_timer(&musb->dev_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb->dev_timer))
+ last_timer = timeout;
+ else {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb->dev_timer, timeout);
+}
+
+/* ticks of 60 MHz clock */
+#define DEVCLOCK 60000000
+#define OTG_TIMER_MS(msecs) ((msecs) \
+ ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
+ | TUSB_DEV_OTG_TIMER_ENABLE) \
+ : 0)
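+/*
+ * Example: OTG_TIMER_MS(100) loads (60000000 / 1000) * 100 = 6,000,000
+ * ticks of the 60 MHz clock, i.e. 100 ms, and sets the enable bit.
+ */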
+
+static void tusb_musb_set_vbus(struct musb *musb, int is_on)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 conf, prcm, timer;
+ u8 devctl;
+ struct usb_otg *otg = musb->xceiv->otg;
+
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ conf = musb_readl(tbase, TUSB_DEV_CONF);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
+ otg->default_a = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ conf |= TUSB_DEV_CONF_USB_HOST_MODE;
+ MUSB_HST_MODE(musb);
+ } else {
+ u32 otg_stat;
+
+ timer = 0;
+
+ /* If ID pin is grounded, we want to be a_idle */
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ }
+ musb->is_active = 0;
+ otg->default_a = 1;
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+ otg->default_a = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
+ }
+ prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
+ musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
+ musb_writel(tbase, TUSB_DEV_CONF, conf);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ musb_readl(tbase, TUSB_DEV_OTG_STAT),
+ conf, prcm);
+}
+
+/*
+ * Sets the mode to OTG, peripheral or host by changing the ID detection.
+ * Caller must take care of locking.
+ *
+ * Note that if a mini-A cable is plugged in the ID line will stay down as
+ * the weak ID pull-up is not able to pull the ID up.
+ */
+static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
+
+ switch (musb_mode) {
+
+ case MUSB_HOST: /* Disable PHY ID detect, ground ID */
+ phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= TUSB_DEV_CONF_ID_SEL;
+ dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
+ break;
+ case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+ case MUSB_OTG: /* Use PHY ID detection */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+
+ default:
+ dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode);
+ return -EINVAL;
+ }
+
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
+ musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if ((musb_mode == MUSB_PERIPHERAL) &&
+ !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
+ INFO("Cannot be peripheral with mini-A cable "
+ "otg_stat: %08x\n", otg_stat);
+
+ return 0;
+}
+
+static inline unsigned long
+tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
+{
+ u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ unsigned long idle_timeout = 0;
+ struct usb_otg *otg = musb->xceiv->otg;
+
+ /* ID pin */
+ if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+ int default_a;
+
+ default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+ dev_dbg(musb->controller, "Default-%c\n", default_a ? 'A' : 'B');
+ otg->default_a = default_a;
+ tusb_musb_set_vbus(musb, default_a);
+
+ /* Don't allow idling immediately */
+ if (default_a)
+ idle_timeout = jiffies + (HZ * 3);
+ }
+
+ /* VBUS state change */
+ if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
+
+ /* B-dev state machine: no vbus ~= disconnect */
+ if (!otg->default_a) {
+ /* ? musb_root_disconnect(musb); */
+ musb->port1_status &=
+ ~(USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED
+ | USB_PORT_STAT_TEST
+ );
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
+ dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n");
+ if (musb->xceiv->otg->state != OTG_STATE_B_IDLE) {
+ /* INTR_DISCONNECT can hide... */
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ }
+ musb->is_active = 0;
+ }
+ dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
+ usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
+ idle_timeout = jiffies + (1 * HZ);
+ schedule_delayed_work(&musb->irq_work, 0);
+
+ } else /* A-dev state machine */ {
+ dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
+ usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_IDLE:
+ dev_dbg(musb->controller, "Got SRP, turning on VBUS\n");
+ musb_platform_set_vbus(musb, 1);
+
+ /* CONNECT can wake if a_wait_bcon is set */
+ if (musb->a_wait_bcon != 0)
+ musb->is_active = 0;
+ else
+ musb->is_active = 1;
+
+ /*
+ * OPT FS A TD.4.6 needs few seconds for
+ * A_WAIT_VRISE
+ */
+ idle_timeout = jiffies + (2 * HZ);
+
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ /* ignore; A-session-valid < VBUS_VALID/2,
+ * we monitor this with the timer
+ */
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /* REVISIT this irq triggers during short
+ * spikes caused by enumeration ...
+ */
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ tusb_musb_set_vbus(musb, 1);
+ } else {
+ musb->vbuserr_retry
+ = VBUSERR_RETRY_COUNT;
+ tusb_musb_set_vbus(musb, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ /* OTG timer expiration */
+ if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+ u8 devctl;
+
+ dev_dbg(musb->controller, "%s timer, %03x\n",
+ usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ /* VBUS has probably been valid for a while now,
+ * but may well have bounced out of range a bit
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != MUSB_DEVCTL_VBUS) {
+ dev_dbg(musb->controller, "devctl %02x\n", devctl);
+ break;
+ }
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ } else {
+ /* REVISIT report overcurrent to hub? */
+ ERR("vbus too slow, devctl %02x\n", devctl);
+ tusb_musb_set_vbus(musb, 0);
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ break;
+ case OTG_STATE_A_SUSPEND:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ break;
+ default:
+ break;
+ }
+ }
+ schedule_delayed_work(&musb->irq_work, 0);
+
+ return idle_timeout;
+}
+
+static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
+{
+ struct musb *musb = __hci;
+ void __iomem *tbase = musb->ctrl_base;
+ unsigned long flags, idle_timeout = 0;
+ u32 int_mask, int_src;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Mask all interrupts to allow using both edge and level GPIO irq */
+ int_mask = musb_readl(tbase, TUSB_INT_MASK);
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+
+ int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+ dev_dbg(musb->controller, "TUSB IRQ %08x\n", int_src);
+
+ musb->int_usb = (u8) int_src;
+
+ /* Acknowledge wake-up source interrupts */
+ if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+ u32 reg;
+ u32 i;
+
+ if (musb->tusb_revision == TUSB_REV_30)
+ tusb_wbus_quirk(musb, 0);
+
+ /* there are issues re-locking the PLL on wakeup ... */
+
+ /* work around issue 8 */
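+ /* Write a changing pattern to the scratch pad and read it back;
+ * a match means the NOR interface is responsive again.
+ */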
+ for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
+ musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
+ musb_writel(tbase, TUSB_SCRATCH_PAD, i);
+ reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
+ if (reg == i)
+ break;
+ dev_dbg(musb->controller, "TUSB NOR not ready\n");
+ }
+
+ /* work around issue 13 (2nd half) */
+ tusb_set_clock_source(musb, 1);
+
+ reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
+ if (reg & ~TUSB_PRCM_WNORCS) {
+ musb->is_active = 1;
+ schedule_delayed_work(&musb->irq_work, 0);
+ }
+ dev_dbg(musb->controller, "wake %sactive %02x\n",
+ musb->is_active ? "" : "in", reg);
+
+ /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
+ }
+
+ if (int_src & TUSB_INT_SRC_USB_IP_CONN)
+ del_timer(&musb->dev_timer);
+
+ /* OTG state change reports (annoyingly) not issued by Mentor core */
+ if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
+ | TUSB_INT_SRC_OTG_TIMEOUT
+ | TUSB_INT_SRC_ID_STATUS_CHNG))
+ idle_timeout = tusb_otg_ints(musb, int_src, tbase);
+
+ /*
+ * Just clear the DMA interrupt if it occurs; completion for both
+ * TX and RX is handled by the DMA callback in tusb6010_omap
+ */
+ if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
+ u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
+
+ dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src);
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
+ }
+
+ /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+ u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
+
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
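+ /* RX endpoint bits start at ep1, hence the shift left by one */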
+ musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+ musb->int_tx = (musb_src & 0xffff);
+ } else {
+ musb->int_rx = 0;
+ musb->int_tx = 0;
+ }
+
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
+ musb_interrupt(musb);
+
+ /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR,
+ int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+ tusb_musb_try_idle(musb, idle_timeout);
+
+ musb_writel(tbase, TUSB_INT_MASK, int_mask);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ */
+static void tusb_musb_enable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+ * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+ musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
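+ /* a set bit in TUSB_INT_MASK masks that source, so only SOF stays masked */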
+
+ /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ /* Clear all subsystem interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+ /* Acknowledge pending interrupt(s) */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
+
+ /* Only a minimum interrupt de-assertion time of 0 clock cycles and
+ * active-low interrupt polarity seem to work reliably here */
+ musb_writel(tbase, TUSB_INT_CTRL_CONF,
+ TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+ irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+ /* maybe force into the Default-A OTG state machine */
+ if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+ & TUSB_DEV_OTG_STAT_ID_STATUS))
+ musb_writel(tbase, TUSB_INT_SRC_SET,
+ TUSB_INT_SRC_ID_STATUS_CHNG);
+
+ if (is_dma_capable() && dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+static void tusb_musb_disable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* FIXME stop DMA, IRQs, timers, ... */
+
+ /* disable all IRQs */
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ del_timer(&musb->dev_timer);
+
+ if (is_dma_capable() && !dma_off) {
+ printk(KERN_WARNING "%s %s: dma still active\n",
+ __FILE__, __func__);
+ dma_off = 1;
+ }
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void tusb_setup_cpu_interface(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /*
+ * Disable GPIO[5:0] pullups (used as output DMA requests)
+ * Don't disable GPIO[7:6] as they are needed for wake-up.
+ */
+ musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
+
+ /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+ musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+ /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+ musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+ /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+ * de-assertion time 2 system clocks (datasheet p. 62) */
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ /* Set 0 wait count for synchronous burst access */
+ musb_writel(tbase, TUSB_WAIT_COUNT, 1);
+}
+
+static int tusb_musb_start(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ int ret = 0;
+ unsigned long flags;
+ u32 reg;
+
+ if (musb->board_set_power)
+ ret = musb->board_set_power(1);
+ if (ret != 0) {
+ printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
+ TUSB_PROD_TEST_RESET_VAL) {
+ printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+ goto err;
+ }
+
+ musb->tusb_revision = tusb_get_revision(musb);
+ tusb_print_revision(musb);
+ if (musb->tusb_revision < 2) {
+ printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+ musb->tusb_revision);
+ goto err;
+ }
+
+ /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+ * the NOR FLASH interface is used */
+ musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
+
+ /* Select PHY free running 60MHz as a system clock */
+ tusb_set_clock_source(musb, 1);
+
+ /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+ * power saving, enable VBus detect and session end comparators,
+ * enable IDpullup, enable VBus charging */
+ musb_writel(tbase, TUSB_PRCM_MNGMT,
+ TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+ TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+ TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+ TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+ TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+ tusb_setup_cpu_interface(musb);
+
+ /* simplify: always sense/pullup ID pins, as if in OTG mode */
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
+
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ return -ENODEV;
+}
+
+static int tusb_musb_init(struct musb *musb)
+{
+ struct platform_device *pdev;
+ struct resource *mem;
+ void __iomem *sync = NULL;
+ int ret;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv))
+ return -EPROBE_DEFER;
+
+ pdev = to_platform_device(musb->controller);
+
+ /* dma address for async dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ pr_debug("no async dma resource?\n");
+ ret = -ENODEV;
+ goto done;
+ }
+ musb->async = mem->start;
+
+ /* dma address for sync dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem) {
+ pr_debug("no sync dma resource?\n");
+ ret = -ENODEV;
+ goto done;
+ }
+ musb->sync = mem->start;
+
+ sync = ioremap(mem->start, resource_size(mem));
+ if (!sync) {
+ pr_debug("ioremap for sync failed\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ musb->sync_va = sync;
+
+ /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+ * FIFOs at 0x600, TUSB at 0x800
+ */
+ musb->mregs += TUSB_BASE_OFFSET;
+
+ ret = tusb_musb_start(musb);
+ if (ret) {
+ printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+ ret);
+ goto done;
+ }
+ musb->isr = tusb_musb_interrupt;
+
+ musb->xceiv->set_power = tusb_draw_power;
+ the_musb = musb;
+
+ timer_setup(&musb->dev_timer, musb_do_idle, 0);
+
+done:
+ if (ret < 0) {
+ if (sync)
+ iounmap(sync);
+
+ usb_put_phy(musb->xceiv);
+ }
+ return ret;
+}
+
+static int tusb_musb_exit(struct musb *musb)
+{
+ del_timer_sync(&musb->dev_timer);
+ the_musb = NULL;
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ iounmap(musb->sync_va);
+
+ usb_put_phy(musb->xceiv);
+ return 0;
+}
+
+static const struct musb_platform_ops tusb_ops = {
+ .quirks = MUSB_DMA_TUSB_OMAP | MUSB_IN_TUSB |
+ MUSB_G_NO_SKB_RESERVE,
+ .init = tusb_musb_init,
+ .exit = tusb_musb_exit,
+
+ .ep_offset = tusb_ep_offset,
+ .ep_select = tusb_ep_select,
+ .fifo_offset = tusb_fifo_offset,
+ .readb = tusb_readb,
+ .writeb = tusb_writeb,
+ .read_fifo = tusb_read_fifo,
+ .write_fifo = tusb_write_fifo,
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+ .dma_init = tusb_dma_controller_create,
+ .dma_exit = tusb_dma_controller_destroy,
+#endif
+ .enable = tusb_musb_enable,
+ .disable = tusb_musb_disable,
+
+ .set_mode = tusb_musb_set_mode,
+ .try_idle = tusb_musb_try_idle,
+
+ .vbus_status = tusb_musb_vbus_status,
+ .set_vbus = tusb_musb_set_vbus,
+};
+
+static const struct platform_device_info tusb_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int tusb_probe(struct platform_device *pdev)
+{
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct tusb6010_glue *glue;
+ struct platform_device_info pinfo;
+ int ret;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = &pdev->dev;
+
+ pdata->platform_ops = &tusb_ops;
+
+ usb_phy_generic_register();
+ platform_set_drvdata(pdev, glue);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
+ pinfo = tusb_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tusb_remove(struct platform_device *pdev)
+{
+ struct tusb6010_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->phy);
+
+ return 0;
+}
+
+static struct platform_driver tusb_driver = {
+ .probe = tusb_probe,
+ .remove = tusb_remove,
+ .driver = {
+ .name = "musb-tusb",
+ },
+};
+
+MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(tusb_driver);
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 000000000..8a253564f
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+/* VLYNQ control register. 32-bit; the VLYNQ region starts at offset 0x000 */
+#define TUSB_VLYNQ_CTRL 0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET 0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE 0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE 0x800
+
+#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
+#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
+#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
+#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
+#define TUSB_DEV_CONF_ID_SEL (1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
+#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
+#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23)
+#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19)
+#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18)
+#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
+#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
+#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
+#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
+#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
+#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
+#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
+#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
+#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
+#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7)
+#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
+#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
+#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
+#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
+#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
+#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
+#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
+#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
+#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
+#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
+#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
+#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
+#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
+#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2)
+#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
+#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
+#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
+
+#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
+# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
+# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
+#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
+#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
+#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
+#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25)
+#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19)
+#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
+#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
+#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
+#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
+#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
+#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
+#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
+#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
+#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
+#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
+#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
+#define TUSB_PRCM_WGPIO_7 (1 << 12)
+#define TUSB_PRCM_WGPIO_6 (1 << 11)
+#define TUSB_PRCM_WGPIO_5 (1 << 10)
+#define TUSB_PRCM_WGPIO_4 (1 << 9)
+#define TUSB_PRCM_WGPIO_3 (1 << 8)
+#define TUSB_PRCM_WGPIO_2 (1 << 7)
+#define TUSB_PRCM_WGPIO_1 (1 << 6)
+#define TUSB_PRCM_WGPIO_0 (1 << 5)
+#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
+#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
+#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
+#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
+#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
+#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
+#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
+#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
+#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
+#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
+#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
+#define TUSB_INT_SRC_DEV_READY (1 << 12)
+#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
+#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
+#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
+#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
+#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
+#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
+#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
+#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
+#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
+#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17)
+#define TUSB_INT_MASK_RESERVED_13 (1 << 13)
+#define TUSB_INT_MASK_RESERVED_8 (0xf << 8)
+#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26)
+#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18)
+#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \
+ TUSB_INT_MASK_RESERVED_13 | \
+ TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \
+ TUSB_INT_SRC_RESERVED_18 | \
+ TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
+
+#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN (1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL 0xa596
+#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
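+/* e.g. TUSB_EP_FIFO(0) = 0x600, TUSB_EP_FIFO(1) = 0x620 */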
+
+#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8)
+#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc)
+#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf)
+#define TUSB_DIDR1_HI_REV_20 0
+#define TUSB_DIDR1_HI_REV_30 1
+#define TUSB_DIDR1_HI_REV_31 2
+
+#define TUSB_REV_10 0x10
+#define TUSB_REV_20 0x20
+#define TUSB_REV_30 0x30
+#define TUSB_REV_31 0x31
+
+#endif /* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 000000000..60a93b8bb
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+
+#include "musb_core.h"
+#include "tusb6010.h"
+
+#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
+
+#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_dma_data {
+ s8 dmareq;
+ struct dma_chan *chan;
+};
+
+struct tusb_omap_dma_ch {
+ struct musb *musb;
+ void __iomem *tbase;
+ unsigned long phys_offset;
+ int epnum;
+ u8 tx;
+ struct musb_hw_ep *hw_ep;
+
+ struct tusb_dma_data *dma_data;
+
+ struct tusb_omap_dma *tusb_dma;
+
+ dma_addr_t dma_addr;
+
+ u32 len;
+ u16 packet_sz;
+ u16 transfer_packet_sz;
+ u32 transfer_len;
+ u32 completed_len;
+};
+
+struct tusb_omap_dma {
+ struct dma_controller controller;
+ void __iomem *tbase;
+
+ struct tusb_dma_data dma_pool[MAX_DMAREQ];
+ unsigned multichannel:1;
+};
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if (reg != 0) {
+ dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return -EAGAIN;
+ }
+
+ if (chdat->tx)
+ reg = (1 << 4) | chdat->epnum;
+ else
+ reg = chdat->epnum;
+
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if ((reg & 0xf) != chdat->epnum) {
+ printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return;
+ }
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
+}
+
+/*
+ * See also musb_dma_completion() in musb_core.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(void *data)
+{
+ struct dma_channel *channel = (struct dma_channel *)data;
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct device *dev = musb->controller;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *mbase = musb->mregs;
+ unsigned long remaining, flags, pio;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ dev_dbg(musb->controller, "ep%i %s dma callback\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx");
+
+ if (chdat->tx)
+ remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
+
+ /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
+ if (unlikely(remaining > chdat->transfer_len)) {
+ dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
+ chdat->tx ? "tx" : "rx", remaining);
+ remaining = 0;
+ }
+
+ channel->actual_len = chdat->transfer_len - remaining;
+ pio = chdat->len - channel->actual_len;
+
+ dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+
+ /* Transfer remaining 1 - 31 bytes */
+ if (pio > 0 && pio < 32) {
+ u8 *buf;
+
+ dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
+ buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
+ if (chdat->tx) {
+ dma_unmap_single(dev, chdat->dma_addr,
+ chdat->transfer_len,
+ DMA_TO_DEVICE);
+ musb_write_fifo(hw_ep, pio, buf);
+ } else {
+ dma_unmap_single(dev, chdat->dma_addr,
+ chdat->transfer_len,
+ DMA_FROM_DEVICE);
+ musb_read_fifo(hw_ep, pio, buf);
+ }
+ channel->actual_len += pio;
+ }
+
+ if (!tusb_dma->multichannel)
+ tusb_omap_free_shared_dmareq(chdat);
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+ /* We must terminate short tx transfers manually by setting TXPKTRDY.
+ * REVISIT: This same problem may occur with other MUSB dma as well.
+ * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+ */
+ if ((chdat->transfer_len < chdat->packet_sz)
+ || (chdat->transfer_len % chdat->packet_sz != 0)) {
+ u16 csr;
+
+ if (chdat->tx) {
+ dev_dbg(musb->controller, "terminating short tx packet\n");
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ }
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+ u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct device *dev = musb->controller;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *ep_conf = hw_ep->conf;
+ dma_addr_t fifo_addr = hw_ep->fifo_sync;
+ u32 dma_remaining;
+ u16 csr;
+ u32 psize;
+ struct tusb_dma_data *dma_data;
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_slave_config dma_cfg;
+ enum dma_transfer_direction dma_dir;
+ u32 port_window;
+ int ret;
+
+ if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
+ return false;
+
+ /*
+ * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
+ * register, causing a missed DMA interrupt. We could try to
+ * use a timer for the callback, but it is unsafe as the XFR_SIZE
+ * register is corrupt, and we won't know if the DMA worked.
+ */
+ if (dma_addr & 0x2)
+ return false;
+
+ /*
+ * Because of HW issue #10, it seems like mixing sync DMA and async
+ * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
+ * using the channel for DMA.
+ */
+ if (chdat->tx)
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
+ if (dma_remaining) {
+ dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
+ chdat->tx ? "tx" : "rx", dma_remaining);
+ return false;
+ }
+
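+ /* DMA only multiples of 32 bytes; a 1 - 31 byte remainder is
+ * finished with PIO in tusb_omap_dma_cb()
+ */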
+ chdat->transfer_len = len & ~0x1f;
+
+ if (len < packet_sz)
+ chdat->transfer_packet_sz = chdat->transfer_len;
+ else
+ chdat->transfer_packet_sz = packet_sz;
+
+ dma_data = chdat->dma_data;
+ if (!tusb_dma->multichannel) {
+ if (tusb_omap_use_shared_dmareq(chdat) != 0) {
+ dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
+ return false;
+ }
+ if (dma_data->dmareq < 0) {
+ /* REVISIT: This should get blocked earlier, happens
+ * with MSC ErrorRecoveryTest
+ */
+ WARN_ON(1);
+ return false;
+ }
+ }
+
+ chdat->packet_sz = packet_sz;
+ chdat->len = len;
+ channel->actual_len = 0;
+ chdat->dma_addr = dma_addr;
+ channel->status = MUSB_DMA_STATUS_BUSY;
+
+ /* Since we're recycling dma areas, we need to clean or invalidate */
+ if (chdat->tx) {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_map_single(dev, phys_to_virt(dma_addr), len,
+ DMA_TO_DEVICE);
+ } else {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_map_single(dev, phys_to_virt(dma_addr), len,
+ DMA_FROM_DEVICE);
+ }
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+
+ /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
+ if ((dma_addr & 0x3) == 0) {
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ port_window = 8;
+ } else {
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ port_window = 16;
+
+ fifo_addr = hw_ep->fifo_async;
+ }
+
+ dev_dbg(musb->controller,
+ "ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
+ chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);
+
+ dma_cfg.src_addr = fifo_addr;
+ dma_cfg.dst_addr = fifo_addr;
+ dma_cfg.src_port_window_size = port_window;
+ dma_cfg.src_maxburst = port_window;
+ dma_cfg.dst_port_window_size = port_window;
+ dma_cfg.dst_maxburst = port_window;
+
+ ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
+ if (ret) {
+ dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
+ return false;
+ }
+
+ dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
+ chdat->transfer_len, dma_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc) {
+ dev_err(musb->controller, "DMA prep_slave_single failed\n");
+ return false;
+ }
+
+ dma_desc->callback = tusb_omap_dma_cb;
+ dma_desc->callback_param = channel;
+ dmaengine_submit(dma_desc);
+
+ dev_dbg(musb->controller,
+ "ep%i %s using %i-bit %s dma from %pad to %pad\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ dma_cfg.src_addr_width * 8,
+ ((dma_addr & 0x3) == 0) ? "sync" : "async",
+ (dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
+ (dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);
+
+ /*
+ * Prepare MUSB for DMA transfer
+ */
+ musb_ep_select(mbase, chdat->epnum);
+ if (chdat->tx) {
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_DMAENAB;
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+ musb_writew(hw_ep->regs, MUSB_RXCSR,
+ csr | MUSB_RXCSR_P_WZC_BITS);
+ }
+
+ /* Start DMA transfer */
+ dma_async_issue_pending(dma_data->chan);
+
+ if (chdat->tx) {
+ /* Send transfer_packet_sz packets at a time */
+ psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+ psize &= ~0x7ff;
+ psize |= chdat->transfer_packet_sz;
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
+
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ } else {
+ /* Receive transfer_packet_sz packets at a time */
+ psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+ psize &= ~(0x7ff << 16);
+ psize |= (chdat->transfer_packet_sz << 16);
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
+
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ }
+
+ return true;
+}
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+
+ if (chdat->dma_data)
+ dmaengine_terminate_all(chdat->dma_data->chan);
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ int i, dmareq_nr = -1;
+
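+ /*
+ * TUSB_DMA_EP_MAP packs one 5-bit field per dmareq line:
+ * bits [3:0] hold the endpoint number, bit 4 the TX direction
+ * flag. A zero field means the dmareq line is free.
+ */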
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+ if (cur == 0) {
+ dmareq_nr = i;
+ break;
+ }
+ }
+
+ if (dmareq_nr == -1)
+ return -EAGAIN;
+
+ reg |= (chdat->epnum << (dmareq_nr * 5));
+ if (chdat->tx)
+ reg |= ((1 << 4) << (dmareq_nr * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];
+
+ return 0;
+}
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg;
+
+ if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
+ return;
+
+ reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dma_data = NULL;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep,
+ u8 tx)
+{
+ int ret, i;
+ struct tusb_omap_dma *tusb_dma;
+ struct musb *musb;
+ struct dma_channel *channel = NULL;
+ struct tusb_omap_dma_ch *chdat = NULL;
+ struct tusb_dma_data *dma_data = NULL;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ musb = tusb_dma->controller.musb;
+
+ /* REVISIT: Why does dmareq5 not work? */
+ if (hw_ep->epnum == 0) {
+ dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+ return NULL;
+ }
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
+ ch->status = MUSB_DMA_STATUS_FREE;
+ channel = ch;
+ chdat = ch->private_data;
+ break;
+ }
+ }
+
+ if (!channel)
+ return NULL;
+
+ chdat->musb = tusb_dma->controller.musb;
+ chdat->tbase = tusb_dma->tbase;
+ chdat->hw_ep = hw_ep;
+ chdat->epnum = hw_ep->epnum;
+ chdat->completed_len = 0;
+ chdat->tusb_dma = tusb_dma;
+ if (tx)
+ chdat->tx = 1;
+ else
+ chdat->tx = 0;
+
+ channel->max_len = 0x7fffffff;
+ channel->desired_mode = 0;
+ channel->actual_len = 0;
+
+ if (!chdat->dma_data) {
+ if (tusb_dma->multichannel) {
+ ret = tusb_omap_dma_allocate_dmareq(chdat);
+ if (ret != 0)
+ goto free_dmareq;
+ } else {
+ chdat->dma_data = &tusb_dma->dma_pool[0];
+ }
+ }
+
+ dma_data = chdat->dma_data;
+
+ dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
+ chdat->epnum,
+ chdat->tx ? "tx" : "rx",
+ tusb_dma->multichannel ? "shared" : "dedicated",
+ dma_data->dmareq);
+
+ return channel;
+
+free_dmareq:
+ tusb_omap_dma_free_dmareq(chdat);
+
+ dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct musb *musb = chdat->musb;
+
+ dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);
+
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ dmaengine_terminate_sync(chdat->dma_data->chan);
+ tusb_omap_dma_free_dmareq(chdat);
+
+ channel = NULL;
+}
+
+void tusb_dma_controller_destroy(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch) {
+ kfree(ch->private_data);
+ kfree(ch);
+ }
+
+ /* Free up the DMA channels */
+ if (tusb_dma && tusb_dma->dma_pool[i].chan)
+ dma_release_channel(tusb_dma->dma_pool[i].chan);
+ }
+
+ kfree(tusb_dma);
+}
+EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);
+
+static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
+{
+ struct musb *musb = tusb_dma->controller.musb;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
+
+ /*
+ * Request DMA channels:
+ * - one channel in non-multichannel mode
+ * - MAX_DMAREQ channels in multichannel mode
+ */
+ if (i == 0 || tusb_dma->multichannel) {
+ char ch_name[8];
+
+ sprintf(ch_name, "dmareq%d", i);
+ dma_data->chan = dma_request_chan(musb->controller,
+ ch_name);
+ if (IS_ERR(dma_data->chan)) {
+ dev_err(musb->controller,
+ "Failed to request %s\n", ch_name);
+ ret = PTR_ERR(dma_data->chan);
+ goto dma_error;
+ }
+
+ dma_data->dmareq = i;
+ } else {
+ dma_data->dmareq = -1;
+ }
+ }
+
+ return 0;
+
+dma_error:
+ for (; i >= 0; i--) {
+ struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
+
+ if (dma_data->dmareq >= 0)
+ dma_release_channel(dma_data->chan);
+ }
+
+ return ret;
+}
+
+struct dma_controller *
+tusb_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ /* REVISIT: Get dmareq lines used from board-*.c */
+
+ musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
+
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+ if (!tusb_dma)
+ goto out;
+
+ tusb_dma->controller.musb = musb;
+ tusb_dma->tbase = musb->ctrl_base;
+
+ tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+ tusb_dma->controller.channel_release = tusb_omap_dma_release;
+ tusb_dma->controller.channel_program = tusb_omap_dma_program;
+ tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+ if (musb->tusb_revision >= TUSB_REV_30)
+ tusb_dma->multichannel = 1;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch;
+ struct tusb_omap_dma_ch *chdat;
+
+ ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+ if (!ch)
+ goto cleanup;
+
+ dma_channel_pool[i] = ch;
+
+ chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+ if (!chdat)
+ goto cleanup;
+
+ ch->status = MUSB_DMA_STATUS_UNKNOWN;
+ ch->private_data = chdat;
+ }
+
+ if (tusb_omap_allocate_dma_pool(tusb_dma))
+ goto cleanup;
+
+ return &tusb_dma->controller;
+
+cleanup:
+ musb_dma_controller_destroy(&tusb_dma->controller);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tusb_dma_controller_create);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
new file mode 100644
index 000000000..8ea62c344
--- /dev/null
+++ b/drivers/usb/musb/ux500.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2010 ST-Ericsson AB
+ * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ *
+ * Based on omap2430.c
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/usb/musb-ux500.h>
+
+#include "musb_core.h"
+
+static const struct musb_hdrc_config ux500_musb_hdrc_config = {
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = 16,
+ .ram_bits = 16,
+};
+
+struct ux500_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+};
+#define glue_to_musb(g) platform_get_drvdata(g->musb)
+
+static void ux500_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ /*
+ * Wait for the musb to set as A device to enable the
+ * VBUS
+ */
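+ /* MUSB_DEVCTL bit 7 (0x80) is the B-Device flag */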
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(musb->controller,
+ "configured as A device timeout");
+ break;
+ }
+ }
+
+ } else {
+ musb->is_active = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ }
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and jumping
+ * right to B_IDLE...
+ */
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ /*
+ * Devctl values will be updated after vbus goes below
+ * session_valid. The time taken depends on the capacitance
+ * on the VBUS line. The max discharge time can be up to 1 sec
+ * as per the spec. Typically on our platform it is 200 ms.
+ */
+ if (!is_on)
+ mdelay(200);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
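+/*
+ * PHY event notifier: ID-ground starts a host session, while a VBUS
+ * disconnect either drops the session (host) or falls back to B_IDLE
+ * (peripheral). On this platform the events typically come from the
+ * AB8500 USB PHY driver.
+ */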
+static int musb_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct musb *musb = container_of(nb, struct musb, nb);
+
+ dev_dbg(musb->controller, "musb_otg_notifications %ld %s\n",
+ event, usb_otg_state_string(musb->xceiv->otg->state));
+
+ switch (event) {
+ case UX500_MUSB_ID:
+ dev_dbg(musb->controller, "ID GND\n");
+ ux500_musb_set_vbus(musb, 1);
+ break;
+ case UX500_MUSB_VBUS:
+ dev_dbg(musb->controller, "VBUS Connect\n");
+ break;
+ case UX500_MUSB_NONE:
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
+ if (is_host_active(musb))
+ ux500_musb_set_vbus(musb, 0);
+ else
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
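+/*
+ * Interrupt entry: latch the three MUSB interrupt status registers
+ * under the controller lock and dispatch to the generic
+ * musb_interrupt() handler.
+ */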
+static irqreturn_t ux500_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static int ux500_musb_init(struct musb *musb)
+{
+ int status;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ pr_err("HS USB OTG: no transceiver configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ musb->nb.notifier_call = musb_otg_notifications;
+ status = usb_register_notifier(musb->xceiv, &musb->nb);
+ if (status < 0) {
+ dev_dbg(musb->controller, "notification register failed\n");
+ return status;
+ }
+
+ musb->isr = ux500_musb_interrupt;
+
+ return 0;
+}
+
+static int ux500_musb_exit(struct musb *musb)
+{
+ usb_unregister_notifier(musb->xceiv, &musb->nb);
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static const struct musb_platform_ops ux500_ops = {
+ .quirks = MUSB_DMA_UX500 | MUSB_INDEXED_EP,
+#ifdef CONFIG_USB_UX500_DMA
+ .dma_init = ux500_dma_controller_create,
+ .dma_exit = ux500_dma_controller_destroy,
+#endif
+ .init = ux500_musb_init,
+ .exit = ux500_musb_exit,
+ .fifo_mode = 5,
+
+ .set_vbus = ux500_musb_set_vbus,
+};
+
+static struct musb_hdrc_platform_data *
+ux500_of_probe(struct platform_device *pdev, struct device_node *np)
+{
+ struct musb_hdrc_platform_data *pdata;
+ const char *mode;
+	int len;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+	mode = of_get_property(np, "dr_mode", &len);
+ if (!mode) {
+ dev_err(&pdev->dev, "No 'dr_mode' property found\n");
+ return NULL;
+ }
+
+	if (len > 0) {
+		if (!strcmp(mode, "host"))
+			pdata->mode = MUSB_HOST;
+		else if (!strcmp(mode, "otg"))
+			pdata->mode = MUSB_OTG;
+		else if (!strcmp(mode, "peripheral"))
+			pdata->mode = MUSB_PERIPHERAL;
+ }
+
+ return pdata;
+}
+
+static int ux500_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct platform_device *musb;
+ struct ux500_glue *glue;
+ struct clk *clk;
+ int ret = -ENOMEM;
+
+ if (!pdata) {
+ if (np) {
+ pdata = ux500_of_probe(pdev, np);
+ if (!pdata)
+ goto err0;
+
+ pdev->dev.platform_data = pdata;
+ } else {
+ dev_err(&pdev->dev, "no pdata or device tree found\n");
+ goto err0;
+ }
+ }
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ goto err0;
+
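+	/*
+	 * The glue layer instantiates the generic "musb-hdrc" core as a
+	 * child platform device and hands it this SoC's resources, ops
+	 * and config through platform data.
+	 */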
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err0;
+ }
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err1;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err1;
+ }
+
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->clk = clk;
+
+ pdata->platform_ops = &ux500_ops;
+ pdata->config = &ux500_musb_hdrc_config;
+
+ platform_set_drvdata(pdev, glue);
+
+ ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err2;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err2;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ clk_disable_unprepare(clk);
+
+err1:
+ platform_device_put(musb);
+
+err0:
+ return ret;
+}
+
+static int ux500_remove(struct platform_device *pdev)
+{
+ struct ux500_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
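+/*
+ * System sleep: suspend the PHY and gate the clock; resume restores
+ * the clock before waking the PHY.
+ */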
+static int ux500_suspend(struct device *dev)
+{
+ struct ux500_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ if (musb)
+ usb_phy_set_suspend(musb->xceiv, 1);
+
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+static int ux500_resume(struct device *dev)
+{
+ struct ux500_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+ int ret;
+
+ ret = clk_prepare_enable(glue->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ if (musb)
+ usb_phy_set_suspend(musb->xceiv, 0);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ux500_pm_ops, ux500_suspend, ux500_resume);
+
+static const struct of_device_id ux500_match[] = {
+ { .compatible = "stericsson,db8500-musb", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, ux500_match);
+
+static struct platform_driver ux500_driver = {
+ .probe = ux500_probe,
+ .remove = ux500_remove,
+ .driver = {
+ .name = "musb-ux500",
+ .pm = &ux500_pm_ops,
+ .of_match_table = ux500_match,
+ },
+};
+
+MODULE_DESCRIPTION("UX500 MUSB Glue Layer");
+MODULE_AUTHOR("Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(ux500_driver);
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
new file mode 100644
index 000000000..d5cf5e8bb
--- /dev/null
+++ b/drivers/usb/musb/ux500_dma.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/usb/musb/ux500_dma.c
+ *
+ * U8500 DMA support code
+ *
+ * Copyright (C) 2009 STMicroelectronics
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Authors:
+ * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * Praveena Nadahally <praveen.nadahally@stericsson.com>
+ * Rajaram Regupathy <ragupathy.rajaram@stericsson.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pfn.h>
+#include <linux/sizes.h>
+#include <linux/platform_data/usb-musb-ux500.h>
+#include "musb_core.h"
+
+static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12",
+ "iep_5_13", "iep_6_14", "iep_7_15", "iep_8" };
+static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12",
+ "oep_5_13", "oep_6_14", "oep_7_15", "oep_8" };
+
+struct ux500_dma_channel {
+ struct dma_channel channel;
+ struct ux500_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dma_chan;
+ unsigned int cur_len;
+ dma_cookie_t cookie;
+ u8 ch_num;
+ u8 is_tx;
+ u8 is_allocated;
+};
+
+struct ux500_dma_controller {
+ struct dma_controller controller;
+ struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
+ struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
+ void *private_data;
+ dma_addr_t phy_base;
+};
+
+/* dmaengine completion callback: finalize the transfer and notify the core. */
+static void ux500_dma_callback(void *private_data)
+{
+ struct dma_channel *channel = private_data;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+
+	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d\n",
+		hw_ep->epnum);
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ux500_channel->channel.actual_len = ux500_channel->cur_len;
+ ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ musb_dma_completion(musb, hw_ep->epnum, ux500_channel->is_tx);
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static bool ux500_configure_channel(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct dma_chan *dma_chan = ux500_channel->dma_chan;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ struct scatterlist sg;
+	/* Zero-init so fields we do not set below are not stack garbage. */
+	struct dma_slave_config slave_conf = {};
+ enum dma_slave_buswidth addr_width;
+ struct musb *musb = ux500_channel->controller->private_data;
+ dma_addr_t usb_fifo_addr = (musb->io.fifo_offset(hw_ep->epnum) +
+ ux500_channel->controller->phy_base);
+
+ dev_dbg(musb->controller,
+ "packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
+ packet_sz, mode, (unsigned long long) dma_addr,
+ len, ux500_channel->is_tx);
+
+ ux500_channel->cur_len = len;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
+ offset_in_page(dma_addr));
+ sg_dma_address(&sg) = dma_addr;
+ sg_dma_len(&sg) = len;
+
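+	/* Use 32-bit FIFO accesses when the length is word-aligned,
+	 * otherwise fall back to byte-wide transfers.
+	 */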
+ direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+
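+	/* Both src and dst are programmed with the FIFO bus address; the
+	 * dmaengine driver consults only the side that matches the
+	 * transfer direction.
+	 */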
+ slave_conf.direction = direction;
+ slave_conf.src_addr = usb_fifo_addr;
+ slave_conf.src_addr_width = addr_width;
+ slave_conf.src_maxburst = 16;
+ slave_conf.dst_addr = usb_fifo_addr;
+ slave_conf.dst_addr_width = addr_width;
+ slave_conf.dst_maxburst = 16;
+ slave_conf.device_fc = false;
+
+ dmaengine_slave_config(dma_chan, &slave_conf);
+
+ dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ return false;
+
+ dma_desc->callback = ux500_dma_callback;
+ dma_desc->callback_param = channel;
+ ux500_channel->cookie = dma_desc->tx_submit(dma_desc);
+
+ dma_async_issue_pending(dma_chan);
+
+ return true;
+}
+
+static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 is_tx)
+{
+ struct ux500_dma_controller *controller = container_of(c,
+ struct ux500_dma_controller, controller);
+ struct ux500_dma_channel *ux500_channel = NULL;
+ struct musb *musb = controller->private_data;
+ u8 ch_num = hw_ep->epnum - 1;
+
+	/* There are eight DMA channels (0-7), each hard-wired to one pair
+	 * of hardware endpoints; for example channel 0 can only serve
+	 * hw_ep 1 and hw_ep 9.
+	 */
+ if (ch_num > 7)
+ ch_num -= 8;
+
+ if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS)
+ return NULL;
+
+ ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) :
+				&(controller->rx_channel[ch_num]);
+
+ /* Check if channel is already used. */
+ if (ux500_channel->is_allocated)
+ return NULL;
+
+ ux500_channel->hw_ep = hw_ep;
+ ux500_channel->is_allocated = 1;
+
+ dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
+ hw_ep->epnum, is_tx, ch_num);
+
+ return &(ux500_channel->channel);
+}
+
+static void ux500_dma_channel_release(struct dma_channel *channel)
+{
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb *musb = ux500_channel->controller->private_data;
+
+ dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);
+
+ if (ux500_channel->is_allocated) {
+ ux500_channel->is_allocated = 0;
+ channel->status = MUSB_DMA_STATUS_FREE;
+ channel->actual_len = 0;
+ }
+}
+
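+/*
+ * Only word-aligned buffers of at least 512 bytes whose length is a
+ * multiple of four are handed to the DMA engine; everything else is
+ * left to PIO.
+ */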
+static int ux500_dma_is_compatible(struct dma_channel *channel,
+ u16 maxpacket, void *buf, u32 length)
+{
+ if ((maxpacket & 0x3) ||
+		((unsigned long) buf & 0x3) ||
+ (length < 512) ||
+ (length & 0x3))
+ return false;
+ else
+ return true;
+}
+
+static int ux500_dma_channel_program(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ int ret;
+
+ BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ channel->status == MUSB_DMA_STATUS_BUSY);
+
+ channel->status = MUSB_DMA_STATUS_BUSY;
+ channel->actual_len = 0;
+ ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
+ if (!ret)
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return ret;
+}
+
+static int ux500_dma_channel_abort(struct dma_channel *channel)
+{
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct ux500_dma_controller *controller = ux500_channel->controller;
+ struct musb *musb = controller->private_data;
+ void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
+ u16 csr;
+
+ dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
+ ux500_channel->ch_num, ux500_channel->is_tx);
+
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
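+		/*
+		 * Detach the endpoint FIFO from DMA before terminating the
+		 * dmaengine transfer, so the controller stops generating
+		 * requests mid-abort.
+		 */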
+ if (ux500_channel->is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_DMAMODE);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+ MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ dmaengine_terminate_all(ux500_channel->dma_chan);
+ channel->status = MUSB_DMA_STATUS_FREE;
+ }
+ return 0;
+}
+
+static void ux500_dma_controller_stop(struct ux500_dma_controller *controller)
+{
+ struct ux500_dma_channel *ux500_channel;
+ struct dma_channel *channel;
+ u8 ch_num;
+
+ for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
+ channel = &controller->rx_channel[ch_num].channel;
+ ux500_channel = channel->private_data;
+
+ ux500_dma_channel_release(channel);
+
+ if (ux500_channel->dma_chan)
+ dma_release_channel(ux500_channel->dma_chan);
+ }
+
+ for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
+ channel = &controller->tx_channel[ch_num].channel;
+ ux500_channel = channel->private_data;
+
+ ux500_dma_channel_release(channel);
+
+ if (ux500_channel->dma_chan)
+ dma_release_channel(ux500_channel->dma_chan);
+ }
+}
+
+static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
+{
+ struct ux500_dma_channel *ux500_channel = NULL;
+ struct musb *musb = controller->private_data;
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct ux500_musb_board_data *data;
+ struct dma_channel *dma_channel = NULL;
+	const char **chan_names;
+ u32 ch_num;
+ u8 dir;
+ u8 is_tx = 0;
+
+ void **param_array;
+ struct ux500_dma_channel *channel_array;
+ dma_cap_mask_t mask;
+
+ if (!plat) {
+ dev_err(musb->controller, "No platform data\n");
+ return -EINVAL;
+ }
+
+ data = plat->board_data;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
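+	/*
+	 * One loop body serves both directions: pass 0 walks the RX (iep)
+	 * channels, then the arrays are swapped at the bottom of the outer
+	 * loop and pass 1 walks the TX (oep) channels.
+	 */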
+ /* Prepare the loop for RX channels */
+ channel_array = controller->rx_channel;
+ param_array = data ? data->dma_rx_param_array : NULL;
+	chan_names = iep_chan_names;
+
+ for (dir = 0; dir < 2; dir++) {
+ for (ch_num = 0;
+ ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
+ ch_num++) {
+ ux500_channel = &channel_array[ch_num];
+ ux500_channel->controller = controller;
+ ux500_channel->ch_num = ch_num;
+ ux500_channel->is_tx = is_tx;
+
+ dma_channel = &(ux500_channel->channel);
+ dma_channel->private_data = ux500_channel;
+ dma_channel->status = MUSB_DMA_STATUS_FREE;
+ dma_channel->max_len = SZ_16M;
+
+ ux500_channel->dma_chan =
+ dma_request_chan(dev, chan_names[ch_num]);
+
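+			/*
+			 * No named (DT) channel available: fall back to the
+			 * legacy filter-based lookup driven by board data,
+			 * if any.
+			 */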
+ if (IS_ERR(ux500_channel->dma_chan))
+ ux500_channel->dma_chan =
+ dma_request_channel(mask,
+ data ?
+ data->dma_filter :
+ NULL,
+ param_array ?
+ param_array[ch_num] :
+ NULL);
+
+ if (!ux500_channel->dma_chan) {
+ ERR("Dma pipe allocation error dir=%d ch=%d\n",
+ dir, ch_num);
+
+ /* Release already allocated channels */
+ ux500_dma_controller_stop(controller);
+
+ return -EBUSY;
+ }
+		}
+
+ /* Prepare the loop for TX channels */
+ channel_array = controller->tx_channel;
+ param_array = data ? data->dma_tx_param_array : NULL;
+		chan_names = oep_chan_names;
+ is_tx = 1;
+ }
+
+ return 0;
+}
+
+void ux500_dma_controller_destroy(struct dma_controller *c)
+{
+ struct ux500_dma_controller *controller = container_of(c,
+ struct ux500_dma_controller, controller);
+
+ ux500_dma_controller_stop(controller);
+ kfree(controller);
+}
+EXPORT_SYMBOL_GPL(ux500_dma_controller_destroy);
+
+struct dma_controller *
+ux500_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct ux500_dma_controller *controller;
+ struct platform_device *pdev = to_platform_device(musb->controller);
+ struct resource *iomem;
+ int ret;
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ goto kzalloc_fail;
+
+ controller->private_data = musb;
+
+	/*
+	 * Save the physical base address: ux500_configure_channel() uses
+	 * it to compute bus addresses for the endpoint FIFOs.
+	 */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(musb->controller, "no memory resource defined\n");
+ goto plat_get_fail;
+ }
+
+ controller->phy_base = (dma_addr_t) iomem->start;
+
+ controller->controller.channel_alloc = ux500_dma_channel_allocate;
+ controller->controller.channel_release = ux500_dma_channel_release;
+ controller->controller.channel_program = ux500_dma_channel_program;
+ controller->controller.channel_abort = ux500_dma_channel_abort;
+ controller->controller.is_compatible = ux500_dma_is_compatible;
+
+ ret = ux500_dma_controller_start(controller);
+ if (ret)
+ goto plat_get_fail;
+ return &controller->controller;
+
+plat_get_fail:
+ kfree(controller);
+kzalloc_fail:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ux500_dma_controller_create);