author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/usb/dwc2
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/usb/dwc2')
-rw-r--r--  drivers/usb/dwc2/Kconfig        97
-rw-r--r--  drivers/usb/dwc2/Makefile       29
-rw-r--r--  drivers/usb/dwc2/core.c       1174
-rw-r--r--  drivers/usb/dwc2/core.h       1526
-rw-r--r--  drivers/usb/dwc2/core_intr.c   865
-rw-r--r--  drivers/usb/dwc2/debug.h        19
-rw-r--r--  drivers/usb/dwc2/debugfs.c     811
-rw-r--r--  drivers/usb/dwc2/drd.c         250
-rw-r--r--  drivers/usb/dwc2/gadget.c     5677
-rw-r--r--  drivers/usb/dwc2/hcd.c        5954
-rw-r--r--  drivers/usb/dwc2/hcd.h         787
-rw-r--r--  drivers/usb/dwc2/hcd_ddma.c   1347
-rw-r--r--  drivers/usb/dwc2/hcd_intr.c   2264
-rw-r--r--  drivers/usb/dwc2/hcd_queue.c  2069
-rw-r--r--  drivers/usb/dwc2/hw.h          875
-rw-r--r--  drivers/usb/dwc2/params.c      940
-rw-r--r--  drivers/usb/dwc2/pci.c         148
-rw-r--r--  drivers/usb/dwc2/platform.c    730
18 files changed, 25562 insertions, 0 deletions
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
new file mode 100644
index 000000000..c13171936
--- /dev/null
+++ b/drivers/usb/dwc2/Kconfig
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_DWC2
+ tristate "DesignWare USB2 DRD Core Support"
+ depends on HAS_DMA
+ depends on USB || USB_GADGET
+ depends on HAS_IOMEM
+ select USB_ROLE_SWITCH
+ help
+ Say Y here if your system has a Dual Role Hi-Speed USB
+ controller based on the DesignWare HSOTG IP Core.
+
+ If you choose to build the driver as dynamically linked
+ modules, the core module will be called dwc2.ko for all modes
+ (host, gadget and dual-role). The PCI bus interface module (if
+ you have a PCI bus system) will be called dwc2_pci.ko, and the
+ platform interface module (for controllers directly connected
+ to the CPU) will be called dwc2_platform.ko.
+
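+# Example (illustrative only; adjust to your platform): a typical
+# configuration fragment for a dual-role SoC might be
+#
+#   CONFIG_USB_DWC2=m
+#   CONFIG_USB_DWC2_DUAL_ROLE=y
+#
+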
+if USB_DWC2
+
+choice
+ bool "DWC2 Mode Selection"
+ default USB_DWC2_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC2_HOST if (USB && !USB_GADGET)
+ default USB_DWC2_PERIPHERAL if (!USB && USB_GADGET)
+
+config USB_DWC2_HOST
+ bool "Host only mode"
+ depends on USB=y || (USB_DWC2=m && USB)
+ help
+ The DesignWare USB 2.0 high-speed host controller is
+ integrated into many SoCs. Select this option if you want the
+ driver to operate in Host-only mode.
+
+comment "Gadget/Dual-role mode requires USB Gadget support to be enabled"
+
+config USB_DWC2_PERIPHERAL
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_DWC2
+ help
+ The DesignWare USB 2.0 high-speed gadget controller is
+ integrated into many SoCs. Select this option if you want the
+ driver to operate in Peripheral-only mode. This option requires
+ USB_GADGET to be enabled.
+
+config USB_DWC2_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on (USB=y && USB_GADGET=y) || (USB_DWC2=m && USB && USB_GADGET)
+ help
+ Select this option if you want the driver to work in a dual-role
+ mode. In this mode both host and gadget features are enabled, and
+ the role will be determined by the cable that gets plugged-in. This
+ option requires USB_GADGET to be enabled.
+endchoice
+
+config USB_DWC2_PCI
+ tristate "DWC2 PCI"
+ depends on USB_PCI
+ depends on USB_GADGET || !USB_GADGET
+ select NOP_USB_XCEIV
+ help
+ The DesignWare USB 2.0 PCI interface module for controllers
+ connected to a PCI bus.
+
+config USB_DWC2_DEBUG
+ bool "Enable Debugging Messages"
+ help
+ Say Y here to enable debugging messages in the DWC2 Driver.
+
+config USB_DWC2_VERBOSE
+ bool "Enable Verbose Debugging Messages"
+ depends on USB_DWC2_DEBUG
+ help
+ Say Y here to enable verbose debugging messages in the DWC2 Driver.
+ WARNING: Enabling this will quickly fill your message log.
+ If in doubt, say N.
+
+config USB_DWC2_TRACK_MISSED_SOFS
+ bool "Enable Missed SOF Tracking"
+ help
+ Say Y here to enable logging of missed SOF events to the dmesg log.
+ WARNING: This feature is still experimental.
+ If in doubt, say N.
+
+config USB_DWC2_DEBUG_PERIODIC
+ bool "Enable Debugging Messages For Periodic Transfers"
+ depends on USB_DWC2_DEBUG || USB_DWC2_VERBOSE
+ default y
+ help
+ Say N here to prevent (verbose) debugging messages from being
+ logged for periodic transfers. This allows better debugging of
+ non-periodic transfers, but the debug logs will of course be
+ incomplete. Note that this also disables some debug messages
+ for which the transfer type cannot be deduced.
+endif
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
new file mode 100644
index 000000000..2bcd6945d
--- /dev/null
+++ b/drivers/usb/dwc2/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-$(CONFIG_USB_DWC2_DEBUG) += -DDEBUG
+ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
+
+obj-$(CONFIG_USB_DWC2) += dwc2.o
+dwc2-y := core.o core_intr.o platform.o drd.o
+dwc2-y += params.o
+
+ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
+ dwc2-y += hcd.o hcd_intr.o
+ dwc2-y += hcd_queue.o hcd_ddma.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_DWC2_PERIPHERAL) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
+ dwc2-y += gadget.o
+endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+ dwc2-y += debugfs.o
+endif
+
+# NOTE: The previous s3c-hsotg peripheral-mode-only driver has been moved to
+# this location and renamed gadget.c. When building for dynamically linked
+# modules, dwc2.ko will get built for host mode, peripheral mode, and dual-role
+# mode. The PCI bus interface module will be called dwc2_pci.ko and the
+# platform interface module will be called dwc2_platform.ko.
+
+obj-$(CONFIG_USB_DWC2_PCI) += dwc2_pci.o
+dwc2_pci-y := pci.o
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
new file mode 100644
index 000000000..5635e4d7e
--- /dev/null
+++ b/drivers/usb/dwc2/core.c
@@ -0,0 +1,1174 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * core.c - DesignWare HS OTG Controller common routines
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * The Core code provides basic services for accessing and managing the
+ * DWC_otg hardware. These services are used by both the Host Controller
+ * Driver and the Peripheral Controller Driver.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/**
+ * dwc2_backup_global_registers() - Backup global controller registers.
+ * When suspending the USB bus, the registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_gregs_backup *gr;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup global regs */
+ gr = &hsotg->gr_backup;
+
+ gr->gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gr->gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gr->gahbcfg = dwc2_readl(hsotg, GAHBCFG);
+ gr->gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+ gr->grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
+ gr->gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
+ gr->gdfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
+ gr->pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
+ gr->glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ gr->gi2cctl = dwc2_readl(hsotg, GI2CCTL);
+ gr->pcgcctl = dwc2_readl(hsotg, PCGCTL);
+
+ gr->valid = true;
+ return 0;
+}
+
+/**
+ * dwc2_restore_global_registers() - Restore controller global registers.
+ * When resuming the USB bus, the registers need to be restored
+ * if controller power was disabled while suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_gregs_backup *gr;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore global regs */
+ gr = &hsotg->gr_backup;
+ if (!gr->valid) {
+ dev_err(hsotg->dev, "%s: no global registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ gr->valid = false;
+
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+ dwc2_writel(hsotg, gr->gotgctl, GOTGCTL);
+ dwc2_writel(hsotg, gr->gintmsk, GINTMSK);
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, gr->gahbcfg, GAHBCFG);
+ dwc2_writel(hsotg, gr->grxfsiz, GRXFSIZ);
+ dwc2_writel(hsotg, gr->gnptxfsiz, GNPTXFSIZ);
+ dwc2_writel(hsotg, gr->gdfifocfg, GDFIFOCFG);
+ dwc2_writel(hsotg, gr->pcgcctl1, PCGCCTL1);
+ dwc2_writel(hsotg, gr->glpmcfg, GLPMCFG);
+ dwc2_writel(hsotg, gr->pcgcctl, PCGCTL);
+ dwc2_writel(hsotg, gr->gi2cctl, GI2CCTL);
+
+ return 0;
+}
+
+/**
+ * dwc2_exit_partial_power_down() - Exit controller from Partial Power Down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: indicates whether resume is initiated by Reset.
+ * @restore: Controller registers need to be restored
+ */
+int dwc2_exit_partial_power_down(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ bool restore)
+{
+ struct dwc2_gregs_backup *gr;
+
+ gr = &hsotg->gr_backup;
+
+ /*
+ * Restore the host or device registers with the same mode the
+ * core entered partial power down in, by checking the backed-up
+ * GOTGCTL_CURMODE_HOST bit of the "gotgctl" register value.
+ */
+ if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
+ return dwc2_host_exit_partial_power_down(hsotg, rem_wakeup,
+ restore);
+ else
+ return dwc2_gadget_exit_partial_power_down(hsotg, restore);
+}
+
+/**
+ * dwc2_enter_partial_power_down() - Put controller in Partial Power Down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_enter_partial_power_down(struct dwc2_hsotg *hsotg)
+{
+ if (dwc2_is_host_mode(hsotg))
+ return dwc2_host_enter_partial_power_down(hsotg);
+ else
+ return dwc2_gadget_enter_partial_power_down(hsotg);
+}
+
+/**
+ * dwc2_restore_essential_regs() - Restore essential registers of the core.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rmode: Restore mode, enabled in case of remote-wakeup.
+ * @is_host: Host or device mode.
+ */
+static void dwc2_restore_essential_regs(struct dwc2_hsotg *hsotg, int rmode,
+ int is_host)
+{
+ u32 pcgcctl;
+ struct dwc2_gregs_backup *gr;
+ struct dwc2_dregs_backup *dr;
+ struct dwc2_hregs_backup *hr;
+
+ gr = &hsotg->gr_backup;
+ dr = &hsotg->dr_backup;
+ hr = &hsotg->hr_backup;
+
+ dev_dbg(hsotg->dev, "%s: restoring essential regs\n", __func__);
+
+ /* Load restore values for [31:14] bits */
+ pcgcctl = (gr->pcgcctl & 0xffffc000);
+ /* If High Speed */
+ if (is_host) {
+ if (!(pcgcctl & PCGCTL_P2HD_PRT_SPD_MASK))
+ pcgcctl |= BIT(17);
+ } else {
+ if (!(pcgcctl & PCGCTL_P2HD_DEV_ENUM_SPD_MASK))
+ pcgcctl |= BIT(17);
+ }
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ /* Unmask the global interrupt in GAHBCFG and restore it */
+ dwc2_writel(hsotg, gr->gahbcfg | GAHBCFG_GLBL_INTR_EN, GAHBCFG);
+
+ /* Clear all pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* Unmask restore done interrupt */
+ dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTMSK);
+
+ /* Restore GUSBCFG and HCFG/DCFG */
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+
+ if (is_host) {
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
+ if (rmode)
+ pcgcctl |= PCGCTL_RESTOREMODE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+
+ pcgcctl |= PCGCTL_ESS_REG_RESTORED;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+ } else {
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
+ if (!rmode)
+ pcgcctl |= PCGCTL_RESTOREMODE | PCGCTL_RSTPDWNMODULE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+
+ pcgcctl |= PCGCTL_ESS_REG_RESTORED;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+ }
+}
+
+/**
+ * dwc2_hib_restore_common() - Common part of restore routine.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: Remote-wakeup, enabled in case of remote-wakeup.
+ * @is_host: Host or device mode.
+ */
+void dwc2_hib_restore_common(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ int is_host)
+{
+ u32 gpwrdn;
+
+ /* Switch-on voltage to the core */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PWRDNSWTCH;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Reset core */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PWRDNRSTN;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Enable restore from PMU */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_RESTORE;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Disable Power Down Clamp */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PWRDNCLMP;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(50);
+
+ if (!is_host && rem_wakeup)
+ udelay(70);
+
+ /* Deassert reset core */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PWRDNRSTN;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Disable PMU interrupt */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUINTSEL;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Set Restore Essential Regs bit in PCGCCTL register */
+ dwc2_restore_essential_regs(hsotg, rem_wakeup, is_host);
+
+ /*
+ * Wait For Restore_done Interrupt. This mechanism of polling the
+ * interrupt is introduced to avoid any possible race conditions
+ */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_RESTOREDONE,
+ 20000)) {
+ dev_dbg(hsotg->dev,
+ "%s: Restore Done wasn't generated here\n",
+ __func__);
+ } else {
+ dev_dbg(hsotg->dev, "restore done generated here\n");
+
+ /*
+ * To avoid a restore done interrupt storm after restore,
+ * clear the GINTSTS_RESTOREDONE bit.
+ */
+ dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTSTS);
+ }
+}
+
+/**
+ * dwc2_wait_for_mode() - Waits for the controller mode.
+ * @hsotg: Programming view of the DWC_otg controller.
+ * @host_mode: If true, waits for host mode, otherwise device mode.
+ */
+static void dwc2_wait_for_mode(struct dwc2_hsotg *hsotg,
+ bool host_mode)
+{
+ ktime_t start;
+ ktime_t end;
+ unsigned int timeout = 110;
+
+ dev_vdbg(hsotg->dev, "Waiting for %s mode\n",
+ host_mode ? "host" : "device");
+
+ start = ktime_get();
+
+ while (1) {
+ s64 ms;
+
+ if (dwc2_is_host_mode(hsotg) == host_mode) {
+ dev_vdbg(hsotg->dev, "%s mode set\n",
+ host_mode ? "Host" : "Device");
+ break;
+ }
+
+ end = ktime_get();
+ ms = ktime_to_ms(ktime_sub(end, start));
+
+ if (ms >= (s64)timeout) {
+ dev_warn(hsotg->dev, "%s: Couldn't set %s mode\n",
+ __func__, host_mode ? "host" : "device");
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ }
+}
+
+/**
+ * dwc2_iddig_filter_enabled() - Returns true if the IDDIG debounce
+ * filter is enabled.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
+{
+ u32 gsnpsid;
+ u32 ghwcfg4;
+
+ if (!dwc2_hw_is_otg(hsotg))
+ return false;
+
+ /* Check if core configuration includes the IDDIG filter. */
+ ghwcfg4 = dwc2_readl(hsotg, GHWCFG4);
+ if (!(ghwcfg4 & GHWCFG4_IDDIG_FILT_EN))
+ return false;
+
+ /*
+ * Check if the IDDIG debounce filter is bypassed. Available
+ * in core version >= 3.10a.
+ */
+ gsnpsid = dwc2_readl(hsotg, GSNPSID);
+ if (gsnpsid >= DWC2_CORE_REV_3_10a) {
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+
+ if (gotgctl & GOTGCTL_DBNCE_FLTR_BYPASS)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * dwc2_enter_hibernation() - Common function to enter hibernation.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @is_host: True if core is in host mode.
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg, int is_host)
+{
+ if (is_host)
+ return dwc2_host_enter_hibernation(hsotg);
+ else
+ return dwc2_gadget_enter_hibernation(hsotg);
+}
+
+/*
+ * dwc2_exit_hibernation() - Common function to exit from hibernation.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: Remote-wakeup, enabled in case of remote-wakeup.
+ * @reset: Enabled in case of restore with reset.
+ * @is_host: True if core is in host mode.
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ int reset, int is_host)
+{
+ if (is_host)
+ return dwc2_host_exit_hibernation(hsotg, rem_wakeup, reset);
+ else
+ return dwc2_gadget_exit_hibernation(hsotg, rem_wakeup, reset);
+}
+
+/*
+ * Do a soft reset of the core. Be careful with this because it
+ * resets all the internal state machines of the core.
+ */
+int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
+{
+ u32 greset;
+ bool wait_for_host_mode = false;
+
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ /*
+ * If the current mode is host, either due to the force mode
+ * bit being set (which persists after core reset) or the
+ * connector id pin, a core soft reset will temporarily reset
+ * the mode to device. A delay from the IDDIG debounce filter
+ * will occur before going back to host mode.
+ *
+ * Determine whether we will go back into host mode after a
+ * reset and account for this delay after the reset.
+ */
+ if (dwc2_iddig_filter_enabled(hsotg)) {
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ u32 gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+
+ if (!(gotgctl & GOTGCTL_CONID_B) ||
+ (gusbcfg & GUSBCFG_FORCEHOSTMODE)) {
+ wait_for_host_mode = true;
+ }
+ }
+
+ /* Core Soft Reset */
+ greset = dwc2_readl(hsotg, GRSTCTL);
+ greset |= GRSTCTL_CSFTRST;
+ dwc2_writel(hsotg, greset, GRSTCTL);
+
+ if ((hsotg->hw_params.snpsid & DWC2_CORE_REV_MASK) <
+ (DWC2_CORE_REV_4_20a & DWC2_CORE_REV_MASK)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL,
+ GRSTCTL_CSFTRST, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST\n",
+ __func__);
+ return -EBUSY;
+ }
+ } else {
+ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
+ GRSTCTL_CSFTRST_DONE, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST_DONE\n",
+ __func__);
+ return -EBUSY;
+ }
+ greset = dwc2_readl(hsotg, GRSTCTL);
+ greset &= ~GRSTCTL_CSFTRST;
+ greset |= GRSTCTL_CSFTRST_DONE;
+ dwc2_writel(hsotg, greset, GRSTCTL);
+ }
+
+ /*
+ * Switching from device mode to host mode by disconnecting the
+ * device cable causes the core to enter and exit hibernation, but
+ * the FIFO map is not cleared. This results in a WARNING
+ * (WARNING: CPU: 5 PID: 0 at drivers/usb/dwc2/gadget.c:307
+ * dwc2_hsotg_init_fifo+0x12/0x152 [dwc2]) if, while in host mode,
+ * we disconnect the micro-A-to-B host cable, because a core reset
+ * occurs. To avoid the WARNING, fifo_map is cleared here in
+ * dwc2_core_reset() by taking the configuration into account:
+ * fifo_map must be cleared only if the driver is configured in
+ * "CONFIG_USB_DWC2_PERIPHERAL" or "CONFIG_USB_DWC2_DUAL_ROLE"
+ * mode.
+ */
+ dwc2_clear_fifo_map(hsotg);
+
+ /* Wait for AHB master IDLE state */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! AHB Idle timeout GRSTCTL GRSTCTL_AHBIDLE\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ if (wait_for_host_mode && !skip_wait)
+ dwc2_wait_for_mode(hsotg, true);
+
+ return 0;
+}
+
+/**
+ * dwc2_force_mode() - Force the mode of the controller.
+ *
+ * Forcing the mode is needed for two cases:
+ *
+ * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
+ * controller to stay in a particular mode regardless of ID pin
+ * changes. We do this once during probe.
+ *
+ * 2) During probe we want to read reset values of the hw
+ * configuration registers that are only available in either host or
+ * device mode. We may need to force the mode if the current mode does
+ * not allow us to access the register in the mode that we want.
+ *
+ * In either case it only makes sense to force the mode if the
+ * controller hardware is OTG capable.
+ *
+ * Checks are done in this function to determine whether doing a force
+ * would be valid or not.
+ *
+ * If a force is done, it requires an IDDIG debounce filter delay if
+ * the filter is configured and enabled. We poll the current mode of
+ * the controller to account for this delay.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @host: Host mode flag
+ */
+void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
+{
+ u32 gusbcfg;
+ u32 set;
+ u32 clear;
+
+ dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");
+
+ /*
+ * Force mode has no effect if the hardware is not OTG.
+ */
+ if (!dwc2_hw_is_otg(hsotg))
+ return;
+
+ /*
+ * If dr_mode is either peripheral or host only, there is no
+ * need to ever force the mode to the opposite mode.
+ */
+ if (WARN_ON(host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
+ return;
+
+ if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST))
+ return;
+
+ gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+
+ set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
+ clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
+
+ gusbcfg &= ~clear;
+ gusbcfg |= set;
+ dwc2_writel(hsotg, gusbcfg, GUSBCFG);
+
+ dwc2_wait_for_mode(hsotg, host);
+}
+
+/**
+ * dwc2_clear_force_mode() - Clears the force mode bits.
+ *
+ * After clearing the bits, wait up to 100 ms to account for any
+ * potential IDDIG filter delay. We can't know if we expect this delay
+ * or not because the value of the connector ID status is affected by
+ * the force mode. We only need to call this once during probe if
+ * dr_mode == OTG.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
+{
+ u32 gusbcfg;
+
+ if (!dwc2_hw_is_otg(hsotg))
+ return;
+
+ dev_dbg(hsotg->dev, "Clearing force mode bits\n");
+
+ gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+ gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
+ gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
+ dwc2_writel(hsotg, gusbcfg, GUSBCFG);
+
+ if (dwc2_iddig_filter_enabled(hsotg))
+ msleep(100);
+}
+
+/*
+ * Sets or clears force mode based on the dr_mode parameter.
+ */
+void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
+{
+ switch (hsotg->dr_mode) {
+ case USB_DR_MODE_HOST:
+ /*
+ * NOTE: This is required for some Rockchip SoC-based
+ * platforms with host-only dwc2.
+ */
+ if (!dwc2_hw_is_otg(hsotg))
+ msleep(50);
+
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ dwc2_force_mode(hsotg, false);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc2_clear_force_mode(hsotg);
+ break;
+ default:
+ dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
+ __func__, hsotg->dr_mode);
+ break;
+ }
+}
+
+/*
+ * dwc2_enable_acg - enable active clock gating feature
+ */
+void dwc2_enable_acg(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->params.acg_enable) {
+ u32 pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
+
+ dev_dbg(hsotg->dev, "Enabling Active Clock Gating\n");
+ pcgcctl1 |= PCGCCTL1_GATEEN;
+ dwc2_writel(hsotg, pcgcctl1, PCGCCTL1);
+ }
+}
+
+/**
+ * dwc2_dump_host_registers() - Prints the host registers
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
+{
+#ifdef DEBUG
+ u32 __iomem *addr;
+ int i;
+
+ dev_dbg(hsotg->dev, "Host Global Registers\n");
+ addr = hsotg->regs + HCFG;
+ dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCFG));
+ addr = hsotg->regs + HFIR;
+ dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HFIR));
+ addr = hsotg->regs + HFNUM;
+ dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HFNUM));
+ addr = hsotg->regs + HPTXSTS;
+ dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HPTXSTS));
+ addr = hsotg->regs + HAINT;
+ dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HAINT));
+ addr = hsotg->regs + HAINTMSK;
+ dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HAINTMSK));
+ if (hsotg->params.dma_desc_enable) {
+ addr = hsotg->regs + HFLBADDR;
+ dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HFLBADDR));
+ }
+
+ addr = hsotg->regs + HPRT0;
+ dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HPRT0));
+
+ for (i = 0; i < hsotg->params.host_channels; i++) {
+ dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
+ addr = hsotg->regs + HCCHAR(i);
+ dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCCHAR(i)));
+ addr = hsotg->regs + HCSPLT(i);
+ dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCSPLT(i)));
+ addr = hsotg->regs + HCINT(i);
+ dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCINT(i)));
+ addr = hsotg->regs + HCINTMSK(i);
+ dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCINTMSK(i)));
+ addr = hsotg->regs + HCTSIZ(i);
+ dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCTSIZ(i)));
+ addr = hsotg->regs + HCDMA(i);
+ dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HCDMA(i)));
+ if (hsotg->params.dma_desc_enable) {
+ addr = hsotg->regs + HCDMAB(i);
+ dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg,
+ HCDMAB(i)));
+ }
+ }
+#endif
+}
+
+/**
+ * dwc2_dump_global_registers() - Prints the core global registers
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
+{
+#ifdef DEBUG
+ u32 __iomem *addr;
+
+ dev_dbg(hsotg->dev, "Core Global Registers\n");
+ addr = hsotg->regs + GOTGCTL;
+ dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GOTGCTL));
+ addr = hsotg->regs + GOTGINT;
+ dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GOTGINT));
+ addr = hsotg->regs + GAHBCFG;
+ dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GAHBCFG));
+ addr = hsotg->regs + GUSBCFG;
+ dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GUSBCFG));
+ addr = hsotg->regs + GRSTCTL;
+ dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GRSTCTL));
+ addr = hsotg->regs + GINTSTS;
+ dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GINTSTS));
+ addr = hsotg->regs + GINTMSK;
+ dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GINTMSK));
+ addr = hsotg->regs + GRXSTSR;
+ dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GRXSTSR));
+ addr = hsotg->regs + GRXFSIZ;
+ dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GRXFSIZ));
+ addr = hsotg->regs + GNPTXFSIZ;
+ dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GNPTXFSIZ));
+ addr = hsotg->regs + GNPTXSTS;
+ dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GNPTXSTS));
+ addr = hsotg->regs + GI2CCTL;
+ dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GI2CCTL));
+ addr = hsotg->regs + GPVNDCTL;
+ dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GPVNDCTL));
+ addr = hsotg->regs + GGPIO;
+ dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GGPIO));
+ addr = hsotg->regs + GUID;
+ dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GUID));
+ addr = hsotg->regs + GSNPSID;
+ dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GSNPSID));
+ addr = hsotg->regs + GHWCFG1;
+ dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG1));
+ addr = hsotg->regs + GHWCFG2;
+ dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG2));
+ addr = hsotg->regs + GHWCFG3;
+ dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG3));
+ addr = hsotg->regs + GHWCFG4;
+ dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG4));
+ addr = hsotg->regs + GLPMCFG;
+ dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GLPMCFG));
+ addr = hsotg->regs + GPWRDN;
+ dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GPWRDN));
+ addr = hsotg->regs + GDFIFOCFG;
+ dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, GDFIFOCFG));
+ addr = hsotg->regs + HPTXFSIZ;
+ dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, HPTXFSIZ));
+
+ addr = hsotg->regs + PCGCTL;
+ dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, dwc2_readl(hsotg, PCGCTL));
+#endif
+}
+
+/**
+ * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @num: Tx FIFO to flush
+ */
+void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
+{
+ u32 greset;
+
+ dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
+
+ /* Wait for AHB master IDLE state */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000))
+ dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSTCTL\n",
+ __func__);
+
+ greset = GRSTCTL_TXFFLSH;
+ greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
+ dwc2_writel(hsotg, greset, GRSTCTL);
+
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 10000))
+ dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_TXFFLSH\n",
+ __func__);
+
+ /* Wait for at least 3 PHY Clocks */
+ udelay(1);
+}
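+
+/*
+ * Usage note (illustrative, an assumption from the GRSTCTL.TxFNum field
+ * encoding): the value 0x10 means "flush all Tx FIFOs", so a caller that
+ * needs to flush everything can do:
+ *
+ *	dwc2_flush_tx_fifo(hsotg, 0x10);
+ */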
+
+/**
+ * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
+{
+ u32 greset;
+
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ /* Wait for AHB master IDLE state */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000))
+ dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSTCTL\n",
+ __func__);
+
+ greset = GRSTCTL_RXFFLSH;
+ dwc2_writel(hsotg, greset, GRSTCTL);
+
+ /* Wait for RxFIFO flush done */
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_RXFFLSH, 10000))
+ dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_RXFFLSH\n",
+ __func__);
+
+ /* Wait for at least 3 PHY Clocks */
+ udelay(1);
+}
+
+bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
+{
+ if (dwc2_readl(hsotg, GSNPSID) == 0xffffffff)
+ return false;
+ else
+ return true;
+}
+
+/**
+ * dwc2_enable_global_interrupts() - Enables the controller's Global
+ * Interrupt in the AHB Config register
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
+
+ ahbcfg |= GAHBCFG_GLBL_INTR_EN;
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
+}
+
+/**
+ * dwc2_disable_global_interrupts() - Disables the controller's Global
+ * Interrupt in the AHB Config register
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
+
+ ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
+}
+
+/* Returns the controller's GHWCFG2.OTG_MODE. */
+unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg)
+{
+ u32 ghwcfg2 = dwc2_readl(hsotg, GHWCFG2);
+
+ return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+}
+
+/* Returns true if the controller is capable of DRD. */
+bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
+{
+ unsigned int op_mode = dwc2_op_mode(hsotg);
+
+ return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
+ (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
+ (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
+}
+
+/* Returns true if the controller is host-only. */
+bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
+{
+ unsigned int op_mode = dwc2_op_mode(hsotg);
+
+ return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
+ (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
+}
+
+/* Returns true if the controller is device-only. */
+bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
+{
+ unsigned int op_mode = dwc2_op_mode(hsotg);
+
+ return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
+ (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
+}
+
+/**
+ * dwc2_hsotg_wait_bit_set - Waits for bit to be set.
+ * @hsotg: Programming view of DWC_otg controller.
+ * @offset: Register's offset where bit/bits must be set.
+ * @mask: Mask of the bit/bits which must be set.
+ * @timeout: Timeout to wait.
+ *
+ * Return: 0 if bit/bits are set or -ETIMEDOUT in case of timeout.
+ */
+int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
+ u32 timeout)
+{
+ u32 i;
+
+ for (i = 0; i < timeout; i++) {
+ if (dwc2_readl(hsotg, offset) & mask)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * dwc2_hsotg_wait_bit_clear - Waits for bit to be clear.
+ * @hsotg: Programming view of DWC_otg controller.
+ * @offset: Register's offset where bit/bits must be cleared.
+ * @mask: Mask of the bit/bits which must be cleared.
+ * @timeout: Timeout to wait.
+ *
+ * Return: 0 if bit/bits are cleared or -ETIMEDOUT in case of timeout.
+ */
+int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
+ u32 timeout)
+{
+ u32 i;
+
+ for (i = 0; i < timeout; i++) {
+ if (!(dwc2_readl(hsotg, offset) & mask))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Initializes the FSLSPClkSel field of the HCFG register depending on the
+ * PHY type
+ */
+void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
+{
+ u32 hcfg, val;
+
+ if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ hsotg->params.ulpi_fs_ls) ||
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* Full speed PHY */
+ val = HCFG_FSLSPCLKSEL_48_MHZ;
+ } else {
+ /* High speed PHY running at full speed or high speed */
+ val = HCFG_FSLSPCLKSEL_30_60_MHZ;
+ }
+
+ dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
+ hcfg = dwc2_readl(hsotg, HCFG);
+ hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+ hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
+ dwc2_writel(hsotg, hcfg, HCFG);
+}
+
+static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg, ggpio, i2cctl;
+ int retval = 0;
+
+ /*
+ * core_init() is now called on every switch so only call the
+ * following for the first time through
+ */
+ if (select_phy) {
+ dev_dbg(hsotg->dev, "FS PHY selected\n");
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ if (!(usbcfg & GUSBCFG_PHYSEL)) {
+ usbcfg |= GUSBCFG_PHYSEL;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Reset after a PHY select */
+ retval = dwc2_core_reset(hsotg, false);
+
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
+ }
+
+ if (hsotg->params.activate_stm_fs_transceiver) {
+ ggpio = dwc2_readl(hsotg, GGPIO);
+ if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
+ dev_dbg(hsotg->dev, "Activating transceiver\n");
+ /*
+ * STM32F4x9 uses the GGPIO register as general
+ * core configuration register.
+ */
+ ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
+ dwc2_writel(hsotg, ggpio, GGPIO);
+ }
+ }
+ }
+
+ /*
+ * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
+ * do this on HNP Dev/Host mode switches (done in dev_init and
+ * host_init).
+ */
+ if (dwc2_is_host_mode(hsotg))
+ dwc2_init_fs_ls_pclk_sel(hsotg);
+
+ if (hsotg->params.i2c_enable) {
+ dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
+
+ /* Program GUSBCFG.OtgUtmiFsSel to I2C */
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Program GI2CCTL.I2CEn */
+ i2cctl = dwc2_readl(hsotg, GI2CCTL);
+ i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
+ i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
+ i2cctl &= ~GI2CCTL_I2CEN;
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
+ i2cctl |= GI2CCTL_I2CEN;
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
+ }
+
+ return retval;
+}
+
+static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg, usbcfg_old;
+ int retval = 0;
+
+ if (!select_phy)
+ return 0;
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg_old = usbcfg;
+
+ /*
+ * HS PHY parameters. These parameters are preserved during soft reset
+ * so only program the first time. Do a soft reset immediately after
+ * setting phyif.
+ */
+ switch (hsotg->params.phy_type) {
+ case DWC2_PHY_TYPE_PARAM_ULPI:
+ /* ULPI interface */
+ dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
+ usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
+ usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
+ if (hsotg->params.phy_ulpi_ddr)
+ usbcfg |= GUSBCFG_DDRSEL;
+
+ /* Set external VBUS indicator as needed. */
+ if (hsotg->params.oc_disable)
+ usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
+ GUSBCFG_INDICATORPASSTHROUGH);
+ break;
+ case DWC2_PHY_TYPE_PARAM_UTMI:
+ /* UTMI+ interface */
+ dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
+ usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
+ if (hsotg->params.phy_utmi_width == 16)
+ usbcfg |= GUSBCFG_PHYIF16;
+ break;
+ default:
+ dev_err(hsotg->dev, "FS PHY selected at HS!\n");
+ break;
+ }
+
+ if (usbcfg != usbcfg_old) {
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Reset after setting the PHY parameters */
+ retval = dwc2_core_reset(hsotg, false);
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
+{
+ u32 usbcfg;
+
+ if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
+ return;
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+
+ usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
+ if (hsotg->params.phy_utmi_width == 16)
+ usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
+ else
+ usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
+
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+}
+
+int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg;
+ u32 otgctl;
+ int retval = 0;
+
+ if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* If FS/LS mode with FS/LS PHY */
+ retval = dwc2_fs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
+ } else {
+ /* High speed PHY */
+ retval = dwc2_hs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
+
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_set_turnaround_time(hsotg);
+ }
+
+ if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ hsotg->params.ulpi_fs_ls) {
+ dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg |= GUSBCFG_ULPI_FS_LS;
+ usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ } else {
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg &= ~GUSBCFG_ULPI_FS_LS;
+ usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ }
+
+ if (!hsotg->params.activate_ingenic_overcurrent_detection) {
+ if (dwc2_is_host_mode(hsotg)) {
+ otgctl = readl(hsotg->regs + GOTGCTL);
+ otgctl |= GOTGCTL_VBVALOEN | GOTGCTL_VBVALOVAL;
+ writel(otgctl, hsotg->regs + GOTGCTL);
+ }
+ }
+
+ return retval;
+}
+
+MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
+MODULE_AUTHOR("Synopsys, Inc.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
new file mode 100644
index 000000000..40cf2880d
--- /dev/null
+++ b/drivers/usb/dwc2/core.h
@@ -0,0 +1,1526 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * core.h - DesignWare HS OTG Controller common declarations
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+#ifndef __DWC2_CORE_H__
+#define __DWC2_CORE_H__
+
+#include <linux/acpi.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+#include "hw.h"
+
+/*
+ * Suggested defines for tracers:
+ * - no_printk: Disable tracing
+ * - pr_info: Print this info to the console
+ * - trace_printk: Print this info to trace buffer (good for verbose logging)
+ */
+
+#define DWC2_TRACE_SCHEDULER no_printk
+#define DWC2_TRACE_SCHEDULER_VB no_printk
+
+/* Detailed scheduler tracing, but won't overwhelm console */
+#define dwc2_sch_dbg(hsotg, fmt, ...) \
+ DWC2_TRACE_SCHEDULER(pr_fmt("%s: SCH: " fmt), \
+ dev_name(hsotg->dev), ##__VA_ARGS__)
+
+/* Verbose scheduler tracing */
+#define dwc2_sch_vdbg(hsotg, fmt, ...) \
+ DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
+ dev_name(hsotg->dev), ##__VA_ARGS__)
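+
+/*
+ * For example (illustrative): to route the verbose scheduler traces to
+ * the ftrace buffer instead of dropping them, redefine the tracer above:
+ *
+ *	#define DWC2_TRACE_SCHEDULER_VB	trace_printk
+ */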
+
+/* Maximum number of Endpoints/HostChannels */
+#define MAX_EPS_CHANNELS 16
+
+/* dwc2-hsotg declarations */
+static const char * const dwc2_hsotg_supply_names[] = {
+ "vusb_d", /* digital USB supply, 1.2V */
+ "vusb_a", /* analog USB supply, 1.1V */
+};
+
+#define DWC2_NUM_SUPPLIES ARRAY_SIZE(dwc2_hsotg_supply_names)
+
+/*
+ * EP0_MPS_LIMIT
+ *
+ * Unfortunately there seems to be a limit of the amount of data that can
+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3
+ * packets (which practically means 1 packet and 63 bytes of data) when the
+ * MPS is set to 64.
+ *
+ * This means if we are wanting to move >127 bytes of data, we need to
+ * split the transactions up, but just doing one packet at a time does
+ * not work (this may be due to an implicit DATA0 PID on the first packet of the
+ * transaction) and doing 2 packets is outside the controller's limits.
+ *
+ * If we try to lower the MPS size for EP0, then no transfers work properly
+ * for EP0, and the system will fail basic enumeration. As no cause for this
+ * has currently been found, we cannot support any large IN transfers for
+ * EP0.
+ */
+#define EP0_MPS_LIMIT 64
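+
+/*
+ * Illustrative sketch (not driver code): a caller queuing an ep0 IN
+ * reply must therefore keep each request within this limit, e.g.:
+ *
+ *	req->length = min_t(unsigned int, len, EP0_MPS_LIMIT);
+ */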
+
+struct dwc2_hsotg;
+struct dwc2_hsotg_req;
+
+/**
+ * struct dwc2_hsotg_ep - driver endpoint definition.
+ * @ep: The gadget layer representation of the endpoint.
+ * @name: The driver-generated name for the endpoint (the name array
+ * passed to the USB core).
+ * @queue: Queue of requests for this endpoint.
+ * @parent: Reference back to the parent device structure.
+ * @req: The current request that the endpoint is processing. This is
+ * used to indicate a request has been loaded onto the endpoint
+ * and has yet to be completed (maybe due to data move, or simply
+ * awaiting an ack from the core that all the data has been completed).
+ * @debugfs: File entry for debugfs file for this endpoint.
+ * @dir_in: Set to true if this endpoint is of the IN direction, which
+ * means that it is sending data to the Host.
+ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
+ * @index: The index for the endpoint registers.
+ * @mc: Multi Count - number of transactions per microframe
+ * @interval: Interval for periodic endpoints, in frames or microframes.
+ * @halted: Set if the endpoint has been halted.
+ * @periodic: Set if this is a periodic ep, such as Interrupt
+ * @isochronous: Set if this is an isochronous ep
+ * @send_zlp: Set if we need to send a zero-length packet.
+ * @wedged: Set if ep is wedged.
+ * @desc_list_dma: The DMA address of descriptor chain currently in use.
+ * @desc_list: Pointer to descriptor DMA chain head currently in use.
+ * @desc_count: Count of entries within the DMA descriptor chain of EP.
+ * @next_desc: index of next free descriptor in the ISOC chain under SW control.
+ * @compl_desc: index of next descriptor to be completed by xFerComplete
+ * @total_data: The total number of data bytes done.
+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)
+ * @fifo_index: For Dedicated FIFO operation, only FIFO0 can be used for EP0.
+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
+ * @last_load: The offset of data for the last start of request.
+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ * @target_frame: Targeted frame num to setup next ISOC transfer
+ * @frame_overrun: Indicates SOF number overrun in DSTS
+ *
+ * This is the driver's state for each registered endpoint, allowing it
+ * to keep track of transactions that need doing. Each endpoint has a
+ * lock to protect the state, to try and avoid using an overall lock
+ * for the host controller as much as possible.
+ *
+ * For periodic IN endpoints, we have fifo_size and fifo_load to try
+ * and keep track of the amount of data in the periodic FIFO for each
+ * of these as we don't have a status register that tells us how much
+ * is in each of them. (Note: this may actually be useless information,
+ * as in shared-FIFO mode the periodic IN FIFO acts more like a
+ * single-frame packet buffer than a FIFO.)
+ */
+struct dwc2_hsotg_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct dwc2_hsotg *parent;
+ struct dwc2_hsotg_req *req;
+ struct dentry *debugfs;
+
+ unsigned long total_data;
+ unsigned int size_loaded;
+ unsigned int last_load;
+ unsigned int fifo_load;
+ unsigned short fifo_size;
+ unsigned short fifo_index;
+
+ unsigned char dir_in;
+ unsigned char map_dir;
+ unsigned char index;
+ unsigned char mc;
+ u16 interval;
+
+ unsigned int halted:1;
+ unsigned int periodic:1;
+ unsigned int isochronous:1;
+ unsigned int send_zlp:1;
+ unsigned int wedged:1;
+ unsigned int target_frame;
+#define TARGET_FRAME_INITIAL 0xFFFFFFFF
+ bool frame_overrun;
+
+ dma_addr_t desc_list_dma;
+ struct dwc2_dma_desc *desc_list;
+ u8 desc_count;
+
+ unsigned int next_desc;
+ unsigned int compl_desc;
+
+ char name[10];
+};
+
+/**
+ * struct dwc2_hsotg_req - data transfer request
+ * @req: The USB gadget request
+ * @queue: The list of requests for the endpoint this is queued for.
+ * @saved_req_buf: variable to save req.buf when bounce buffers are used.
+ */
+struct dwc2_hsotg_req {
+ struct usb_request req;
+ struct list_head queue;
+ void *saved_req_buf;
+};
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+#define call_gadget(_hs, _entry) \
+do { \
+ if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
+ (_hs)->driver && (_hs)->driver->_entry) { \
+ spin_unlock(&_hs->lock); \
+ (_hs)->driver->_entry(&(_hs)->gadget); \
+ spin_lock(&_hs->lock); \
+ } \
+} while (0)
+#else
+#define call_gadget(_hs, _entry) do {} while (0)
+#endif
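+
+/*
+ * Example (illustrative): interrupt paths invoke the gadget callbacks
+ * with hsotg->lock held, e.g.:
+ *
+ *	call_gadget(hsotg, suspend);
+ *
+ * The macro drops and reacquires the lock around the callback so the
+ * gadget driver does not run under the controller spinlock.
+ */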
+
+struct dwc2_hsotg;
+struct dwc2_host_chan;
+
+/* Device States */
+enum dwc2_lx_state {
+ DWC2_L0, /* On state */
+ DWC2_L1, /* LPM sleep state */
+ DWC2_L2, /* USB suspend state */
+ DWC2_L3, /* Off state */
+};
+
+/* Gadget ep0 states */
+enum dwc2_ep0_state {
+ DWC2_EP0_SETUP,
+ DWC2_EP0_DATA_IN,
+ DWC2_EP0_DATA_OUT,
+ DWC2_EP0_STATUS_IN,
+ DWC2_EP0_STATUS_OUT,
+};
+
+/**
+ * struct dwc2_core_params - Parameters for configuring the core
+ *
+ * @otg_caps: Specifies the OTG capabilities. OTG caps from the platform parameters,
+ * used to set up:
+ * - HNP and SRP capable
+ * - SRP Only capable
+ * - No HNP/SRP capable (always available)
+ * Defaults to best available option
+ * - OTG revision number the device is compliant with, in binary-coded
+ * decimal (i.e. 2.0 is 0200H). (see struct usb_otg_caps)
+ * @host_dma: Specifies whether to use slave or DMA mode for accessing
+ * the data FIFOs. The driver will automatically detect the
+ * value for this parameter if none is specified.
+ * 0 - Slave (always available)
+ * 1 - DMA (default, if available)
+ * @dma_desc_enable: When DMA mode is enabled, specifies whether to use
+ * address DMA mode or descriptor DMA mode for accessing
+ * the data FIFOs. The driver will automatically detect the
+ * value for this if none is specified.
+ * 0 - Address DMA
+ * 1 - Descriptor DMA (default, if available)
+ * @dma_desc_fs_enable: When DMA mode is enabled, specifies whether to use
+ * address DMA mode or descriptor DMA mode for accessing
+ * the data FIFOs in Full Speed mode only. The driver
+ * will automatically detect the value for this if none is
+ * specified.
+ * 0 - Address DMA
+ * 1 - Descriptor DMA in FS (default, if available)
+ * @speed: Specifies the maximum speed of operation in host and
+ * device mode. The actual speed depends on the speed of
+ * the attached device and the value of phy_type.
+ * 0 - High Speed
+ * (default when phy_type is UTMI+ or ULPI)
+ * 1 - Full Speed
+ * (default when phy_type is Full Speed)
+ * @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
+ * 1 - Allow dynamic FIFO sizing (default, if available)
+ * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
+ * are enabled for non-periodic IN endpoints in device
+ * mode.
+ * @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when
+ * dynamic FIFO sizing is enabled
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @host_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
+ * in host mode when dynamic FIFO sizing is enabled
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @host_perio_tx_fifo_size: Number of 4-byte words in the periodic Tx FIFO in
+ * host mode when dynamic FIFO sizing is enabled
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @max_transfer_size: The maximum transfer size supported, in bytes
+ * 2047 to 65,535
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @max_packet_count: The maximum number of packets in a transfer
+ * 15 to 511
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @host_channels: The number of host channel registers to use
+ * 1 to 16
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @phy_type: Specifies the type of PHY interface to use. By default,
+ * the driver will automatically detect the phy_type.
+ * 0 - Full Speed Phy
+ * 1 - UTMI+ Phy
+ * 2 - ULPI Phy
+ * Defaults to best available option (2, 1, then 0)
+ * @phy_utmi_width: Specifies the UTMI+ Data Width (in bits). This parameter
+ * is applicable for a phy_type of UTMI+ or ULPI. (For a
+ * ULPI phy_type, this parameter indicates the data width
+ * between the MAC and the ULPI Wrapper.) Also, this
+ * parameter is applicable only if the OTG_HSPHY_WIDTH cC
+ * parameter was set to "8 and 16 bits", meaning that the
+ * core has been configured to work at either data path
+ * width.
+ * 8 or 16 (default 16 if available)
+ * @phy_ulpi_ddr: Specifies whether the ULPI operates at double or single
+ * data rate. This parameter is only applicable if phy_type
+ * is ULPI.
+ * 0 - single data rate ULPI interface with 8 bit wide
+ * data bus (default)
+ * 1 - double data rate ULPI interface with 4 bit wide
+ * data bus
+ * @phy_ulpi_ext_vbus: For a ULPI phy, specifies whether to use the internal or
+ * external supply to drive the VBus
+ * 0 - Internal supply (default)
+ * 1 - External supply
+ * @i2c_enable: Specifies whether to use the I2C interface for a full
+ * speed PHY. This parameter is only applicable if phy_type
+ * is FS.
+ * 0 - No (default)
+ * 1 - Yes
+ * @ipg_isoc_en: Indicates whether IPG support is enabled or disabled.
+ * 0 - Disable (default)
+ * 1 - Enable
+ * @acg_enable: For enabling Active Clock Gating in the controller
+ * 0 - No
+ * 1 - Yes
+ * @ulpi_fs_ls: Make ULPI phy operate in FS/LS mode only
+ * 0 - No (default)
+ * 1 - Yes
+ * @host_support_fs_ls_low_power: Specifies whether low power mode is supported
+ * when attached to a Full Speed or Low Speed device in
+ * host mode.
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ * @host_ls_low_power_phy_clk: Specifies the PHY clock rate in low power mode
+ * when connected to a Low Speed device in host
+ * mode. This parameter is applicable only if
+ * host_support_fs_ls_low_power is enabled.
+ * 0 - 48 MHz
+ * (default when phy_type is UTMI+ or ULPI)
+ * 1 - 6 MHz
+ * (default when phy_type is Full Speed)
+ * @oc_disable: Flag to disable overcurrent condition.
+ * 0 - Allow the overcurrent condition to be detected
+ * 1 - Disable overcurrent condition detection
+ * @ts_dline: Enable Term Select Dline pulsing
+ * 0 - No (default)
+ * 1 - Yes
+ * @reload_ctl: Allow dynamic reloading of HFIR register during runtime
+ * 0 - No (default for core < 2.92a)
+ * 1 - Yes (default for core >= 2.92a)
+ * @ahbcfg: This field allows the default value of the GAHBCFG
+ * register to be overridden
+ * -1 - GAHBCFG value will be set to 0x06
+ * (INCR, default)
+ * all others - GAHBCFG value will be overridden with
+ * this value
+ * Not all bits can be controlled like this, the
+ * bits defined by GAHBCFG_CTRL_MASK are controlled
+ * by the driver and are ignored in this
+ * configuration value.
+ * @uframe_sched: True to enable the microframe scheduler
+ * @external_id_pin_ctl: Specifies whether ID pin is handled externally.
+ * Disable CONIDSTSCHNG controller interrupt in such
+ * case.
+ * 0 - No (default)
+ * 1 - Yes
+ * @power_down: Specifies whether the controller support power_down.
+ * If power_down is enabled, the controller will enter
+ * power_down in both peripheral and host mode when
+ * needed.
+ * 0 - No (default)
+ * 1 - Partial power down
+ * 2 - Hibernation
+ * @no_clock_gating: Specifies whether to avoid clock gating feature.
+ * 0 - No (use clock gating)
+ * 1 - Yes (avoid it)
+ * @lpm: Enable LPM support.
+ * 0 - No
+ * 1 - Yes
+ * @lpm_clock_gating: Enable core PHY clock gating.
+ * 0 - No
+ * 1 - Yes
+ * @besl: Enable LPM Errata support.
+ * 0 - No
+ * 1 - Yes
+ * @hird_threshold_en: HIRD or HIRD Threshold enable.
+ * 0 - No
+ * 1 - Yes
+ * @hird_threshold: Value of BESL or HIRD Threshold.
+ * @ref_clk_per: Specifies the period of ref_clk, in picoseconds.
+ * 62500 - 16MHz
+ * 58823 - 17MHz
+ * 52083 - 19.2MHz
+ * 50000 - 20MHz
+ * 41666 - 24MHz
+ * 33333 - 30MHz (default)
+ * 25000 - 40MHz
+ * @sof_cnt_wkup_alert: Specifies the number of SOFs after which the
+ * controller should generate an interrupt if the
+ * device has remained in the L1 state for that period.
+ * This is used by SW to initiate Remote WakeUp in the
+ * controller so as to sync to the uF number from the host.
+ * @activate_stm_fs_transceiver: Activate internal transceiver using GGPIO
+ * register.
+ * 0 - Deactivate the transceiver (default)
+ * 1 - Activate the transceiver
+ * @activate_stm_id_vb_detection: Activate external ID pin and Vbus level
+ * detection using GGPIO register.
+ * 0 - Deactivate the external level detection (default)
+ * 1 - Activate the external level detection
+ * @activate_ingenic_overcurrent_detection: Activate Ingenic overcurrent
+ * detection.
+ * 0 - Deactivate the overcurrent detection
+ * 1 - Activate the overcurrent detection (default)
+ * @g_dma: Enables gadget dma usage (default: autodetect).
+ * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
+ * @g_rx_fifo_size: The periodic rx fifo size for the device, in
+ * DWORDS from 16-32768 (default: 2048 if
+ * possible, otherwise autodetect).
+ * @g_np_tx_fifo_size: The non-periodic tx fifo size for the device in
+ * DWORDS from 16-32768 (default: 1024 if
+ * possible, otherwise autodetect).
+ * @g_tx_fifo_size: An array of TX fifo sizes in dedicated fifo
+ * mode. Each value corresponds to one EP
+ * starting from EP1 (max 15 values). Sizes are
+ * in DWORDS with possible values from
+ * 16-32768 (default: 256, 256, 256, 256, 768,
+ * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0).
+ * @change_speed_quirk: Change the speed configuration to DWC2_SPEED_PARAM_FULL
+ * while a full- or low-speed device is connected, and
+ * change it back to DWC2_SPEED_PARAM_HIGH when the
+ * device is gone.
+ * 0 - No (default)
+ * 1 - Yes
+ * @service_interval: Enable service interval based scheduling.
+ * 0 - No
+ * 1 - Yes
+ *
+ * The following parameters may be specified when starting the module. These
+ * parameters define how the DWC_otg controller should be configured. A
+ * value of -1 (or any other out of range value) for any parameter means
+ * to read the value from hardware (if possible) or use the builtin
+ * default described above.
+ */
+struct dwc2_core_params {
+ struct usb_otg_caps otg_caps;
+ u8 phy_type;
+#define DWC2_PHY_TYPE_PARAM_FS 0
+#define DWC2_PHY_TYPE_PARAM_UTMI 1
+#define DWC2_PHY_TYPE_PARAM_ULPI 2
+
+ u8 speed;
+#define DWC2_SPEED_PARAM_HIGH 0
+#define DWC2_SPEED_PARAM_FULL 1
+#define DWC2_SPEED_PARAM_LOW 2
+
+ u8 phy_utmi_width;
+ bool phy_ulpi_ddr;
+ bool phy_ulpi_ext_vbus;
+ bool enable_dynamic_fifo;
+ bool en_multiple_tx_fifo;
+ bool i2c_enable;
+ bool acg_enable;
+ bool ulpi_fs_ls;
+ bool ts_dline;
+ bool reload_ctl;
+ bool uframe_sched;
+ bool external_id_pin_ctl;
+
+ int power_down;
+#define DWC2_POWER_DOWN_PARAM_NONE 0
+#define DWC2_POWER_DOWN_PARAM_PARTIAL 1
+#define DWC2_POWER_DOWN_PARAM_HIBERNATION 2
+ bool no_clock_gating;
+
+ bool lpm;
+ bool lpm_clock_gating;
+ bool besl;
+ bool hird_threshold_en;
+ bool service_interval;
+ u8 hird_threshold;
+ bool activate_stm_fs_transceiver;
+ bool activate_stm_id_vb_detection;
+ bool activate_ingenic_overcurrent_detection;
+ bool ipg_isoc_en;
+ u16 max_packet_count;
+ u32 max_transfer_size;
+ u32 ahbcfg;
+
+ /* GREFCLK parameters */
+ u32 ref_clk_per;
+ u16 sof_cnt_wkup_alert;
+
+ /* Host parameters */
+ bool host_dma;
+ bool dma_desc_enable;
+ bool dma_desc_fs_enable;
+ bool host_support_fs_ls_low_power;
+ bool host_ls_low_power_phy_clk;
+ bool oc_disable;
+
+ u8 host_channels;
+ u16 host_rx_fifo_size;
+ u16 host_nperio_tx_fifo_size;
+ u16 host_perio_tx_fifo_size;
+
+ /* Gadget parameters */
+ bool g_dma;
+ bool g_dma_desc;
+ u32 g_rx_fifo_size;
+ u32 g_np_tx_fifo_size;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
+
+ bool change_speed_quirk;
+};
+
+/**
+ * struct dwc2_hw_params - Autodetected parameters.
+ *
+ * These parameters are the various parameters read from hardware
+ * registers during initialization. They typically contain the best
+ * supported or maximum value that can be configured in the
+ * corresponding dwc2_core_params value.
+ *
+ * The values that are not in dwc2_core_params are documented below.
+ *
+ * @op_mode: Mode of Operation
+ * 0 - HNP- and SRP-Capable OTG (Host & Device)
+ * 1 - SRP-Capable OTG (Host & Device)
+ * 2 - Non-HNP and Non-SRP Capable OTG (Host & Device)
+ * 3 - SRP-Capable Device
+ * 4 - Non-OTG Device
+ * 5 - SRP-Capable Host
+ * 6 - Non-OTG Host
+ * @arch: Architecture
+ * 0 - Slave only
+ * 1 - External DMA
+ * 2 - Internal DMA
+ * @ipg_isoc_en: This feature indicates that the controller supports
+ * the worst-case scenario of Rx followed by Rx
+ * Interpacket Gap (IPG) (32 bit times) as per the UTMI
+ * specification for any token following an ISOC OUT token.
+ * 0 - Don't support
+ * 1 - Support
+ * @power_optimized: Are power optimizations enabled?
+ * @num_dev_ep: Number of device endpoints available
+ * @num_dev_in_eps: Number of device IN endpoints available
+ * @num_dev_perio_in_ep: Number of device periodic IN endpoints
+ * available
+ * @dev_token_q_depth: Device Mode IN Token Sequence Learning Queue
+ * Depth
+ * 0 to 30
+ * @host_perio_tx_q_depth:
+ * Host Mode Periodic Request Queue Depth
+ * 2, 4 or 8
+ * @nperio_tx_q_depth:
+ * Non-Periodic Request Queue Depth
+ * 2, 4 or 8
+ * @hs_phy_type: High-speed PHY interface type
+ * 0 - High-speed interface not supported
+ * 1 - UTMI+
+ * 2 - ULPI
+ * 3 - UTMI+ and ULPI
+ * @fs_phy_type: Full-speed PHY interface type
+ * 0 - Full speed interface not supported
+ * 1 - Dedicated full speed interface
+ * 2 - FS pins shared with UTMI+ pins
+ * 3 - FS pins shared with ULPI pins
+ * @total_fifo_size: Total internal RAM for FIFOs (bytes)
+ * @hibernation: Is hibernation enabled?
+ * @utmi_phy_data_width: UTMI+ PHY data width
+ * 0 - 8 bits
+ * 1 - 16 bits
+ * 2 - 8 or 16 bits
+ * @snpsid: Value from SNPSID register
+ * @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size: Power-on values of TxFIFO sizes
+ * @dma_desc_enable: When DMA mode is enabled, specifies whether to use
+ * address DMA mode or descriptor DMA mode for accessing
+ * the data FIFOs. The driver will automatically detect the
+ * value for this if none is specified.
+ * 0 - Address DMA
+ * 1 - Descriptor DMA (default, if available)
+ * @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
+ * 1 - Allow dynamic FIFO sizing (default, if available)
+ * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
+ * are enabled for non-periodic IN endpoints in device
+ * mode.
+ * @host_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
+ * in host mode when dynamic FIFO sizing is enabled
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @host_perio_tx_fifo_size: Number of 4-byte words in the periodic Tx FIFO in
+ * host mode when dynamic FIFO sizing is enabled
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @max_transfer_size: The maximum transfer size supported, in bytes
+ * 2047 to 65535
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @max_packet_count: The maximum number of packets in a transfer
+ * 15 to 511
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @host_channels: The number of host channel registers to use
+ * 1 to 16
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @dev_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
+ * in device mode when dynamic FIFO sizing is enabled
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @i2c_enable: Specifies whether to use the I2C interface for a full
+ * speed PHY. This parameter is only applicable if phy_type
+ * is FS.
+ * 0 - No (default)
+ * 1 - Yes
+ * @acg_enable: For enabling Active Clock Gating in the controller
+ * 0 - Disable
+ * 1 - Enable
+ * @lpm_mode: For enabling Link Power Management in the controller
+ * 0 - Disable
+ * 1 - Enable
+ * @rx_fifo_size: Number of 4-byte words in the Rx FIFO when dynamic
+ * FIFO sizing is enabled 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
+ * @service_interval_mode: For enabling service interval based scheduling in the
+ * controller.
+ * 0 - Disable
+ * 1 - Enable
+ */
+struct dwc2_hw_params {
+ unsigned op_mode:3;
+ unsigned arch:2;
+ unsigned dma_desc_enable:1;
+ unsigned enable_dynamic_fifo:1;
+ unsigned en_multiple_tx_fifo:1;
+ unsigned rx_fifo_size:16;
+ unsigned host_nperio_tx_fifo_size:16;
+ unsigned dev_nperio_tx_fifo_size:16;
+ unsigned host_perio_tx_fifo_size:16;
+ unsigned nperio_tx_q_depth:3;
+ unsigned host_perio_tx_q_depth:3;
+ unsigned dev_token_q_depth:5;
+ unsigned max_transfer_size:26;
+ unsigned max_packet_count:11;
+ unsigned host_channels:5;
+ unsigned hs_phy_type:2;
+ unsigned fs_phy_type:2;
+ unsigned i2c_enable:1;
+ unsigned acg_enable:1;
+ unsigned num_dev_ep:4;
+ unsigned num_dev_in_eps:4;
+ unsigned num_dev_perio_in_ep:4;
+ unsigned total_fifo_size:16;
+ unsigned power_optimized:1;
+ unsigned hibernation:1;
+ unsigned utmi_phy_data_width:2;
+ unsigned lpm_mode:1;
+ unsigned ipg_isoc_en:1;
+ unsigned service_interval_mode:1;
+ u32 snpsid;
+ u32 dev_ep_dirs;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
+};
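+
+/*
+ * The values captured in dwc2_hw_params act as the best-supported bounds
+ * for the corresponding dwc2_core_params fields; dwc2_init_params() is
+ * expected to fall back to them when a configured value is out of range,
+ * so e.g. the effective host_channels should never exceed
+ * hw_params.host_channels.
+ */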
+
+/* Size of control and EP0 buffers */
+#define DWC2_CTRL_BUFF_SIZE 8
+
+/**
+ * struct dwc2_gregs_backup - Holds global registers state before
+ * entering partial power down
+ * @gotgctl: Backup of GOTGCTL register
+ * @gintmsk: Backup of GINTMSK register
+ * @gahbcfg: Backup of GAHBCFG register
+ * @gusbcfg: Backup of GUSBCFG register
+ * @grxfsiz: Backup of GRXFSIZ register
+ * @gnptxfsiz: Backup of GNPTXFSIZ register
+ * @gi2cctl: Backup of GI2CCTL register
+ * @glpmcfg: Backup of GLPMCFG register
+ * @gdfifocfg: Backup of GDFIFOCFG register
+ * @pcgcctl: Backup of PCGCCTL register
+ * @pcgcctl1: Backup of PCGCCTL1 register
+ * @gpwrdn: Backup of GPWRDN register
+ * @valid: True if register values have been backed up.
+ */
+struct dwc2_gregs_backup {
+ u32 gotgctl;
+ u32 gintmsk;
+ u32 gahbcfg;
+ u32 gusbcfg;
+ u32 grxfsiz;
+ u32 gnptxfsiz;
+ u32 gi2cctl;
+ u32 glpmcfg;
+ u32 pcgcctl;
+ u32 pcgcctl1;
+ u32 gdfifocfg;
+ u32 gpwrdn;
+ bool valid;
+};
+
+/**
+ * struct dwc2_dregs_backup - Holds device registers state before
+ * entering partial power down
+ * @dcfg: Backup of DCFG register
+ * @dctl: Backup of DCTL register
+ * @daintmsk: Backup of DAINTMSK register
+ * @diepmsk: Backup of DIEPMSK register
+ * @doepmsk: Backup of DOEPMSK register
+ * @diepctl: Backup of DIEPCTL register
+ * @dieptsiz: Backup of DIEPTSIZ register
+ * @diepdma: Backup of DIEPDMA register
+ * @doepctl: Backup of DOEPCTL register
+ * @doeptsiz: Backup of DOEPTSIZ register
+ * @doepdma: Backup of DOEPDMA register
+ * @dtxfsiz: Backup of DTXFSIZ registers for each endpoint
+ * @valid: True if register values have been backed up.
+ */
+struct dwc2_dregs_backup {
+ u32 dcfg;
+ u32 dctl;
+ u32 daintmsk;
+ u32 diepmsk;
+ u32 doepmsk;
+ u32 diepctl[MAX_EPS_CHANNELS];
+ u32 dieptsiz[MAX_EPS_CHANNELS];
+ u32 diepdma[MAX_EPS_CHANNELS];
+ u32 doepctl[MAX_EPS_CHANNELS];
+ u32 doeptsiz[MAX_EPS_CHANNELS];
+ u32 doepdma[MAX_EPS_CHANNELS];
+ u32 dtxfsiz[MAX_EPS_CHANNELS];
+ bool valid;
+};
+
+/**
+ * struct dwc2_hregs_backup - Holds host registers state before
+ * entering partial power down
+ * @hcfg: Backup of HCFG register
+ * @haintmsk: Backup of HAINTMSK register
+ * @hcintmsk: Backup of HCINTMSK register
+ * @hprt0: Backup of HPRT0 register
+ * @hfir: Backup of HFIR register
+ * @hptxfsiz: Backup of HPTXFSIZ register
+ * @valid: True if register values have been backed up.
+ */
+struct dwc2_hregs_backup {
+ u32 hcfg;
+ u32 haintmsk;
+ u32 hcintmsk[MAX_EPS_CHANNELS];
+ u32 hprt0;
+ u32 hfir;
+ u32 hptxfsiz;
+ bool valid;
+};
+
+/*
+ * Constants related to high speed periodic scheduling
+ *
+ * We have a periodic schedule that is DWC2_HS_SCHEDULE_UFRAMES long. From a
+ * reservation point of view it's assumed that the schedule goes right back to
+ * the beginning after the end of the schedule.
+ *
+ * What does that mean for scheduling things with a long interval? It means
+ * we'll reserve time for them in every possible microframe that they could
+ * ever be scheduled in. ...but we'll still only actually schedule them as
+ * often as they were requested.
+ *
+ * We keep our schedule in a "bitmap" structure. This simplifies having
+ * to keep track of and merge intervals: we just let the bitmap code do most
+ * of the heavy lifting. In a way scheduling is much like memory allocation.
+ *
+ * We schedule 100us per uframe or 80% of 125us (the maximum amount you're
+ * supposed to schedule for periodic transfers). That's according to spec.
+ *
+ * Note that though we only schedule 80% of each microframe, the bitmap that we
+ * keep the schedule in is tightly packed (AKA it doesn't have 125us worth of
+ * space for each uFrame).
+ *
+ * Requirements:
+ * - DWC2_HS_SCHEDULE_UFRAMES must evenly divide 0x4000 (HFNUM_MAX_FRNUM + 1)
+ * - DWC2_HS_SCHEDULE_UFRAMES must be 8 times DWC2_LS_SCHEDULE_FRAMES (probably
+ * could be any multiple of 8 times DWC2_LS_SCHEDULE_FRAMES, but there might
+ * be bugs). The 8 comes from the USB spec: number of microframes per frame.
+ */
+#define DWC2_US_PER_UFRAME 125
+#define DWC2_HS_PERIODIC_US_PER_UFRAME 100
+
+#define DWC2_HS_SCHEDULE_UFRAMES 8
+#define DWC2_HS_SCHEDULE_US (DWC2_HS_SCHEDULE_UFRAMES * \
+ DWC2_HS_PERIODIC_US_PER_UFRAME)
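+
+/*
+ * Worked example: with DWC2_HS_SCHEDULE_UFRAMES = 8 and 100 us schedulable
+ * per uframe, the bitmap covers DWC2_HS_SCHEDULE_US = 8 * 100 = 800 us of
+ * schedulable time (one bit per us, one full frame), and 8 evenly divides
+ * 0x4000 as required above.
+ */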
+
+/*
+ * Constants related to low speed scheduling
+ *
+ * For high speed we schedule every 1us. For low speed that's a bit overkill,
+ * so we make up a unit called a "slice" that's worth 25us. There are 40
+ * slices in a full frame and we can schedule 36 of those (90%) for periodic
+ * transfers.
+ *
+ * Our low speed schedule can be as short as 1 frame or could be longer. When
+ * we only schedule 1 frame it means that we'll need to reserve a time every
+ * frame even for things that only transfer very rarely, so something that runs
+ * every 2048 frames will get time reserved in every frame. Our low speed
+ * schedule can be longer and we'll be able to handle more overlap, but that
+ * will come at increased memory cost and increased time to schedule.
+ *
+ * Note: one other advantage of a short low speed schedule is that if we mess
+ * up and miss scheduling we can jump in and use any of the slots that we
+ * happened to reserve.
+ *
+ * With 25 us per slice and 1 frame in the schedule, we only need 4 bytes for
+ * the schedule. There will be one schedule per TT.
+ *
+ * Requirements:
+ * - DWC2_US_PER_SLICE must evenly divide DWC2_LS_PERIODIC_US_PER_FRAME.
+ */
+#define DWC2_US_PER_SLICE 25
+#define DWC2_SLICES_PER_UFRAME (DWC2_US_PER_UFRAME / DWC2_US_PER_SLICE)
+
+#define DWC2_ROUND_US_TO_SLICE(us) \
+ (DIV_ROUND_UP((us), DWC2_US_PER_SLICE) * \
+ DWC2_US_PER_SLICE)
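+
+/* For example, DWC2_ROUND_US_TO_SLICE(30) = DIV_ROUND_UP(30, 25) * 25 = 50 */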
+
+#define DWC2_LS_PERIODIC_US_PER_FRAME \
+ 900
+#define DWC2_LS_PERIODIC_SLICES_PER_FRAME \
+ (DWC2_LS_PERIODIC_US_PER_FRAME / \
+ DWC2_US_PER_SLICE)
+
+#define DWC2_LS_SCHEDULE_FRAMES 1
+#define DWC2_LS_SCHEDULE_SLICES (DWC2_LS_SCHEDULE_FRAMES * \
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME)
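+
+/*
+ * Worked example: 900 us / 25 us = 36 schedulable slices per frame, so with
+ * DWC2_LS_SCHEDULE_FRAMES = 1 the whole low speed schedule is a 36-bit
+ * bitmap per TT.
+ */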
+
+/**
+ * struct dwc2_hsotg - Holds the state of the driver, including the non-periodic
+ * and periodic schedules
+ *
+ * These are common for both host and peripheral modes:
+ *
+ * @dev: The struct device pointer
+ * @regs: Pointer to controller regs
+ * @hw_params: Parameters that were autodetected from the
+ * hardware registers
+ * @params: Parameters that define how the core should be configured
+ * @op_state: The operational state; during transitions (a_host=>
+ * a_peripheral and b_device=>b_host) this may not match
+ * the core, but allows the software to determine
+ * transitions
+ * @dr_mode: Requested mode of operation, one of following:
+ * - USB_DR_MODE_PERIPHERAL
+ * - USB_DR_MODE_HOST
+ * - USB_DR_MODE_OTG
+ * @role_sw: usb_role_switch handle
+ * @role_sw_default_mode: default operation mode of controller while usb role
+ * is USB_ROLE_NONE
+ * @hcd_enabled: Host mode sub-driver initialization indicator.
+ * @gadget_enabled: Peripheral mode sub-driver initialization indicator.
+ * @ll_hw_enabled: Status of low-level hardware resources.
+ * @hibernated: True if core is hibernated
+ * @in_ppd: True if core is in partial power down mode.
+ * @bus_suspended: True if bus is suspended
+ * @reset_phy_on_wake: Quirk saying that we should assert PHY reset on a
+ * remote wakeup.
+ * @phy_off_for_suspend: Status of whether we turned the PHY off at suspend.
+ * @need_phy_for_wake: Quirk saying that we should keep the PHY on at
+ * suspend if we need USB to wake us up.
+ * @frame_number: Frame number read from the core. For both device
+ * and host modes. The value ranges are from 0
+ * to HFNUM_MAX_FRNUM.
+ * @phy: The otg phy transceiver structure for phy control.
+ * @uphy: The otg phy transceiver structure for old USB phy
+ * control.
+ * @plat: The platform specific configuration data. This can be
+ * removed once all SoCs support usb transceiver.
+ * @supplies: Definition of USB power supplies
+ * @vbus_supply: Regulator supplying vbus.
+ * @usb33d: Optional 3.3v regulator used on some stm32 devices to
+ * supply ID and VBUS detection hardware.
+ * @lock: Spinlock that protects all the driver data structures
+ * @priv: Stores a pointer to the struct usb_hcd
+ * @queuing_high_bandwidth: True if multiple packets of a high-bandwidth
+ * transfer are in process of being queued
+ * @srp_success: Stores status of SRP request in the case of a FS PHY
+ * with an I2C interface
+ * @wq_otg: Workqueue object used for handling of some interrupts
+ * @wf_otg: Work object for handling Connector ID Status Change
+ * interrupt
+ * @wkp_timer: Timer object for handling Wakeup Detected interrupt
+ * @lx_state: Lx state of connected device
+ * @gr_backup: Backup of global registers during suspend
+ * @dr_backup: Backup of device registers during suspend
+ * @hr_backup: Backup of host registers during suspend
+ * @needs_byte_swap: Specifies whether the core registers have the
+ * opposite endianness and accesses need byte swapping.
+ *
+ * These are for host mode:
+ *
+ * @flags: Flags for handling root port state changes
+ * @flags.d32: Contain all root port flags
+ * @flags.b: Separate root port flags from each other
+ * @flags.b.port_connect_status_change: True if root port connect status
+ * changed
+ * @flags.b.port_connect_status: True if device connected to root port
+ * @flags.b.port_reset_change: True if root port reset status changed
+ * @flags.b.port_enable_change: True if root port enable status changed
+ * @flags.b.port_suspend_change: True if root port suspend status changed
+ * @flags.b.port_over_current_change: True if root port over current state
+ * changed.
+ * @flags.b.port_l1_change: True if root port l1 status changed
+ * @flags.b.reserved: Reserved bits of root port register
+ * @non_periodic_sched_inactive: Inactive QHs in the non-periodic schedule.
+ * Transfers associated with these QHs are not currently
+ * assigned to a host channel.
+ * @non_periodic_sched_active: Active QHs in the non-periodic schedule.
+ * Transfers associated with these QHs are currently
+ * assigned to a host channel.
+ * @non_periodic_qh_ptr: Pointer to next QH to process in the active
+ * non-periodic schedule
+ * @non_periodic_sched_waiting: Waiting QHs in the non-periodic schedule.
+ * Transfers associated with these QHs are not currently
+ * assigned to a host channel.
+ * @periodic_sched_inactive: Inactive QHs in the periodic schedule. This is a
+ * list of QHs for periodic transfers that are _not_
+ * scheduled for the next frame. Each QH in the list has an
+ * interval counter that determines when it needs to be
+ * scheduled for execution. This scheduling mechanism
+ * allows only a simple calculation for periodic bandwidth
+ * used (i.e. must assume that all periodic transfers may
+ * need to execute in the same frame). However, it greatly
+ * simplifies scheduling and should be sufficient for the
+ * vast majority of OTG hosts, which need to connect to a
+ * small number of peripherals at one time. Items move from
+ * this list to periodic_sched_ready when the QH interval
+ * counter is 0 at SOF.
+ * @periodic_sched_ready: List of periodic QHs that are ready for execution in
+ * the next frame, but have not yet been assigned to host
+ * channels. Items move from this list to
+ * periodic_sched_assigned as host channels become
+ * available during the current frame.
+ * @periodic_sched_assigned: List of periodic QHs to be executed in the next
+ * frame that are assigned to host channels. Items move
+ * from this list to periodic_sched_queued as the
+ * transactions for the QH are queued to the DWC_otg
+ * controller.
+ * @periodic_sched_queued: List of periodic QHs that have been queued for
+ * execution. Items move from this list to either
+ * periodic_sched_inactive or periodic_sched_ready when the
+ * channel associated with the transfer is released. If the
+ * interval for the QH is 1, the item moves to
+ * periodic_sched_ready because it must be rescheduled for
+ * the next frame. Otherwise, the item moves to
+ * periodic_sched_inactive.
+ * @split_order: List keeping track of channels doing splits, in order.
+ * @periodic_usecs: Total bandwidth claimed so far for periodic transfers.
+ * This value is in microseconds per (micro)frame. The
+ * assumption is that all periodic transfers may occur in
+ * the same (micro)frame.
+ * @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the
+ * host is in high speed mode; low speed schedules are
+ * stored elsewhere since we need one per TT.
+ * @periodic_qh_count: Count of periodic QHs. Used for SOF interrupt
+ * enable/disable.
+ * @free_hc_list: Free host channels in the controller. This is a list of
+ * struct dwc2_host_chan items.
+ * @periodic_channels: Number of host channels assigned to periodic transfers.
+ * Currently assuming that there is a dedicated host
+ * channel for each periodic transaction and at least one
+ * host channel is available for non-periodic transactions.
+ * @non_periodic_channels: Number of host channels assigned to non-periodic
+ * transfers
+ * @available_host_channels: Number of host channels available for the
+ * microframe scheduler to use
+ * @hc_ptr_array: Array of pointers to the host channel descriptors.
+ * Allows accessing a host channel descriptor given the
+ * host channel number. This is useful in interrupt
+ * handlers.
+ * @status_buf: Buffer used for data received during the status phase of
+ * a control transfer.
+ * @status_buf_dma: DMA address for status_buf
+ * @start_work: Delayed work for handling host A-cable connection
+ * @reset_work: Delayed work for handling a port reset
+ * @phy_reset_work: Work structure for doing a PHY reset
+ * @otg_port: OTG port number
+ * @frame_list: Frame list
+ * @frame_list_dma: Frame list DMA address
+ * @frame_list_sz: Frame list size
+ * @desc_gen_cache: Kmem cache for generic descriptors
+ * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
+ * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf
+ *
+ * These are for peripheral mode:
+ *
+ * @driver: USB gadget driver
+ * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
+ * @num_of_eps: Number of available EPs (excluding EP0)
+ * @debug_root: Root directory for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @ep0_state: EP0 control transfers state
+ * @delayed_status: true when gadget driver asks for delayed status
+ * @test_mode: USB test mode requested by the host
+ * @remote_wakeup_allowed: True if device is allowed to wake-up host by
+ * remote-wakeup signalling
+ * @setup_desc_dma: EP0 setup stage desc chain DMA address
+ * @setup_desc: EP0 setup stage desc chain pointer
+ * @ctrl_in_desc_dma: EP0 IN data phase desc chain DMA address
+ * @ctrl_in_desc: EP0 IN data phase desc chain pointer
+ * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address
+ * @ctrl_out_desc: EP0 OUT data phase desc chain pointer
+ * @irq: Interrupt request line number
+ * @clk: Pointer to otg clock
+ * @reset: Pointer to dwc2 reset controller
+ * @reset_ecc: Pointer to dwc2 optional reset controller in Stratix10.
+ * @regset: A pointer to a struct debugfs_regset32, which contains
+ * a pointer to an array of register definitions, the
+ * array size and the base address where the register bank
+ * is to be found.
+ * @last_frame_num: Number of last frame. Range from 0 to 32768
+ * @frame_num_array: Used only if CONFIG_USB_DWC2_TRACK_MISSED_SOFS is
+ * defined, for missed SOFs tracking. The array holds
+ * frame numbers that are not equal to last_frame_num + 1.
+ * @last_frame_num_array: Used only if CONFIG_USB_DWC2_TRACK_MISSED_SOFS is
+ * defined, for missed SOFs tracking.
+ * If current_frame_number != last_frame_num + 1,
+ * then last_frame_num is added to this array.
+ * @frame_num_idx: Actual size of frame_num_array and last_frame_num_array
+ * @dumped_frame_num_array: 1 - if missed SOFs frame numbers were dumped
+ * 0 - if missed SOFs frame numbers were not dumped
+ * @fifo_mem: Total internal RAM for FIFOs (bytes)
+ * @fifo_map: Each bit corresponds to one FIFO. If a bit is set,
+ * then that FIFO is in use.
+ * @gadget: Represents a usb gadget device
+ * @connected: Used in slave mode. True if the device is connected
+ * to a host
+ * @eps_in: The IN endpoints being supplied to the gadget framework
+ * @eps_out: The OUT endpoints being supplied to the gadget framework
+ * @new_connection: Used in host mode. True if a newly connected
+ * device is present
+ * @enabled: Indicates whether the controller is enabled
+ *
+ */
+struct dwc2_hsotg {
+ struct device *dev;
+ void __iomem *regs;
+ /** Params detected from hardware */
+ struct dwc2_hw_params hw_params;
+ /** Params to actually use */
+ struct dwc2_core_params params;
+ enum usb_otg_state op_state;
+ enum usb_dr_mode dr_mode;
+ struct usb_role_switch *role_sw;
+ enum usb_dr_mode role_sw_default_mode;
+ unsigned int hcd_enabled:1;
+ unsigned int gadget_enabled:1;
+ unsigned int ll_hw_enabled:1;
+ unsigned int hibernated:1;
+ unsigned int in_ppd:1;
+ bool bus_suspended;
+ unsigned int reset_phy_on_wake:1;
+ unsigned int need_phy_for_wake:1;
+ unsigned int phy_off_for_suspend:1;
+ u16 frame_number;
+
+ struct phy *phy;
+ struct usb_phy *uphy;
+ struct dwc2_hsotg_plat *plat;
+ struct regulator_bulk_data supplies[DWC2_NUM_SUPPLIES];
+ struct regulator *vbus_supply;
+ struct regulator *usb33d;
+
+ spinlock_t lock;
+ void *priv;
+ int irq;
+ struct clk *clk;
+ struct reset_control *reset;
+ struct reset_control *reset_ecc;
+
+ unsigned int queuing_high_bandwidth:1;
+ unsigned int srp_success:1;
+
+ struct workqueue_struct *wq_otg;
+ struct work_struct wf_otg;
+ struct timer_list wkp_timer;
+ enum dwc2_lx_state lx_state;
+ struct dwc2_gregs_backup gr_backup;
+ struct dwc2_dregs_backup dr_backup;
+ struct dwc2_hregs_backup hr_backup;
+
+ struct dentry *debug_root;
+ struct debugfs_regset32 *regset;
+ bool needs_byte_swap;
+
+ /* DWC OTG HW Release versions */
+#define DWC2_CORE_REV_2_71a 0x4f54271a
+#define DWC2_CORE_REV_2_72a 0x4f54272a
+#define DWC2_CORE_REV_2_80a 0x4f54280a
+#define DWC2_CORE_REV_2_90a 0x4f54290a
+#define DWC2_CORE_REV_2_91a 0x4f54291a
+#define DWC2_CORE_REV_2_92a 0x4f54292a
+#define DWC2_CORE_REV_2_94a 0x4f54294a
+#define DWC2_CORE_REV_3_00a 0x4f54300a
+#define DWC2_CORE_REV_3_10a 0x4f54310a
+#define DWC2_CORE_REV_4_00a 0x4f54400a
+#define DWC2_CORE_REV_4_20a 0x4f54420a
+#define DWC2_FS_IOT_REV_1_00a 0x5531100a
+#define DWC2_HS_IOT_REV_1_00a 0x5532100a
+#define DWC2_CORE_REV_MASK 0x0000ffff
+
+ /* DWC OTG HW Core ID */
+#define DWC2_OTG_ID 0x4f540000
+#define DWC2_FS_IOT_ID 0x55310000
+#define DWC2_HS_IOT_ID 0x55320000
+
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ union dwc2_hcd_internal_flags {
+ u32 d32;
+ struct {
+ unsigned port_connect_status_change:1;
+ unsigned port_connect_status:1;
+ unsigned port_reset_change:1;
+ unsigned port_enable_change:1;
+ unsigned port_suspend_change:1;
+ unsigned port_over_current_change:1;
+ unsigned port_l1_change:1;
+ unsigned reserved:25;
+ } b;
+ } flags;
+
+ struct list_head non_periodic_sched_inactive;
+ struct list_head non_periodic_sched_waiting;
+ struct list_head non_periodic_sched_active;
+ struct list_head *non_periodic_qh_ptr;
+ struct list_head periodic_sched_inactive;
+ struct list_head periodic_sched_ready;
+ struct list_head periodic_sched_assigned;
+ struct list_head periodic_sched_queued;
+ struct list_head split_order;
+ u16 periodic_usecs;
+ DECLARE_BITMAP(hs_periodic_bitmap, DWC2_HS_SCHEDULE_US);
+ u16 periodic_qh_count;
+ bool new_connection;
+
+ u16 last_frame_num;
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+#define FRAME_NUM_ARRAY_SIZE 1000
+ u16 *frame_num_array;
+ u16 *last_frame_num_array;
+ int frame_num_idx;
+ int dumped_frame_num_array;
+#endif
+
+ struct list_head free_hc_list;
+ int periodic_channels;
+ int non_periodic_channels;
+ int available_host_channels;
+ struct dwc2_host_chan *hc_ptr_array[MAX_EPS_CHANNELS];
+ u8 *status_buf;
+ dma_addr_t status_buf_dma;
+#define DWC2_HCD_STATUS_BUF_SIZE 64
+
+ struct delayed_work start_work;
+ struct delayed_work reset_work;
+ struct work_struct phy_reset_work;
+ u8 otg_port;
+ u32 *frame_list;
+ dma_addr_t frame_list_dma;
+ u32 frame_list_sz;
+ struct kmem_cache *desc_gen_cache;
+ struct kmem_cache *desc_hsisoc_cache;
+ struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
+
+#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Gadget structures */
+ struct usb_gadget_driver *driver;
+ int fifo_mem;
+ unsigned int dedicated_fifos:1;
+ unsigned char num_of_eps;
+ u32 fifo_map;
+
+ struct usb_request *ep0_reply;
+ struct usb_request *ctrl_req;
+ void *ep0_buff;
+ void *ctrl_buff;
+ enum dwc2_ep0_state ep0_state;
+ unsigned delayed_status:1;
+ u8 test_mode;
+
+ dma_addr_t setup_desc_dma[2];
+ struct dwc2_dma_desc *setup_desc[2];
+ dma_addr_t ctrl_in_desc_dma;
+ struct dwc2_dma_desc *ctrl_in_desc;
+ dma_addr_t ctrl_out_desc_dma;
+ struct dwc2_dma_desc *ctrl_out_desc;
+
+ struct usb_gadget gadget;
+ unsigned int enabled:1;
+ unsigned int connected:1;
+ unsigned int remote_wakeup_allowed:1;
+ struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
+ struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
+};
+
+/* Normal architectures just use readl/writel */
+static inline u32 dwc2_readl(struct dwc2_hsotg *hsotg, u32 offset)
+{
+ u32 val;
+
+ val = readl(hsotg->regs + offset);
+ if (hsotg->needs_byte_swap)
+ return swab32(val);
+ else
+ return val;
+}
+
+static inline void dwc2_writel(struct dwc2_hsotg *hsotg, u32 value, u32 offset)
+{
+ if (hsotg->needs_byte_swap)
+ writel(swab32(value), hsotg->regs + offset);
+ else
+ writel(value, hsotg->regs + offset);
+
+#ifdef DWC2_LOG_WRITES
+ pr_info("info:: wrote %08x to %p\n", value, hsotg->regs + offset);
+#endif
+}
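+
+/*
+ * Typical usage of dwc2_readl()/dwc2_writel() is a read-modify-write on a
+ * control register; an illustrative sketch (GUSBCFG_PHYIF16 stands in for
+ * whatever bit a caller actually needs):
+ *
+ * u32 gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+ *
+ * gusbcfg |= GUSBCFG_PHYIF16;
+ * dwc2_writel(hsotg, gusbcfg, GUSBCFG);
+ *
+ * The swab32() handling above keeps this pattern correct on cores wired
+ * with the opposite endianness.
+ */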
+
+static inline void dwc2_readl_rep(struct dwc2_hsotg *hsotg, u32 offset,
+ void *buffer, unsigned int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+
+ do {
+ u32 x = dwc2_readl(hsotg, offset);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void dwc2_writel_rep(struct dwc2_hsotg *hsotg, u32 offset,
+ const void *buffer, unsigned int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+
+ do {
+ dwc2_writel(hsotg, *buf++, offset);
+ } while (--count);
+ }
+}
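+
+/*
+ * Unlike raw readsl()/writesl(), the _rep helpers above go through
+ * dwc2_readl()/dwc2_writel(), so each 32-bit word is byte-swapped when
+ * needed; they always target the same register offset, which matches
+ * FIFO-style windows where each access pushes or pops one data word.
+ */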
+
+/* Reasons for halting a host channel */
+enum dwc2_halt_status {
+ DWC2_HC_XFER_NO_HALT_STATUS,
+ DWC2_HC_XFER_COMPLETE,
+ DWC2_HC_XFER_URB_COMPLETE,
+ DWC2_HC_XFER_ACK,
+ DWC2_HC_XFER_NAK,
+ DWC2_HC_XFER_NYET,
+ DWC2_HC_XFER_STALL,
+ DWC2_HC_XFER_XACT_ERR,
+ DWC2_HC_XFER_FRAME_OVERRUN,
+ DWC2_HC_XFER_BABBLE_ERR,
+ DWC2_HC_XFER_DATA_TOGGLE_ERR,
+ DWC2_HC_XFER_AHB_ERR,
+ DWC2_HC_XFER_PERIODIC_INCOMPLETE,
+ DWC2_HC_XFER_URB_DEQUEUE,
+};
+
+/* Core version information */
+static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000;
+}
+
+static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000;
+}
+
+static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000;
+}
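+
+/*
+ * The IOT checks above key off the SNPSID product field: the upper 16 bits
+ * name the core ("OT" = 0x4f54 for the OTG core, "U1" = 0x5531 and
+ * "U2" = 0x5532 for the FS and HS IoT variants) while the lower bits hold
+ * the release, e.g. (snpsid & DWC2_CORE_REV_MASK) == 0x310a for a 3.10a
+ * core.
+ */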
+
+/*
+ * The following functions support initialization of the core driver component
+ * and the DWC_otg controller
+ */
+int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait);
+int dwc2_enter_partial_power_down(struct dwc2_hsotg *hsotg);
+int dwc2_exit_partial_power_down(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ bool restore);
+int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg, int is_host);
+int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ int reset, int is_host);
+void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg);
+int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy);
+
+void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host);
+void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
+
+bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
+
+int dwc2_check_core_version(struct dwc2_hsotg *hsotg);
+
+/*
+ * Common core Functions.
+ * The following functions support managing the DWC_otg controller in either
+ * device or host mode.
+ */
+void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes);
+void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num);
+void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg);
+
+void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd);
+void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
+
+void dwc2_hib_restore_common(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ int is_host);
+int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg);
+int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);
+
+void dwc2_enable_acg(struct dwc2_hsotg *hsotg);
+
+/* This function should be called on every hardware interrupt. */
+irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
+
+/* The device ID match table */
+extern const struct of_device_id dwc2_of_match_table[];
+extern const struct acpi_device_id dwc2_acpi_match[];
+
+int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
+int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
+
+/* Common polling functions */
+int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg, u32 bit,
+ u32 timeout);
+int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hs_otg, u32 reg, u32 bit,
+ u32 timeout);
+/* Parameters */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+int dwc2_init_params(struct dwc2_hsotg *hsotg);
+
+/*
+ * The following functions check the controller's OTG operation mode
+ * capability (GHWCFG2.OTG_MODE).
+ *
+ * These functions can be used before the internal hsotg->hw_params
+ * are read in and cached so they always read directly from the
+ * GHWCFG2 register.
+ */
+unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg);
+bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg);
+bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg);
+bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg);
+
+/*
+ * Returns the mode of operation, host or device
+ */
+static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg)
+{
+ return (dwc2_readl(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
+}
+
+static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
+{
+ return (dwc2_readl(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
+}
+
+int dwc2_drd_init(struct dwc2_hsotg *hsotg);
+void dwc2_drd_suspend(struct dwc2_hsotg *hsotg);
+void dwc2_drd_resume(struct dwc2_hsotg *hsotg);
+void dwc2_drd_exit(struct dwc2_hsotg *hsotg);
+
+/*
+ * Dump core registers and SPRAM
+ */
+void dwc2_dump_dev_registers(struct dwc2_hsotg *hsotg);
+void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg);
+void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg);
+
+/* Gadget defines */
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg);
+int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2);
+int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2);
+int dwc2_gadget_init(struct dwc2_hsotg *hsotg);
+void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
+ bool reset);
+void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg);
+void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
+void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
+int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
+#define dwc2_is_device_connected(hsotg) (hsotg->connected)
+#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
+int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
+int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
+int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, int reset);
+int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg);
+int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
+ bool restore);
+void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg,
+ int rem_wakeup);
+int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg);
+int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg);
+int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg);
+static inline void dwc2_clear_fifo_map(struct dwc2_hsotg *hsotg)
+{ hsotg->fifo_map = 0; }
+#else
+static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
+ bool reset) {}
+static inline void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2) {}
+static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
+ int testmode)
+{ return 0; }
+#define dwc2_is_device_connected(hsotg) (0)
+#define dwc2_is_device_enabled(hsotg) (0)
+static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
+ int remote_wakeup)
+{ return 0; }
+static inline int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, int reset)
+{ return 0; }
+static inline int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
+ bool restore)
+{ return 0; }
+static inline void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg,
+ int rem_wakeup) {}
+static inline int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_clear_fifo_map(struct dwc2_hsotg *hsotg) {}
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
+int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us);
+void dwc2_hcd_connect(struct dwc2_hsotg *hsotg);
+void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force);
+void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
+int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup);
+int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex);
+int dwc2_port_resume(struct dwc2_hsotg *hsotg);
+int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg);
+int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg);
+int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg);
+int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, int reset);
+int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg);
+int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, bool restore);
+void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg);
+void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup);
+bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2);
+static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg)
+{ schedule_work(&hsotg->phy_reset_work); }
+#else
+static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg,
+ int us)
+{ return 0; }
+static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {}
+static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {}
+static inline int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
+{ return 0; }
+static inline int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
+{ return 0; }
+static inline int dwc2_port_resume(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, int reset)
+{ return 0; }
+static inline int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, bool restore)
+{ return 0; }
+static inline void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg,
+ int rem_wakeup) {}
+static inline bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
+{ return false; }
+static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg) {}
+
+#endif
+
+#endif /* __DWC2_CORE_H__ */
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
new file mode 100644
index 000000000..158ede753
--- /dev/null
+++ b/drivers/usb/dwc2/core_intr.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * core_intr.c - DesignWare HS OTG Controller common interrupt handling
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * This file contains the common interrupt handlers
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
+{
+ switch (hsotg->op_state) {
+ case OTG_STATE_A_HOST:
+ return "a_host";
+ case OTG_STATE_A_SUSPEND:
+ return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL:
+ return "a_peripheral";
+ case OTG_STATE_B_PERIPHERAL:
+ return "b_peripheral";
+ case OTG_STATE_B_HOST:
+ return "b_host";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * dwc2_handle_usb_port_intr - Handles OTG PRTINT interrupts.
+ * When the PRTINT interrupt fires, there are certain status bits in the Host
+ * Port register that need to be cleared.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0 = dwc2_readl(hsotg, HPRT0);
+
+ if (hprt0 & HPRT0_ENACHG) {
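+ /*
+ * Writing 1 to HPRT0_ENA disables the port, so mask it out of
+ * the value written back when acknowledging the enable-change
+ * status.
+ */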
+ hprt0 &= ~HPRT0_ENA;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ }
+}
+
+/**
+ * dwc2_handle_mode_mismatch_intr() - Logs a mode mismatch warning message
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg)
+{
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_MODEMIS, GINTSTS);
+
+ dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n",
+ dwc2_is_host_mode(hsotg) ? "Host" : "Device");
+}
+
+/**
+ * dwc2_handle_otg_intr() - Handles the OTG Interrupts. It reads the OTG
+ * Interrupt Register (GOTGINT) to determine what interrupt has occurred.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 gotgint;
+ u32 gotgctl;
+ u32 gintmsk;
+
+ gotgint = dwc2_readl(hsotg, GOTGINT);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ dev_dbg(hsotg->dev, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint,
+ dwc2_op_state_str(hsotg));
+
+ if (gotgint & GOTGINT_SES_END_DET) {
+ dev_dbg(hsotg->dev,
+ " ++OTG Interrupt: Session End Detected++ (%s)\n",
+ dwc2_op_state_str(hsotg));
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_hsotg_disconnect(hsotg);
+
+ if (hsotg->op_state == OTG_STATE_B_HOST) {
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+ } else {
+ /*
+ * If not B_HOST and Device HNP still set, HNP did
+ * not succeed!
+ */
+ if (gotgctl & GOTGCTL_DEVHNPEN) {
+ dev_dbg(hsotg->dev, "Session End Detected\n");
+ dev_err(hsotg->dev,
+ "Device Not Connected/Responding!\n");
+ }
+
+ /*
+ * If Session End Detected the B-Cable has been
+ * disconnected
+ */
+ /* Reset to a clean state */
+ hsotg->lx_state = DWC2_L0;
+ }
+
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gotgctl &= ~GOTGCTL_DEVHNPEN;
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+ }
+
+ if (gotgint & GOTGINT_SES_REQ_SUC_STS_CHNG) {
+ dev_dbg(hsotg->dev,
+ " ++OTG Interrupt: Session Request Success Status Change++\n");
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ if (gotgctl & GOTGCTL_SESREQSCS) {
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
+ hsotg->params.i2c_enable) {
+ hsotg->srp_success = 1;
+ } else {
+ /* Clear Session Request */
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gotgctl &= ~GOTGCTL_SESREQ;
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+ }
+ }
+ }
+
+ if (gotgint & GOTGINT_HST_NEG_SUC_STS_CHNG) {
+ /*
+ * Print statements during the HNP interrupt handling
+ * can cause it to fail
+ */
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ /*
+ * Workaround for 3.00a: HW is not setting cur_mode; sometimes
+ * even this does not help
+ */
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a)
+ udelay(100);
+ if (gotgctl & GOTGCTL_HSTNEGSCS) {
+ if (dwc2_is_host_mode(hsotg)) {
+ hsotg->op_state = OTG_STATE_B_HOST;
+ /*
+ * Need to disable SOF interrupt immediately.
+ * When switching from device to host, the PCD
+ * interrupt handler won't handle the interrupt
+ * if host mode is already set. The HCD
+ * interrupt handler won't get called if the
+ * HCD state is HALT. This means that the
+ * interrupt does not get handled and Linux
+ * complains loudly.
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_SOF;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+
+ /*
+ * Call callback function with spin lock
+ * released
+ */
+ spin_unlock(&hsotg->lock);
+
+ /* Initialize the Core for Host mode */
+ dwc2_hcd_start(hsotg);
+ spin_lock(&hsotg->lock);
+ hsotg->op_state = OTG_STATE_B_HOST;
+ }
+ } else {
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gotgctl &= ~(GOTGCTL_HNPREQ | GOTGCTL_DEVHNPEN);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+ dev_dbg(hsotg->dev, "HNP Failed\n");
+ dev_err(hsotg->dev,
+ "Device Not Connected/Responding\n");
+ }
+ }
+
+ if (gotgint & GOTGINT_HST_NEG_DET) {
+ /*
+ * The disconnect interrupt is set at the same time as
+ * Host Negotiation Detected. During the mode switch all
+ * interrupts are cleared so the disconnect interrupt
+ * handler will not get executed.
+ */
+ dev_dbg(hsotg->dev,
+ " ++OTG Interrupt: Host Negotiation Detected++ (%s)\n",
+ (dwc2_is_host_mode(hsotg) ? "Host" : "Device"));
+ if (dwc2_is_device_mode(hsotg)) {
+ dev_dbg(hsotg->dev, "a_suspend->a_peripheral (%d)\n",
+ hsotg->op_state);
+ spin_unlock(&hsotg->lock);
+ dwc2_hcd_disconnect(hsotg, false);
+ spin_lock(&hsotg->lock);
+ hsotg->op_state = OTG_STATE_A_PERIPHERAL;
+ } else {
+ /* Need to disable SOF interrupt immediately */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_SOF;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ spin_unlock(&hsotg->lock);
+ dwc2_hcd_start(hsotg);
+ spin_lock(&hsotg->lock);
+ hsotg->op_state = OTG_STATE_A_HOST;
+ }
+ }
+
+ if (gotgint & GOTGINT_A_DEV_TOUT_CHG)
+ dev_dbg(hsotg->dev,
+ " ++OTG Interrupt: A-Device Timeout Change++\n");
+ if (gotgint & GOTGINT_DBNCE_DONE)
+ dev_dbg(hsotg->dev, " ++OTG Interrupt: Debounce Done++\n");
+
+ /* Clear GOTGINT */
+ dwc2_writel(hsotg, gotgint, GOTGINT);
+}
+
+/**
+ * dwc2_handle_conn_id_status_change_intr() - Handles the Connector ID Status
+ * Change Interrupt
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * Reads the OTG Interrupt Register (GOTGCTL) to determine whether this is a
+ * Device to Host Mode transition or a Host to Device Mode transition. This only
+ * occurs when the cable is connected/removed from the PHY connector.
+ */
+static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 gintmsk;
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_CONIDSTSCHNG, GINTSTS);
+
+ /* Need to disable SOF interrupt immediately */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_SOF;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+
+ dev_dbg(hsotg->dev, " ++Connector ID Status Change Interrupt++ (%s)\n",
+ dwc2_is_host_mode(hsotg) ? "Host" : "Device");
+
+ /*
+ * Need to schedule a work item, as there may be delaying function calls.
+ */
+ if (hsotg->wq_otg)
+ queue_work(hsotg->wq_otg, &hsotg->wf_otg);
+}
+
+/**
+ * dwc2_handle_session_req_intr() - This interrupt indicates that a device is
+ * initiating the Session Request Protocol to request the host to turn on bus
+ * power so a new session can begin
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * This handler responds by turning on bus power. If the DWC_otg controller is
+ * in low power mode, this handler brings the controller out of low power mode
+ * before turning on bus power.
+ */
+static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+ u32 hprt0;
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
+
+ dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n",
+ hsotg->lx_state);
+
+ if (dwc2_is_device_mode(hsotg)) {
+ if (hsotg->lx_state == DWC2_L2) {
+ if (hsotg->in_ppd) {
+ ret = dwc2_exit_partial_power_down(hsotg, 0,
+ true);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit power_down failed\n");
+ }
+
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ }
+
+ /*
+ * Report disconnect if there is any previous session
+ * established
+ */
+ dwc2_hsotg_disconnect(hsotg);
+ } else {
+ /* Turn on the port power bit. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ /* Connect hcd after port power is set. */
+ dwc2_hcd_connect(hsotg);
+ }
+}
+
+/**
+ * dwc2_wakeup_from_lpm_l1 - Exit the device from LPM L1 state
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
+{
+ u32 glpmcfg;
+ u32 i = 0;
+
+ if (hsotg->lx_state != DWC2_L1) {
+ dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
+ return;
+ }
+
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (dwc2_is_device_mode(hsotg)) {
+ dev_dbg(hsotg->dev, "Exit from L1 state\n");
+ glpmcfg &= ~GLPMCFG_ENBLSLPM;
+ glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
+
+ do {
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+
+ if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
+ GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
+ break;
+
+ udelay(1);
+ } while (++i < 200);
+
+ if (i == 200) {
+ dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
+ return;
+ }
+ dwc2_gadget_init_lpm(hsotg);
+ } else {
+ /* TODO */
+ dev_err(hsotg->dev, "Host side LPM is not supported.\n");
+ return;
+ }
+
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+
+ /* Inform gadget to exit from L1 */
+ call_gadget(hsotg, resume);
+}
+
+/*
+ * This interrupt indicates that the DWC_otg controller has detected a
+ * resume or remote wakeup sequence. If the DWC_otg controller is in
+ * low power mode, the handler must bring the controller out of low
+ * power mode. The controller automatically begins resume signaling.
+ * The handler schedules a time to stop resume signaling.
+ */
+static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_WKUPINT, GINTSTS);
+
+ dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n");
+ dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
+
+ if (hsotg->lx_state == DWC2_L1) {
+ dwc2_wakeup_from_lpm_l1(hsotg);
+ return;
+ }
+
+ if (dwc2_is_device_mode(hsotg)) {
+ dev_dbg(hsotg->dev, "DSTS=0x%0x\n",
+ dwc2_readl(hsotg, DSTS));
+ if (hsotg->lx_state == DWC2_L2) {
+ if (hsotg->in_ppd) {
+ u32 dctl = dwc2_readl(hsotg, DCTL);
+ /* Clear Remote Wakeup Signaling */
+ dctl &= ~DCTL_RMTWKUPSIG;
+ dwc2_writel(hsotg, dctl, DCTL);
+ ret = dwc2_exit_partial_power_down(hsotg, 1,
+ true);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed\n");
+ call_gadget(hsotg, resume);
+ }
+
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ } else {
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+ }
+ } else {
+ if (hsotg->lx_state == DWC2_L2) {
+ if (hsotg->in_ppd) {
+ ret = dwc2_exit_partial_power_down(hsotg, 1,
+ true);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed\n");
+ }
+
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ dwc2_host_exit_clock_gating(hsotg, 1);
+
+ /*
+ * If we've got this quirk then the PHY is stuck upon
+ * wakeup. Assert reset. This will propagate out and
+ * eventually we'll re-enumerate the device. Not great
+ * but the best we can do. We can't call phy_reset()
+ * at interrupt time but there's no hurry, so we'll
+ * schedule it for later.
+ */
+ if (hsotg->reset_phy_on_wake)
+ dwc2_host_schedule_phy_reset(hsotg);
+
+ mod_timer(&hsotg->wkp_timer,
+ jiffies + msecs_to_jiffies(71));
+ } else {
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+ }
+ }
+}
+
+/*
+ * This interrupt indicates that a device has been disconnected from the
+ * root port
+ */
+static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
+{
+ dwc2_writel(hsotg, GINTSTS_DISCONNINT, GINTSTS);
+
+ dev_dbg(hsotg->dev, "++Disconnect Detected Interrupt++ (%s) %s\n",
+ dwc2_is_host_mode(hsotg) ? "Host" : "Device",
+ dwc2_op_state_str(hsotg));
+
+ if (hsotg->op_state == OTG_STATE_A_HOST)
+ dwc2_hcd_disconnect(hsotg, false);
+}
+
+/*
+ * This interrupt indicates that SUSPEND state has been detected on the USB.
+ *
+ * For HNP the USB Suspend interrupt signals the change from "a_peripheral"
+ * to "a_host".
+ *
+ * When power management is enabled the core will be put in low power mode.
+ */
+static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 dsts;
+ int ret;
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_USBSUSP, GINTSTS);
+
+ dev_dbg(hsotg->dev, "USB SUSPEND\n");
+
+ if (dwc2_is_device_mode(hsotg)) {
+ /*
+ * Check the Device status register to determine if the Suspend
+ * state is active
+ */
+ dsts = dwc2_readl(hsotg, DSTS);
+ dev_dbg(hsotg->dev, "%s: DSTS=0x%0x\n", __func__, dsts);
+ dev_dbg(hsotg->dev,
+ "DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d HWCFG4.Hibernation=%d\n",
+ !!(dsts & DSTS_SUSPSTS),
+ hsotg->hw_params.power_optimized,
+ hsotg->hw_params.hibernation);
+
+ /* Ignore suspend request before enumeration */
+ if (!dwc2_is_device_connected(hsotg)) {
+ dev_dbg(hsotg->dev,
+ "ignore suspend request before enumeration\n");
+ return;
+ }
+ if (dsts & DSTS_SUSPSTS) {
+ switch (hsotg->params.power_down) {
+ case DWC2_POWER_DOWN_PARAM_PARTIAL:
+ ret = dwc2_enter_partial_power_down(hsotg);
+ if (ret)
+ dev_err(hsotg->dev,
+ "enter partial_power_down failed\n");
+
+ udelay(100);
+
+ /* Ask phy to be suspended */
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ usb_phy_set_suspend(hsotg->uphy, true);
+ break;
+ case DWC2_POWER_DOWN_PARAM_HIBERNATION:
+ ret = dwc2_enter_hibernation(hsotg, 0);
+ if (ret)
+ dev_err(hsotg->dev,
+ "enter hibernation failed\n");
+ break;
+ case DWC2_POWER_DOWN_PARAM_NONE:
+ /*
+ * If neither hibernation nor partial power down is supported,
+ * clock gating is used to save power.
+ */
+ if (!hsotg->params.no_clock_gating)
+ dwc2_gadget_enter_clock_gating(hsotg);
+ }
+
+ /*
+ * Change to L2 (suspend) state before releasing
+ * spinlock
+ */
+ hsotg->lx_state = DWC2_L2;
+
+ /* Call gadget suspend callback */
+ call_gadget(hsotg, suspend);
+ }
+ } else {
+ if (hsotg->op_state == OTG_STATE_A_PERIPHERAL) {
+ dev_dbg(hsotg->dev, "a_peripheral->a_host\n");
+
+ /* Change to L2 (suspend) state */
+ hsotg->lx_state = DWC2_L2;
+ /* Clear the a_peripheral flag, back to a_host */
+ spin_unlock(&hsotg->lock);
+ dwc2_hcd_start(hsotg);
+ spin_lock(&hsotg->lock);
+ hsotg->op_state = OTG_STATE_A_HOST;
+ }
+ }
+}
+
+/**
+ * dwc2_handle_lpm_intr - GINTSTS_LPMTRANRCVD Interrupt handler
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+static void dwc2_handle_lpm_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 glpmcfg;
+ u32 pcgcctl;
+ u32 hird;
+ u32 hird_thres;
+ u32 hird_thres_en;
+ u32 enslpm;
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_LPMTRANRCVD, GINTSTS);
+
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+
+ if (!(glpmcfg & GLPMCFG_LPMCAP)) {
+ dev_err(hsotg->dev, "Unexpected LPM interrupt\n");
+ return;
+ }
+
+ hird = (glpmcfg & GLPMCFG_HIRD_MASK) >> GLPMCFG_HIRD_SHIFT;
+ hird_thres = (glpmcfg & GLPMCFG_HIRD_THRES_MASK &
+ ~GLPMCFG_HIRD_THRES_EN) >> GLPMCFG_HIRD_THRES_SHIFT;
+ hird_thres_en = glpmcfg & GLPMCFG_HIRD_THRES_EN;
+ enslpm = glpmcfg & GLPMCFG_ENBLSLPM;
+
+ if (dwc2_is_device_mode(hsotg)) {
+ dev_dbg(hsotg->dev, "HIRD_THRES_EN = %d\n", hird_thres_en);
+
+ if (hird_thres_en && hird >= hird_thres) {
+ dev_dbg(hsotg->dev, "L1 with utmi_l1_suspend_n\n");
+ } else if (enslpm) {
+ dev_dbg(hsotg->dev, "L1 with utmi_sleep_n\n");
+ } else {
+ dev_dbg(hsotg->dev, "Entering Sleep with L1 Gating\n");
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_ENBL_SLEEP_GATING;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ }
+		/*
+		 * Examine prt_sleep_sts after the TL1TokenRetry period (max 10 us)
+		 */
+ udelay(10);
+
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+
+ if (glpmcfg & GLPMCFG_SLPSTS) {
+ /* Save the current state */
+ hsotg->lx_state = DWC2_L1;
+ dev_dbg(hsotg->dev,
+ "Core is in L1 sleep glpmcfg=%08x\n", glpmcfg);
+
+ /* Inform gadget that we are in L1 state */
+ call_gadget(hsotg, suspend);
+ }
+ }
+}
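+
+/*
+ * A note on the field decode above: GLPMCFG_HIRD_THRES_MASK appears to
+ * cover both the threshold value and its enable bit, which is why
+ * GLPMCFG_HIRD_THRES_EN is masked out before shifting to obtain the
+ * bare threshold value.
+ */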
+
+#define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \
+ GINTSTS_CONIDSTSCHNG | GINTSTS_OTGINT | \
+ GINTSTS_MODEMIS | GINTSTS_DISCONNINT | \
+ GINTSTS_USBSUSP | GINTSTS_PRTINT | \
+ GINTSTS_LPMTRANRCVD)
+
+/*
+ * This function returns the Core Interrupt register
+ */
+static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts;
+ u32 gintmsk;
+ u32 gahbcfg;
+ u32 gintmsk_common = GINTMSK_COMMON;
+
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gahbcfg = dwc2_readl(hsotg, GAHBCFG);
+
+	/* If any common interrupts are set */
+ if (gintsts & gintmsk_common)
+ dev_dbg(hsotg->dev, "gintsts=%08x gintmsk=%08x\n",
+ gintsts, gintmsk);
+
+ if (gahbcfg & GAHBCFG_GLBL_INTR_EN)
+ return gintsts & gintmsk & gintmsk_common;
+ else
+ return 0;
+}
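+
+/*
+ * In other words, only interrupts that are pending in GINTSTS, unmasked
+ * in GINTMSK and listed in GINTMSK_COMMON are reported, and only while
+ * GAHBCFG_GLBL_INTR_EN is set; otherwise zero is returned.
+ */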
+
+/**
+ * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
+ * Exits hibernation without restoring registers.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @gpwrdn: GPWRDN register
+ */
+static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
+ u32 gpwrdn)
+{
+ u32 gpwrdn_tmp;
+
+ /* Switch-on voltage to the core */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Reset core */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Disable Power Down Clamp */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Deassert reset core */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Disable PMU interrupt */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+
+ /* De-assert Wakeup Logic */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+
+ hsotg->hibernated = 0;
+ hsotg->bus_suspended = 0;
+
+ if (gpwrdn & GPWRDN_IDSTS) {
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ dwc2_hsotg_core_connect(hsotg);
+ } else {
+ hsotg->op_state = OTG_STATE_A_HOST;
+
+ /* Initialize the Core for Host mode */
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ dwc2_hcd_start(hsotg);
+ }
+}
+
+/*
+ * GPWRDN interrupt handler.
+ *
+ * The GPWRDN interrupts are those that occur in both Host and
+ * Device mode while the core is in the hibernated state.
+ */
+static int dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 gpwrdn;
+ int linestate;
+ int ret = 0;
+
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+	/* Clear all interrupts */
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ linestate = (gpwrdn & GPWRDN_LINESTATE_MASK) >> GPWRDN_LINESTATE_SHIFT;
+	dev_dbg(hsotg->dev, "%s: gpwrdn = 0x%08x\n", __func__, gpwrdn);
+
+ if ((gpwrdn & GPWRDN_DISCONN_DET) &&
+ (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
+ dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
+ /*
+ * Call disconnect detect function to exit from
+ * hibernation
+ */
+ dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
+ } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
+ (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
+ dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
+ if (hsotg->hw_params.hibernation &&
+ hsotg->hibernated) {
+ if (gpwrdn & GPWRDN_IDSTS) {
+ ret = dwc2_exit_hibernation(hsotg, 0, 0, 0);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit hibernation failed.\n");
+ call_gadget(hsotg, resume);
+ } else {
+ ret = dwc2_exit_hibernation(hsotg, 1, 0, 1);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit hibernation failed.\n");
+ }
+ }
+ } else if ((gpwrdn & GPWRDN_RST_DET) &&
+ (gpwrdn & GPWRDN_RST_DET_MSK)) {
+ dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
+ if (!linestate) {
+ ret = dwc2_exit_hibernation(hsotg, 0, 1, 0);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit hibernation failed.\n");
+ }
+ } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
+ (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
+ dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
+ /*
+		 * The GPWRDN_STS_CHGINT exit-from-hibernation flow is the
+		 * same as the GPWRDN_DISCONN_DET flow, so call the
+		 * disconnect detect helper function to exit from
+		 * hibernation.
+ */
+ dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
+ }
+
+ return ret;
+}
+
+/*
+ * Common interrupt handler
+ *
+ * The common interrupts are those that occur in both Host and Device mode.
+ * This handler handles the following interrupts:
+ * - Mode Mismatch Interrupt
+ * - OTG Interrupt
+ * - Connector ID Status Change Interrupt
+ * - Disconnect Interrupt
+ * - Session Request Interrupt
+ * - Resume / Remote Wakeup Detected Interrupt
+ * - Suspend Interrupt
+ */
+irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
+{
+ struct dwc2_hsotg *hsotg = dev;
+ u32 gintsts;
+ irqreturn_t retval = IRQ_NONE;
+
+ spin_lock(&hsotg->lock);
+
+ if (!dwc2_is_controller_alive(hsotg)) {
+ dev_warn(hsotg->dev, "Controller is dead\n");
+ goto out;
+ }
+
+	/* Read the current frame number, in device or host mode. */
+ if (dwc2_is_device_mode(hsotg))
+ hsotg->frame_number = (dwc2_readl(hsotg, DSTS)
+ & DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT;
+ else
+ hsotg->frame_number = (dwc2_readl(hsotg, HFNUM)
+ & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
+
+ gintsts = dwc2_read_common_intr(hsotg);
+ if (gintsts & ~GINTSTS_PRTINT)
+ retval = IRQ_HANDLED;
+
+	/*
+	 * While the core is hibernated GINTSTS is not usable, so handle
+	 * the GPWRDN interrupts instead.
+	 */
+ if (hsotg->hibernated) {
+ dwc2_handle_gpwrdn_intr(hsotg);
+ retval = IRQ_HANDLED;
+ goto out;
+ }
+
+ if (gintsts & GINTSTS_MODEMIS)
+ dwc2_handle_mode_mismatch_intr(hsotg);
+ if (gintsts & GINTSTS_OTGINT)
+ dwc2_handle_otg_intr(hsotg);
+ if (gintsts & GINTSTS_CONIDSTSCHNG)
+ dwc2_handle_conn_id_status_change_intr(hsotg);
+ if (gintsts & GINTSTS_DISCONNINT)
+ dwc2_handle_disconnect_intr(hsotg);
+ if (gintsts & GINTSTS_SESSREQINT)
+ dwc2_handle_session_req_intr(hsotg);
+ if (gintsts & GINTSTS_WKUPINT)
+ dwc2_handle_wakeup_detected_intr(hsotg);
+ if (gintsts & GINTSTS_USBSUSP)
+ dwc2_handle_usb_suspend_intr(hsotg);
+ if (gintsts & GINTSTS_LPMTRANRCVD)
+ dwc2_handle_lpm_intr(hsotg);
+
+ if (gintsts & GINTSTS_PRTINT) {
+ /*
+		 * A port interrupt can occur while in device mode due to a
+		 * HPRT0 Port Enable/Disable change.
+ */
+ if (dwc2_is_device_mode(hsotg)) {
+ dev_dbg(hsotg->dev,
+ " --Port interrupt received in Device mode--\n");
+ dwc2_handle_usb_port_intr(hsotg);
+ retval = IRQ_HANDLED;
+ }
+ }
+
+out:
+ spin_unlock(&hsotg->lock);
+ return retval;
+}
diff --git a/drivers/usb/dwc2/debug.h b/drivers/usb/dwc2/debug.h
new file mode 100644
index 000000000..47252c56d
--- /dev/null
+++ b/drivers/usb/dwc2/debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * debug.h - Designware USB2 DRD controller debug header
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Mian Yousaf Kaukab <yousaf.kaukab@intel.com>
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_DEBUG_FS
+int dwc2_debugfs_init(struct dwc2_hsotg *hsotg);
+void dwc2_debugfs_exit(struct dwc2_hsotg *hsotg);
+#else
+static inline int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline void dwc2_debugfs_exit(struct dwc2_hsotg *hsotg)
+{ }
+#endif
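+
+/*
+ * A minimal usage sketch (simplified; the actual calls live in the
+ * platform glue): probe is expected to call dwc2_debugfs_init(hsotg)
+ * once the device is set up, and remove calls dwc2_debugfs_exit(hsotg).
+ * Without CONFIG_DEBUG_FS both calls compile down to the stubs above.
+ */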
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
new file mode 100644
index 000000000..1d72ece9c
--- /dev/null
+++ b/drivers/usb/dwc2/debugfs.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debugfs.c - Designware USB2 DRD controller debugfs
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Mian Yousaf Kaukab <yousaf.kaukab@intel.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "core.h"
+#include "debug.h"
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+
+/**
+ * testmode_write() - change usb test mode state.
+ * @file: The file to write to.
+ * @ubuf: The buffer where user wrote.
+ * @count: The ubuf size.
+ * @ppos: Unused parameter.
+ */
+static ssize_t testmode_write(struct file *file, const char __user *ubuf,
+			      size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc2_hsotg *hsotg = s->private;
+ unsigned long flags;
+ u32 testmode = 0;
+	char buf[32] = { 0 };	/* zero-fill so short writes don't compare garbage */
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "test_j", 6))
+ testmode = USB_TEST_J;
+ else if (!strncmp(buf, "test_k", 6))
+ testmode = USB_TEST_K;
+ else if (!strncmp(buf, "test_se0_nak", 12))
+ testmode = USB_TEST_SE0_NAK;
+ else if (!strncmp(buf, "test_packet", 11))
+ testmode = USB_TEST_PACKET;
+ else if (!strncmp(buf, "test_force_enable", 17))
+ testmode = USB_TEST_FORCE_ENABLE;
+ else
+ testmode = 0;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_set_test_mode(hsotg, testmode);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return count;
+}
+
+/**
+ * testmode_show() - debugfs: show usb test mode state
+ * @s: The seq file to write to.
+ * @unused: Unused parameter.
+ *
+ * This debugfs entry shows which usb test mode is currently enabled.
+ */
+static int testmode_show(struct seq_file *s, void *unused)
+{
+ struct dwc2_hsotg *hsotg = s->private;
+ unsigned long flags;
+ int dctl;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl &= DCTL_TSTCTL_MASK;
+ dctl >>= DCTL_TSTCTL_SHIFT;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ switch (dctl) {
+ case 0:
+ seq_puts(s, "no test\n");
+ break;
+ case USB_TEST_J:
+ seq_puts(s, "test_j\n");
+ break;
+ case USB_TEST_K:
+ seq_puts(s, "test_k\n");
+ break;
+ case USB_TEST_SE0_NAK:
+ seq_puts(s, "test_se0_nak\n");
+ break;
+ case USB_TEST_PACKET:
+ seq_puts(s, "test_packet\n");
+ break;
+ case USB_TEST_FORCE_ENABLE:
+ seq_puts(s, "test_force_enable\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %d\n", dctl);
+ }
+
+ return 0;
+}
+
+static int testmode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testmode_show, inode->i_private);
+}
+
+static const struct file_operations testmode_fops = {
+ .owner = THIS_MODULE,
+ .open = testmode_open,
+ .write = testmode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * state_show - debugfs: show overall driver and device state.
+ * @seq: The seq file to write to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the overall state of the hardware and
+ * some general information about each of the endpoints available
+ * to the system.
+ */
+static int state_show(struct seq_file *seq, void *v)
+{
+ struct dwc2_hsotg *hsotg = seq->private;
+ int idx;
+
+ seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
+ dwc2_readl(hsotg, DCFG),
+ dwc2_readl(hsotg, DCTL),
+ dwc2_readl(hsotg, DSTS));
+
+ seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
+ dwc2_readl(hsotg, DIEPMSK), dwc2_readl(hsotg, DOEPMSK));
+
+ seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
+ dwc2_readl(hsotg, GINTMSK),
+ dwc2_readl(hsotg, GINTSTS));
+
+ seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
+ dwc2_readl(hsotg, DAINTMSK),
+ dwc2_readl(hsotg, DAINT));
+
+	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=0x%08x\n",
+ dwc2_readl(hsotg, GNPTXSTS),
+ dwc2_readl(hsotg, GRXSTSR));
+
+ seq_puts(seq, "\nEndpoint status:\n");
+
+ for (idx = 0; idx < hsotg->num_of_eps; idx++) {
+ u32 in, out;
+
+ in = dwc2_readl(hsotg, DIEPCTL(idx));
+ out = dwc2_readl(hsotg, DOEPCTL(idx));
+
+ seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
+ idx, in, out);
+
+ in = dwc2_readl(hsotg, DIEPTSIZ(idx));
+ out = dwc2_readl(hsotg, DOEPTSIZ(idx));
+
+ seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
+ in, out);
+
+ seq_puts(seq, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(state);
+
+/**
+ * fifo_show - debugfs: show the fifo information
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * Show the FIFO information for the overall fifo and all the
+ * periodic transmission FIFOs.
+ */
+static int fifo_show(struct seq_file *seq, void *v)
+{
+ struct dwc2_hsotg *hsotg = seq->private;
+ int fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+ u32 val;
+ int idx;
+
+ seq_puts(seq, "Non-periodic FIFOs:\n");
+ seq_printf(seq, "RXFIFO: Size %d\n", dwc2_readl(hsotg, GRXFSIZ));
+
+ val = dwc2_readl(hsotg, GNPTXFSIZ);
+ seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
+ val >> FIFOSIZE_DEPTH_SHIFT,
+ val & FIFOSIZE_STARTADDR_MASK);
+
+ seq_puts(seq, "\nPeriodic TXFIFOs:\n");
+
+ for (idx = 1; idx <= fifo_count; idx++) {
+ val = dwc2_readl(hsotg, DPTXFSIZN(idx));
+
+ seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
+ val >> FIFOSIZE_DEPTH_SHIFT,
+ val & FIFOSIZE_STARTADDR_MASK);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fifo);
+
+static const char *decode_direction(int is_in)
+{
+ return is_in ? "in" : "out";
+}
+
+/**
+ * ep_show - debugfs: show the state of an endpoint.
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the state of the given endpoint (one is
+ * registered for each available).
+ */
+static int ep_show(struct seq_file *seq, void *v)
+{
+ struct dwc2_hsotg_ep *ep = seq->private;
+ struct dwc2_hsotg *hsotg = ep->parent;
+ struct dwc2_hsotg_req *req;
+ int index = ep->index;
+ int show_limit = 15;
+ unsigned long flags;
+
+ seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
+ ep->index, ep->ep.name, decode_direction(ep->dir_in));
+
+ /* first show the register state */
+
+ seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
+ dwc2_readl(hsotg, DIEPCTL(index)),
+ dwc2_readl(hsotg, DOEPCTL(index)));
+
+ seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
+ dwc2_readl(hsotg, DIEPDMA(index)),
+ dwc2_readl(hsotg, DOEPDMA(index)));
+
+ seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
+ dwc2_readl(hsotg, DIEPINT(index)),
+ dwc2_readl(hsotg, DOEPINT(index)));
+
+ seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
+ dwc2_readl(hsotg, DIEPTSIZ(index)),
+ dwc2_readl(hsotg, DOEPTSIZ(index)));
+
+ seq_puts(seq, "\n");
+ seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
+ seq_printf(seq, "total_data=%ld\n", ep->total_data);
+
+ seq_printf(seq, "request list (%p,%p):\n",
+ ep->queue.next, ep->queue.prev);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (--show_limit < 0) {
+ seq_puts(seq, "not showing more requests...\n");
+ break;
+ }
+
+ seq_printf(seq, "%c req %p: %d bytes @%p, ",
+ req == ep->req ? '*' : ' ',
+ req, req->req.length, req->req.buf);
+ seq_printf(seq, "%d done, res %d\n",
+ req->req.actual, req->req.status);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ep);
+
+/**
+ * dwc2_hsotg_create_debug - create debugfs directory and files
+ * @hsotg: The driver state
+ *
+ * Create the debugfs files to allow the user to get information
+ * about the state of the system. The directory name is created
+ * with the same name as the device itself, in case we end up
+ * with multiple blocks in future systems.
+ */
+static void dwc2_hsotg_create_debug(struct dwc2_hsotg *hsotg)
+{
+ struct dentry *root;
+ unsigned int epidx;
+
+ root = hsotg->debug_root;
+
+ /* create general state file */
+ debugfs_create_file("state", 0444, root, hsotg, &state_fops);
+ debugfs_create_file("testmode", 0644, root, hsotg, &testmode_fops);
+ debugfs_create_file("fifo", 0444, root, hsotg, &fifo_fops);
+
+ /* Create one file for each out endpoint */
+ for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
+ struct dwc2_hsotg_ep *ep;
+
+ ep = hsotg->eps_out[epidx];
+ if (ep)
+ debugfs_create_file(ep->name, 0444, root, ep, &ep_fops);
+ }
+ /* Create one file for each in endpoint. EP0 is handled with out eps */
+ for (epidx = 1; epidx < hsotg->num_of_eps; epidx++) {
+ struct dwc2_hsotg_ep *ep;
+
+ ep = hsotg->eps_in[epidx];
+ if (ep)
+ debugfs_create_file(ep->name, 0444, root, ep, &ep_fops);
+ }
+}
+#else
+static inline void dwc2_hsotg_create_debug(struct dwc2_hsotg *hsotg) {}
+#endif
+
+/* dwc2_hsotg_delete_debug was removed; cleanup is done in dwc2_debugfs_exit */
+
+#define dump_register(nm) \
+{ \
+ .name = #nm, \
+ .offset = nm, \
+}
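+
+/*
+ * For example, dump_register(GOTGCTL) expands to the initializer
+ * { .name = "GOTGCTL", .offset = GOTGCTL }, pairing each register's
+ * textual name with its offset for the "regdump" regset created in
+ * dwc2_debugfs_init().
+ */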
+
+static const struct debugfs_reg32 dwc2_regs[] = {
+ /*
+ * Accessing registers like this can trigger mode mismatch interrupt.
+ * However, according to dwc2 databook, the register access, in this
+ * case, is completed on the processor bus but is ignored by the core
+ * and does not affect its operation.
+ */
+ dump_register(GOTGCTL),
+ dump_register(GOTGINT),
+ dump_register(GAHBCFG),
+ dump_register(GUSBCFG),
+ dump_register(GRSTCTL),
+ dump_register(GINTSTS),
+ dump_register(GINTMSK),
+ dump_register(GRXSTSR),
+ /* Omit GRXSTSP */
+ dump_register(GRXFSIZ),
+ dump_register(GNPTXFSIZ),
+ dump_register(GNPTXSTS),
+ dump_register(GI2CCTL),
+ dump_register(GPVNDCTL),
+ dump_register(GGPIO),
+ dump_register(GUID),
+ dump_register(GSNPSID),
+ dump_register(GHWCFG1),
+ dump_register(GHWCFG2),
+ dump_register(GHWCFG3),
+ dump_register(GHWCFG4),
+ dump_register(GLPMCFG),
+ dump_register(GPWRDN),
+ dump_register(GDFIFOCFG),
+ dump_register(ADPCTL),
+ dump_register(HPTXFSIZ),
+ dump_register(DPTXFSIZN(1)),
+ dump_register(DPTXFSIZN(2)),
+ dump_register(DPTXFSIZN(3)),
+ dump_register(DPTXFSIZN(4)),
+ dump_register(DPTXFSIZN(5)),
+ dump_register(DPTXFSIZN(6)),
+ dump_register(DPTXFSIZN(7)),
+ dump_register(DPTXFSIZN(8)),
+ dump_register(DPTXFSIZN(9)),
+ dump_register(DPTXFSIZN(10)),
+ dump_register(DPTXFSIZN(11)),
+ dump_register(DPTXFSIZN(12)),
+ dump_register(DPTXFSIZN(13)),
+ dump_register(DPTXFSIZN(14)),
+ dump_register(DPTXFSIZN(15)),
+ dump_register(DCFG),
+ dump_register(DCTL),
+ dump_register(DSTS),
+ dump_register(DIEPMSK),
+ dump_register(DOEPMSK),
+ dump_register(DAINT),
+ dump_register(DAINTMSK),
+ dump_register(DTKNQR1),
+ dump_register(DTKNQR2),
+ dump_register(DTKNQR3),
+ dump_register(DTKNQR4),
+ dump_register(DVBUSDIS),
+ dump_register(DVBUSPULSE),
+ dump_register(DIEPCTL(0)),
+ dump_register(DIEPCTL(1)),
+ dump_register(DIEPCTL(2)),
+ dump_register(DIEPCTL(3)),
+ dump_register(DIEPCTL(4)),
+ dump_register(DIEPCTL(5)),
+ dump_register(DIEPCTL(6)),
+ dump_register(DIEPCTL(7)),
+ dump_register(DIEPCTL(8)),
+ dump_register(DIEPCTL(9)),
+ dump_register(DIEPCTL(10)),
+ dump_register(DIEPCTL(11)),
+ dump_register(DIEPCTL(12)),
+ dump_register(DIEPCTL(13)),
+ dump_register(DIEPCTL(14)),
+ dump_register(DIEPCTL(15)),
+ dump_register(DOEPCTL(0)),
+ dump_register(DOEPCTL(1)),
+ dump_register(DOEPCTL(2)),
+ dump_register(DOEPCTL(3)),
+ dump_register(DOEPCTL(4)),
+ dump_register(DOEPCTL(5)),
+ dump_register(DOEPCTL(6)),
+ dump_register(DOEPCTL(7)),
+ dump_register(DOEPCTL(8)),
+ dump_register(DOEPCTL(9)),
+ dump_register(DOEPCTL(10)),
+ dump_register(DOEPCTL(11)),
+ dump_register(DOEPCTL(12)),
+ dump_register(DOEPCTL(13)),
+ dump_register(DOEPCTL(14)),
+ dump_register(DOEPCTL(15)),
+ dump_register(DIEPINT(0)),
+ dump_register(DIEPINT(1)),
+ dump_register(DIEPINT(2)),
+ dump_register(DIEPINT(3)),
+ dump_register(DIEPINT(4)),
+ dump_register(DIEPINT(5)),
+ dump_register(DIEPINT(6)),
+ dump_register(DIEPINT(7)),
+ dump_register(DIEPINT(8)),
+ dump_register(DIEPINT(9)),
+ dump_register(DIEPINT(10)),
+ dump_register(DIEPINT(11)),
+ dump_register(DIEPINT(12)),
+ dump_register(DIEPINT(13)),
+ dump_register(DIEPINT(14)),
+ dump_register(DIEPINT(15)),
+ dump_register(DOEPINT(0)),
+ dump_register(DOEPINT(1)),
+ dump_register(DOEPINT(2)),
+ dump_register(DOEPINT(3)),
+ dump_register(DOEPINT(4)),
+ dump_register(DOEPINT(5)),
+ dump_register(DOEPINT(6)),
+ dump_register(DOEPINT(7)),
+ dump_register(DOEPINT(8)),
+ dump_register(DOEPINT(9)),
+ dump_register(DOEPINT(10)),
+ dump_register(DOEPINT(11)),
+ dump_register(DOEPINT(12)),
+ dump_register(DOEPINT(13)),
+ dump_register(DOEPINT(14)),
+ dump_register(DOEPINT(15)),
+ dump_register(DIEPTSIZ(0)),
+ dump_register(DIEPTSIZ(1)),
+ dump_register(DIEPTSIZ(2)),
+ dump_register(DIEPTSIZ(3)),
+ dump_register(DIEPTSIZ(4)),
+ dump_register(DIEPTSIZ(5)),
+ dump_register(DIEPTSIZ(6)),
+ dump_register(DIEPTSIZ(7)),
+ dump_register(DIEPTSIZ(8)),
+ dump_register(DIEPTSIZ(9)),
+ dump_register(DIEPTSIZ(10)),
+ dump_register(DIEPTSIZ(11)),
+ dump_register(DIEPTSIZ(12)),
+ dump_register(DIEPTSIZ(13)),
+ dump_register(DIEPTSIZ(14)),
+ dump_register(DIEPTSIZ(15)),
+ dump_register(DOEPTSIZ(0)),
+ dump_register(DOEPTSIZ(1)),
+ dump_register(DOEPTSIZ(2)),
+ dump_register(DOEPTSIZ(3)),
+ dump_register(DOEPTSIZ(4)),
+ dump_register(DOEPTSIZ(5)),
+ dump_register(DOEPTSIZ(6)),
+ dump_register(DOEPTSIZ(7)),
+ dump_register(DOEPTSIZ(8)),
+ dump_register(DOEPTSIZ(9)),
+ dump_register(DOEPTSIZ(10)),
+ dump_register(DOEPTSIZ(11)),
+ dump_register(DOEPTSIZ(12)),
+ dump_register(DOEPTSIZ(13)),
+ dump_register(DOEPTSIZ(14)),
+ dump_register(DOEPTSIZ(15)),
+ dump_register(DIEPDMA(0)),
+ dump_register(DIEPDMA(1)),
+ dump_register(DIEPDMA(2)),
+ dump_register(DIEPDMA(3)),
+ dump_register(DIEPDMA(4)),
+ dump_register(DIEPDMA(5)),
+ dump_register(DIEPDMA(6)),
+ dump_register(DIEPDMA(7)),
+ dump_register(DIEPDMA(8)),
+ dump_register(DIEPDMA(9)),
+ dump_register(DIEPDMA(10)),
+ dump_register(DIEPDMA(11)),
+ dump_register(DIEPDMA(12)),
+ dump_register(DIEPDMA(13)),
+ dump_register(DIEPDMA(14)),
+ dump_register(DIEPDMA(15)),
+ dump_register(DOEPDMA(0)),
+ dump_register(DOEPDMA(1)),
+ dump_register(DOEPDMA(2)),
+ dump_register(DOEPDMA(3)),
+ dump_register(DOEPDMA(4)),
+ dump_register(DOEPDMA(5)),
+ dump_register(DOEPDMA(6)),
+ dump_register(DOEPDMA(7)),
+ dump_register(DOEPDMA(8)),
+ dump_register(DOEPDMA(9)),
+ dump_register(DOEPDMA(10)),
+ dump_register(DOEPDMA(11)),
+ dump_register(DOEPDMA(12)),
+ dump_register(DOEPDMA(13)),
+ dump_register(DOEPDMA(14)),
+ dump_register(DOEPDMA(15)),
+ dump_register(DTXFSTS(0)),
+ dump_register(DTXFSTS(1)),
+ dump_register(DTXFSTS(2)),
+ dump_register(DTXFSTS(3)),
+ dump_register(DTXFSTS(4)),
+ dump_register(DTXFSTS(5)),
+ dump_register(DTXFSTS(6)),
+ dump_register(DTXFSTS(7)),
+ dump_register(DTXFSTS(8)),
+ dump_register(DTXFSTS(9)),
+ dump_register(DTXFSTS(10)),
+ dump_register(DTXFSTS(11)),
+ dump_register(DTXFSTS(12)),
+ dump_register(DTXFSTS(13)),
+ dump_register(DTXFSTS(14)),
+ dump_register(DTXFSTS(15)),
+ dump_register(PCGCTL),
+ dump_register(HCFG),
+ dump_register(HFIR),
+ dump_register(HFNUM),
+ dump_register(HPTXSTS),
+ dump_register(HAINT),
+ dump_register(HAINTMSK),
+ dump_register(HFLBADDR),
+ dump_register(HPRT0),
+ dump_register(HCCHAR(0)),
+ dump_register(HCCHAR(1)),
+ dump_register(HCCHAR(2)),
+ dump_register(HCCHAR(3)),
+ dump_register(HCCHAR(4)),
+ dump_register(HCCHAR(5)),
+ dump_register(HCCHAR(6)),
+ dump_register(HCCHAR(7)),
+ dump_register(HCCHAR(8)),
+ dump_register(HCCHAR(9)),
+ dump_register(HCCHAR(10)),
+ dump_register(HCCHAR(11)),
+ dump_register(HCCHAR(12)),
+ dump_register(HCCHAR(13)),
+ dump_register(HCCHAR(14)),
+ dump_register(HCCHAR(15)),
+ dump_register(HCSPLT(0)),
+ dump_register(HCSPLT(1)),
+ dump_register(HCSPLT(2)),
+ dump_register(HCSPLT(3)),
+ dump_register(HCSPLT(4)),
+ dump_register(HCSPLT(5)),
+ dump_register(HCSPLT(6)),
+ dump_register(HCSPLT(7)),
+ dump_register(HCSPLT(8)),
+ dump_register(HCSPLT(9)),
+ dump_register(HCSPLT(10)),
+ dump_register(HCSPLT(11)),
+ dump_register(HCSPLT(12)),
+ dump_register(HCSPLT(13)),
+ dump_register(HCSPLT(14)),
+ dump_register(HCSPLT(15)),
+ dump_register(HCINT(0)),
+ dump_register(HCINT(1)),
+ dump_register(HCINT(2)),
+ dump_register(HCINT(3)),
+ dump_register(HCINT(4)),
+ dump_register(HCINT(5)),
+ dump_register(HCINT(6)),
+ dump_register(HCINT(7)),
+ dump_register(HCINT(8)),
+ dump_register(HCINT(9)),
+ dump_register(HCINT(10)),
+ dump_register(HCINT(11)),
+ dump_register(HCINT(12)),
+ dump_register(HCINT(13)),
+ dump_register(HCINT(14)),
+ dump_register(HCINT(15)),
+ dump_register(HCINTMSK(0)),
+ dump_register(HCINTMSK(1)),
+ dump_register(HCINTMSK(2)),
+ dump_register(HCINTMSK(3)),
+ dump_register(HCINTMSK(4)),
+ dump_register(HCINTMSK(5)),
+ dump_register(HCINTMSK(6)),
+ dump_register(HCINTMSK(7)),
+ dump_register(HCINTMSK(8)),
+ dump_register(HCINTMSK(9)),
+ dump_register(HCINTMSK(10)),
+ dump_register(HCINTMSK(11)),
+ dump_register(HCINTMSK(12)),
+ dump_register(HCINTMSK(13)),
+ dump_register(HCINTMSK(14)),
+ dump_register(HCINTMSK(15)),
+ dump_register(HCTSIZ(0)),
+ dump_register(HCTSIZ(1)),
+ dump_register(HCTSIZ(2)),
+ dump_register(HCTSIZ(3)),
+ dump_register(HCTSIZ(4)),
+ dump_register(HCTSIZ(5)),
+ dump_register(HCTSIZ(6)),
+ dump_register(HCTSIZ(7)),
+ dump_register(HCTSIZ(8)),
+ dump_register(HCTSIZ(9)),
+ dump_register(HCTSIZ(10)),
+ dump_register(HCTSIZ(11)),
+ dump_register(HCTSIZ(12)),
+ dump_register(HCTSIZ(13)),
+ dump_register(HCTSIZ(14)),
+ dump_register(HCTSIZ(15)),
+ dump_register(HCDMA(0)),
+ dump_register(HCDMA(1)),
+ dump_register(HCDMA(2)),
+ dump_register(HCDMA(3)),
+ dump_register(HCDMA(4)),
+ dump_register(HCDMA(5)),
+ dump_register(HCDMA(6)),
+ dump_register(HCDMA(7)),
+ dump_register(HCDMA(8)),
+ dump_register(HCDMA(9)),
+ dump_register(HCDMA(10)),
+ dump_register(HCDMA(11)),
+ dump_register(HCDMA(12)),
+ dump_register(HCDMA(13)),
+ dump_register(HCDMA(14)),
+ dump_register(HCDMA(15)),
+ dump_register(HCDMAB(0)),
+ dump_register(HCDMAB(1)),
+ dump_register(HCDMAB(2)),
+ dump_register(HCDMAB(3)),
+ dump_register(HCDMAB(4)),
+ dump_register(HCDMAB(5)),
+ dump_register(HCDMAB(6)),
+ dump_register(HCDMAB(7)),
+ dump_register(HCDMAB(8)),
+ dump_register(HCDMAB(9)),
+ dump_register(HCDMAB(10)),
+ dump_register(HCDMAB(11)),
+ dump_register(HCDMAB(12)),
+ dump_register(HCDMAB(13)),
+ dump_register(HCDMAB(14)),
+ dump_register(HCDMAB(15)),
+};
+
+#define print_param(_seq, _ptr, _param) \
+seq_printf((_seq), "%-30s: %d\n", #_param, (_ptr)->_param)
+
+#define print_param_hex(_seq, _ptr, _param) \
+seq_printf((_seq), "%-30s: 0x%x\n", #_param, (_ptr)->_param)
+
+static int params_show(struct seq_file *seq, void *v)
+{
+ struct dwc2_hsotg *hsotg = seq->private;
+ struct dwc2_core_params *p = &hsotg->params;
+ int i;
+
+ print_param(seq, p, otg_caps.hnp_support);
+ print_param(seq, p, otg_caps.srp_support);
+ print_param(seq, p, otg_caps.otg_rev);
+ print_param(seq, p, dma_desc_enable);
+ print_param(seq, p, dma_desc_fs_enable);
+ print_param(seq, p, speed);
+ print_param(seq, p, enable_dynamic_fifo);
+ print_param(seq, p, en_multiple_tx_fifo);
+ print_param(seq, p, host_rx_fifo_size);
+ print_param(seq, p, host_nperio_tx_fifo_size);
+ print_param(seq, p, host_perio_tx_fifo_size);
+ print_param(seq, p, max_transfer_size);
+ print_param(seq, p, max_packet_count);
+ print_param(seq, p, host_channels);
+ print_param(seq, p, phy_type);
+ print_param(seq, p, phy_utmi_width);
+ print_param(seq, p, phy_ulpi_ddr);
+ print_param(seq, p, phy_ulpi_ext_vbus);
+ print_param(seq, p, i2c_enable);
+ print_param(seq, p, ipg_isoc_en);
+ print_param(seq, p, ulpi_fs_ls);
+ print_param(seq, p, host_support_fs_ls_low_power);
+ print_param(seq, p, host_ls_low_power_phy_clk);
+ print_param(seq, p, activate_stm_fs_transceiver);
+ print_param(seq, p, activate_stm_id_vb_detection);
+ print_param(seq, p, ts_dline);
+ print_param(seq, p, reload_ctl);
+ print_param_hex(seq, p, ahbcfg);
+ print_param(seq, p, uframe_sched);
+ print_param(seq, p, external_id_pin_ctl);
+ print_param(seq, p, power_down);
+ print_param(seq, p, lpm);
+ print_param(seq, p, lpm_clock_gating);
+ print_param(seq, p, besl);
+ print_param(seq, p, hird_threshold_en);
+ print_param(seq, p, hird_threshold);
+ print_param(seq, p, service_interval);
+ print_param(seq, p, host_dma);
+ print_param(seq, p, g_dma);
+ print_param(seq, p, g_dma_desc);
+ print_param(seq, p, g_rx_fifo_size);
+ print_param(seq, p, g_np_tx_fifo_size);
+
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ char str[32];
+
+		snprintf(str, sizeof(str), "g_tx_fifo_size[%d]", i);
+ seq_printf(seq, "%-30s: %d\n", str, p->g_tx_fifo_size[i]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(params);
+
+static int hw_params_show(struct seq_file *seq, void *v)
+{
+ struct dwc2_hsotg *hsotg = seq->private;
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+
+ print_param(seq, hw, op_mode);
+ print_param(seq, hw, arch);
+ print_param(seq, hw, dma_desc_enable);
+ print_param(seq, hw, enable_dynamic_fifo);
+ print_param(seq, hw, en_multiple_tx_fifo);
+ print_param(seq, hw, rx_fifo_size);
+ print_param(seq, hw, host_nperio_tx_fifo_size);
+ print_param(seq, hw, dev_nperio_tx_fifo_size);
+ print_param(seq, hw, host_perio_tx_fifo_size);
+ print_param(seq, hw, nperio_tx_q_depth);
+ print_param(seq, hw, host_perio_tx_q_depth);
+ print_param(seq, hw, dev_token_q_depth);
+ print_param(seq, hw, max_transfer_size);
+ print_param(seq, hw, max_packet_count);
+ print_param(seq, hw, host_channels);
+ print_param(seq, hw, hs_phy_type);
+ print_param(seq, hw, fs_phy_type);
+ print_param(seq, hw, i2c_enable);
+ print_param(seq, hw, num_dev_ep);
+ print_param(seq, hw, num_dev_perio_in_ep);
+ print_param(seq, hw, total_fifo_size);
+ print_param(seq, hw, power_optimized);
+ print_param(seq, hw, utmi_phy_data_width);
+ print_param_hex(seq, hw, snpsid);
+ print_param_hex(seq, hw, dev_ep_dirs);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(hw_params);
+
+static int dr_mode_show(struct seq_file *seq, void *v)
+{
+ struct dwc2_hsotg *hsotg = seq->private;
+ const char *dr_mode = "";
+
+ device_property_read_string(hsotg->dev, "dr_mode", &dr_mode);
+ seq_printf(seq, "%s\n", dr_mode);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dr_mode);
+
+int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+ struct dentry *root;
+
+ root = debugfs_create_dir(dev_name(hsotg->dev), usb_debug_root);
+ hsotg->debug_root = root;
+
+ debugfs_create_file("params", 0444, root, hsotg, &params_fops);
+ debugfs_create_file("hw_params", 0444, root, hsotg, &hw_params_fops);
+ debugfs_create_file("dr_mode", 0444, root, hsotg, &dr_mode_fops);
+
+ /* Add gadget debugfs nodes */
+ dwc2_hsotg_create_debug(hsotg);
+
+ hsotg->regset = devm_kzalloc(hsotg->dev, sizeof(*hsotg->regset),
+ GFP_KERNEL);
+ if (!hsotg->regset) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hsotg->regset->regs = dwc2_regs;
+ hsotg->regset->nregs = ARRAY_SIZE(dwc2_regs);
+ hsotg->regset->base = hsotg->regs;
+
+ debugfs_create_regset32("regdump", 0444, root, hsotg->regset);
+
+ return 0;
+err:
+ debugfs_remove_recursive(hsotg->debug_root);
+ return ret;
+}
+
+void dwc2_debugfs_exit(struct dwc2_hsotg *hsotg)
+{
+ debugfs_remove_recursive(hsotg->debug_root);
+ hsotg->debug_root = NULL;
+}
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
new file mode 100644
index 000000000..a8605b021
--- /dev/null
+++ b/drivers/usb/dwc2/drd.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drd.c - DesignWare USB2 DRD Controller Dual-role support
+ *
+ * Copyright (C) 2020 STMicroelectronics
+ *
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include "core.h"
+
+#define dwc2_ovr_gotgctl(gotgctl) \
+ ((gotgctl) |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN | \
+ GOTGCTL_DBNCE_FLTR_BYPASS)
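+
+/*
+ * Note that dwc2_ovr_gotgctl() modifies its argument in place: it sets
+ * the A/B-session and VBUS valid override *enable* bits (plus debounce
+ * filter bypass) so that the override *value* bits written afterwards,
+ * such as GOTGCTL_AVALOVAL, actually take effect.
+ */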
+
+static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+ u32 gotgctl;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ dwc2_ovr_gotgctl(gotgctl);
+ gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
+}
+
+static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
+{
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+
+ /* Check if A-Session is already in the right state */
+ if ((valid && (gotgctl & GOTGCTL_ASESVLD)) ||
+ (!valid && !(gotgctl & GOTGCTL_ASESVLD)))
+ return -EALREADY;
+
+ /* Always enable overrides to handle the resume case */
+ dwc2_ovr_gotgctl(gotgctl);
+
+ gotgctl &= ~GOTGCTL_BVALOVAL;
+ if (valid)
+ gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
+ else
+ gotgctl &= ~(GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+
+ return 0;
+}
+
+static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid)
+{
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+
+ /* Check if B-Session is already in the right state */
+ if ((valid && (gotgctl & GOTGCTL_BSESVLD)) ||
+ (!valid && !(gotgctl & GOTGCTL_BSESVLD)))
+ return -EALREADY;
+
+ /* Always enable overrides to handle the resume case */
+ dwc2_ovr_gotgctl(gotgctl);
+
+ gotgctl &= ~GOTGCTL_AVALOVAL;
+ if (valid)
+ gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
+ else
+ gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+
+ return 0;
+}
+
+static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct dwc2_hsotg *hsotg = usb_role_switch_get_drvdata(sw);
+ unsigned long flags;
+ int already = 0;
+
+ /* Skip session not in line with dr_mode */
+ if ((role == USB_ROLE_DEVICE && hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (role == USB_ROLE_HOST && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
+ return -EINVAL;
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Skip session if core is in test mode */
+ if (role == USB_ROLE_NONE && hsotg->test_mode) {
+ dev_dbg(hsotg->dev, "Core is in test mode\n");
+ return -EBUSY;
+ }
+#endif
+
+ /*
+	 * In the USB_DR_MODE_PERIPHERAL case, the clock is disabled at the
+	 * end of probe and re-enabled by udc_start. If the role-switch set
+	 * callback is invoked before udc_start (e.g. a cable is already
+	 * plugged in at boot), the clock must be enabled here so that the
+	 * GOTGCTL and GUSBCFG registers can be read and written to override
+	 * the mode and session state.
+ */
+ if (!hsotg->ll_hw_enabled && hsotg->clk) {
+ int ret = clk_prepare_enable(hsotg->clk);
+
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ if (role == USB_ROLE_NONE) {
+ /* default operation mode when usb role is USB_ROLE_NONE */
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+ }
+
+ if (role == USB_ROLE_HOST) {
+ already = dwc2_ovr_avalid(hsotg, true);
+ } else if (role == USB_ROLE_DEVICE) {
+ already = dwc2_ovr_bvalid(hsotg, true);
+ if (dwc2_is_device_enabled(hsotg)) {
+			/* This clears the DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_connect(hsotg);
+ }
+ } else {
+ if (dwc2_is_device_mode(hsotg)) {
+ if (!dwc2_ovr_bvalid(hsotg, false))
+				/* This sets the DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_disconnect(hsotg);
+ } else {
+ dwc2_ovr_avalid(hsotg, false);
+ }
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ if (!already && hsotg->dr_mode == USB_DR_MODE_OTG)
+ /* This will raise a Connector ID Status Change Interrupt */
+ dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
+
+ if (!hsotg->ll_hw_enabled && hsotg->clk)
+ clk_disable_unprepare(hsotg->clk);
+
+ dev_dbg(hsotg->dev, "%s-session valid\n",
+ role == USB_ROLE_NONE ? "No" :
+ role == USB_ROLE_HOST ? "A" : "B");
+
+ return 0;
+}
+
+int dwc2_drd_init(struct dwc2_hsotg *hsotg)
+{
+ struct usb_role_switch_desc role_sw_desc = {0};
+ struct usb_role_switch *role_sw;
+ int ret;
+
+ if (!device_property_read_bool(hsotg->dev, "usb-role-switch"))
+ return 0;
+
+ hsotg->role_sw_default_mode = usb_get_role_switch_default_mode(hsotg->dev);
+ role_sw_desc.driver_data = hsotg;
+ role_sw_desc.fwnode = dev_fwnode(hsotg->dev);
+ role_sw_desc.set = dwc2_drd_role_sw_set;
+ role_sw_desc.allow_userspace_control = true;
+
+ role_sw = usb_role_switch_register(hsotg->dev, &role_sw_desc);
+ if (IS_ERR(role_sw)) {
+ ret = PTR_ERR(role_sw);
+ dev_err(hsotg->dev,
+ "failed to register role switch: %d\n", ret);
+ return ret;
+ }
+
+ hsotg->role_sw = role_sw;
+
+ /* Enable override and initialize values */
+ dwc2_ovr_init(hsotg);
+
+ return 0;
+}
+
+void dwc2_drd_suspend(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts, gintmsk;
+
+ if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_CONIDSTSCHNG;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS);
+ }
+}
+
+void dwc2_drd_resume(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts, gintmsk;
+ enum usb_role role;
+
+ if (hsotg->role_sw) {
+		/* get the last known role (the get op isn't implemented by this driver) */
+ role = usb_role_switch_get_role(hsotg->role_sw);
+
+ if (role == USB_ROLE_NONE) {
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+ }
+
+ /* restore last role that may have been lost */
+ if (role == USB_ROLE_HOST)
+ dwc2_ovr_avalid(hsotg, true);
+ else if (role == USB_ROLE_DEVICE)
+ dwc2_ovr_bvalid(hsotg, true);
+
+ dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
+
+ dev_dbg(hsotg->dev, "resuming %s-session valid\n",
+ role == USB_ROLE_NONE ? "No" :
+ role == USB_ROLE_HOST ? "A" : "B");
+ }
+
+ if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk |= GINTSTS_CONIDSTSCHNG;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+}
+
+void dwc2_drd_exit(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->role_sw)
+ usb_role_switch_unregister(hsotg->role_sw);
+}
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
new file mode 100644
index 000000000..8b15742d9
--- /dev/null
+++ b/drivers/usb/dwc2/gadget.c
@@ -0,0 +1,5677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * S3C USB2.0 High-speed / OtG driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/composite.h>
+
+
+#include "core.h"
+#include "hw.h"
+
+/* conversion functions */
+static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
+{
+ return container_of(req, struct dwc2_hsotg_req, req);
+}
+
+static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct dwc2_hsotg_ep, ep);
+}
+
+static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct dwc2_hsotg, gadget);
+}
+
+static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
+{
+ dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
+}
+
+static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
+{
+ dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
+}
+
+static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
+ u32 ep_index, u32 dir_in)
+{
+ if (dir_in)
+ return hsotg->eps_in[ep_index];
+ else
+ return hsotg->eps_out[ep_index];
+}
+
+/* forward declaration of functions */
+static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
+
+/**
+ * using_dma - return the DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using DMA.
+ *
+ * Currently, we have the DMA support code worked into everywhere
+ * that needs it, but the AMBA DMA implementation in the hardware can
+ * only DMA from 32bit aligned addresses. This means that gadgets such
+ * as the CDC Ethernet cannot work as they often pass packets which are
+ * not 32bit aligned.
+ *
+ * Unfortunately the choice to use DMA or not is global to the controller
+ * and seems to be only settable when the controller is being put through
+ * a core reset. This means we either need to fix the gadgets to take
+ * account of DMA alignment, or add bounce buffers (yuerk).
+ *
+ * The g_dma parameter is set depending on the devicetree flag.
+ */
+static inline bool using_dma(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.g_dma;
+}
+
+/*
+ * using_desc_dma - return the descriptor DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using descriptor DMA.
+ */
+static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.g_dma_desc;
+}
+
+/**
+ * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
+ * @hs_ep: The endpoint
+ *
+ * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
+ * If an overrun occurs it will wrap the value and set the frame_overrun flag.
+ */
+static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
+ hs_ep->target_frame += hs_ep->interval;
+ if (hs_ep->target_frame > limit) {
+ hs_ep->frame_overrun = true;
+ hs_ep->target_frame &= limit;
+ } else {
+ hs_ep->frame_overrun = false;
+ }
+}
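+
+/*
+ * A worked example of the wrap above, assuming DSTS_SOFFN_LIMIT is the
+ * 14-bit microframe limit 0x3fff: at high speed, target_frame = 0x3ffe
+ * with interval = 4 gives 0x4002 > 0x3fff, so frame_overrun is set and
+ * target_frame wraps to 0x4002 & 0x3fff = 0x0002. At other speeds the
+ * limit is shifted down to 0x7ff frames.
+ */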
+
+/**
+ * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
+ * by one.
+ * @hs_ep: The endpoint.
+ *
+ * This function is used in the service interval based scheduling flow to
+ * calculate the descriptor frame number field value. In service interval
+ * mode the frame number in the descriptor should point to the last
+ * (micro)frame in the interval.
+ *
+ */
+static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
+ if (hs_ep->target_frame)
+ hs_ep->target_frame -= 1;
+ else
+ hs_ep->target_frame = limit;
+}
+
+/**
+ * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
+{
+ u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
+ u32 new_gsintmsk;
+
+ new_gsintmsk = gsintmsk | ints;
+
+ if (new_gsintmsk != gsintmsk) {
+ dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
+ dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
+ }
+}
+
+/**
+ * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to disable
+ */
+static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
+{
+ u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
+ u32 new_gsintmsk;
+
+ new_gsintmsk = gsintmsk & ~ints;
+
+ if (new_gsintmsk != gsintmsk)
+ dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
+}
+
+/**
+ * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
+ * @hsotg: The device state
+ * @ep: The endpoint index
+ * @dir_in: True if direction is in.
+ * @en: The enable value, true to enable
+ *
+ * Set or clear the mask for an individual endpoint's interrupt
+ * request.
+ */
+static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
+ unsigned int ep, unsigned int dir_in,
+ unsigned int en)
+{
+ unsigned long flags;
+ u32 bit = 1 << ep;
+ u32 daint;
+
+ if (!dir_in)
+ bit <<= 16;
+
+ local_irq_save(flags);
+ daint = dwc2_readl(hsotg, DAINTMSK);
+ if (en)
+ daint |= bit;
+ else
+ daint &= ~bit;
+ dwc2_writel(hsotg, daint, DAINTMSK);
+ local_irq_restore(flags);
+}
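+
+/*
+ * In DAINTMSK the low 16 bits mask IN endpoint interrupts and the high
+ * 16 bits mask OUT endpoint interrupts, hence the "bit <<= 16" above:
+ * for example, ep = 1 with dir_in = 0 toggles bit 17 (1 << 17).
+ */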
+
+/**
+ * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->hw_params.en_multiple_tx_fifo)
+ /* In dedicated FIFO mode we need count of IN EPs */
+ return hsotg->hw_params.num_dev_in_eps;
+ else
+ /* In shared FIFO mode we need count of Periodic IN EPs */
+ return hsotg->hw_params.num_dev_perio_in_ep;
+}
+
+/**
+ * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
+ * device mode TX FIFOs
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
+{
+ int addr;
+ int tx_addr_max;
+ u32 np_tx_fifo_size;
+
+ np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
+ hsotg->params.g_np_tx_fifo_size);
+
+	/* Total FIFO memory depth available, in DWORDs */
+ tx_addr_max = hsotg->hw_params.total_fifo_size;
+
+ addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
+ if (tx_addr_max <= addr)
+ return 0;
+
+ return tx_addr_max - addr;
+}
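+
+/*
+ * A minimal worked example with hypothetical sizes: total_fifo_size =
+ * 2048 words, g_rx_fifo_size = 512 and np_tx_fifo_size = 512 leave
+ * 2048 - (512 + 512) = 1024 words of FIFO memory for the device mode
+ * TX FIFOs.
+ */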
+
+/**
+ * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts2;
+ u32 gintmsk2;
+
+ gintsts2 = dwc2_readl(hsotg, GINTSTS2);
+ gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
+ gintsts2 &= gintmsk2;
+
+ if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
+ dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
+ dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
+ dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
+ }
+}
+
+/**
+ * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
+ * TX FIFOs
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
+{
+ int tx_fifo_count;
+ int tx_fifo_depth;
+
+ tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);
+
+ tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+ if (!tx_fifo_count)
+ return tx_fifo_depth;
+ else
+ return tx_fifo_depth / tx_fifo_count;
+}
+
+/**
+ * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
+ * @hsotg: The device instance.
+ */
+static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
+{
+ unsigned int ep;
+ unsigned int addr;
+ int timeout;
+
+ u32 val;
+ u32 *txfsz = hsotg->params.g_tx_fifo_size;
+
+ /* Reset fifo map if not correctly cleared during previous session */
+ WARN_ON(hsotg->fifo_map);
+ hsotg->fifo_map = 0;
+
+ /* set RX/NPTX FIFO sizes */
+ dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
+ dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
+ FIFOSIZE_STARTADDR_SHIFT) |
+ (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
+ GNPTXFSIZ);
+
+ /*
+	 * arrange all the rest of the TX FIFOs, as some versions of this
+ * block have overlapping default addresses. This also ensures
+ * that if the settings have been changed, then they are set to
+ * known values.
+ */
+
+ /* start at the end of the GNPTXFSIZ, rounded up */
+ addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
+
+ /*
+	 * Configure FIFO sizes from the provided configuration and assign
+	 * them to endpoints dynamically according to the maxpacket size of
+	 * each endpoint.
+ */
+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+ if (!txfsz[ep])
+ continue;
+ val = addr;
+ val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
+ "insufficient fifo memory");
+ addr += txfsz[ep];
+
+ dwc2_writel(hsotg, val, DPTXFSIZN(ep));
+ val = dwc2_readl(hsotg, DPTXFSIZN(ep));
+ }
+
+ dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
+ addr << GDFIFOCFG_EPINFOBASE_SHIFT,
+ GDFIFOCFG);
+ /*
+ * according to p428 of the design guide, we need to ensure that
+ * all fifos are flushed before continuing
+ */
+
+ dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
+ GRSTCTL_RXFFLSH, GRSTCTL);
+
+ /* wait until the fifos are both flushed */
+ timeout = 100;
+ while (1) {
+ val = dwc2_readl(hsotg, GRSTCTL);
+
+ if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
+ break;
+
+ if (--timeout == 0) {
+ dev_err(hsotg->dev,
+ "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
+ __func__, val);
+ break;
+ }
+
+ udelay(1);
+ }
+
+ dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
+}
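+
+/*
+ * The resulting layout is a simple stack of address ranges. With
+ * hypothetical sizes g_rx_fifo_size = 512 and g_np_tx_fifo_size = 256,
+ * the RX FIFO occupies words 0..511, the non-periodic TX FIFO starts at
+ * word 512, and the first configured DPTXFSIZN(n) FIFO is placed at
+ * word 768, with each subsequent FIFO directly after the previous one.
+ */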
+
+/**
+ * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
+ * @ep: USB endpoint to allocate request for.
+ * @flags: Allocation flags
+ *
+ * Allocate a new USB request structure appropriate for the specified endpoint
+ */
+static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
+ gfp_t flags)
+{
+ struct dwc2_hsotg_req *req;
+
+ req = kzalloc(sizeof(*req), flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+/**
+ * is_ep_periodic - return true if the endpoint is in periodic mode.
+ * @hs_ep: The endpoint to query.
+ *
+ * Returns true if the endpoint is in periodic mode, meaning it is being
+ * used for an Interrupt or ISO transfer.
+ */
+static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
+{
+ return hs_ep->periodic;
+}
+
+/**
+ * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint for the request
+ * @hs_req: The request being processed.
+ *
+ * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
+ * of a request to ensure the buffer is ready for access by the caller.
+ */
+static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req)
+{
+ struct usb_request *req = &hs_req->req;
+
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
+}
+
+/*
+ * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
+ * for Control endpoint
+ * @hsotg: The device state.
+ *
+ * This function allocates 4 descriptor chains for EP 0: two for the
+ * Setup stage and one each for the IN and OUT data/status transactions.
+ */
+static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
+{
+ hsotg->setup_desc[0] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[0],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[0])
+ goto fail;
+
+ hsotg->setup_desc[1] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[1],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[1])
+ goto fail;
+
+ hsotg->ctrl_in_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_in_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_in_desc)
+ goto fail;
+
+ hsotg->ctrl_out_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_out_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_out_desc)
+ goto fail;
+
+ return 0;
+
+fail:
+ return -ENOMEM;
+}
+
+/**
+ * dwc2_hsotg_write_fifo - write packet data to the TxFIFO
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint we're going to write for.
+ * @hs_req: The request to write data for.
+ *
+ * This is called when the TxFIFO has some space in it to hold a new
+ * transmission and we have something to give it. The actual setup of
+ * the data size is done elsewhere, so all we have to do is to actually
+ * write the data.
+ *
+ * The return value is zero if there is more space (or nothing was done),
+ * otherwise -ENOSPC is returned if the FIFO space was used up.
+ *
+ * This routine is only needed for PIO
+ */
+static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req)
+{
+ bool periodic = is_ep_periodic(hs_ep);
+ u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
+ int buf_pos = hs_req->req.actual;
+ int to_write = hs_ep->size_loaded;
+ void *data;
+ int can_write;
+ int pkt_round;
+ int max_transfer;
+
+ to_write -= (buf_pos - hs_ep->last_load);
+
+ /* if there's nothing to write, get out early */
+ if (to_write == 0)
+ return 0;
+
+ if (periodic && !hsotg->dedicated_fifos) {
+ u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
+ int size_left;
+ int size_done;
+
+ /*
+ * work out how much data was loaded so we can calculate
+ * how much data is left in the fifo.
+ */
+
+ size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+
+ /*
+ * if shared fifo, we cannot write anything until the
+ * previous data has been completely sent.
+ */
+ if (hs_ep->fifo_load != 0) {
+ dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
+ return -ENOSPC;
+ }
+
+ dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
+ __func__, size_left,
+ hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
+
+ /* how much of the data has moved */
+ size_done = hs_ep->size_loaded - size_left;
+
+ /* how much data is left in the fifo */
+ can_write = hs_ep->fifo_load - size_done;
+ dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
+ __func__, can_write);
+
+ can_write = hs_ep->fifo_size - can_write;
+ dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
+ __func__, can_write);
+
+ if (can_write <= 0) {
+ dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
+ return -ENOSPC;
+ }
+ } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
+ can_write = dwc2_readl(hsotg,
+ DTXFSTS(hs_ep->fifo_index));
+
+ can_write &= 0xffff;
+ can_write *= 4;
+ } else {
+ if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
+ dev_dbg(hsotg->dev,
+ "%s: no queue slots available (0x%08x)\n",
+ __func__, gnptxsts);
+
+ dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
+ return -ENOSPC;
+ }
+
+ can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
+ can_write *= 4; /* fifo size is in 32bit quantities. */
+ }
+
+ max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
+
+ dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
+ __func__, gnptxsts, can_write, to_write, max_transfer);
+
+ /*
+	 * Limit to 512 bytes of data: at least on the non-periodic FIFO,
+	 * requests of more than 512 bytes seem to cause the endpoint to get
+	 * stuck with a fragment of the end of the transfer in it.
+ */
+ if (can_write > 512 && !periodic)
+ can_write = 512;
+
+ /*
+ * limit the write to one max-packet size worth of data, but allow
+ * the transfer to return that it did not run out of fifo space
+ * doing it.
+ */
+ if (to_write > max_transfer) {
+ to_write = max_transfer;
+
+ /* it's needed only when we do not use dedicated fifos */
+ if (!hsotg->dedicated_fifos)
+ dwc2_hsotg_en_gsint(hsotg,
+ periodic ? GINTSTS_PTXFEMP :
+ GINTSTS_NPTXFEMP);
+ }
+
+ /* see if we can write data */
+
+ if (to_write > can_write) {
+ to_write = can_write;
+ pkt_round = to_write % max_transfer;
+
+ /*
+ * Round the write down to an
+ * exact number of packets.
+ *
+ * Note, we do not currently check to see if we can ever
+ * write a full packet or not to the FIFO.
+ */
+
+ if (pkt_round)
+ to_write -= pkt_round;
+
+ /*
+ * enable correct FIFO interrupt to alert us when there
+ * is more room left.
+ */
+
+ /* it's needed only when we do not use dedicated fifos */
+ if (!hsotg->dedicated_fifos)
+ dwc2_hsotg_en_gsint(hsotg,
+ periodic ? GINTSTS_PTXFEMP :
+ GINTSTS_NPTXFEMP);
+ }
+
+ dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
+ to_write, hs_req->req.length, can_write, buf_pos);
+
+ if (to_write <= 0)
+ return -ENOSPC;
+
+ hs_req->req.actual = buf_pos + to_write;
+ hs_ep->total_data += to_write;
+
+ if (periodic)
+ hs_ep->fifo_load += to_write;
+
+ to_write = DIV_ROUND_UP(to_write, 4);
+ data = hs_req->req.buf + buf_pos;
+
+ dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
+
+ return (to_write >= can_write) ? -ENOSPC : 0;
+}
+
+/**
+ * get_ep_limit - get the maximum data length for this endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * so that transfers that are too long can be split.
+ */
+static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
+{
+ int index = hs_ep->index;
+ unsigned int maxsize;
+ unsigned int maxpkt;
+
+ if (index != 0) {
+ maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
+ maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
+ } else {
+ maxsize = 64 + 64;
+ if (hs_ep->dir_in)
+ maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
+ else
+ maxpkt = 2;
+ }
+
+ /* we made the constant loading easier above by using +1 */
+ maxpkt--;
+ maxsize--;
+
+ /*
+ * constrain by packet count if maxpkts*pktsize is greater
+ * than the length register size.
+ */
+
+ if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
+ maxsize = maxpkt * hs_ep->ep.maxpacket;
+
+ return maxsize;
+}
+
+/**
+ * dwc2_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return the current frame number
+ */
+static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
+{
+ u32 dsts;
+
+ dsts = dwc2_readl(hsotg, DSTS);
+ dsts &= DSTS_SOFFN_MASK;
+ dsts >>= DSTS_SOFFN_SHIFT;
+
+ return dsts;
+}
+
+/**
+ * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
+ * DMA descriptor chain prepared for specific endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * depending on its descriptor chain capacity so that transfers that
+ * are too long can be split.
+ */
+static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ int is_isoc = hs_ep->isochronous;
+ unsigned int maxsize;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+
+ if (is_isoc)
+ maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+ DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
+ MAX_DMA_DESC_NUM_HS_ISOC;
+ else
+ maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
+
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
+ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
+
+ return maxsize;
+}
+
+/*
+ * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
+ * @hs_ep: The endpoint
+ * @mask: RX/TX bytes mask to be defined
+ *
+ * Returns maximum data payload for one descriptor after analyzing endpoint
+ * characteristics.
+ * DMA descriptor transfer bytes limit depends on EP type:
+ * Control out - MPS,
+ * Isochronous - descriptor rx/tx bytes bitfield limit,
+ * Control In/Bulk/Interrupt - multiple of mps. This avoids
+ * concatenating data from several descriptors within one packet.
+ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
+ * to a single descriptor.
+ *
+ * Selects corresponding mask for RX/TX bytes as well.
+ */
+static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+ u32 desc_size = 0;
+
+ if (!hs_ep->index && !dir_in) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ } else if (hs_ep->isochronous) {
+ if (dir_in) {
+ desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
+ } else {
+ desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
+ }
+ } else {
+ desc_size = DEV_DMA_NBYTES_LIMIT;
+ *mask = DEV_DMA_NBYTES_MASK;
+
+ /* Round down desc_size to be mps multiple */
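+		/* e.g. with the usual 16-bit NBYTES field and mps 512: 65535 -> 65024 */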
+ desc_size -= desc_size % mps;
+ }
+
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ }
+
+ return desc_size;
+}
+
+static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_dma_desc **desc,
+ dma_addr_t dma_buff,
+ unsigned int len,
+ bool true_last)
+{
+ int dir_in = hs_ep->dir_in;
+ u32 mps = hs_ep->ep.maxpacket;
+ u32 maxsize = 0;
+ u32 offset = 0;
+ u32 mask = 0;
+ int i;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+
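+	/* number of descriptors needed: len / maxsize rounded up, at least 1 */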
+ hs_ep->desc_count = (len / maxsize) +
+ ((len % maxsize) ? 1 : 0);
+ if (len == 0)
+ hs_ep->desc_count = 1;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ (*desc)->status = 0;
+ (*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
+ << DEV_DMA_BUFF_STS_SHIFT);
+
+ if (len > maxsize) {
+ if (!hs_ep->index && !dir_in)
+ (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ (*desc)->status |=
+ maxsize << DEV_DMA_NBYTES_SHIFT & mask;
+ (*desc)->buf = dma_buff + offset;
+
+ len -= maxsize;
+ offset += maxsize;
+ } else {
+ if (true_last)
+ (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ if (dir_in)
+ (*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
+ ((hs_ep->send_zlp && true_last) ?
+ DEV_DMA_SHORT : 0);
+
+ (*desc)->status |=
+ len << DEV_DMA_NBYTES_SHIFT & mask;
+ (*desc)->buf = dma_buff + offset;
+ }
+
+ (*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
+ (*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
+ << DEV_DMA_BUFF_STS_SHIFT);
+ (*desc)++;
+ }
+}
+
+/*
+ * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
+ * @hs_ep: The endpoint
+ * @dma_buff: DMA address of the transfer buffer
+ * @len: Length of the transfer
+ *
+ * This function iterates over the descriptor chain and fills its entries
+ * with the corresponding information based on the transfer data.
+ */
+static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff,
+ unsigned int len)
+{
+ struct usb_request *ureq = NULL;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ struct scatterlist *sg;
+ int i;
+ u8 desc_count = 0;
+
+ if (hs_ep->req)
+ ureq = &hs_ep->req->req;
+
+ /* non-DMA sg buffer */
+ if (!ureq || !ureq->num_sgs) {
+ dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
+ dma_buff, len, true);
+ return;
+ }
+
+ /* DMA sg buffer */
+ for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
+ dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
+ sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
+ sg_is_last(sg));
+ desc_count += hs_ep->desc_count;
+ }
+
+ hs_ep->desc_count = desc_count;
+}
+
+/*
+ * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
+ * @hs_ep: The isochronous endpoint.
+ * @dma_buff: usb requests dma buffer.
+ * @len: usb request transfer length.
+ *
+ * Fills the next free descriptor with the data of the arrived usb request
+ * and the frame info, sets the Last and IOC bits, and increments next_desc.
+ * If the filled descriptor is not the first one, removes the L bit from the
+ * previous descriptor's status.
+ */
+static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff, unsigned int len)
+{
+ struct dwc2_dma_desc *desc;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 index;
+ u32 mask = 0;
+ u8 pid = 0;
+
+ dwc2_gadget_get_desc_params(hs_ep, &mask);
+
+ index = hs_ep->next_desc;
+ desc = &hs_ep->desc_list[index];
+
+ /* Check if descriptor chain full */
+ if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
+ DEV_DMA_BUFF_STS_HREADY) {
+ dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
+ return 1;
+ }
+
+	/* Clear L bit of previous desc if there is more than one entry in the chain */
+ if (hs_ep->next_desc)
+ hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
+
+ dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
+ __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
+
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);
+
+ desc->buf = dma_buff;
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
+ ((len << DEV_DMA_NBYTES_SHIFT) & mask));
+
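+	/*
+	 * For ISOC IN the PID field holds the number of packets to send in
+	 * the (micro)frame: len / mps rounded up, or 1 for a zero-length
+	 * packet.
+	 */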
+ if (hs_ep->dir_in) {
+ if (len)
+ pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+ else
+ pid = 1;
+ desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
+ DEV_DMA_ISOC_PID_MASK) |
+ ((len % hs_ep->ep.maxpacket) ?
+ DEV_DMA_SHORT : 0) |
+ ((hs_ep->target_frame <<
+ DEV_DMA_ISOC_FRNUM_SHIFT) &
+ DEV_DMA_ISOC_FRNUM_MASK);
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
+
+ /* Increment frame number by interval for IN */
+ if (hs_ep->dir_in)
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ /* Update index of last configured entry in the chain */
+ hs_ep->next_desc++;
+ if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
+ hs_ep->next_desc = 0;
+
+ return 0;
+}
+
+/*
+ * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
+ * @hs_ep: The isochronous endpoint.
+ *
+ * Prepare descriptor chain for isochronous endpoints. Afterwards
+ * write DMA address to HW and enable the endpoint.
+ */
+static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req, *treq;
+ int index = hs_ep->index;
+ int ret;
+ int i;
+ u32 dma_reg;
+ u32 depctl;
+ u32 ctrl;
+ struct dwc2_dma_desc *desc;
+
+ if (list_empty(&hs_ep->queue)) {
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
+ return;
+ }
+
+	/* Initialize descriptor chain with Host Busy status */
+ for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
+ desc = &hs_ep->desc_list[i];
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+ << DEV_DMA_BUFF_STS_SHIFT);
+ }
+
+ hs_ep->next_desc = 0;
+ list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
+ dma_addr_t dma_addr = hs_req->req.dma;
+
+ if (hs_req->req.num_sgs) {
+ WARN_ON(hs_req->req.num_sgs > 1);
+ dma_addr = sg_dma_address(hs_req->req.sg);
+ }
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
+ hs_req->req.length);
+ if (ret)
+ break;
+ }
+
+ hs_ep->compl_desc = 0;
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+
+ /* write descriptor chain address to control register */
+ dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
+
+ ctrl = dwc2_readl(hsotg, depctl);
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(hsotg, ctrl, depctl);
+}
+
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
+static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req,
+ int result);
+
+/**
+ * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint to process a request for
+ * @hs_req: The request to start.
+ * @continuing: True if we are doing more for the current request.
+ *
+ * Start the given request running by setting the endpoint registers
+ * appropriately, and writing any data to the FIFOs.
+ */
+static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req,
+ bool continuing)
+{
+ struct usb_request *ureq = &hs_req->req;
+ int index = hs_ep->index;
+ int dir_in = hs_ep->dir_in;
+ u32 epctrl_reg;
+ u32 epsize_reg;
+ u32 epsize;
+ u32 ctrl;
+ unsigned int length;
+ unsigned int packets;
+ unsigned int maxreq;
+ unsigned int dma_reg;
+
+ if (index != 0) {
+ if (hs_ep->req && !continuing) {
+ dev_err(hsotg->dev, "%s: active request\n", __func__);
+ WARN_ON(1);
+ return;
+ } else if (hs_ep->req != hs_req && continuing) {
+ dev_err(hsotg->dev,
+ "%s: continue different req\n", __func__);
+ WARN_ON(1);
+ return;
+ }
+ }
+
+ dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
+ epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
+ __func__, dwc2_readl(hsotg, epctrl_reg), index,
+ hs_ep->dir_in ? "in" : "out");
+
+ /* If endpoint is stalled, we will restart request later */
+ ctrl = dwc2_readl(hsotg, epctrl_reg);
+
+ if (index && ctrl & DXEPCTL_STALL) {
+ dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
+ return;
+ }
+
+ length = ureq->length - ureq->actual;
+ dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
+ ureq->length, ureq->actual);
+
+ if (!using_desc_dma(hsotg))
+ maxreq = get_ep_limit(hs_ep);
+ else
+ maxreq = dwc2_gadget_get_chain_limit(hs_ep);
+
+ if (length > maxreq) {
+ int round = maxreq % hs_ep->ep.maxpacket;
+
+ dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
+ __func__, length, maxreq, round);
+
+ /* round down to multiple of packets */
+ if (round)
+ maxreq -= round;
+
+ length = maxreq;
+ }
+
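+	/*
+	 * e.g. a 1000-byte request on a 64-byte bulk EP is programmed as
+	 * DIV_ROUND_UP(1000, 64) = 16 packets, the last one short.
+	 */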
+ if (length)
+ packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
+ else
+ packets = 1; /* send one packet if length is zero. */
+
+ if (dir_in && index != 0)
+ if (hs_ep->isochronous)
+ epsize = DXEPTSIZ_MC(packets);
+ else
+ epsize = DXEPTSIZ_MC(1);
+ else
+ epsize = 0;
+
+ /*
+ * zero length packet should be programmed on its own and should not
+ * be counted in DIEPTSIZ.PktCnt with other packets.
+ */
+ if (dir_in && ureq->zero && !continuing) {
+ /* Test if zlp is actually required. */
+ if ((ureq->length >= hs_ep->ep.maxpacket) &&
+ !(ureq->length % hs_ep->ep.maxpacket))
+ hs_ep->send_zlp = 1;
+ }
+
+ epsize |= DXEPTSIZ_PKTCNT(packets);
+ epsize |= DXEPTSIZ_XFERSIZE(length);
+
+ dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
+ __func__, packets, length, ureq->length, epsize, epsize_reg);
+
+ /* store the request as the current one we're doing */
+ hs_ep->req = hs_req;
+
+ if (using_desc_dma(hsotg)) {
+ u32 offset = 0;
+ u32 mps = hs_ep->ep.maxpacket;
+
+ /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
+ if (!dir_in) {
+ if (!index)
+ length = mps;
+ else if (length % mps)
+ length += (mps - (length % mps));
+ }
+
+ if (continuing)
+ offset = ureq->actual;
+
+ /* Fill DDMA chain entries */
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
+ length);
+
+ /* write descriptor chain address to control register */
+ dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
+ __func__, (u32)hs_ep->desc_list_dma, dma_reg);
+ } else {
+ /* write size / packets */
+ dwc2_writel(hsotg, epsize, epsize_reg);
+
+ if (using_dma(hsotg) && !continuing && (length != 0)) {
+ /*
+ * write DMA address to control register, buffer
+ * already synced by dwc2_hsotg_ep_queue().
+ */
+
+ dwc2_writel(hsotg, ureq->dma, dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
+ __func__, &ureq->dma, dma_reg);
+ }
+ }
+
+ if (hs_ep->isochronous) {
+ if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ if (hs_ep->interval == 1) {
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+ }
+ ctrl |= DXEPCTL_CNAK;
+ } else {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+ return;
+ }
+ }
+
+ ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
+
+ dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
+
+ /* For Setup request do not clear NAK */
+ if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
+ ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+ dwc2_writel(hsotg, ctrl, epctrl_reg);
+
+ /*
+	 * set these; it seems that DMA support increments past the end
+	 * of the packet buffer, so we need to calculate the length from
+	 * this information.
+ */
+ hs_ep->size_loaded = length;
+ hs_ep->last_load = ureq->actual;
+
+ if (dir_in && !using_dma(hsotg)) {
+ /* set these anyway, we may need them for non-periodic in */
+ hs_ep->fifo_load = 0;
+
+ dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+ }
+
+ /*
+ * Note, trying to clear the NAK here causes problems with transmit
+ * on the S3C6400 ending up with the TXFIFO becoming full.
+ */
+
+ /* check ep is enabled */
+ if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
+ dev_dbg(hsotg->dev,
+ "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
+ index, dwc2_readl(hsotg, epctrl_reg));
+
+ dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
+ __func__, dwc2_readl(hsotg, epctrl_reg));
+
+ /* enable ep interrupts */
+ dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
+}
+
+/**
+ * dwc2_hsotg_map_dma - map the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request is on.
+ * @req: The request being processed.
+ *
+ * We've been asked to queue a request, so ensure that the memory buffer
+ * is correctly setup for DMA. If we've been passed an extant DMA address
+ * then ensure the buffer has been synced to memory. If our buffer has no
+ * DMA memory, then we map the memory and mark our request to allow us to
+ * cleanup on completion.
+ */
+static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct usb_request *req)
+{
+ int ret;
+
+ hs_ep->map_dir = hs_ep->dir_in;
+ ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
+ if (ret)
+ goto dma_error;
+
+ return 0;
+
+dma_error:
+ dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
+ __func__, req->buf, req->length);
+
+ return -EIO;
+}
+
+static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req)
+{
+ void *req_buf = hs_req->req.buf;
+
+ /* If dma is not being used or buffer is aligned */
+ if (!using_dma(hsotg) || !((long)req_buf & 3))
+ return 0;
+
+ WARN_ON(hs_req->saved_req_buf);
+
+ dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
+ hs_ep->ep.name, req_buf, hs_req->req.length);
+
+ hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
+ if (!hs_req->req.buf) {
+ hs_req->req.buf = req_buf;
+ dev_err(hsotg->dev,
+ "%s: unable to allocate memory for bounce buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Save actual buffer */
+ hs_req->saved_req_buf = req_buf;
+
+ if (hs_ep->dir_in)
+ memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
+ return 0;
+}
+
+static void
+dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req)
+{
+ /* If dma is not being used or buffer was aligned */
+ if (!using_dma(hsotg) || !hs_req->saved_req_buf)
+ return;
+
+ dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
+ hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
+
+ /* Copy data from bounce buffer on successful out transfer */
+ if (!hs_ep->dir_in && !hs_req->req.status)
+ memcpy(hs_req->saved_req_buf, hs_req->req.buf,
+ hs_req->req.actual);
+
+ /* Free bounce buffer */
+ kfree(hs_req->req.buf);
+
+ hs_req->req.buf = hs_req->saved_req_buf;
+ hs_req->saved_req_buf = NULL;
+}
+
+/**
+ * dwc2_gadget_target_frame_elapsed - Checks target frame
+ * @hs_ep: The driver endpoint to check
+ *
+ * Returns true if the target frame has elapsed; in that case the
+ * corresponding transfer must be dropped.
+ */
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 target_frame = hs_ep->target_frame;
+ u32 current_frame = hsotg->frame_number;
+ bool frame_overrun = hs_ep->frame_overrun;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
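+	/*
+	 * DSTS.SOFFN counts microframes at high speed; at lower speeds there
+	 * are no microframes, so scale the limit down by a factor of 8.
+	 */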
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
+ if (!frame_overrun && current_frame >= target_frame)
+ return true;
+
+ if (frame_overrun && current_frame >= target_frame &&
+ ((current_frame - target_frame) < limit / 2))
+ return true;
+
+ return false;
+}
+
+/*
+ * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
+ * @hsotg: The driver state
+ * @hs_ep: the ep descriptor chain is for
+ *
+ * Called to update EP0 structure's pointers depending on the stage of
+ * the control transfer.
+ */
+static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ switch (hsotg->ep0_state) {
+ case DWC2_EP0_SETUP:
+ case DWC2_EP0_STATUS_OUT:
+ hs_ep->desc_list = hsotg->setup_desc[0];
+ hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
+ break;
+ case DWC2_EP0_DATA_IN:
+ case DWC2_EP0_STATUS_IN:
+ hs_ep->desc_list = hsotg->ctrl_in_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
+ break;
+ case DWC2_EP0_DATA_OUT:
+ hs_ep->desc_list = hsotg->ctrl_out_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
+ break;
+ default:
+ dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
+ hsotg->ep0_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct dwc2_hsotg_req *hs_req = our_req(req);
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hs = hs_ep->parent;
+ bool first;
+ int ret;
+ u32 maxsize = 0;
+ u32 mask = 0;
+
+ dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
+ ep->name, req, req->length, req->buf, req->no_interrupt,
+ req->zero, req->short_not_ok);
+
+ /* Prevent new request submission when controller is suspended */
+ if (hs->lx_state != DWC2_L0) {
+ dev_dbg(hs->dev, "%s: submit request only in active state\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ /* initialise status of the request */
+ INIT_LIST_HEAD(&hs_req->queue);
+ req->actual = 0;
+ req->status = -EINPROGRESS;
+
+ /* Don't queue ISOC request if length greater than mps*mc */
+ if (hs_ep->isochronous &&
+ req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
+ dev_err(hs->dev, "req length > maxpacket*mc\n");
+ return -EINVAL;
+ }
+
+	/* In DDMA mode, don't queue an ISOC request if its length is
+	 * greater than the descriptor limits.
+	 */
+ if (using_desc_dma(hs) && hs_ep->isochronous) {
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+ if (hs_ep->dir_in && req->length > maxsize) {
+ dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
+ req->length, maxsize);
+ return -EINVAL;
+ }
+
+ if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
+ dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
+ req->length, hs_ep->ep.maxpacket);
+ return -EINVAL;
+ }
+ }
+
+ ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
+ if (ret)
+ return ret;
+
+ /* if we're using DMA, sync the buffers as necessary */
+ if (using_dma(hs)) {
+ ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
+ if (ret)
+ return ret;
+ }
+ /* If using descriptor DMA configure EP0 descriptor chain pointers */
+ if (using_desc_dma(hs) && !hs_ep->index) {
+ ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
+ if (ret)
+ return ret;
+ }
+
+ first = list_empty(&hs_ep->queue);
+ list_add_tail(&hs_req->queue, &hs_ep->queue);
+
+ /*
+ * Handle DDMA isochronous transfers separately - just add new entry
+ * to the descriptor chain.
+ * Transfer will be started once SW gets either one of NAK or
+ * OutTknEpDis interrupts.
+ */
+ if (using_desc_dma(hs) && hs_ep->isochronous) {
+ if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
+ dma_addr_t dma_addr = hs_req->req.dma;
+
+ if (hs_req->req.num_sgs) {
+ WARN_ON(hs_req->req.num_sgs > 1);
+ dma_addr = sg_dma_address(hs_req->req.sg);
+ }
+ dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
+ hs_req->req.length);
+ }
+ return 0;
+ }
+
+ /* Change EP direction if status phase request is after data out */
+ if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
+ hs->ep0_state == DWC2_EP0_DATA_OUT)
+ hs_ep->dir_in = 1;
+
+ if (first) {
+ if (!hs_ep->isochronous) {
+ dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ return 0;
+ }
+
+ /* Update current frame number value. */
+ hs->frame_number = dwc2_hsotg_read_frameno(hs);
+ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ dwc2_gadget_incr_frame_num(hs_ep);
+ /* Update current frame number value once more as it
+ * changes here.
+ */
+ hs->frame_number = dwc2_hsotg_read_frameno(hs);
+ }
+
+ if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
+ dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ }
+ return 0;
+}
+
+static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hs = hs_ep->parent;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hs->lock, flags);
+ ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
+ spin_unlock_irqrestore(&hs->lock, flags);
+
+ return ret;
+}
+
+static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct dwc2_hsotg_req *hs_req = our_req(req);
+
+ kfree(hs_req);
+}
+
+/**
+ * dwc2_hsotg_complete_oursetup - setup completion callback
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself
+ * submitted that need cleaning up.
+ */
+static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+
+ dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
+
+ dwc2_hsotg_ep_free_request(ep, req);
+}
+
+/**
+ * ep_from_windex - convert control wIndex value to endpoint
+ * @hsotg: The driver state.
+ * @windex: The control request wIndex field (in host order).
+ *
+ * Convert the given wIndex into a pointer to a driver endpoint
+ * structure, or return NULL if it is not a valid endpoint.
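+ *
+ * For example, a wIndex of 0x0081 (USB_DIR_IN | 1) selects the EP1 IN
+ * endpoint, while 0x0001 selects EP1 OUT.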
+ */
+static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ u32 windex)
+{
+ int dir = (windex & USB_DIR_IN) ? 1 : 0;
+ int idx = windex & 0x7F;
+
+ if (windex >= 0x100)
+ return NULL;
+
+ if (idx > hsotg->num_of_eps)
+ return NULL;
+
+ return index_to_ep(hsotg, idx, dir);
+}
+
+/**
+ * dwc2_hsotg_set_test_mode - Enable usb Test Modes
+ * @hsotg: The driver state.
+ * @testmode: requested usb test mode
+ * Enable usb Test Mode requested by the Host.
+ */
+int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
+{
+ int dctl = dwc2_readl(hsotg, DCTL);
+
+ dctl &= ~DCTL_TSTCTL_MASK;
+ switch (testmode) {
+ case USB_TEST_J:
+ case USB_TEST_K:
+ case USB_TEST_SE0_NAK:
+ case USB_TEST_PACKET:
+ case USB_TEST_FORCE_ENABLE:
+ dctl |= testmode << DCTL_TSTCTL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dwc2_writel(hsotg, dctl, DCTL);
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_send_reply - send reply to control request
+ * @hsotg: The device state
+ * @ep: Endpoint 0
+ * @buff: Buffer for request
+ * @length: Length of reply.
+ *
+ * Create a request and queue it on the given endpoint. This is useful as
+ * an internal method of sending replies to certain control requests, etc.
+ */
+static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *ep,
+ void *buff,
+ int length)
+{
+ struct usb_request *req;
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
+
+ req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
+ hsotg->ep0_reply = req;
+ if (!req) {
+ dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
+ return -ENOMEM;
+ }
+
+ req->buf = hsotg->ep0_buff;
+ req->length = length;
+ /*
+	 * the zero flag is for sending a zlp in the DATA IN stage. It has no
+	 * impact on the STATUS stage.
+ */
+ req->zero = 0;
+ req->complete = dwc2_hsotg_complete_oursetup;
+
+ if (length)
+ memcpy(req->buf, buff, length);
+
+ ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
+ if (ret) {
+ dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_process_req_status - process request GET_STATUS
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ struct dwc2_hsotg_ep *ep;
+ __le16 reply;
+ u16 status;
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+
+ if (!ep0->dir_in) {
+ dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = hsotg->gadget.is_selfpowered <<
+ USB_DEVICE_SELF_POWERED;
+ status |= hsotg->remote_wakeup_allowed <<
+ USB_DEVICE_REMOTE_WAKEUP;
+ reply = cpu_to_le16(status);
+ break;
+
+ case USB_RECIP_INTERFACE:
+ /* currently, the data result should be zero */
+ reply = cpu_to_le16(0);
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+ if (!ep)
+ return -ENOENT;
+
+ reply = cpu_to_le16(ep->halted ? 1 : 0);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (le16_to_cpu(ctrl->wLength) != 2)
+ return -EINVAL;
+
+ ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
+ return ret;
+ }
+
+ return 1;
+}
+
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
+
+/**
+ * get_ep_head - return the first request on the endpoint
+ * @hs_ep: The controller endpoint to get
+ *
+ * Get the first request on the endpoint.
+ */
+static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
+{
+ return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
+ queue);
+}
+
+/**
+ * dwc2_gadget_start_next_request - Starts next request from ep queue
+ * @hs_ep: Endpoint structure
+ *
+ * If the queue is empty and the EP is ISOC-OUT, this unmasks OUTTKNEPDIS,
+ * which is masked in its handler, so that resynchronization can take
+ * place.
+ */
+static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_hsotg_req *hs_req;
+
+ if (!list_empty(&hs_ep->queue)) {
+ hs_req = get_ep_head(hs_ep);
+ dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+ return;
+ }
+ if (!hs_ep->isochronous)
+ return;
+
+ if (dir_in) {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
+ __func__);
+ } else {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
+ __func__);
+ }
+}
+
+/**
+ * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ struct dwc2_hsotg_req *hs_req;
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ struct dwc2_hsotg_ep *ep;
+ int ret;
+ bool halted;
+ u32 recip;
+ u32 wValue;
+ u32 wIndex;
+
+ dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
+ __func__, set ? "SET" : "CLEAR");
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if (set)
+ hsotg->remote_wakeup_allowed = 1;
+ else
+ hsotg->remote_wakeup_allowed = 0;
+ break;
+
+ case USB_DEVICE_TEST_MODE:
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
+ hsotg->test_mode = wIndex >> 8;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ if (ret) {
+ dev_err(hsotg->dev,
+ "%s: failed to send reply\n", __func__);
+ return ret;
+ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ ep = ep_from_windex(hsotg, wIndex);
+ if (!ep) {
+ dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
+ __func__, wIndex);
+ return -ENOENT;
+ }
+
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ halted = ep->halted;
+
+ if (!ep->wedged)
+ dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
+
+ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ if (ret) {
+ dev_err(hsotg->dev,
+ "%s: failed to send reply\n", __func__);
+ return ret;
+ }
+
+ /*
+ * we have to complete all requests for ep if it was
+ * halted, and the halt was cleared by CLEAR_FEATURE
+ */
+
+ if (!set && halted) {
+ /*
+ * If we have request in progress,
+ * then complete it
+ */
+ if (ep->req) {
+ hs_req = ep->req;
+ ep->req = NULL;
+ list_del_init(&hs_req->queue);
+ if (hs_req->req.complete) {
+ spin_unlock(&hsotg->lock);
+ usb_gadget_giveback_request(
+ &ep->ep, &hs_req->req);
+ spin_lock(&hsotg->lock);
+ }
+ }
+
+ /* If we have pending request, then start it */
+ if (!ep->req)
+ dwc2_gadget_start_next_request(ep);
+ }
+
+ break;
+
+ default:
+ return -ENOENT;
+ }
+ break;
+ default:
+ return -ENOENT;
+ }
+ return 1;
+}
+
+static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hsotg_stall_ep0 - stall ep0
+ * @hsotg: The device state
+ *
+ * Set stall for ep0 as response for setup request.
+ */
+static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ u32 reg;
+ u32 ctrl;
+
+ dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+ reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
+
+ /*
+ * DxEPCTL_Stall will be cleared by EP once it has
+ * taken effect, so no need to clear later.
+ */
+
+ ctrl = dwc2_readl(hsotg, reg);
+ ctrl |= DXEPCTL_STALL;
+ ctrl |= DXEPCTL_CNAK;
+ dwc2_writel(hsotg, ctrl, reg);
+
+ dev_dbg(hsotg->dev,
+ "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
+ ctrl, reg, dwc2_readl(hsotg, reg));
+
+ /*
+ * complete won't be called, so we enqueue
+ * setup request here
+ */
+ dwc2_hsotg_enqueue_setup(hsotg);
+}
+
+/**
+ * dwc2_hsotg_process_control - process a control request
+ * @hsotg: The device state
+ * @ctrl: The control request received
+ *
+ * The controller has received the SETUP phase of a control request, and
+ * needs to work out what to do next (and whether to pass it on to the
+ * gadget driver).
+ */
+static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ int ret = 0;
+ u32 dcfg;
+
+ dev_dbg(hsotg->dev,
+ "ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
+ ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
+ ctrl->wIndex, ctrl->wLength);
+
+ if (ctrl->wLength == 0) {
+ ep0->dir_in = 1;
+ hsotg->ep0_state = DWC2_EP0_STATUS_IN;
+ } else if (ctrl->bRequestType & USB_DIR_IN) {
+ ep0->dir_in = 1;
+ hsotg->ep0_state = DWC2_EP0_DATA_IN;
+ } else {
+ ep0->dir_in = 0;
+ hsotg->ep0_state = DWC2_EP0_DATA_OUT;
+ }
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ hsotg->connected = 1;
+ dcfg = dwc2_readl(hsotg, DCFG);
+ dcfg &= ~DCFG_DEVADDR_MASK;
+ dcfg |= (le16_to_cpu(ctrl->wValue) <<
+ DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
+ dwc2_writel(hsotg, dcfg, DCFG);
+
+ dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
+
+ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ return;
+
+ case USB_REQ_GET_STATUS:
+ ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
+ break;
+ }
+ }
+
+ /* as a fallback, try delivering it to the driver to deal with */
+
+ if (ret == 0 && hsotg->driver) {
+ spin_unlock(&hsotg->lock);
+ ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
+ spin_lock(&hsotg->lock);
+ if (ret < 0)
+ dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
+ }
+
+ hsotg->delayed_status = false;
+ if (ret == USB_GADGET_DELAYED_STATUS)
+ hsotg->delayed_status = true;
+
+ /*
+	 * the request either cannot be handled or is not formatted correctly,
+	 * so respond with a STALL for the status stage to indicate failure.
+ */
+
+ if (ret < 0)
+ dwc2_hsotg_stall_ep0(hsotg);
+}
+
+/**
+ * dwc2_hsotg_complete_setup - completion of a setup transfer
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself submitted for
+ * EP0 setup packets
+ */
+static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+
+ if (req->status < 0) {
+ dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
+ return;
+ }
+
+ spin_lock(&hsotg->lock);
+ if (req->actual == 0)
+ dwc2_hsotg_enqueue_setup(hsotg);
+ else
+ dwc2_hsotg_process_control(hsotg, req->buf);
+ spin_unlock(&hsotg->lock);
+}
+
+/**
+ * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
+ * @hsotg: The device state.
+ *
+ * Enqueue a request on EP0 if necessary to receive any SETUP packets
+ * sent from the host.
+ */
+static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
+{
+ struct usb_request *req = hsotg->ctrl_req;
+ struct dwc2_hsotg_req *hs_req = our_req(req);
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
+
+ req->zero = 0;
+ req->length = 8;
+ req->buf = hsotg->ctrl_buff;
+ req->complete = dwc2_hsotg_complete_setup;
+
+ if (!list_empty(&hs_req->queue)) {
+ dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
+ return;
+ }
+
+ hsotg->eps_out[0]->dir_in = 0;
+ hsotg->eps_out[0]->send_zlp = 0;
+ hsotg->ep0_state = DWC2_EP0_SETUP;
+
+ ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
+ /*
+ * Don't think there's much we can do other than watch the
+ * driver fail.
+ */
+ }
+}
+
+static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 ctrl;
+ u8 index = hs_ep->index;
+ u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
+
+ if (hs_ep->dir_in)
+ dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
+ index);
+ else
+ dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
+ index);
+ if (using_desc_dma(hsotg)) {
+		/* No specific buffer needed for ep0 ZLP */
+ dma_addr_t dma = hs_ep->desc_list_dma;
+
+ if (!index)
+ dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
+ } else {
+ dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(0),
+ epsiz_reg);
+ }
+
+ ctrl = dwc2_readl(hsotg, epctl_reg);
+ ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
+ ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
+ ctrl |= DXEPCTL_USBACTEP;
+ dwc2_writel(hsotg, ctrl, epctl_reg);
+}
+
+/**
+ * dwc2_hsotg_complete_request - complete a request given to us
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * The given request has finished, so call the necessary completion
+ * if it has one and then look to see if we can start a new request
+ * on the endpoint.
+ *
+ * Note, expects the ep to already be locked as appropriate.
+ */
+static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req,
+ int result)
+{
+ if (!hs_req) {
+ dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
+ return;
+ }
+
+ dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
+ hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
+
+ /*
+ * only replace the status if we've not already set an error
+ * from a previous transaction
+ */
+
+ if (hs_req->req.status == -EINPROGRESS)
+ hs_req->req.status = result;
+
+ if (using_dma(hsotg))
+ dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
+
+ dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
+
+ hs_ep->req = NULL;
+ list_del_init(&hs_req->queue);
+
+ /*
+ * call the complete request with the locks off, just in case the
+ * request tries to queue more work for this endpoint.
+ */
+
+ if (hs_req->req.complete) {
+ spin_unlock(&hsotg->lock);
+ usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
+ spin_lock(&hsotg->lock);
+ }
+
+	/* In DDMA mode there is no need to start the next ISOC request */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous)
+ return;
+
+ /*
+ * Look to see if there is anything else to do. Note, the completion
+ * of the previous request may have caused a new request to be started
+ * so be careful when doing this.
+ */
+
+ if (!hs_ep->req && result >= 0)
+ dwc2_gadget_start_next_request(hs_ep);
+}
+
+/*
+ * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
+ * @hs_ep: The endpoint the request was on.
+ *
+ * Get the first request from the ep queue and determine the descriptor on
+ * which the completion happened. SW discovers which descriptor is currently
+ * in use by HW and calculates the index of the completed descriptor based
+ * on the value of the DEPDMA register. Update the actual length of the
+ * request and give it back to the gadget.
+ */
+static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ struct usb_request *ureq;
+ u32 desc_sts;
+ u32 mask;
+
+ desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
+
+ /* Process only descriptors with buffer status set to DMA done */
+ while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
+ DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
+
+ hs_req = get_ep_head(hs_ep);
+ if (!hs_req) {
+ dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
+ return;
+ }
+ ureq = &hs_req->req;
+
+ /* Check completion status */
+ if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
+ DEV_DMA_STS_SUCC) {
+ mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
+ DEV_DMA_ISOC_RX_NBYTES_MASK;
+ ureq->actual = ureq->length - ((desc_sts & mask) >>
+ DEV_DMA_ISOC_NBYTES_SHIFT);
+
+			/* Adjust the actual length for ISOC OUT if the
+			 * length is not a multiple of 4
+			 */
+ if (!hs_ep->dir_in && ureq->length & 0x3)
+ ureq->actual += 4 - (ureq->length & 0x3);
+
+ /* Set actual frame number for completed transfers */
+ ureq->frame_number =
+ (desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
+ DEV_DMA_ISOC_FRNUM_SHIFT;
+ }
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+
+ hs_ep->compl_desc++;
+ if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
+ hs_ep->compl_desc = 0;
+ desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
+ }
+}
+
+/*
+ * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
+ * @hs_ep: The isochronous endpoint.
+ *
+ * For an ISOC OUT EP, flush the RX FIFO to remove the source of the BNA
+ * interrupt. Reset the target frame and next_desc to allow restarting
+ * ISOC transfers on the NAK interrupt for the IN direction, or on the
+ * OUTTKNEPDIS interrupt for the OUT direction.
+ */
+static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+
+ if (!hs_ep->dir_in)
+ dwc2_flush_rx_fifo(hsotg);
+ dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
+
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ hs_ep->next_desc = 0;
+ hs_ep->compl_desc = 0;
+}
+
+/**
+ * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
+ * @hsotg: The device state.
+ * @ep_idx: The endpoint index for the data
+ * @size: The size of data in the fifo, in bytes
+ *
+ * The FIFO status shows there is data to read from the FIFO for a given
+ * endpoint, so sort out whether we need to read the data into a request
+ * that has been made for that endpoint.
+ */
+static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
+{
+ struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
+ struct dwc2_hsotg_req *hs_req = hs_ep->req;
+ int to_read;
+ int max_req;
+ int read_ptr;
+
+ if (!hs_req) {
+ u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
+ int ptr;
+
+ dev_dbg(hsotg->dev,
+ "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
+ __func__, size, ep_idx, epctl);
+
+ /* dump the data from the FIFO, we've nothing we can do */
+ for (ptr = 0; ptr < size; ptr += 4)
+ (void)dwc2_readl(hsotg, EPFIFO(ep_idx));
+
+ return;
+ }
+
+ to_read = size;
+ read_ptr = hs_req->req.actual;
+ max_req = hs_req->req.length - read_ptr;
+
+ dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
+ __func__, to_read, max_req, read_ptr, hs_req->req.length);
+
+ if (to_read > max_req) {
+ /*
+		 * more data appeared than we were willing
+		 * to deal with in this request.
+		 */
+
+		/* currently we don't deal with this */
+ WARN_ON_ONCE(1);
+ }
+
+ hs_ep->total_data += to_read;
+ hs_req->req.actual += to_read;
+ to_read = DIV_ROUND_UP(to_read, 4);
+
+ /*
+ * note, we might over-write the buffer end by 3 bytes depending on
+ * alignment of the data.
+ */
+ dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
+ hs_req->req.buf + read_ptr, to_read);
+}
+
+/**
+ * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
+ * @hsotg: The device instance
+ * @dir_in: True if the zlp is in the IN direction
+ *
+ * Generate a zero-length IN packet request for terminating a SETUP
+ * transaction.
+ *
+ * Note, since we don't write any data to the TxFIFO, it is
+ * currently believed that we do not need to wait for any space in
+ * the TxFIFO.
+ */
+static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
+{
+ /* eps_out[0] is used in both directions */
+ hsotg->eps_out[0]->dir_in = dir_in;
+ hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
+
+ dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
+}
+
+/*
+ * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
+ * @hs_ep: The endpoint on which the transfer took place
+ *
+ * Iterate over the endpoint's descriptor chain and get the number of bytes
+ * remaining in the DMA descriptors after the transfer has completed. Used
+ * for non-isochronous EPs.
+ */
+static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned int bytes_rem = 0;
+ unsigned int bytes_rem_correction = 0;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ int i;
+ u32 status;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+
+ if (!desc)
+ return -EINVAL;
+
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
+ bytes_rem_correction = 4 - (mps % 4);
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ status = desc->status;
+ bytes_rem += status & DEV_DMA_NBYTES_MASK;
+ bytes_rem -= bytes_rem_correction;
+
+ if (status & DEV_DMA_STS_MASK)
+ dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ i, status & DEV_DMA_STS_MASK);
+
+ if (status & DEV_DMA_L)
+ break;
+
+ desc++;
+ }
+
+ return bytes_rem;
+}
+
+/**
+ * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
+ * @hsotg: The device instance
+ * @epnum: The endpoint received from
+ *
+ * The RXFIFO has delivered an OutDone event, which means that the data
+ * transfer for an OUT endpoint has been completed, either by a short
+ * packet or by the finish of a transfer.
+ */
+static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
+{
+ u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
+ struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
+ struct dwc2_hsotg_req *hs_req = hs_ep->req;
+ struct usb_request *req = &hs_req->req;
+ unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ int result = 0;
+
+ if (!hs_req) {
+ dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
+ return;
+ }
+
+ if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
+ dev_dbg(hsotg->dev, "zlp packet received\n");
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+ dwc2_hsotg_enqueue_setup(hsotg);
+ return;
+ }
+
+ if (using_desc_dma(hsotg))
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+
+ if (using_dma(hsotg)) {
+ unsigned int size_done;
+
+ /*
+ * Calculate the size of the transfer by checking how much
+ * is left in the endpoint size register and then working it
+ * out from the amount we loaded for the transfer.
+ *
+ * We need to do this as DMA pointers are always 32bit aligned
+ * so may overshoot/undershoot the transfer.
+ */
+
+ size_done = hs_ep->size_loaded - size_left;
+ size_done += hs_ep->last_load;
+
+ req->actual = size_done;
+ }
+
+ /* if there is more request to do, schedule new transfer */
+ if (req->actual < req->length && size_left == 0) {
+ dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+ return;
+ }
+
+ if (req->actual < req->length && req->short_not_ok) {
+ dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
+ __func__, req->actual, req->length);
+
+ /*
+ * todo - what should we return here? there's no one else
+ * even bothering to check the status.
+ */
+ }
+
+ /* DDMA IN status phase will start from StsPhseRcvd interrupt */
+ if (!using_desc_dma(hsotg) && epnum == 0 &&
+ hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* Move to STATUS IN */
+ if (!hsotg->delayed_status)
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ }
+
+ /* Set actual frame number for completed transfers */
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+ req->frame_number = hs_ep->target_frame;
+ dwc2_gadget_incr_frame_num(hs_ep);
+ }
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
+}
+
+/**
+ * dwc2_hsotg_handle_rx - RX FIFO has data
+ * @hsotg: The device instance
+ *
+ * The IRQ handler has detected that the RX FIFO has some data in it
+ * that requires processing, so find out what is in there and do the
+ * appropriate read.
+ *
+ * The RXFIFO is a true FIFO; the packets coming out are still in packet
+ * chunks, so if x packets are received on an endpoint you'll get x
+ * FIFO events delivered, each with a packet's worth of data in it.
+ *
+ * When using DMA, we should not be processing events from the RXFIFO
+ * as the actual data should be sent to the memory directly and we turn
+ * on the completion interrupts to get notifications of transfer completion.
+ */
+static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
+{
+ u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
+ u32 epnum, status, size;
+
+ WARN_ON(using_dma(hsotg));
+
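+	/* GRXSTSP packs the EP number, packet status and byte count into one word */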
+ epnum = grxstsr & GRXSTS_EPNUM_MASK;
+ status = grxstsr & GRXSTS_PKTSTS_MASK;
+
+ size = grxstsr & GRXSTS_BYTECNT_MASK;
+ size >>= GRXSTS_BYTECNT_SHIFT;
+
+ dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
+ __func__, grxstsr, size, epnum);
+
+ switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
+ case GRXSTS_PKTSTS_GLOBALOUTNAK:
+ dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
+ break;
+
+ case GRXSTS_PKTSTS_OUTDONE:
+ dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
+ dwc2_hsotg_read_frameno(hsotg));
+
+ if (!using_dma(hsotg))
+ dwc2_hsotg_handle_outdone(hsotg, epnum);
+ break;
+
+ case GRXSTS_PKTSTS_SETUPDONE:
+ dev_dbg(hsotg->dev,
+ "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+ dwc2_hsotg_read_frameno(hsotg),
+ dwc2_readl(hsotg, DOEPCTL(0)));
+ /*
+ * Call dwc2_hsotg_handle_outdone here if it was not called from
+ * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
+ * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
+ */
+ if (hsotg->ep0_state == DWC2_EP0_SETUP)
+ dwc2_hsotg_handle_outdone(hsotg, epnum);
+ break;
+
+ case GRXSTS_PKTSTS_OUTRX:
+ dwc2_hsotg_rx_data(hsotg, epnum, size);
+ break;
+
+ case GRXSTS_PKTSTS_SETUPRX:
+ dev_dbg(hsotg->dev,
+ "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+ dwc2_hsotg_read_frameno(hsotg),
+ dwc2_readl(hsotg, DOEPCTL(0)));
+
+ WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
+
+ dwc2_hsotg_rx_data(hsotg, epnum, size);
+ break;
+
+ default:
+ dev_warn(hsotg->dev, "%s: unknown status %08x\n",
+ __func__, grxstsr);
+
+ dwc2_hsotg_dump(hsotg);
+ break;
+ }
+}
+
+/**
+ * dwc2_hsotg_ep0_mps - turn max packet size into register setting
+ * @mps: The maximum packet size in bytes.
+ */
+static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
+{
+ switch (mps) {
+ case 64:
+ return D0EPCTL_MPS_64;
+ case 32:
+ return D0EPCTL_MPS_32;
+ case 16:
+ return D0EPCTL_MPS_16;
+ case 8:
+ return D0EPCTL_MPS_8;
+ }
+
+ /* bad max packet size, warn and return invalid result */
+ WARN_ON(1);
+ return (u32)-1;
+}
+
+/**
+ * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
+ * @hsotg: The driver state.
+ * @ep: The index number of the endpoint
+ * @mps: The maximum packet size in bytes
+ * @mc: The multicount value
+ * @dir_in: True if direction is in.
+ *
+ * Configure the maximum packet size for the given endpoint, updating
+ * the hardware control registers to reflect this.
+ */
+static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
+ unsigned int ep, unsigned int mps,
+ unsigned int mc, unsigned int dir_in)
+{
+ struct dwc2_hsotg_ep *hs_ep;
+ u32 reg;
+
+ hs_ep = index_to_ep(hsotg, ep, dir_in);
+ if (!hs_ep)
+ return;
+
+ if (ep == 0) {
+ u32 mps_bytes = mps;
+
+ /* EP0 is a special case */
+ mps = dwc2_hsotg_ep0_mps(mps_bytes);
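+		/* the D0EPCTL_MPS_* values encode to 0..3, so anything above is invalid */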
+ if (mps > 3)
+ goto bad_mps;
+ hs_ep->ep.maxpacket = mps_bytes;
+ hs_ep->mc = 1;
+ } else {
+ if (mps > 1024)
+ goto bad_mps;
+ hs_ep->mc = mc;
+ if (mc > 3)
+ goto bad_mps;
+ hs_ep->ep.maxpacket = mps;
+ }
+
+ if (dir_in) {
+ reg = dwc2_readl(hsotg, DIEPCTL(ep));
+ reg &= ~DXEPCTL_MPS_MASK;
+ reg |= mps;
+ dwc2_writel(hsotg, reg, DIEPCTL(ep));
+ } else {
+ reg = dwc2_readl(hsotg, DOEPCTL(ep));
+ reg &= ~DXEPCTL_MPS_MASK;
+ reg |= mps;
+ dwc2_writel(hsotg, reg, DOEPCTL(ep));
+ }
+
+ return;
+
+bad_mps:
+ dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
+}
+
+/**
+ * dwc2_hsotg_txfifo_flush - flush Tx FIFO
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ */
+static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
+{
+ dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
+ GRSTCTL);
+
+ /* wait until the fifo is flushed */
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
+ dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
+ __func__);
+}
+
+/**
+ * dwc2_hsotg_trytx - check to see if anything needs transmitting
+ * @hsotg: The driver state
+ * @hs_ep: The driver endpoint to check.
+ *
+ * Check to see if there is a request that has data to send, and if so
+ * make an attempt to write data into the FIFO.
+ */
+static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg_req *hs_req = hs_ep->req;
+
+ if (!hs_ep->dir_in || !hs_req) {
+		/*
+		 * if no request is enqueued, we disable interrupts
+		 * for the endpoint, except for ep0
+		 */
+ if (hs_ep->index != 0)
+ dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
+ hs_ep->dir_in, 0);
+ return 0;
+ }
+
+ if (hs_req->req.actual < hs_req->req.length) {
+ dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
+ hs_ep->index);
+ return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+ }
+
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_complete_in - complete IN transfer
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint that has just completed.
+ *
+ * An IN transfer has been completed, update the transfer's state and then
+ * call the relevant completion routines.
+ */
+static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg_req *hs_req = hs_ep->req;
+ u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
+ int size_left, size_done;
+
+ if (!hs_req) {
+ dev_dbg(hsotg->dev, "XferCompl but no req\n");
+ return;
+ }
+
+ /* Finish ZLP handling for IN EP0 transactions */
+ if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
+ dev_dbg(hsotg->dev, "zlp packet sent\n");
+
+ /*
+		 * While sending the zlp for DWC2_EP0_STATUS_IN, the EP direction
+		 * was changed to IN. Change it back to complete the OUT transfer
+		 * request.
+ */
+ hs_ep->dir_in = 0;
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+ if (hsotg->test_mode) {
+ int ret;
+
+ ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
+ if (ret < 0) {
+ dev_dbg(hsotg->dev, "Invalid Test #%d\n",
+ hsotg->test_mode);
+ dwc2_hsotg_stall_ep0(hsotg);
+ return;
+ }
+ }
+ dwc2_hsotg_enqueue_setup(hsotg);
+ return;
+ }
+
+ /*
+ * Calculate the size of the transfer by checking how much is left
+ * in the endpoint size register and then working it out from
+ * the amount we loaded for the transfer.
+ *
+ * We do this even for DMA, as the transfer may have incremented
+ * past the end of the buffer (DMA transfers are always 32bit
+ * aligned).
+ */
+ if (using_desc_dma(hsotg)) {
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+ if (size_left < 0)
+ dev_err(hsotg->dev, "error parsing DDMA results %d\n",
+ size_left);
+ } else {
+ size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ }
+
+ size_done = hs_ep->size_loaded - size_left;
+ size_done += hs_ep->last_load;
+
+ if (hs_req->req.actual != size_done)
+ dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
+ __func__, hs_req->req.actual, size_done);
+
+ hs_req->req.actual = size_done;
+ dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
+ hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
+
+ if (!size_left && hs_req->req.actual < hs_req->req.length) {
+ dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
+ dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+ return;
+ }
+
+	/* Zlp for all endpoints in non-DDMA mode; for ep0 only in the DATA IN stage */
+ if (hs_ep->send_zlp) {
+ hs_ep->send_zlp = 0;
+ if (!using_desc_dma(hsotg)) {
+ dwc2_hsotg_program_zlp(hsotg, hs_ep);
+ /* transfer will be completed on next complete interrupt */
+ return;
+ }
+ }
+
+ if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
+ /* Move to STATUS OUT */
+ dwc2_hsotg_ep0_zlp(hsotg, false);
+ return;
+ }
+
+ /* Set actual frame number for completed transfers */
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ dwc2_gadget_incr_frame_num(hs_ep);
+ }
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+}
+
+/**
+ * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
+ * @hsotg: The device state.
+ * @idx: Index of ep.
+ * @dir_in: Endpoint direction 1-in 0-out.
+ *
+ * Reads the interrupts for the endpoint with the given index and
+ * direction, masking epint_reg with the corresponding mask.
+ */
+static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
+ unsigned int idx, int dir_in)
+{
+ u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+ u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
+ u32 ints;
+ u32 mask;
+ u32 diepempmsk;
+
+ mask = dwc2_readl(hsotg, epmsk_reg);
+ diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
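+ /* DIEPEMPMSK holds one TxFIFO-empty interrupt enable bit per IN endpoint */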
+ mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
+ mask |= DXEPINT_SETUP_RCVD;
+
+ ints = dwc2_readl(hsotg, epint_reg);
+ ints &= mask;
+ return ints;
+}
+
+/**
+ * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This interrupt indicates that the endpoint has been disabled per the
+ * application's request.
+ *
+ * For IN endpoints this flushes the TxFIFO and, for stalled BULK endpoints,
+ * clears DCTL_CGNPINNAK; for ISOC endpoints it completes the current request.
+ *
+ * For ISOC-OUT endpoints it completes expired requests and, if a request
+ * remains queued, starts it.
+ */
+static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ unsigned char idx = hs_ep->index;
+ int dir_in = hs_ep->dir_in;
+ u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
+ int dctl = dwc2_readl(hsotg, DCTL);
+
+ dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+
+ if (dir_in) {
+ int epctl = dwc2_readl(hsotg, epctl_reg);
+
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+ if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
+ int dctl = dwc2_readl(hsotg, DCTL);
+
+ dctl |= DCTL_CGNPINNAK;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+ } else {
+
+ if (dctl & DCTL_GOUTNAKSTS) {
+ dctl |= DCTL_CGOUTNAK;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+ }
+
+ if (!hs_ep->isochronous)
+ return;
+
+ if (list_empty(&hs_ep->queue)) {
+ dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
+ __func__, hs_ep);
+ return;
+ }
+
+ do {
+ hs_req = get_ep_head(hs_ep);
+ if (hs_req) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
+ -ENODATA);
+ }
+ dwc2_gadget_incr_frame_num(hs_ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ } while (dwc2_gadget_target_frame_elapsed(hs_ep));
+}
+
+/**
+ * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
+ * @ep: The endpoint on which interrupt is asserted.
+ *
+ * This is the starting point for an ISOC-OUT transfer: synchronization is
+ * done with the first OUT token received from the host while the
+ * corresponding EP is disabled.
+ *
+ * The device does not know the initial frame in which the OUT token will
+ * arrive, so the HW generates OUTTKNEPDIS (OUT token received while EP is
+ * disabled). On this interrupt the SW starts calculating the next transfer
+ * frame.
+ */
+static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+{
+ struct dwc2_hsotg *hsotg = ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ int dir_in = ep->dir_in;
+
+ if (dir_in || !ep->isochronous)
+ return;
+
+ if (using_desc_dma(hsotg)) {
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
+ /* Start first ISO Out */
+ ep->target_frame = hsotg->frame_number;
+ dwc2_gadget_start_isoc_ddma(ep);
+ }
+ return;
+ }
+
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
+ u32 ctrl;
+
+ ep->target_frame = hsotg->frame_number;
+ if (ep->interval > 1) {
+ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
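+ /* Match the endpoint's Even/Odd (micro)frame bit to the parity of the target frame */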
+ if (ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+
+ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+ }
+ }
+
+ while (dwc2_gadget_target_frame_elapsed(ep)) {
+ hs_req = get_ep_head(ep);
+ if (hs_req) {
+ hs_req->req.frame_number = ep->target_frame;
+ hs_req->req.actual = 0;
+ dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
+ }
+
+ dwc2_gadget_incr_frame_num(ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ }
+
+ if (!ep->req)
+ dwc2_gadget_start_next_request(ep);
+
+}
+
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep);
+
+/**
+ * dwc2_gadget_handle_nak - handle NAK interrupt
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This is the starting point for an ISOC-IN transfer: synchronization is
+ * done with the first IN token received from the host while the
+ * corresponding EP is disabled.
+ *
+ * The device does not know when the first token will arrive from the host.
+ * On its arrival the HW generates two interrupts: 'IN token received while
+ * FIFO empty' and 'NAK'. For ISOC-IN, the NAK interrupt means the token has
+ * arrived and a ZLP was sent in response because there was no data in the
+ * FIFO. The SW uses this interrupt to obtain the frame in which the token
+ * arrived and then, based on the interval, calculates the next transfer
+ * frame.
+ */
+static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ int dir_in = hs_ep->dir_in;
+ u32 ctrl;
+
+ if (!dir_in || !hs_ep->isochronous)
+ return;
+
+ if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
+
+ if (using_desc_dma(hsotg)) {
+ hs_ep->target_frame = hsotg->frame_number;
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ /* In service interval mode, target_frame must
+ * be set to the last (u)frame of the service interval.
+ */
+ if (hsotg->params.service_interval) {
+ /* Set target_frame to the first (u)frame of
+ * the service interval
+ */
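+ /* ~interval + 1 == -interval; with a power-of-two interval this rounds target_frame down to a multiple of the interval */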
+ hs_ep->target_frame &= ~hs_ep->interval + 1;
+
+ /* Set target_frame to the last (u)frame of
+ * the service interval
+ */
+ dwc2_gadget_incr_frame_num(hs_ep);
+ dwc2_gadget_dec_frame_num_by_one(hs_ep);
+ }
+
+ dwc2_gadget_start_isoc_ddma(hs_ep);
+ return;
+ }
+
+ hs_ep->target_frame = hsotg->frame_number;
+ if (hs_ep->interval > 1) {
+ u32 ctrl = dwc2_readl(hsotg,
+ DIEPCTL(hs_ep->index));
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+
+ dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
+ }
+ }
+
+ if (using_desc_dma(hsotg))
+ return;
+
+ ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+ else
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ hs_req = get_ep_head(hs_ep);
+ if (hs_req) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+ }
+
+ dwc2_gadget_incr_frame_num(hs_ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ }
+
+ if (!hs_ep->req)
+ dwc2_gadget_start_next_request(hs_ep);
+}
+
+/**
+ * dwc2_hsotg_epint - handle an in/out endpoint interrupt
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ * @dir_in: Set if this is an IN endpoint
+ *
+ * Process and clear any interrupt pending for an individual endpoint
+ */
+static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
+ int dir_in)
+{
+ struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
+ u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
+ u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
+ u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
+ u32 ints;
+
+ ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
+
+ /* Clear endpoint interrupts */
+ dwc2_writel(hsotg, ints, epint_reg);
+
+ if (!hs_ep) {
+ dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
+ __func__, idx, dir_in ? "in" : "out");
+ return;
+ }
+
+ dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
+ __func__, idx, dir_in ? "in" : "out", ints);
+
+ /* Don't process XferCompl interrupt if it is a setup packet */
+ if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
+ ints &= ~DXEPINT_XFERCOMPL;
+
+ /*
+ * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
+ * stage and xfercomplete was generated without SETUP phase done
+ * interrupt. SW should parse the received setup packet only after the
+ * host has exited the setup phase of the control transfer.
+ */
+ if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
+ hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
+ ints &= ~DXEPINT_XFERCOMPL;
+
+ if (ints & DXEPINT_XFERCOMPL) {
+ dev_dbg(hsotg->dev,
+ "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
+ __func__, dwc2_readl(hsotg, epctl_reg),
+ dwc2_readl(hsotg, epsiz_reg));
+
+ /* In DDMA handle isochronous requests separately */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous) {
+ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ } else if (dir_in) {
+ /*
+ * We get OutDone from the FIFO, so we only
+ * need to look at completing IN requests here
+ * if operating in slave mode.
+ */
+ if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
+ dwc2_hsotg_complete_in(hsotg, hs_ep);
+
+ if (idx == 0 && !hs_ep->req)
+ dwc2_hsotg_enqueue_setup(hsotg);
+ } else if (using_dma(hsotg)) {
+ /*
+ * We're using DMA, we need to fire an OutDone here
+ * as we ignore the RXFIFO.
+ */
+ if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
+ dwc2_hsotg_handle_outdone(hsotg, idx);
+ }
+ }
+
+ if (ints & DXEPINT_EPDISBLD)
+ dwc2_gadget_handle_ep_disabled(hs_ep);
+
+ if (ints & DXEPINT_OUTTKNEPDIS)
+ dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
+
+ if (ints & DXEPINT_NAKINTRPT)
+ dwc2_gadget_handle_nak(hs_ep);
+
+ if (ints & DXEPINT_AHBERR)
+ dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
+
+ if (ints & DXEPINT_SETUP) { /* Setup or Timeout */
+ dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
+
+ if (using_dma(hsotg) && idx == 0) {
+ /*
+ * this is the notification that we've received a
+ * setup packet. In non-DMA mode we'd get this
+ * from the RXFIFO; instead we need to process
+ * the setup here.
+ */
+
+ if (dir_in)
+ WARN_ON_ONCE(1);
+ else
+ dwc2_hsotg_handle_outdone(hsotg, 0);
+ }
+ }
+
+ if (ints & DXEPINT_STSPHSERCVD) {
+ dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
+
+ /* Safety check EP0 state when STSPHSERCVD asserted */
+ if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* Move to STATUS IN for DDMA */
+ if (using_desc_dma(hsotg)) {
+ if (!hsotg->delayed_status)
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ else
+ /* In case of a 3-stage Control Write with delayed
+ * status, when the Status IN transfer started
+ * before STSPHSERCVD was asserted, the NAKSTS bit
+ * was not cleared by CNAK in dwc2_hsotg_start_req().
+ * Clear NAKSTS now to allow the transfer to
+ * complete.
+ */
+ dwc2_set_bit(hsotg, DIEPCTL(0),
+ DXEPCTL_CNAK);
+ }
+ }
+
+ }
+
+ if (ints & DXEPINT_BACK2BACKSETUP)
+ dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+
+ if (ints & DXEPINT_BNAINTR) {
+ dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
+ if (hs_ep->isochronous)
+ dwc2_gadget_handle_isoc_bna(hs_ep);
+ }
+
+ if (dir_in && !hs_ep->isochronous) {
+ /* not sure if this is important, but we'll clear it anyway */
+ if (ints & DXEPINT_INTKNTXFEMP) {
+ dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
+ __func__, idx);
+ }
+
+ /* this probably means something bad is happening */
+ if (ints & DXEPINT_INTKNEPMIS) {
+ dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
+ __func__, idx);
+ }
+
+ /* FIFO has space or is empty (see GAHBCFG) */
+ if (hsotg->dedicated_fifos &&
+ ints & DXEPINT_TXFEMP) {
+ dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
+ __func__, idx);
+ if (!using_dma(hsotg))
+ dwc2_hsotg_trytx(hsotg, hs_ep);
+ }
+ }
+}
+
+/**
+ * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
+ * @hsotg: The device state.
+ *
+ * Handle updating the device settings after the enumeration phase has
+ * been completed.
+ */
+static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
+{
+ u32 dsts = dwc2_readl(hsotg, DSTS);
+ int ep0_mps = 0, ep_mps = 8;
+
+ /*
+ * This should signal the finish of the enumeration phase
+ * of the USB handshaking, so we should now know what rate
+ * we connected at.
+ */
+
+ dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
+
+ /*
+ * note, since we're limited by the transfer size on EP0, and
+ * it seems IN transfers must be an even number of packets, we do
+ * not advertise a 64-byte MPS on EP0.
+ */
+
+ /* catch both EnumSpd_FS and EnumSpd_FS48 */
+ switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
+ case DSTS_ENUMSPD_FS:
+ case DSTS_ENUMSPD_FS48:
+ hsotg->gadget.speed = USB_SPEED_FULL;
+ ep0_mps = EP0_MPS_LIMIT;
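+ /* 1023 bytes is the largest isochronous payload allowed at full speed */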
+ ep_mps = 1023;
+ break;
+
+ case DSTS_ENUMSPD_HS:
+ hsotg->gadget.speed = USB_SPEED_HIGH;
+ ep0_mps = EP0_MPS_LIMIT;
+ ep_mps = 1024;
+ break;
+
+ case DSTS_ENUMSPD_LS:
+ hsotg->gadget.speed = USB_SPEED_LOW;
+ ep0_mps = 8;
+ ep_mps = 8;
+ /*
+ * note, we don't actually support LS in this driver at the
+ * moment, and the documentation seems to imply that it isn't
+ * supported by the PHYs on some of the devices.
+ */
+ break;
+ }
+ dev_info(hsotg->dev, "new device is %s\n",
+ usb_speed_string(hsotg->gadget.speed));
+
+ /*
+ * we should now know the maximum packet size for an
+ * endpoint, so set the endpoints to a default value.
+ */
+
+ if (ep0_mps) {
+ int i;
+ /* Initialize ep0 for both in and out directions */
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
+ for (i = 1; i < hsotg->num_of_eps; i++) {
+ if (hsotg->eps_in[i])
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 1);
+ if (hsotg->eps_out[i])
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 0);
+ }
+ }
+
+ /* ensure after enumeration our EP0 is active */
+
+ dwc2_hsotg_enqueue_setup(hsotg);
+
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ dwc2_readl(hsotg, DIEPCTL0),
+ dwc2_readl(hsotg, DOEPCTL0));
+}
+
+/**
+ * kill_all_requests - remove all requests from the endpoint's queue
+ * @hsotg: The device state.
+ * @ep: The endpoint the requests may be on.
+ * @result: The result code to use.
+ *
+ * Go through the requests on the given endpoint and mark them
+ * completed with the given result code.
+ */
+static void kill_all_requests(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *ep,
+ int result)
+{
+ unsigned int size;
+
+ ep->req = NULL;
+
+ while (!list_empty(&ep->queue)) {
+ struct dwc2_hsotg_req *req = get_ep_head(ep);
+
+ dwc2_hsotg_complete_request(hsotg, ep, req, result);
+ }
+
+ if (!hsotg->dedicated_fifos)
+ return;
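+ /* DTXFSTS reports free TxFIFO space in 32-bit words; convert to bytes */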
+ size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
+ if (size < ep->fifo_size)
+ dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
+}
+
+/**
+ * dwc2_hsotg_disconnect - disconnect service
+ * @hsotg: The device state.
+ *
+ * The device has been disconnected. Remove all current
+ * transactions and signal the gadget driver that this
+ * has happened.
+ */
+void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
+{
+ unsigned int ep;
+
+ if (!hsotg->connected)
+ return;
+
+ hsotg->connected = 0;
+ hsotg->test_mode = 0;
+
+ /* all endpoints should be shutdown */
+ for (ep = 0; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ kill_all_requests(hsotg, hsotg->eps_in[ep],
+ -ESHUTDOWN);
+ if (hsotg->eps_out[ep])
+ kill_all_requests(hsotg, hsotg->eps_out[ep],
+ -ESHUTDOWN);
+ }
+
+ call_gadget(hsotg, disconnect);
+ hsotg->lx_state = DWC2_L3;
+
+ usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
+}
+
+/**
+ * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
+ * @hsotg: The device state.
+ * @periodic: True if this is a periodic FIFO interrupt
+ */
+static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
+{
+ struct dwc2_hsotg_ep *ep;
+ int epno, ret;
+
+ /* look through for any more data to transmit */
+ for (epno = 0; epno < hsotg->num_of_eps; epno++) {
+ ep = index_to_ep(hsotg, epno, 1);
+
+ if (!ep)
+ continue;
+
+ if (!ep->dir_in)
+ continue;
+
+ if ((periodic && !ep->periodic) ||
+ (!periodic && ep->periodic))
+ continue;
+
+ ret = dwc2_hsotg_trytx(hsotg, ep);
+ if (ret < 0)
+ break;
+ }
+}
+
+/* IRQ flags which will trigger a retry around the IRQ loop */
+#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
+ GINTSTS_PTXFEMP | \
+ GINTSTS_RXFLVL)
+
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
+/**
+ * dwc2_hsotg_core_init_disconnected - issue a soft reset to the core
+ * @hsotg: The device state
+ * @is_usb_reset: USB reset flag
+ *
+ * Issue a soft reset to the core, and await the core finishing it.
+ */
+void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
+ bool is_usb_reset)
+{
+ u32 intmsk;
+ u32 val;
+ u32 usbcfg;
+ u32 dcfg = 0;
+ int ep;
+
+ /* Kill any ep0 requests as controller will be reinitialized */
+ kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
+
+ if (!is_usb_reset) {
+ if (dwc2_core_reset(hsotg, true))
+ return;
+ } else {
+ /* all endpoints should be shutdown */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
+ }
+
+ /*
+ * we must now enable ep0 ready for host detection and then
+ * set configuration.
+ */
+
+ /* keep other bits untouched (so e.g. forced modes are not lost) */
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
+ usbcfg |= GUSBCFG_TOUTCAL(7);
+
+ /* remove the HNP/SRP and set the PHY */
+ usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ dwc2_phy_init(hsotg, true);
+
+ dwc2_hsotg_init_fifo(hsotg);
+
+ if (!is_usb_reset)
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
+
+ dcfg |= DCFG_EPMISCNT(1);
+
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ dcfg |= DCFG_DEVSPD_LS;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
+ dcfg |= DCFG_DEVSPD_FS48;
+ else
+ dcfg |= DCFG_DEVSPD_FS;
+ break;
+ default:
+ dcfg |= DCFG_DEVSPD_HS;
+ }
+
+ if (hsotg->params.ipg_isoc_en)
+ dcfg |= DCFG_IPG_ISOC_SUPPORDED;
+
+ dwc2_writel(hsotg, dcfg, DCFG);
+
+ /* Clear any pending OTG interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GOTGINT);
+
+ /* Clear any pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+ intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
+ GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
+ GINTSTS_USBRST | GINTSTS_RESETDET |
+ GINTSTS_ENUMDONE | GINTSTS_OTGINT |
+ GINTSTS_USBSUSP | GINTSTS_WKUPINT |
+ GINTSTS_LPMTRANRCVD;
+
+ if (!using_desc_dma(hsotg))
+ intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+
+ if (!hsotg->params.external_id_pin_ctl)
+ intmsk |= GINTSTS_CONIDSTSCHNG;
+
+ dwc2_writel(hsotg, intmsk, GINTMSK);
+
+ if (using_dma(hsotg)) {
+ dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
+ hsotg->params.ahbcfg,
+ GAHBCFG);
+
+ /* Set DDMA mode support in the core if needed */
+ if (using_desc_dma(hsotg))
+ dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
+
+ } else {
+ dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
+ (GAHBCFG_NP_TXF_EMP_LVL |
+ GAHBCFG_P_TXF_EMP_LVL) : 0) |
+ GAHBCFG_GLBL_INTR_EN, GAHBCFG);
+ }
+
+ /*
+ * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
+ * when we have no data to transfer. Otherwise we get being flooded by
+ * interrupts.
+ */
+
+ dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
+ DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
+ DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
+ DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
+ DIEPMSK);
+
+ /*
+ * don't need XferCompl, we get that from RXFIFO in slave mode. In
+ * DMA mode we may need this and StsPhseRcvd.
+ */
+ dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
+ DOEPMSK_STSPHSERCVDMSK) : 0) |
+ DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
+ DOEPMSK_SETUPMSK,
+ DOEPMSK);
+
+ /* Enable BNA interrupt for DDMA */
+ if (using_desc_dma(hsotg)) {
+ dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
+ dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
+ }
+
+ /* Enable Service Interval mode if supported */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
+
+ dwc2_writel(hsotg, 0, DAINTMSK);
+
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ dwc2_readl(hsotg, DIEPCTL0),
+ dwc2_readl(hsotg, DOEPCTL0));
+
+ /* enable in and out endpoint interrupts */
+ dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
+
+ /*
+ * Enable the RXFIFO when in slave mode, as this is how we collect
+ * the data. In DMA mode, we get events from the FIFO but also
+ * things we cannot process, so do not use it.
+ */
+ if (!using_dma(hsotg))
+ dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
+
+ /* Enable interrupts for EP0 in and out */
+ dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
+ dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
+
+ if (!is_usb_reset) {
+ dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
+ udelay(10); /* see openiboot */
+ dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
+ }
+
+ dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
+
+ /*
+ * DxEPCTL_USBActEp says RO in manual, but seems to be set by
+ * writing to the EPCTL register.
+ */
+
+ /* set to read one 8-byte packet */
+ dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
+
+ dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
+ DXEPCTL_CNAK | DXEPCTL_EPENA |
+ DXEPCTL_USBACTEP,
+ DOEPCTL0);
+
+ /* enable, but don't activate EP0in */
+ dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
+ DXEPCTL_USBACTEP, DIEPCTL0);
+
+ /* clear global NAKs */
+ val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
+ if (!is_usb_reset)
+ val |= DCTL_SFTDISCON;
+ dwc2_set_bit(hsotg, DCTL, val);
+
+ /* configure the core to support LPM */
+ dwc2_gadget_init_lpm(hsotg);
+
+ /* program GREFCLK register if needed */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_gadget_program_ref_clk(hsotg);
+
+ /* must be at least 3ms to allow the bus to see the disconnect */
+ mdelay(3);
+
+ hsotg->lx_state = DWC2_L0;
+
+ dwc2_hsotg_enqueue_setup(hsotg);
+
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ dwc2_readl(hsotg, DIEPCTL0),
+ dwc2_readl(hsotg, DOEPCTL0));
+}
+
+void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
+{
+ /* set the soft-disconnect bit */
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
+}
+
+void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
+{
+ /* remove the soft-disconnect and let's go */
+ if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
+ dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
+}
+
+/**
+ * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
+ * @hsotg: The device state.
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * transmitting an ISOC transaction.
+ * - Corrupted IN Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Disable EP; when the 'Endpoint Disabled' interrupt is received, flush the FIFO
+ */
+static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hsotg_ep *hs_ep;
+ u32 epctrl;
+ u32 daintmsk;
+ u32 idx;
+
+ dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
+
+ daintmsk = dwc2_readl(hsotg, DAINTMSK);
+
+ for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_in[idx];
+ /* Proceed only for unmasked ISOC EPs */
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
+ continue;
+
+ epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
+ if ((epctrl & DXEPCTL_EPENA) &&
+ dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ epctrl |= DXEPCTL_SNAK;
+ epctrl |= DXEPCTL_EPDIS;
+ dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
+ }
+ }
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
+}
+
+/**
+ * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
+ * @hsotg: The device state.
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * receiving an ISOC transaction.
+ * - Corrupted OUT Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
+ */
+static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts;
+ u32 gintmsk;
+ u32 daintmsk;
+ u32 epctrl;
+ struct dwc2_hsotg_ep *hs_ep;
+ int idx;
+
+ dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
+
+ daintmsk = dwc2_readl(hsotg, DAINTMSK);
+ daintmsk >>= DAINT_OUTEP_SHIFT;
+
+ for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_out[idx];
+ /* Proceed only for unmasked ISOC EPs */
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
+ continue;
+
+ epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
+ if ((epctrl & DXEPCTL_EPENA) &&
+ dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ /* Unmask GOUTNAKEFF interrupt */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk |= GINTSTS_GOUTNAKEFF;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
+ dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
+ break;
+ }
+ }
+ }
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
+}
+
+/**
+ * dwc2_hsotg_irq - handle device interrupt
+ * @irq: The IRQ number triggered
+ * @pw: The private data supplied when the handler was registered.
+ */
+static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
+{
+ struct dwc2_hsotg *hsotg = pw;
+ int retry_count = 8;
+ u32 gintsts;
+ u32 gintmsk;
+
+ if (!dwc2_is_device_mode(hsotg))
+ return IRQ_NONE;
+
+ spin_lock(&hsotg->lock);
+irq_retry:
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+
+ dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
+ __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
+
+ gintsts &= gintmsk;
+
+ if (gintsts & GINTSTS_RESETDET) {
+ dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
+
+ dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
+
+ /* This event must be used only if controller is suspended */
+ if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
+ dwc2_exit_partial_power_down(hsotg, 0, true);
+
+ hsotg->lx_state = DWC2_L0;
+ }
+
+ if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
+ u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
+ u32 connected = hsotg->connected;
+
+ dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
+ dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
+ dwc2_readl(hsotg, GNPTXSTS));
+
+ dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
+
+ /* Report disconnection if it is not already done. */
+ dwc2_hsotg_disconnect(hsotg);
+
+ /* Reset device address to zero */
+ dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
+
+ if (usb_status & GOTGCTL_BSESVLD && connected)
+ dwc2_hsotg_core_init_disconnected(hsotg, true);
+ }
+
+ if (gintsts & GINTSTS_ENUMDONE) {
+ dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
+
+ dwc2_hsotg_irq_enumdone(hsotg);
+ }
+
+ if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
+ u32 daint = dwc2_readl(hsotg, DAINT);
+ u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
+ u32 daint_out, daint_in;
+ int ep;
+
+ daint &= daintmsk;
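+ /* OUT endpoint interrupt bits occupy the upper half of DAINT, IN bits the lower half */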
+ daint_out = daint >> DAINT_OUTEP_SHIFT;
+ daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
+
+ dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
+
+ for (ep = 0; ep < hsotg->num_of_eps && daint_out;
+ ep++, daint_out >>= 1) {
+ if (daint_out & 1)
+ dwc2_hsotg_epint(hsotg, ep, 0);
+ }
+
+ for (ep = 0; ep < hsotg->num_of_eps && daint_in;
+ ep++, daint_in >>= 1) {
+ if (daint_in & 1)
+ dwc2_hsotg_epint(hsotg, ep, 1);
+ }
+ }
+
+ /* check both FIFOs */
+
+ if (gintsts & GINTSTS_NPTXFEMP) {
+ dev_dbg(hsotg->dev, "NPTxFEmp\n");
+
+ /*
+ * Disable the interrupt to stop it happening again
+ * unless one of these endpoint routines decides that
+ * it needs re-enabling
+ */
+
+ dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
+ dwc2_hsotg_irq_fifoempty(hsotg, false);
+ }
+
+ if (gintsts & GINTSTS_PTXFEMP) {
+ dev_dbg(hsotg->dev, "PTxFEmp\n");
+
+ /* See note in GINTSTS_NPTxFEmp */
+
+ dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
+ dwc2_hsotg_irq_fifoempty(hsotg, true);
+ }
+
+ if (gintsts & GINTSTS_RXFLVL) {
+ /*
+ * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
+ * we need to retry dwc2_hsotg_handle_rx if this is still
+ * set.
+ */
+
+ dwc2_hsotg_handle_rx(hsotg);
+ }
+
+ if (gintsts & GINTSTS_ERLYSUSP) {
+ dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
+ dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
+ }
+
+ /*
+ * these next two seem to crop up occasionally, causing the core
+ * to shut down the USB transfer, so try clearing them and logging
+ * the occurrence.
+ */
+
+ if (gintsts & GINTSTS_GOUTNAKEFF) {
+ u8 idx;
+ u32 epctrl;
+ u32 gintmsk;
+ u32 daintmsk;
+ struct dwc2_hsotg_ep *hs_ep;
+
+ daintmsk = dwc2_readl(hsotg, DAINTMSK);
+ daintmsk >>= DAINT_OUTEP_SHIFT;
+ /* Mask this interrupt */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_GOUTNAKEFF;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+
+ dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
+ for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_out[idx];
+ /* Proceed only for unmasked EPs */
+ if (BIT(idx) & ~daintmsk)
+ continue;
+
+ epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
+
+ /* ISOC EPs only */
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
+ epctrl |= DXEPCTL_SNAK;
+ epctrl |= DXEPCTL_EPDIS;
+ dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
+ continue;
+ }
+
+ /* Non-ISOC EPs */
+ if (hs_ep->halted) {
+ if (!(epctrl & DXEPCTL_EPENA))
+ epctrl |= DXEPCTL_EPENA;
+ epctrl |= DXEPCTL_EPDIS;
+ epctrl |= DXEPCTL_STALL;
+ dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
+ }
+ }
+
+ /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
+ }
+
+ if (gintsts & GINTSTS_GINNAKEFF) {
+ dev_info(hsotg->dev, "GINNakEff triggered\n");
+
+ dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
+
+ dwc2_hsotg_dump(hsotg);
+ }
+
+ if (gintsts & GINTSTS_INCOMPL_SOIN)
+ dwc2_gadget_handle_incomplete_isoc_in(hsotg);
+
+ if (gintsts & GINTSTS_INCOMPL_SOOUT)
+ dwc2_gadget_handle_incomplete_isoc_out(hsotg);
+
+ /*
+ * if we've had fifo events, we should try and go around the
+ * loop again to see if there's any point in returning yet.
+ */
+
+ if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
+ goto irq_retry;
+
+ /* Check WKUP_ALERT interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_gadget_wkup_alert_handler(hsotg);
+
+ spin_unlock(&hsotg->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 epctrl_reg;
+ u32 epint_reg;
+
+ epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
+ DOEPCTL(hs_ep->index);
+ epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
+ DOEPINT(hs_ep->index);
+
+ dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
+ hs_ep->name);
+
+ if (hs_ep->dir_in) {
+ if (hsotg->dedicated_fifos || hs_ep->periodic) {
+ dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
+ DXEPINT_INEPNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DIEPINT.NAKEFF\n",
+ __func__);
+ } else {
+ dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GINNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout GINTSTS.GINNAKEFF\n",
+ __func__);
+ }
+ } else {
+ /* Mask GINTSTS_GOUTNAKEFF interrupt */
+ dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
+
+ if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
+ dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
+
+ if (!using_dma(hsotg)) {
+ /* Wait for GINTSTS_RXFLVL interrupt */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_RXFLVL, 100)) {
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
+ __func__);
+ } else {
+ /*
+ * Pop GLOBAL OUT NAK status packet from RxFIFO
+ * to assert GOUTNAKEFF interrupt
+ */
+ dwc2_readl(hsotg, GRXSTSP);
+ }
+ }
+
+ /* Wait for global nak to take effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GOUTNAKEFF, 100))
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
+ __func__);
+ }
+
+ /* Disable ep */
+ dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
+
+ /* Wait for ep to be disabled */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DOEPCTL.EPDisable\n", __func__);
+
+ /* Clear EPDISBLD interrupt */
+ dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
+
+ if (hs_ep->dir_in) {
+ unsigned short fifo_index;
+
+ if (hsotg->dedicated_fifos || hs_ep->periodic)
+ fifo_index = hs_ep->fifo_index;
+ else
+ fifo_index = 0;
+
+ /* Flush TX FIFO */
+ dwc2_flush_tx_fifo(hsotg, fifo_index);
+
+ /* Clear Global IN NP NAK in Shared FIFO for non-periodic ep */
+ if (!hsotg->dedicated_fifos && !hs_ep->periodic)
+ dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
+
+ } else {
+ /* Remove global NAKs */
+ dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
+ }
+}
+
+/**
+ * dwc2_hsotg_ep_enable - enable the given endpoint
+ * @ep: The USB endpoint to configure
+ * @desc: The USB endpoint descriptor to configure with.
+ *
+ * This is called from the USB gadget code's usb_ep_enable().
+ */
+static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned long flags;
+ unsigned int index = hs_ep->index;
+ u32 epctrl_reg;
+ u32 epctrl;
+ u32 mps;
+ u32 mc;
+ u32 mask;
+ unsigned int dir_in;
+ unsigned int i, val, size;
+ int ret = 0;
+ unsigned char ep_type;
+ int desc_num;
+
+ dev_dbg(hsotg->dev,
+ "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
+ __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
+ desc->wMaxPacketSize, desc->bInterval);
+
+ /* not to be called for EP0 */
+ if (index == 0) {
+ dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
+ return -EINVAL;
+ }
+
+ dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+ if (dir_in != hs_ep->dir_in) {
+ dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
+ return -EINVAL;
+ }
+
+ ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ mps = usb_endpoint_maxp(desc);
+ mc = usb_endpoint_maxp_mult(desc);
+
+ /* ISOC IN in DDMA supported bInterval up to 10 */
+ if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
+ dir_in && desc->bInterval > 10) {
+ dev_err(hsotg->dev,
+ "%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* High bandwidth ISOC OUT in DDMA not supported */
+ if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
+ !dir_in && mc > 1) {
+ dev_err(hsotg->dev,
+ "%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
+
+ epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ epctrl = dwc2_readl(hsotg, epctrl_reg);
+
+ dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
+ __func__, epctrl, epctrl_reg);
+
+ if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
+ desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
+ else
+ desc_num = MAX_DMA_DESC_NUM_GENERIC;
+
+ /* Allocate DMA descriptor chain for non-ctrl endpoints */
+ if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+ hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
+ desc_num * sizeof(struct dwc2_dma_desc),
+ &hs_ep->desc_list_dma, GFP_ATOMIC);
+ if (!hs_ep->desc_list) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
+ epctrl |= DXEPCTL_MPS(mps);
+
+ /*
+ * mark the endpoint as active, otherwise the core may ignore
+ * transactions entirely for this endpoint
+ */
+ epctrl |= DXEPCTL_USBACTEP;
+
+ /* update the endpoint state */
+ dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
+
+ /* default, set to non-periodic */
+ hs_ep->isochronous = 0;
+ hs_ep->periodic = 0;
+ hs_ep->halted = 0;
+ hs_ep->wedged = 0;
+ hs_ep->interval = desc->bInterval;
+
+ switch (ep_type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ epctrl |= DXEPCTL_EPTYPE_ISO;
+ epctrl |= DXEPCTL_SETEVENFR;
+ hs_ep->isochronous = 1;
+ hs_ep->interval = 1 << (desc->bInterval - 1);
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ hs_ep->next_desc = 0;
+ hs_ep->compl_desc = 0;
+ if (dir_in) {
+ hs_ep->periodic = 1;
+ mask = dwc2_readl(hsotg, DIEPMSK);
+ mask |= DIEPMSK_NAKMSK;
+ dwc2_writel(hsotg, mask, DIEPMSK);
+ } else {
+ epctrl |= DXEPCTL_SNAK;
+ mask = dwc2_readl(hsotg, DOEPMSK);
+ mask |= DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(hsotg, mask, DOEPMSK);
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ epctrl |= DXEPCTL_EPTYPE_BULK;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ if (dir_in)
+ hs_ep->periodic = 1;
+
+ if (hsotg->gadget.speed == USB_SPEED_HIGH)
+ hs_ep->interval = 1 << (desc->bInterval - 1);
+
+ epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ epctrl |= DXEPCTL_EPTYPE_CONTROL;
+ break;
+ }
+
+ /*
+ * if the hardware has dedicated fifos, we must give each IN EP
+ * a unique tx-fifo even if it is non-periodic.
+ */
+ if (dir_in && hsotg->dedicated_fifos) {
+ unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+ u32 fifo_index = 0;
+ u32 fifo_size = UINT_MAX;
+
+ size = hs_ep->ep.maxpacket * hs_ep->mc;
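+ /* Best-fit search: claim the smallest unused TxFIFO that can hold one complete (high-bandwidth) packet */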
+ for (i = 1; i <= fifo_count; ++i) {
+ if (hsotg->fifo_map & (1 << i))
+ continue;
+ val = dwc2_readl(hsotg, DPTXFSIZN(i));
+ val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
+ if (val < size)
+ continue;
+ /* Search for smallest acceptable fifo */
+ if (val < fifo_size) {
+ fifo_size = val;
+ fifo_index = i;
+ }
+ }
+ if (!fifo_index) {
+ dev_err(hsotg->dev,
+ "%s: No suitable fifo found\n", __func__);
+ ret = -ENOMEM;
+ goto error1;
+ }
+ epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
+ hsotg->fifo_map |= 1 << fifo_index;
+ epctrl |= DXEPCTL_TXFNUM(fifo_index);
+ hs_ep->fifo_index = fifo_index;
+ hs_ep->fifo_size = fifo_size;
+ }
+
+ /* for non control endpoints, set PID to D0 */
+ if (index && !hs_ep->isochronous)
+ epctrl |= DXEPCTL_SETD0PID;
+
+ /* WA for full-speed ISOC IN in DDMA mode.
+ * By clearing the EP's NAK status, the core will reply to an
+ * IN token with a ZLP and assert the NAK interrupt, relying
+ * on TxFIFO status only.
+ */
+
+ if (hsotg->gadget.speed == USB_SPEED_FULL &&
+ hs_ep->isochronous && dir_in) {
+ /* The WA applies only to core versions from 2.72a
+ * to 4.00a (including both). Also for FS_IOT_1.00a
+ * and HS_IOT_1.00a.
+ */
+ u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
+
+ if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
+ gsnpsid <= DWC2_CORE_REV_4_00a) ||
+ gsnpsid == DWC2_FS_IOT_REV_1_00a ||
+ gsnpsid == DWC2_HS_IOT_REV_1_00a)
+ epctrl |= DXEPCTL_CNAK;
+ }
+
+ dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
+ __func__, epctrl);
+
+ dwc2_writel(hsotg, epctrl, epctrl_reg);
+ dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
+ __func__, dwc2_readl(hsotg, epctrl_reg));
+
+ /* enable the endpoint interrupt */
+ dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
+
+error1:
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+error2:
+ if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
+ dmam_free_coherent(hsotg->dev, desc_num *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * dwc2_hsotg_ep_disable - disable given endpoint
+ * @ep: The endpoint to disable.
+ */
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ int index = hs_ep->index;
+ u32 epctrl_reg;
+ u32 ctrl;
+
+ dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
+
+ if (ep == &hsotg->eps_out[0]->ep) {
+ dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
+ return -EINVAL;
+ }
+
+ if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
+ dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
+ return -EINVAL;
+ }
+
+ epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
+
+ ctrl = dwc2_readl(hsotg, epctrl_reg);
+
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+
+ ctrl &= ~DXEPCTL_EPENA;
+ ctrl &= ~DXEPCTL_USBACTEP;
+ ctrl |= DXEPCTL_SNAK;
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+ dwc2_writel(hsotg, ctrl, epctrl_reg);
+
+ /* disable endpoint interrupts */
+ dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
+
+ /* terminate all requests with shutdown */
+ kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
+
+ hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
+ hs_ep->fifo_index = 0;
+ hs_ep->fifo_size = 0;
+
+ return 0;
+}
+
+static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ ret = dwc2_hsotg_ep_disable(ep);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return ret;
+}
+
+/**
+ * on_list - check whether a request is queued on the given endpoint
+ * @ep: The endpoint to check.
+ * @test: The request to look for on the endpoint's queue.
+ */
+static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
+{
+ struct dwc2_hsotg_req *req, *treq;
+
+ list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+ if (req == test)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dwc2_hsotg_ep_dequeue - dequeue a request from the given endpoint
+ * @ep: The endpoint the request is queued on.
+ * @req: The request to be removed from the queue.
+ */
+static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct dwc2_hsotg_req *hs_req = our_req(req);
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hs = hs_ep->parent;
+ unsigned long flags;
+
+ dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
+
+ spin_lock_irqsave(&hs->lock, flags);
+
+ if (!on_list(hs_ep, hs_req)) {
+ spin_unlock_irqrestore(&hs->lock, flags);
+ return -EINVAL;
+ }
+
+ /* Dequeue already started request */
+ if (req == &hs_ep->req->req)
+ dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
+
+ dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
+ spin_unlock_irqrestore(&hs->lock, flags);
+
+ return 0;
+}
+
+/**
+ * dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
+ * @ep: The endpoint to be wedged.
+ *
+ */
+static int dwc2_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hs = hs_ep->parent;
+
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hs->lock, flags);
+ hs_ep->wedged = 1;
+ ret = dwc2_hsotg_ep_sethalt(ep, 1, false);
+ spin_unlock_irqrestore(&hs->lock, flags);
+
+ return ret;
+}
+
+/**
+ * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
+ * @ep: The endpoint to set halt.
+ * @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ * the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if the request comes from the
+ * set_feature protocol command handler.
+ */
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hs = hs_ep->parent;
+ int index = hs_ep->index;
+ u32 epreg;
+ u32 epctl;
+ u32 xfertype;
+
+ dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+
+ if (index == 0) {
+ if (value)
+ dwc2_hsotg_stall_ep0(hs);
+ else
+ dev_warn(hs->dev,
+ "%s: can't clear halt on ep0\n", __func__);
+ return 0;
+ }
+
+ if (hs_ep->isochronous) {
+ dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
+ return -EINVAL;
+ }
+
+ if (!now && value && !list_empty(&hs_ep->queue)) {
+ dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+ ep->name);
+ return -EAGAIN;
+ }
+
+ if (hs_ep->dir_in) {
+ epreg = DIEPCTL(index);
+ epctl = dwc2_readl(hs, epreg);
+
+ if (value) {
+ epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
+ if (epctl & DXEPCTL_EPENA)
+ epctl |= DXEPCTL_EPDIS;
+ } else {
+ epctl &= ~DXEPCTL_STALL;
+ hs_ep->wedged = 0;
+ xfertype = epctl & DXEPCTL_EPTYPE_MASK;
+ if (xfertype == DXEPCTL_EPTYPE_BULK ||
+ xfertype == DXEPCTL_EPTYPE_INTERRUPT)
+ epctl |= DXEPCTL_SETD0PID;
+ }
+ dwc2_writel(hs, epctl, epreg);
+ } else {
+ epreg = DOEPCTL(index);
+ epctl = dwc2_readl(hs, epreg);
+
+ if (value) {
+ /* Unmask GOUTNAKEFF interrupt */
+ dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
+
+ if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
+ dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
+ /* STALL bit will be set in the GOUTNAKEFF interrupt handler */
+ } else {
+ epctl &= ~DXEPCTL_STALL;
+ hs_ep->wedged = 0;
+ xfertype = epctl & DXEPCTL_EPTYPE_MASK;
+ if (xfertype == DXEPCTL_EPTYPE_BULK ||
+ xfertype == DXEPCTL_EPTYPE_INTERRUPT)
+ epctl |= DXEPCTL_SETD0PID;
+ dwc2_writel(hs, epctl, epreg);
+ }
+ }
+
+ hs_ep->halted = value;
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
+ * @ep: The endpoint to set halt.
+ * @value: Set or unset the halt.
+ */
+static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hs = hs_ep->parent;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hs->lock, flags);
+ ret = dwc2_hsotg_ep_sethalt(ep, value, false);
+ spin_unlock_irqrestore(&hs->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
+ .enable = dwc2_hsotg_ep_enable,
+ .disable = dwc2_hsotg_ep_disable_lock,
+ .alloc_request = dwc2_hsotg_ep_alloc_request,
+ .free_request = dwc2_hsotg_ep_free_request,
+ .queue = dwc2_hsotg_ep_queue_lock,
+ .dequeue = dwc2_hsotg_ep_dequeue,
+ .set_halt = dwc2_hsotg_ep_sethalt_lock,
+ .set_wedge = dwc2_gadget_ep_set_wedge,
+ /* note, don't believe we have any call for the fifo routines */
+};
+
+/**
+ * dwc2_hsotg_init - initialize the usb core
+ * @hsotg: The driver state
+ */
+static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
+{
+ /* unmask subset of endpoint interrupts */
+
+ dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
+ DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
+ DIEPMSK);
+
+ dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
+ DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
+ DOEPMSK);
+
+ dwc2_writel(hsotg, 0, DAINTMSK);
+
+ /* Be in disconnected state until gadget is registered */
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
+
+ /* setup fifos */
+
+ dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+ dwc2_readl(hsotg, GRXFSIZ),
+ dwc2_readl(hsotg, GNPTXFSIZ));
+
+ dwc2_hsotg_init_fifo(hsotg);
+
+ if (using_dma(hsotg))
+ dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
+}
+
+/**
+ * dwc2_hsotg_udc_start - prepare the udc for work
+ * @gadget: The usb gadget state
+ * @driver: The usb gadget driver
+ *
+ * Perform the initialization needed to prepare the UDC device and
+ * driver for operation.
+ */
+static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+ int ret;
+
+ if (!hsotg) {
+ pr_err("%s: called with no device\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!driver) {
+ dev_err(hsotg->dev, "%s: no driver\n", __func__);
+ return -EINVAL;
+ }
+
+ if (driver->max_speed < USB_SPEED_FULL)
+ dev_err(hsotg->dev, "%s: bad speed\n", __func__);
+
+ if (!driver->setup) {
+ dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
+ return -EINVAL;
+ }
+
+ WARN_ON(hsotg->driver);
+
+ hsotg->driver = driver;
+ hsotg->gadget.dev.of_node = hsotg->dev->of_node;
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ ret = dwc2_lowlevel_hw_enable(hsotg);
+ if (ret)
+ goto err;
+ }
+
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ if (dwc2_hw_is_device(hsotg)) {
+ dwc2_hsotg_init(hsotg);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ }
+
+ hsotg->enabled = 0;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ gadget->sg_supported = using_desc_dma(hsotg);
+ dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
+
+ return 0;
+
+err:
+ hsotg->driver = NULL;
+ return ret;
+}
+
+/**
+ * dwc2_hsotg_udc_stop - stop the udc
+ * @gadget: The usb gadget state
+ *
+ * Stop the UDC hw block and stay tuned for future transmissions
+ */
+static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+ int ep;
+
+ if (!hsotg)
+ return -ENODEV;
+
+ /* all endpoints should be shutdown */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ hsotg->driver = NULL;
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ hsotg->enabled = 0;
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ otg_set_peripheral(hsotg->uphy->otg, NULL);
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ dwc2_lowlevel_hw_disable(hsotg);
+
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_gadget_getframe - read the frame number
+ * @gadget: The usb gadget state
+ *
+ * Read the (micro)frame number
+ */
+static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
+{
+ return dwc2_hsotg_read_frameno(to_hsotg(gadget));
+}
+
+/**
+ * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
+ * @gadget: The usb gadget state
+ * @is_selfpowered: Whether the device is self-powered
+ *
+ * Set if the device is self or bus powered.
+ */
+static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ gadget->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_pullup - connect/disconnect the USB PHY
+ * @gadget: The usb gadget state
+ * @is_on: Current state of the USB PHY
+ *
+ * Connect/Disconnect the USB PHY pullup
+ */
+static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+
+ dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
+ hsotg->op_state);
+
+ /* Don't modify pullup state while in host mode */
+ if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
+ hsotg->enabled = is_on;
+ return 0;
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ if (is_on) {
+ hsotg->enabled = 1;
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ /* Enable ACG feature in device mode, if supported */
+ dwc2_enable_acg(hsotg);
+ dwc2_hsotg_core_connect(hsotg);
+ } else {
+ dwc2_hsotg_core_disconnect(hsotg);
+ dwc2_hsotg_disconnect(hsotg);
+ hsotg->enabled = 0;
+ }
+
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+}
+
+static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+
+ dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * If controller is in partial power down state, it must exit from
+ * that state before being initialized / de-initialized
+ */
+ if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
+ /*
+ * No need to check the return value as
+ * registers are not being restored.
+ */
+ dwc2_exit_partial_power_down(hsotg, 0, false);
+
+ if (is_active) {
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ if (hsotg->enabled) {
+ /* Enable ACG feature in device mode, if supported */
+ dwc2_enable_acg(hsotg);
+ dwc2_hsotg_core_connect(hsotg);
+ }
+ } else {
+ dwc2_hsotg_core_disconnect(hsotg);
+ dwc2_hsotg_disconnect(hsotg);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_vbus_draw - report bMaxPower field
+ * @gadget: The usb gadget state
+ * @mA: Amount of current
+ *
+ * Report how much power the device may consume to the phy.
+ */
+static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+
+ if (IS_ERR_OR_NULL(hsotg->uphy))
+ return -ENOTSUPP;
+ return usb_phy_set_power(hsotg->uphy, mA);
+}
+
+static void dwc2_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
+ break;
+ case USB_SPEED_FULL:
+ hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
+ break;
+ case USB_SPEED_LOW:
+ hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
+ break;
+ default:
+ dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
+ }
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
+ .get_frame = dwc2_hsotg_gadget_getframe,
+ .set_selfpowered = dwc2_hsotg_set_selfpowered,
+ .udc_start = dwc2_hsotg_udc_start,
+ .udc_stop = dwc2_hsotg_udc_stop,
+ .pullup = dwc2_hsotg_pullup,
+ .udc_set_speed = dwc2_gadget_set_speed,
+ .vbus_session = dwc2_hsotg_vbus_session,
+ .vbus_draw = dwc2_hsotg_vbus_draw,
+};
+
+/**
+ * dwc2_hsotg_initep - initialise a single endpoint
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint to be initialised.
+ * @epnum: The endpoint number
+ * @dir_in: True if direction is in.
+ *
+ * Initialise the given endpoint (as part of the probe and device state
+ * creation) to give to the gadget driver. Set up the endpoint name, any
+ * direction information and other state that may be required.
+ */
+static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ int epnum,
+ bool dir_in)
+{
+ char *dir;
+
+ if (epnum == 0)
+ dir = "";
+ else if (dir_in)
+ dir = "in";
+ else
+ dir = "out";
+
+ hs_ep->dir_in = dir_in;
+ hs_ep->index = epnum;
+
+ snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
+
+ INIT_LIST_HEAD(&hs_ep->queue);
+ INIT_LIST_HEAD(&hs_ep->ep.ep_list);
+
+ /* add to the list of endpoints known by the gadget driver */
+ if (epnum)
+ list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
+
+ hs_ep->parent = hsotg;
+ hs_ep->ep.name = hs_ep->name;
+
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
+ usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
+ else
+ usb_ep_set_maxpacket_limit(&hs_ep->ep,
+ epnum ? 1024 : EP0_MPS_LIMIT);
+ hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
+
+ if (epnum == 0) {
+ hs_ep->ep.caps.type_control = true;
+ } else {
+ if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
+ hs_ep->ep.caps.type_iso = true;
+ hs_ep->ep.caps.type_bulk = true;
+ }
+ hs_ep->ep.caps.type_int = true;
+ }
+
+ if (dir_in)
+ hs_ep->ep.caps.dir_in = true;
+ else
+ hs_ep->ep.caps.dir_out = true;
+
+ /*
+ * if we're using dma, we need to set the next-endpoint pointer
+ * to be something valid.
+ */
+
+ if (using_dma(hsotg)) {
+ u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
+
+ if (dir_in)
+ dwc2_writel(hsotg, next, DIEPCTL(epnum));
+ else
+ dwc2_writel(hsotg, next, DOEPCTL(epnum));
+ }
+}
+
+/**
+ * dwc2_hsotg_hw_cfg - read HW configuration registers
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * Read the USB core HW configuration registers
+ */
+static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
+{
+ u32 cfg;
+ u32 ep_type;
+ u32 i;
+
+ /* check hardware configuration */
+
+ hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
+
+ /* Add ep0 */
+ hsotg->num_of_eps++;
+
+ hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
+ sizeof(struct dwc2_hsotg_ep),
+ GFP_KERNEL);
+ if (!hsotg->eps_in[0])
+ return -ENOMEM;
+ /* Same dwc2_hsotg_ep is used in both directions for ep0 */
+ hsotg->eps_out[0] = hsotg->eps_in[0];
+
+ cfg = hsotg->hw_params.dev_ep_dirs;
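+ /* dev_ep_dirs packs a 2-bit direction code per EP: 0 = bidirectional, 1 = IN only, 2 = OUT only */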
+ for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
+ ep_type = cfg & 3;
+ /* Direction in or both */
+ if (!(ep_type & 2)) {
+ hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
+ sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
+ if (!hsotg->eps_in[i])
+ return -ENOMEM;
+ }
+ /* Direction out or both */
+ if (!(ep_type & 1)) {
+ hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
+ sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
+ if (!hsotg->eps_out[i])
+ return -ENOMEM;
+ }
+ }
+
+ hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
+ hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
+
+ dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
+ hsotg->num_of_eps,
+ hsotg->dedicated_fifos ? "dedicated" : "shared",
+ hsotg->fifo_mem);
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_dump - dump state of the udc
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
+{
+#ifdef DEBUG
+ struct device *dev = hsotg->dev;
+ u32 val;
+ int idx;
+
+ dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
+ dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
+ dwc2_readl(hsotg, DIEPMSK));
+
+ dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
+ dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
+
+ dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+ dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
+
+ /* show periodic fifo settings */
+
+ for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ val = dwc2_readl(hsotg, DPTXFSIZN(idx));
+ dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
+ val >> FIFOSIZE_DEPTH_SHIFT,
+ val & FIFOSIZE_STARTADDR_MASK);
+ }
+
+ for (idx = 0; idx < hsotg->num_of_eps; idx++) {
+ dev_info(dev,
+ "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
+ dwc2_readl(hsotg, DIEPCTL(idx)),
+ dwc2_readl(hsotg, DIEPTSIZ(idx)),
+ dwc2_readl(hsotg, DIEPDMA(idx)));
+
+ val = dwc2_readl(hsotg, DOEPCTL(idx));
+ dev_info(dev,
+ "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
+ idx, dwc2_readl(hsotg, DOEPCTL(idx)),
+ dwc2_readl(hsotg, DOEPTSIZ(idx)),
+ dwc2_readl(hsotg, DOEPDMA(idx)));
+ }
+
+ dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
+ dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
+#endif
+}
+
+/**
+ * dwc2_gadget_init - init function for gadget
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
+{
+ struct device *dev = hsotg->dev;
+ int epnum;
+ int ret;
+
+ /* Dump fifo information */
+ dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
+ hsotg->params.g_np_tx_fifo_size);
+ dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
+
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ hsotg->gadget.max_speed = USB_SPEED_LOW;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ hsotg->gadget.max_speed = USB_SPEED_FULL;
+ break;
+ default:
+ hsotg->gadget.max_speed = USB_SPEED_HIGH;
+ break;
+ }
+
+ hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
+ hsotg->gadget.name = dev_name(dev);
+ hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
+ hsotg->remote_wakeup_allowed = 0;
+
+ if (hsotg->params.lpm)
+ hsotg->gadget.lpm_capable = true;
+
+ if (hsotg->dr_mode == USB_DR_MODE_OTG)
+ hsotg->gadget.is_otg = 1;
+ else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+
+ ret = dwc2_hsotg_hw_cfg(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
+ return ret;
+ }
+
+ hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
+ DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
+ if (!hsotg->ctrl_buff)
+ return -ENOMEM;
+
+ hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
+ DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
+ if (!hsotg->ep0_buff)
+ return -ENOMEM;
+
+ if (using_desc_dma(hsotg)) {
+ ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
+ IRQF_SHARED, dev_name(hsotg->dev), hsotg);
+ if (ret < 0) {
+ dev_err(dev, "cannot claim IRQ for gadget\n");
+ return ret;
+ }
+
+	/* hsotg->num_of_eps includes ep0, set up by dwc2_hsotg_hw_cfg() */
+
+ if (hsotg->num_of_eps == 0) {
+ dev_err(dev, "wrong number of EPs (zero)\n");
+ return -EINVAL;
+ }
+
+ /* setup endpoint information */
+
+ INIT_LIST_HEAD(&hsotg->gadget.ep_list);
+ hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
+
+ /* allocate EP0 request */
+
+ hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_req) {
+ dev_err(dev, "failed to allocate ctrl req\n");
+ return -ENOMEM;
+ }
+
+ /* initialise the endpoints now the core has been initialised */
+ for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
+ if (hsotg->eps_in[epnum])
+ dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
+ epnum, 1);
+ if (hsotg->eps_out[epnum])
+ dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
+ epnum, 0);
+ }
+
+ dwc2_hsotg_dump(hsotg);
+
+ return 0;
+}
+
+/**
+ * dwc2_hsotg_remove - remove function for hsotg driver
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
+{
+ usb_del_gadget_udc(&hsotg->gadget);
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
+
+ return 0;
+}
+
+int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+
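+	/* Nothing to do unless the core is in the fully-awake L0 state */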
+ if (hsotg->lx_state != DWC2_L0)
+ return 0;
+
+ if (hsotg->driver) {
+ int ep;
+
+ dev_info(hsotg->dev, "suspending usb gadget %s\n",
+ hsotg->driver->driver.name);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ if (hsotg->enabled)
+ dwc2_hsotg_core_disconnect(hsotg);
+ dwc2_hsotg_disconnect(hsotg);
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
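+		/* Disable all endpoints except ep0 */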
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
+ }
+ }
+
+ return 0;
+}
+
+int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+
+ if (hsotg->lx_state == DWC2_L2)
+ return 0;
+
+ if (hsotg->driver) {
+ dev_info(hsotg->dev, "resuming usb gadget %s\n",
+ hsotg->driver->driver.name);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ if (hsotg->enabled) {
+			/* Enable ACG feature in device mode, if supported */
+ dwc2_enable_acg(hsotg);
+ dwc2_hsotg_core_connect(hsotg);
+ }
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * dwc2_backup_device_registers() - Backup controller device registers.
+ * When suspending the USB bus, the registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_dregs_backup *dr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup dev regs */
+ dr = &hsotg->dr_backup;
+
+ dr->dcfg = dwc2_readl(hsotg, DCFG);
+ dr->dctl = dwc2_readl(hsotg, DCTL);
+ dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
+ dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
+ dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
+
+ for (i = 0; i < hsotg->num_of_eps; i++) {
+ /* Backup IN EPs */
+ dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
+
+ /* Ensure DATA PID is correctly configured */
+ if (dr->diepctl[i] & DXEPCTL_DPID)
+ dr->diepctl[i] |= DXEPCTL_SETD1PID;
+ else
+ dr->diepctl[i] |= DXEPCTL_SETD0PID;
+
+ dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
+ dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
+
+ /* Backup OUT EPs */
+ dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
+
+ /* Ensure DATA PID is correctly configured */
+ if (dr->doepctl[i] & DXEPCTL_DPID)
+ dr->doepctl[i] |= DXEPCTL_SETD1PID;
+ else
+ dr->doepctl[i] |= DXEPCTL_SETD0PID;
+
+ dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
+ dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
+ dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
+ }
+ dr->valid = true;
+ return 0;
+}
+
+/**
+ * dwc2_restore_device_registers() - Restore controller device registers.
+ * When resuming the USB bus, the device registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
+{
+ struct dwc2_dregs_backup *dr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore dev regs */
+ dr = &hsotg->dr_backup;
+ if (!dr->valid) {
+ dev_err(hsotg->dev, "%s: no device registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ dr->valid = false;
+
+ if (!remote_wakeup)
+ dwc2_writel(hsotg, dr->dctl, DCTL);
+
+ dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
+ dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
+ dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
+
+ for (i = 0; i < hsotg->num_of_eps; i++) {
+ /* Restore IN EPs */
+ dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
+ dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
+ dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
+		/*
+		 * WA for enabled IN EPs in DDMA mode. On entering
+		 * hibernation, a wrong value is read and saved from
+		 * DIEPDMAx; as a result, a BNA interrupt is asserted on
+		 * hibernation exit when restoring from the saved area.
+		 */
+ if (using_desc_dma(hsotg) &&
+ (dr->diepctl[i] & DXEPCTL_EPENA))
+ dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
+ dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
+ dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
+ /* Restore OUT EPs */
+ dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
+		/*
+		 * WA for enabled OUT EPs in DDMA mode. On entering
+		 * hibernation, a wrong value is read and saved from
+		 * DOEPDMAx; as a result, a BNA interrupt is asserted on
+		 * hibernation exit when restoring from the saved area.
+		 */
+ if (using_desc_dma(hsotg) &&
+ (dr->doepctl[i] & DXEPCTL_EPENA))
+ dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
+ dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
+ dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
+ }
+
+ return 0;
+}
+
+/**
+ * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
+{
+ u32 val;
+
+ if (!hsotg->params.lpm)
+ return;
+
+ val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
+ val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
+ val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
+ val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
+ val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
+ val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
+ val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
+ dwc2_writel(hsotg, val, GLPMCFG);
+ dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
+
+ /* Unmask WKUP_ALERT Interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
+}
+
+/**
+ * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
+{
+ u32 val = 0;
+
+ val |= GREFCLK_REF_CLK_MODE;
+ val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
+ val |= hsotg->params.sof_cnt_wkup_alert <<
+ GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
+
+ dwc2_writel(hsotg, val, GREFCLK);
+ dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
+}
+
+/**
+ * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * Return non-zero if failed to enter hibernation.
+ */
+int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
+{
+ u32 gpwrdn;
+ int ret = 0;
+
+ /* Change to L2(suspend) state */
+ hsotg->lx_state = DWC2_L2;
+ dev_dbg(hsotg->dev, "Start of hibernation completed\n");
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+ ret = dwc2_backup_device_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup device registers\n",
+ __func__);
+ return ret;
+ }
+
+ gpwrdn = GPWRDN_PWRDNRSTN;
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Set flag to indicate that we are in hibernation */
+ hsotg->hibernated = 1;
+
+ /* Enable interrupts from wake up logic */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUINTSEL;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Unmask device mode interrupts in GPWRDN */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_RST_DET_MSK;
+ gpwrdn |= GPWRDN_LNSTSCHG_MSK;
+ gpwrdn |= GPWRDN_STS_CHGINT_MSK;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Enable Power Down Clamp */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PWRDNCLMP;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Switch off VDD */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PWRDNSWTCH;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+	/* Save GPWRDN for later use if a status-change (stschng) interrupt occurs */
+ hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ dev_dbg(hsotg->dev, "Hibernation completed\n");
+
+ return ret;
+}
+
+/**
+ * dwc2_gadget_exit_hibernation() - Exit from Device mode hibernation.
+ * This function handles exit by host-initiated resume/reset and
+ * device-initiated remote wakeup.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: indicates whether resume is initiated by Device or Host.
+ * @reset: indicates whether resume is initiated by Reset.
+ *
+ * Return non-zero if failed to exit from hibernation.
+ */
+int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, int reset)
+{
+ u32 pcgcctl;
+ u32 gpwrdn;
+ u32 dctl;
+ int ret = 0;
+ struct dwc2_gregs_backup *gr;
+ struct dwc2_dregs_backup *dr;
+
+ gr = &hsotg->gr_backup;
+ dr = &hsotg->dr_backup;
+
+ if (!hsotg->hibernated) {
+ dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
+ return 1;
+ }
+ dev_dbg(hsotg->dev,
+ "%s: called with rem_wakeup = %d reset = %d\n",
+ __func__, rem_wakeup, reset);
+
+ dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
+
+ if (!reset) {
+		/* Clear all pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+ }
+
+ /* De-assert Restore */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_RESTORE;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ if (!rem_wakeup) {
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ }
+
+ /* Restore GUSBCFG, DCFG and DCTL */
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
+ dwc2_writel(hsotg, dr->dctl, DCTL);
+
+ /* On USB Reset, reset device address to zero */
+ if (reset)
+ dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
+
+ /* De-assert Wakeup Logic */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+
+ if (rem_wakeup) {
+ udelay(10);
+ /* Start Remote Wakeup Signaling */
+ dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
+ } else {
+ udelay(50);
+ /* Set Device programming done bit */
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_PWRONPRGDONE;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+ /* Wait for interrupts which must be cleared */
+ mdelay(2);
+	/* Clear all pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* Restore global registers */
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+
+ /* Restore device registers */
+ ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore device registers\n",
+ __func__);
+ return ret;
+ }
+
+ if (rem_wakeup) {
+ mdelay(10);
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl &= ~DCTL_RMTWKUPSIG;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+
+ hsotg->hibernated = 0;
+ hsotg->lx_state = DWC2_L0;
+ dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
+
+ return ret;
+}
+
+/**
+ * dwc2_gadget_enter_partial_power_down() - Put controller in partial
+ * power down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * Return: non-zero if failed to enter device partial power down.
+ *
+ * This function is for entering device mode partial power down.
+ */
+int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
+{
+ u32 pcgcctl;
+ int ret = 0;
+
+ dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_backup_device_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup device registers\n",
+ __func__);
+ return ret;
+ }
+
+ /*
+ * Clear any pending interrupts since dwc2 will not be able to
+ * clear them after entering partial_power_down.
+ */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* Put the controller in low power state */
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+
+ pcgcctl |= PCGCTL_PWRCLMP;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(5);
+
+ pcgcctl |= PCGCTL_RSTPDWNMODULE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(5);
+
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ /* Set in_ppd flag to 1 as here core enters suspend. */
+ hsotg->in_ppd = 1;
+ hsotg->lx_state = DWC2_L2;
+
+ dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
+
+ return ret;
+}
+
+/*
+ * dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
+ * power down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @restore: indicates whether the registers need to be restored.
+ *
+ * Return: non-zero if failed to exit device partial power down.
+ *
+ * This function is for exiting from device mode partial power down.
+ */
+int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
+ bool restore)
+{
+ u32 pcgcctl;
+ u32 dctl;
+ struct dwc2_dregs_backup *dr;
+ int ret = 0;
+
+ dr = &hsotg->dr_backup;
+
+ dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_PWRCLMP;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
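+	/* Give the core time to come fully out of power down */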
+ udelay(100);
+ if (restore) {
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+ /* Restore DCFG */
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
+
+ ret = dwc2_restore_device_registers(hsotg, 0);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore device registers\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /* Set the Power-On Programming done bit */
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_PWRONPRGDONE;
+ dwc2_writel(hsotg, dctl, DCTL);
+
+ /* Set in_ppd flag to 0 as here core exits from suspend. */
+ hsotg->in_ppd = 0;
+ hsotg->lx_state = DWC2_L0;
+
+ dev_dbg(hsotg->dev, "Exiting device partial Power Down completed.\n");
+ return ret;
+}
+
+/**
+ * dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * This function is for entering device mode clock gating.
+ */
+void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg)
+{
+ u32 pcgctl;
+
+ dev_dbg(hsotg->dev, "Entering device clock gating.\n");
+
+ /* Set the Phy Clock bit as suspend is received. */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+ /* Set the Gate hclk as suspend is received. */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl |= PCGCTL_GATEHCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+ hsotg->lx_state = DWC2_L2;
+ hsotg->bus_suspended = true;
+}
+
+/*
+ * dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: indicates whether remote wake up is enabled.
+ *
+ * This function is for exiting from device mode clock gating.
+ */
+void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
+{
+ u32 pcgctl;
+ u32 dctl;
+
+ dev_dbg(hsotg->dev, "Exiting device clock gating.\n");
+
+ /* Clear the Gate hclk. */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_GATEHCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+ /* Phy Clock bit. */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+ if (rem_wakeup) {
+ /* Set Remote Wakeup Signaling */
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_RMTWKUPSIG;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+
+ /* Change to L0 state */
+ call_gadget(hsotg, resume);
+ hsotg->lx_state = DWC2_L0;
+ hsotg->bus_suspended = false;
+}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
new file mode 100644
index 000000000..35c7a4df8
--- /dev/null
+++ b/drivers/usb/dwc2/hcd.c
@@ -0,0 +1,5954 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * hcd.c - DesignWare HS OTG Controller host-mode routines
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * This file contains the core HCD code, and implements the Linux hc_driver
+ * API
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/of.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/*
+ * =========================================================================
+ * Host Core Layer Functions
+ * =========================================================================
+ */
+
+/**
+ * dwc2_enable_common_interrupts() - Initializes the common interrupts
+ * used in both device and host modes
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 intmsk;
+
+ /* Clear any pending OTG Interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GOTGINT);
+
+ /* Clear any pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* Enable the interrupts in the GINTMSK */
+ intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
+
+ if (!hsotg->params.host_dma)
+ intmsk |= GINTSTS_RXFLVL;
+ if (!hsotg->params.external_id_pin_ctl)
+ intmsk |= GINTSTS_CONIDSTSCHNG;
+
+ intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
+ GINTSTS_SESSREQINT;
+
+ if (dwc2_is_device_mode(hsotg) && hsotg->params.lpm)
+ intmsk |= GINTSTS_LPMTRANRCVD;
+
+ dwc2_writel(hsotg, intmsk, GINTMSK);
+}
+
+static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
+{
+ u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
+
+ switch (hsotg->hw_params.arch) {
+ case GHWCFG2_EXT_DMA_ARCH:
+ dev_err(hsotg->dev, "External DMA Mode not supported\n");
+ return -EINVAL;
+
+ case GHWCFG2_INT_DMA_ARCH:
+ dev_dbg(hsotg->dev, "Internal DMA Mode\n");
+ if (hsotg->params.ahbcfg != -1) {
+ ahbcfg &= GAHBCFG_CTRL_MASK;
+ ahbcfg |= hsotg->params.ahbcfg &
+ ~GAHBCFG_CTRL_MASK;
+ }
+ break;
+
+ case GHWCFG2_SLAVE_ONLY_ARCH:
+ default:
+ dev_dbg(hsotg->dev, "Slave Only Mode\n");
+ break;
+ }
+
+ if (hsotg->params.host_dma)
+ ahbcfg |= GAHBCFG_DMA_EN;
+ else
+ hsotg->params.dma_desc_enable = false;
+
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
+
+ return 0;
+}
+
+static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
+{
+ u32 usbcfg;
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
+
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ if (hsotg->params.otg_caps.hnp_support &&
+ hsotg->params.otg_caps.srp_support)
+ usbcfg |= GUSBCFG_HNPCAP;
+ fallthrough;
+
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ if (hsotg->params.otg_caps.srp_support)
+ usbcfg |= GUSBCFG_SRPCAP;
+ break;
+
+ case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
+ default:
+ break;
+ }
+
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+}
+
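+/*
+ * Enable the optional external VBUS supply regulator; having no
+ * regulator is not an error.
+ */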
+static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->vbus_supply)
+ return regulator_enable(hsotg->vbus_supply);
+
+ return 0;
+}
+
+static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->vbus_supply)
+ return regulator_disable(hsotg->vbus_supply);
+
+ return 0;
+}
+
+/**
+ * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 intmsk;
+
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ /* Disable all interrupts */
+ dwc2_writel(hsotg, 0, GINTMSK);
+ dwc2_writel(hsotg, 0, HAINTMSK);
+
+ /* Enable the common interrupts */
+ dwc2_enable_common_interrupts(hsotg);
+
+ /* Enable host mode interrupts without disturbing common interrupts */
+ intmsk = dwc2_readl(hsotg, GINTMSK);
+ intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
+ dwc2_writel(hsotg, intmsk, GINTMSK);
+}
+
+/**
+ * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 intmsk = dwc2_readl(hsotg, GINTMSK);
+
+ /* Disable host mode interrupts without disturbing common interrupts */
+ intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
+ GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
+}
+
+/*
+ * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
+ * for systems whose total FIFO depth is smaller than the default
+ * RX + TX FIFO size.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *params = &hsotg->params;
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
+
+ total_fifo_size = hw->total_fifo_size;
+ rxfsiz = params->host_rx_fifo_size;
+ nptxfsiz = params->host_nperio_tx_fifo_size;
+ ptxfsiz = params->host_perio_tx_fifo_size;
+
+ /*
+	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
+	 * allocation with support for high-bandwidth endpoints. Synopsys
+	 * defines the MPS (Max Packet Size) of a periodic EP as 1024 and
+	 * of a non-periodic EP as 512.
+ */
+ if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
+ /*
+ * For Buffer DMA mode/Scatter Gather DMA mode
+ * 2 * ((Largest Packet size / 4) + 1 + 1) + n
+		 * with n = number of host channels.
+ * 2 * ((1024/4) + 2) = 516
+ */
+ rxfsiz = 516 + hw->host_channels;
+
+ /*
+ * min non-periodic tx fifo depth
+ * 2 * (largest non-periodic USB packet used / 4)
+ * 2 * (512/4) = 256
+ */
+ nptxfsiz = 256;
+
+ /*
+ * min periodic tx fifo depth
+ * (largest packet size*MC)/4
+ * (1024 * 3)/4 = 768
+ */
+ ptxfsiz = 768;
+
+ params->host_rx_fifo_size = rxfsiz;
+ params->host_nperio_tx_fifo_size = nptxfsiz;
+ params->host_perio_tx_fifo_size = ptxfsiz;
+ }
+
+ /*
+ * If the summation of RX, NPTX and PTX fifo sizes is still
+ * bigger than the total_fifo_size, then we have a problem.
+ *
+ * We won't be able to allocate as many endpoints. Right now,
+ * we're just printing an error message, but ideally this FIFO
+ * allocation algorithm would be improved in the future.
+ *
+ * FIXME improve this FIFO allocation algorithm.
+ */
+ if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
+ dev_err(hsotg->dev, "invalid fifo sizes\n");
+}
+
+static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *params = &hsotg->params;
+ u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
+
+ if (!params->enable_dynamic_fifo)
+ return;
+
+ dwc2_calculate_dynamic_fifo(hsotg);
+
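+	/*
+	 * The FIFOs are laid out back to back in SPRAM: the Rx FIFO at
+	 * offset 0, the non-periodic Tx FIFO directly after it, and the
+	 * periodic Tx FIFO after that, as the start-address fields below
+	 * encode.
+	 */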
+ /* Rx FIFO */
+ grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
+ dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
+ grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
+ grxfsiz |= params->host_rx_fifo_size <<
+ GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
+ dwc2_writel(hsotg, grxfsiz, GRXFSIZ);
+ dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
+ dwc2_readl(hsotg, GRXFSIZ));
+
+ /* Non-periodic Tx FIFO */
+ dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
+ dwc2_readl(hsotg, GNPTXFSIZ));
+ nptxfsiz = params->host_nperio_tx_fifo_size <<
+ FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
+ nptxfsiz |= params->host_rx_fifo_size <<
+ FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
+ dwc2_writel(hsotg, nptxfsiz, GNPTXFSIZ);
+ dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
+ dwc2_readl(hsotg, GNPTXFSIZ));
+
+ /* Periodic Tx FIFO */
+ dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
+ dwc2_readl(hsotg, HPTXFSIZ));
+ hptxfsiz = params->host_perio_tx_fifo_size <<
+ FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
+ hptxfsiz |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size) <<
+ FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
+ dwc2_writel(hsotg, hptxfsiz, HPTXFSIZ);
+ dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
+ dwc2_readl(hsotg, HPTXFSIZ));
+
+ if (hsotg->params.en_multiple_tx_fifo &&
+ hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
+ /*
+ * This feature was implemented in 2.91a version
+ * Global DFIFOCFG calculation for Host mode -
+ * include RxFIFO, NPTXFIFO and HPTXFIFO
+ */
+ dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
+ dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
+ dfifocfg |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size +
+ params->host_perio_tx_fifo_size) <<
+ GDFIFOCFG_EPINFOBASE_SHIFT &
+ GDFIFOCFG_EPINFOBASE_MASK;
+ dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
+ }
+}
+
+/**
+ * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
+ * the HFIR register according to PHY type and speed
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * NOTE: The caller can modify the value of the HFIR register only after the
+ * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
+ * has been set
+ */
+u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
+{
+ u32 usbcfg;
+ u32 hprt0;
+ int clock = 60; /* default value */
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+
+ if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
+ !(usbcfg & GUSBCFG_PHYIF16))
+ clock = 60;
+ if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
+ GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
+ clock = 48;
+ if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
+ !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
+ clock = 30;
+ if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
+ !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
+ clock = 60;
+ if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
+ !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
+ clock = 48;
+ if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
+ clock = 48;
+ if ((usbcfg & GUSBCFG_PHYSEL) &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ clock = 48;
+
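+	/*
+	 * HFIR holds the number of PHY clocks per (micro)frame minus one:
+	 * a 125 us microframe at high speed, a 1 ms frame at full/low
+	 * speed, with "clock" expressed in MHz.
+	 */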
+ if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
+ /* High speed case */
+ return 125 * clock - 1;
+
+ /* FS/LS case */
+ return 1000 * clock - 1;
+}
+
+/**
+ * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
+ * buffer
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @dest: Destination buffer for the packet
+ * @bytes: Number of bytes to copy to the destination
+ */
+void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
+{
+ u32 *data_buf = (u32 *)dest;
+ int word_count = (bytes + 3) / 4;
+ int i;
+
+ /*
+ * Todo: Account for the case where dest is not dword aligned. This
+ * requires reading data from the FIFO into a u32 temp buffer, then
+ * moving it into the data buffer.
+ */
+
+ dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
+
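+	/* Reads of HCFIFO(0) pop words off the single shared Rx FIFO */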
+ for (i = 0; i < word_count; i++, data_buf++)
+ *data_buf = dwc2_readl(hsotg, HCFIFO(0));
+}
+
+/**
+ * dwc2_dump_channel_info() - Prints the state of a host channel
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Pointer to the channel to dump
+ *
+ * Must be called with interrupts disabled and the spinlock held
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+#ifdef VERBOSE_DEBUG
+ int num_channels = hsotg->params.host_channels;
+ struct dwc2_qh *qh;
+ u32 hcchar;
+ u32 hcsplt;
+ u32 hctsiz;
+ u32 hc_dma;
+ int i;
+
+ if (!chan)
+ return;
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
+ hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));
+
+ dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
+ dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
+ hcchar, hcsplt);
+ dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n",
+ hctsiz, hc_dma);
+ dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ chan->dev_addr, chan->ep_num, chan->ep_is_in);
+ dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
+ dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
+ dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
+ dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
+ dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
+ dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
+ dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
+ (unsigned long)chan->xfer_dma);
+ dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
+ dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
+ dev_dbg(hsotg->dev, " NP inactive sched:\n");
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
+ qh_list_entry)
+ dev_dbg(hsotg->dev, " %p\n", qh);
+ dev_dbg(hsotg->dev, " NP waiting sched:\n");
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
+ qh_list_entry)
+ dev_dbg(hsotg->dev, " %p\n", qh);
+ dev_dbg(hsotg->dev, " NP active sched:\n");
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
+ qh_list_entry)
+ dev_dbg(hsotg->dev, " %p\n", qh);
+ dev_dbg(hsotg->dev, " Channels:\n");
+ for (i = 0; i < num_channels; i++) {
+ struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
+
+ dev_dbg(hsotg->dev, " %2d: %p\n", i, chan);
+ }
+#endif /* VERBOSE_DEBUG */
+}
+
+static int _dwc2_hcd_start(struct usb_hcd *hcd);
+
+static void dwc2_host_start(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
+ _dwc2_hcd_start(hcd);
+}
+
+static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = 0;
+}
+
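+/*
+ * Report the address and port of the transaction translator (TT) hub an
+ * URB is routed through; a hub address of 0 means no TT is involved.
+ */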
+static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
+ int *hub_addr, int *hub_port)
+{
+ struct urb *urb = context;
+
+ if (urb->dev->tt)
+ *hub_addr = urb->dev->tt->hub->devnum;
+ else
+ *hub_addr = 0;
+ *hub_port = urb->dev->ttport;
+}
+
+/*
+ * =========================================================================
+ * Low Level Host Channel Access Functions
+ * =========================================================================
+ */
+
+static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcintmsk = HCINTMSK_CHHLTD;
+
+ switch (chan->ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ dev_vdbg(hsotg->dev, "control/bulk\n");
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ hcintmsk |= HCINTMSK_STALL;
+ hcintmsk |= HCINTMSK_XACTERR;
+ hcintmsk |= HCINTMSK_DATATGLERR;
+ if (chan->ep_is_in) {
+ hcintmsk |= HCINTMSK_BBLERR;
+ } else {
+ hcintmsk |= HCINTMSK_NAK;
+ hcintmsk |= HCINTMSK_NYET;
+ if (chan->do_ping)
+ hcintmsk |= HCINTMSK_ACK;
+ }
+
+ if (chan->do_split) {
+ hcintmsk |= HCINTMSK_NAK;
+ if (chan->complete_split)
+ hcintmsk |= HCINTMSK_NYET;
+ else
+ hcintmsk |= HCINTMSK_ACK;
+ }
+
+ if (chan->error_state)
+ hcintmsk |= HCINTMSK_ACK;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "intr\n");
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ hcintmsk |= HCINTMSK_NAK;
+ hcintmsk |= HCINTMSK_STALL;
+ hcintmsk |= HCINTMSK_XACTERR;
+ hcintmsk |= HCINTMSK_DATATGLERR;
+ hcintmsk |= HCINTMSK_FRMOVRUN;
+
+ if (chan->ep_is_in)
+ hcintmsk |= HCINTMSK_BBLERR;
+ if (chan->error_state)
+ hcintmsk |= HCINTMSK_ACK;
+ if (chan->do_split) {
+ if (chan->complete_split)
+ hcintmsk |= HCINTMSK_NYET;
+ else
+ hcintmsk |= HCINTMSK_ACK;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "isoc\n");
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ hcintmsk |= HCINTMSK_FRMOVRUN;
+ hcintmsk |= HCINTMSK_ACK;
+
+ if (chan->ep_is_in) {
+ hcintmsk |= HCINTMSK_XACTERR;
+ hcintmsk |= HCINTMSK_BBLERR;
+ }
+ break;
+ default:
+ dev_err(hsotg->dev, "## Unknown EP type ##\n");
+ break;
+ }
+
+ dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
+}
+
+static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcintmsk = HCINTMSK_CHHLTD;
+
+ /*
+ * For Descriptor DMA mode core halts the channel on AHB error.
+ * Interrupt is not required.
+ */
+ if (!hsotg->params.dma_desc_enable) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "desc DMA disabled\n");
+ hcintmsk |= HCINTMSK_AHBERR;
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "desc DMA enabled\n");
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ }
+
+ if (chan->error_state && !chan->do_split &&
+ chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "setting ACK\n");
+ hcintmsk |= HCINTMSK_ACK;
+ if (chan->ep_is_in) {
+ hcintmsk |= HCINTMSK_DATATGLERR;
+ if (chan->ep_type != USB_ENDPOINT_XFER_INT)
+ hcintmsk |= HCINTMSK_NAK;
+ }
+ }
+
+ dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
+}
+
+static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 intmsk;
+
+ if (hsotg->params.host_dma) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA enabled\n");
+ dwc2_hc_enable_dma_ints(hsotg, chan);
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA disabled\n");
+ dwc2_hc_enable_slave_ints(hsotg, chan);
+ }
+
+ /* Enable the top level host channel interrupt */
+ intmsk = dwc2_readl(hsotg, HAINTMSK);
+ intmsk |= 1 << chan->hc_num;
+ dwc2_writel(hsotg, intmsk, HAINTMSK);
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
+
+ /* Make sure host channel interrupts are enabled */
+ intmsk = dwc2_readl(hsotg, GINTMSK);
+ intmsk |= GINTSTS_HCHINT;
+ dwc2_writel(hsotg, intmsk, GINTMSK);
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
+}
+
+/**
+ * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
+ * a specific endpoint
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * The HCCHARn register is set up with the characteristics specified in chan.
+ * Host channel interrupts that may need to be serviced while this transfer is
+ * in progress are enabled.
+ */
+static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
+{
+ u8 hc_num = chan->hc_num;
+ u32 hcintmsk;
+ u32 hcchar;
+ u32 hcsplt = 0;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ /* Clear old interrupt conditions for this host channel */
+ hcintmsk = 0xffffffff;
+ hcintmsk &= ~HCINTMSK_RESERVED14_31;
+ dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));
+
+ /* Enable channel interrupts required for this transfer */
+ dwc2_hc_enable_ints(hsotg, chan);
+
+ /*
+ * Program the HCCHARn register with the endpoint characteristics for
+ * the current transfer
+ */
+ hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
+ hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
+ if (chan->ep_is_in)
+ hcchar |= HCCHAR_EPDIR;
+ if (chan->speed == USB_SPEED_LOW)
+ hcchar |= HCCHAR_LSPDDEV;
+ hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
+ hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
+ dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
+ hc_num, hcchar);
+
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n",
+ __func__, hc_num);
+ dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
+ chan->dev_addr);
+ dev_vdbg(hsotg->dev, " Ep Num: %d\n",
+ chan->ep_num);
+ dev_vdbg(hsotg->dev, " Is In: %d\n",
+ chan->ep_is_in);
+ dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
+ chan->speed == USB_SPEED_LOW);
+ dev_vdbg(hsotg->dev, " Ep Type: %d\n",
+ chan->ep_type);
+ dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
+ chan->max_packet);
+ }
+
+ /* Program the HCSPLT register for SPLITs */
+ if (chan->do_split) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev,
+ "Programming HC %d with split --> %s\n",
+ hc_num,
+ chan->complete_split ? "CSPLIT" : "SSPLIT");
+ if (chan->complete_split)
+ hcsplt |= HCSPLT_COMPSPLT;
+ hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
+ HCSPLT_XACTPOS_MASK;
+ hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
+ HCSPLT_HUBADDR_MASK;
+ hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
+ HCSPLT_PRTADDR_MASK;
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, " comp split %d\n",
+ chan->complete_split);
+ dev_vdbg(hsotg->dev, " xact pos %d\n",
+ chan->xact_pos);
+ dev_vdbg(hsotg->dev, " hub addr %d\n",
+ chan->hub_addr);
+ dev_vdbg(hsotg->dev, " hub port %d\n",
+ chan->hub_port);
+ dev_vdbg(hsotg->dev, " is_in %d\n",
+ chan->ep_is_in);
+ dev_vdbg(hsotg->dev, " Max Pkt %d\n",
+ chan->max_packet);
+ dev_vdbg(hsotg->dev, " xferlen %d\n",
+ chan->xfer_len);
+ }
+ }
+
+ dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
+}
+
+/**
+ * dwc2_hc_halt() - Attempts to halt a host channel
+ *
+ * @hsotg: Controller register interface
+ * @chan: Host channel to halt
+ * @halt_status: Reason for halting the channel
+ *
+ * This function should only be called in Slave mode or to abort a transfer in
+ * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
+ * controller halts the channel when the transfer is complete or a condition
+ * occurs that requires application intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to initiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ */
+void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+ enum dwc2_halt_status halt_status)
+{
+ u32 nptxsts, hptxsts, hcchar;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ /*
+	 * In buffer DMA or external DMA mode, a non-split periodic
+	 * channel can't be halted here. At the end of the next
+	 * uframe/frame (in the worst case), the core generates a channel
+	 * halted interrupt and disables the channel automatically.
+ */
+ if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
+ hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
+ if (!chan->do_split &&
+ (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
+ chan->ep_type == USB_ENDPOINT_XFER_INT)) {
+ dev_err(hsotg->dev, "%s() Channel can't be halted\n",
+ __func__);
+ return;
+ }
+ }
+
+ if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
+ dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
+
+ if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
+ halt_status == DWC2_HC_XFER_AHB_ERR) {
+ /*
+ * Disable all channel interrupts except Ch Halted. The QTD
+ * and QH state associated with this transfer has been cleared
+ * (in the case of URB_DEQUEUE), so the channel needs to be
+ * shut down carefully to prevent crashes.
+ */
+ u32 hcintmsk = HCINTMSK_CHHLTD;
+
+ dev_vdbg(hsotg->dev, "dequeue/error\n");
+ dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
+
+ /*
+ * Make sure no other interrupts besides halt are currently
+ * pending. Handling another interrupt could cause a crash due
+ * to the QTD and QH state.
+ */
+ dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));
+
+ /*
+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+ * even if the channel was already halted for some other
+ * reason
+ */
+ chan->halt_status = halt_status;
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+ if (!(hcchar & HCCHAR_CHENA)) {
+ /*
+ * The channel is either already halted or it hasn't
+ * started yet. In DMA mode, the transfer may halt if
+ * it finishes normally or a condition occurs that
+ * requires driver intervention. Don't want to halt
+ * the channel again. In either Slave or DMA mode,
+ * it's possible that the transfer has been assigned
+ * to a channel, but not started yet when an URB is
+ * dequeued. Don't want to halt a channel that hasn't
+ * started yet.
+ */
+ return;
+ }
+ }
+ if (chan->halt_pending) {
+ /*
+ * A halt has already been issued for this channel. This might
+ * happen when a transfer is aborted by a higher level in
+ * the stack.
+ */
+ dev_vdbg(hsotg->dev,
+ "*** %s: Channel %d, chan->halt_pending already set ***\n",
+ __func__, chan->hc_num);
+ return;
+ }
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+
+ /* No need to set the bit in DDMA for disabling the channel */
+ /* TODO check it everywhere channel is disabled */
+ if (!hsotg->params.dma_desc_enable) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "desc DMA disabled\n");
+ hcchar |= HCCHAR_CHENA;
+ } else {
+ if (dbg_hc(chan))
+ dev_dbg(hsotg->dev, "desc DMA enabled\n");
+ }
+ hcchar |= HCCHAR_CHDIS;
+
+ if (!hsotg->params.host_dma) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA not enabled\n");
+ hcchar |= HCCHAR_CHENA;
+
+ /* Check for space in the request queue to issue the halt */
+ if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+ chan->ep_type == USB_ENDPOINT_XFER_BULK) {
+ dev_vdbg(hsotg->dev, "control/bulk\n");
+ nptxsts = dwc2_readl(hsotg, GNPTXSTS);
+ if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
+ dev_vdbg(hsotg->dev, "Disabling channel\n");
+ hcchar &= ~HCCHAR_CHENA;
+ }
+ } else {
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "isoc/intr\n");
+ hptxsts = dwc2_readl(hsotg, HPTXSTS);
+ if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
+ hsotg->queuing_high_bandwidth) {
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "Disabling channel\n");
+ hcchar &= ~HCCHAR_CHENA;
+ }
+ }
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA enabled\n");
+ }
+
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
+ chan->halt_status = halt_status;
+
+ if (hcchar & HCCHAR_CHENA) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Channel enabled\n");
+ chan->halt_pending = 1;
+ chan->halt_on_queue = 0;
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Channel disabled\n");
+ chan->halt_on_queue = 1;
+ }
+
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+ dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
+ hcchar);
+ dev_vdbg(hsotg->dev, " halt_pending: %d\n",
+ chan->halt_pending);
+ dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
+ chan->halt_on_queue);
+ dev_vdbg(hsotg->dev, " halt_status: %d\n",
+ chan->halt_status);
+ }
+}
+
+/**
+ * dwc2_hc_cleanup() - Clears the transfer state for a host channel
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Identifies the host channel to clean up
+ *
+ * This function is normally called after a transfer is done and the host
+ * channel is being released
+ */
+void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
+{
+ u32 hcintmsk;
+
+ chan->xfer_started = 0;
+
+ list_del_init(&chan->split_order_list_entry);
+
+ /*
+ * Clear channel interrupt enables and any unhandled channel interrupt
+ * conditions
+ */
+ dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
+ hcintmsk = 0xffffffff;
+ hcintmsk &= ~HCINTMSK_RESERVED14_31;
+ dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
+}
+
+/**
+ * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
+ * which frame a periodic transfer should occur
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Identifies the host channel to set up and its properties
+ * @hcchar: Current value of the HCCHAR register for the specified host channel
+ *
+ * This function has no effect on non-periodic transfers
+ */
+static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, u32 *hcchar)
+{
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ int host_speed;
+ int xfer_ns;
+ int xfer_us;
+ int bytes_in_fifo;
+ u16 fifo_space;
+ u16 frame_number;
+ u16 wire_frame;
+
+ /*
+ * Try to figure out if we're an even or odd frame. If we set
+ * even and the current frame number is even the transfer
+ * will happen immediately. Similar if both are odd. If one is
+ * even and the other is odd then the transfer will happen when
+ * the frame number ticks.
+ *
+ * There's a bit of a balancing act to get this right.
+		 * Sometimes we may want to send data in the current frame (AKA
+ * right away). We might want to do this if the frame number
+ * _just_ ticked, but we might also want to do this in order
+ * to continue a split transaction that happened late in a
+ * microframe (so we didn't know to queue the next transfer
+ * until the frame number had ticked). The problem is that we
+ * need a lot of knowledge to know if there's actually still
+ * time to send things or if it would be better to wait until
+ * the next frame.
+ *
+ * We can look at how much time is left in the current frame
+ * and make a guess about whether we'll have time to transfer.
+ * We'll do that.
+ */
+
+ /* Get speed host is running at */
+ host_speed = (chan->speed != USB_SPEED_HIGH &&
+ !chan->do_split) ? chan->speed : USB_SPEED_HIGH;
+
+ /* See how many bytes are in the periodic FIFO right now */
+ fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
+ TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
+ bytes_in_fifo = sizeof(u32) *
+ (hsotg->params.host_perio_tx_fifo_size -
+ fifo_space);
+
+ /*
+ * Roughly estimate bus time for everything in the periodic
+ * queue + our new transfer. This is "rough" because we're
+		 * using a function that takes into account IN/OUT
+ * and INT/ISO and we're just slamming in one value for all
+ * transfers. This should be an over-estimate and that should
+ * be OK, but we can probably tighten it.
+ */
+ xfer_ns = usb_calc_bus_time(host_speed, false, false,
+ chan->xfer_len + bytes_in_fifo);
+ xfer_us = NS_TO_US(xfer_ns);
+
+ /* See what frame number we'll be at by the time we finish */
+ frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);
+
+ /* This is when we were scheduled to be on the wire */
+ wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);
+
+ /*
+ * If we'd finish _after_ the frame we're scheduled in then
+ * it's hopeless. Just schedule right away and hope for the
+ * best. Note that it _might_ be wise to call back into the
+ * scheduler to pick a better frame, but this is better than
+ * nothing.
+ */
+ if (dwc2_frame_num_gt(frame_number, wire_frame)) {
+ dwc2_sch_vdbg(hsotg,
+ "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
+ chan->qh, wire_frame, frame_number,
+ dwc2_frame_num_dec(frame_number,
+ wire_frame));
+ wire_frame = frame_number;
+
+ /*
+ * We picked a different frame number; communicate this
+ * back to the scheduler so it doesn't try to schedule
+ * another in the same frame.
+ *
+ * Remember that next_active_frame is 1 before the wire
+ * frame.
+ */
+ chan->qh->next_active_frame =
+ dwc2_frame_num_dec(frame_number, 1);
+ }
+
+ if (wire_frame & 1)
+ *hcchar |= HCCHAR_ODDFRM;
+ else
+ *hcchar &= ~HCCHAR_ODDFRM;
+ }
+}
+
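+/**
+ * dwc2_set_pid_isoc() - Selects the starting data PID for an isochronous
+ * transfer from the endpoint speed, direction and multi_count value
+ *
+ * @chan: The host channel to set the PID for
+ */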
+static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
+{
+ /* Set up the initial PID for the transfer */
+ if (chan->speed == USB_SPEED_HIGH) {
+ if (chan->ep_is_in) {
+ if (chan->multi_count == 1)
+ chan->data_pid_start = DWC2_HC_PID_DATA0;
+ else if (chan->multi_count == 2)
+ chan->data_pid_start = DWC2_HC_PID_DATA1;
+ else
+ chan->data_pid_start = DWC2_HC_PID_DATA2;
+ } else {
+ if (chan->multi_count == 1)
+ chan->data_pid_start = DWC2_HC_PID_DATA0;
+ else
+ chan->data_pid_start = DWC2_HC_PID_MDATA;
+ }
+ } else {
+ chan->data_pid_start = DWC2_HC_PID_DATA0;
+ }
+}
+
+/**
+ * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
+ * the Host Channel
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * This function should only be called in Slave mode. For a channel associated
+ * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
+ * associated with a periodic EP, the periodic Tx FIFO is written.
+ *
+ * Upon return the xfer_buf and xfer_count fields in chan are incremented by
+ * the number of bytes written to the Tx FIFO.
+ */
+static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 i;
+ u32 remaining_count;
+ u32 byte_count;
+ u32 dword_count;
+ u32 *data_buf = (u32 *)chan->xfer_buf;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ remaining_count = chan->xfer_len - chan->xfer_count;
+ if (remaining_count > chan->max_packet)
+ byte_count = chan->max_packet;
+ else
+ byte_count = remaining_count;
+
+ dword_count = (byte_count + 3) / 4;
+
+ if (((unsigned long)data_buf & 0x3) == 0) {
+ /* xfer_buf is DWORD aligned */
+ for (i = 0; i < dword_count; i++, data_buf++)
+ dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num));
+ } else {
+ /* xfer_buf is not DWORD aligned */
+ for (i = 0; i < dword_count; i++, data_buf++) {
+ u32 data = data_buf[0] | data_buf[1] << 8 |
+ data_buf[2] << 16 | data_buf[3] << 24;
+ dwc2_writel(hsotg, data, HCFIFO(chan->hc_num));
+ }
+ }
+
+ chan->xfer_count += byte_count;
+ chan->xfer_buf += byte_count;
+}
+
+/**
+ * dwc2_hc_do_ping() - Starts a PING transfer
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * This function should only be called in Slave mode. The Do Ping bit is set in
+ * the HCTSIZ register, then the channel is enabled.
+ */
+static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcchar;
+ u32 hctsiz;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+
+ hctsiz = TSIZ_DOPNG;
+ hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
+}
+
+/**
+ * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
+ * channel and starts the transfer
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel. The xfer_len value
+ * may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be
+ * changed to reflect the final xfer_len value.
+ *
+ * This function may be called in either Slave mode or DMA mode. In Slave mode,
+ * the caller must ensure that there is sufficient space in the request queue
+ * and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets are loaded in the
+ * Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ */
+static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
+ u16 max_hc_pkt_count = hsotg->params.max_packet_count;
+ u32 hcchar;
+ u32 hctsiz = 0;
+ u16 num_packets;
+ u32 ec_mc;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ if (chan->do_ping) {
+ if (!hsotg->params.host_dma) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "ping, no DMA\n");
+ dwc2_hc_do_ping(hsotg, chan);
+ chan->xfer_started = 1;
+ return;
+ }
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "ping, DMA\n");
+
+ hctsiz |= TSIZ_DOPNG;
+ }
+
+ if (chan->do_split) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "split\n");
+ num_packets = 1;
+
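+		/*
+		 * Note: the 188-byte cap below reflects the USB 2.0 split
+		 * budget - a single start-split can carry at most 188 bytes
+		 * of full-speed data in one microframe.
+		 */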
+ if (chan->complete_split && !chan->ep_is_in)
+ /*
+ * For CSPLIT OUT Transfer, set the size to 0 so the
+ * core doesn't expect any data written to the FIFO
+ */
+ chan->xfer_len = 0;
+ else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
+ chan->xfer_len = chan->max_packet;
+ else if (!chan->ep_is_in && chan->xfer_len > 188)
+ chan->xfer_len = 188;
+
+ hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
+ TSIZ_XFERSIZE_MASK;
+
+ /* For split set ec_mc for immediate retries */
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ ec_mc = 3;
+ else
+ ec_mc = 1;
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "no split\n");
+ /*
+ * Ensure that the transfer length and packet count will fit
+ * in the widths allocated for them in the HCTSIZn register
+ */
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ /*
+ * Make sure the transfer size is no larger than one
+ * (micro)frame's worth of data. (A check was done
+ * when the periodic transfer was accepted to ensure
+ * that a (micro)frame's worth of data can be
+ * programmed into a channel.)
+ */
+ u32 max_periodic_len =
+ chan->multi_count * chan->max_packet;
+
+ if (chan->xfer_len > max_periodic_len)
+ chan->xfer_len = max_periodic_len;
+ } else if (chan->xfer_len > max_hc_xfer_size) {
+ /*
+ * Make sure that xfer_len is a multiple of max packet
+ * size
+ */
+ chan->xfer_len =
+ max_hc_xfer_size - chan->max_packet + 1;
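+			/*
+			 * e.g. max_hc_xfer_size = 524287 with a 512-byte max
+			 * packet gives 523776 bytes, exactly 1023 packets.
+			 */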
+ }
+
+ if (chan->xfer_len > 0) {
+ num_packets = (chan->xfer_len + chan->max_packet - 1) /
+ chan->max_packet;
+ if (num_packets > max_hc_pkt_count) {
+ num_packets = max_hc_pkt_count;
+ chan->xfer_len = num_packets * chan->max_packet;
+ } else if (chan->ep_is_in) {
+ /*
+ * Always program an integral # of max packets
+ * for IN transfers.
+ * Note: This assumes that the input buffer is
+ * aligned and sized accordingly.
+ */
+ chan->xfer_len = num_packets * chan->max_packet;
+ }
+ } else {
+ /* Need 1 packet for transfer length of 0 */
+ num_packets = 1;
+ }
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * Make sure that the multi_count field matches the
+ * actual transfer length
+ */
+ chan->multi_count = num_packets;
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ dwc2_set_pid_isoc(chan);
+
+ hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
+ TSIZ_XFERSIZE_MASK;
+
+ /* The ec_mc gets the multi_count for non-split */
+ ec_mc = chan->multi_count;
+ }
+
+ chan->start_pkt_count = num_packets;
+ hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
+ hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
+ TSIZ_SC_MC_PID_MASK;
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
+ hctsiz, chan->hc_num);
+
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+ dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
+ (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT);
+ dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
+ (hctsiz & TSIZ_PKTCNT_MASK) >>
+ TSIZ_PKTCNT_SHIFT);
+ dev_vdbg(hsotg->dev, " Start PID: %d\n",
+ (hctsiz & TSIZ_SC_MC_PID_MASK) >>
+ TSIZ_SC_MC_PID_SHIFT);
+ }
+
+ if (hsotg->params.host_dma) {
+ dma_addr_t dma_addr;
+
+ if (chan->align_buf) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "align_buf\n");
+ dma_addr = chan->align_buf;
+ } else {
+ dma_addr = chan->xfer_dma;
+ }
+ dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num));
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
+ (unsigned long)dma_addr, chan->hc_num);
+ }
+
+ /* Start the split */
+ if (chan->do_split) {
+ u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
+
+ hcsplt |= HCSPLT_SPLTENA;
+ dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num));
+ }
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+ hcchar &= ~HCCHAR_MULTICNT_MASK;
+ hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
+ dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
+
+ if (hcchar & HCCHAR_CHDIS)
+ dev_warn(hsotg->dev,
+ "%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, chan->hc_num, hcchar);
+
+ /* Set host channel enable after all other setup is complete */
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
+
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
+ chan->hc_num);
+
+ chan->xfer_started = 1;
+ chan->requests++;
+
+ if (!hsotg->params.host_dma &&
+ !chan->ep_is_in && chan->xfer_len > 0)
+ /* Load OUT packet into the appropriate Tx FIFO */
+ dwc2_hc_write_packet(hsotg, chan);
+}
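+
+/*
+ * Worked example (illustrative): a 3000-byte bulk OUT with a 512-byte max
+ * packet in buffer DMA mode computes num_packets = 6, so HCTSIZ gets
+ * XferSize = 3000 and PktCnt = 6 with ec_mc = 1; once HCCHAR is enabled,
+ * the core sends five 512-byte packets followed by one 440-byte packet.
+ */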
+
+/**
+ * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
+ * host channel and starts the transfer in Descriptor DMA mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
+ * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
+ * with micro-frame bitmap.
+ *
+ * Initializes HCDMA register with descriptor list address and CTD value then
+ * starts the transfer via enabling the channel.
+ */
+void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcchar;
+ u32 hctsiz = 0;
+
+ if (chan->do_ping)
+ hctsiz |= TSIZ_DOPNG;
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ dwc2_set_pid_isoc(chan);
+
+ /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
+ hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
+ TSIZ_SC_MC_PID_MASK;
+
+ /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
+ hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
+
+ /* Non-zero only for high-speed interrupt endpoints */
+ hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
+
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+ dev_vdbg(hsotg->dev, " Start PID: %d\n",
+ chan->data_pid_start);
+ dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
+ }
+
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
+
+ dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
+ chan->desc_list_sz, DMA_TO_DEVICE);
+
+ dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
+ &chan->desc_list_addr, chan->hc_num);
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+ hcchar &= ~HCCHAR_MULTICNT_MASK;
+ hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
+ HCCHAR_MULTICNT_MASK;
+
+ if (hcchar & HCCHAR_CHDIS)
+ dev_warn(hsotg->dev,
+ "%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, chan->hc_num, hcchar);
+
+ /* Set host channel enable after all other setup is complete */
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
+
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
+ chan->hc_num);
+
+ chan->xfer_started = 1;
+ chan->requests++;
+}
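+
+/*
+ * Illustration: a channel whose descriptor list holds 8 transfer
+ * descriptors programs TSIZ.NTD = chan->ntd - 1 = 7, since the field
+ * encodes the descriptor count minus one.
+ */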
+
+/**
+ * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
+ * a previous call to dwc2_hc_start_transfer()
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * The caller must ensure there is sufficient space in the request queue and Tx
+ * Data FIFO. This function should only be called in Slave mode. In DMA mode,
+ * the controller acts autonomously to complete transfers programmed to a host
+ * channel.
+ *
+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ * if there is any data remaining to be queued. For an IN transfer, another
+ * data packet is always requested. For the SETUP phase of a control transfer,
+ * this function does nothing.
+ *
+ * Return: 1 if a new request is queued, 0 if no more requests are required
+ * for this transfer
+ */
+static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+
+ if (chan->do_split)
+ /* SPLITs always queue just once per channel */
+ return 0;
+
+ if (chan->data_pid_start == DWC2_HC_PID_SETUP)
+ /* SETUPs are queued only once since they can't be NAK'd */
+ return 0;
+
+ if (chan->ep_is_in) {
+ /*
+ * Always queue another request for other IN transfers. If
+ * back-to-back INs are issued and NAKs are received for both,
+ * the driver may still be processing the first NAK when the
+ * second NAK is received. When the interrupt handler clears
+ * the NAK interrupt for the first NAK, the second NAK will
+ * not be seen. So we can't depend on the NAK interrupt
+ * handler to requeue a NAK'd request. Instead, IN requests
+ * are issued each time this function is called. When the
+ * transfer completes, the extra requests for the channel will
+ * be flushed.
+ */
+ u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+
+ dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
+ hcchar);
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
+ chan->requests++;
+ return 1;
+ }
+
+ /* OUT transfers */
+
+ if (chan->xfer_count < chan->xfer_len) {
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ u32 hcchar = dwc2_readl(hsotg,
+ HCCHAR(chan->hc_num));
+
+ dwc2_hc_set_even_odd_frame(hsotg, chan,
+ &hcchar);
+ }
+
+ /* Load OUT packet into the appropriate Tx FIFO */
+ dwc2_hc_write_packet(hsotg, chan);
+ chan->requests++;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * =========================================================================
+ * HCD
+ * =========================================================================
+ */
+
+/*
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ECONNRESET and frees the QTD.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
+ struct list_head *qh_list)
+{
+ struct dwc2_qh *qh, *qh_tmp;
+ struct dwc2_qtd *qtd, *qtd_tmp;
+
+ list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+ qtd_list_entry) {
+ dwc2_host_complete(hsotg, qtd, -ECONNRESET);
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+ }
+ }
+}
+
+static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
+ struct list_head *qh_list)
+{
+ struct dwc2_qtd *qtd, *qtd_tmp;
+ struct dwc2_qh *qh, *qh_tmp;
+ unsigned long flags;
+
+ if (!qh_list->next)
+ /* The list hasn't been initialized yet */
+ return;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /* Ensure there are no QTDs or URBs left */
+ dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
+
+ list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
+ dwc2_hcd_qh_unlink(hsotg, qh);
+
+ /* Free each QTD in the QH's QTD list */
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+ qtd_list_entry)
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+
+ if (qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_hcd_qh_free(hsotg, qh);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/*
+ * Responds with an error status of -ECONNRESET to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
+{
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
+}
+
+/**
+ * dwc2_hcd_start() - Starts the HCD when switching to Host mode
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ */
+void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0;
+
+ if (hsotg->op_state == OTG_STATE_B_HOST) {
+ /*
+		 * Reset the port. During an HNP mode switch the reset
+ * needs to occur within 1ms and have a duration of at
+ * least 50ms.
+ */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RST;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ }
+
+ queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
+ msecs_to_jiffies(50));
+}
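+
+/*
+ * The 50 ms delay on start_work above in effect provides the >= 50 ms
+ * reset duration called for during HNP; the queued worker is then expected
+ * to finish bringing the port up.
+ */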
+
+/* Must be called with interrupt disabled and spinlock held */
+static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
+{
+ int num_channels = hsotg->params.host_channels;
+ struct dwc2_host_chan *channel;
+ u32 hcchar;
+ int i;
+
+ if (!hsotg->params.host_dma) {
+ /* Flush out any channel requests in slave mode */
+ for (i = 0; i < num_channels; i++) {
+ channel = hsotg->hc_ptr_array[i];
+ if (!list_empty(&channel->hc_list_entry))
+ continue;
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ if (hcchar & HCCHAR_CHENA) {
+ hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
+ hcchar |= HCCHAR_CHDIS;
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ }
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ channel = hsotg->hc_ptr_array[i];
+ if (!list_empty(&channel->hc_list_entry))
+ continue;
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ if (hcchar & HCCHAR_CHENA) {
+ /* Halt the channel */
+ hcchar |= HCCHAR_CHDIS;
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ }
+
+ dwc2_hc_cleanup(hsotg, channel);
+ list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
+ /*
+ * Added for Descriptor DMA to prevent channel double cleanup in
+ * release_channel_ddma(), which is called from ep_disable when
+ * device disconnects
+ */
+ channel->qh = NULL;
+ }
+ /* All channels have been freed, mark them available */
+ if (hsotg->params.uframe_sched) {
+ hsotg->available_host_channels =
+ hsotg->params.host_channels;
+ } else {
+ hsotg->non_periodic_channels = 0;
+ hsotg->periodic_channels = 0;
+ }
+}
+
+/**
+ * dwc2_hcd_connect() - Handles connect of the HCD
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->lx_state != DWC2_L0)
+ usb_hcd_resume_root_hub(hsotg->priv);
+
+ hsotg->flags.b.port_connect_status_change = 1;
+ hsotg->flags.b.port_connect_status = 1;
+}
+
+/**
+ * dwc2_hcd_disconnect() - Handles disconnect of the HCD
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ * @force: If true, we won't try to reconnect even if we see a device connected.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
+{
+ u32 intr;
+ u32 hprt0;
+
+ /* Set status flags for the hub driver */
+ hsotg->flags.b.port_connect_status_change = 1;
+ hsotg->flags.b.port_connect_status = 0;
+
+ /*
+	 * Shut down any transfers in progress by clearing the Tx FIFO Empty
+ * interrupt mask and status bits and disabling subsequent host
+ * channel interrupts.
+ */
+ intr = dwc2_readl(hsotg, GINTMSK);
+ intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
+ dwc2_writel(hsotg, intr, GINTMSK);
+ intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
+ dwc2_writel(hsotg, intr, GINTSTS);
+
+ /*
+ * Turn off the vbus power only if the core has transitioned to device
+ * mode. If still in host mode, need to keep power on to detect a
+ * reconnection.
+ */
+ if (dwc2_is_device_mode(hsotg)) {
+ if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
+ dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
+ dwc2_writel(hsotg, 0, HPRT0);
+ }
+
+ dwc2_disable_host_interrupts(hsotg);
+ }
+
+ /* Respond with an error status to all URBs in the schedule */
+ dwc2_kill_all_urbs(hsotg);
+
+ if (dwc2_is_host_mode(hsotg))
+ /* Clean up any host channels that were in use */
+ dwc2_hcd_cleanup_channels(hsotg);
+
+ dwc2_host_disconnect(hsotg);
+
+ /*
+ * Add an extra check here to see if we're actually connected but
+ * we don't have a detection interrupt pending. This can happen if:
+ * 1. hardware sees connect
+ * 2. hardware sees disconnect
+ * 3. hardware sees connect
+ * 4. dwc2_port_intr() - clears connect interrupt
+ * 5. dwc2_handle_common_intr() - calls here
+ *
+	 * Without the extra check here we will end up calling disconnect
+ * and won't get any future interrupts to handle the connect.
+ */
+ if (!force) {
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+ if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
+ dwc2_hcd_connect(hsotg);
+ }
+}
+
+/**
+ * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ */
+static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->bus_suspended) {
+ hsotg->flags.b.port_suspend_change = 1;
+ usb_hcd_resume_root_hub(hsotg->priv);
+ }
+
+ if (hsotg->lx_state == DWC2_L1)
+ hsotg->flags.b.port_l1_change = 1;
+}
+
+/**
+ * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
+ *
+ * @hsotg: Pointer to struct dwc2_hsotg
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
+{
+ dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
+
+ /*
+ * The root hub should be disconnected before this function is called.
+ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+ * and the QH lists (via ..._hcd_endpoint_disable).
+ */
+
+ /* Turn off all host-specific interrupts */
+ dwc2_disable_host_interrupts(hsotg);
+
+ /* Turn off the vbus power */
+ dev_dbg(hsotg->dev, "PortPower off\n");
+ dwc2_writel(hsotg, 0, HPRT0);
+}
+
+/* Caller must hold driver lock */
+static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
+ struct dwc2_qtd *qtd)
+{
+ u32 intr_mask;
+ int retval;
+ int dev_speed;
+
+ if (!hsotg->flags.b.port_connect_status) {
+ /* No longer connected */
+ dev_err(hsotg->dev, "Not connected\n");
+ return -ENODEV;
+ }
+
+ dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+
+ /* Some configurations cannot support LS traffic on a FS root port */
+ if ((dev_speed == USB_SPEED_LOW) &&
+ (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
+ (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
+ u32 hprt0 = dwc2_readl(hsotg, HPRT0);
+ u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+
+ if (prtspd == HPRT0_SPD_FULL_SPEED)
+ return -ENODEV;
+ }
+
+ if (!qtd)
+ return -EINVAL;
+
+ dwc2_hcd_qtd_init(qtd, urb);
+ retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
+ if (retval) {
+ dev_err(hsotg->dev,
+ "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
+ retval);
+ return retval;
+ }
+
+ intr_mask = dwc2_readl(hsotg, GINTMSK);
+ if (!(intr_mask & GINTSTS_SOF)) {
+ enum dwc2_transaction_type tr_type;
+
+ if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
+ !(qtd->urb->flags & URB_GIVEBACK_ASAP))
+ /*
+ * Do not schedule SG transactions until qtd has
+ * URB_GIVEBACK_ASAP set
+ */
+ return 0;
+
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE)
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+ }
+
+ return 0;
+}
+
+/* Must be called with interrupt disabled and spinlock held */
+static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb)
+{
+ struct dwc2_qh *qh;
+ struct dwc2_qtd *urb_qtd;
+
+ urb_qtd = urb->qtd;
+ if (!urb_qtd) {
+ dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
+ return -EINVAL;
+ }
+
+ qh = urb_qtd->qh;
+ if (!qh) {
+ dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
+ return -EINVAL;
+ }
+
+ urb->priv = NULL;
+
+ if (urb_qtd->in_process && qh->channel) {
+ dwc2_dump_channel_info(hsotg, qh->channel);
+
+ /* The QTD is in process (it has been assigned to a channel) */
+ if (hsotg->flags.b.port_connect_status)
+ /*
+ * If still connected (i.e. in host mode), halt the
+ * channel so it can be used for other transfers. If
+ * no longer connected, the host registers can't be
+ * written to halt the channel since the core is in
+ * device mode.
+ */
+ dwc2_hc_halt(hsotg, qh->channel,
+ DWC2_HC_XFER_URB_DEQUEUE);
+ }
+
+ /*
+ * Free the QTD and clean up the associated QH. Leave the QH in the
+ * schedule if it has any remaining QTDs.
+ */
+ if (!hsotg->params.dma_desc_enable) {
+ u8 in_process = urb_qtd->in_process;
+
+ dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
+ if (in_process) {
+ dwc2_hcd_qh_deactivate(hsotg, qh, 0);
+ qh->channel = NULL;
+ } else if (list_empty(&qh->qtd_list)) {
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ }
+ } else {
+ dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
+ }
+
+ return 0;
+}
+
+/* Must NOT be called with interrupt disabled or spinlock held */
+static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
+ struct usb_host_endpoint *ep, int retry)
+{
+ struct dwc2_qtd *qtd, *qtd_tmp;
+ struct dwc2_qh *qh;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ qh = ep->hcpriv;
+ if (!qh) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ while (!list_empty(&qh->qtd_list) && retry--) {
+ if (retry == 0) {
+ dev_err(hsotg->dev,
+ "## timeout in dwc2_hcd_endpoint_disable() ##\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh) {
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+
+ dwc2_hcd_qh_unlink(hsotg, qh);
+
+ /* Free each QTD in the QH's QTD list */
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+
+ ep->hcpriv = NULL;
+
+ if (qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ dwc2_hcd_qh_free(hsotg, qh);
+
+ return 0;
+
+err:
+ ep->hcpriv = NULL;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return rc;
+}
+
+/* Must be called with interrupt disabled and spinlock held */
+static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
+ struct usb_host_endpoint *ep)
+{
+ struct dwc2_qh *qh = ep->hcpriv;
+
+ if (!qh)
+ return -EINVAL;
+
+ qh->data_toggle = DWC2_HC_PID_DATA0;
+
+ return 0;
+}
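+
+/*
+ * Resetting data_toggle to DATA0 here matches what the USB 2.0 spec
+ * requires of the host after an endpoint halt is cleared.
+ */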
+
+/**
+ * dwc2_core_init() - Initializes the DWC_otg controller registers and
+ * prepares the core for device mode or host mode operation
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @initial_setup: If true then this is the first init for this instance.
+ */
+int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
+{
+ u32 usbcfg, otgctl;
+ int retval;
+
+ dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+
+ /* Set ULPI External VBUS bit if needed */
+ usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
+ if (hsotg->params.phy_ulpi_ext_vbus)
+ usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
+
+ /* Set external TS Dline pulsing bit if needed */
+ usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
+ if (hsotg->params.ts_dline)
+ usbcfg |= GUSBCFG_TERMSELDLPULSE;
+
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /*
+ * Reset the Controller
+ *
+ * We only need to reset the controller if this is a re-init.
+ * For the first init we know for sure that earlier code reset us (it
+ * needed to in order to properly detect various parameters).
+ */
+ if (!initial_setup) {
+ retval = dwc2_core_reset(hsotg, false);
+ if (retval) {
+ dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
+ __func__);
+ return retval;
+ }
+ }
+
+ /*
+ * This needs to happen in FS mode before any other programming occurs
+ */
+ retval = dwc2_phy_init(hsotg, initial_setup);
+ if (retval)
+ return retval;
+
+ /* Program the GAHBCFG Register */
+ retval = dwc2_gahbcfg_init(hsotg);
+ if (retval)
+ return retval;
+
+ /* Program the GUSBCFG register */
+ dwc2_gusbcfg_init(hsotg);
+
+ /* Program the GOTGCTL register */
+ otgctl = dwc2_readl(hsotg, GOTGCTL);
+ otgctl &= ~GOTGCTL_OTGVER;
+ dwc2_writel(hsotg, otgctl, GOTGCTL);
+
+ /* Clear the SRP success bit for FS-I2c */
+ hsotg->srp_success = 0;
+
+ /* Enable common interrupts */
+ dwc2_enable_common_interrupts(hsotg);
+
+ /*
+ * Do device or host initialization based on mode during PCD and
+ * HCD initialization
+ */
+ if (dwc2_is_host_mode(hsotg)) {
+ dev_dbg(hsotg->dev, "Host Mode\n");
+ hsotg->op_state = OTG_STATE_A_HOST;
+ } else {
+ dev_dbg(hsotg->dev, "Device Mode\n");
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+ }
+
+ return 0;
+}
+
+/**
+ * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
+ * Host mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * This function flushes the Tx and Rx FIFOs and flushes any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ */
+static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
+{
+ u32 hcfg, hfir, otgctl, usbcfg;
+
+ dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
+ /* Set HS/FS Timeout Calibration to 7 (max available value).
+ * The number of PHY clocks that the application programs in
+ * this field is added to the high/full speed interpacket timeout
+ * duration in the core to account for any additional delays
+ * introduced by the PHY. This can be required, because the delay
+ * introduced by the PHY in generating the linestate condition
+ * can vary from one PHY to another.
+ */
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ usbcfg |= GUSBCFG_TOUTCAL(7);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+
+ /* Restart the Phy Clock */
+ dwc2_writel(hsotg, 0, PCGCTL);
+
+ /* Initialize Host Configuration Register */
+ dwc2_init_fs_ls_pclk_sel(hsotg);
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
+ hcfg = dwc2_readl(hsotg, HCFG);
+ hcfg |= HCFG_FSLSSUPP;
+ dwc2_writel(hsotg, hcfg, HCFG);
+ }
+
+ /*
+	 * The RLDCTRL bit allows the HFIR frame interval to be reloaded
+	 * dynamically at runtime. The bit itself must be programmed during
+	 * initial configuration and must not be changed afterward.
+ */
+ if (hsotg->params.reload_ctl) {
+ hfir = dwc2_readl(hsotg, HFIR);
+ hfir |= HFIR_RLDCTRL;
+ dwc2_writel(hsotg, hfir, HFIR);
+ }
+
+ if (hsotg->params.dma_desc_enable) {
+ u32 op_mode = hsotg->hw_params.op_mode;
+
+ if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
+ !hsotg->hw_params.dma_desc_enable ||
+ op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
+ op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
+ op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
+ dev_err(hsotg->dev,
+ "Hardware does not support descriptor DMA mode -\n");
+ dev_err(hsotg->dev,
+ "falling back to buffer DMA mode.\n");
+ hsotg->params.dma_desc_enable = false;
+ } else {
+ hcfg = dwc2_readl(hsotg, HCFG);
+ hcfg |= HCFG_DESCDMA;
+ dwc2_writel(hsotg, hcfg, HCFG);
+ }
+ }
+
+ /* Configure data FIFO sizes */
+ dwc2_config_fifos(hsotg);
+
+ /* TODO - check this */
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ otgctl = dwc2_readl(hsotg, GOTGCTL);
+ otgctl &= ~GOTGCTL_HSTSETHNPEN;
+ dwc2_writel(hsotg, otgctl, GOTGCTL);
+
+ /* Make sure the FIFOs are flushed */
+ dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
+ dwc2_flush_rx_fifo(hsotg);
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ otgctl = dwc2_readl(hsotg, GOTGCTL);
+ otgctl &= ~GOTGCTL_HSTSETHNPEN;
+ dwc2_writel(hsotg, otgctl, GOTGCTL);
+
+ if (!hsotg->params.dma_desc_enable) {
+ int num_channels, i;
+ u32 hcchar;
+
+ /* Flush out any leftover queued requests */
+ num_channels = hsotg->params.host_channels;
+ for (i = 0; i < num_channels; i++) {
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ if (hcchar & HCCHAR_CHENA) {
+ hcchar &= ~HCCHAR_CHENA;
+ hcchar |= HCCHAR_CHDIS;
+ hcchar &= ~HCCHAR_EPDIR;
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ }
+ }
+
+ /* Halt all channels to put them into a known state */
+ for (i = 0; i < num_channels; i++) {
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ if (hcchar & HCCHAR_CHENA) {
+ hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
+ hcchar &= ~HCCHAR_EPDIR;
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
+ dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
+ __func__, i);
+
+ if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
+ HCCHAR_CHENA,
+ 1000)) {
+ dev_warn(hsotg->dev,
+ "Unable to clear enable on channel %d\n",
+ i);
+ }
+ }
+ }
+ }
+
+ /* Enable ACG feature in host mode, if supported */
+ dwc2_enable_acg(hsotg);
+
+ /* Turn on the vbus power */
+ dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
+ if (hsotg->op_state == OTG_STATE_A_HOST) {
+ u32 hprt0 = dwc2_read_hprt0(hsotg);
+
+ dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
+ !!(hprt0 & HPRT0_PWR));
+ if (!(hprt0 & HPRT0_PWR)) {
+ hprt0 |= HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ }
+ }
+
+ dwc2_enable_host_interrupts(hsotg);
+}
+
+/*
+ * Initializes dynamic portions of the DWC_otg HCD state
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_host_chan *chan, *chan_tmp;
+ int num_channels;
+ int i;
+
+ hsotg->flags.d32 = 0;
+ hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
+
+ if (hsotg->params.uframe_sched) {
+ hsotg->available_host_channels =
+ hsotg->params.host_channels;
+ } else {
+ hsotg->non_periodic_channels = 0;
+ hsotg->periodic_channels = 0;
+ }
+
+ /*
+ * Put all channels in the free channel list and clean up channel
+ * states
+ */
+ list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
+ hc_list_entry)
+ list_del_init(&chan->hc_list_entry);
+
+ num_channels = hsotg->params.host_channels;
+ for (i = 0; i < num_channels; i++) {
+ chan = hsotg->hc_ptr_array[i];
+ list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
+ dwc2_hc_cleanup(hsotg, chan);
+ }
+
+ /* Initialize the DWC core for host mode operation */
+ dwc2_core_host_init(hsotg);
+}
+
+static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
+{
+ int hub_addr, hub_port;
+
+ chan->do_split = 1;
+ chan->xact_pos = qtd->isoc_split_pos;
+ chan->complete_split = qtd->complete_split;
+ dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
+ chan->hub_addr = (u8)hub_addr;
+ chan->hub_port = (u8)hub_port;
+}
+
+static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd)
+{
+ struct dwc2_hcd_urb *urb = qtd->urb;
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+ switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ chan->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+ switch (qtd->control_phase) {
+ case DWC2_CONTROL_SETUP:
+ dev_vdbg(hsotg->dev, " Control setup transaction\n");
+ chan->do_ping = 0;
+ chan->ep_is_in = 0;
+ chan->data_pid_start = DWC2_HC_PID_SETUP;
+ if (hsotg->params.host_dma)
+ chan->xfer_dma = urb->setup_dma;
+ else
+ chan->xfer_buf = urb->setup_packet;
+ chan->xfer_len = 8;
+ break;
+
+ case DWC2_CONTROL_DATA:
+ dev_vdbg(hsotg->dev, " Control data transaction\n");
+ chan->data_pid_start = qtd->data_toggle;
+ break;
+
+ case DWC2_CONTROL_STATUS:
+ /*
+ * Direction is opposite of data direction or IN if no
+ * data
+ */
+ dev_vdbg(hsotg->dev, " Control status transaction\n");
+ if (urb->length == 0)
+ chan->ep_is_in = 1;
+ else
+ chan->ep_is_in =
+ dwc2_hcd_is_pipe_out(&urb->pipe_info);
+ if (chan->ep_is_in)
+ chan->do_ping = 0;
+ chan->data_pid_start = DWC2_HC_PID_DATA1;
+ chan->xfer_len = 0;
+ if (hsotg->params.host_dma)
+ chan->xfer_dma = hsotg->status_buf_dma;
+ else
+ chan->xfer_buf = hsotg->status_buf;
+ break;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ chan->ep_type = USB_ENDPOINT_XFER_BULK;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ chan->ep_type = USB_ENDPOINT_XFER_INT;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ chan->ep_type = USB_ENDPOINT_XFER_ISOC;
+ if (hsotg->params.dma_desc_enable)
+ break;
+
+ frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+ frame_desc->status = 0;
+
+ if (hsotg->params.host_dma) {
+ chan->xfer_dma = urb->dma;
+ chan->xfer_dma += frame_desc->offset +
+ qtd->isoc_split_offset;
+ } else {
+ chan->xfer_buf = urb->buf;
+ chan->xfer_buf += frame_desc->offset +
+ qtd->isoc_split_offset;
+ }
+
+ chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+
+ if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
+ if (chan->xfer_len <= 188)
+ chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
+ else
+ chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
+ }
+ break;
+ }
+}
+
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh,
+ struct dwc2_host_chan *chan)
+{
+ if (!hsotg->unaligned_cache ||
+ chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+ return -ENOMEM;
+
+ if (!qh->dw_align_buf) {
+ qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+ GFP_ATOMIC | GFP_DMA);
+ if (!qh->dw_align_buf)
+ return -ENOMEM;
+ }
+
+ qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+ DWC2_KMEM_UNALIGNED_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+ dev_err(hsotg->dev, "can't map align_buf\n");
+ chan->align_buf = 0;
+ return -EINVAL;
+ }
+
+ chan->align_buf = qh->dw_align_buf_dma;
+ return 0;
+}
+
+#define DWC2_USB_DMA_ALIGN 4
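+
+/*
+ * With DWC2_USB_DMA_ALIGN = 4, the alignment tests in the helpers below
+ * reduce to checking the two low address bits: e.g. a transfer_buffer at
+ * address 0x1002 gives 0x1002 & 0x3 = 0x2, so a bounce buffer is needed.
+ */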
+
+static void dwc2_free_dma_aligned_buffer(struct urb *urb)
+{
+ void *stored_xfer_buffer;
+ size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ /* Restore urb->transfer_buffer from the end of the allocated area */
+ memcpy(&stored_xfer_buffer,
+ PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
+ sizeof(urb->transfer_buffer));
+
+ if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+ }
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = stored_xfer_buffer;
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ void *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ /*
+	 * Allocate a buffer with enough padding to hold the original
+	 * transfer_buffer pointer. This allocation is guaranteed to be
+	 * aligned properly for DMA.
+ */
+ kmalloc_size = urb->transfer_buffer_length +
+ (dma_get_cache_alignment() - 1) +
+ sizeof(urb->transfer_buffer);
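+	/*
+	 * e.g. (illustrative) a 100-byte transfer with 64-byte cache lines
+	 * and 8-byte pointers: 100 + 63 + 8 = 171 bytes, with the saved
+	 * pointer stored at PTR_ALIGN(buf + 100, 64) = buf + 128, assuming
+	 * the allocation itself is cache-line aligned.
+	 */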
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /*
+	 * Store the original urb->transfer_buffer pointer at the end of the
+	 * allocation so it can be restored later.
+ */
+ memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
+ &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
+ if (usb_urb_dir_out(urb))
+ memcpy(kmalloc_ptr, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = kmalloc_ptr;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ /* We assume setup_dma is always aligned; warn if not */
+ WARN_ON_ONCE(urb->setup_dma &&
+ (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));
+
+ ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ dwc2_free_dma_aligned_buffer(urb);
+
+ return ret;
+}
+
+static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ dwc2_free_dma_aligned_buffer(urb);
+}
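+
+/*
+ * Together these two hooks wrap the generic HCD DMA mapping so that any
+ * bounce buffer set up in dwc2_alloc_dma_aligned_buffer() is transparently
+ * copied back (for IN transfers) and freed on the unmap path.
+ */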
+
+/**
+ * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
+ * channel and initializes the host channel to perform the transactions. The
+ * host channel is removed from the free list.
+ *
+ * @hsotg: The HCD state structure
+ * @qh: Transactions from the first QTD for this QH are selected and assigned
+ * to a free host channel
+ */
+static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ struct dwc2_host_chan *chan;
+ struct dwc2_hcd_urb *urb;
+ struct dwc2_qtd *qtd;
+
+ if (dbg_qh(qh))
+ dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
+
+ if (list_empty(&qh->qtd_list)) {
+ dev_dbg(hsotg->dev, "No QTDs in QH list\n");
+ return -ENOMEM;
+ }
+
+ if (list_empty(&hsotg->free_hc_list)) {
+ dev_dbg(hsotg->dev, "No free channel to assign\n");
+ return -ENOMEM;
+ }
+
+ chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
+ hc_list_entry);
+
+ /* Remove host channel from free list */
+ list_del_init(&chan->hc_list_entry);
+
+ qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
+ urb = qtd->urb;
+ qh->channel = chan;
+ qtd->in_process = 1;
+
+ /*
+	 * Take the device address from the URB's pipe info. This address is
+	 * 0 before the SET_ADDRESS command and the correct address afterward.
+ */
+ chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
+ chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
+ chan->speed = qh->dev_speed;
+ chan->max_packet = qh->maxp;
+
+ chan->xfer_started = 0;
+ chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
+ chan->error_state = (qtd->error_count > 0);
+ chan->halt_on_queue = 0;
+ chan->halt_pending = 0;
+ chan->requests = 0;
+
+ /*
+ * The following values may be modified in the transfer type section
+ * below. The xfer_len value may be reduced when the transfer is
+ * started to accommodate the max widths of the XferSize and PktCnt
+ * fields in the HCTSIZn register.
+ */
+
+ chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
+ if (chan->ep_is_in)
+ chan->do_ping = 0;
+ else
+ chan->do_ping = qh->ping_state;
+
+ chan->data_pid_start = qh->data_toggle;
+ chan->multi_count = 1;
+
+ if (urb->actual_length > urb->length &&
+ !dwc2_hcd_is_pipe_in(&urb->pipe_info))
+ urb->actual_length = urb->length;
+
+ if (hsotg->params.host_dma)
+ chan->xfer_dma = urb->dma + urb->actual_length;
+ else
+ chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
+
+ chan->xfer_len = urb->length - urb->actual_length;
+ chan->xfer_count = 0;
+
+ /* Set the split attributes if required */
+ if (qh->do_split)
+ dwc2_hc_init_split(hsotg, chan, qtd, urb);
+ else
+ chan->do_split = 0;
+
+ /* Set the transfer attributes */
+ dwc2_hc_init_xfer(hsotg, chan, qtd);
+
+ /* For non-dword aligned buffers */
+ if (hsotg->params.host_dma && qh->do_split &&
+ chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+ dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+ if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+ dev_err(hsotg->dev,
+ "Failed to allocate memory to handle non-aligned buffer\n");
+ /* Add channel back to free list */
+ chan->align_buf = 0;
+ chan->multi_count = 0;
+ list_add_tail(&chan->hc_list_entry,
+ &hsotg->free_hc_list);
+ qtd->in_process = 0;
+ qh->channel = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ /*
+ * We assume that DMA is always aligned in non-split
+ * case or split out case. Warn if not.
+ */
+ WARN_ON_ONCE(hsotg->params.host_dma &&
+ (chan->xfer_dma & 0x3));
+ chan->align_buf = 0;
+ }
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * This value may be modified when the transfer is started
+ * to reflect the actual transfer length
+ */
+ chan->multi_count = qh->maxp_mult;
+
+ if (hsotg->params.dma_desc_enable) {
+ chan->desc_list_addr = qh->desc_list_dma;
+ chan->desc_list_sz = qh->desc_list_sz;
+ }
+
+ dwc2_hc_init(hsotg, chan);
+ chan->qh = qh;
+
+ return 0;
+}
+
+/**
+ * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
+ * schedule and assigns them to available host channels. Called from the HCD
+ * interrupt handler functions.
+ *
+ * @hsotg: The HCD state structure
+ *
+ * Return: The types of new transactions that were assigned to host channels
+ */
+enum dwc2_transaction_type dwc2_hcd_select_transactions(
+ struct dwc2_hsotg *hsotg)
+{
+ enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
+ struct list_head *qh_ptr;
+ struct dwc2_qh *qh;
+ int num_channels;
+
+#ifdef DWC2_DEBUG_SOF
+ dev_vdbg(hsotg->dev, " Select Transactions\n");
+#endif
+
+ /* Process entries in the periodic ready list */
+ qh_ptr = hsotg->periodic_sched_ready.next;
+ while (qh_ptr != &hsotg->periodic_sched_ready) {
+ if (list_empty(&hsotg->free_hc_list))
+ break;
+ if (hsotg->params.uframe_sched) {
+ if (hsotg->available_host_channels <= 1)
+ break;
+ hsotg->available_host_channels--;
+ }
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+ if (dwc2_assign_and_init_hc(hsotg, qh))
+ break;
+
+ /*
+ * Move the QH from the periodic ready schedule to the
+ * periodic assigned schedule
+ */
+ qh_ptr = qh_ptr->next;
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
+ ret_val = DWC2_TRANSACTION_PERIODIC;
+ }
+
+ /*
+ * Process entries in the inactive portion of the non-periodic
+ * schedule. Some free host channels may not be used if they are
+ * reserved for periodic transfers.
+ */
+ num_channels = hsotg->params.host_channels;
+ qh_ptr = hsotg->non_periodic_sched_inactive.next;
+ while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
+ if (!hsotg->params.uframe_sched &&
+ hsotg->non_periodic_channels >= num_channels -
+ hsotg->periodic_channels)
+ break;
+ if (list_empty(&hsotg->free_hc_list))
+ break;
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+ if (hsotg->params.uframe_sched) {
+ if (hsotg->available_host_channels < 1)
+ break;
+ hsotg->available_host_channels--;
+ }
+
+ if (dwc2_assign_and_init_hc(hsotg, qh))
+ break;
+
+ /*
+ * Move the QH from the non-periodic inactive schedule to the
+ * non-periodic active schedule
+ */
+ qh_ptr = qh_ptr->next;
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_active);
+
+ if (ret_val == DWC2_TRANSACTION_NONE)
+ ret_val = DWC2_TRANSACTION_NON_PERIODIC;
+ else
+ ret_val = DWC2_TRANSACTION_ALL;
+
+ if (!hsotg->params.uframe_sched)
+ hsotg->non_periodic_channels++;
+ }
+
+ return ret_val;
+}
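+
+/*
+ * Note the asymmetry in the uframe-scheduled limits above: the periodic
+ * loop stops once available_host_channels drops to 1, in effect reserving
+ * one channel for non-periodic traffic, while the non-periodic loop may
+ * consume every remaining channel.
+ */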
+
+/**
+ * dwc2_queue_transaction() - Attempts to queue a single transaction request for
+ * a host channel associated with either a periodic or non-periodic transfer
+ *
+ * @hsotg: The HCD state structure
+ * @chan: Host channel descriptor associated with either a periodic or
+ * non-periodic transfer
+ * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
+ * for periodic transfers or the non-periodic Tx FIFO
+ * for non-periodic transfers
+ *
+ * Return: 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO
+ *
+ * This function assumes that there is space available in the appropriate
+ * request queue. For an OUT transfer or SETUP transaction in Slave mode,
+ * it checks whether space is available in the appropriate Tx FIFO.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ u16 fifo_dwords_avail)
+{
+ int retval = 0;
+
+ if (chan->do_split)
+ /* Put ourselves on the list to keep order straight */
+ list_move_tail(&chan->split_order_list_entry,
+ &hsotg->split_order);
+
+ if (hsotg->params.host_dma && chan->qh) {
+ if (hsotg->params.dma_desc_enable) {
+ if (!chan->xfer_started ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
+ chan->qh->ping_state = 0;
+ }
+ } else if (!chan->xfer_started) {
+ dwc2_hc_start_transfer(hsotg, chan);
+ chan->qh->ping_state = 0;
+ }
+ } else if (chan->halt_pending) {
+ /* Don't queue a request if the channel has been halted */
+ } else if (chan->halt_on_queue) {
+ dwc2_hc_halt(hsotg, chan, chan->halt_status);
+ } else if (chan->do_ping) {
+ if (!chan->xfer_started)
+ dwc2_hc_start_transfer(hsotg, chan);
+ } else if (!chan->ep_is_in ||
+ chan->data_pid_start == DWC2_HC_PID_SETUP) {
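+		/*
+		 * e.g. a 512-byte max packet needs at least 512 / 4 = 128
+		 * free dwords in the Tx FIFO before it can be queued here.
+		 */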
+ if ((fifo_dwords_avail * 4) >= chan->max_packet) {
+ if (!chan->xfer_started) {
+ dwc2_hc_start_transfer(hsotg, chan);
+ retval = 1;
+ } else {
+ retval = dwc2_hc_continue_transfer(hsotg, chan);
+ }
+ } else {
+ retval = -1;
+ }
+ } else {
+ if (!chan->xfer_started) {
+ dwc2_hc_start_transfer(hsotg, chan);
+ retval = 1;
+ } else {
+ retval = dwc2_hc_continue_transfer(hsotg, chan);
+ }
+ }
+
+ return retval;
+}
+
+/*
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
+{
+ struct list_head *qh_ptr;
+ struct dwc2_qh *qh;
+ u32 tx_status;
+ u32 fspcavail;
+ u32 gintmsk;
+ int status;
+ bool no_queue_space = false;
+ bool no_fifo_space = false;
+ u32 qspcavail;
+
+ /* If empty list then just adjust interrupt enables */
+ if (list_empty(&hsotg->periodic_sched_assigned))
+ goto exit;
+
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
+
+ tx_status = dwc2_readl(hsotg, HPTXSTS);
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
+
+ if (dbg_perio()) {
+ dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
+ qspcavail);
+ dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n",
+ fspcavail);
+ }
+
+ qh_ptr = hsotg->periodic_sched_assigned.next;
+ while (qh_ptr != &hsotg->periodic_sched_assigned) {
+ tx_status = dwc2_readl(hsotg, HPTXSTS);
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ if (qspcavail == 0) {
+ no_queue_space = true;
+ break;
+ }
+
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+ if (!qh->channel) {
+ qh_ptr = qh_ptr->next;
+ continue;
+ }
+
+ /* Make sure EP's TT buffer is clean before queueing qtds */
+ if (qh->tt_buffer_dirty) {
+ qh_ptr = qh_ptr->next;
+ continue;
+ }
+
+ /*
+ * Set a flag if we're queuing high-bandwidth in slave mode.
+		 * The flag prevents any halts from getting into the request
+		 * queue in the middle of multiple high-bandwidth packets
+		 * getting queued.
+ */
+ if (!hsotg->params.host_dma &&
+ qh->channel->multi_count > 1)
+ hsotg->queuing_high_bandwidth = 1;
+
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
+ status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
+ if (status < 0) {
+ no_fifo_space = true;
+ break;
+ }
+
+ /*
+ * In Slave mode, stay on the current transfer until there is
+ * nothing more to do or the high-bandwidth request count is
+ * reached. In DMA mode, only need to queue one request. The
+ * controller automatically handles multiple packets for
+ * high-bandwidth transfers.
+ */
+ if (hsotg->params.host_dma || status == 0 ||
+ qh->channel->requests == qh->channel->multi_count) {
+ qh_ptr = qh_ptr->next;
+ /*
+ * Move the QH from the periodic assigned schedule to
+ * the periodic queued schedule
+ */
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_queued);
+
+ /* done queuing high bandwidth */
+ hsotg->queuing_high_bandwidth = 0;
+ }
+ }
+
+exit:
+ if (no_queue_space || no_fifo_space ||
+ (!hsotg->params.host_dma &&
+ !list_empty(&hsotg->periodic_sched_assigned))) {
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the periodic Tx
+ * FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ if (!(gintmsk & GINTSTS_PTXFEMP)) {
+ gintmsk |= GINTSTS_PTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ if (gintmsk & GINTSTS_PTXFEMP) {
+ gintmsk &= ~GINTSTS_PTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+ }
+}
+
+/*
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
+{
+ struct list_head *orig_qh_ptr;
+ struct dwc2_qh *qh;
+ u32 tx_status;
+ u32 qspcavail;
+ u32 fspcavail;
+ u32 gintmsk;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+ int more_to_do = 0;
+
+ dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
+
+ tx_status = dwc2_readl(hsotg, GNPTXSTS);
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
+ dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
+ qspcavail);
+ dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
+ fspcavail);
+
+ /*
+ * Keep track of the starting point. Skip over the start-of-list
+ * entry.
+ */
+ if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
+ hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
+ orig_qh_ptr = hsotg->non_periodic_qh_ptr;
+
+ /*
+ * Process once through the active list or until no more space is
+ * available in the request queue or the Tx FIFO
+ */
+ do {
+ tx_status = dwc2_readl(hsotg, GNPTXSTS);
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ if (!hsotg->params.host_dma && qspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+
+ qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
+ qh_list_entry);
+ if (!qh->channel)
+ goto next;
+
+ /* Make sure EP's TT buffer is clean before queueing qtds */
+ if (qh->tt_buffer_dirty)
+ goto next;
+
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
+ status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
+
+ if (status > 0) {
+ more_to_do = 1;
+ } else if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+next:
+ /* Advance to next QH, skipping start-of-list entry */
+ hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
+ if (hsotg->non_periodic_qh_ptr ==
+ &hsotg->non_periodic_sched_active)
+ hsotg->non_periodic_qh_ptr =
+ hsotg->non_periodic_qh_ptr->next;
+ } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
+
+ if (!hsotg->params.host_dma) {
+ tx_status = dwc2_readl(hsotg, GNPTXSTS);
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
+ dev_vdbg(hsotg->dev,
+ " NP Tx Req Queue Space Avail (after queue): %d\n",
+ qspcavail);
+ dev_vdbg(hsotg->dev,
+ " NP Tx FIFO Space Avail (after queue): %d\n",
+ fspcavail);
+
+ if (more_to_do || no_queue_space || no_fifo_space) {
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the non-periodic
+ * Tx FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk |= GINTSTS_NPTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_NPTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+ }
+}
+
+/**
+ * dwc2_hcd_queue_transactions() - Processes the currently active host channels
+ * and queues transactions for these channels to the DWC_otg controller. Called
+ * from the HCD interrupt handler functions.
+ *
+ * @hsotg: The HCD state structure
+ * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
+ * or both)
+ *
+ * Must be called with interrupt disabled and spinlock held
+ */
+void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
+ enum dwc2_transaction_type tr_type)
+{
+#ifdef DWC2_DEBUG_SOF
+ dev_vdbg(hsotg->dev, "Queue Transactions\n");
+#endif
+ /* Process host channels associated with periodic transfers */
+ if (tr_type == DWC2_TRANSACTION_PERIODIC ||
+ tr_type == DWC2_TRANSACTION_ALL)
+ dwc2_process_periodic_channels(hsotg);
+
+ /* Process host channels associated with non-periodic transfers */
+ if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
+ tr_type == DWC2_TRANSACTION_ALL) {
+ if (!list_empty(&hsotg->non_periodic_sched_active)) {
+ dwc2_process_non_periodic_channels(hsotg);
+ } else {
+ /*
+ * Ensure NP Tx FIFO empty interrupt is disabled when
+ * there are no non-periodic transfers to process
+ */
+ u32 gintmsk = dwc2_readl(hsotg, GINTMSK);
+
+ gintmsk &= ~GINTSTS_NPTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+ }
+}
+
+static void dwc2_conn_id_status_change(struct work_struct *work)
+{
+ struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+ wf_otg);
+ u32 count = 0;
+ u32 gotgctl;
+ unsigned long flags;
+
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
+ dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
+ !!(gotgctl & GOTGCTL_CONID_B));
+
+ /* B-Device connector (Device Mode) */
+ if (gotgctl & GOTGCTL_CONID_B) {
+ dwc2_vbus_supply_exit(hsotg);
+ /* Wait for switch to device mode */
+ dev_dbg(hsotg->dev, "connId B\n");
+ if (hsotg->bus_suspended) {
+ dev_info(hsotg->dev,
+ "Do port resume before switching to device mode\n");
+ dwc2_port_resume(hsotg);
+ }
+ while (!dwc2_is_device_mode(hsotg)) {
+ dev_info(hsotg->dev,
+ "Waiting for Peripheral Mode, Mode=%s\n",
+ dwc2_is_host_mode(hsotg) ? "Host" :
+ "Peripheral");
+ msleep(20);
+ /*
+			 * Sometimes the initial GOTGCTL read is wrong, so
+ * check it again and jump to host mode if that was
+ * the case.
+ */
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ if (!(gotgctl & GOTGCTL_CONID_B))
+ goto host;
+ if (++count > 250)
+ break;
+ }
+ if (count > 250)
+ dev_err(hsotg->dev,
+ "Connection id status change timed out\n");
+
+ /*
+ * Exit Partial Power Down without restoring registers.
+ * No need to check the return value as registers
+ * are not being restored.
+ */
+ if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
+ dwc2_exit_partial_power_down(hsotg, 0, false);
+
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+		/* Enable ACG feature in device mode, if supported */
+ dwc2_enable_acg(hsotg);
+ dwc2_hsotg_core_connect(hsotg);
+ } else {
+host:
+ /* A-Device connector (Host Mode) */
+ dev_dbg(hsotg->dev, "connId A\n");
+ while (!dwc2_is_host_mode(hsotg)) {
+ dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
+ dwc2_is_host_mode(hsotg) ?
+ "Host" : "Peripheral");
+ msleep(20);
+ if (++count > 250)
+ break;
+ }
+ if (count > 250)
+ dev_err(hsotg->dev,
+ "Connection id status change timed out\n");
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ hsotg->op_state = OTG_STATE_A_HOST;
+ /* Initialize the Core for Host mode */
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ dwc2_hcd_start(hsotg);
+ }
+}
+
+static void dwc2_wakeup_detected(struct timer_list *t)
+{
+ struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
+ u32 hprt0;
+
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ /*
+ * Clear the Resume after 70 ms. (Need 20 ms minimum. Use 70 ms
+ * so that OPT tests pass with all PHYs.)
+ */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
+ hprt0 &= ~HPRT0_RES;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
+ dwc2_readl(hsotg, HPRT0));
+
+ dwc2_hcd_rem_wakeup(hsotg);
+ hsotg->bus_suspended = false;
+
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+}
+
+static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ return hcd->self.b_hnp_enable;
+}
+
+/**
+ * dwc2_port_suspend() - Put controller in suspend mode for host.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @windex: The control request wIndex field
+ *
+ * Return: non-zero if failed to enter suspend mode for host.
+ *
+ * This function is for entering Host mode suspend.
+ * Must NOT be called with interrupts disabled or the spinlock held.
+ */
+int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
+{
+ unsigned long flags;
+ u32 pcgctl;
+ u32 gotgctl;
+ int ret = 0;
+
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
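+ /*
+ * If HNP is enabled on the OTG port, set the "Host Set HNP Enable"
+ * bit in GOTGCTL before suspending so the core is armed for
+ * host-initiated HNP.
+ */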
+ if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gotgctl |= GOTGCTL_HSTSETHNPEN;
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+ hsotg->op_state = OTG_STATE_A_SUSPEND;
+ }
+
+ switch (hsotg->params.power_down) {
+ case DWC2_POWER_DOWN_PARAM_PARTIAL:
+ ret = dwc2_enter_partial_power_down(hsotg);
+ if (ret)
+ dev_err(hsotg->dev,
+ "enter partial_power_down failed.\n");
+ break;
+ case DWC2_POWER_DOWN_PARAM_HIBERNATION:
+ /*
+ * Unlock and relock around this call because the
+ * "dwc2_host_enter_hibernation()" function contains spinlock
+ * logic that prevents any IRQ from being serviced while
+ * hibernation is entered.
+ */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ ret = dwc2_enter_hibernation(hsotg, 1);
+ if (ret)
+ dev_err(hsotg->dev, "enter hibernation failed.\n");
+ spin_lock_irqsave(&hsotg->lock, flags);
+ break;
+ case DWC2_POWER_DOWN_PARAM_NONE:
+ /*
+ * If neither hibernation nor partial power down is supported,
+ * clock gating is used to save power.
+ */
+ if (!hsotg->params.no_clock_gating)
+ dwc2_host_enter_clock_gating(hsotg);
+ break;
+ }
+
+ /* For HNP the bus must be suspended for at least 200ms */
+ if (dwc2_host_is_b_hnp_enabled(hsotg)) {
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ msleep(200);
+ } else {
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+
+ return ret;
+}
+
+/**
+ * dwc2_port_resume() - Exit controller from suspend mode for host.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * Return: non-zero if failed to exit suspend mode for host.
+ *
+ * This function is for exiting Host mode suspend.
+ * Must NOT be called with interrupts disabled or the spinlock held.
+ */
+int dwc2_port_resume(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ switch (hsotg->params.power_down) {
+ case DWC2_POWER_DOWN_PARAM_PARTIAL:
+ ret = dwc2_exit_partial_power_down(hsotg, 0, true);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed.\n");
+ break;
+ case DWC2_POWER_DOWN_PARAM_HIBERNATION:
+ /* Exit host hibernation. */
+ ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
+ if (ret)
+ dev_err(hsotg->dev, "exit hibernation failed.\n");
+ break;
+ case DWC2_POWER_DOWN_PARAM_NONE:
+ /*
+ * If neither hibernation nor partial power down is supported,
+ * port resume is done using the clock gating programming flow.
+ */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_host_exit_clock_gating(hsotg, 0);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return ret;
+}
+
+/* Handles hub class-specific requests */
+static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ u16 wvalue, u16 windex, char *buf, u16 wlength)
+{
+ struct usb_hub_descriptor *hub_desc;
+ int retval = 0;
+ u32 hprt0;
+ u32 port_status;
+ u32 speed;
+ u32 pcgctl;
+ u32 pwr;
+
+ switch (typereq) {
+ case ClearHubFeature:
+ dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);
+
+ switch (wvalue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+
+ default:
+ retval = -EINVAL;
+ dev_err(hsotg->dev,
+ "ClearHubFeature request %1xh unknown\n",
+ wvalue);
+ }
+ break;
+
+ case ClearPortFeature:
+ if (wvalue != USB_PORT_FEAT_L1)
+ if (!windex || windex > 1)
+ goto error;
+ switch (wvalue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_ENA;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ break;
+
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+
+ if (hsotg->bus_suspended)
+ retval = dwc2_port_resume(hsotg);
+ break;
+
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_POWER\n");
+ hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
+ hprt0 &= ~HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ if (pwr)
+ dwc2_vbus_supply_exit(hsotg);
+ break;
+
+ case USB_PORT_FEAT_INDICATOR:
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+ /* Port indicator not supported */
+ break;
+
+ case USB_PORT_FEAT_C_CONNECTION:
+ /*
+ * Clears driver's internal Connect Status Change flag
+ */
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+ hsotg->flags.b.port_connect_status_change = 0;
+ break;
+
+ case USB_PORT_FEAT_C_RESET:
+ /* Clears driver's internal Port Reset Change flag */
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+ hsotg->flags.b.port_reset_change = 0;
+ break;
+
+ case USB_PORT_FEAT_C_ENABLE:
+ /*
+ * Clears the driver's internal Port Enable/Disable
+ * Change flag
+ */
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+ hsotg->flags.b.port_enable_change = 0;
+ break;
+
+ case USB_PORT_FEAT_C_SUSPEND:
+ /*
+ * Clears the driver's internal Port Suspend Change
+ * flag, which is set when resume signaling on the host
+ * port is complete
+ */
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+ hsotg->flags.b.port_suspend_change = 0;
+ break;
+
+ case USB_PORT_FEAT_C_PORT_L1:
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
+ hsotg->flags.b.port_l1_change = 0;
+ break;
+
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+ hsotg->flags.b.port_over_current_change = 0;
+ break;
+
+ default:
+ retval = -EINVAL;
+ dev_err(hsotg->dev,
+ "ClearPortFeature request %1xh unknown or unsupported\n",
+ wvalue);
+ }
+ break;
+
+ case GetHubDescriptor:
+ dev_dbg(hsotg->dev, "GetHubDescriptor\n");
+ hub_desc = (struct usb_hub_descriptor *)buf;
+ hub_desc->bDescLength = 9;
+ hub_desc->bDescriptorType = USB_DT_HUB;
+ hub_desc->bNbrPorts = 1;
+ hub_desc->wHubCharacteristics =
+ cpu_to_le16(HUB_CHAR_COMMON_LPSM |
+ HUB_CHAR_INDV_PORT_OCPM);
+ hub_desc->bPwrOn2PwrGood = 1;
+ hub_desc->bHubContrCurrent = 0;
+ hub_desc->u.hs.DeviceRemovable[0] = 0;
+ hub_desc->u.hs.DeviceRemovable[1] = 0xff;
+ break;
+
+ case GetHubStatus:
+ dev_dbg(hsotg->dev, "GetHubStatus\n");
+ memset(buf, 0, 4);
+ break;
+
+ case GetPortStatus:
+ dev_vdbg(hsotg->dev,
+ "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
+ hsotg->flags.d32);
+ if (!windex || windex > 1)
+ goto error;
+
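+ /*
+ * wPortStatus is reported in the low 16 bits of the returned value
+ * and wPortChange in the high 16 bits (USB 2.0 hub class
+ * GetPortStatus); the change bits are accumulated first.
+ */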
+ port_status = 0;
+ if (hsotg->flags.b.port_connect_status_change)
+ port_status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (hsotg->flags.b.port_enable_change)
+ port_status |= USB_PORT_STAT_C_ENABLE << 16;
+ if (hsotg->flags.b.port_suspend_change)
+ port_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ if (hsotg->flags.b.port_l1_change)
+ port_status |= USB_PORT_STAT_C_L1 << 16;
+ if (hsotg->flags.b.port_reset_change)
+ port_status |= USB_PORT_STAT_C_RESET << 16;
+ if (hsotg->flags.b.port_over_current_change) {
+ dev_warn(hsotg->dev, "Overcurrent change detected\n");
+ port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ }
+
+ if (!hsotg->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+ *(__le32 *)buf = cpu_to_le32(port_status);
+ break;
+ }
+
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+ dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
+
+ if (hprt0 & HPRT0_CONNSTS)
+ port_status |= USB_PORT_STAT_CONNECTION;
+ if (hprt0 & HPRT0_ENA)
+ port_status |= USB_PORT_STAT_ENABLE;
+ if (hprt0 & HPRT0_SUSP)
+ port_status |= USB_PORT_STAT_SUSPEND;
+ if (hprt0 & HPRT0_OVRCURRACT)
+ port_status |= USB_PORT_STAT_OVERCURRENT;
+ if (hprt0 & HPRT0_RST)
+ port_status |= USB_PORT_STAT_RESET;
+ if (hprt0 & HPRT0_PWR)
+ port_status |= USB_PORT_STAT_POWER;
+
+ speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+ if (speed == HPRT0_SPD_HIGH_SPEED)
+ port_status |= USB_PORT_STAT_HIGH_SPEED;
+ else if (speed == HPRT0_SPD_LOW_SPEED)
+ port_status |= USB_PORT_STAT_LOW_SPEED;
+
+ if (hprt0 & HPRT0_TSTCTL_MASK)
+ port_status |= USB_PORT_STAT_TEST;
+ /* USB_PORT_FEAT_INDICATOR is unsupported; always reported as 0 */
+
+ if (hsotg->params.dma_desc_fs_enable) {
+ /*
+ * Enable descriptor DMA only if a full speed
+ * device is connected.
+ */
+ if (hsotg->new_connection &&
+ ((port_status &
+ (USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_LOW_SPEED)) ==
+ USB_PORT_STAT_CONNECTION)) {
+ u32 hcfg;
+
+ dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
+ hsotg->params.dma_desc_enable = true;
+ hcfg = dwc2_readl(hsotg, HCFG);
+ hcfg |= HCFG_DESCDMA;
+ dwc2_writel(hsotg, hcfg, HCFG);
+ hsotg->new_connection = false;
+ }
+ }
+
+ dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
+ *(__le32 *)buf = cpu_to_le32(port_status);
+ break;
+
+ case SetHubFeature:
+ dev_dbg(hsotg->dev, "SetHubFeature\n");
+ /* No HUB features supported */
+ break;
+
+ case SetPortFeature:
+ dev_dbg(hsotg->dev, "SetPortFeature\n");
+ if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
+ goto error;
+
+ if (!hsotg->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return without doing anything since the port
+ * register can't be written if the core is in device
+ * mode.
+ */
+ break;
+ }
+
+ switch (wvalue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(hsotg->dev,
+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+ if (windex != hsotg->otg_port)
+ goto error;
+ if (!hsotg->bus_suspended)
+ retval = dwc2_port_suspend(hsotg, windex);
+ break;
+
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hsotg->dev,
+ "SetPortFeature - USB_PORT_FEAT_POWER\n");
+ hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
+ hprt0 |= HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
+ break;
+
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(hsotg->dev,
+ "SetPortFeature - USB_PORT_FEAT_RESET\n");
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+
+ if (hsotg->hibernated) {
+ retval = dwc2_exit_hibernation(hsotg, 0, 1, 1);
+ if (retval)
+ dev_err(hsotg->dev,
+ "exit hibernation failed\n");
+ }
+
+ if (hsotg->in_ppd) {
+ retval = dwc2_exit_partial_power_down(hsotg, 1,
+ true);
+ if (retval)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed\n");
+ }
+
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ dwc2_host_exit_clock_gating(hsotg, 0);
+
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ /* ??? Original driver does this */
+ dwc2_writel(hsotg, 0, PCGCTL);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
+ /* Clear suspend bit if resetting from suspend state */
+ hprt0 &= ~HPRT0_SUSP;
+
+ /*
+ * When operating as B-Host, the port reset bit is set in the
+ * Start HCD callback function so that the reset is started
+ * within 1 ms of the HNP success interrupt.
+ */
+ if (!dwc2_hcd_is_b_host(hsotg)) {
+ hprt0 |= HPRT0_PWR | HPRT0_RST;
+ dev_dbg(hsotg->dev,
+ "In host mode, hprt0=%08x\n", hprt0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
+ }
+
+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+ msleep(50);
+ hprt0 &= ~HPRT0_RST;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ hsotg->lx_state = DWC2_L0; /* Now back to On state */
+ break;
+
+ case USB_PORT_FEAT_INDICATOR:
+ dev_dbg(hsotg->dev,
+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+ /* Not supported */
+ break;
+
+ case USB_PORT_FEAT_TEST:
+ hprt0 = dwc2_read_hprt0(hsotg);
+ dev_dbg(hsotg->dev,
+ "SetPortFeature - USB_PORT_FEAT_TEST\n");
+ hprt0 &= ~HPRT0_TSTCTL_MASK;
+ hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ break;
+
+ default:
+ retval = -EINVAL;
+ dev_err(hsotg->dev,
+ "SetPortFeature %1xh unknown or unsupported\n",
+ wvalue);
+ break;
+ }
+ break;
+
+ default:
+error:
+ retval = -EINVAL;
+ dev_dbg(hsotg->dev,
+ "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
+ typereq, windex, wvalue);
+ break;
+ }
+
+ return retval;
+}
+
+static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
+{
+ int retval;
+
+ if (port != 1)
+ return -EINVAL;
+
+ retval = (hsotg->flags.b.port_connect_status_change ||
+ hsotg->flags.b.port_reset_change ||
+ hsotg->flags.b.port_enable_change ||
+ hsotg->flags.b.port_suspend_change ||
+ hsotg->flags.b.port_over_current_change);
+
+ if (retval) {
+ dev_dbg(hsotg->dev,
+ "DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
+ dev_dbg(hsotg->dev, " port_connect_status_change: %d\n",
+ hsotg->flags.b.port_connect_status_change);
+ dev_dbg(hsotg->dev, " port_reset_change: %d\n",
+ hsotg->flags.b.port_reset_change);
+ dev_dbg(hsotg->dev, " port_enable_change: %d\n",
+ hsotg->flags.b.port_enable_change);
+ dev_dbg(hsotg->dev, " port_suspend_change: %d\n",
+ hsotg->flags.b.port_suspend_change);
+ dev_dbg(hsotg->dev, " port_over_current_change: %d\n",
+ hsotg->flags.b.port_over_current_change);
+ }
+
+ return retval;
+}
+
+int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
+{
+ u32 hfnum = dwc2_readl(hsotg, HFNUM);
+
+#ifdef DWC2_DEBUG_SOF
+ dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
+ (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
+#endif
+ return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
+}
+
+int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
+{
+ u32 hprt = dwc2_readl(hsotg, HPRT0);
+ u32 hfir = dwc2_readl(hsotg, HFIR);
+ u32 hfnum = dwc2_readl(hsotg, HFNUM);
+ unsigned int us_per_frame;
+ unsigned int frame_number;
+ unsigned int remaining;
+ unsigned int interval;
+ unsigned int phy_clks;
+
+ /* High speed has 125 us per (micro)frame; others have 1 ms per frame */
+ us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;
+
+ /* Extract fields */
+ frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
+ remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
+ interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;
+
+ /*
+ * Number of PHY clocks that will have elapsed since the last frame
+ * tick once "us" microseconds have passed.
+ */
+ phy_clks = (interval - remaining) +
+ DIV_ROUND_UP(interval * us, us_per_frame);
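+
+ /*
+ * Worked example (a sketch): at high speed (125 us per microframe),
+ * with interval = 60000 clocks, remaining = 30000 and us = 250,
+ * phy_clks = 30000 + 120000 = 150000, so the current frame number
+ * is advanced by 150000 / 60000 = 2.
+ */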
+
+ return dwc2_frame_num_inc(frame_number, phy_clks / interval);
+}
+
+int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->op_state == OTG_STATE_B_HOST;
+}
+
+static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
+ int iso_desc_count,
+ gfp_t mem_flags)
+{
+ struct dwc2_hcd_urb *urb;
+
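+ /* Allocate the URB together with its trailing array of ISO descriptors */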
+ urb = kzalloc(struct_size(urb, iso_descs, iso_desc_count), mem_flags);
+ if (urb)
+ urb->packet_count = iso_desc_count;
+ return urb;
+}
+
+static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb, u8 dev_addr,
+ u8 ep_num, u8 ep_type, u8 ep_dir,
+ u16 maxp, u16 maxp_mult)
+{
+ if (dbg_perio() ||
+ ep_type == USB_ENDPOINT_XFER_BULK ||
+ ep_type == USB_ENDPOINT_XFER_CONTROL)
+ dev_vdbg(hsotg->dev,
+ "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
+ dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
+ urb->pipe_info.dev_addr = dev_addr;
+ urb->pipe_info.ep_num = ep_num;
+ urb->pipe_info.pipe_type = ep_type;
+ urb->pipe_info.pipe_dir = ep_dir;
+ urb->pipe_info.maxp = maxp;
+ urb->pipe_info.maxp_mult = maxp_mult;
+}
+
+/*
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
+{
+#ifdef DEBUG
+ struct dwc2_host_chan *chan;
+ struct dwc2_hcd_urb *urb;
+ struct dwc2_qtd *qtd;
+ int num_channels;
+ u32 np_tx_status;
+ u32 p_tx_status;
+ int i;
+
+ num_channels = hsotg->params.host_channels;
+ dev_dbg(hsotg->dev, "\n");
+ dev_dbg(hsotg->dev,
+ "************************************************************\n");
+ dev_dbg(hsotg->dev, "HCD State:\n");
+ dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
+
+ for (i = 0; i < num_channels; i++) {
+ chan = hsotg->hc_ptr_array[i];
+ dev_dbg(hsotg->dev, " Channel %d:\n", i);
+ dev_dbg(hsotg->dev,
+ " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ chan->dev_addr, chan->ep_num, chan->ep_is_in);
+ dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
+ dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
+ dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
+ dev_dbg(hsotg->dev, " data_pid_start: %d\n",
+ chan->data_pid_start);
+ dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
+ dev_dbg(hsotg->dev, " xfer_started: %d\n",
+ chan->xfer_started);
+ dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
+ dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
+ (unsigned long)chan->xfer_dma);
+ dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
+ dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
+ dev_dbg(hsotg->dev, " halt_on_queue: %d\n",
+ chan->halt_on_queue);
+ dev_dbg(hsotg->dev, " halt_pending: %d\n",
+ chan->halt_pending);
+ dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
+ dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
+ dev_dbg(hsotg->dev, " complete_split: %d\n",
+ chan->complete_split);
+ dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
+ dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
+ dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
+ dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
+ dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
+
+ if (chan->xfer_started) {
+ u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
+
+ hfnum = dwc2_readl(hsotg, HFNUM);
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(i));
+ hcint = dwc2_readl(hsotg, HCINT(i));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(i));
+ dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
+ dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
+ dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
+ dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
+ dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
+ }
+
+ if (!(chan->xfer_started && chan->qh))
+ continue;
+
+ list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
+ if (!qtd->in_process)
+ break;
+ urb = qtd->urb;
+ dev_dbg(hsotg->dev, " URB Info:\n");
+ dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n",
+ qtd, urb);
+ if (urb) {
+ dev_dbg(hsotg->dev,
+ " Dev: %d, EP: %d %s\n",
+ dwc2_hcd_get_dev_addr(&urb->pipe_info),
+ dwc2_hcd_get_ep_num(&urb->pipe_info),
+ dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
+ "IN" : "OUT");
+ dev_dbg(hsotg->dev,
+ " Max packet size: %d (%d mult)\n",
+ dwc2_hcd_get_maxp(&urb->pipe_info),
+ dwc2_hcd_get_maxp_mult(&urb->pipe_info));
+ dev_dbg(hsotg->dev,
+ " transfer_buffer: %p\n",
+ urb->buf);
+ dev_dbg(hsotg->dev,
+ " transfer_dma: %08lx\n",
+ (unsigned long)urb->dma);
+ dev_dbg(hsotg->dev,
+ " transfer_buffer_length: %d\n",
+ urb->length);
+ dev_dbg(hsotg->dev, " actual_length: %d\n",
+ urb->actual_length);
+ }
+ }
+ }
+
+ dev_dbg(hsotg->dev, " non_periodic_channels: %d\n",
+ hsotg->non_periodic_channels);
+ dev_dbg(hsotg->dev, " periodic_channels: %d\n",
+ hsotg->periodic_channels);
+ dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
+ np_tx_status = dwc2_readl(hsotg, GNPTXSTS);
+ dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
+ (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
+ dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
+ (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
+ p_tx_status = dwc2_readl(hsotg, HPTXSTS);
+ dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
+ (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
+ dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
+ (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
+ dwc2_dump_global_registers(hsotg);
+ dwc2_dump_host_registers(hsotg);
+ dev_dbg(hsotg->dev,
+ "************************************************************\n");
+ dev_dbg(hsotg->dev, "\n");
+#endif
+}
+
+struct wrapper_priv_data {
+ struct dwc2_hsotg *hsotg;
+};
+
+/* Gets the dwc2_hsotg from a usb_hcd */
+static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
+{
+ struct wrapper_priv_data *p;
+
+ p = (struct wrapper_priv_data *)&hcd->hcd_priv;
+ return p->hsotg;
+}
+
+/**
+ * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
+ *
+ * This will get the dwc2_tt structure (and ttport) associated with the given
+ * context (which is really just a struct urb pointer).
+ *
+ * The first time this is called for a given TT we allocate memory for our
+ * structure. When everyone is done and has called dwc2_host_put_tt_info()
+ * then the refcount for the structure will go to 0 and we'll free it.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @context: The priv pointer from a struct dwc2_hcd_urb.
+ * @mem_flags: Flags for allocating memory.
+ * @ttport: We'll return this device's port number here. That's used to
+ * reference into the bitmap if we're on a multi_tt hub.
+ *
+ * Return: a pointer to a struct dwc2_tt. Don't forget to call
+ * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
+ */
+struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
+ gfp_t mem_flags, int *ttport)
+{
+ struct urb *urb = context;
+ struct dwc2_tt *dwc_tt = NULL;
+
+ if (urb->dev->tt) {
+ *ttport = urb->dev->ttport;
+
+ dwc_tt = urb->dev->tt->hcpriv;
+ if (!dwc_tt) {
+ size_t bitmap_size;
+
+ /*
+ * For single_tt we need one schedule. For multi_tt
+ * we need one per port.
+ */
+ bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
+ sizeof(dwc_tt->periodic_bitmaps[0]);
+ if (urb->dev->tt->multi)
+ bitmap_size *= urb->dev->tt->hub->maxchild;
+
+ dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size,
+ mem_flags);
+ if (!dwc_tt)
+ return NULL;
+
+ dwc_tt->usb_tt = urb->dev->tt;
+ dwc_tt->usb_tt->hcpriv = dwc_tt;
+ }
+
+ dwc_tt->refcount++;
+ }
+
+ return dwc_tt;
+}
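+
+/*
+ * A typical balanced call sequence (sketch):
+ *
+ *	int ttport;
+ *	struct dwc2_tt *dwc_tt;
+ *
+ *	dwc_tt = dwc2_host_get_tt_info(hsotg, urb, mem_flags, &ttport);
+ *	...
+ *	dwc2_host_put_tt_info(hsotg, dwc_tt);
+ */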
+
+/**
+ * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
+ *
+ * Frees resources allocated by dwc2_host_get_tt_info() if all current holders
+ * of the structure are done.
+ *
+ * It's OK to call this with NULL.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
+ */
+void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
+{
+ /* Model kfree and make put of NULL a no-op */
+ if (!dwc_tt)
+ return;
+
+ WARN_ON(dwc_tt->refcount < 1);
+
+ dwc_tt->refcount--;
+ if (!dwc_tt->refcount) {
+ dwc_tt->usb_tt->hcpriv = NULL;
+ kfree(dwc_tt);
+ }
+}
+
+int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
+{
+ struct urb *urb = context;
+
+ return urb->dev->speed;
+}
+
+static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
+ struct urb *urb)
+{
+ struct usb_bus *bus = hcd_to_bus(hcd);
+
+ if (urb->interval)
+ bus->bandwidth_allocated += bw / urb->interval;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ bus->bandwidth_isoc_reqs++;
+ else
+ bus->bandwidth_int_reqs++;
+}
+
+static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
+ struct urb *urb)
+{
+ struct usb_bus *bus = hcd_to_bus(hcd);
+
+ if (urb->interval)
+ bus->bandwidth_allocated -= bw / urb->interval;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ bus->bandwidth_isoc_reqs--;
+ else
+ bus->bandwidth_int_reqs--;
+}
+
+/*
+ * Sets the final status of an URB and returns it to the upper layer. Any
+ * required cleanup of the URB is performed.
+ *
+ * Must be called with interrupts disabled and the spinlock held
+ */
+void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ int status)
+{
+ struct urb *urb;
+ int i;
+
+ if (!qtd) {
+ dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
+ return;
+ }
+
+ if (!qtd->urb) {
+ dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
+ return;
+ }
+
+ urb = qtd->urb->priv;
+ if (!urb) {
+ dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
+ return;
+ }
+
+ urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb);
+
+ if (dbg_urb(urb))
+ dev_vdbg(hsotg->dev,
+ "%s: urb %p device %d ep %d-%s status %d actual %d\n",
+ __func__, urb, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "IN" : "OUT", status,
+ urb->actual_length);
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
+ for (i = 0; i < urb->number_of_packets; ++i) {
+ urb->iso_frame_desc[i].actual_length =
+ dwc2_hcd_urb_get_iso_desc_actual_length(
+ qtd->urb, i);
+ urb->iso_frame_desc[i].status =
+ dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i);
+ }
+ }
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) {
+ for (i = 0; i < urb->number_of_packets; i++)
+ dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
+ i, urb->iso_frame_desc[i].status);
+ }
+
+ urb->status = status;
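+ /* A short transfer counts as an error if URB_SHORT_NOT_OK was requested */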
+ if (!status) {
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+ urb->actual_length < urb->transfer_buffer_length)
+ urb->status = -EREMOTEIO;
+ }
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
+ usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+ struct usb_host_endpoint *ep = urb->ep;
+
+ if (ep)
+ dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg),
+ dwc2_hcd_get_ep_bandwidth(hsotg, ep),
+ urb);
+ }
+
+ usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb);
+ urb->hcpriv = NULL;
+ kfree(qtd->urb);
+ qtd->urb = NULL;
+
+ usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
+}
+
+/*
+ * Work queue function for starting the HCD when A-Cable is connected
+ */
+static void dwc2_hcd_start_func(struct work_struct *work)
+{
+ struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+ start_work.work);
+
+ dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
+ dwc2_host_start(hsotg);
+}
+
+/*
+ * Reset work queue function
+ */
+static void dwc2_hcd_reset_func(struct work_struct *work)
+{
+ struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+ reset_work.work);
+ unsigned long flags;
+ u32 hprt0;
+
+ dev_dbg(hsotg->dev, "USB RESET function called\n");
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
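+ /* Deassert the port reset started earlier and record the change */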
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 &= ~HPRT0_RST;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ hsotg->flags.b.port_reset_change = 1;
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+static void dwc2_hcd_phy_reset_func(struct work_struct *work)
+{
+ struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
+ phy_reset_work);
+ int ret;
+
+ ret = phy_reset(hsotg->phy);
+ if (ret)
+ dev_warn(hsotg->dev, "PHY reset failed\n");
+}
+
+/*
+ * =========================================================================
+ * Linux HC Driver Functions
+ * =========================================================================
+ */
+
+/*
+ * Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure.
+ */
+static int _dwc2_hcd_start(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ struct usb_bus *bus = hcd_to_bus(hcd);
+ unsigned long flags;
+ u32 hprt0;
+ int ret;
+
+ dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ hsotg->lx_state = DWC2_L0;
+ hcd->state = HC_STATE_RUNNING;
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (dwc2_is_device_mode(hsotg)) {
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return 0; /* why 0 ?? */
+ }
+
+ dwc2_hcd_reinit(hsotg);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ /* Has vbus power been turned on in dwc2_core_host_init ? */
+ if (hprt0 & HPRT0_PWR) {
+ /* Enable external vbus supply before resuming root hub */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ ret = dwc2_vbus_supply_init(hsotg);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
+ /* Initialize and connect root hub if one is not already attached */
+ if (bus->root_hub) {
+ dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
+ /* Inform the HUB driver to resume */
+ usb_hcd_resume_root_hub(hcd);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ */
+static void _dwc2_hcd_stop(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ unsigned long flags;
+ u32 hprt0;
+
+ /* Turn off all host-specific interrupts */
+ dwc2_disable_host_interrupts(hsotg);
+
+ /* Wait for interrupt processing to finish */
+ synchronize_irq(hcd->irq);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ hprt0 = dwc2_read_hprt0(hsotg);
+ /* Ensure hcd is disconnected */
+ dwc2_hcd_disconnect(hsotg, true);
+ dwc2_hcd_stop(hsotg);
+ hsotg->lx_state = DWC2_L3;
+ hcd->state = HC_STATE_HALT;
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ /* keep balanced supply init/exit by checking HPRT0_PWR */
+ if (hprt0 & HPRT0_PWR)
+ dwc2_vbus_supply_exit(hsotg);
+
+ usleep_range(1000, 3000);
+}
+
+static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ if (dwc2_is_device_mode(hsotg))
+ goto unlock;
+
+ if (hsotg->lx_state != DWC2_L0)
+ goto unlock;
+
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ goto unlock;
+
+ if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ goto unlock;
+
+ if (hsotg->bus_suspended)
+ goto skip_power_saving;
+
+ if (hsotg->flags.b.port_connect_status == 0)
+ goto skip_power_saving;
+
+ switch (hsotg->params.power_down) {
+ case DWC2_POWER_DOWN_PARAM_PARTIAL:
+ /* Enter partial_power_down */
+ ret = dwc2_enter_partial_power_down(hsotg);
+ if (ret)
+ dev_err(hsotg->dev,
+ "enter partial_power_down failed\n");
+ /* After entering suspend, hardware is not accessible */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ break;
+ case DWC2_POWER_DOWN_PARAM_HIBERNATION:
+ /* Enter hibernation */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ ret = dwc2_enter_hibernation(hsotg, 1);
+ if (ret)
+ dev_err(hsotg->dev, "enter hibernation failed\n");
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /* After entering suspend, hardware is not accessible */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ break;
+ case DWC2_POWER_DOWN_PARAM_NONE:
+ /*
+ * If neither hibernation nor partial power down is supported,
+ * clock gating is used to save power.
+ */
+ if (!hsotg->params.no_clock_gating) {
+ dwc2_host_enter_clock_gating(hsotg);
+
+ /* After entering suspend, hardware is not accessible */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ }
+ break;
+ default:
+ goto skip_power_saving;
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_vbus_supply_exit(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /* Ask phy to be suspended */
+ if (!IS_ERR_OR_NULL(hsotg->uphy)) {
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ usb_phy_set_suspend(hsotg->uphy, true);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
+skip_power_saving:
+ hsotg->lx_state = DWC2_L2;
+unlock:
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return ret;
+}
+
+static int _dwc2_hcd_resume(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ unsigned long flags;
+ u32 hprt0;
+ int ret = 0;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ if (dwc2_is_device_mode(hsotg))
+ goto unlock;
+
+ if (hsotg->lx_state != DWC2_L2)
+ goto unlock;
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+
+ /*
+ * Check the port connection status; this prevents _dwc2_hcd_resume()
+ * from exiting Partial Power Down mode when the controller is not
+ * actually in Partial Power Down mode.
+ */
+ if (hprt0 & HPRT0_CONNSTS) {
+ hsotg->lx_state = DWC2_L0;
+ goto unlock;
+ }
+
+ switch (hsotg->params.power_down) {
+ case DWC2_POWER_DOWN_PARAM_PARTIAL:
+ ret = dwc2_exit_partial_power_down(hsotg, 0, true);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed\n");
+ /*
+ * Set HW accessible bit before powering on the controller
+ * since an interrupt may be raised.
+ */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ break;
+ case DWC2_POWER_DOWN_PARAM_HIBERNATION:
+ ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
+ if (ret)
+ dev_err(hsotg->dev, "exit hibernation failed.\n");
+
+ /*
+ * Set HW accessible bit before powering on the controller
+ * since an interrupt may be raised.
+ */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ break;
+ case DWC2_POWER_DOWN_PARAM_NONE:
+ /*
+ * If neither hibernation nor partial power down is supported,
+ * port resume is done using the clock gating programming flow.
+ */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_host_exit_clock_gating(hsotg, 0);
+
+ /*
+ * Initialize the Core for Host mode, as after system resume
+ * the global interrupts are disabled.
+ */
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ dwc2_hcd_reinit(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * Set HW accessible bit before powering on the controller
+ * since an interrupt may be raised.
+ */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ break;
+ default:
+ hsotg->lx_state = DWC2_L0;
+ goto unlock;
+ }
+
+ /* Change root port status, as a port status change occurred after resume. */
+ hsotg->flags.b.port_suspend_change = 1;
+
+ /*
+ * Enable power if not already done.
+ * This must not be done under the spinlock since the
+ * duration of this call is unknown.
+ */
+ if (!IS_ERR_OR_NULL(hsotg->uphy)) {
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ usb_phy_set_suspend(hsotg->uphy, false);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
+ /* Enable external vbus supply after resuming the port. */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ dwc2_vbus_supply_init(hsotg);
+
+ /* Wait for controller to correctly update D+/D- level */
+ usleep_range(3000, 5000);
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * Clear Port Enable and Port Status changes.
+ * Enable Port Power.
+ */
+ dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET |
+ HPRT0_ENACHG, HPRT0);
+
+ /* Wait for controller to detect Port Connect */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ usleep_range(5000, 7000);
+ spin_lock_irqsave(&hsotg->lock, flags);
+unlock:
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return ret;
+}
+
+/* Returns the current frame number */
+static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ return dwc2_hcd_get_frame_number(hsotg);
+}
+
+static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
+ char *fn_name)
+{
+#ifdef VERBOSE_DEBUG
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ char *pipetype = NULL;
+ char *speed = NULL;
+
+ dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
+ dev_vdbg(hsotg->dev, " Device address: %d\n",
+ usb_pipedevice(urb->pipe));
+ dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n",
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ pipetype = "CONTROL";
+ break;
+ case PIPE_BULK:
+ pipetype = "BULK";
+ break;
+ case PIPE_INTERRUPT:
+ pipetype = "INTERRUPT";
+ break;
+ case PIPE_ISOCHRONOUS:
+ pipetype = "ISOCHRONOUS";
+ break;
+ }
+
+ dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
+ usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
+ "IN" : "OUT");
+
+ switch (urb->dev->speed) {
+ case USB_SPEED_HIGH:
+ speed = "HIGH";
+ break;
+ case USB_SPEED_FULL:
+ speed = "FULL";
+ break;
+ case USB_SPEED_LOW:
+ speed = "LOW";
+ break;
+ default:
+ speed = "UNKNOWN";
+ break;
+ }
+
+ dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
+ dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
+ usb_endpoint_maxp(&urb->ep->desc),
+ usb_endpoint_maxp_mult(&urb->ep->desc));
+
+ dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
+ urb->transfer_buffer_length);
+ dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
+ urb->transfer_buffer, (unsigned long)urb->transfer_dma);
+ dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
+ urb->setup_packet, (unsigned long)urb->setup_dma);
+ dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval);
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i);
+ dev_vdbg(hsotg->dev, " offset: %d, length %d\n",
+ urb->iso_frame_desc[i].offset,
+ urb->iso_frame_desc[i].length);
+ }
+ }
+#endif
+}
+
+/*
+ * Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB.
+ */
+static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct dwc2_hcd_urb *dwc2_urb;
+ int i;
+ int retval;
+ int alloc_bandwidth = 0;
+ u8 ep_type = 0;
+ u32 tflags = 0;
+ void *buf;
+ unsigned long flags;
+ struct dwc2_qh *qh;
+ bool qh_allocated = false;
+ struct dwc2_qtd *qtd;
+ struct dwc2_gregs_backup *gr;
+
+ gr = &hsotg->gr_backup;
+
+ if (dbg_urb(urb)) {
+ dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
+ dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
+ }
+
+ if (hsotg->hibernated) {
+ if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
+ retval = dwc2_exit_hibernation(hsotg, 0, 0, 1);
+ else
+ retval = dwc2_exit_hibernation(hsotg, 0, 0, 0);
+
+ if (retval)
+ dev_err(hsotg->dev,
+ "exit hibernation failed.\n");
+ }
+
+ if (hsotg->in_ppd) {
+ retval = dwc2_exit_partial_power_down(hsotg, 0, true);
+ if (retval)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed\n");
+ }
+
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+ hsotg->bus_suspended) {
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ else
+ dwc2_host_exit_clock_gating(hsotg, 0);
+ }
+
+ if (!ep)
+ return -EINVAL;
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
+ usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+ spin_lock_irqsave(&hsotg->lock, flags);
+ if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
+ alloc_bandwidth = 1;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ ep_type = USB_ENDPOINT_XFER_CONTROL;
+ break;
+ case PIPE_ISOCHRONOUS:
+ ep_type = USB_ENDPOINT_XFER_ISOC;
+ break;
+ case PIPE_BULK:
+ ep_type = USB_ENDPOINT_XFER_BULK;
+ break;
+ case PIPE_INTERRUPT:
+ ep_type = USB_ENDPOINT_XFER_INT;
+ break;
+ }
+
+ dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
+ mem_flags);
+ if (!dwc2_urb)
+ return -ENOMEM;
+
+ dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe), ep_type,
+ usb_pipein(urb->pipe),
+ usb_endpoint_maxp(&ep->desc),
+ usb_endpoint_maxp_mult(&ep->desc));
+
+ buf = urb->transfer_buffer;
+
+ if (hcd_uses_dma(hcd)) {
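+ /*
+ * Buffer DMA needs a 32-bit-aligned address; without a
+ * transfer_buffer to bounce through, a misaligned
+ * transfer_dma cannot be fixed up, so reject it.
+ */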
+ if (!buf && (urb->transfer_dma & 3)) {
+ dev_err(hsotg->dev,
+ "%s: unaligned transfer with no transfer_buffer",
+ __func__);
+ retval = -EINVAL;
+ goto fail0;
+ }
+ }
+
+ if (!(urb->transfer_flags & URB_NO_INTERRUPT))
+ tflags |= URB_GIVEBACK_ASAP;
+ if (urb->transfer_flags & URB_ZERO_PACKET)
+ tflags |= URB_SEND_ZERO_PACKET;
+
+ dwc2_urb->priv = urb;
+ dwc2_urb->buf = buf;
+ dwc2_urb->dma = urb->transfer_dma;
+ dwc2_urb->length = urb->transfer_buffer_length;
+ dwc2_urb->setup_packet = urb->setup_packet;
+ dwc2_urb->setup_dma = urb->setup_dma;
+ dwc2_urb->flags = tflags;
+ dwc2_urb->interval = urb->interval;
+ dwc2_urb->status = -EINPROGRESS;
+
+ for (i = 0; i < urb->number_of_packets; ++i)
+ dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
+ urb->iso_frame_desc[i].offset,
+ urb->iso_frame_desc[i].length);
+
+ urb->hcpriv = dwc2_urb;
+ qh = (struct dwc2_qh *)ep->hcpriv;
+ /* Create QH for the endpoint if it doesn't exist */
+ if (!qh) {
+ qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
+ if (!qh) {
+ retval = -ENOMEM;
+ goto fail0;
+ }
+ ep->hcpriv = qh;
+ qh_allocated = true;
+ }
+
+ qtd = kzalloc(sizeof(*qtd), mem_flags);
+ if (!qtd) {
+ retval = -ENOMEM;
+ goto fail1;
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval)
+ goto fail2;
+
+ retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
+ if (retval)
+ goto fail3;
+
+ if (alloc_bandwidth) {
+ dwc2_allocate_bus_bandwidth(hcd,
+ dwc2_hcd_get_ep_bandwidth(hsotg, ep),
+ urb);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+
+fail3:
+ dwc2_urb->priv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+fail2:
+ urb->hcpriv = NULL;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ kfree(qtd);
+fail1:
+ if (qh_allocated) {
+ struct dwc2_qtd *qtd2, *qtd2_tmp;
+
+ ep->hcpriv = NULL;
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ /* Free each QTD in the QH's QTD list */
+ list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
+ qtd_list_entry)
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
+ dwc2_hcd_qh_free(hsotg, qh);
+ }
+fail0:
+ kfree(dwc2_urb);
+
+ return retval;
+}
+
+/*
+ * Aborts/cancels a USB transfer request. Returns 0 on success or a
+ * negative error code on failure.
+ */
+static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
+ dwc2_dump_urb_info(hcd, urb, "urb_dequeue");
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto out;
+
+ if (!urb->hcpriv) {
+ dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
+ goto out;
+ }
+
+ rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
+
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+ kfree(urb->hcpriv);
+ urb->hcpriv = NULL;
+
+ /* Higher layer software sets URB status */
+ spin_unlock(&hsotg->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&hsotg->lock);
+
+ dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
+ dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
+out:
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return rc;
+}
+
+/*
+ * Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued.
+ */
+static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ dev_dbg(hsotg->dev,
+ "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
+ ep->desc.bEndpointAddress, ep->hcpriv);
+ dwc2_hcd_endpoint_disable(hsotg, ep, 250);
+}
+
+/*
+ * Resets endpoint-specific parameter values; in the current version it is
+ * used only to reset the data toggle (as a workaround). This function can
+ * be called from the usb_clear_halt() routine.
+ */
+static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ unsigned long flags;
+
+ dev_dbg(hsotg->dev,
+ "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
+ ep->desc.bEndpointAddress);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hcd_endpoint_reset(hsotg, ep);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/*
+ * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs
+ */
+static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ return dwc2_handle_hcd_intr(hsotg);
+}
+
+/*
+ * Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0.
+ */
+static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
+ return buf[0] != 0;
+}
+
+/* Handles hub class-specific requests */
+static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
+ u16 windex, char *buf, u16 wlength)
+{
+ int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
+ wvalue, windex, buf, wlength);
+ return retval;
+}
+
+/* Handles hub TT buffer clear completions */
+static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ struct dwc2_qh *qh;
+ unsigned long flags;
+
+ qh = ep->hcpriv;
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ qh->tt_buffer_dirty = 0;
+
+ if (hsotg->flags.b.port_connect_status)
+ dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/*
+ * HPRT0_SPD_HIGH_SPEED: high speed
+ * HPRT0_SPD_FULL_SPEED: full speed
+ */
+static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ if (hsotg->params.speed == speed)
+ return;
+
+ hsotg->params.speed = speed;
+ queue_work(hsotg->wq_otg, &hsotg->wf_otg);
+}
+
+static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ if (!hsotg->params.change_speed_quirk)
+ return;
+
+ /*
+ * On removal, set speed to default high-speed.
+ */
+ if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN &&
+ udev->parent->speed < USB_SPEED_HIGH) {
+ dev_info(hsotg->dev, "Set speed to default high-speed\n");
+ dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
+ }
+}
+
+static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+
+ if (!hsotg->params.change_speed_quirk)
+ return 0;
+
+ if (udev->speed == USB_SPEED_HIGH) {
+ dev_info(hsotg->dev, "Set speed to high-speed\n");
+ dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
+ } else if ((udev->speed == USB_SPEED_FULL ||
+ udev->speed == USB_SPEED_LOW)) {
+ /*
+ * Change speed setting to full-speed if there's
+ * a full-speed or low-speed device plugged in.
+ */
+ dev_info(hsotg->dev, "Set speed to full-speed\n");
+ dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED);
+ }
+
+ return 0;
+}
+
+static struct hc_driver dwc2_hc_driver = {
+ .description = "dwc2_hsotg",
+ .product_desc = "DWC OTG Controller",
+ .hcd_priv_size = sizeof(struct wrapper_priv_data),
+
+ .irq = _dwc2_hcd_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ .start = _dwc2_hcd_start,
+ .stop = _dwc2_hcd_stop,
+ .urb_enqueue = _dwc2_hcd_urb_enqueue,
+ .urb_dequeue = _dwc2_hcd_urb_dequeue,
+ .endpoint_disable = _dwc2_hcd_endpoint_disable,
+ .endpoint_reset = _dwc2_hcd_endpoint_reset,
+ .get_frame_number = _dwc2_hcd_get_frame_number,
+
+ .hub_status_data = _dwc2_hcd_hub_status_data,
+ .hub_control = _dwc2_hcd_hub_control,
+ .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
+
+ .bus_suspend = _dwc2_hcd_suspend,
+ .bus_resume = _dwc2_hcd_resume,
+
+ .map_urb_for_dma = dwc2_map_urb_for_dma,
+ .unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
+};
+
+/*
+ * Frees secondary storage associated with the dwc2_hsotg structure contained
+ * in the struct usb_hcd field
+ */
+static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
+{
+ u32 ahbcfg;
+ u32 dctl;
+ int i;
+
+ dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");
+
+ /* Free memory for QH/QTD lists */
+ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
+ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
+ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
+ dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
+ dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
+ dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
+ dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);
+
+ /* Free memory for the host channels */
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
+
+ if (chan) {
+ dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
+ i, chan);
+ hsotg->hc_ptr_array[i] = NULL;
+ kfree(chan);
+ }
+ }
+
+ if (hsotg->params.host_dma) {
+ if (hsotg->status_buf) {
+ dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
+ hsotg->status_buf,
+ hsotg->status_buf_dma);
+ hsotg->status_buf = NULL;
+ }
+ } else {
+ kfree(hsotg->status_buf);
+ hsotg->status_buf = NULL;
+ }
+
+ ahbcfg = dwc2_readl(hsotg, GAHBCFG);
+
+ /* Disable all interrupts */
+ ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
+ dwc2_writel(hsotg, 0, GINTMSK);
+
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
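+ /*
+ * On 3.00a and later cores, also assert soft disconnect so the
+ * device-mode pull-up stays off once the HCD releases the core
+ * (an assumption inferred from the snpsid check here).
+ */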
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_SFTDISCON;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+
+ if (hsotg->wq_otg) {
+ if (!cancel_work_sync(&hsotg->wf_otg))
+ flush_workqueue(hsotg->wq_otg);
+ destroy_workqueue(hsotg->wq_otg);
+ }
+
+ cancel_work_sync(&hsotg->phy_reset_work);
+
+ del_timer(&hsotg->wkp_timer);
+}
+
+static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
+{
+ /* Turn off all host-specific interrupts */
+ dwc2_disable_host_interrupts(hsotg);
+
+ dwc2_hcd_free(hsotg);
+}
+
+/*
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ */
+int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
+{
+ struct platform_device *pdev = to_platform_device(hsotg->dev);
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct dwc2_host_chan *channel;
+ u32 hcfg;
+ int i, num_channels;
+ int retval;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
+
+ retval = -ENOMEM;
+
+ hcfg = dwc2_readl(hsotg, HCFG);
+ dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+ hsotg->frame_num_array = kcalloc(FRAME_NUM_ARRAY_SIZE,
+ sizeof(*hsotg->frame_num_array),
+ GFP_KERNEL);
+ if (!hsotg->frame_num_array)
+ goto error1;
+ hsotg->last_frame_num_array =
+ kcalloc(FRAME_NUM_ARRAY_SIZE,
+ sizeof(*hsotg->last_frame_num_array), GFP_KERNEL);
+ if (!hsotg->last_frame_num_array)
+ goto error1;
+#endif
+ hsotg->last_frame_num = HFNUM_MAX_FRNUM;
+
+ /* Check if the bus driver or platform code has setup a dma_mask */
+ if (hsotg->params.host_dma &&
+ !hsotg->dev->dma_mask) {
+ dev_warn(hsotg->dev,
+ "dma_mask not set, disabling DMA\n");
+ hsotg->params.host_dma = false;
+ hsotg->params.dma_desc_enable = false;
+ }
+
+ /* Set device flags indicating whether the HCD supports DMA */
+ if (hsotg->params.host_dma) {
+ if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+ dev_warn(hsotg->dev, "can't set DMA mask\n");
+ if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+ dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
+ }
+
+ if (hsotg->params.change_speed_quirk) {
+ dwc2_hc_driver.free_dev = dwc2_free_dev;
+ dwc2_hc_driver.reset_device = dwc2_reset_device;
+ }
+
+ if (hsotg->params.host_dma)
+ dwc2_hc_driver.flags |= HCD_DMA;
+
+ hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
+ if (!hcd)
+ goto error1;
+
+ hcd->has_tt = 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -EINVAL;
+ goto error2;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ ((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg;
+ hsotg->priv = hcd;
+
+ /*
+ * Disable the global interrupt until all the interrupt handlers are
+ * installed
+ */
+ dwc2_disable_global_interrupts(hsotg);
+
+ /* Initialize the DWC_otg core, and select the Phy type */
+ retval = dwc2_core_init(hsotg, true);
+ if (retval)
+ goto error2;
+
+ /* Create new workqueue and init work */
+ retval = -ENOMEM;
+ hsotg->wq_otg = alloc_ordered_workqueue("dwc2", 0);
+ if (!hsotg->wq_otg) {
+ dev_err(hsotg->dev, "Failed to create workqueue\n");
+ goto error2;
+ }
+ INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
+
+ timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
+
+ /* Initialize the non-periodic schedule */
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
+
+ /* Initialize the periodic schedule */
+ INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
+ INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
+
+ INIT_LIST_HEAD(&hsotg->split_order);
+
+ /*
+ * Create a host channel descriptor for each host channel implemented
+ * in the controller. Initialize the channel descriptor array.
+ */
+ INIT_LIST_HEAD(&hsotg->free_hc_list);
+ num_channels = hsotg->params.host_channels;
+ memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
+
+ for (i = 0; i < num_channels; i++) {
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ goto error3;
+ channel->hc_num = i;
+ INIT_LIST_HEAD(&channel->split_order_list_entry);
+ hsotg->hc_ptr_array[i] = channel;
+ }
+
+ /* Initialize work */
+ INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
+ INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
+ INIT_WORK(&hsotg->phy_reset_work, dwc2_hcd_phy_reset_func);
+
+ /*
+ * Allocate space for storing data on status transactions. Normally no
+ * data is sent, but this space acts as a bit bucket. The buffer is
+ * allocated directly with dma_alloc_coherent()/kzalloc() here, so it
+ * does not depend on the DMA buffer pool that usb_add_hcd() sets up.
+ */
+ if (hsotg->params.host_dma)
+ hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
+ DWC2_HCD_STATUS_BUF_SIZE,
+ &hsotg->status_buf_dma, GFP_KERNEL);
+ else
+ hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
+ GFP_KERNEL);
+
+ if (!hsotg->status_buf)
+ goto error3;
+
+ /*
+ * Create kmem caches to handle descriptor buffers in descriptor
+ * DMA mode.
+ * Alignment must be set to 512 bytes.
+ */
+ if (hsotg->params.dma_desc_enable ||
+ hsotg->params.dma_desc_fs_enable) {
+ hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
+ sizeof(struct dwc2_dma_desc) *
+ MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
+ NULL);
+ if (!hsotg->desc_gen_cache) {
+ dev_err(hsotg->dev,
+ "unable to create dwc2 generic desc cache\n");
+
+ /*
+ * Disable descriptor dma mode since it will not be
+ * usable.
+ */
+ hsotg->params.dma_desc_enable = false;
+ hsotg->params.dma_desc_fs_enable = false;
+ }
+
+ hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
+ sizeof(struct dwc2_dma_desc) *
+ MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
+ if (!hsotg->desc_hsisoc_cache) {
+ dev_err(hsotg->dev,
+ "unable to create dwc2 hs isoc desc cache\n");
+
+ kmem_cache_destroy(hsotg->desc_gen_cache);
+
+ /*
+ * Disable descriptor dma mode since it will not be
+ * usable.
+ */
+ hsotg->params.dma_desc_enable = false;
+ hsotg->params.dma_desc_fs_enable = false;
+ }
+ }
+
+ if (hsotg->params.host_dma) {
+ /*
+ * Create kmem caches to handle non-aligned buffer
+ * in Buffer DMA mode.
+ */
+ hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+ DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+ SLAB_CACHE_DMA, NULL);
+ if (!hsotg->unaligned_cache)
+ dev_err(hsotg->dev,
+ "unable to create dwc2 unaligned cache\n");
+ }
+
+ hsotg->otg_port = 1;
+ hsotg->frame_list = NULL;
+ hsotg->frame_list_dma = 0;
+ hsotg->periodic_qh_count = 0;
+
+ /* Initialize lx_state to the L3 (disconnected) state */
+ hsotg->lx_state = DWC2_L3;
+
+ hcd->self.otg_port = hsotg->otg_port;
+
+ /* Don't support SG list at this point */
+ hcd->self.sg_tablesize = 0;
+
+ hcd->tpl_support = of_usb_host_tpl_support(hsotg->dev->of_node);
+
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ otg_set_host(hsotg->uphy->otg, &hcd->self);
+
+ /*
+ * Finish generic HCD initialization and start the HCD. This function
+ * allocates the DMA buffer pool, registers the USB bus, requests the
+ * IRQ line, and calls hcd_start method.
+ */
+ retval = usb_add_hcd(hcd, hsotg->irq, IRQF_SHARED);
+ if (retval < 0)
+ goto error4;
+
+ device_wakeup_enable(hcd->self.controller);
+
+ dwc2_hcd_dump_state(hsotg);
+
+ dwc2_enable_global_interrupts(hsotg);
+
+ return 0;
+
+error4:
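+ /*
+ * Note: kmem_cache_destroy() is a no-op when passed NULL, so this
+ * unwind is safe even if some of the caches were never created.
+ */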
+ kmem_cache_destroy(hsotg->unaligned_cache);
+ kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+ kmem_cache_destroy(hsotg->desc_gen_cache);
+error3:
+ dwc2_hcd_release(hsotg);
+error2:
+ usb_put_hcd(hcd);
+error1:
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+ kfree(hsotg->last_frame_num_array);
+ kfree(hsotg->frame_num_array);
+#endif
+
+ dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval);
+ return retval;
+}
+
+/*
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd;
+
+ dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n");
+
+ hcd = dwc2_hsotg_to_hcd(hsotg);
+ dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd);
+
+ if (!hcd) {
+ dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
+ __func__);
+ return;
+ }
+
+ if (!IS_ERR_OR_NULL(hsotg->uphy))
+ otg_set_host(hsotg->uphy->otg, NULL);
+
+ usb_remove_hcd(hcd);
+ hsotg->priv = NULL;
+
+ kmem_cache_destroy(hsotg->unaligned_cache);
+ kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+ kmem_cache_destroy(hsotg->desc_gen_cache);
+
+ dwc2_hcd_release(hsotg);
+ usb_put_hcd(hcd);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+ kfree(hsotg->last_frame_num_array);
+ kfree(hsotg->frame_num_array);
+#endif
+}
+
+/**
+ * dwc2_backup_host_registers() - Backup controller host registers.
+ * When suspending the USB bus, the host registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hregs_backup *hr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup Host regs */
+ hr = &hsotg->hr_backup;
+ hr->hcfg = dwc2_readl(hsotg, HCFG);
+ hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
+ for (i = 0; i < hsotg->params.host_channels; ++i)
+ hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
+
+ hr->hprt0 = dwc2_read_hprt0(hsotg);
+ hr->hfir = dwc2_readl(hsotg, HFIR);
+ hr->hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
+ hr->valid = true;
+
+ return 0;
+}
+
+/**
+ * dwc2_restore_host_registers() - Restore controller host registers.
+ * When resuming the USB bus, the host registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hregs_backup *hr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore host regs */
+ hr = &hsotg->hr_backup;
+ if (!hr->valid) {
+ dev_err(hsotg->dev, "%s: no host registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ hr->valid = false;
+
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
+ dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
+
+ for (i = 0; i < hsotg->params.host_channels; ++i)
+ dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
+
+ dwc2_writel(hsotg, hr->hprt0, HPRT0);
+ dwc2_writel(hsotg, hr->hfir, HFIR);
+ dwc2_writel(hsotg, hr->hptxfsiz, HPTXFSIZ);
+ hsotg->frame_number = 0;
+
+ return 0;
+}
+
+/**
+ * dwc2_host_enter_hibernation() - Put controller in Hibernation.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 hprt0;
+ u32 pcgcctl;
+ u32 gusbcfg;
+ u32 gpwrdn;
+
+ dev_dbg(hsotg->dev, "Preparing host for hibernation\n");
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+ ret = dwc2_backup_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup host registers\n",
+ __func__);
+ return ret;
+ }
+
+ /* Enter USB Suspend Mode */
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+ hprt0 |= HPRT0_SUSP;
+ hprt0 &= ~HPRT0_ENA;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ /* Wait for the HPRT0.PrtSusp register field to be set */
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
+ dev_warn(hsotg->dev, "Suspend wasn't generated\n");
+
+ /*
+ * We need to disable interrupts to prevent servicing of any IRQ
+	 * while entering hibernation
+ */
+ spin_lock_irqsave(&hsotg->lock, flags);
+ hsotg->lx_state = DWC2_L2;
+
+ gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+ if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
+ /* ULPI interface */
+ /* Suspend the Phy Clock */
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ } else {
+ /* UTMI+ Interface */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+ }
+
+ /* Enable interrupts from wake up logic */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUINTSEL;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Unmask host mode interrupts in GPWRDN */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_DISCONN_DET_MSK;
+ gpwrdn |= GPWRDN_LNSTSCHG_MSK;
+ gpwrdn |= GPWRDN_STS_CHGINT_MSK;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Enable Power Down Clamp */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PWRDNCLMP;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Switch off VDD */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PWRDNSWTCH;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+
+ hsotg->hibernated = 1;
+ hsotg->bus_suspended = 1;
+ dev_dbg(hsotg->dev, "Host hibernation completed\n");
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return ret;
+}
+
+/**
+ * dwc2_host_exit_hibernation()
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: indicates whether resume is initiated by Device or Host.
+ * @reset: indicates whether resume is initiated by Reset.
+ *
+ * Return: non-zero if failed to exit hibernation.
+ *
+ * This function is for exiting from Host mode hibernation by
+ * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
+ */
+int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ int reset)
+{
+ u32 gpwrdn;
+ u32 hprt0;
+ int ret = 0;
+ struct dwc2_gregs_backup *gr;
+ struct dwc2_hregs_backup *hr;
+
+ gr = &hsotg->gr_backup;
+ hr = &hsotg->hr_backup;
+
+ dev_dbg(hsotg->dev,
+ "%s: called with rem_wakeup = %d reset = %d\n",
+ __func__, rem_wakeup, reset);
+
+ dwc2_hib_restore_common(hsotg, rem_wakeup, 1);
+ hsotg->hibernated = 0;
+
+ /*
+	 * This step is not described in the functional spec, but without this
+	 * delay mismatch interrupts occur because just after restore the core
+	 * is still in device mode (gintsts.curmode == 0)
+ */
+ mdelay(100);
+
+	/* Clear all pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* De-assert Restore */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_RESTORE;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ /* Restore GUSBCFG, HCFG */
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
+
+ /* De-assert Wakeup Logic */
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ hprt0 = hr->hprt0;
+ hprt0 |= HPRT0_PWR;
+ hprt0 &= ~HPRT0_ENA;
+ hprt0 &= ~HPRT0_SUSP;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ hprt0 = hr->hprt0;
+ hprt0 |= HPRT0_PWR;
+ hprt0 &= ~HPRT0_ENA;
+ hprt0 &= ~HPRT0_SUSP;
+
+ if (reset) {
+ hprt0 |= HPRT0_RST;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ /* Wait for Resume time and then program HPRT again */
+ mdelay(60);
+ hprt0 &= ~HPRT0_RST;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ } else {
+ hprt0 |= HPRT0_RES;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ /* Wait for Resume time and then program HPRT again */
+ mdelay(100);
+ hprt0 &= ~HPRT0_RES;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ }
+ /* Clear all interrupt status */
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+ hprt0 |= HPRT0_CONNDET;
+ hprt0 |= HPRT0_ENACHG;
+ hprt0 &= ~HPRT0_ENA;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+
+	/* Clear all pending interrupts */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* Restore global registers */
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+
+ /* Restore host registers */
+ ret = dwc2_restore_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore host registers\n",
+ __func__);
+ return ret;
+ }
+
+ if (rem_wakeup) {
+ dwc2_hcd_rem_wakeup(hsotg);
+ /*
+ * Change "port_connect_status_change" flag to re-enumerate,
+ * because after exit from hibernation port connection status
+ * is not detected.
+ */
+ hsotg->flags.b.port_connect_status_change = 1;
+ }
+
+ hsotg->hibernated = 0;
+ hsotg->bus_suspended = 0;
+ hsotg->lx_state = DWC2_L0;
+ dev_dbg(hsotg->dev, "Host hibernation restore complete\n");
+ return ret;
+}
+
+bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
+{
+ struct usb_device *root_hub = dwc2_hsotg_to_hcd(dwc2)->self.root_hub;
+
+ /* If the controller isn't allowed to wakeup then we can power off. */
+ if (!device_may_wakeup(dwc2->dev))
+ return true;
+
+ /*
+ * We don't want to power off the PHY if something under the
+ * root hub has wakeup enabled.
+ */
+ if (usb_wakeup_enabled_descendants(root_hub))
+ return false;
+
+ /* No reason to keep the PHY powered, so allow poweroff */
+ return true;
+}
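+
+/*
+ * Illustrative caller sketch (editor's note): a suspend path could consume
+ * the predicate above along the lines of:
+ *
+ *	if (dwc2_host_can_poweroff_phy(dwc2))
+ *		phy_power_off(dwc2->phy);
+ *
+ * The actual PHY handling lives in the platform glue; this is only meant
+ * to show how the check is intended to be used.
+ */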
+
+/**
+ * dwc2_host_enter_partial_power_down() - Put controller in partial
+ * power down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * Return: non-zero if failed to enter host partial power down.
+ *
+ * This function is for entering Host mode partial power down.
+ */
+int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg)
+{
+ u32 pcgcctl;
+ u32 hprt0;
+ int ret = 0;
+
+ dev_dbg(hsotg->dev, "Entering host partial power down started.\n");
+
+ /* Put this port in suspend mode. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_SUSP;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ udelay(5);
+
+ /* Wait for the HPRT0.PrtSusp register field to be set */
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+ dev_warn(hsotg->dev, "Suspend wasn't generated\n");
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_backup_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup host registers\n",
+ __func__);
+ return ret;
+ }
+
+ /*
+ * Clear any pending interrupts since dwc2 will not be able to
+ * clear them after entering partial_power_down.
+ */
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+
+ /* Put the controller in low power state */
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+
+ pcgcctl |= PCGCTL_PWRCLMP;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(5);
+
+ pcgcctl |= PCGCTL_RSTPDWNMODULE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(5);
+
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ /* Set in_ppd flag to 1 as here core enters suspend. */
+ hsotg->in_ppd = 1;
+ hsotg->lx_state = DWC2_L2;
+ hsotg->bus_suspended = true;
+
+ dev_dbg(hsotg->dev, "Entering host partial power down completed.\n");
+
+ return ret;
+}
+
+/**
+ * dwc2_host_exit_partial_power_down() - Exit controller from host partial
+ * power down.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: indicates whether resume is initiated by remote wakeup.
+ * @restore: indicates whether the registers need to be restored.
+ *
+ * Return: non-zero if failed to exit host partial power down.
+ *
+ * This function is for exiting from Host mode partial power down.
+ */
+int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
+ int rem_wakeup, bool restore)
+{
+ u32 pcgcctl;
+ int ret = 0;
+ u32 hprt0;
+
+ dev_dbg(hsotg->dev, "Exiting host partial power down started.\n");
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(5);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_PWRCLMP;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(5);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+
+ udelay(100);
+ if (restore) {
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_restore_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore host registers\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /* Drive resume signaling and exit suspend mode on the port. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+ hprt0 &= ~HPRT0_SUSP;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ udelay(5);
+
+ if (!rem_wakeup) {
+		/* Stop driving resume signaling on the port. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 &= ~HPRT0_RES;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ hsotg->bus_suspended = false;
+ } else {
+ /* Turn on the port power bit. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ /* Connect hcd. */
+ dwc2_hcd_connect(hsotg);
+
+ mod_timer(&hsotg->wkp_timer,
+ jiffies + msecs_to_jiffies(71));
+ }
+
+	/* Set lx_state to DWC2_L0 and in_ppd to 0 as the core exits suspend here. */
+ hsotg->in_ppd = 0;
+ hsotg->lx_state = DWC2_L0;
+
+ dev_dbg(hsotg->dev, "Exiting host partial power down completed.\n");
+ return ret;
+}
+
+/**
+ * dwc2_host_enter_clock_gating() - Put controller in clock gating.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * This function is for entering Host mode clock gating.
+ */
+void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0;
+ u32 pcgctl;
+
+ dev_dbg(hsotg->dev, "Entering host clock gating.\n");
+
+ /* Put this port in suspend mode. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_SUSP;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+	/* Stop the PHY clock (set PCGCTL_STOPPCLK) as suspend is received. */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+	/* Gate the hclk (set PCGCTL_GATEHCLK) as suspend is received. */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl |= PCGCTL_GATEHCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+ hsotg->bus_suspended = true;
+ hsotg->lx_state = DWC2_L2;
+}
+
+/**
+ * dwc2_host_exit_clock_gating() - Exit controller from clock gating.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @rem_wakeup: indicates whether resume is initiated by remote wakeup
+ *
+ * This function is for exiting Host mode clock gating.
+ */
+void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
+{
+ u32 hprt0;
+ u32 pcgctl;
+
+ dev_dbg(hsotg->dev, "Exiting host clock gating.\n");
+
+	/* Ungate the hclk (clear PCGCTL_GATEHCLK). */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_GATEHCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+	/* Restart the PHY clock (clear PCGCTL_STOPPCLK). */
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
+ udelay(5);
+
+ /* Drive resume signaling and exit suspend mode on the port. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+ hprt0 &= ~HPRT0_SUSP;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ udelay(5);
+
+ if (!rem_wakeup) {
+		/* In case of port resume we need to wait for the resume time (40 ms) */
+ msleep(USB_RESUME_TIMEOUT);
+
+		/* Stop driving resume signaling on the port. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 &= ~HPRT0_RES;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ hsotg->bus_suspended = false;
+ hsotg->lx_state = DWC2_L0;
+ } else {
+ mod_timer(&hsotg->wkp_timer,
+ jiffies + msecs_to_jiffies(71));
+ }
+}
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
new file mode 100644
index 000000000..b7254d94f
--- /dev/null
+++ b/drivers/usb/dwc2/hcd.h
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * hcd.h - DesignWare HS OTG Controller host-mode declarations
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+#ifndef __DWC2_HCD_H__
+#define __DWC2_HCD_H__
+
+/*
+ * This file contains the structures, constants, and interfaces for the
+ * Host Controller Driver (HCD)
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+struct dwc2_qh;
+
+/**
+ * struct dwc2_host_chan - Software host channel descriptor
+ *
+ * @hc_num: Host channel number, used for register address lookup
+ * @dev_addr: Address of the device
+ * @ep_num: Endpoint of the device
+ * @ep_is_in: Endpoint direction
+ * @speed: Device speed. One of the following values:
+ * - USB_SPEED_LOW
+ * - USB_SPEED_FULL
+ * - USB_SPEED_HIGH
+ * @ep_type: Endpoint type. One of the following values:
+ * - USB_ENDPOINT_XFER_CONTROL: 0
+ * - USB_ENDPOINT_XFER_ISOC: 1
+ * - USB_ENDPOINT_XFER_BULK: 2
+ * - USB_ENDPOINT_XFER_INTR: 3
+ * @max_packet: Max packet size in bytes
+ * @data_pid_start: PID for initial transaction.
+ * 0: DATA0
+ * 1: DATA2
+ * 2: DATA1
+ * 3: MDATA (non-Control EP),
+ * SETUP (Control EP)
+ * @multi_count: Number of additional periodic transactions per
+ * (micro)frame
+ * @xfer_buf: Pointer to current transfer buffer position
+ * @xfer_dma: DMA address of xfer_buf
+ * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
+ * DWORD aligned
+ * @xfer_len: Total number of bytes to transfer
+ * @xfer_count: Number of bytes transferred so far
+ * @start_pkt_count: Packet count at start of transfer
+ * @xfer_started: True if the transfer has been started
+ * @do_ping: True if a PING request should be issued on this channel
+ * @error_state: True if the error count for this transaction is non-zero
+ * @halt_on_queue: True if this channel should be halted the next time a
+ * request is queued for the channel. This is necessary in
+ * slave mode if no request queue space is available when
+ * an attempt is made to halt the channel.
+ * @halt_pending: True if the host channel has been halted, but the core
+ * is not finished flushing queued requests
+ * @do_split: Enable split for the channel
+ * @complete_split: Enable complete split
+ * @hub_addr: Address of high speed hub for the split
+ * @hub_port: Port of the low/full speed device for the split
+ * @xact_pos: Split transaction position. One of the following values:
+ * - DWC2_HCSPLT_XACTPOS_MID
+ * - DWC2_HCSPLT_XACTPOS_BEGIN
+ * - DWC2_HCSPLT_XACTPOS_END
+ * - DWC2_HCSPLT_XACTPOS_ALL
+ * @requests: Number of requests issued for this channel since it was
+ * assigned to the current transfer (not counting PINGs)
+ * @schinfo: Scheduling micro-frame bitmap
+ * @ntd: Number of transfer descriptors for the transfer
+ * @halt_status: Reason for halting the host channel
+ * @hcint: Contents of the HCINT register when the interrupt came
+ * @qh: QH for the transfer being processed by this channel
+ * @hc_list_entry: For linking to list of host channels
+ * @desc_list_addr: Current QH's descriptor list DMA address
+ * @desc_list_sz: Current QH's descriptor list size
+ * @split_order_list_entry: List entry for keeping track of the order of splits
+ *
+ * This structure represents the state of a single host channel when acting in
+ * host mode. It contains the data items needed to transfer packets to an
+ * endpoint via a host channel.
+ */
+struct dwc2_host_chan {
+ u8 hc_num;
+
+ unsigned dev_addr:7;
+ unsigned ep_num:4;
+ unsigned ep_is_in:1;
+ unsigned speed:4;
+ unsigned ep_type:2;
+ unsigned max_packet:11;
+ unsigned data_pid_start:2;
+#define DWC2_HC_PID_DATA0 TSIZ_SC_MC_PID_DATA0
+#define DWC2_HC_PID_DATA2 TSIZ_SC_MC_PID_DATA2
+#define DWC2_HC_PID_DATA1 TSIZ_SC_MC_PID_DATA1
+#define DWC2_HC_PID_MDATA TSIZ_SC_MC_PID_MDATA
+#define DWC2_HC_PID_SETUP TSIZ_SC_MC_PID_SETUP
+
+ unsigned multi_count:2;
+
+ u8 *xfer_buf;
+ dma_addr_t xfer_dma;
+ dma_addr_t align_buf;
+ u32 xfer_len;
+ u32 xfer_count;
+ u16 start_pkt_count;
+ u8 xfer_started;
+ u8 do_ping;
+ u8 error_state;
+ u8 halt_on_queue;
+ u8 halt_pending;
+ u8 do_split;
+ u8 complete_split;
+ u8 hub_addr;
+ u8 hub_port;
+ u8 xact_pos;
+#define DWC2_HCSPLT_XACTPOS_MID HCSPLT_XACTPOS_MID
+#define DWC2_HCSPLT_XACTPOS_END HCSPLT_XACTPOS_END
+#define DWC2_HCSPLT_XACTPOS_BEGIN HCSPLT_XACTPOS_BEGIN
+#define DWC2_HCSPLT_XACTPOS_ALL HCSPLT_XACTPOS_ALL
+
+ u8 requests;
+ u8 schinfo;
+ u16 ntd;
+ enum dwc2_halt_status halt_status;
+ u32 hcint;
+ struct dwc2_qh *qh;
+ struct list_head hc_list_entry;
+ dma_addr_t desc_list_addr;
+ u32 desc_list_sz;
+ struct list_head split_order_list_entry;
+};
+
+struct dwc2_hcd_pipe_info {
+ u8 dev_addr;
+ u8 ep_num;
+ u8 pipe_type;
+ u8 pipe_dir;
+ u16 maxp;
+ u16 maxp_mult;
+};
+
+struct dwc2_hcd_iso_packet_desc {
+ u32 offset;
+ u32 length;
+ u32 actual_length;
+ u32 status;
+};
+
+struct dwc2_qtd;
+
+struct dwc2_hcd_urb {
+ void *priv;
+ struct dwc2_qtd *qtd;
+ void *buf;
+ dma_addr_t dma;
+ void *setup_packet;
+ dma_addr_t setup_dma;
+ u32 length;
+ u32 actual_length;
+ u32 status;
+ u32 error_count;
+ u32 packet_count;
+ u32 flags;
+ u16 interval;
+ struct dwc2_hcd_pipe_info pipe_info;
+ struct dwc2_hcd_iso_packet_desc iso_descs[];
+};
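+
+/*
+ * Editor's note (illustrative): iso_descs[] is a flexible array member, so
+ * a URB for n isochronous packets would be sized at allocation time along
+ * the lines of:
+ *
+ *	urb = kzalloc(struct_size(urb, iso_descs, n), mem_flags);
+ *
+ * struct_size() is the overflow-safe helper from <linux/overflow.h>; the
+ * driver's actual allocation site lives elsewhere.
+ */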
+
+/* Phases for control transfers */
+enum dwc2_control_phase {
+ DWC2_CONTROL_SETUP,
+ DWC2_CONTROL_DATA,
+ DWC2_CONTROL_STATUS,
+};
+
+/* Transaction types */
+enum dwc2_transaction_type {
+ DWC2_TRANSACTION_NONE,
+ DWC2_TRANSACTION_PERIODIC,
+ DWC2_TRANSACTION_NON_PERIODIC,
+ DWC2_TRANSACTION_ALL,
+};
+
+/* The number of elements per LS bitmap (per port on multi_tt) */
+#define DWC2_ELEMENTS_PER_LS_BITMAP DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
+ BITS_PER_LONG)
+
+/**
+ * struct dwc2_tt - dwc2 data associated with a usb_tt
+ *
+ * @refcount: Number of Queue Heads (QHs) holding a reference.
+ * @usb_tt: Pointer back to the official usb_tt.
+ * @periodic_bitmaps: Bitmap for which parts of the 1ms frame are accounted
+ * for already. Each is DWC2_ELEMENTS_PER_LS_BITMAP
+ * elements (so sizeof(long) times that in bytes).
+ *
+ * This structure is stored in the hcpriv of the official usb_tt.
+ */
+struct dwc2_tt {
+ int refcount;
+ struct usb_tt *usb_tt;
+ unsigned long periodic_bitmaps[];
+};
+
+/**
+ * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
+ *
+ * @start_schedule_us: The start time on the main bus schedule. Note that
+ * the main bus schedule is tightly packed and this
+ * time should be interpreted as tightly packed (so
+ * uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
+ * instead of 125 us).
+ * @duration_us: How long this transfer lasts, in microseconds.
+ */
+struct dwc2_hs_transfer_time {
+ u32 start_schedule_us;
+ u16 duration_us;
+};
+
+/**
+ * struct dwc2_qh - Software queue head structure
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @ep_type: Endpoint type. One of the following values:
+ * - USB_ENDPOINT_XFER_CONTROL
+ * - USB_ENDPOINT_XFER_BULK
+ * - USB_ENDPOINT_XFER_INT
+ * - USB_ENDPOINT_XFER_ISOC
+ * @ep_is_in: Endpoint direction
+ * @maxp: Value from wMaxPacketSize field of Endpoint Descriptor
+ * @maxp_mult: Multiplier for maxp
+ * @dev_speed: Device speed. One of the following values:
+ * - USB_SPEED_LOW
+ * - USB_SPEED_FULL
+ * - USB_SPEED_HIGH
+ * @data_toggle: Determines the PID of the next data packet for
+ *                      non-control transfers. Ignored for control transfers.
+ * One of the following values:
+ * - DWC2_HC_PID_DATA0
+ * - DWC2_HC_PID_DATA1
+ * @ping_state: Ping state
+ * @do_split: Full/low speed endpoint on high-speed hub requires split
+ * @td_first: Index of first activated isochronous transfer descriptor
+ * @td_last: Index of last activated isochronous transfer descriptor
+ * @host_us: Bandwidth in microseconds per transfer as seen by host
+ * @device_us: Bandwidth in microseconds per transfer as seen by device
+ * @host_interval: Interval between transfers as seen by the host. If
+ * the host is high speed and the device is low speed this
+ * will be 8 times device interval.
+ * @device_interval:    Interval between transfers as seen by the device.
+ * @next_active_frame: (Micro)frame _before_ we next need to put something on
+ * the bus. We'll move the qh to active here. If the
+ * host is in high speed mode this will be a uframe. If
+ * the host is in low speed mode this will be a full frame.
+ * @start_active_frame: If we are partway through a split transfer, this will be
+ * what next_active_frame was when we started. Otherwise
+ * it should always be the same as next_active_frame.
+ * @num_hs_transfers: Number of transfers in hs_transfers.
+ * Normally this is 1 but can be more than one for splits.
+ * Always >= 1 unless the host is in low/full speed mode.
+ * @hs_transfers: Transfers that are scheduled as seen by the high speed
+ * bus. Not used if host is in low or full speed mode (but
+ * note that it IS USED if the device is low or full speed
+ * as long as the HOST is in high speed mode).
+ * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
+ * schedule that's being used by this device. This
+ * will be on the periodic_bitmap in a
+ * "struct dwc2_tt". Not used if this device is high
+ * speed. Note that this is in "schedule slice" which
+ * is tightly packed.
+ * @ntd: Actual number of transfer descriptors in a list
+ * @dw_align_buf: Used instead of original buffer if its physical address
+ * is not dword-aligned
+ * @dw_align_buf_dma: DMA address for dw_align_buf
+ * @qtd_list: List of QTDs for this QH
+ * @channel: Host channel currently processing transfers for this QH
+ * @qh_list_entry: Entry for QH in either the periodic or non-periodic
+ * schedule
+ * @desc_list: List of transfer descriptors
+ * @desc_list_dma: Physical address of desc_list
+ * @desc_list_sz: Size of descriptors list
+ * @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
+ * descriptor and indicates original XferSize value for the
+ * descriptor
+ * @unreserve_timer: Timer for releasing periodic reservation.
+ * @wait_timer: Timer used to wait before re-queuing.
+ * @dwc_tt: Pointer to our tt info (or NULL if no tt).
+ * @ttport: Port number within our tt.
+ * @tt_buffer_dirty:    True if clear_tt_buffer_complete is pending
+ * @unreserve_pending: True if we planned to unreserve but haven't yet.
+ * @schedule_low_speed: True if we have a low/full speed component (either the
+ * host is in low/full speed mode or do_split).
+ * @want_wait: We should wait before re-queuing; only matters for non-
+ * periodic transfers and is ignored for periodic ones.
+ * @wait_timer_cancel: Set to true to cancel the wait_timer.
+ *
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+struct dwc2_qh {
+ struct dwc2_hsotg *hsotg;
+ u8 ep_type;
+ u8 ep_is_in;
+ u16 maxp;
+ u16 maxp_mult;
+ u8 dev_speed;
+ u8 data_toggle;
+ u8 ping_state;
+ u8 do_split;
+ u8 td_first;
+ u8 td_last;
+ u16 host_us;
+ u16 device_us;
+ u16 host_interval;
+ u16 device_interval;
+ u16 next_active_frame;
+ u16 start_active_frame;
+ s16 num_hs_transfers;
+ struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
+ u32 ls_start_schedule_slice;
+ u16 ntd;
+ u8 *dw_align_buf;
+ dma_addr_t dw_align_buf_dma;
+ struct list_head qtd_list;
+ struct dwc2_host_chan *channel;
+ struct list_head qh_list_entry;
+ struct dwc2_dma_desc *desc_list;
+ dma_addr_t desc_list_dma;
+ u32 desc_list_sz;
+ u32 *n_bytes;
+ struct timer_list unreserve_timer;
+ struct hrtimer wait_timer;
+ struct dwc2_tt *dwc_tt;
+ int ttport;
+ unsigned tt_buffer_dirty:1;
+ unsigned unreserve_pending:1;
+ unsigned schedule_low_speed:1;
+ unsigned want_wait:1;
+ unsigned wait_timer_cancel:1;
+};
+
+/**
+ * struct dwc2_qtd - Software queue transfer descriptor (QTD)
+ *
+ * @control_phase: Current phase for control transfers (Setup, Data, or
+ * Status)
+ * @in_process: Indicates if this QTD is currently processed by HW
+ * @data_toggle: Determines the PID of the next data packet for the
+ * data phase of control transfers. Ignored for other
+ * transfer types. One of the following values:
+ * - DWC2_HC_PID_DATA0
+ * - DWC2_HC_PID_DATA1
+ * @complete_split: Keeps track of the current split type for FS/LS
+ * endpoints on a HS Hub
+ * @isoc_split_pos: Position of the ISOC split in full/low speed
+ * @isoc_frame_index: Index of the next frame descriptor for an isochronous
+ * transfer. A frame descriptor describes the buffer
+ * position and length of the data to be transferred in the
+ * next scheduled (micro)frame of an isochronous transfer.
+ * It also holds status for that transaction. The frame
+ * index starts at 0.
+ * @isoc_split_offset: Position of the ISOC split in the buffer for the
+ * current frame
+ * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
+ * @error_count: Holds the number of bus errors that have occurred for
+ * a transaction within this transfer
+ * @n_desc: Number of DMA descriptors for this QTD
+ * @isoc_frame_index_last: Last activated frame (packet) index, used in
+ * descriptor DMA mode only
+ * @num_naks: Number of NAKs received on this QTD.
+ * @urb: URB for this transfer
+ * @qh: Queue head for this QTD
+ * @qtd_list_entry: For linking to the QH's list of QTDs
+ * @isoc_td_first: Index of first activated isochronous transfer
+ * descriptor in Descriptor DMA mode
+ * @isoc_td_last: Index of last activated isochronous transfer
+ * descriptor in Descriptor DMA mode
+ *
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+struct dwc2_qtd {
+ enum dwc2_control_phase control_phase;
+ u8 in_process;
+ u8 data_toggle;
+ u8 complete_split;
+ u8 isoc_split_pos;
+ u16 isoc_frame_index;
+ u16 isoc_split_offset;
+ u16 isoc_td_last;
+ u16 isoc_td_first;
+ u32 ssplit_out_xfer_count;
+ u8 error_count;
+ u8 n_desc;
+ u16 isoc_frame_index_last;
+ u16 num_naks;
+ struct dwc2_hcd_urb *urb;
+ struct dwc2_qh *qh;
+ struct list_head qtd_list_entry;
+};
+
+#ifdef DEBUG
+struct hc_xfer_info {
+ struct dwc2_hsotg *hsotg;
+ struct dwc2_host_chan *chan;
+};
+#endif
+
+u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
+
+/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
+static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
+{
+ return (struct usb_hcd *)hsotg->priv;
+}
+
+/*
+ * Inline used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
+{
+ u32 mask = dwc2_readl(hsotg, HCINTMSK(chnum));
+
+ mask &= ~intr;
+ dwc2_writel(hsotg, mask, HCINTMSK(chnum));
+}
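+
+/*
+ * Illustrative usage (editor's sketch): once a channel-halted event has
+ * been handled, further "channel halted" interrupts for that channel can
+ * be masked with, e.g.:
+ *
+ *	disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
+ *
+ * HCINTMSK_CHHLTD is assumed to be the corresponding mask bit from hw.h.
+ */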
+
+void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
+void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+ enum dwc2_halt_status halt_status);
+void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan);
+
+/*
+ * Reads HPRT0 in preparation for modifying it. Keeps the write-clear (WC)
+ * bits at 0 so that if they read as 1, they won't be cleared when the value
+ * is written back.
+ */
+static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0 = dwc2_readl(hsotg, HPRT0);
+
+ hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
+ return hprt0;
+}
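+
+/*
+ * Worked example (editor's note, assuming the usual hw.h layout where
+ * HPRT0_CONNSTS is bit 0, HPRT0_CONNDET bit 1, HPRT0_ENA bit 2,
+ * HPRT0_ENACHG bit 3, HPRT0_OVRCURRCHG bit 5 and HPRT0_PWR bit 12):
+ * a raw read of 0x100d (PWR | ENACHG | ENA | CONNSTS) is masked down to
+ * 0x1001 (PWR | CONNSTS), so writing the value back modifies the port
+ * without accidentally clearing any write-clear status bits.
+ */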
+
+static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->ep_num;
+}
+
+static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->pipe_type;
+}
+
+static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->maxp;
+}
+
+static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->maxp_mult;
+}
+
+static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->dev_addr;
+}
+
+static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
+}
+
+static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
+}
+
+static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
+}
+
+static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
+}
+
+static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->pipe_dir == USB_DIR_IN;
+}
+
+static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
+{
+ return !dwc2_hcd_is_pipe_in(pipe);
+}
+
+int dwc2_hcd_init(struct dwc2_hsotg *hsotg);
+void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
+
+/* Transaction Execution Functions */
+enum dwc2_transaction_type dwc2_hcd_select_transactions(
+ struct dwc2_hsotg *hsotg);
+void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
+ enum dwc2_transaction_type tr_type);
+
+/* Schedule Queue Functions */
+/* Implemented in hcd_queue.c */
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb,
+ gfp_t mem_flags);
+void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ int sched_csplit);
+
+void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
+int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ struct dwc2_qh *qh);
+
+/* Unlinks and frees a QTD */
+static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
+ struct dwc2_qtd *qtd,
+ struct dwc2_qh *qh)
+{
+ list_del(&qtd->qtd_list_entry);
+ kfree(qtd);
+}
+
+/* Descriptor DMA support functions */
+void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh);
+void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ enum dwc2_halt_status halt_status);
+
+int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ gfp_t mem_flags);
+void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
+
+/* Check if QH is non-periodic */
+#define dwc2_qh_is_non_per(_qh_ptr_) \
+ ((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
+ (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)
+
+#ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
+static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
+static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
+static inline bool dbg_urb(struct urb *urb) { return true; }
+static inline bool dbg_perio(void) { return true; }
+#else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
+static inline bool dbg_hc(struct dwc2_host_chan *hc)
+{
+ return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
+ hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
+}
+
+static inline bool dbg_qh(struct dwc2_qh *qh)
+{
+ return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
+ qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
+}
+
+static inline bool dbg_urb(struct urb *urb)
+{
+ return usb_pipetype(urb->pipe) == PIPE_BULK ||
+ usb_pipetype(urb->pipe) == PIPE_CONTROL;
+}
+
+static inline bool dbg_perio(void) { return false; }
+#endif
+
+/*
+ * Returns true if frame1 index is greater than frame2 index. The comparison
+ * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
+ * frame number when the max index frame number is reached.
+ */
+static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2)
+{
+ u16 diff = fr_idx1 - fr_idx2;
+ u16 sign = diff & (FRLISTEN_64_SIZE >> 1);
+
+ return diff && !sign;
+}
+
+/*
+ * Returns true if frame1 is less than or equal to frame2. The comparison is
+ * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
+ * frame number when the max frame number is reached.
+ */
+static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
+{
+ return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
+}
+
+/*
+ * Returns true if frame1 is greater than frame2. The comparison is done
+ * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
+ * number when the max frame number is reached.
+ */
+static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
+{
+ return (frame1 != frame2) &&
+ ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
+}
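+
+/*
+ * Worked example (editor's note, assuming the usual HFNUM_MAX_FRNUM of
+ * 0x3fff): comparing frame1 = 0x3ffe and frame2 = 0x0001 gives
+ * (frame2 - frame1) & 0x3fff == 0x0003 <= 0x1fff, so
+ * dwc2_frame_num_le(0x3ffe, 0x0001) is true, and likewise
+ * dwc2_frame_num_gt(0x0001, 0x3ffe) is true: 0x0001 is treated as "after"
+ * 0x3ffe across the rollover, as intended.
+ */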
+
+/*
+ * Increments frame by the amount specified by inc. The addition is done
+ * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
+ */
+static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
+{
+ return (frame + inc) & HFNUM_MAX_FRNUM;
+}
+
+static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
+{
+ return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
+}
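+
+/*
+ * Example (editor's note, same 0x3fff modulus): dwc2_frame_num_inc(0x3fff, 1)
+ * wraps to 0, and dwc2_frame_num_dec(0, 1) wraps back to 0x3fff.
+ */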
+
+static inline u16 dwc2_full_frame_num(u16 frame)
+{
+ return (frame & HFNUM_MAX_FRNUM) >> 3;
+}
+
+static inline u16 dwc2_micro_frame_num(u16 frame)
+{
+ return frame & 0x7;
+}
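+
+/*
+ * Example (editor's note): a 14-bit (micro)frame number of 0x0123 splits
+ * into full frame 0x24 (0x123 >> 3) and microframe 3 (0x123 & 0x7).
+ */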
+
+/*
+ * Returns the Core Interrupt Status register contents, ANDed with the Core
+ * Interrupt Mask register contents
+ */
+static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
+{
+ return dwc2_readl(hsotg, GINTSTS) &
+ dwc2_readl(hsotg, GINTMSK);
+}
+
+static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
+{
+ return dwc2_urb->status;
+}
+
+static inline u32 dwc2_hcd_urb_get_actual_length(
+ struct dwc2_hcd_urb *dwc2_urb)
+{
+ return dwc2_urb->actual_length;
+}
+
+static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
+{
+ return dwc2_urb->error_count;
+}
+
+static inline void dwc2_hcd_urb_set_iso_desc_params(
+ struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
+ u32 length)
+{
+ dwc2_urb->iso_descs[desc_num].offset = offset;
+ dwc2_urb->iso_descs[desc_num].length = length;
+}
+
+static inline u32 dwc2_hcd_urb_get_iso_desc_status(
+ struct dwc2_hcd_urb *dwc2_urb, int desc_num)
+{
+ return dwc2_urb->iso_descs[desc_num].status;
+}
+
+static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
+ struct dwc2_hcd_urb *dwc2_urb, int desc_num)
+{
+ return dwc2_urb->iso_descs[desc_num].actual_length;
+}
+
+static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
+ struct usb_host_endpoint *ep)
+{
+ struct dwc2_qh *qh = ep->hcpriv;
+
+ if (qh && !list_empty(&qh->qh_list_entry))
+ return 1;
+
+ return 0;
+}
+
+static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
+ struct usb_host_endpoint *ep)
+{
+ struct dwc2_qh *qh = ep->hcpriv;
+
+ if (!qh) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ return qh->host_us;
+}
+
+void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd);
+
+/* HCD Core API */
+
+/**
+ * dwc2_handle_hcd_intr() - Called on every hardware interrupt
+ *
+ * @hsotg: The DWC2 HCD
+ *
+ * Returns IRQ_HANDLED if the interrupt was handled,
+ * IRQ_NONE otherwise
+ */
+irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
+ *
+ * @hsotg: The DWC2 HCD
+ */
+void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
+ * and 0 otherwise
+ *
+ * @hsotg: The DWC2 HCD
+ */
+int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);
+
+/**
+ * dwc2_hcd_dump_state() - Dumps hsotg state
+ *
+ * @hsotg: The DWC2 HCD
+ *
+ * NOTE: This function will be removed once the peripheral controller code
+ * is integrated and the driver is stable
+ */
+void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg);
+
+/* URB interface */
+
+/* Transfer flags */
+#define URB_GIVEBACK_ASAP 0x1
+#define URB_SEND_ZERO_PACKET 0x2
+
+/* Host driver callbacks */
+struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
+ void *context, gfp_t mem_flags,
+ int *ttport);
+
+void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg,
+ struct dwc2_tt *dwc_tt);
+int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
+void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ int status);
+
+#endif /* __DWC2_HCD_H__ */
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
new file mode 100644
index 000000000..6b4d825e9
--- /dev/null
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * This file contains the Descriptor DMA implementation for Host mode
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+static u16 dwc2_frame_list_idx(u16 frame)
+{
+ return frame & (FRLISTEN_64_SIZE - 1);
+}
+
+static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
+{
+ return (idx + inc) &
+ ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
+ MAX_DMA_DESC_NUM_GENERIC) - 1);
+}
+
+static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
+{
+ return (idx - inc) &
+ ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
+ MAX_DMA_DESC_NUM_GENERIC) - 1);
+}
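+
+/*
+ * Example (editor's note, assuming the core.h defaults of 64 generic and
+ * 256 HS-isoc descriptors): both list sizes are powers of two, so the
+ * increment wraps with a simple mask, e.g.
+ * dwc2_desclist_idx_inc(250, 8, USB_SPEED_HIGH) == (250 + 8) & 255 == 2.
+ */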
+
+static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
+{
+ return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
+ qh->dev_speed == USB_SPEED_HIGH) ?
+ MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
+}
+
+static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
+{
+ return qh->dev_speed == USB_SPEED_HIGH ?
+ (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
+}
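+
+/*
+ * Example (editor's note): for a high-speed endpoint with a host_interval
+ * of 16 microframes, the frame-list increment is (16 + 8 - 1) / 8 == 2
+ * full frames; for FS/LS the interval is already in frames and is used
+ * as-is.
+ */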
+
+static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ gfp_t flags)
+{
+ struct kmem_cache *desc_cache;
+
+ if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
+ qh->dev_speed == USB_SPEED_HIGH)
+ desc_cache = hsotg->desc_hsisoc_cache;
+ else
+ desc_cache = hsotg->desc_gen_cache;
+
+ qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
+ dwc2_max_desc_num(qh);
+
+ qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
+ if (!qh->desc_list)
+ return -ENOMEM;
+
+ qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
+ qh->desc_list_sz,
+ DMA_TO_DEVICE);
+
+ qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
+ if (!qh->n_bytes) {
+ dma_unmap_single(hsotg->dev, qh->desc_list_dma,
+ qh->desc_list_sz,
+				 DMA_TO_DEVICE);
+ kmem_cache_free(desc_cache, qh->desc_list);
+ qh->desc_list = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ struct kmem_cache *desc_cache;
+
+ if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
+ qh->dev_speed == USB_SPEED_HIGH)
+ desc_cache = hsotg->desc_hsisoc_cache;
+ else
+ desc_cache = hsotg->desc_gen_cache;
+
+ if (qh->desc_list) {
+ dma_unmap_single(hsotg->dev, qh->desc_list_dma,
+				 qh->desc_list_sz, DMA_TO_DEVICE);
+ kmem_cache_free(desc_cache, qh->desc_list);
+ qh->desc_list = NULL;
+ }
+
+ kfree(qh->n_bytes);
+ qh->n_bytes = NULL;
+}
+
+static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
+{
+ if (hsotg->frame_list)
+ return 0;
+
+ hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
+ hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
+ if (!hsotg->frame_list)
+ return -ENOMEM;
+
+ hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
+ hsotg->frame_list_sz,
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ if (!hsotg->frame_list) {
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return;
+ }
+
+ dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
+			 hsotg->frame_list_sz, DMA_TO_DEVICE);
+
+ kfree(hsotg->frame_list);
+ hsotg->frame_list = NULL;
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
+{
+ u32 hcfg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ hcfg = dwc2_readl(hsotg, HCFG);
+ if (hcfg & HCFG_PERSCHEDENA) {
+ /* already enabled */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return;
+ }
+
+ dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);
+
+ hcfg &= ~HCFG_FRLISTEN_MASK;
+ hcfg |= fr_list_en | HCFG_PERSCHEDENA;
+ dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
+ dwc2_writel(hsotg, hcfg, HCFG);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
+{
+ u32 hcfg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ hcfg = dwc2_readl(hsotg, HCFG);
+ if (!(hcfg & HCFG_PERSCHEDENA)) {
+ /* already disabled */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return;
+ }
+
+ hcfg &= ~HCFG_PERSCHEDENA;
+ dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
+ dwc2_writel(hsotg, hcfg, HCFG);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/*
+ * Activates/Deactivates FrameList entries for the channel based on endpoint
+ * servicing period
+ */
+static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ int enable)
+{
+ struct dwc2_host_chan *chan;
+ u16 i, j, inc;
+
+ if (!hsotg) {
+ pr_err("hsotg = %p\n", hsotg);
+ return;
+ }
+
+ if (!qh->channel) {
+ dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
+ return;
+ }
+
+ if (!hsotg->frame_list) {
+ dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
+ hsotg->frame_list);
+ return;
+ }
+
+ chan = qh->channel;
+ inc = dwc2_frame_incr_val(qh);
+ if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
+ i = dwc2_frame_list_idx(qh->next_active_frame);
+ else
+ i = 0;
+
+ j = i;
+ do {
+ if (enable)
+ hsotg->frame_list[j] |= 1 << chan->hc_num;
+ else
+ hsotg->frame_list[j] &= ~(1 << chan->hc_num);
+ j = (j + inc) & (FRLISTEN_64_SIZE - 1);
+ } while (j != i);
+
+ /*
+ * Sync frame list since controller will access it if periodic
+ * channel is currently enabled.
+ */
+ dma_sync_single_for_device(hsotg->dev,
+ hsotg->frame_list_dma,
+ hsotg->frame_list_sz,
+ DMA_TO_DEVICE);
+
+ if (!enable)
+ return;
+
+ chan->schinfo = 0;
+ if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
+ j = 1;
+ /* TODO - check this */
+ inc = (8 + qh->host_interval - 1) / qh->host_interval;
+ for (i = 0; i < inc; i++) {
+ chan->schinfo |= j;
+ j = j << qh->host_interval;
+ }
+ } else {
+ chan->schinfo = 0xff;
+ }
+}
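+
+/*
+ * Worked example for the schinfo computation above (editor's note): for a
+ * high-speed endpoint with host_interval == 2 microframes, inc is
+ * (8 + 2 - 1) / 2 == 4, and the loop sets bits 0, 2, 4 and 6, giving
+ * schinfo == 0x55 (service in every other microframe).
+ */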
+
+static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ struct dwc2_host_chan *chan = qh->channel;
+
+ if (dwc2_qh_is_non_per(qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
+ else
+ hsotg->non_periodic_channels--;
+ } else {
+ dwc2_update_frame_list(hsotg, qh, 0);
+ hsotg->available_host_channels++;
+ }
+
+ /*
+	 * The condition is added to prevent a double cleanup attempt in case
+	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
+ */
+ if (chan->qh) {
+ if (!list_empty(&chan->hc_list_entry))
+ list_del(&chan->hc_list_entry);
+ dwc2_hc_cleanup(hsotg, chan);
+ list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
+ chan->qh = NULL;
+ }
+
+ qh->channel = NULL;
+ qh->ntd = 0;
+
+ if (qh->desc_list)
+ memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
+ dwc2_max_desc_num(qh));
+}
+
+/**
+ * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
+ * related members
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: The QH to init
+ * @mem_flags: Indicates the type of memory allocation
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * Allocates memory for the descriptor list. For the first periodic QH,
+ * allocates memory for the FrameList and enables periodic scheduling.
+ */
+int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ gfp_t mem_flags)
+{
+ int retval;
+
+ if (qh->do_split) {
+ dev_err(hsotg->dev,
+ "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
+ retval = -EINVAL;
+ goto err0;
+ }
+
+ retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
+ if (retval)
+ goto err0;
+
+ if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
+ qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ if (!hsotg->frame_list) {
+ retval = dwc2_frame_list_alloc(hsotg, mem_flags);
+ if (retval)
+ goto err1;
+ /* Enable periodic schedule on first periodic QH */
+ dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
+ }
+ }
+
+ qh->ntd = 0;
+ return 0;
+
+err1:
+ dwc2_desc_list_free(hsotg, qh);
+err0:
+ return retval;
+}
+
+/**
+ * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
+ * members
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: The QH to free
+ *
+ * Frees descriptor list memory associated with the QH. If QH is periodic and
+ * the last, frees FrameList memory and disables periodic scheduling.
+ */
+void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ unsigned long flags;
+
+ dwc2_desc_list_free(hsotg, qh);
+
+ /*
+	 * The channel may still be assigned in some cases, e.g. on an isoc
+	 * URB dequeue: the channel was halted but no subsequent ChHalted
+	 * interrupt arrived to release it, so when we later get here from
+	 * the endpoint disable routine the channel is still assigned.
+ */
+ spin_lock_irqsave(&hsotg->lock, flags);
+ if (qh->channel)
+ dwc2_release_channel_ddma(hsotg, qh);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
+ qh->ep_type == USB_ENDPOINT_XFER_INT) &&
+ (hsotg->params.uframe_sched ||
+ !hsotg->periodic_channels) && hsotg->frame_list) {
+ dwc2_per_sched_disable(hsotg);
+ dwc2_frame_list_free(hsotg);
+ }
+}
+
+static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
+{
+ if (qh->dev_speed == USB_SPEED_HIGH)
+ /* Descriptor set (8 descriptors) index which is 8-aligned */
+ return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
+ else
+ return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
+}
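+
+/*
+ * Example (editor's note, assuming 256 HS-isoc descriptors): for a
+ * high-speed device, frame_idx 35 maps to descriptor-set index
+ * (35 & 31) * 8 == 24, i.e. the start of the 8-descriptor set for that
+ * frame; for FS/LS the frame index is simply masked into the list.
+ */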
+
+/*
+ * Determine starting frame for Isochronous transfer.
+ * A few frames are skipped to prevent a race condition with the HC.
+ */
+static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, u16 *skip_frames)
+{
+ u16 frame;
+
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ /*
+ * next_active_frame is always frame number (not uFrame) both in FS
+ * and HS!
+ */
+
+ /*
+	 * skip_frames is used to limit the number of activated descriptors,
+	 * to avoid the situation where the HC services the last activated
+	 * descriptor first.
+	 * Example for FS:
+	 * The current frame is 1 and the scheduled frame is 3. Since the HC
+	 * always fetches the descriptor corresponding to curr_frame+1, the
+	 * descriptor corresponding to frame 2 will be fetched. If the number
+	 * of descriptors is max=64 (or greater) the list will be fully
+	 * programmed with Active descriptors, and it is possible (though
+	 * rare) that the latest descriptor (considering rollback)
+	 * corresponding to frame 2 will be serviced first. The HS case is
+	 * more probable because, in fact, up to 11 uframes (16 in the code)
+	 * may be skipped.
+ */
+ if (qh->dev_speed == USB_SPEED_HIGH) {
+ /*
+		 * Consider the uframe counter as well, to start the transfer
+		 * ASAP. If half of the frame has elapsed, skip 2 frames,
+		 * otherwise just 1 frame.
+ * Starting descriptor index must be 8-aligned, so if the
+ * current frame is near to complete the next one is skipped as
+ * well.
+ */
+ if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
+ *skip_frames = 2 * 8;
+ frame = dwc2_frame_num_inc(hsotg->frame_number,
+ *skip_frames);
+ } else {
+ *skip_frames = 1 * 8;
+ frame = dwc2_frame_num_inc(hsotg->frame_number,
+ *skip_frames);
+ }
+
+ frame = dwc2_full_frame_num(frame);
+ } else {
+ /*
+ * Two frames are skipped for FS - the current and the next.
+ * But for descriptor programming, 1 frame (descriptor) is
+ * enough, see example above.
+ */
+ *skip_frames = 1;
+ frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
+ }
+
+ return frame;
+}
+
+/*
+ * Calculate initial descriptor index for isochronous transfer based on
+ * scheduled frame
+ */
+static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ u16 frame, fr_idx, fr_idx_tmp, skip_frames;
+
+ /*
+	 * With the current ISOC processing algorithm the channel is released
+	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
+	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
+	 *
+	 * So the qh->channel != NULL branch is not exercised; it is kept in
+	 * the source for another possible approach: do not disable and
+	 * release the channel when the ISOC session completes, just move the
+	 * QH to the inactive schedule until a new QTD arrives. On a new QTD,
+	 * the QH is moved back to the 'ready' schedule, and the starting
+	 * frame and therefore the starting desc_index are recalculated. In
+	 * that case the channel is released only on ep_disable.
+ */
+
+ /*
+ * Calculate starting descriptor index. For INTERRUPT endpoint it is
+ * always 0.
+ */
+ if (qh->channel) {
+ frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
+ /*
+ * Calculate initial descriptor index based on FrameList current
+ * bitmap and servicing period
+ */
+ fr_idx_tmp = dwc2_frame_list_idx(frame);
+ fr_idx = (FRLISTEN_64_SIZE +
+ dwc2_frame_list_idx(qh->next_active_frame) -
+ fr_idx_tmp) % dwc2_frame_incr_val(qh);
+ fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
+ } else {
+ qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
+ &skip_frames);
+ fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
+ }
+
+ qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
+
+ return skip_frames;
+}
+
+#define ISOC_URB_GIVEBACK_ASAP
+
+#define MAX_ISOC_XFER_SIZE_FS 1023
+#define MAX_ISOC_XFER_SIZE_HS 3072
+#define DESCNUM_THRESHOLD 4
+
+static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_qtd *qtd,
+ struct dwc2_qh *qh, u32 max_xfer_size,
+ u16 idx)
+{
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+ memset(dma_desc, 0, sizeof(*dma_desc));
+ frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+
+ if (frame_desc->length > max_xfer_size)
+ qh->n_bytes[idx] = max_xfer_size;
+ else
+ qh->n_bytes[idx] = frame_desc->length;
+
+ dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
+ dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
+ HOST_DMA_ISOC_NBYTES_MASK;
+
+ /* Set active bit */
+ dma_desc->status |= HOST_DMA_A;
+
+ qh->ntd++;
+ qtd->isoc_frame_index_last++;
+
+#ifdef ISOC_URB_GIVEBACK_ASAP
+ /* Set IOC for each descriptor corresponding to last frame of URB */
+ if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
+ dma_desc->status |= HOST_DMA_IOC;
+#endif
+
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma +
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+}
+
+static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, u16 skip_frames)
+{
+ struct dwc2_qtd *qtd;
+ u32 max_xfer_size;
+ u16 idx, inc, n_desc = 0, ntd_max = 0;
+ u16 cur_idx;
+ u16 next_idx;
+
+ idx = qh->td_last;
+ inc = qh->host_interval;
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+ cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
+
+ /*
+	 * Ensure the current frame number didn't overstep the last scheduled
+	 * descriptor. If it did, the only way to recover is to move
+	 * qh->td_last to the current frame number + 1, so that the next isoc
+	 * descriptor is scheduled on frame number + 1 and not on a past
+	 * frame.
+ */
+ if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
+ if (inc < 32) {
+ dev_vdbg(hsotg->dev,
+ "current frame number overstep last descriptor\n");
+ qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
+ qh->dev_speed);
+ idx = qh->td_last;
+ }
+ }
+
+ if (qh->host_interval) {
+ ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
+ qh->host_interval;
+ if (skip_frames && !qh->channel)
+ ntd_max -= skip_frames / qh->host_interval;
+ }
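+	/*
+	 * For example, assuming a 64-entry descriptor list and
+	 * host_interval 4: ntd_max = (64 + 3) / 4 = 16 transfer
+	 * descriptors; if 8 frames were skipped at session start, 2 of
+	 * those slots are given up.
+	 */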
+
+ max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
+ MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
+
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
+ if (qtd->in_process &&
+ qtd->isoc_frame_index_last ==
+ qtd->urb->packet_count)
+ continue;
+
+ qtd->isoc_td_first = idx;
+ while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
+ qtd->urb->packet_count) {
+ dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
+ max_xfer_size, idx);
+ idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
+ n_desc++;
+ }
+ qtd->isoc_td_last = idx;
+ qtd->in_process = 1;
+ }
+
+ qh->td_last = idx;
+
+#ifdef ISOC_URB_GIVEBACK_ASAP
+ /* Set IOC for last descriptor if descriptor list is full */
+ if (qh->ntd == ntd_max) {
+ idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
+ qh->desc_list[idx].status |= HOST_DMA_IOC;
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma + (idx *
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+ }
+#else
+ /*
+	 * Set the IOC bit on only one descriptor. Always try to stay ahead
+	 * of HW processing, i.e. on IOC generation the driver activates the
+	 * next descriptor while the core continues to process descriptors
+	 * following the one with IOC set.
+ */
+
+ if (n_desc > DESCNUM_THRESHOLD)
+ /*
+		 * Move the IOC "up". Required even if there is only one QTD
+		 * in the list, because QTDs might continue to be queued while
+		 * only one was queued at activation time. In fact, more than
+		 * one QTD might be in the list if this function is called
+		 * from XferCompletion - QTDs were queued during HW processing
+		 * of the previous descriptor chunk.
+ */
+ idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
+ qh->dev_speed);
+ else
+ /*
+		 * Set the IOC on the latest descriptor if the number of
+		 * descriptors is not greater than the threshold, or if no
+		 * new descriptors were activated.
+ */
+ idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
+
+ qh->desc_list[idx].status |= HOST_DMA_IOC;
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma +
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+#endif
+}
+
+static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd, struct dwc2_qh *qh,
+ int n_desc)
+{
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
+ int len = chan->xfer_len;
+
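+	/*
+	 * Clamp the length so that, even after it is rounded up to whole
+	 * packets for IN transfers below, it still fits in the descriptor's
+	 * NBYTES field.
+	 */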
+ if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
+ len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
+
+ if (chan->ep_is_in) {
+ int num_packets;
+
+ if (len > 0 && chan->max_packet)
+ num_packets = (len + chan->max_packet - 1)
+ / chan->max_packet;
+ else
+ /* Need 1 packet for transfer length of 0 */
+ num_packets = 1;
+
+ /* Always program an integral # of packets for IN transfers */
+ len = num_packets * chan->max_packet;
+ }
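+	/*
+	 * For example, an IN request of 1000 bytes with max_packet 512 is
+	 * programmed as 2 packets / 1024 bytes; the remainder reported by
+	 * the core is later used to compute the actual length.
+	 */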
+
+ dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
+ qh->n_bytes[n_desc] = len;
+
+ if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+ qtd->control_phase == DWC2_CONTROL_SETUP)
+ dma_desc->status |= HOST_DMA_SUP;
+
+ dma_desc->buf = (u32)chan->xfer_dma;
+
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma +
+ (n_desc * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+
+ /*
+	 * Last (or only) descriptor of an IN transfer with an actual size
+	 * less than MaxPacket
+ */
+ if (len > chan->xfer_len) {
+ chan->xfer_len = 0;
+ } else {
+ chan->xfer_dma += len;
+ chan->xfer_len -= len;
+ }
+}
+
+static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ struct dwc2_qtd *qtd;
+ struct dwc2_host_chan *chan = qh->channel;
+ int n_desc = 0;
+
+ dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
+ (unsigned long)chan->xfer_dma, chan->xfer_len);
+
+ /*
+	 * Start with chan->xfer_dma initialized in assign_and_init_hc();
+	 * then, if the SG transfer consists of multiple URBs, this pointer is
+	 * reassigned to the buffer of the currently processed QTD. For a
+	 * non-SG request there is always exactly one QTD active.
+ */
+
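+	/*
+	 * Note the activation order below: descriptors after the first get
+	 * their A (active) bit set as the list is built, but descriptor 0 is
+	 * armed last, only after IOC/EOL/A have been set on the tail, so the
+	 * core never starts fetching a half-built chain.
+	 */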
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
+ dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
+
+ if (n_desc) {
+ /* SG request - more than 1 QTD */
+ chan->xfer_dma = qtd->urb->dma +
+ qtd->urb->actual_length;
+ chan->xfer_len = qtd->urb->length -
+ qtd->urb->actual_length;
+ dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
+ (unsigned long)chan->xfer_dma, chan->xfer_len);
+ }
+
+ qtd->n_desc = 0;
+ do {
+ if (n_desc > 1) {
+ qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
+ dev_vdbg(hsotg->dev,
+ "set A bit in desc %d (%p)\n",
+ n_desc - 1,
+ &qh->desc_list[n_desc - 1]);
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma +
+ ((n_desc - 1) *
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+ }
+ dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
+ dev_vdbg(hsotg->dev,
+ "desc %d (%p) buf=%08x status=%08x\n",
+ n_desc, &qh->desc_list[n_desc],
+ qh->desc_list[n_desc].buf,
+ qh->desc_list[n_desc].status);
+ qtd->n_desc++;
+ n_desc++;
+ } while (chan->xfer_len > 0 &&
+ n_desc != MAX_DMA_DESC_NUM_GENERIC);
+
+ dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
+ qtd->in_process = 1;
+ if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
+ break;
+ if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
+ break;
+ }
+
+ if (n_desc) {
+ qh->desc_list[n_desc - 1].status |=
+ HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
+ dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
+ n_desc - 1, &qh->desc_list[n_desc - 1]);
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma + (n_desc - 1) *
+ sizeof(struct dwc2_dma_desc),
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+ if (n_desc > 1) {
+ qh->desc_list[0].status |= HOST_DMA_A;
+ dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
+ &qh->desc_list[0]);
+ dma_sync_single_for_device(hsotg->dev,
+ qh->desc_list_dma,
+ sizeof(struct dwc2_dma_desc),
+ DMA_TO_DEVICE);
+ }
+ chan->ntd = n_desc;
+ }
+}
+
+/**
+ * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: The QH to init
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * For Control and Bulk endpoints, initializes descriptor list and starts the
+ * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
+ * list then updates FrameList, marking appropriate entries as active.
+ *
+ * For Isochronous endpoints the starting descriptor index is calculated based
+ * on the scheduled frame, but only on the first transfer descriptor within a
+ * session. Then the transfer is started via enabling the channel.
+ *
+ * For Isochronous endpoints the channel is not halted on the XferComplete
+ * interrupt, so it remains assigned to the endpoint (QH) until the session
+ * is done.
+ */
+void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* Channel is already assigned */
+ struct dwc2_host_chan *chan = qh->channel;
+ u16 skip_frames = 0;
+
+ switch (chan->ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ dwc2_init_non_isoc_dma_desc(hsotg, qh);
+ dwc2_hc_start_transfer_ddma(hsotg, chan);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ dwc2_init_non_isoc_dma_desc(hsotg, qh);
+ dwc2_update_frame_list(hsotg, qh, 1);
+ dwc2_hc_start_transfer_ddma(hsotg, chan);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!qh->ntd)
+ skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
+ dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
+
+ if (!chan->xfer_started) {
+ dwc2_update_frame_list(hsotg, qh, 1);
+
+ /*
+			 * Always set ntd to the max, instead of the actual
+			 * size; otherwise ntd would have to be changed while
+			 * the channel is enabled, which is not recommended.
+ */
+ chan->ntd = dwc2_max_desc_num(qh);
+
+ /* Enable channel only once for ISOC */
+ dwc2_hc_start_transfer_ddma(hsotg, chan);
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+#define DWC2_CMPL_DONE 1
+#define DWC2_CMPL_STOP 2
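+
+/*
+ * dwc2_cmpl_host_isoc_dma_desc() returns 0 to keep scanning, DWC2_CMPL_DONE
+ * when the QTD's URB has been completed, DWC2_CMPL_STOP when a descriptor
+ * with the IOC bit set has been reached, and a negative value when the scan
+ * must be aborted.
+ */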
+
+static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd,
+ struct dwc2_qh *qh, u16 idx)
+{
+ struct dwc2_dma_desc *dma_desc;
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+ u16 remain = 0;
+ int rc = 0;
+
+ if (!qtd->urb)
+ return -EINVAL;
+
+ dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_FROM_DEVICE);
+
+ dma_desc = &qh->desc_list[idx];
+
+ frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+ dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
+ if (chan->ep_is_in)
+ remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
+ HOST_DMA_ISOC_NBYTES_SHIFT;
+
+ if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
+ /*
+ * XactError, or unable to complete all the transactions
+ * in the scheduled micro-frame/frame, both indicated by
+ * HOST_DMA_STS_PKTERR
+ */
+ qtd->urb->error_count++;
+ frame_desc->actual_length = qh->n_bytes[idx] - remain;
+ frame_desc->status = -EPROTO;
+ } else {
+ /* Success */
+ frame_desc->actual_length = qh->n_bytes[idx] - remain;
+ frame_desc->status = 0;
+ }
+
+ if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+ /*
+ * urb->status is not used for isoc transfers here. The
+		 * individual frame_desc statuses are used instead.
+ */
+ dwc2_host_complete(hsotg, qtd, 0);
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+
+ /*
+		 * This check is necessary because urb_dequeue can be called
+		 * from the urb complete callback (in the sound driver, for
+		 * example). All pending URBs are dequeued there, so no
+		 * further processing is needed.
+ */
+ if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
+ return -1;
+ rc = DWC2_CMPL_DONE;
+ }
+
+ qh->ntd--;
+
+ /* Stop if IOC requested descriptor reached */
+ if (dma_desc->status & HOST_DMA_IOC)
+ rc = DWC2_CMPL_STOP;
+
+ return rc;
+}
+
+static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ enum dwc2_halt_status halt_status)
+{
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+ struct dwc2_qtd *qtd, *qtd_tmp;
+ struct dwc2_qh *qh;
+ u16 idx;
+ int rc;
+
+ qh = chan->qh;
+ idx = qh->td_first;
+
+ if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
+ qtd->in_process = 0;
+ return;
+ }
+
+ if (halt_status == DWC2_HC_XFER_AHB_ERR ||
+ halt_status == DWC2_HC_XFER_BABBLE_ERR) {
+ /*
+		 * The channel is halted in these error cases, which are
+		 * considered serious issues.
+		 * Complete all URBs, marking all frames as failed, irrespective
+		 * of whether some of the descriptors (frames) succeeded or not.
+		 * Pass the error code to the completion routine as well, to
+		 * update urb->status; some class drivers might use it to stop
+		 * queuing transfer requests.
+ */
+ int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
+ -EIO : -EOVERFLOW;
+
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
+ qtd_list_entry) {
+ if (qtd->urb) {
+ for (idx = 0; idx < qtd->urb->packet_count;
+ idx++) {
+ frame_desc = &qtd->urb->iso_descs[idx];
+ frame_desc->status = err;
+ }
+
+ dwc2_host_complete(hsotg, qtd, err);
+ }
+
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+ }
+
+ return;
+ }
+
+ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
+ if (!qtd->in_process)
+ break;
+
+ /*
+		 * Ensure idx corresponds to the descriptor where the first URB
+		 * of this QTD was added. In fact, during isoc desc init, dwc2
+		 * may skip an index if the current frame number is already past
+		 * it.
+ */
+ if (idx != qtd->isoc_td_first) {
+ dev_vdbg(hsotg->dev,
+ "try to complete %d instead of %d\n",
+ idx, qtd->isoc_td_first);
+ idx = qtd->isoc_td_first;
+ }
+
+ do {
+ struct dwc2_qtd *qtd_next;
+ u16 cur_idx;
+
+ rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
+ idx);
+ if (rc < 0)
+ return;
+ idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
+ chan->speed);
+ if (!rc)
+ continue;
+
+ if (rc == DWC2_CMPL_DONE)
+ break;
+
+ /* rc == DWC2_CMPL_STOP */
+
+ if (qh->host_interval >= 32)
+ goto stop_scan;
+
+ qh->td_first = idx;
+ cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ qtd_next = list_first_entry(&qh->qtd_list,
+ struct dwc2_qtd,
+ qtd_list_entry);
+ if (dwc2_frame_idx_num_gt(cur_idx,
+ qtd_next->isoc_td_last))
+ break;
+
+ goto stop_scan;
+
+ } while (idx != qh->td_first);
+ }
+
+stop_scan:
+ qh->td_first = idx;
+}
+
+static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd,
+ struct dwc2_dma_desc *dma_desc,
+ enum dwc2_halt_status halt_status,
+ u32 n_bytes, int *xfer_done)
+{
+ struct dwc2_hcd_urb *urb = qtd->urb;
+ u16 remain = 0;
+
+ if (chan->ep_is_in)
+ remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
+ HOST_DMA_NBYTES_SHIFT;
+
+ dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
+
+ if (halt_status == DWC2_HC_XFER_AHB_ERR) {
+ dev_err(hsotg->dev, "EIO\n");
+ urb->status = -EIO;
+ return 1;
+ }
+
+ if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
+ switch (halt_status) {
+ case DWC2_HC_XFER_STALL:
+ dev_vdbg(hsotg->dev, "Stall\n");
+ urb->status = -EPIPE;
+ break;
+ case DWC2_HC_XFER_BABBLE_ERR:
+ dev_err(hsotg->dev, "Babble\n");
+ urb->status = -EOVERFLOW;
+ break;
+ case DWC2_HC_XFER_XACT_ERR:
+ dev_err(hsotg->dev, "XactErr\n");
+ urb->status = -EPROTO;
+ break;
+ default:
+ dev_err(hsotg->dev,
+ "%s: Unhandled descriptor error status (%d)\n",
+ __func__, halt_status);
+ break;
+ }
+ return 1;
+ }
+
+ if (dma_desc->status & HOST_DMA_A) {
+ dev_vdbg(hsotg->dev,
+ "Active descriptor encountered on channel %d\n",
+ chan->hc_num);
+ return 0;
+ }
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
+ if (qtd->control_phase == DWC2_CONTROL_DATA) {
+ urb->actual_length += n_bytes - remain;
+ if (remain || urb->actual_length >= urb->length) {
+ /*
+				 * For the Control Data stage do not set
+				 * urb->status to 0, to prevent the URB
+				 * callback. Set it when the Status phase is
+				 * done; see below.
+ */
+ *xfer_done = 1;
+ }
+ } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
+ urb->status = 0;
+ *xfer_done = 1;
+ }
+ /* No handling for SETUP stage */
+ } else {
+ /* BULK and INTR */
+ urb->actual_length += n_bytes - remain;
+ dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
+ urb->actual_length);
+ if (remain || urb->actual_length >= urb->length) {
+ urb->status = 0;
+ *xfer_done = 1;
+ }
+ }
+
+ return 0;
+}
+
+static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ int chnum, struct dwc2_qtd *qtd,
+ int desc_num,
+ enum dwc2_halt_status halt_status,
+ int *xfer_done)
+{
+ struct dwc2_qh *qh = chan->qh;
+ struct dwc2_hcd_urb *urb = qtd->urb;
+ struct dwc2_dma_desc *dma_desc;
+ u32 n_bytes;
+ int failed;
+
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ if (!urb)
+ return -EINVAL;
+
+ dma_sync_single_for_cpu(hsotg->dev,
+ qh->desc_list_dma + (desc_num *
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_FROM_DEVICE);
+
+ dma_desc = &qh->desc_list[desc_num];
+ n_bytes = qh->n_bytes[desc_num];
+ dev_vdbg(hsotg->dev,
+ "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
+ qtd, urb, desc_num, dma_desc, n_bytes);
+ failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
+ halt_status, n_bytes,
+ xfer_done);
+ if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
+ dwc2_host_complete(hsotg, qtd, urb->status);
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+ dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+ failed, *xfer_done);
+ return failed;
+ }
+
+ if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
+ switch (qtd->control_phase) {
+ case DWC2_CONTROL_SETUP:
+ if (urb->length > 0)
+ qtd->control_phase = DWC2_CONTROL_DATA;
+ else
+ qtd->control_phase = DWC2_CONTROL_STATUS;
+ dev_vdbg(hsotg->dev,
+ " Control setup transaction done\n");
+ break;
+ case DWC2_CONTROL_DATA:
+ if (*xfer_done) {
+ qtd->control_phase = DWC2_CONTROL_STATUS;
+ dev_vdbg(hsotg->dev,
+ " Control data transfer done\n");
+ } else if (desc_num + 1 == qtd->n_desc) {
+ /*
+				 * Last descriptor of a Control data stage that
+				 * is not yet complete
+ */
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
+ qtd);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ int chnum,
+ enum dwc2_halt_status halt_status)
+{
+ struct list_head *qtd_item, *qtd_tmp;
+ struct dwc2_qh *qh = chan->qh;
+ struct dwc2_qtd *qtd = NULL;
+ int xfer_done;
+ int desc_num = 0;
+
+ if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
+ qtd->in_process = 0;
+ return;
+ }
+
+ list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
+ int i;
+ int qtd_desc_count;
+
+ qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
+ xfer_done = 0;
+ qtd_desc_count = qtd->n_desc;
+
+ for (i = 0; i < qtd_desc_count; i++) {
+ if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
+ desc_num, halt_status,
+ &xfer_done)) {
+ qtd = NULL;
+ goto stop_scan;
+ }
+
+ desc_num++;
+ }
+ }
+
+stop_scan:
+ if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+ /*
+		 * Reset the data toggle for bulk and interrupt endpoints in
+		 * case of a stall. See dwc2_hc_stall_intr().
+ */
+ if (halt_status == DWC2_HC_XFER_STALL)
+ qh->data_toggle = DWC2_HC_PID_DATA0;
+ else
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
+ }
+
+ if (halt_status == DWC2_HC_XFER_COMPLETE) {
+ if (chan->hcint & HCINTMSK_NYET) {
+ /*
+ * Got a NYET on the last transaction of the transfer.
+ * It means that the endpoint should be in the PING
+ * state at the beginning of the next transfer.
+ */
+ qh->ping_state = 1;
+ }
+ }
+}
+
+/**
+ * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
+ * status and calls completion routine for the URB if it's done. Called from
+ * interrupt handlers.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @chan: Host channel the transfer is completed on
+ * @chnum: Index of Host channel registers
+ * @halt_status: Reason the channel is being halted or just XferComplete
+ * for isochronous transfers
+ *
+ * Releases the channel to be used by other transfers.
+ * In the case of an Isochronous endpoint the channel is not halted until the
+ * end of the session, i.e. until the QTD list is empty.
+ * If a periodic channel is released, the FrameList is updated accordingly.
+ * Calls the transaction selection routines to activate pending transfers.
+ */
+void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ enum dwc2_halt_status halt_status)
+{
+ struct dwc2_qh *qh = chan->qh;
+ int continue_isoc_xfer = 0;
+ enum dwc2_transaction_type tr_type;
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
+
+ /* Release the channel if halted or session completed */
+ if (halt_status != DWC2_HC_XFER_COMPLETE ||
+ list_empty(&qh->qtd_list)) {
+ struct dwc2_qtd *qtd, *qtd_tmp;
+
+ /*
+			 * Kill all remaining QTDs since the channel has been
+			 * halted.
+ */
+ list_for_each_entry_safe(qtd, qtd_tmp,
+ &qh->qtd_list,
+ qtd_list_entry) {
+ dwc2_host_complete(hsotg, qtd,
+ -ECONNRESET);
+ dwc2_hcd_qtd_unlink_and_free(hsotg,
+ qtd, qh);
+ }
+
+ /* Halt the channel if session completed */
+ if (halt_status == DWC2_HC_XFER_COMPLETE)
+ dwc2_hc_halt(hsotg, chan, halt_status);
+ dwc2_release_channel_ddma(hsotg, qh);
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ } else {
+ /* Keep in assigned schedule to continue transfer */
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
+ /*
+			 * If the channel was halted during URB giveback then
+			 * prevent any new scheduling.
+ */
+ if (!chan->halt_status)
+ continue_isoc_xfer = 1;
+ }
+ /*
+		 * Todo: Consider the case when the period exceeds the
+		 * FrameList size. The Frame Rollover interrupt should be used.
+ */
+ } else {
+ /*
+ * Scan descriptor list to complete the URB(s), then release
+ * the channel
+ */
+ dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
+ halt_status);
+ dwc2_release_channel_ddma(hsotg, qh);
+ dwc2_hcd_qh_unlink(hsotg, qh);
+
+ if (!list_empty(&qh->qtd_list)) {
+ /*
+ * Add back to inactive non-periodic schedule on normal
+ * completion
+ */
+ dwc2_hcd_qh_add(hsotg, qh);
+ }
+ }
+
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
+ if (continue_isoc_xfer) {
+ if (tr_type == DWC2_TRANSACTION_NONE)
+ tr_type = DWC2_TRANSACTION_PERIODIC;
+ else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
+ tr_type = DWC2_TRANSACTION_ALL;
+ }
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+ }
+}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
new file mode 100644
index 000000000..9e85cbb0c
--- /dev/null
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -0,0 +1,2264 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * This file contains the interrupt handlers for Host mode
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/*
+ * If we get this many NAKs on a split transaction we'll slow down
+ * retransmission. A 1 here means delay after the first NAK.
+ */
+#define DWC2_NAKS_BEFORE_DELAY 3
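+/* With the default of 3, the first two NAKs are retried without delay. */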
+
+/* This function is for debug only */
+static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
+{
+ u16 curr_frame_number = hsotg->frame_number;
+ u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
+
+ if (expected != curr_frame_number)
+ dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
+ expected, curr_frame_number);
+
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
+ if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+ if (expected != curr_frame_number) {
+ hsotg->frame_num_array[hsotg->frame_num_idx] =
+ curr_frame_number;
+ hsotg->last_frame_num_array[hsotg->frame_num_idx] =
+ hsotg->last_frame_num;
+ hsotg->frame_num_idx++;
+ }
+ } else if (!hsotg->dumped_frame_num_array) {
+ int i;
+
+ dev_info(hsotg->dev, "Frame Last Frame\n");
+ dev_info(hsotg->dev, "----- ----------\n");
+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+ dev_info(hsotg->dev, "0x%04x 0x%04x\n",
+ hsotg->frame_num_array[i],
+ hsotg->last_frame_num_array[i]);
+ }
+ hsotg->dumped_frame_num_array = 1;
+ }
+#endif
+ hsotg->last_frame_num = curr_frame_number;
+}
+
+static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd)
+{
+ struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
+ struct urb *usb_urb;
+
+ if (!chan->qh)
+ return;
+
+ if (chan->qh->dev_speed == USB_SPEED_HIGH)
+ return;
+
+ if (!qtd->urb)
+ return;
+
+ usb_urb = qtd->urb->priv;
+ if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
+ return;
+
+ /*
+ * The root hub doesn't really have a TT, but Linux thinks it
+ * does because how could you have a "high speed hub" that
+	 * talks directly to low speed devices without a TT?
+ * It's all lies. Lies, I tell you.
+ */
+ if (usb_urb->dev->tt->hub == root_hub)
+ return;
+
+ if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
+ chan->qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(usb_urb))
+ /* Clear failed; let's hope things work anyway */
+ chan->qh->tt_buffer_dirty = 0;
+ }
+}
+
+/*
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller
+ * for the next (micro)frame.
+ */
+static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
+{
+ struct list_head *qh_entry;
+ struct dwc2_qh *qh;
+ enum dwc2_transaction_type tr_type;
+
+ /* Clear interrupt */
+ dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);
+
+#ifdef DEBUG_SOF
+ dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
+#endif
+
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ dwc2_track_missed_sofs(hsotg);
+
+ /* Determine whether any periodic QHs should be executed */
+ qh_entry = hsotg->periodic_sched_inactive.next;
+ while (qh_entry != &hsotg->periodic_sched_inactive) {
+ qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
+ qh_entry = qh_entry->next;
+ if (dwc2_frame_num_le(qh->next_active_frame,
+ hsotg->frame_number)) {
+ dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
+ qh, hsotg->frame_number,
+ qh->next_active_frame);
+
+ /*
+ * Move QH to the ready list to be executed next
+ * (micro)frame
+ */
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_ready);
+ }
+ }
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE)
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+}
+
+/*
+ * Handles the Rx FIFO Level Interrupt, which indicates that there is
+ * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode.
+ */
+static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 grxsts, chnum, bcnt, dpid, pktsts;
+ struct dwc2_host_chan *chan;
+
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
+
+ grxsts = dwc2_readl(hsotg, GRXSTSP);
+ chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
+ chan = hsotg->hc_ptr_array[chnum];
+ if (!chan) {
+ dev_err(hsotg->dev, "Unable to get corresponding channel\n");
+ return;
+ }
+
+ bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
+ dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
+ pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
+
+ /* Packet Status */
+ if (dbg_perio()) {
+ dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
+ dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
+ dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid,
+ chan->data_pid_start);
+ dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
+ }
+
+ switch (pktsts) {
+ case GRXSTS_PKTSTS_HCHIN:
+ /* Read the data into the host buffer */
+ if (bcnt > 0) {
+ dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
+
+ /* Update the HC fields for the next packet received */
+ chan->xfer_count += bcnt;
+ chan->xfer_buf += bcnt;
+ }
+ break;
+ case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
+ case GRXSTS_PKTSTS_DATATOGGLEERR:
+ case GRXSTS_PKTSTS_HCHHALTED:
+ /* Handled in interrupt, just ignore data */
+ break;
+ default:
+ dev_err(hsotg->dev,
+ "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
+ break;
+ }
+}
+
+/*
+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode.
+ */
+static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
+{
+ dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+ dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
+}
+
+/*
+ * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode.
+ */
+static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
+{
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
+ dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
+}
+
+static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
+ u32 *hprt0_modify)
+{
+ struct dwc2_core_params *params = &hsotg->params;
+ int do_reset = 0;
+ u32 usbcfg;
+ u32 prtspd;
+ u32 hcfg;
+ u32 fslspclksel;
+ u32 hfir;
+
+ dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
+	/* Every time the port is enabled, recalculate HFIR.FrInterval */
+ hfir = dwc2_readl(hsotg, HFIR);
+ hfir &= ~HFIR_FRINT_MASK;
+ hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
+ HFIR_FRINT_MASK;
+ dwc2_writel(hsotg, hfir, HFIR);
+
+ /* Check if we need to adjust the PHY clock speed for low power */
+ if (!params->host_support_fs_ls_low_power) {
+ /* Port has been enabled, set the reset change flag */
+ hsotg->flags.b.port_reset_change = 1;
+ return;
+ }
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+
+ if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
+ /* Low power */
+ if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
+ /* Set PHY low power clock select for FS/LS devices */
+ usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ do_reset = 1;
+ }
+
+ hcfg = dwc2_readl(hsotg, HCFG);
+ fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
+ HCFG_FSLSPCLKSEL_SHIFT;
+
+ if (prtspd == HPRT0_SPD_LOW_SPEED &&
+ params->host_ls_low_power_phy_clk) {
+ /* 6 MHZ */
+ dev_vdbg(hsotg->dev,
+ "FS_PHY programming HCFG to 6 MHz\n");
+ if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
+ fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
+ hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+ hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
+ dwc2_writel(hsotg, hcfg, HCFG);
+ do_reset = 1;
+ }
+ } else {
+ /* 48 MHZ */
+ dev_vdbg(hsotg->dev,
+ "FS_PHY programming HCFG to 48 MHz\n");
+ if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
+ fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
+ hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+ hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
+ dwc2_writel(hsotg, hcfg, HCFG);
+ do_reset = 1;
+ }
+ }
+ } else {
+ /* Not low power */
+ if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
+ usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+ do_reset = 1;
+ }
+ }
+
+ if (do_reset) {
+ *hprt0_modify |= HPRT0_RST;
+ dwc2_writel(hsotg, *hprt0_modify, HPRT0);
+ queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
+ msecs_to_jiffies(60));
+ } else {
+ /* Port has been enabled, set the reset change flag */
+ hsotg->flags.b.port_reset_change = 1;
+ }
+}
+
+/*
+ * There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately.
+ */
+static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0;
+ u32 hprt0_modify;
+
+ dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
+
+ hprt0 = dwc2_readl(hsotg, HPRT0);
+ hprt0_modify = hprt0;
+
+ /*
+ * Clear appropriate bits in HPRT0 to clear the interrupt bit in
+ * GINTSTS
+ */
+ hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
+ HPRT0_OVRCURRCHG);
+
+ /*
+ * Port Connect Detected
+ * Set flag and clear if detected
+ */
+ if (hprt0 & HPRT0_CONNDET) {
+ dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);
+
+ dev_vdbg(hsotg->dev,
+ "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
+ hprt0);
+ dwc2_hcd_connect(hsotg);
+
+ /*
+		 * The hub driver asserts a reset when it sees the port
+		 * connect status change flag
+ */
+ }
+
+ /*
+ * Port Enable Changed
+ * Clear if detected - Set internal flag if disabled
+ */
+ if (hprt0 & HPRT0_ENACHG) {
+ dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
+ dev_vdbg(hsotg->dev,
+ " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
+ hprt0, !!(hprt0 & HPRT0_ENA));
+ if (hprt0 & HPRT0_ENA) {
+ hsotg->new_connection = true;
+ dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
+ } else {
+ hsotg->flags.b.port_enable_change = 1;
+ if (hsotg->params.dma_desc_fs_enable) {
+ u32 hcfg;
+
+ hsotg->params.dma_desc_enable = false;
+ hsotg->new_connection = false;
+ hcfg = dwc2_readl(hsotg, HCFG);
+ hcfg &= ~HCFG_DESCDMA;
+ dwc2_writel(hsotg, hcfg, HCFG);
+ }
+ }
+ }
+
+ /* Overcurrent Change Interrupt */
+ if (hprt0 & HPRT0_OVRCURRCHG) {
+ dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
+ HPRT0);
+ dev_vdbg(hsotg->dev,
+ " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
+ hprt0);
+ hsotg->flags.b.port_over_current_change = 1;
+ }
+}
+
+/*
+ * Gets the actual length of a transfer after the transfer halts. halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
+ * is set to 1 upon return if less than the requested number of bytes were
+ * transferred. short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status,
+ int *short_read)
+{
+ u32 hctsiz, count, length;
+
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+
+ if (halt_status == DWC2_HC_XFER_COMPLETE) {
+ if (chan->ep_is_in) {
+ count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT;
+ length = chan->xfer_len - count;
+ if (short_read)
+ *short_read = (count != 0);
+ } else if (chan->qh->do_split) {
+ length = qtd->ssplit_out_xfer_count;
+ } else {
+ length = chan->xfer_len;
+ }
+ } else {
+ /*
+ * Must use the hctsiz.pktcnt field to determine how much data
+ * has been transferred. This field reflects the number of
+ * packets that have been transferred via the USB. This is
+ * always an integral number of packets if the transfer was
+ * halted before its normal completion. (Can't use the
+ * hctsiz.xfersize field because that reflects the number of
+ * bytes transferred via the AHB, not the USB).
+ */
+ count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
+ length = (chan->start_pkt_count - count) * chan->max_packet;
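+		/*
+		 * For example, if 10 packets of max_packet 512 were
+		 * programmed and hctsiz.pktcnt still reads 4, then 6 whole
+		 * packets, i.e. 3072 bytes, went over the USB.
+		 */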
+ }
+
+ return length;
+}
+
+/**
+ * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
+ * Complete interrupt on the host channel. Updates the actual_length field
+ * of the URB based on the number of bytes transferred via the host channel.
+ * Sets the URB status if the data transfer is finished.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @chan: Programming view of host channel
+ * @chnum: Channel number
+ * @urb: Processing URB
+ * @qtd: Queue transfer descriptor
+ *
+ * Return: 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise
+ */
+static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_hcd_urb *urb,
+ struct dwc2_qtd *qtd)
+{
+ u32 hctsiz;
+ int xfer_done = 0;
+ int short_read = 0;
+ int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
+ DWC2_HC_XFER_COMPLETE,
+ &short_read);
+
+ if (urb->actual_length + xfer_length > urb->length) {
+ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ xfer_length = urb->length - urb->actual_length;
+ }
+
+ dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
+ urb->actual_length, xfer_length);
+ urb->actual_length += xfer_length;
+
+ if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
+ (urb->flags & URB_SEND_ZERO_PACKET) &&
+ urb->actual_length >= urb->length &&
+ !(urb->length % chan->max_packet)) {
+ xfer_done = 0;
+ } else if (short_read || urb->actual_length >= urb->length) {
+ xfer_done = 1;
+ urb->status = 0;
+ }
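+	/*
+	 * The first branch above covers a bulk OUT URB with
+	 * URB_SEND_ZERO_PACKET whose length is an exact multiple of
+	 * max_packet: the data is fully transferred, but the URB is kept in
+	 * progress so that a zero-length packet is still queued.
+	 */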
+
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
+ dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
+ dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
+ (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
+ dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
+ dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
+ dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
+ xfer_done);
+
+ return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ */
+void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
+
+ if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+ if (WARN(!chan || !chan->qh,
+ "chan->qh must be specified for non-control eps\n"))
+ return;
+
+ if (pid == TSIZ_SC_MC_PID_DATA0)
+ chan->qh->data_toggle = DWC2_HC_PID_DATA0;
+ else
+ chan->qh->data_toggle = DWC2_HC_PID_DATA1;
+ } else {
+ if (WARN(!qtd,
+ "qtd must be specified for control eps\n"))
+ return;
+
+ if (pid == TSIZ_SC_MC_PID_DATA0)
+ qtd->data_toggle = DWC2_HC_PID_DATA0;
+ else
+ qtd->data_toggle = DWC2_HC_PID_DATA1;
+ }
+}
+
+/**
+ * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
+ * the transfer is stopped for any reason. The fields of the current entry in
+ * the frame descriptor array are set based on the transfer state and the input
+ * halt_status. Completes the Isochronous URB if all the URB frames have been
+ * completed.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @chan: Programming view of host channel
+ * @chnum: Channel number
+ * @halt_status: Reason for halting a host channel
+ * @qtd: Queue transfer descriptor
+ *
+ * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
+ */
+static enum dwc2_halt_status dwc2_update_isoc_urb_state(
+ struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+ int chnum, struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status)
+{
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+ struct dwc2_hcd_urb *urb = qtd->urb;
+
+ if (!urb)
+ return DWC2_HC_XFER_NO_HALT_STATUS;
+
+ frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+
+ switch (halt_status) {
+ case DWC2_HC_XFER_COMPLETE:
+ frame_desc->status = 0;
+ frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
+ chan, chnum, qtd, halt_status, NULL);
+ break;
+ case DWC2_HC_XFER_FRAME_OVERRUN:
+ urb->error_count++;
+ if (chan->ep_is_in)
+ frame_desc->status = -ENOSR;
+ else
+ frame_desc->status = -ECOMM;
+ frame_desc->actual_length = 0;
+ break;
+ case DWC2_HC_XFER_BABBLE_ERR:
+ urb->error_count++;
+ frame_desc->status = -EOVERFLOW;
+ /* Don't need to update actual_length in this case */
+ break;
+ case DWC2_HC_XFER_XACT_ERR:
+ urb->error_count++;
+ frame_desc->status = -EPROTO;
+ frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
+ chan, chnum, qtd, halt_status, NULL);
+
+ /* Skip whole frame */
+ if (chan->qh->do_split &&
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
+ hsotg->params.host_dma) {
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ }
+
+ break;
+ default:
+ dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
+ halt_status);
+ break;
+ }
+
+ if (++qtd->isoc_frame_index == urb->packet_count) {
+ /*
+ * urb->status is not used for isoc transfers. The individual
+ * frame_desc statuses are used instead.
+ */
+ dwc2_host_complete(hsotg, qtd, 0);
+ halt_status = DWC2_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = DWC2_HC_XFER_COMPLETE;
+ }
+
+ return halt_status;
+}
+
+/*
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ int free_qtd)
+{
+ int continue_split = 0;
+ struct dwc2_qtd *qtd;
+
+ if (dbg_qh(qh))
+ dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
+ hsotg, qh, free_qtd);
+
+ if (list_empty(&qh->qtd_list)) {
+ dev_dbg(hsotg->dev, "## QTD list empty ##\n");
+ goto no_qtd;
+ }
+
+ qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
+
+ if (qtd->complete_split)
+ continue_split = 1;
+ else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
+ qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
+ continue_split = 1;
+
+ if (free_qtd) {
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+ continue_split = 0;
+ }
+
+no_qtd:
+ qh->channel = NULL;
+ dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
+}
+
+/**
+ * dwc2_release_channel() - Releases a host channel for use by other transfers
+ *
+ * @hsotg: The HCD state structure
+ * @chan: The host channel to release
+ * @qtd: The QTD associated with the host channel. This QTD may be
+ * freed if the transfer is complete or an error has occurred.
+ * @halt_status: Reason the channel is being released. This status
+ * determines the actions taken by this function.
+ *
+ * Also attempts to select and queue more transactions since at least one host
+ * channel is available.
+ */
+static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status)
+{
+ enum dwc2_transaction_type tr_type;
+ u32 haintmsk;
+ int free_qtd = 0;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n",
+ __func__, chan->hc_num, halt_status);
+
+ switch (halt_status) {
+ case DWC2_HC_XFER_URB_COMPLETE:
+ free_qtd = 1;
+ break;
+ case DWC2_HC_XFER_AHB_ERR:
+ case DWC2_HC_XFER_STALL:
+ case DWC2_HC_XFER_BABBLE_ERR:
+ free_qtd = 1;
+ break;
+ case DWC2_HC_XFER_XACT_ERR:
+ if (qtd && qtd->error_count >= 3) {
+ dev_vdbg(hsotg->dev,
+ " Complete URB with transaction error\n");
+ free_qtd = 1;
+ dwc2_host_complete(hsotg, qtd, -EPROTO);
+ }
+ break;
+ case DWC2_HC_XFER_URB_DEQUEUE:
+ /*
+ * The QTD has already been removed and the QH has been
+ * deactivated. Don't want to do anything except release the
+ * host channel and try to queue more transfers.
+ */
+ goto cleanup;
+ case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
+ dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
+ free_qtd = 1;
+ dwc2_host_complete(hsotg, qtd, -EIO);
+ break;
+ case DWC2_HC_XFER_NO_HALT_STATUS:
+ default:
+ break;
+ }
+
+ dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
+
+cleanup:
+ /*
+ * Release the host channel for use by other transfers. The cleanup
+ * function clears the channel interrupt enables and conditions, so
+ * there's no need to clear the Channel Halted interrupt separately.
+ */
+ if (!list_empty(&chan->hc_list_entry))
+ list_del(&chan->hc_list_entry);
+ dwc2_hc_cleanup(hsotg, chan);
+ list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
+
+ if (hsotg->params.uframe_sched) {
+ hsotg->available_host_channels++;
+ } else {
+ switch (chan->ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ hsotg->non_periodic_channels--;
+ break;
+ default:
+ /*
+ * Don't release reservations for periodic channels
+ * here. That's done when a periodic transfer is
+ * descheduled (i.e. when the QH is removed from the
+ * periodic schedule).
+ */
+ break;
+ }
+ }
+
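+	/* Stop listening for interrupts from the now-free channel */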
+ haintmsk = dwc2_readl(hsotg, HAINTMSK);
+ haintmsk &= ~(1 << chan->hc_num);
+ dwc2_writel(hsotg, haintmsk, HAINTMSK);
+
+ /* Try to queue more transfers now that there's a free channel */
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE)
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+}
+
+/*
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status)
+{
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ if (hsotg->params.host_dma) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA enabled\n");
+ dwc2_release_channel(hsotg, chan, qtd, halt_status);
+ return;
+ }
+
+ /* Slave mode processing */
+ dwc2_hc_halt(hsotg, chan, halt_status);
+
+ if (chan->halt_on_queue) {
+ u32 gintmsk;
+
+ dev_vdbg(hsotg->dev, "Halt on queue\n");
+ if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+ chan->ep_type == USB_ENDPOINT_XFER_BULK) {
+ dev_vdbg(hsotg->dev, "control/bulk\n");
+ /*
+ * Make sure the Non-periodic Tx FIFO empty interrupt
+ * is enabled so that the non-periodic schedule will
+ * be processed
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk |= GINTSTS_NPTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ } else {
+ dev_vdbg(hsotg->dev, "isoc/intr\n");
+ /*
+ * Move the QH from the periodic queued schedule to
+ * the periodic assigned schedule. This allows the
+ * halt to be queued when the periodic schedule is
+ * processed.
+ */
+ list_move_tail(&chan->qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
+
+ /*
+ * Make sure the Periodic Tx FIFO Empty interrupt is
+ * enabled so that the periodic schedule will be
+ * processed
+ */
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk |= GINTSTS_PTXFEMP;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+ }
+}
+
+/*
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ */
+static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ int chnum, struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status)
+{
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ qtd->error_count = 0;
+
+ if (chan->hcint & HCINTMSK_NYET) {
+ /*
+ * Got a NYET on the last transaction of the transfer. This
+ * means that the endpoint should be in the PING state at the
+ * beginning of the next transfer.
+ */
+ dev_vdbg(hsotg->dev, "got NYET\n");
+ chan->qh->ping_state = 1;
+ }
+
+ /*
+ * Always halt and release the host channel to make it available for
+ * more transfers. There may still be more phases for a control
+ * transfer or more data packets for a bulk transfer at this point,
+ * but the host channel is still halted. A channel will be reassigned
+ * to the transfer when the non-periodic schedule is processed after
+ * the channel is released. This allows transactions to be queued
+ * properly via dwc2_hcd_queue_transactions, which also enables the
+ * Tx FIFO Empty interrupt if necessary.
+ */
+ if (chan->ep_is_in) {
+ /*
+ * IN transfers in Slave mode require an explicit disable to
+ * halt the channel. (In DMA mode, this call simply releases
+ * the channel.)
+ */
+ dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+ } else {
+ /*
+ * The channel is automatically disabled by the core for OUT
+ * transfers in Slave mode
+ */
+ dwc2_release_channel(hsotg, chan, qtd, halt_status);
+ }
+}
+
+/*
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ */
+static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status)
+{
+ u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+
+ qtd->error_count = 0;
+
+ if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
+ /* Core halts channel in these cases */
+ dwc2_release_channel(hsotg, chan, qtd, halt_status);
+ else
+ /* Flush any outstanding requests from the Tx queue */
+ dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+}
+
+static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+ u32 len;
+ u32 hctsiz;
+ u32 pid;
+
+ if (!qtd->urb)
+ return 0;
+
+ frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
+ len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
+ DWC2_HC_XFER_COMPLETE, NULL);
+ if (!len && !qtd->isoc_split_offset) {
+ qtd->complete_split = 0;
+ return 0;
+ }
+
+ frame_desc->actual_length += len;
+
+ if (chan->align_buf) {
+ dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+ dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+ DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+ memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+ chan->qh->dw_align_buf, len);
+ }
+
+ qtd->isoc_split_offset += len;
+
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
+
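+	/*
+	 * For a split isochronous IN, intermediate chunks arrive as MDATA
+	 * packets; a DATA0 PID (pid == 0) marks the last chunk of the frame.
+	 */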
+ if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
+ frame_desc->status = 0;
+ qtd->isoc_frame_index++;
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ }
+
+ if (qtd->isoc_frame_index == qtd->urb->packet_count) {
+ dwc2_host_complete(hsotg, qtd, 0);
+ dwc2_release_channel(hsotg, chan, qtd,
+ DWC2_HC_XFER_URB_COMPLETE);
+ } else {
+ dwc2_release_channel(hsotg, chan, qtd,
+ DWC2_HC_XFER_NO_HALT_STATUS);
+ }
+
+ return 1; /* Indicates that channel released */
+}
+
+/*
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ struct dwc2_hcd_urb *urb = qtd->urb;
+ enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
+ int pipe_type;
+ int urb_xfer_done;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev,
+ "--Host Channel %d Interrupt: Transfer Complete--\n",
+ chnum);
+
+ if (!urb)
+ goto handle_xfercomp_done;
+
+ pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
+ if (hsotg->params.dma_desc_enable) {
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
+ if (pipe_type == USB_ENDPOINT_XFER_ISOC)
+ /* Do not disable the interrupt, just clear it */
+ return;
+ goto handle_xfercomp_done;
+ }
+
+ /* Handle xfer complete on CSPLIT */
+ if (chan->qh->do_split) {
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
+ hsotg->params.host_dma) {
+ if (qtd->complete_split &&
+ dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
+ qtd))
+ goto handle_xfercomp_done;
+ } else {
+ qtd->complete_split = 0;
+ }
+ }
+
+ /* Update the QTD and URB states */
+ switch (pipe_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ switch (qtd->control_phase) {
+ case DWC2_CONTROL_SETUP:
+ if (urb->length > 0)
+ qtd->control_phase = DWC2_CONTROL_DATA;
+ else
+ qtd->control_phase = DWC2_CONTROL_STATUS;
+ dev_vdbg(hsotg->dev,
+ " Control setup transaction done\n");
+ halt_status = DWC2_HC_XFER_COMPLETE;
+ break;
+ case DWC2_CONTROL_DATA:
+ urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
+ chnum, urb, qtd);
+ if (urb_xfer_done) {
+ qtd->control_phase = DWC2_CONTROL_STATUS;
+ dev_vdbg(hsotg->dev,
+ " Control data transfer done\n");
+ } else {
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
+ qtd);
+ }
+ halt_status = DWC2_HC_XFER_COMPLETE;
+ break;
+ case DWC2_CONTROL_STATUS:
+ dev_vdbg(hsotg->dev, " Control transfer complete\n");
+ if (urb->status == -EINPROGRESS)
+ urb->status = 0;
+ dwc2_host_complete(hsotg, qtd, urb->status);
+ halt_status = DWC2_HC_XFER_URB_COMPLETE;
+ break;
+ }
+
+ dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
+ halt_status);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
+ urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
+ qtd);
+ if (urb_xfer_done) {
+ dwc2_host_complete(hsotg, qtd, urb->status);
+ halt_status = DWC2_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = DWC2_HC_XFER_COMPLETE;
+ }
+
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
+ halt_status);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
+ urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
+ qtd);
+
+ /*
+ * Interrupt URB is done on the first transfer complete
+ * interrupt
+ */
+ if (urb_xfer_done) {
+ dwc2_host_complete(hsotg, qtd, urb->status);
+ halt_status = DWC2_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = DWC2_HC_XFER_COMPLETE;
+ }
+
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
+ halt_status);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
+ if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
+ halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
+ chnum, qtd,
+ DWC2_HC_XFER_COMPLETE);
+ dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
+ halt_status);
+ break;
+ }
+
+handle_xfercomp_done:
+ disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
+}
+
+/*
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ struct dwc2_hcd_urb *urb = qtd->urb;
+ int pipe_type;
+
+ dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
+ chnum);
+
+ if (hsotg->params.dma_desc_enable) {
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+ DWC2_HC_XFER_STALL);
+ goto handle_stall_done;
+ }
+
+ if (!urb)
+ goto handle_stall_halt;
+
+ pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
+ if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
+ dwc2_host_complete(hsotg, qtd, -EPIPE);
+
+ if (pipe_type == USB_ENDPOINT_XFER_BULK ||
+ pipe_type == USB_ENDPOINT_XFER_INT) {
+ dwc2_host_complete(hsotg, qtd, -EPIPE);
+ /*
+ * USB protocol requires resetting the data toggle for bulk
+ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+ * setup command is issued to the endpoint. Anticipate the
+ * CLEAR_FEATURE command since a STALL has occurred and reset
+ * the data toggle now.
+ */
+ chan->qh->data_toggle = 0;
+ }
+
+handle_stall_halt:
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
+
+handle_stall_done:
+ disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_hcd_urb *urb,
+ struct dwc2_qtd *qtd,
+ enum dwc2_halt_status halt_status)
+{
+ u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
+ qtd, halt_status, NULL);
+ u32 hctsiz;
+
+ if (urb->actual_length + xfer_length > urb->length) {
+ dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ xfer_length = urb->length - urb->actual_length;
+ }
+
+ urb->actual_length += xfer_length;
+
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
+ dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
+ chan->start_pkt_count);
+ dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
+ (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
+ dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
+ dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
+ xfer_length);
+ dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
+ urb->actual_length);
+ dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
+ urb->length);
+}
+
+/*
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ if (!qtd) {
+ dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
+ return;
+ }
+
+ if (!qtd->urb) {
+ dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
+ return;
+ }
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
+ chnum);
+
+ /*
+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+ * interrupt. Re-start the SSPLIT transfer.
+ *
+ * Normally for non-periodic transfers we'll retry right away, but to
+ * avoid interrupt storms we'll wait before retrying if we've got
+ * several NAKs. If we didn't do this we'd retry directly from the
+ * interrupt handler and could end up quickly getting another
+ * interrupt (another NAK), which we'd retry. Note that we do not
+ * delay retries for IN parts of control requests, as those are expected
+	 * to complete fairly quickly; if we delay them we risk confusing
+	 * the device and causing it to issue a STALL.
+ *
+ * Note that in DMA mode software only gets involved to re-send NAKed
+ * transfers for split transactions, so we only need to apply this
+ * delaying logic when handling splits. In non-DMA mode presumably we
+ * might want a similar delay if someone can demonstrate this problem
+ * affects that code path too.
+ */
+ if (chan->do_split) {
+ if (chan->complete_split)
+ qtd->error_count = 0;
+ qtd->complete_split = 0;
+ qtd->num_naks++;
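+		/*
+		 * After enough NAKs, make the QH wait before retrying;
+		 * control IN transfers are exempt (see above)
+		 */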
+ qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+ !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+ chan->ep_is_in);
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
+ goto handle_nak_done;
+ }
+
+ switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ if (hsotg->params.host_dma && chan->ep_is_in) {
+ /*
+ * NAK interrupts are enabled on bulk/control IN
+ * transfers in DMA mode for the sole purpose of
+ * resetting the error count after a transaction error
+ * occurs. The core will continue transferring data.
+ */
+ qtd->error_count = 0;
+ break;
+ }
+
+ /*
+ * NAK interrupts normally occur during OUT transfers in DMA
+ * or Slave mode. For IN transfers, more requests will be
+ * queued as request queue space is available.
+ */
+ qtd->error_count = 0;
+
+ if (!chan->qh->ping_state) {
+ dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+ qtd, DWC2_HC_XFER_NAK);
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+
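+			/* High-speed OUT endpoints resume via the PING protocol */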
+ if (chan->speed == USB_SPEED_HIGH)
+ chan->qh->ping_state = 1;
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will
+ * start/continue
+ */
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ qtd->error_count = 0;
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* Should never get called for isochronous transfers */
+		dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
+ break;
+ }
+
+handle_nak_done:
+ disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
+}
+
+/*
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
+ chnum);
+
+ if (chan->do_split) {
+ /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
+ if (!chan->ep_is_in &&
+ chan->data_pid_start != DWC2_HC_PID_SETUP)
+ qtd->ssplit_out_xfer_count = chan->xfer_len;
+
+ if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
+ qtd->complete_split = 1;
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
+ } else {
+ /* ISOC OUT */
+ switch (chan->xact_pos) {
+ case DWC2_HCSPLT_XACTPOS_ALL:
+ break;
+ case DWC2_HCSPLT_XACTPOS_END:
+ qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
+ qtd->isoc_split_offset = 0;
+ break;
+ case DWC2_HCSPLT_XACTPOS_BEGIN:
+ case DWC2_HCSPLT_XACTPOS_MID:
+ /*
+ * For BEGIN or MID, calculate the length for
+ * the next microframe to determine the correct
+ * SSPLIT token, either MID or END
+ */
+ frame_desc = &qtd->urb->iso_descs[
+ qtd->isoc_frame_index];
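+				/* Splits move at most 188 ISOC bytes per uframe */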
+ qtd->isoc_split_offset += 188;
+
+ if (frame_desc->length - qtd->isoc_split_offset
+ <= 188)
+ qtd->isoc_split_pos =
+ DWC2_HCSPLT_XACTPOS_END;
+ else
+ qtd->isoc_split_pos =
+ DWC2_HCSPLT_XACTPOS_MID;
+ break;
+ }
+ }
+ } else {
+ qtd->error_count = 0;
+
+ if (chan->qh->ping_state) {
+ chan->qh->ping_state = 0;
+ /*
+ * Halt the channel so the transfer can be re-started
+ * from the appropriate point. This only happens in
+ * Slave mode. In DMA mode, the ping_state is cleared
+ * when the transfer is started because the core
+ * automatically executes the PING, then the transfer.
+ */
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
+ }
+ }
+
+ /*
+ * If the ACK occurred when _not_ in the PING state, let the channel
+ * continue transferring data after clearing the error count
+ */
+ disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
+}
+
+/*
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
+ chnum);
+
+ /*
+ * NYET on CSPLIT
+ * re-do the CSPLIT immediately on non-periodic
+ */
+ if (chan->do_split && chan->complete_split) {
+ if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
+ hsotg->params.host_dma) {
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ qtd->isoc_frame_index++;
+ if (qtd->urb &&
+ qtd->isoc_frame_index == qtd->urb->packet_count) {
+ dwc2_host_complete(hsotg, qtd, 0);
+ dwc2_release_channel(hsotg, chan, qtd,
+ DWC2_HC_XFER_URB_COMPLETE);
+ } else {
+ dwc2_release_channel(hsotg, chan, qtd,
+ DWC2_HC_XFER_NO_HALT_STATUS);
+ }
+ goto handle_nyet_done;
+ }
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ struct dwc2_qh *qh = chan->qh;
+ bool past_end;
+
+ if (!hsotg->params.uframe_sched) {
+ int frnum = dwc2_hcd_get_frame_number(hsotg);
+
+ /* Don't have num_hs_transfers; simple logic */
+ past_end = dwc2_full_frame_num(frnum) !=
+ dwc2_full_frame_num(qh->next_active_frame);
+ } else {
+ int end_frnum;
+
+ /*
+ * Figure out the end frame based on
+ * schedule.
+ *
+ * We don't want to go on trying again
+ * and again forever. Let's stop when
+ * we've done all the transfers that
+ * were scheduled.
+ *
+ * We're going to be comparing
+ * start_active_frame and
+ * next_active_frame, both of which
+ * are 1 before the time the packet
+ * goes on the wire, so that cancels
+				 * out. Basically if we had 1 transfer
+ * and we saw 1 NYET then we're done.
+ * We're getting a NYET here so if
+ * next >= (start + num_transfers)
+ * we're done. The complexity is that
+ * for all but ISOC_OUT we skip one
+ * slot.
+ */
+ end_frnum = dwc2_frame_num_inc(
+ qh->start_active_frame,
+ qh->num_hs_transfers);
+
+ if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
+ qh->ep_is_in)
+ end_frnum =
+ dwc2_frame_num_inc(end_frnum, 1);
+
+ past_end = dwc2_frame_num_le(
+ end_frnum, qh->next_active_frame);
+ }
+
+ if (past_end) {
+ /* Treat this as a transaction error. */
+#if 0
+ /*
+ * Todo: Fix system performance so this can
+ * be treated as an error. Right now complete
+ * splits cannot be scheduled precisely enough
+ * due to other system activity, so this error
+ * occurs regularly in Slave mode.
+ */
+ qtd->error_count++;
+#endif
+ qtd->complete_split = 0;
+ dwc2_halt_channel(hsotg, chan, qtd,
+ DWC2_HC_XFER_XACT_ERR);
+ /* Todo: add support for isoc release */
+ goto handle_nyet_done;
+ }
+ }
+
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
+ goto handle_nyet_done;
+ }
+
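+	/*
+	 * NYET on a non-split BULK/CONTROL OUT: the endpoint accepted the
+	 * data but has no room for more yet, so switch to the PING protocol
+	 */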
+ chan->qh->ping_state = 1;
+ qtd->error_count = 0;
+
+ dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
+ DWC2_HC_XFER_NYET);
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+
+ /*
+ * Halt the channel and re-start the transfer so the PING protocol
+ * will start
+ */
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
+
+handle_nyet_done:
+ disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
+}
+
+/*
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
+ chnum);
+
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
+ if (hsotg->params.dma_desc_enable) {
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+ DWC2_HC_XFER_BABBLE_ERR);
+ goto disable_int;
+ }
+
+ if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
+ dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
+ } else {
+ enum dwc2_halt_status halt_status;
+
+ halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
+ qtd, DWC2_HC_XFER_BABBLE_ERR);
+ dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+ }
+
+disable_int:
+ disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
+}
+
+/*
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ struct dwc2_hcd_urb *urb = qtd->urb;
+	const char *pipetype, *speed;
+ u32 hcchar;
+ u32 hcsplt;
+ u32 hctsiz;
+ u32 hc_dma;
+
+ dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
+ chnum);
+
+ if (!urb)
+ goto handle_ahberr_halt;
+
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
+
+ dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
+ dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
+ dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
+ dev_err(hsotg->dev, " Device address: %d\n",
+ dwc2_hcd_get_dev_addr(&urb->pipe_info));
+ dev_err(hsotg->dev, " Endpoint: %d, %s\n",
+ dwc2_hcd_get_ep_num(&urb->pipe_info),
+ dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
+
+ switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ pipetype = "CONTROL";
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ pipetype = "BULK";
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ pipetype = "INTERRUPT";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ pipetype = "ISOCHRONOUS";
+ break;
+ default:
+ pipetype = "UNKNOWN";
+ break;
+ }
+
+ dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);
+
+ switch (chan->speed) {
+ case USB_SPEED_HIGH:
+ speed = "HIGH";
+ break;
+ case USB_SPEED_FULL:
+ speed = "FULL";
+ break;
+ case USB_SPEED_LOW:
+ speed = "LOW";
+ break;
+ default:
+ speed = "UNKNOWN";
+ break;
+ }
+
+ dev_err(hsotg->dev, " Speed: %s\n", speed);
+
+ dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n",
+ dwc2_hcd_get_maxp(&urb->pipe_info),
+ dwc2_hcd_get_maxp_mult(&urb->pipe_info));
+ dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
+ dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
+ urb->buf, (unsigned long)urb->dma);
+ dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
+ urb->setup_packet, (unsigned long)urb->setup_dma);
+ dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
+
+ /* Core halts the channel for Descriptor DMA mode */
+ if (hsotg->params.dma_desc_enable) {
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+ DWC2_HC_XFER_AHB_ERR);
+ goto handle_ahberr_done;
+ }
+
+ dwc2_host_complete(hsotg, qtd, -EIO);
+
+handle_ahberr_halt:
+ /*
+ * Force a channel halt. Don't call dwc2_halt_channel because that won't
+ * write to the HCCHARn register in DMA mode to force the halt.
+ */
+ dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
+
+handle_ahberr_done:
+ disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
+}
+
+/*
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ dev_dbg(hsotg->dev,
+ "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
+
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
+ if (hsotg->params.dma_desc_enable) {
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+ DWC2_HC_XFER_XACT_ERR);
+ goto handle_xacterr_done;
+ }
+
+ switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
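+		/* One more strike toward the 3-strikes rule */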
+ qtd->error_count++;
+ if (!chan->qh->ping_state) {
+ dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+ qtd, DWC2_HC_XFER_XACT_ERR);
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
+ chan->qh->ping_state = 1;
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will start
+ */
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ qtd->error_count++;
+ if (chan->do_split && chan->complete_split)
+ qtd->complete_split = 0;
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ {
+ enum dwc2_halt_status halt_status;
+
+ halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
+ chnum, qtd, DWC2_HC_XFER_XACT_ERR);
+ dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+ }
+ break;
+ }
+
+handle_xacterr_done:
+ disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
+}
+
+/*
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ enum dwc2_halt_status halt_status;
+
+ if (dbg_hc(chan))
+ dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
+ chnum);
+
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
+ switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
+ qtd, DWC2_HC_XFER_FRAME_OVERRUN);
+ dwc2_halt_channel(hsotg, chan, qtd, halt_status);
+ break;
+ }
+
+ disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
+}
+
+/*
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ dev_dbg(hsotg->dev,
+ "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
+
+ if (chan->ep_is_in)
+ qtd->error_count = 0;
+ else
+ dev_err(hsotg->dev,
+ "Data Toggle Error on OUT transfer, channel %d\n",
+ chnum);
+
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+ disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
+}
+
+/*
+ * For debug only. It checks that a valid halt status is set and that
+ * HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ *
+ * Return: true if halt status is ok, false otherwise
+ */
+static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+#ifdef DEBUG
+ u32 hcchar;
+ u32 hctsiz;
+ u32 hcintmsk;
+ u32 hcsplt;
+
+ if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
+ /*
+ * This code is here only as a check. This condition should
+ * never happen. Ignore the halt if it does occur.
+ */
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
+ dev_dbg(hsotg->dev,
+ "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
+ __func__);
+ dev_dbg(hsotg->dev,
+ "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
+ chnum, hcchar, hctsiz);
+ dev_dbg(hsotg->dev,
+ "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
+ chan->hcint, hcintmsk, hcsplt);
+ if (qtd)
+ dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
+ qtd->complete_split);
+ dev_warn(hsotg->dev,
+ "%s: no halt status, channel %d, ignoring interrupt\n",
+ __func__, chnum);
+ return false;
+ }
+
+ /*
+ * This code is here only as a check. hcchar.chdis should never be set
+ * when the halt interrupt occurs. Halt the channel again if it does
+ * occur.
+ */
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
+ if (hcchar & HCCHAR_CHDIS) {
+ dev_warn(hsotg->dev,
+ "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
+ __func__, hcchar);
+ chan->halt_pending = 0;
+ dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+/*
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ u32 hcintmsk;
+ int out_nak_enh = 0;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev,
+ "--Host Channel %d Interrupt: DMA Channel Halted--\n",
+ chnum);
+
+ /*
+ * For core with OUT NAK enhancement, the flow for high-speed
+ * CONTROL/BULK OUT is handled a little differently
+ */
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
+ if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
+ (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+ chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
+ out_nak_enh = 1;
+ }
+ }
+
+ if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
+ (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
+ !hsotg->params.dma_desc_enable)) {
+ if (hsotg->params.dma_desc_enable)
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+ chan->halt_status);
+ else
+ /*
+ * Just release the channel. A dequeue can happen on a
+ * transfer timeout. In the case of an AHB Error, the
+ * channel was forced to halt because there's no way to
+ * gracefully recover.
+ */
+ dwc2_release_channel(hsotg, chan, qtd,
+ chan->halt_status);
+ return;
+ }
+
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+
+ if (chan->hcint & HCINTMSK_XFERCOMPL) {
+ /*
+ * Todo: This is here because of a possible hardware bug. Spec
+		 * says that on SPLIT-ISOC OUT transfers in DMA mode a HALT
+		 * interrupt with the ACK bit set should occur, but I only see the
+ * XFERCOMP bit, even with it masked out. This is a workaround
+ * for that behavior. Should fix this when hardware is fixed.
+ */
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
+ dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
+ dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
+ } else if (chan->hcint & HCINTMSK_STALL) {
+ dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
+ } else if ((chan->hcint & HCINTMSK_XACTERR) &&
+ !hsotg->params.dma_desc_enable) {
+ if (out_nak_enh) {
+ if (chan->hcint &
+ (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
+ dev_vdbg(hsotg->dev,
+ "XactErr with NYET/NAK/ACK\n");
+ qtd->error_count = 0;
+ } else {
+ dev_vdbg(hsotg->dev,
+ "XactErr without NYET/NAK/ACK\n");
+ }
+ }
+
+ /*
+ * Must handle xacterr before nak or ack. Could get a xacterr
+ * at the same time as either of these on a BULK/CONTROL OUT
+ * that started with a PING. The xacterr takes precedence.
+ */
+ dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
+ } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
+ hsotg->params.dma_desc_enable) {
+ dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
+ } else if ((chan->hcint & HCINTMSK_AHBERR) &&
+ hsotg->params.dma_desc_enable) {
+ dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
+ } else if (chan->hcint & HCINTMSK_BBLERR) {
+ dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
+ } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
+ dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
+ } else if (!out_nak_enh) {
+ if (chan->hcint & HCINTMSK_NYET) {
+ /*
+ * Must handle nyet before nak or ack. Could get a nyet
+ * at the same time as either of those on a BULK/CONTROL
+ * OUT that started with a PING. The nyet takes
+ * precedence.
+ */
+ dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
+ } else if ((chan->hcint & HCINTMSK_NAK) &&
+ !(hcintmsk & HCINTMSK_NAK)) {
+ /*
+ * If nak is not masked, it's because a non-split IN
+ * transfer is in an error state. In that case, the nak
+ * is handled by the nak interrupt handler, not here.
+ * Handle nak here for BULK/CONTROL OUT transfers, which
+ * halt on a NAK to allow rewinding the buffer pointer.
+ */
+ dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
+ } else if ((chan->hcint & HCINTMSK_ACK) &&
+ !(hcintmsk & HCINTMSK_ACK)) {
+ /*
+ * If ack is not masked, it's because a non-split IN
+ * transfer is in an error state. In that case, the ack
+ * is handled by the ack interrupt handler, not here.
+ * Handle ack here for split transfers. Start splits
+ * halt on ACK.
+ */
+ dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
+ } else {
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ /*
+ * A periodic transfer halted with no other
+ * channel interrupts set. Assume it was halted
+ * by the core because it could not be completed
+ * in its scheduled (micro)frame.
+ */
+ dev_dbg(hsotg->dev,
+ "%s: Halt channel %d (assume incomplete periodic transfer)\n",
+ __func__, chnum);
+ dwc2_halt_channel(hsotg, chan, qtd,
+ DWC2_HC_XFER_PERIODIC_INCOMPLETE);
+ } else {
+ dev_err(hsotg->dev,
+ "%s: Channel %d - ChHltd set, but reason is unknown\n",
+ __func__, chnum);
+ dev_err(hsotg->dev,
+ "hcint 0x%08x, intsts 0x%08x\n",
+ chan->hcint,
+ dwc2_readl(hsotg, GINTSTS));
+ goto error;
+ }
+ }
+ } else {
+ dev_info(hsotg->dev,
+ "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
+ chan->hcint);
+error:
+		/* Failure path: use the 3-strikes rule */
+ qtd->error_count++;
+ dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+ qtd, DWC2_HC_XFER_XACT_ERR);
+ /*
+ * We can get here after a completed transaction
+ * (urb->actual_length >= urb->length) which was not reported
+ * as completed. If that is the case, and we do not abort
+ * the transfer, a transfer of size 0 will be enqueued
+ * subsequently. If urb->actual_length is not DMA-aligned,
+ * the buffer will then point to an unaligned address, and
+ * the resulting behavior is undefined. Bail out in that
+ * situation.
+ */
+ if (qtd->urb->actual_length >= qtd->urb->length)
+ qtd->error_count = 3;
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+ }
+}
+
+/*
+ * Handles a host channel Channel Halted interrupt
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, int chnum,
+ struct dwc2_qtd *qtd)
+{
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
+ chnum);
+
+ if (hsotg->params.host_dma) {
+ dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
+ } else {
+ if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
+ return;
+ dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
+ }
+}
+
+/*
+ * Check if the given qtd is still the top of the list (and thus valid).
+ *
+ * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
+ * the qtd from the top of the list, this will return false (otherwise true).
+ */
+static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
+{
+ struct dwc2_qtd *cur_head;
+
+ if (!qh)
+ return false;
+
+ cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
+ qtd_list_entry);
+ return (cur_head == qtd);
+}
+
+/* Handles interrupt for a specific Host Channel */
+static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+{
+ struct dwc2_qtd *qtd;
+ struct dwc2_host_chan *chan;
+ u32 hcint, hcintraw, hcintmsk;
+
+ chan = hsotg->hc_ptr_array[chnum];
+
+ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+ hcint = hcintraw & hcintmsk;
+ dwc2_writel(hsotg, hcint, HCINT(chnum));
+
+ if (!chan) {
+ dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
+ return;
+ }
+
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
+ chnum);
+ dev_vdbg(hsotg->dev,
+ " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+ hcintraw, hcintmsk, hcint);
+ }
+
+ /*
+ * If we got an interrupt after someone called
+ * dwc2_hcd_endpoint_disable() we don't want to crash below
+ */
+ if (!chan->qh) {
+ dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
+ return;
+ }
+
+ chan->hcint = hcintraw;
+
+ /*
+ * If the channel was halted due to a dequeue, the qtd list might
+ * be empty or at least the first entry will not be the active qtd.
+ * In this case, take a shortcut and just release the channel.
+ */
+ if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
+ /*
+ * If the channel was halted, this should be the only
+ * interrupt unmasked
+ */
+ WARN_ON(hcint != HCINTMSK_CHHLTD);
+ if (hsotg->params.dma_desc_enable)
+ dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
+ chan->halt_status);
+ else
+ dwc2_release_channel(hsotg, chan, NULL,
+ chan->halt_status);
+ return;
+ }
+
+ if (list_empty(&chan->qh->qtd_list)) {
+ /*
+ * TODO: Will this ever happen with the
+ * DWC2_HC_XFER_URB_DEQUEUE handling above?
+ */
+ dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
+ chnum);
+ dev_dbg(hsotg->dev,
+ " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+ chan->hcint, hcintmsk, hcint);
+ chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
+ disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
+ chan->hcint = 0;
+ return;
+ }
+
+ qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
+ qtd_list_entry);
+
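+	/*
+	 * In Slave mode the driver itself requests channel halts, so when
+	 * CHHLTD arrives along with the status that caused it, strip CHHLTD
+	 * and let the cause-specific handlers below run first
+	 */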
+ if (!hsotg->params.host_dma) {
+ if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
+ hcint &= ~HCINTMSK_CHHLTD;
+ }
+
+ if (hcint & HCINTMSK_XFERCOMPL) {
+ dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
+ /*
+ * If NYET occurred at same time as Xfer Complete, the NYET is
+ * handled by the Xfer Complete interrupt handler. Don't want
+ * to call the NYET interrupt handler in this case.
+ */
+ hcint &= ~HCINTMSK_NYET;
+ }
+
+ if (hcint & HCINTMSK_CHHLTD) {
+ dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_AHBERR) {
+ dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_STALL) {
+ dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_NAK) {
+ dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_ACK) {
+ dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_NYET) {
+ dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_XACTERR) {
+ dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_BBLERR) {
+ dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_FRMOVRUN) {
+ dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+ if (hcint & HCINTMSK_DATATGLERR) {
+ dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
+ if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
+ goto exit;
+ }
+
+exit:
+ chan->hcint = 0;
+}
+
+/*
+ * This interrupt indicates that one or more host channels have a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately.
+ */
+static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 haint;
+ int i;
+ struct dwc2_host_chan *chan, *chan_tmp;
+
+ haint = dwc2_readl(hsotg, HAINT);
+ if (dbg_perio()) {
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
+ }
+
+ /*
+ * According to USB 2.0 spec section 11.18.8, a host must
+ * issue complete-split transactions in a microframe for a
+ * set of full-/low-speed endpoints in the same relative
+	 * order as the corresponding start-splits were issued.
+ */
+ list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
+ split_order_list_entry) {
+ int hc_num = chan->hc_num;
+
+ if (haint & (1 << hc_num)) {
+ dwc2_hc_n_intr(hsotg, hc_num);
+ haint &= ~(1 << hc_num);
+ }
+ }
+
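+	/* Service any remaining channels not handled in split order */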
+ for (i = 0; i < hsotg->params.host_channels; i++) {
+ if (haint & (1 << i))
+ dwc2_hc_n_intr(hsotg, i);
+ }
+}
+
+/* This function handles interrupts for the HCD */
+irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts, dbg_gintsts;
+ irqreturn_t retval = IRQ_NONE;
+
+ if (!dwc2_is_controller_alive(hsotg)) {
+ dev_warn(hsotg->dev, "Controller is dead\n");
+ return retval;
+ }
+
+ spin_lock(&hsotg->lock);
+
+ /* Check if HOST Mode */
+ if (dwc2_is_host_mode(hsotg)) {
+ gintsts = dwc2_read_core_intr(hsotg);
+ if (!gintsts) {
+ spin_unlock(&hsotg->lock);
+ return retval;
+ }
+
+ retval = IRQ_HANDLED;
+
+ dbg_gintsts = gintsts;
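+		/* SOF fires constantly; keep it out of the debug output */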
+#ifndef DEBUG_SOF
+ dbg_gintsts &= ~GINTSTS_SOF;
+#endif
+ if (!dbg_perio())
+ dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
+ GINTSTS_PTXFEMP);
+
+ /* Only print if there are any non-suppressed interrupts left */
+ if (dbg_gintsts)
+ dev_vdbg(hsotg->dev,
+ "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
+ gintsts);
+
+ if (gintsts & GINTSTS_SOF)
+ dwc2_sof_intr(hsotg);
+ if (gintsts & GINTSTS_RXFLVL)
+ dwc2_rx_fifo_level_intr(hsotg);
+ if (gintsts & GINTSTS_NPTXFEMP)
+ dwc2_np_tx_fifo_empty_intr(hsotg);
+ if (gintsts & GINTSTS_PRTINT)
+ dwc2_port_intr(hsotg);
+ if (gintsts & GINTSTS_HCHINT)
+ dwc2_hc_intr(hsotg);
+ if (gintsts & GINTSTS_PTXFEMP)
+ dwc2_perio_tx_fifo_empty_intr(hsotg);
+
+ if (dbg_gintsts) {
+ dev_vdbg(hsotg->dev,
+ "DWC OTG HCD Finished Servicing Interrupts\n");
+ dev_vdbg(hsotg->dev,
+ "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
+ dwc2_readl(hsotg, GINTSTS),
+ dwc2_readl(hsotg, GINTMSK));
+ }
+ }
+
+ spin_unlock(&hsotg->lock);
+
+ return retval;
+}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
new file mode 100644
index 000000000..0a1145592
--- /dev/null
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -0,0 +1,2069 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors for Host mode
+ */
+#include <linux/gcd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/* Wait this long before releasing periodic reservation */
+#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
+
+/* If we get a NAK, wait this long before retrying */
+#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
+
+/**
+ * dwc2_periodic_channel_available() - Checks that a channel is available for a
+ * periodic transfer
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
+{
+ /*
+ * Currently assuming that there is a dedicated host channel for
+ * each periodic transaction plus at least one host channel for
+ * non-periodic transactions
+ */
+ int status;
+ int num_channels;
+
+ num_channels = hsotg->params.host_channels;
+ if ((hsotg->periodic_channels + hsotg->non_periodic_channels <
+ num_channels) && (hsotg->periodic_channels < num_channels - 1)) {
+ status = 0;
+ } else {
+ dev_dbg(hsotg->dev,
+ "%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
+ __func__, num_channels,
+ hsotg->periodic_channels, hsotg->non_periodic_channels);
+ status = -ENOSPC;
+ }
+
+ return status;
+}
+
+/**
+ * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
+ * for the specified QH in the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH containing periodic bandwidth required
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * For simplicity, this calculation assumes that all the transfers in the
+ * periodic schedule may occur in the same (micro)frame
+ */
+static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ int status;
+ s16 max_claimed_usecs;
+
+ status = 0;
+
+ if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
+ /*
+ * High speed mode
+ * Max periodic usecs is 80% x 125 usec = 100 usec
+ */
+ max_claimed_usecs = 100 - qh->host_us;
+ } else {
+ /*
+ * Full speed mode
+ * Max periodic usecs is 90% x 1000 usec = 900 usec
+ */
+ max_claimed_usecs = 900 - qh->host_us;
+ }
+
+ if (hsotg->periodic_usecs > max_claimed_usecs) {
+ dev_err(hsotg->dev,
+ "%s: already claimed usecs %d, required usecs %d\n",
+ __func__, hsotg->periodic_usecs, qh->host_us);
+ status = -ENOSPC;
+ }
+
+ return status;
+}
+
+/**
+ * pmap_schedule() - Schedule time in a periodic bitmap (pmap).
+ *
+ * @map: The bitmap representing the schedule; will be updated
+ * upon success.
+ * @bits_per_period: The schedule represents several periods. This is how many
+ * bits are in each period. It's assumed that the beginning
+ * of the schedule will repeat after its end.
+ * @periods_in_map: The number of periods in the schedule.
+ * @num_bits: The number of bits we need per period we want to reserve
+ * in this function call.
+ * @interval: How often we need to be scheduled for the reservation this
+ * time. 1 means every period. 2 means every other period.
+ * ...you get the picture?
+ * @start: The bit number to start at. Normally 0. Must be within
+ * the interval or we return failure right away.
+ * @only_one_period: Normally we'll allow picking a start anywhere within the
+ * first interval, since we can still make all repetition
+ * requirements by doing that. However, if you pass true
+ * here then we'll return failure if we can't fit within
+ * the period that "start" is in.
+ *
+ * The idea here is that we want to schedule time for repeating events that all
+ * want the same resource. The resource is divided into fixed-sized periods
+ * and the events want to repeat every "interval" periods. The schedule
+ * granularity is one bit.
+ *
+ * To keep things "simple", we'll represent our schedule with a bitmap that
+ * contains a fixed number of periods. This gets rid of a lot of complexity
+ * but does mean that we need to handle things specially (and non-ideally) if
+ * the number of the periods in the schedule doesn't match well with the
+ * intervals that we're trying to schedule.
+ *
+ * Here's an explanation of the scheme we'll implement, assuming 8 periods.
+ * - If interval is 1, we need to take up space in each of the 8
+ * periods we're scheduling. Easy.
+ * - If interval is 2, we need to take up space in half of the
+ * periods. Again, easy.
+ * - If interval is 3, we actually need to fall back to interval 1.
+ * Why? Because we might need time in any period. AKA for the
+ * first 8 periods, we'll be in slot 0, 3, 6. Then we'll be
+ * in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to
+ * 0, 3, and 6. Since we could be in any frame we need to reserve
+ * for all of them. Sucks, but that's what you gotta do. Note that
+ * if we were instead scheduling 8 * 3 = 24 we'd do much better, but
+ * then we need more memory and time to do scheduling.
+ * - If interval is 4, easy.
+ * - If interval is 5, we again need interval 1. The schedule will be
+ * 0, 5, 2, 7, 4, 1, 6, 3, 0
+ * - If interval is 6, we need interval 2. 0, 6, 4, 2.
+ * - If interval is 7, we need interval 1.
+ * - If interval is 8, we need interval 8.
+ *
+ * If you do the math, you'll see that we need to pretend that interval is
+ * equal to the greatest_common_divisor(interval, periods_in_map).
+ *
+ * Note that at the moment this function tends to front-pack the schedule.
+ * In some cases that's really non-ideal (it's hard to schedule things that
+ * need to repeat every period). In other cases it's perfect (you can easily
+ * schedule bigger, less often repeating things).
+ *
+ * Here's the algorithm in action (8 periods, 5 bits per period):
+ * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
+ * |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2
+ * |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5
+ * |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2
+ * |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2
+ * |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3
+ * |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6
+ * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4
+ * |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1
+ * | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0
+ * | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5
+ * | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2
+ * | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3
+ * | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6
+ * | | | | | | | | | Remv 1 bits, intv 1 at 4
+ * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
+ * |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2
+ * |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3
+ * |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5
+ * |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6
+ * |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8
+ * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12
+ *
+ * This function is pretty generic and could be easily abstracted if anything
+ * needed similar scheduling.
+ *
+ * Returns either -ENOSPC or a >= 0 start bit which should be passed to the
+ * unschedule routine. The map bitmap will be updated on a non-error result.
+ */
+static int pmap_schedule(unsigned long *map, int bits_per_period,
+ int periods_in_map, int num_bits,
+ int interval, int start, bool only_one_period)
+{
+ int interval_bits;
+ int to_reserve;
+ int first_end;
+ int i;
+
+ if (num_bits > bits_per_period)
+ return -ENOSPC;
+
+ /* Adjust interval as per description */
+ interval = gcd(interval, periods_in_map);
+
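+	/* One reservation per interval; repeats land interval_bits apart */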
+ interval_bits = bits_per_period * interval;
+ to_reserve = periods_in_map / interval;
+
+ /* If start has gotten us past interval then we can't schedule */
+ if (start >= interval_bits)
+ return -ENOSPC;
+
+ if (only_one_period)
+		/* Must fit within start's period; end at start of the next */
+ first_end = (start / bits_per_period + 1) * bits_per_period;
+ else
+ /* Can fit anywhere in the first interval */
+ first_end = interval_bits;
+
+ /*
+ * We'll try to pick the first repetition, then see if that time
+ * is free for each of the subsequent repetitions. If it's not
+ * we'll adjust the start time for the next search of the first
+ * repetition.
+ */
+ while (start + num_bits <= first_end) {
+ int end;
+
+ /* Need to stay within this period */
+ end = (start / bits_per_period + 1) * bits_per_period;
+
+ /* Look for num_bits us in this microframe starting at start */
+ start = bitmap_find_next_zero_area(map, end, start, num_bits,
+ 0);
+
+ /*
+ * We should get start >= end if we fail. We might be
+ * able to check the next microframe depending on the
+ * interval, so continue on (start already updated).
+ */
+ if (start >= end) {
+ start = end;
+ continue;
+ }
+
+ /* At this point we have a valid point for first one */
+ for (i = 1; i < to_reserve; i++) {
+ int ith_start = start + interval_bits * i;
+ int ith_end = end + interval_bits * i;
+ int ret;
+
+ /* Use this as a dumb "check if bits are 0" */
+ ret = bitmap_find_next_zero_area(
+ map, ith_start + num_bits, ith_start, num_bits,
+ 0);
+
+ /* We got the right place, continue checking */
+ if (ret == ith_start)
+ continue;
+
+ /* Move start up for next time and exit for loop */
+ ith_start = bitmap_find_next_zero_area(
+ map, ith_end, ith_start, num_bits, 0);
+ if (ith_start >= ith_end)
+				/* Need a whole new period next time */
+ start = end;
+ else
+ start = ith_start - interval_bits * i;
+ break;
+ }
+
+		/* If we didn't exit the for loop with a break, we have success */
+ if (i == to_reserve)
+ break;
+ }
+
+ if (start + num_bits > first_end)
+ return -ENOSPC;
+
+ for (i = 0; i < to_reserve; i++) {
+ int ith_start = start + interval_bits * i;
+
+ bitmap_set(map, ith_start, num_bits);
+ }
+
+ return start;
+}
+
+/**
+ * pmap_unschedule() - Undo work done by pmap_schedule()
+ *
+ * @map: See pmap_schedule().
+ * @bits_per_period: See pmap_schedule().
+ * @periods_in_map: See pmap_schedule().
+ * @num_bits: The number of bits that was passed to schedule.
+ * @interval: The interval that was passed to schedule.
+ * @start: The return value from pmap_schedule().
+ */
+static void pmap_unschedule(unsigned long *map, int bits_per_period,
+ int periods_in_map, int num_bits,
+ int interval, int start)
+{
+ int interval_bits;
+ int to_release;
+ int i;
+
+ /* Adjust interval as per description in pmap_schedule() */
+ interval = gcd(interval, periods_in_map);
+
+ interval_bits = bits_per_period * interval;
+ to_release = periods_in_map / interval;
+
+ for (i = 0; i < to_release; i++) {
+ int ith_start = start + interval_bits * i;
+
+ bitmap_clear(map, ith_start, num_bits);
+ }
+}
+
+/**
+ * dwc2_get_ls_map() - Get the map used for the given qh
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ *
+ * We'll always get the periodic map out of our TT. Note that even if we're
+ * running the host straight in low speed / full speed mode it appears as if
+ * a TT is allocated for us, so we'll use it. If that ever changes we can
+ * add logic here to get a map out of "hsotg" if !qh->do_split.
+ *
+ * Returns: the map or NULL if a map couldn't be found.
+ */
+static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ unsigned long *map;
+
+ /* Don't expect to be missing a TT and be doing low speed scheduling */
+ if (WARN_ON(!qh->dwc_tt))
+ return NULL;
+
+ /* Get the map and adjust if this is a multi_tt hub */
+ map = qh->dwc_tt->periodic_bitmaps;
+ if (qh->dwc_tt->usb_tt->multi)
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
+
+ return map;
+}
+
+#ifdef DWC2_PRINT_SCHEDULE
+/*
+ * cat_printf() - A printf() + strcat() helper
+ *
+ * This is useful for concatenating a bunch of strings where each string is
+ * constructed using printf.
+ *
+ * @buf: The destination buffer; will be updated to point after the printed
+ * data.
+ * @size: The number of bytes in the buffer (includes space for '\0').
+ * @fmt: The format for printf.
+ * @...: The args for printf.
+ */
+static __printf(3, 4)
+void cat_printf(char **buf, size_t *size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ if (*size == 0)
+ return;
+
+ va_start(args, fmt);
+ i = vsnprintf(*buf, *size, fmt, args);
+ va_end(args);
+
+ if (i >= *size) {
+ (*buf)[*size - 1] = '\0';
+ *buf += *size;
+ *size = 0;
+ } else {
+ *buf += i;
+ *size -= i;
+ }
+}
+
+/*
+ * pmap_print() - Print the given periodic map
+ *
+ * Will attempt to print out the periodic schedule.
+ *
+ * @map: See pmap_schedule().
+ * @bits_per_period: See pmap_schedule().
+ * @periods_in_map: See pmap_schedule().
+ * @period_name: The name of 1 period, like "uFrame"
+ * @units: The name of the units, like "us".
+ * @print_fn: The function to call for printing.
+ * @print_data: Opaque data to pass to the print function.
+ */
+static void pmap_print(unsigned long *map, int bits_per_period,
+ int periods_in_map, const char *period_name,
+ const char *units,
+ void (*print_fn)(const char *str, void *data),
+ void *print_data)
+{
+ int period;
+
+ for (period = 0; period < periods_in_map; period++) {
+ char tmp[64];
+ char *buf = tmp;
+ size_t buf_size = sizeof(tmp);
+ int period_start = period * bits_per_period;
+ int period_end = period_start + bits_per_period;
+ int start = 0;
+ int count = 0;
+ bool printed = false;
+ int i;
+
+ for (i = period_start; i < period_end + 1; i++) {
+ /* Handle case when ith bit is set */
+ if (i < period_end &&
+ bitmap_find_next_zero_area(map, i + 1,
+ i, 1, 0) != i) {
+ if (count == 0)
+ start = i - period_start;
+ count++;
+ continue;
+ }
+
+ /* ith bit isn't set; don't care if count == 0 */
+ if (count == 0)
+ continue;
+
+ if (!printed)
+ cat_printf(&buf, &buf_size, "%s %d: ",
+ period_name, period);
+ else
+ cat_printf(&buf, &buf_size, ", ");
+ printed = true;
+
+ cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
+ units, start + count - 1, units);
+ count = 0;
+ }
+
+ if (printed)
+ print_fn(tmp, print_data);
+ }
+}
+
+struct dwc2_qh_print_data {
+ struct dwc2_hsotg *hsotg;
+ struct dwc2_qh *qh;
+};
+
+/**
+ * dwc2_qh_print() - Helper function for dwc2_qh_schedule_print()
+ *
+ * @str: The string to print
+ * @data: A pointer to a struct dwc2_qh_print_data
+ */
+static void dwc2_qh_print(const char *str, void *data)
+{
+ struct dwc2_qh_print_data *print_data = data;
+
+ dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
+}
+
+/**
+ * dwc2_qh_schedule_print() - Print the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH to print.
+ */
+static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ struct dwc2_qh_print_data print_data = { hsotg, qh };
+ int i;
+
+ /*
+	 * The printing functions are quite slow and inefficient, so this
+	 * whole helper is compiled only when DWC2_PRINT_SCHEDULE is defined.
+ */
+
+ if (qh->schedule_low_speed) {
+ unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+
+ dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
+ qh, qh->device_us,
+ DWC2_ROUND_US_TO_SLICE(qh->device_us),
+ DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
+
+ if (map) {
+ dwc2_sch_dbg(hsotg,
+ "QH=%p Whole low/full speed map %p now:\n",
+ qh, map);
+ pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+ DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
+ dwc2_qh_print, &print_data);
+ }
+ }
+
+ for (i = 0; i < qh->num_hs_transfers; i++) {
+ struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
+ int uframe = trans_time->start_schedule_us /
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+ int rel_us = trans_time->start_schedule_us %
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+ dwc2_sch_dbg(hsotg,
+ "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
+ qh, i, trans_time->duration_us, uframe, rel_us);
+ }
+ if (qh->num_hs_transfers) {
+ dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
+ pmap_print(hsotg->hs_periodic_bitmap,
+ DWC2_HS_PERIODIC_US_PER_UFRAME,
+ DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
+ dwc2_qh_print, &print_data);
+ }
+}
+#else
+static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+					  struct dwc2_qh *qh) {}
+#endif
+
+/**
+ * dwc2_ls_pmap_schedule() - Schedule a low speed QH
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ * @search_slice: We'll start trying to schedule at the passed slice.
+ * Remember that slices are the units of the low speed
+ * schedule (think 25us or so).
+ *
+ * Wraps pmap_schedule() with the right parameters for low speed scheduling.
+ *
+ * Normally we schedule low speed devices on the map associated with the TT.
+ *
+ * Returns: 0 for success or an error code.
+ */
+static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ int search_slice)
+{
+ int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
+ unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+ int slice;
+
+ if (!map)
+ return -EINVAL;
+
+ /*
+ * Schedule on the proper low speed map with our low speed scheduling
+ * parameters. Note that we use the "device_interval" here since
+ * we want the low speed interval and the only way we'd be in this
+ * function is if the device is low speed.
+ *
+ * If we happen to be doing low speed and high speed scheduling for the
+ * same transaction (AKA we have a split) we always do low speed first.
+ * That means we can always pass "false" for only_one_period (that
+	 * parameter is only useful when we're trying to get one schedule to
+ * match what we already planned in the other schedule).
+ */
+ slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+ DWC2_LS_SCHEDULE_FRAMES, slices,
+ qh->device_interval, search_slice, false);
+
+ if (slice < 0)
+ return slice;
+
+ qh->ls_start_schedule_slice = slice;
+ return 0;
+}
+
+/**
+ * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule()
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
+ unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+
+	/* If there's no map, scheduling must have failed; nothing to undo */
+ if (!map)
+ return;
+
+ pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+ DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
+ qh->ls_start_schedule_slice);
+}
+
+/**
+ * dwc2_hs_pmap_schedule - Schedule in the main high speed schedule
+ *
+ * This will schedule something on the main dwc2 schedule.
+ *
+ * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
+ * update this with the result upon success. We also use the duration from
+ * the same structure.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ * @only_one_period: If true we will limit ourselves to just looking at
+ * one period (aka one 100us chunk). This is used if we have
+ * already scheduled something on the low speed schedule and
+ * need to find something that matches on the high speed one.
+ * @index: The index into qh->hs_transfers that we're working with.
+ *
+ * Returns: 0 for success or an error code. Upon success the
+ * dwc2_hs_transfer_time specified by "index" will be updated.
+ */
+static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ bool only_one_period, int index)
+{
+ struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
+ int us;
+
+ us = pmap_schedule(hsotg->hs_periodic_bitmap,
+ DWC2_HS_PERIODIC_US_PER_UFRAME,
+ DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
+ qh->host_interval, trans_time->start_schedule_us,
+ only_one_period);
+
+ if (us < 0)
+ return us;
+
+ trans_time->start_schedule_us = us;
+ return 0;
+}
+
+/**
+ * dwc2_hs_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ * @index: Transfer index
+ */
+static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, int index)
+{
+ struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
+
+ pmap_unschedule(hsotg->hs_periodic_bitmap,
+ DWC2_HS_PERIODIC_US_PER_UFRAME,
+ DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
+ qh->host_interval, trans_time->start_schedule_us);
+}
+
+/**
+ * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer.
+ *
+ * This is the most complicated thing in USB. We have to find matching time
+ * in both the global high speed schedule for the port and the low speed
+ * schedule for the TT associated with the given device.
+ *
+ * Being here means that the host must be running in high speed mode and the
+ * device is in low or full speed mode (and behind a hub).
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ int bytecount = qh->maxp_mult * qh->maxp;
+ int ls_search_slice;
+ int err = 0;
+ int host_interval_in_sched;
+
+ /*
+ * The interval (how often to repeat) in the actual host schedule.
+ * See pmap_schedule() for gcd() explanation.
+ */
+ host_interval_in_sched = gcd(qh->host_interval,
+ DWC2_HS_SCHEDULE_UFRAMES);
+
+ /*
+ * We always try to find space in the low speed schedule first, then
+ * try to find high speed time that matches. If we don't, we'll bump
+ * up the place we start searching in the low speed schedule and try
+ * again. To start we'll look right at the beginning of the low speed
+ * schedule.
+ *
+ * Note that this will tend to front-load the high speed schedule.
+ * We may eventually want to try to avoid this by either considering
+ * both schedules together or doing some sort of round robin.
+ */
+ ls_search_slice = 0;
+
+ while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
+ int start_s_uframe;
+ int ssplit_s_uframe;
+ int second_s_uframe;
+ int rel_uframe;
+ int first_count;
+ int middle_count;
+ int end_count;
+ int first_data_bytes;
+ int other_data_bytes;
+ int i;
+
+ if (qh->schedule_low_speed) {
+ err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
+
+ /*
+ * If we got an error here there's no other magic we
+ * can do, so bail. All the looping above is only
+ * helpful to redo things if we got a low speed slot
+ * and then couldn't find a matching high speed slot.
+ */
+ if (err)
+ return err;
+ } else {
+			/* A split transfer should always need low speed scheduling */
+ WARN_ON_ONCE(1);
+ }
+
+ /*
+ * This will give us a number 0 - 7 if
+ * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ...
+ */
+ start_s_uframe = qh->ls_start_schedule_slice /
+ DWC2_SLICES_PER_UFRAME;
+
+ /* Get a number that's always 0 - 7 */
+ rel_uframe = (start_s_uframe % 8);
+
+ /*
+ * If we were going to start in uframe 7 then we would need to
+ * issue a start split in uframe 6, which spec says is not OK.
+ * Move on to the next full frame (assuming there is one).
+ *
+ * See 11.18.4 Host Split Transaction Scheduling Requirements
+ * bullet 1.
+ */
+ if (rel_uframe == 7) {
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+ ls_search_slice =
+ (qh->ls_start_schedule_slice /
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME;
+ continue;
+ }
+
+ /*
+ * For ISOC in:
+ * - start split (frame -1)
+ * - complete split w/ data (frame +1)
+ * - complete split w/ data (frame +2)
+ * - ...
+ * - complete split w/ data (frame +num_data_packets)
+ * - complete split w/ data (frame +num_data_packets+1)
+ * - complete split w/ data (frame +num_data_packets+2, max 8)
+ * ...though if frame was "0" then max is 7...
+ *
+ * For ISOC out we might need to do:
+ * - start split w/ data (frame -1)
+ * - start split w/ data (frame +0)
+ * - ...
+ * - start split w/ data (frame +num_data_packets-2)
+ *
+ * For INTERRUPT in we might need to do:
+ * - start split (frame -1)
+ * - complete split w/ data (frame +1)
+ * - complete split w/ data (frame +2)
+ * - complete split w/ data (frame +3, max 8)
+ *
+ * For INTERRUPT out we might need to do:
+ * - start split w/ data (frame -1)
+ * - complete split (frame +1)
+ * - complete split (frame +2)
+ * - complete split (frame +3, max 8)
+ *
+ * Start adjusting!
+ */
+ ssplit_s_uframe = (start_s_uframe +
+ host_interval_in_sched - 1) %
+ host_interval_in_sched;
+ if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
+ second_s_uframe = start_s_uframe;
+ else
+ second_s_uframe = start_s_uframe + 1;
+
+ /* First data transfer might not be all 188 bytes. */
+ first_data_bytes = 188 -
+ DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
+ DWC2_SLICES_PER_UFRAME),
+ DWC2_SLICES_PER_UFRAME);
+ if (first_data_bytes > bytecount)
+ first_data_bytes = bytecount;
+ other_data_bytes = bytecount - first_data_bytes;
+
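+ /*
+ * Worked example (assuming DWC2_SLICES_PER_UFRAME is 4, per the
+ * define earlier in this file): with bytecount == 300 and slice
+ * offset 0 this gives first_data_bytes == 188 and
+ * other_data_bytes == 112; slice offset 1 would instead give
+ * 188 - DIV_ROUND_UP(188, 4) == 141.
+ */
+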
+ /*
+ * For now, skip OUT xfers where first xfer is partial
+ *
+ * Main dwc2 code assumes:
+ * - INT transfers never get split in two.
+ * - ISOC transfers can always transfer 188 bytes the first
+ * time.
+ *
+ * Until that code is fixed, try again if the first transfer
+ * couldn't transfer everything.
+ *
+ * This code can be removed if/when the rest of dwc2 handles
+ * the above cases. Until it's fixed we just won't be able
+ * to schedule quite as tightly.
+ */
+ if (!qh->ep_is_in &&
+ (first_data_bytes != min_t(int, 188, bytecount))) {
+ dwc2_sch_dbg(hsotg,
+ "QH=%p avoiding broken 1st xfer (%d, %d)\n",
+ qh, first_data_bytes, bytecount);
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+ ls_search_slice = (start_s_uframe + 1) *
+ DWC2_SLICES_PER_UFRAME;
+ continue;
+ }
+
+ /* Start by assuming transfers for the bytes */
+ qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
+
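+ /*
+ * Continuing the worked example above (300 bytes as 188 + 112):
+ * that's 1 + DIV_ROUND_UP(112, 188) == 2 transfers before the
+ * per-type adjustments below.
+ */
+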
+ /*
+ * Everything except ISOC OUT has extra transfers. Rules are
+ * complicated. See 11.18.4 Host Split Transaction Scheduling
+ * Requirements bullet 3.
+ */
+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ if (rel_uframe == 6)
+ qh->num_hs_transfers += 2;
+ else
+ qh->num_hs_transfers += 3;
+
+ if (qh->ep_is_in) {
+ /*
+ * First is start split, middle/end is data.
+ * Allocate full data bytes for all data.
+ */
+ first_count = 4;
+ middle_count = bytecount;
+ end_count = bytecount;
+ } else {
+ /*
+ * First is data, middle/end is complete.
+ * First transfer and second can have data.
+ * Rest should just have complete split.
+ */
+ first_count = first_data_bytes;
+ middle_count = max_t(int, 4, other_data_bytes);
+ end_count = 4;
+ }
+ } else {
+ if (qh->ep_is_in) {
+ int last;
+
+ /* Account for the start split */
+ qh->num_hs_transfers++;
+
+ /* Calculate "L" value from spec */
+ last = rel_uframe + qh->num_hs_transfers + 1;
+
+ /* Start with basic case */
+ if (last <= 6)
+ qh->num_hs_transfers += 2;
+ else
+ qh->num_hs_transfers += 1;
+
+ /* Adjust downwards */
+ if (last >= 6 && rel_uframe == 0)
+ qh->num_hs_transfers--;
+
+ /* 1st = start; rest can contain data */
+ first_count = 4;
+ middle_count = min_t(int, 188, bytecount);
+ end_count = middle_count;
+ } else {
+ /* All contain data, last might be smaller */
+ first_count = first_data_bytes;
+ middle_count = min_t(int, 188,
+ other_data_bytes);
+ end_count = other_data_bytes % 188;
+ }
+ }
+
+ /* Assign durations per uFrame */
+ qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
+ for (i = 1; i < qh->num_hs_transfers - 1; i++)
+ qh->hs_transfers[i].duration_us =
+ HS_USECS_ISO(middle_count);
+ if (qh->num_hs_transfers > 1)
+ qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
+ HS_USECS_ISO(end_count);
+
+ /*
+ * Assign start us. The call below to dwc2_hs_pmap_schedule()
+ * will start with these numbers but may adjust within the same
+ * microframe.
+ */
+ qh->hs_transfers[0].start_schedule_us =
+ ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
+ for (i = 1; i < qh->num_hs_transfers; i++)
+ qh->hs_transfers[i].start_schedule_us =
+ ((second_s_uframe + i - 1) %
+ DWC2_HS_SCHEDULE_UFRAMES) *
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+ /* Try to schedule with filled in hs_transfers above */
+ for (i = 0; i < qh->num_hs_transfers; i++) {
+ err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
+ if (err)
+ break;
+ }
+
+ /* If we scheduled all w/out breaking out then we're all good */
+ if (i == qh->num_hs_transfers)
+ break;
+
+ for (; i >= 0; i--)
+ dwc2_hs_pmap_unschedule(hsotg, qh, i);
+
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+
+ /* Try again starting in the next microframe */
+ ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
+ }
+
+ if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
+ return -ENOSPC;
+
+ return 0;
+}
+
+/**
+ * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer.
+ *
+ * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean
+ * interface.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* In the non-split case, host and device times are the same */
+ WARN_ON(qh->host_us != qh->device_us);
+ WARN_ON(qh->host_interval != qh->device_interval);
+ WARN_ON(qh->num_hs_transfers != 1);
+
+ /* We'll have one transfer; init start to 0 before calling scheduler */
+ qh->hs_transfers[0].start_schedule_us = 0;
+ qh->hs_transfers[0].duration_us = qh->host_us;
+
+ return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
+}
+
+/**
+ * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
+ *
+ * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
+ * interface.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* In the non-split case, host and device times are the same */
+ WARN_ON(qh->host_us != qh->device_us);
+ WARN_ON(qh->host_interval != qh->device_interval);
+ WARN_ON(!qh->schedule_low_speed);
+
+ /* Run on the main low speed schedule (no split = no hub = no TT) */
+ return dwc2_ls_pmap_schedule(hsotg, qh, 0);
+}
+
+/**
+ * dwc2_uframe_schedule - Schedule a QH for a periodic xfer.
+ *
+ * Calls one of the 3 sub-functions depending on what type of transfer this QH
+ * is for. Also adds some printing.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int ret;
+
+ if (qh->dev_speed == USB_SPEED_HIGH)
+ ret = dwc2_uframe_schedule_hs(hsotg, qh);
+ else if (!qh->do_split)
+ ret = dwc2_uframe_schedule_ls(hsotg, qh);
+ else
+ ret = dwc2_uframe_schedule_split(hsotg, qh);
+
+ if (ret)
+ dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
+ else
+ dwc2_qh_schedule_print(hsotg, qh);
+
+ return ret;
+}
+
+/**
+ * dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule().
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int i;
+
+ for (i = 0; i < qh->num_hs_transfers; i++)
+ dwc2_hs_pmap_unschedule(hsotg, qh, i);
+
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+
+ dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
+}
+
+/**
+ * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
+ *
+ * Takes a qh that has already been scheduled (which means we know we have the
+ * bandwidth reserved for us) and sets the next_active_frame and the
+ * start_active_frame.
+ *
+ * This is expected to be called on qh's that weren't previously actively
+ * running. It just picks the next frame that we can fit into without any
+ * thought about the past.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for a periodic endpoint
+ *
+ */
+static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ u16 frame_number;
+ u16 earliest_frame;
+ u16 next_active_frame;
+ u16 relative_frame;
+ u16 interval;
+
+ /*
+ * Use the real frame number rather than the cached value as of the
+ * last SOF to give us a little extra slop.
+ */
+ frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ /*
+ * We wouldn't want to start any earlier than the next frame just in
+ * case the frame number ticks as we're doing this calculation.
+ *
+ * NOTE: if we could quantify how long till we actually get scheduled
+ * we might be able to avoid the "+ 1" by looking at the upper part of
+ * HFNUM (the FRREM field). For now we'll just use the + 1 though.
+ */
+ earliest_frame = dwc2_frame_num_inc(frame_number, 1);
+ next_active_frame = earliest_frame;
+
+ /* Get the "no microframe schduler" out of the way... */
+ if (!hsotg->params.uframe_sched) {
+ if (qh->do_split)
+ /* Splits are active at microframe 0 minus 1 */
+ next_active_frame |= 0x7;
+ goto exit;
+ }
+
+ if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
+ /*
+ * We're either at high speed or we're doing a split (which
+ * means we're talking high speed to a hub). In any case
+ * the first frame should be based on when the first scheduled
+ * event is.
+ */
+ WARN_ON(qh->num_hs_transfers < 1);
+
+ relative_frame = qh->hs_transfers[0].start_schedule_us /
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+ /* Adjust interval as per high speed schedule */
+ interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
+
+ } else {
+ /*
+ * Low or full speed directly on dwc2. Just about the same
+ * as high speed but on a different schedule and with slightly
+ * different adjustments. Note that this works because when
+ * the host and device are both low speed then frames in the
+ * controller tick at low speed.
+ */
+ relative_frame = qh->ls_start_schedule_slice /
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME;
+ interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
+ }
+
+ /* Scheduler messed up if frame is past interval */
+ WARN_ON(relative_frame >= interval);
+
+ /*
+ * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
+ * done the gcd(), so it's safe to move to the beginning of the current
+ * interval like this.
+ *
+ * After this we might be before earliest_frame, but don't worry,
+ * we'll fix it...
+ */
+ next_active_frame = (next_active_frame / interval) * interval;
+
+ /*
+ * Actually choose to start at the frame number we've been
+ * scheduled for.
+ */
+ next_active_frame = dwc2_frame_num_inc(next_active_frame,
+ relative_frame);
+
+ /*
+ * We actually need 1 frame before since the next_active_frame is
+ * the frame number we'll be put on the ready list and we won't be on
+ * the bus until 1 frame later.
+ */
+ next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);
+
+ /*
+ * By now we might actually be before the earliest_frame. Let's move
+ * up intervals until we're not.
+ */
+ while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
+ next_active_frame = dwc2_frame_num_inc(next_active_frame,
+ interval);
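+
+ /*
+ * Worked example: say earliest_frame is 37, interval is 4 and
+ * relative_frame is 2. Aligning down gives 36, adding the
+ * relative offset gives 38, and backing up 1 gives 37, which is
+ * not before earliest_frame, so no further bumping is needed.
+ */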
+
+exit:
+ qh->next_active_frame = next_active_frame;
+ qh->start_active_frame = next_active_frame;
+
+ dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
+ qh, frame_number, qh->next_active_frame);
+}
+
+/**
+ * dwc2_do_reserve() - Make a periodic reservation
+ *
+ * Try to allocate space in the periodic schedule. Depending on parameters
+ * this might use the microframe scheduler or the dumb scheduler.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer.
+ *
+ * Returns: 0 upon success; error upon failure.
+ */
+static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int status;
+
+ if (hsotg->params.uframe_sched) {
+ status = dwc2_uframe_schedule(hsotg, qh);
+ } else {
+ status = dwc2_periodic_channel_available(hsotg);
+ if (status) {
+ dev_info(hsotg->dev,
+ "%s: No host channel available for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ status = dwc2_check_periodic_bandwidth(hsotg, qh);
+ }
+
+ if (status) {
+ dev_dbg(hsotg->dev,
+ "%s: Insufficient periodic bandwidth for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ if (!hsotg->params.uframe_sched)
+ /* Reserve periodic channel */
+ hsotg->periodic_channels++;
+
+ /* Update claimed usecs per (micro)frame */
+ hsotg->periodic_usecs += qh->host_us;
+
+ dwc2_pick_first_frame(hsotg, qh);
+
+ return 0;
+}
+
+/**
+ * dwc2_do_unreserve() - Actually release the periodic reservation
+ *
+ * This function actually releases the periodic bandwidth that was reserved
+ * by the given qh.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ assert_spin_locked(&hsotg->lock);
+
+ WARN_ON(!qh->unreserve_pending);
+
+ /* No more unreserve pending--we're doing it */
+ qh->unreserve_pending = false;
+
+ if (WARN_ON(!list_empty(&qh->qh_list_entry)))
+ list_del_init(&qh->qh_list_entry);
+
+ /* Update claimed usecs per (micro)frame */
+ hsotg->periodic_usecs -= qh->host_us;
+
+ if (hsotg->params.uframe_sched) {
+ dwc2_uframe_unschedule(hsotg, qh);
+ } else {
+ /* Release periodic channel reservation */
+ hsotg->periodic_channels--;
+ }
+}
+
+/**
+ * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
+ *
+ * According to the kernel doc for usb_submit_urb() (specifically the part about
+ * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
+ * long as a device driver keeps submitting. Since we're using HCD_BH to give
+ * back the URB we need to give the driver a little bit of time before we
+ * release the reservation. This worker is called after the appropriate
+ * delay.
+ *
+ * @t: Address to a qh unreserve_work.
+ */
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
+{
+ struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
+ struct dwc2_hsotg *hsotg = qh->hsotg;
+ unsigned long flags;
+
+ /*
+ * Wait for the lock, or for us to be scheduled again. We
+ * could be scheduled again if:
+ * - We started executing but didn't get the lock yet.
+ * - A new reservation came in, but cancel didn't take effect
+ * because we already started executing.
+ * - The timer has been kicked again.
+ * In that case cancel and wait for the next call.
+ */
+ while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
+ if (timer_pending(&qh->unreserve_timer))
+ return;
+ }
+
+ /*
+ * Might be no more unreserve pending if:
+ * - We started executing but didn't get the lock yet.
+ * - A new reservation came in, but cancel didn't take effect
+ * because we already started executing.
+ *
+ * We can't put this in the loop above because unreserve_pending needs
+ * to be accessed under lock, so we can only check it once we got the
+ * lock.
+ */
+ if (qh->unreserve_pending)
+ dwc2_do_unreserve(hsotg, qh);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/**
+ * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
+ * host channel is large enough to handle the maximum data transfer in a single
+ * (micro)frame for a periodic transfer
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for a periodic endpoint
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ u32 max_xfer_size;
+ u32 max_channel_xfer_size;
+ int status = 0;
+
+ max_xfer_size = qh->maxp * qh->maxp_mult;
+ max_channel_xfer_size = hsotg->params.max_transfer_size;
+
+ if (max_xfer_size > max_channel_xfer_size) {
+ dev_err(hsotg->dev,
+ "%s: Periodic xfer length %d > max xfer length for channel %d\n",
+ __func__, max_xfer_size, max_channel_xfer_size);
+ status = -ENOSPC;
+ }
+
+ return status;
+}
+
+/**
+ * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
+ * the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer. The QH should already contain the
+ * scheduling information.
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int status;
+
+ status = dwc2_check_max_xfer_size(hsotg, qh);
+ if (status) {
+ dev_dbg(hsotg->dev,
+ "%s: Channel max transfer size too small for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ /* Cancel pending unreserve; if canceled OK, unreserve was pending */
+ if (del_timer(&qh->unreserve_timer))
+ WARN_ON(!qh->unreserve_pending);
+
+ /*
+ * Only need to reserve if there's not an unreserve pending, since if an
+ * unreserve is pending then by definition our old reservation is still
+ * valid. Unreserve might still be pending even if we didn't cancel if
+ * dwc2_unreserve_timer_fn() already started. Code in the timer handles
+ * that case.
+ */
+ if (!qh->unreserve_pending) {
+ status = dwc2_do_reserve(hsotg, qh);
+ if (status)
+ return status;
+ } else {
+ /*
+ * It might have been a while, so make sure that frame_number
+ * is still good. Note: we could also try to use the similar
+ * dwc2_next_periodic_start() but that schedules much more
+ * tightly and we might need to hurry and queue things up.
+ */
+ if (dwc2_frame_num_le(qh->next_active_frame,
+ hsotg->frame_number))
+ dwc2_pick_first_frame(hsotg, qh);
+ }
+
+ qh->unreserve_pending = 0;
+
+ if (hsotg->params.dma_desc_enable)
+ /* Don't rely on SOF and start in ready schedule */
+ list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
+ else
+ /* Always start in inactive schedule */
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_inactive);
+
+ return 0;
+}
+
+/**
+ * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
+ * from the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer
+ */
+static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ bool did_modify;
+
+ assert_spin_locked(&hsotg->lock);
+
+ /*
+ * Schedule the unreserve to happen in a little bit. Cases here:
+ * - Unreserve worker might be sitting there waiting to grab the lock.
+ * In this case it will notice it's been scheduled again and will
+ * quit.
+ * - Unreserve worker might not be scheduled.
+ *
+ * We should never already be scheduled since dwc2_schedule_periodic()
+ * should have canceled the scheduled unreserve timer (hence the
+ * warning on did_modify).
+ *
+ * We add + 1 to the timer to guarantee that at least 1 jiffy has
+ * passed (otherwise the jiffy counter might tick right after we
+ * read it and we'd get no delay).
+ */
+ did_modify = mod_timer(&qh->unreserve_timer,
+ jiffies + DWC2_UNRESERVE_DELAY + 1);
+ WARN_ON(did_modify);
+ qh->unreserve_pending = 1;
+
+ list_del_init(&qh->qh_list_entry);
+}
+
+/**
+ * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
+ *
+ * As per the spec, a NAK indicates that "a function is temporarily unable to
+ * transmit or receive data, but will eventually be able to do so without need
+ * of host intervention".
+ *
+ * That means that when we encounter a NAK we're supposed to retry.
+ *
+ * ...but if we retry right away (from the interrupt handler that saw the NAK)
+ * then we can end up with an interrupt storm (if the other side keeps NAKing
+ * us) because on slow enough CPUs it could take us longer to get out of the
+ * interrupt routine than it takes for the device to send another NAK. That
+ * leads to a constant stream of NAK interrupts and the CPU locks up.
+ *
+ * ...so instead of retrying right away in the case of a NAK we'll set a timer
+ * to retry some time later. This function handles that timer and moves the
+ * qh back to the "inactive" list, then queues transactions.
+ *
+ * @t: Pointer to wait_timer in a qh.
+ *
+ * Return: HRTIMER_NORESTART to not automatically restart this timer.
+ */
+static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
+{
+ struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
+ struct dwc2_hsotg *hsotg = qh->hsotg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * We'll set wait_timer_cancel to true if we want to cancel this
+ * operation in dwc2_hcd_qh_unlink().
+ */
+ if (!qh->wait_timer_cancel) {
+ enum dwc2_transaction_type tr_type;
+
+ qh->want_wait = false;
+
+ list_move(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_inactive);
+
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE)
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * dwc2_qh_init() - Initializes a QH structure
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: The QH to init
+ * @urb: Holds the information about the device/endpoint needed to initialize
+ * the QH
+ * @mem_flags: Flags for allocating memory.
+ */
+static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ struct dwc2_hcd_urb *urb, gfp_t mem_flags)
+{
+ int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+ u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+ bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
+ bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
+ bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
+ u32 hprt = dwc2_readl(hsotg, HPRT0);
+ u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+ bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
+ dev_speed != USB_SPEED_HIGH);
+ int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
+ int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
+ int bytecount = maxp_mult * maxp;
+ char *speed, *type;
+
+ /* Initialize QH */
+ qh->hsotg = hsotg;
+ timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
+ hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ qh->wait_timer.function = &dwc2_wait_timer_fn;
+ qh->ep_type = ep_type;
+ qh->ep_is_in = ep_is_in;
+
+ qh->data_toggle = DWC2_HC_PID_DATA0;
+ qh->maxp = maxp;
+ qh->maxp_mult = maxp_mult;
+ INIT_LIST_HEAD(&qh->qtd_list);
+ INIT_LIST_HEAD(&qh->qh_list_entry);
+
+ qh->do_split = do_split;
+ qh->dev_speed = dev_speed;
+
+ if (ep_is_int || ep_is_isoc) {
+ /* Compute scheduling parameters once and save them */
+ int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
+ struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
+ mem_flags,
+ &qh->ttport);
+ int device_ns;
+
+ qh->dwc_tt = dwc_tt;
+
+ qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
+ ep_is_isoc, bytecount));
+ device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
+ ep_is_isoc, bytecount);
+
+ if (do_split && dwc_tt)
+ device_ns += dwc_tt->usb_tt->think_time;
+ qh->device_us = NS_TO_US(device_ns);
+
+ qh->device_interval = urb->interval;
+ qh->host_interval = urb->interval * (do_split ? 8 : 1);
+
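+ /*
+ * E.g. a full speed interrupt endpoint behind a high speed hub
+ * asking for a 4 frame interval is scheduled on the host side
+ * every 4 * 8 == 32 uframes.
+ */
+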
+ /*
+ * Schedule low speed if we're running the host in low or
+ * full speed OR if we've got a "TT" to deal with to access this
+ * device.
+ */
+ qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
+ dwc_tt;
+
+ if (do_split) {
+ /* We won't know num transfers until we schedule */
+ qh->num_hs_transfers = -1;
+ } else if (dev_speed == USB_SPEED_HIGH) {
+ qh->num_hs_transfers = 1;
+ } else {
+ qh->num_hs_transfers = 0;
+ }
+
+ /* We'll schedule later when we have something to do */
+ }
+
+ switch (dev_speed) {
+ case USB_SPEED_LOW:
+ speed = "low";
+ break;
+ case USB_SPEED_FULL:
+ speed = "full";
+ break;
+ case USB_SPEED_HIGH:
+ speed = "high";
+ break;
+ default:
+ speed = "?";
+ break;
+ }
+
+ switch (qh->ep_type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ type = "isochronous";
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ type = "interrupt";
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ type = "control";
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ type = "bulk";
+ break;
+ default:
+ type = "?";
+ break;
+ }
+
+ dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
+ speed, bytecount);
+ dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
+ dwc2_hcd_get_dev_addr(&urb->pipe_info),
+ dwc2_hcd_get_ep_num(&urb->pipe_info),
+ ep_is_in ? "IN" : "OUT");
+ if (ep_is_int || ep_is_isoc) {
+ dwc2_sch_dbg(hsotg,
+ "QH=%p ...duration: host=%d us, device=%d us\n",
+ qh, qh->host_us, qh->device_us);
+ dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
+ qh, qh->host_interval, qh->device_interval);
+ if (qh->schedule_low_speed)
+ dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
+ qh, dwc2_get_ls_map(hsotg, qh));
+ }
+}
+
+/**
+ * dwc2_hcd_qh_create() - Allocates and initializes a QH
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @urb: Holds the information about the device/endpoint needed
+ * to initialize the QH
+ * @mem_flags: Flags for allocating memory.
+ *
+ * Return: Pointer to the newly allocated QH, or NULL on error
+ */
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb,
+ gfp_t mem_flags)
+{
+ struct dwc2_qh *qh;
+
+ if (!urb->priv)
+ return NULL;
+
+ /* Allocate memory */
+ qh = kzalloc(sizeof(*qh), mem_flags);
+ if (!qh)
+ return NULL;
+
+ dwc2_qh_init(hsotg, qh, urb, mem_flags);
+
+ if (hsotg->params.dma_desc_enable &&
+ dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
+ dwc2_hcd_qh_free(hsotg, qh);
+ return NULL;
+ }
+
+ return qh;
+}
+
+/**
+ * dwc2_hcd_qh_free() - Frees the QH
+ *
+ * @hsotg: HCD instance
+ * @qh: The QH to free
+ *
+ * QH should already be removed from the list. QTD list should already be empty
+ * if called from URB Dequeue.
+ *
+ * Must NOT be called with interrupt disabled or spinlock held
+ */
+void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* Make sure any unreserve work is finished. */
+ if (del_timer_sync(&qh->unreserve_timer)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_do_unreserve(hsotg, qh);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+
+ /*
+ * We don't have the lock so we can safely wait until the wait timer
+ * finishes. Of course, at this point in time we'd better have set
+ * wait_timer_cancel to true so if this timer was still pending it
+ * won't do anything anyway, but we want it to finish before we free
+ * memory.
+ */
+ hrtimer_cancel(&qh->wait_timer);
+
+ dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
+
+ if (qh->desc_list)
+ dwc2_hcd_qh_free_ddma(hsotg, qh);
+ else if (hsotg->unaligned_cache && qh->dw_align_buf)
+ kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
+ kfree(qh);
+}
+
+/**
+ * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
+ * schedule if it is not already in the schedule. If the QH is already in
+ * the schedule, no action is taken.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: The QH to add
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int status;
+ u32 intr_mask;
+ ktime_t delay;
+
+ if (dbg_qh(qh))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ if (!list_empty(&qh->qh_list_entry))
+ /* QH already in a schedule */
+ return 0;
+
+ /* Add the new QH to the appropriate schedule */
+ if (dwc2_qh_is_non_per(qh)) {
+ /* Schedule right away */
+ qh->start_active_frame = hsotg->frame_number;
+ qh->next_active_frame = qh->start_active_frame;
+
+ if (qh->want_wait) {
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_waiting);
+ qh->wait_timer_cancel = false;
+ delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
+ hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
+ } else {
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_inactive);
+ }
+ return 0;
+ }
+
+ status = dwc2_schedule_periodic(hsotg, qh);
+ if (status)
+ return status;
+ if (!hsotg->periodic_qh_count) {
+ intr_mask = dwc2_readl(hsotg, GINTMSK);
+ intr_mask |= GINTSTS_SOF;
+ dwc2_writel(hsotg, intr_mask, GINTMSK);
+ }
+ hsotg->periodic_qh_count++;
+
+ return 0;
+}
+
+/**
+ * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
+ * schedule. Memory is not freed.
+ *
+ * @hsotg: The HCD state structure
+ * @qh: QH to remove from schedule
+ */
+void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ u32 intr_mask;
+
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ /* If the wait_timer is pending, this will stop it from acting */
+ qh->wait_timer_cancel = true;
+
+ if (list_empty(&qh->qh_list_entry))
+ /* QH is not in a schedule */
+ return;
+
+ if (dwc2_qh_is_non_per(qh)) {
+ if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
+ hsotg->non_periodic_qh_ptr =
+ hsotg->non_periodic_qh_ptr->next;
+ list_del_init(&qh->qh_list_entry);
+ return;
+ }
+
+ dwc2_deschedule_periodic(hsotg, qh);
+ hsotg->periodic_qh_count--;
+ if (!hsotg->periodic_qh_count &&
+ !hsotg->params.dma_desc_enable) {
+ intr_mask = dwc2_readl(hsotg, GINTMSK);
+ intr_mask &= ~GINTSTS_SOF;
+ dwc2_writel(hsotg, intr_mask, GINTMSK);
+ }
+}
+
+/**
+ * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
+ *
+ * This is called for setting next_active_frame for periodic splits for all but
+ * the first packet of the split. Confusing? I thought so...
+ *
+ * Periodic splits are single low/full speed transfers that we end up splitting
+ * up into several high speed transfers. They always fit into one full (1 ms)
+ * frame but might be split over several microframes (125 us each). We need to
+ * put each of the parts on a very specific high speed frame.
+ *
+ * This function figures out where the next active uFrame needs to be.
+ *
+ * @hsotg: The HCD state structure
+ * @qh: QH for the periodic transfer.
+ * @frame_number: The current frame number.
+ *
+ * Return: number missed by (or 0 if we didn't miss).
+ */
+static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, u16 frame_number)
+{
+ u16 old_frame = qh->next_active_frame;
+ u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
+ int missed = 0;
+ u16 incr;
+
+ /*
+ * See dwc2_uframe_schedule_split() for split scheduling.
+ *
+ * Basically: increment 1 normally, but 2 right after the start split
+ * (except for ISOC out).
+ */
+ if (old_frame == qh->start_active_frame &&
+ !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
+ incr = 2;
+ else
+ incr = 1;
+
+ qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
+
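+ /*
+ * E.g. for an INT IN split with old_frame == start_active_frame:
+ * we jump by 2 (no complete split is issued in the uframe right
+ * after the start split); every later call advances by 1.
+ */
+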
+ /*
+ * Note that it's OK for frame_number to be 1 frame past
+ * next_active_frame. Remember that next_active_frame is supposed to
+ * be 1 frame _before_ when we want to be scheduled. If we're 1 frame
+ * past it just means schedule ASAP.
+ *
+ * It's _not_ OK, however, if we're more than one frame past.
+ */
+ if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
+ /*
+ * OOPS, we missed. That's actually pretty bad since
+ * the hub will be unhappy; try ASAP I guess.
+ */
+ missed = dwc2_frame_num_dec(prev_frame_number,
+ qh->next_active_frame);
+ qh->next_active_frame = frame_number;
+ }
+
+ return missed;
+}
+
+/**
+ * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
+ *
+ * This is called for setting next_active_frame for a periodic transfer for
+ * all cases other than midway through a periodic split. This will also update
+ * start_active_frame.
+ *
+ * Since we _always_ keep start_active_frame as the start of the previous
+ * transfer this is normally pretty easy: we just add our interval to
+ * start_active_frame and we've got our answer.
+ *
+ * The tricks come into play if we miss. In that case we'll look for the next
+ * slot we can fit into.
+ *
+ * @hsotg: The HCD state structure
+ * @qh: QH for the periodic transfer.
+ * @frame_number: The current frame number.
+ *
+ * Return: number missed by (or 0 if we didn't miss).
+ */
+static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, u16 frame_number)
+{
+ int missed = 0;
+ u16 interval = qh->host_interval;
+ u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
+
+ qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
+ interval);
+
+ /*
+ * The dwc2_frame_num_gt() function used below won't work terribly well
+ * if we just incremented by a really large interval since the
+ * frame counter only goes to 0x3fff. It's terribly unlikely that we
+ * will have missed in this case anyway. Just go to exit. If we want
+ * to try to do better we'll need to keep track of a bigger counter
+ * somewhere in the driver and handle overflows.
+ */
+ if (interval >= 0x1000)
+ goto exit;
+
+ /*
+ * Test for misses, which is when it's too late to schedule.
+ *
+ * A few things to note:
+ * - We compare against prev_frame_number since start_active_frame
+ * and next_active_frame are always 1 frame before we want things
+ * to be active and we assume we can still get scheduled in the
+ * current frame number.
+ * - It's possible for start_active_frame (now incremented) to be
+ * next_active_frame if we got an EO MISS (even_odd miss) which
+ * basically means that we detected there wasn't enough time for
+ * the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
+ * at the last second. We want to make sure we don't schedule
+ * another transfer for the same frame. My test webcam doesn't seem
+ * terribly upset by missing a transfer but really doesn't like when
+ * we do two transfers in the same frame.
+ * - Some misses are expected. Specifically, in order to work
+ * perfectly dwc2 really has quite spectacular interrupt latency
+ * requirements. It needs to be able to handle its interrupts
+ * completely within 125 us of them being asserted. That not only
+ * means that the dwc2 interrupt handler needs to be fast but it
+ * means that nothing else in the system can block dwc2 for a long
+ * time. We can help with the dwc2 parts of this, but it's hard to
+ * guarantee that a system will have interrupt latency < 125 us, so
+ * we have to be robust to some misses.
+ */
+ if (qh->start_active_frame == qh->next_active_frame ||
+ dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
+ u16 ideal_start = qh->start_active_frame;
+ int periods_in_map;
+
+ /*
+ * Adjust interval as per gcd with map size.
+ * See pmap_schedule() for more details here.
+ */
+ if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
+ periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
+ else
+ periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
+ interval = gcd(interval, periods_in_map);
+
+ do {
+ qh->start_active_frame = dwc2_frame_num_inc(
+ qh->start_active_frame, interval);
+ } while (dwc2_frame_num_gt(prev_frame_number,
+ qh->start_active_frame));
+
+ missed = dwc2_frame_num_dec(qh->start_active_frame,
+ ideal_start);
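+
+ /*
+ * For example: if the incremented start_active_frame
+ * (ideal_start) is 100, interval is 4 and prev_frame_number is
+ * 105, we step 100 -> 104 -> 108; 108 is no longer before 105,
+ * so we run there and report missed == 8.
+ */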
+ }
+
+exit:
+ qh->next_active_frame = qh->start_active_frame;
+
+ return missed;
+}
+
+/*
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ int sched_next_periodic_split)
+{
+ u16 old_frame = qh->next_active_frame;
+ u16 frame_number;
+ int missed;
+
+ if (dbg_qh(qh))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ if (dwc2_qh_is_non_per(qh)) {
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ if (!list_empty(&qh->qtd_list))
+ /* Add back to inactive/waiting non-periodic schedule */
+ dwc2_hcd_qh_add(hsotg, qh);
+ return;
+ }
+
+ /*
+ * Use the real frame number rather than the cached value as of the
+ * last SOF just to get us a little closer to reality. Note that this
+ * means we don't actually know if we've already handled the SOF
+ * interrupt for this frame.
+ */
+ frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ if (sched_next_periodic_split)
+ missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
+ else
+ missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
+
+ dwc2_sch_vdbg(hsotg,
+ "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
+ qh, sched_next_periodic_split, frame_number, old_frame,
+ qh->next_active_frame,
+ dwc2_frame_num_dec(qh->next_active_frame, old_frame),
+ missed, missed ? "MISS" : "");
+
+ if (list_empty(&qh->qtd_list)) {
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ return;
+ }
+
+ /*
+ * Remove from periodic_sched_queued and move to
+ * appropriate queue
+ *
+ * Note: we purposely use the frame_number from the "hsotg" structure
+ * since we know SOF interrupt will handle future frames.
+ */
+ if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_ready);
+ else
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_inactive);
+}
+
+/**
+ * dwc2_hcd_qtd_init() - Initializes a QTD structure
+ *
+ * @qtd: The QTD to initialize
+ * @urb: The associated URB
+ */
+void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
+{
+ qtd->urb = urb;
+ if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
+ USB_ENDPOINT_XFER_CONTROL) {
+ /*
+ * The only time the QTD data toggle is used is on the data
+ * phase of control transfers. This phase always starts with
+ * DATA1.
+ */
+ qtd->data_toggle = DWC2_HC_PID_DATA1;
+ qtd->control_phase = DWC2_CONTROL_SETUP;
+ }
+
+ /* Start split */
+ qtd->complete_split = 0;
+ qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
+ qtd->isoc_split_offset = 0;
+ qtd->in_process = 0;
+
+ /* Store the qtd ptr in the urb to reference the QTD */
+ urb->qtd = qtd;
+}
+
+/**
+ * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
+ * Caller must hold driver lock.
+ *
+ * @hsotg: The DWC HCD structure
+ * @qtd: The QTD to add
+ * @qh: Queue head to add qtd to
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * If the QH to which the QTD is added is not currently scheduled, it is placed
+ * into the proper schedule based on its EP type.
+ */
+int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ struct dwc2_qh *qh)
+{
+ int retval;
+
+ if (unlikely(!qh)) {
+ dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ retval = dwc2_hcd_qh_add(hsotg, qh);
+ if (retval)
+ goto fail;
+
+ qtd->qh = qh;
+ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
+
+ return 0;
+fail:
+ return retval;
+}
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
new file mode 100644
index 000000000..13abdd5f6
--- /dev/null
+++ b/drivers/usb/dwc2/hw.h
@@ -0,0 +1,875 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * hw.h - DesignWare HS OTG Controller hardware definitions
+ *
+ * Copyright 2004-2013 Synopsys, Inc.
+ */
+
+#ifndef __DWC2_HW_H__
+#define __DWC2_HW_H__
+
+#define HSOTG_REG(x) (x)
+
+#define GOTGCTL HSOTG_REG(0x000)
+#define GOTGCTL_CHIRPEN BIT(27)
+#define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22)
+#define GOTGCTL_MULT_VALID_BC_SHIFT 22
+#define GOTGCTL_CURMODE_HOST BIT(21)
+#define GOTGCTL_OTGVER BIT(20)
+#define GOTGCTL_BSESVLD BIT(19)
+#define GOTGCTL_ASESVLD BIT(18)
+#define GOTGCTL_DBNC_SHORT BIT(17)
+#define GOTGCTL_CONID_B BIT(16)
+#define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15)
+#define GOTGCTL_DEVHNPEN BIT(11)
+#define GOTGCTL_HSTSETHNPEN BIT(10)
+#define GOTGCTL_HNPREQ BIT(9)
+#define GOTGCTL_HSTNEGSCS BIT(8)
+#define GOTGCTL_BVALOVAL BIT(7)
+#define GOTGCTL_BVALOEN BIT(6)
+#define GOTGCTL_AVALOVAL BIT(5)
+#define GOTGCTL_AVALOEN BIT(4)
+#define GOTGCTL_VBVALOVAL BIT(3)
+#define GOTGCTL_VBVALOEN BIT(2)
+#define GOTGCTL_SESREQ BIT(1)
+#define GOTGCTL_SESREQSCS BIT(0)
+
+#define GOTGINT HSOTG_REG(0x004)
+#define GOTGINT_DBNCE_DONE BIT(19)
+#define GOTGINT_A_DEV_TOUT_CHG BIT(18)
+#define GOTGINT_HST_NEG_DET BIT(17)
+#define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9)
+#define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8)
+#define GOTGINT_SES_END_DET BIT(2)
+
+#define GAHBCFG HSOTG_REG(0x008)
+#define GAHBCFG_AHB_SINGLE BIT(23)
+#define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22)
+#define GAHBCFG_REM_MEM_SUPP BIT(21)
+#define GAHBCFG_P_TXF_EMP_LVL BIT(8)
+#define GAHBCFG_NP_TXF_EMP_LVL BIT(7)
+#define GAHBCFG_DMA_EN BIT(5)
+#define GAHBCFG_HBSTLEN_MASK (0xf << 1)
+#define GAHBCFG_HBSTLEN_SHIFT 1
+#define GAHBCFG_HBSTLEN_SINGLE 0
+#define GAHBCFG_HBSTLEN_INCR 1
+#define GAHBCFG_HBSTLEN_INCR4 3
+#define GAHBCFG_HBSTLEN_INCR8 5
+#define GAHBCFG_HBSTLEN_INCR16 7
+#define GAHBCFG_GLBL_INTR_EN BIT(0)
+#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \
+ GAHBCFG_NP_TXF_EMP_LVL | \
+ GAHBCFG_DMA_EN | \
+ GAHBCFG_GLBL_INTR_EN)
+
+#define GUSBCFG HSOTG_REG(0x00C)
+#define GUSBCFG_FORCEDEVMODE BIT(30)
+#define GUSBCFG_FORCEHOSTMODE BIT(29)
+#define GUSBCFG_TXENDDELAY BIT(28)
+#define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27)
+#define GUSBCFG_ICUSBCAP BIT(26)
+#define GUSBCFG_ULPI_INT_PROT_DIS BIT(25)
+#define GUSBCFG_INDICATORPASSTHROUGH BIT(24)
+#define GUSBCFG_INDICATORCOMPLEMENT BIT(23)
+#define GUSBCFG_TERMSELDLPULSE BIT(22)
+#define GUSBCFG_ULPI_INT_VBUS_IND BIT(21)
+#define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20)
+#define GUSBCFG_ULPI_CLK_SUSP_M BIT(19)
+#define GUSBCFG_ULPI_AUTO_RES BIT(18)
+#define GUSBCFG_ULPI_FS_LS BIT(17)
+#define GUSBCFG_OTG_UTMI_FS_SEL BIT(16)
+#define GUSBCFG_PHY_LP_CLK_SEL BIT(15)
+#define GUSBCFG_USBTRDTIM_MASK (0xf << 10)
+#define GUSBCFG_USBTRDTIM_SHIFT 10
+#define GUSBCFG_HNPCAP BIT(9)
+#define GUSBCFG_SRPCAP BIT(8)
+#define GUSBCFG_DDRSEL BIT(7)
+#define GUSBCFG_PHYSEL BIT(6)
+#define GUSBCFG_FSINTF BIT(5)
+#define GUSBCFG_ULPI_UTMI_SEL BIT(4)
+#define GUSBCFG_PHYIF16 BIT(3)
+#define GUSBCFG_PHYIF8 (0 << 3)
+#define GUSBCFG_TOUTCAL_MASK (0x7 << 0)
+#define GUSBCFG_TOUTCAL_SHIFT 0
+#define GUSBCFG_TOUTCAL_LIMIT 0x7
+#define GUSBCFG_TOUTCAL(_x) ((_x) << 0)
+
+#define GRSTCTL HSOTG_REG(0x010)
+#define GRSTCTL_AHBIDLE BIT(31)
+#define GRSTCTL_DMAREQ BIT(30)
+#define GRSTCTL_CSFTRST_DONE BIT(29)
+#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
+#define GRSTCTL_TXFNUM_SHIFT 6
+#define GRSTCTL_TXFNUM_LIMIT 0x1f
+#define GRSTCTL_TXFNUM(_x) ((_x) << 6)
+#define GRSTCTL_TXFFLSH BIT(5)
+#define GRSTCTL_RXFFLSH BIT(4)
+#define GRSTCTL_IN_TKNQ_FLSH BIT(3)
+#define GRSTCTL_FRMCNTRRST BIT(2)
+#define GRSTCTL_HSFTRST BIT(1)
+#define GRSTCTL_CSFTRST BIT(0)
+
+#define GINTSTS HSOTG_REG(0x014)
+#define GINTMSK HSOTG_REG(0x018)
+#define GINTSTS_WKUPINT BIT(31)
+#define GINTSTS_SESSREQINT BIT(30)
+#define GINTSTS_DISCONNINT BIT(29)
+#define GINTSTS_CONIDSTSCHNG BIT(28)
+#define GINTSTS_LPMTRANRCVD BIT(27)
+#define GINTSTS_PTXFEMP BIT(26)
+#define GINTSTS_HCHINT BIT(25)
+#define GINTSTS_PRTINT BIT(24)
+#define GINTSTS_RESETDET BIT(23)
+#define GINTSTS_FET_SUSP BIT(22)
+#define GINTSTS_INCOMPL_IP BIT(21)
+#define GINTSTS_INCOMPL_SOOUT BIT(21)
+#define GINTSTS_INCOMPL_SOIN BIT(20)
+#define GINTSTS_OEPINT BIT(19)
+#define GINTSTS_IEPINT BIT(18)
+#define GINTSTS_EPMIS BIT(17)
+#define GINTSTS_RESTOREDONE BIT(16)
+#define GINTSTS_EOPF BIT(15)
+#define GINTSTS_ISOUTDROP BIT(14)
+#define GINTSTS_ENUMDONE BIT(13)
+#define GINTSTS_USBRST BIT(12)
+#define GINTSTS_USBSUSP BIT(11)
+#define GINTSTS_ERLYSUSP BIT(10)
+#define GINTSTS_I2CINT BIT(9)
+#define GINTSTS_ULPI_CK_INT BIT(8)
+#define GINTSTS_GOUTNAKEFF BIT(7)
+#define GINTSTS_GINNAKEFF BIT(6)
+#define GINTSTS_NPTXFEMP BIT(5)
+#define GINTSTS_RXFLVL BIT(4)
+#define GINTSTS_SOF BIT(3)
+#define GINTSTS_OTGINT BIT(2)
+#define GINTSTS_MODEMIS BIT(1)
+#define GINTSTS_CURMODE_HOST BIT(0)
+
+#define GRXSTSR HSOTG_REG(0x01C)
+#define GRXSTSP HSOTG_REG(0x020)
+#define GRXSTS_FN_MASK (0x7f << 25)
+#define GRXSTS_FN_SHIFT 25
+#define GRXSTS_PKTSTS_MASK (0xf << 17)
+#define GRXSTS_PKTSTS_SHIFT 17
+#define GRXSTS_PKTSTS_GLOBALOUTNAK 1
+#define GRXSTS_PKTSTS_OUTRX 2
+#define GRXSTS_PKTSTS_HCHIN 2
+#define GRXSTS_PKTSTS_OUTDONE 3
+#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3
+#define GRXSTS_PKTSTS_SETUPDONE 4
+#define GRXSTS_PKTSTS_DATATOGGLEERR 5
+#define GRXSTS_PKTSTS_SETUPRX 6
+#define GRXSTS_PKTSTS_HCHHALTED 7
+#define GRXSTS_HCHNUM_MASK (0xf << 0)
+#define GRXSTS_HCHNUM_SHIFT 0
+#define GRXSTS_DPID_MASK (0x3 << 15)
+#define GRXSTS_DPID_SHIFT 15
+#define GRXSTS_BYTECNT_MASK (0x7ff << 4)
+#define GRXSTS_BYTECNT_SHIFT 4
+#define GRXSTS_EPNUM_MASK (0xf << 0)
+#define GRXSTS_EPNUM_SHIFT 0
+
+#define GRXFSIZ HSOTG_REG(0x024)
+#define GRXFSIZ_DEPTH_MASK (0xffff << 0)
+#define GRXFSIZ_DEPTH_SHIFT 0
+
+#define GNPTXFSIZ HSOTG_REG(0x028)
+/* Use FIFOSIZE_* constants to access this register */
+
+#define GNPTXSTS HSOTG_REG(0x02C)
+#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24)
+#define GNPTXSTS_NP_TXQ_TOP_SHIFT 24
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_MASK (0xff << 16)
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_SHIFT 16
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(_v) (((_v) >> 16) & 0xff)
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_MASK (0xffff << 0)
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_SHIFT 0
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff)
+
+#define GI2CCTL HSOTG_REG(0x0030)
+#define GI2CCTL_BSYDNE BIT(31)
+#define GI2CCTL_RW BIT(30)
+#define GI2CCTL_I2CDATSE0 BIT(28)
+#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26)
+#define GI2CCTL_I2CDEVADDR_SHIFT 26
+#define GI2CCTL_I2CSUSPCTL BIT(25)
+#define GI2CCTL_ACK BIT(24)
+#define GI2CCTL_I2CEN BIT(23)
+#define GI2CCTL_ADDR_MASK (0x7f << 16)
+#define GI2CCTL_ADDR_SHIFT 16
+#define GI2CCTL_REGADDR_MASK (0xff << 8)
+#define GI2CCTL_REGADDR_SHIFT 8
+#define GI2CCTL_RWDATA_MASK (0xff << 0)
+#define GI2CCTL_RWDATA_SHIFT 0
+
+#define GPVNDCTL HSOTG_REG(0x0034)
+#define GGPIO HSOTG_REG(0x0038)
+#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16)
+#define GGPIO_STM32_OTG_GCCFG_VBDEN BIT(21)
+#define GGPIO_STM32_OTG_GCCFG_IDEN BIT(22)
+
+#define GUID HSOTG_REG(0x003c)
+#define GSNPSID HSOTG_REG(0x0040)
+#define GHWCFG1 HSOTG_REG(0x0044)
+#define GSNPSID_ID_MASK GENMASK(31, 16)
+
+#define GHWCFG2 HSOTG_REG(0x0048)
+#define GHWCFG2_OTG_ENABLE_IC_USB BIT(31)
+#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26)
+#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26
+#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24)
+#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24
+#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22)
+#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22
+#define GHWCFG2_MULTI_PROC_INT BIT(20)
+#define GHWCFG2_DYNAMIC_FIFO BIT(19)
+#define GHWCFG2_PERIO_EP_SUPPORTED BIT(18)
+#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14)
+#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14
+#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10)
+#define GHWCFG2_NUM_DEV_EP_SHIFT 10
+#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8)
+#define GHWCFG2_FS_PHY_TYPE_SHIFT 8
+#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1
+#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2
+#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3
+#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6)
+#define GHWCFG2_HS_PHY_TYPE_SHIFT 6
+#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_HS_PHY_TYPE_UTMI 1
+#define GHWCFG2_HS_PHY_TYPE_ULPI 2
+#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+#define GHWCFG2_POINT2POINT BIT(5)
+#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3)
+#define GHWCFG2_ARCHITECTURE_SHIFT 3
+#define GHWCFG2_SLAVE_ONLY_ARCH 0
+#define GHWCFG2_EXT_DMA_ARCH 1
+#define GHWCFG2_INT_DMA_ARCH 2
+#define GHWCFG2_OP_MODE_MASK (0x7 << 0)
+#define GHWCFG2_OP_MODE_SHIFT 0
+#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0
+#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1
+#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+#define GHWCFG2_OP_MODE_UNDEFINED 7
+
+#define GHWCFG3 HSOTG_REG(0x004c)
+#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16)
+#define GHWCFG3_DFIFO_DEPTH_SHIFT 16
+#define GHWCFG3_OTG_LPM_EN BIT(15)
+#define GHWCFG3_BC_SUPPORT BIT(14)
+#define GHWCFG3_OTG_ENABLE_HSIC BIT(13)
+#define GHWCFG3_ADP_SUPP BIT(12)
+#define GHWCFG3_SYNCH_RESET_TYPE BIT(11)
+#define GHWCFG3_OPTIONAL_FEATURES BIT(10)
+#define GHWCFG3_VENDOR_CTRL_IF BIT(9)
+#define GHWCFG3_I2C BIT(8)
+#define GHWCFG3_OTG_FUNC BIT(7)
+#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4)
+#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4
+#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0)
+#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0
+
+#define GHWCFG4 HSOTG_REG(0x0050)
+#define GHWCFG4_DESC_DMA_DYN BIT(31)
+#define GHWCFG4_DESC_DMA BIT(30)
+#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
+#define GHWCFG4_NUM_IN_EPS_SHIFT 26
+#define GHWCFG4_DED_FIFO_EN BIT(25)
+#define GHWCFG4_DED_FIFO_SHIFT 25
+#define GHWCFG4_SESSION_END_FILT_EN BIT(24)
+#define GHWCFG4_B_VALID_FILT_EN BIT(23)
+#define GHWCFG4_A_VALID_FILT_EN BIT(22)
+#define GHWCFG4_VBUS_VALID_FILT_EN BIT(21)
+#define GHWCFG4_IDDIG_FILT_EN BIT(20)
+#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16)
+#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
+#define GHWCFG4_ACG_SUPPORTED BIT(12)
+#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
+#define GHWCFG4_XHIBER BIT(7)
+#define GHWCFG4_HIBER BIT(6)
+#define GHWCFG4_MIN_AHB_FREQ BIT(5)
+#define GHWCFG4_POWER_OPTIMIZ BIT(4)
+#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0)
+#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0
+
+#define GLPMCFG HSOTG_REG(0x0054)
+#define GLPMCFG_INVSELHSIC BIT(31)
+#define GLPMCFG_HSICCON BIT(30)
+#define GLPMCFG_RSTRSLPSTS BIT(29)
+#define GLPMCFG_ENBESL BIT(28)
+#define GLPMCFG_LPM_RETRYCNT_STS_MASK (0x7 << 25)
+#define GLPMCFG_LPM_RETRYCNT_STS_SHIFT 25
+#define GLPMCFG_SNDLPM BIT(24)
+#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21)
+#define GLPMCFG_RETRY_CNT_SHIFT 21
+#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21)
+#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22)
+#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17)
+#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17
+#define GLPMCFG_L1RESUMEOK BIT(16)
+#define GLPMCFG_SLPSTS BIT(15)
+#define GLPMCFG_COREL1RES_MASK (0x3 << 13)
+#define GLPMCFG_COREL1RES_SHIFT 13
+#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8)
+#define GLPMCFG_HIRD_THRES_SHIFT 8
+#define GLPMCFG_HIRD_THRES_EN (0x10 << 8)
+#define GLPMCFG_ENBLSLPM BIT(7)
+#define GLPMCFG_BREMOTEWAKE BIT(6)
+#define GLPMCFG_HIRD_MASK (0xf << 2)
+#define GLPMCFG_HIRD_SHIFT 2
+#define GLPMCFG_APPL1RES BIT(1)
+#define GLPMCFG_LPMCAP BIT(0)
+
+#define GPWRDN HSOTG_REG(0x0058)
+#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24)
+#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24
+#define GPWRDN_ADP_INT BIT(23)
+#define GPWRDN_BSESSVLD BIT(22)
+#define GPWRDN_IDSTS BIT(21)
+#define GPWRDN_LINESTATE_MASK (0x3 << 19)
+#define GPWRDN_LINESTATE_SHIFT 19
+#define GPWRDN_STS_CHGINT_MSK BIT(18)
+#define GPWRDN_STS_CHGINT BIT(17)
+#define GPWRDN_SRP_DET_MSK BIT(16)
+#define GPWRDN_SRP_DET BIT(15)
+#define GPWRDN_CONNECT_DET_MSK BIT(14)
+#define GPWRDN_CONNECT_DET BIT(13)
+#define GPWRDN_DISCONN_DET_MSK BIT(12)
+#define GPWRDN_DISCONN_DET BIT(11)
+#define GPWRDN_RST_DET_MSK BIT(10)
+#define GPWRDN_RST_DET BIT(9)
+#define GPWRDN_LNSTSCHG_MSK BIT(8)
+#define GPWRDN_LNSTSCHG BIT(7)
+#define GPWRDN_DIS_VBUS BIT(6)
+#define GPWRDN_PWRDNSWTCH BIT(5)
+#define GPWRDN_PWRDNRSTN BIT(4)
+#define GPWRDN_PWRDNCLMP BIT(3)
+#define GPWRDN_RESTORE BIT(2)
+#define GPWRDN_PMUACTV BIT(1)
+#define GPWRDN_PMUINTSEL BIT(0)
+
+#define GDFIFOCFG HSOTG_REG(0x005c)
+#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16)
+#define GDFIFOCFG_EPINFOBASE_SHIFT 16
+#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0)
+#define GDFIFOCFG_GDFIFOCFG_SHIFT 0
+
+#define ADPCTL HSOTG_REG(0x0060)
+#define ADPCTL_AR_MASK (0x3 << 27)
+#define ADPCTL_AR_SHIFT 27
+#define ADPCTL_ADP_TMOUT_INT_MSK BIT(26)
+#define ADPCTL_ADP_SNS_INT_MSK BIT(25)
+#define ADPCTL_ADP_PRB_INT_MSK BIT(24)
+#define ADPCTL_ADP_TMOUT_INT BIT(23)
+#define ADPCTL_ADP_SNS_INT BIT(22)
+#define ADPCTL_ADP_PRB_INT BIT(21)
+#define ADPCTL_ADPENA BIT(20)
+#define ADPCTL_ADPRES BIT(19)
+#define ADPCTL_ENASNS BIT(18)
+#define ADPCTL_ENAPRB BIT(17)
+#define ADPCTL_RTIM_MASK (0x7ff << 6)
+#define ADPCTL_RTIM_SHIFT 6
+#define ADPCTL_PRB_PER_MASK (0x3 << 4)
+#define ADPCTL_PRB_PER_SHIFT 4
+#define ADPCTL_PRB_DELTA_MASK (0x3 << 2)
+#define ADPCTL_PRB_DELTA_SHIFT 2
+#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
+#define ADPCTL_PRB_DSCHRG_SHIFT 0
+
+#define GREFCLK HSOTG_REG(0x0064)
+#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
+#define GREFCLK_REFCLKPER_SHIFT 15
+#define GREFCLK_REF_CLK_MODE BIT(14)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0
+
+#define GINTMSK2 HSOTG_REG(0x0068)
+#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
+
+#define GINTSTS2 HSOTG_REG(0x006c)
+#define GINTSTS2_WKUP_ALERT_INT BIT(0)
+
+#define HPTXFSIZ HSOTG_REG(0x100)
+/* Use FIFOSIZE_* constants to access this register */
+
+#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4))
+/* Use FIFOSIZE_* constants to access this register */
+
+/* These apply to the GNPTXFSIZ, HPTXFSIZ and DPTXFSIZN registers */
+#define FIFOSIZE_DEPTH_MASK (0xffff << 16)
+#define FIFOSIZE_DEPTH_SHIFT 16
+#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
+#define FIFOSIZE_STARTADDR_SHIFT 0
+#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff)
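+
+/*
+ * For example (a sketch, assuming the dwc2_readl() accessor used
+ * elsewhere in this driver), the non-periodic TX FIFO depth is:
+ *
+ * depth = FIFOSIZE_DEPTH_GET(dwc2_readl(hsotg, GNPTXFSIZ));
+ */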
+
+/* Device mode registers */
+
+#define DCFG HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN BIT(23)
+#define DCFG_EPMISCNT_MASK (0x1f << 18)
+#define DCFG_EPMISCNT_SHIFT 18
+#define DCFG_EPMISCNT_LIMIT 0x1f
+#define DCFG_EPMISCNT(_x) ((_x) << 18)
+#define DCFG_IPG_ISOC_SUPPORDED BIT(17)
+#define DCFG_PERFRINT_MASK (0x3 << 11)
+#define DCFG_PERFRINT_SHIFT 11
+#define DCFG_PERFRINT_LIMIT 0x3
+#define DCFG_PERFRINT(_x) ((_x) << 11)
+#define DCFG_DEVADDR_MASK (0x7f << 4)
+#define DCFG_DEVADDR_SHIFT 4
+#define DCFG_DEVADDR_LIMIT 0x7f
+#define DCFG_DEVADDR(_x) ((_x) << 4)
+#define DCFG_NZ_STS_OUT_HSHK BIT(2)
+#define DCFG_DEVSPD_MASK (0x3 << 0)
+#define DCFG_DEVSPD_SHIFT 0
+#define DCFG_DEVSPD_HS 0
+#define DCFG_DEVSPD_FS 1
+#define DCFG_DEVSPD_LS 2
+#define DCFG_DEVSPD_FS48 3
+
+#define DCTL HSOTG_REG(0x804)
+#define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19)
+#define DCTL_PWRONPRGDONE BIT(11)
+#define DCTL_CGOUTNAK BIT(10)
+#define DCTL_SGOUTNAK BIT(9)
+#define DCTL_CGNPINNAK BIT(8)
+#define DCTL_SGNPINNAK BIT(7)
+#define DCTL_TSTCTL_MASK (0x7 << 4)
+#define DCTL_TSTCTL_SHIFT 4
+#define DCTL_GOUTNAKSTS BIT(3)
+#define DCTL_GNPINNAKSTS BIT(2)
+#define DCTL_SFTDISCON BIT(1)
+#define DCTL_RMTWKUPSIG BIT(0)
+
+#define DSTS HSOTG_REG(0x808)
+#define DSTS_SOFFN_MASK (0x3fff << 8)
+#define DSTS_SOFFN_SHIFT 8
+#define DSTS_SOFFN_LIMIT 0x3fff
+#define DSTS_SOFFN(_x) ((_x) << 8)
+#define DSTS_ERRATICERR BIT(3)
+#define DSTS_ENUMSPD_MASK (0x3 << 1)
+#define DSTS_ENUMSPD_SHIFT 1
+#define DSTS_ENUMSPD_HS 0
+#define DSTS_ENUMSPD_FS 1
+#define DSTS_ENUMSPD_LS 2
+#define DSTS_ENUMSPD_FS48 3
+#define DSTS_SUSPSTS BIT(0)
+
+#define DIEPMSK HSOTG_REG(0x810)
+#define DIEPMSK_NAKMSK BIT(13)
+#define DIEPMSK_BNAININTRMSK BIT(9)
+#define DIEPMSK_TXFIFOUNDRNMSK BIT(8)
+#define DIEPMSK_TXFIFOEMPTY BIT(7)
+#define DIEPMSK_INEPNAKEFFMSK BIT(6)
+#define DIEPMSK_INTKNEPMISMSK BIT(5)
+#define DIEPMSK_INTKNTXFEMPMSK BIT(4)
+#define DIEPMSK_TIMEOUTMSK BIT(3)
+#define DIEPMSK_AHBERRMSK BIT(2)
+#define DIEPMSK_EPDISBLDMSK BIT(1)
+#define DIEPMSK_XFERCOMPLMSK BIT(0)
+
+#define DOEPMSK HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK BIT(9)
+#define DOEPMSK_BACK2BACKSETUP BIT(6)
+#define DOEPMSK_STSPHSERCVDMSK BIT(5)
+#define DOEPMSK_OUTTKNEPDISMSK BIT(4)
+#define DOEPMSK_SETUPMSK BIT(3)
+#define DOEPMSK_AHBERRMSK BIT(2)
+#define DOEPMSK_EPDISBLDMSK BIT(1)
+#define DOEPMSK_XFERCOMPLMSK BIT(0)
+
+#define DAINT HSOTG_REG(0x818)
+#define DAINTMSK HSOTG_REG(0x81C)
+#define DAINT_OUTEP_SHIFT 16
+#define DAINT_OUTEP(_x) (1 << ((_x) + 16))
+#define DAINT_INEP(_x) (1 << (_x))
+
+#define DTKNQR1 HSOTG_REG(0x820)
+#define DTKNQR2 HSOTG_REG(0x824)
+#define DTKNQR3 HSOTG_REG(0x830)
+#define DTKNQR4 HSOTG_REG(0x834)
+#define DIEPEMPMSK HSOTG_REG(0x834)
+
+#define DVBUSDIS HSOTG_REG(0x828)
+#define DVBUSPULSE HSOTG_REG(0x82C)
+
+#define DIEPCTL0 HSOTG_REG(0x900)
+#define DIEPCTL(_a) HSOTG_REG(0x900 + ((_a) * 0x20))
+
+#define DOEPCTL0 HSOTG_REG(0xB00)
+#define DOEPCTL(_a) HSOTG_REG(0xB00 + ((_a) * 0x20))
+
+/* EP0 specialness:
+ * bits[29..28] - reserved (no SetD0PID, SetD1PID)
+ * bits[25..22] - should always be zero, this isn't a periodic endpoint
+ * bits[10..0] - MPS setting different for EP0
+ */
+#define D0EPCTL_MPS_MASK (0x3 << 0)
+#define D0EPCTL_MPS_SHIFT 0
+#define D0EPCTL_MPS_64 0
+#define D0EPCTL_MPS_32 1
+#define D0EPCTL_MPS_16 2
+#define D0EPCTL_MPS_8 3
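+
+/*
+ * A minimal usage sketch (assuming the dwc2_readl() accessor used
+ * elsewhere in this driver): the EP0 MPS field holds an encoding, not
+ * a byte count, e.g.
+ *
+ * mps = (dwc2_readl(hsotg, DOEPCTL0) & D0EPCTL_MPS_MASK) >>
+ * D0EPCTL_MPS_SHIFT;
+ *
+ * yields D0EPCTL_MPS_64 (0) for a 64 byte max packet size.
+ */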
+
+#define DXEPCTL_EPENA BIT(31)
+#define DXEPCTL_EPDIS BIT(30)
+#define DXEPCTL_SETD1PID BIT(29)
+#define DXEPCTL_SETODDFR BIT(29)
+#define DXEPCTL_SETD0PID BIT(28)
+#define DXEPCTL_SETEVENFR BIT(28)
+#define DXEPCTL_SNAK BIT(27)
+#define DXEPCTL_CNAK BIT(26)
+#define DXEPCTL_TXFNUM_MASK (0xf << 22)
+#define DXEPCTL_TXFNUM_SHIFT 22
+#define DXEPCTL_TXFNUM_LIMIT 0xf
+#define DXEPCTL_TXFNUM(_x) ((_x) << 22)
+#define DXEPCTL_STALL BIT(21)
+#define DXEPCTL_SNP BIT(20)
+#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
+#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18)
+#define DXEPCTL_EPTYPE_ISO (0x1 << 18)
+#define DXEPCTL_EPTYPE_BULK (0x2 << 18)
+#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18)
+
+#define DXEPCTL_NAKSTS BIT(17)
+#define DXEPCTL_DPID BIT(16)
+#define DXEPCTL_EOFRNUM BIT(16)
+#define DXEPCTL_USBACTEP BIT(15)
+#define DXEPCTL_NEXTEP_MASK (0xf << 11)
+#define DXEPCTL_NEXTEP_SHIFT 11
+#define DXEPCTL_NEXTEP_LIMIT 0xf
+#define DXEPCTL_NEXTEP(_x) ((_x) << 11)
+#define DXEPCTL_MPS_MASK (0x7ff << 0)
+#define DXEPCTL_MPS_SHIFT 0
+#define DXEPCTL_MPS_LIMIT 0x7ff
+#define DXEPCTL_MPS(_x) ((_x) << 0)
+
+#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
+#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
+#define DXEPINT_SETUP_RCVD BIT(15)
+#define DXEPINT_NYETINTRPT BIT(14)
+#define DXEPINT_NAKINTRPT BIT(13)
+#define DXEPINT_BBLEERRINTRPT BIT(12)
+#define DXEPINT_PKTDRPSTS BIT(11)
+#define DXEPINT_BNAINTR BIT(9)
+#define DXEPINT_TXFIFOUNDRN BIT(8)
+#define DXEPINT_OUTPKTERR BIT(8)
+#define DXEPINT_TXFEMP BIT(7)
+#define DXEPINT_INEPNAKEFF BIT(6)
+#define DXEPINT_BACK2BACKSETUP BIT(6)
+#define DXEPINT_INTKNEPMIS BIT(5)
+#define DXEPINT_STSPHSERCVD BIT(5)
+#define DXEPINT_INTKNTXFEMP BIT(4)
+#define DXEPINT_OUTTKNEPDIS BIT(4)
+#define DXEPINT_TIMEOUT BIT(3)
+#define DXEPINT_SETUP BIT(3)
+#define DXEPINT_AHBERR BIT(2)
+#define DXEPINT_EPDISBLD BIT(1)
+#define DXEPINT_XFERCOMPL BIT(0)
+
+#define DIEPTSIZ0 HSOTG_REG(0x910)
+#define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19)
+#define DIEPTSIZ0_PKTCNT_SHIFT 19
+#define DIEPTSIZ0_PKTCNT_LIMIT 0x3
+#define DIEPTSIZ0_PKTCNT(_x) ((_x) << 19)
+#define DIEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
+#define DIEPTSIZ0_XFERSIZE_SHIFT 0
+#define DIEPTSIZ0_XFERSIZE_LIMIT 0x7f
+#define DIEPTSIZ0_XFERSIZE(_x) ((_x) << 0)
+
+#define DOEPTSIZ0 HSOTG_REG(0xB10)
+#define DOEPTSIZ0_SUPCNT_MASK (0x3 << 29)
+#define DOEPTSIZ0_SUPCNT_SHIFT 29
+#define DOEPTSIZ0_SUPCNT_LIMIT 0x3
+#define DOEPTSIZ0_SUPCNT(_x) ((_x) << 29)
+#define DOEPTSIZ0_PKTCNT BIT(19)
+#define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
+#define DOEPTSIZ0_XFERSIZE_SHIFT 0
+
+#define DIEPTSIZ(_a) HSOTG_REG(0x910 + ((_a) * 0x20))
+#define DOEPTSIZ(_a) HSOTG_REG(0xB10 + ((_a) * 0x20))
+#define DXEPTSIZ_MC_MASK (0x3 << 29)
+#define DXEPTSIZ_MC_SHIFT 29
+#define DXEPTSIZ_MC_LIMIT 0x3
+#define DXEPTSIZ_MC(_x) ((_x) << 29)
+#define DXEPTSIZ_PKTCNT_MASK (0x3ff << 19)
+#define DXEPTSIZ_PKTCNT_SHIFT 19
+#define DXEPTSIZ_PKTCNT_LIMIT 0x3ff
+#define DXEPTSIZ_PKTCNT_GET(_v) (((_v) >> 19) & 0x3ff)
+#define DXEPTSIZ_PKTCNT(_x) ((_x) << 19)
+#define DXEPTSIZ_XFERSIZE_MASK (0x7ffff << 0)
+#define DXEPTSIZ_XFERSIZE_SHIFT 0
+#define DXEPTSIZ_XFERSIZE_LIMIT 0x7ffff
+#define DXEPTSIZ_XFERSIZE_GET(_v) (((_v) >> 0) & 0x7ffff)
+#define DXEPTSIZ_XFERSIZE(_x) ((_x) << 0)
+
+#define DIEPDMA(_a) HSOTG_REG(0x914 + ((_a) * 0x20))
+#define DOEPDMA(_a) HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20))
+
+#define PCGCTL HSOTG_REG(0x0e00)
+#define PCGCTL_IF_DEV_MODE BIT(31)
+#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29)
+#define PCGCTL_P2HD_PRT_SPD_SHIFT 29
+#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27)
+#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27
+#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20)
+#define PCGCTL_MAC_DEV_ADDR_SHIFT 20
+#define PCGCTL_MAX_TERMSEL BIT(19)
+#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17)
+#define PCGCTL_MAX_XCVRSELECT_SHIFT 17
+#define PCGCTL_PORT_POWER BIT(16)
+#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14)
+#define PCGCTL_PRT_CLK_SEL_SHIFT 14
+#define PCGCTL_ESS_REG_RESTORED BIT(13)
+#define PCGCTL_EXTND_HIBER_SWITCH BIT(12)
+#define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11)
+#define PCGCTL_ENBL_EXTND_HIBER BIT(10)
+#define PCGCTL_RESTOREMODE BIT(9)
+#define PCGCTL_RESETAFTSUSP BIT(8)
+#define PCGCTL_DEEP_SLEEP BIT(7)
+#define PCGCTL_PHY_IN_SLEEP BIT(6)
+#define PCGCTL_ENBL_SLEEP_GATING BIT(5)
+#define PCGCTL_RSTPDWNMODULE BIT(3)
+#define PCGCTL_PWRCLMP BIT(2)
+#define PCGCTL_GATEHCLK BIT(1)
+#define PCGCTL_STOPPCLK BIT(0)
+
+#define PCGCCTL1 HSOTG_REG(0xe04)
+#define PCGCCTL1_TIMER (0x3 << 1)
+#define PCGCCTL1_GATEEN BIT(0)
+
+#define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000))
+
+/* Host Mode Registers */
+
+#define HCFG HSOTG_REG(0x0400)
+#define HCFG_MODECHTIMEN BIT(31)
+#define HCFG_PERSCHEDENA BIT(26)
+#define HCFG_FRLISTEN_MASK (0x3 << 24)
+#define HCFG_FRLISTEN_SHIFT 24
+#define HCFG_FRLISTEN_8 (0 << 24)
+#define FRLISTEN_8_SIZE 8
+#define HCFG_FRLISTEN_16 BIT(24)
+#define FRLISTEN_16_SIZE 16
+#define HCFG_FRLISTEN_32 (2 << 24)
+#define FRLISTEN_32_SIZE 32
+#define HCFG_FRLISTEN_64 (3 << 24)
+#define FRLISTEN_64_SIZE 64
+#define HCFG_DESCDMA BIT(23)
+#define HCFG_RESVALID_MASK (0xff << 8)
+#define HCFG_RESVALID_SHIFT 8
+#define HCFG_ENA32KHZ BIT(7)
+#define HCFG_FSLSSUPP BIT(2)
+#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0)
+#define HCFG_FSLSPCLKSEL_SHIFT 0
+#define HCFG_FSLSPCLKSEL_30_60_MHZ 0
+#define HCFG_FSLSPCLKSEL_48_MHZ 1
+#define HCFG_FSLSPCLKSEL_6_MHZ 2
+
+#define HFIR HSOTG_REG(0x0404)
+#define HFIR_FRINT_MASK (0xffff << 0)
+#define HFIR_FRINT_SHIFT 0
+#define HFIR_RLDCTRL BIT(16)
+
+#define HFNUM HSOTG_REG(0x0408)
+#define HFNUM_FRREM_MASK (0xffff << 16)
+#define HFNUM_FRREM_SHIFT 16
+#define HFNUM_FRNUM_MASK (0xffff << 0)
+#define HFNUM_FRNUM_SHIFT 0
+#define HFNUM_MAX_FRNUM 0x3fff
+
+#define HPTXSTS HSOTG_REG(0x0410)
+#define TXSTS_QTOP_ODD BIT(31)
+#define TXSTS_QTOP_CHNEP_MASK (0xf << 27)
+#define TXSTS_QTOP_CHNEP_SHIFT 27
+#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
+#define TXSTS_QTOP_TOKEN_SHIFT 25
+#define TXSTS_QTOP_TERMINATE BIT(24)
+#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
+#define TXSTS_QSPCAVAIL_SHIFT 16
+#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
+#define TXSTS_FSPCAVAIL_SHIFT 0
+
+#define HAINT HSOTG_REG(0x0414)
+#define HAINTMSK HSOTG_REG(0x0418)
+#define HFLBADDR HSOTG_REG(0x041c)
+
+#define HPRT0 HSOTG_REG(0x0440)
+#define HPRT0_SPD_MASK (0x3 << 17)
+#define HPRT0_SPD_SHIFT 17
+#define HPRT0_SPD_HIGH_SPEED 0
+#define HPRT0_SPD_FULL_SPEED 1
+#define HPRT0_SPD_LOW_SPEED 2
+#define HPRT0_TSTCTL_MASK (0xf << 13)
+#define HPRT0_TSTCTL_SHIFT 13
+#define HPRT0_PWR BIT(12)
+#define HPRT0_LNSTS_MASK (0x3 << 10)
+#define HPRT0_LNSTS_SHIFT 10
+#define HPRT0_RST BIT(8)
+#define HPRT0_SUSP BIT(7)
+#define HPRT0_RES BIT(6)
+#define HPRT0_OVRCURRCHG BIT(5)
+#define HPRT0_OVRCURRACT BIT(4)
+#define HPRT0_ENACHG BIT(3)
+#define HPRT0_ENA BIT(2)
+#define HPRT0_CONNDET BIT(1)
+#define HPRT0_CONNSTS BIT(0)
+
+#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch))
+#define HCCHAR_CHENA BIT(31)
+#define HCCHAR_CHDIS BIT(30)
+#define HCCHAR_ODDFRM BIT(29)
+#define HCCHAR_DEVADDR_MASK (0x7f << 22)
+#define HCCHAR_DEVADDR_SHIFT 22
+#define HCCHAR_MULTICNT_MASK (0x3 << 20)
+#define HCCHAR_MULTICNT_SHIFT 20
+#define HCCHAR_EPTYPE_MASK (0x3 << 18)
+#define HCCHAR_EPTYPE_SHIFT 18
+#define HCCHAR_LSPDDEV BIT(17)
+#define HCCHAR_EPDIR BIT(15)
+#define HCCHAR_EPNUM_MASK (0xf << 11)
+#define HCCHAR_EPNUM_SHIFT 11
+#define HCCHAR_MPS_MASK (0x7ff << 0)
+#define HCCHAR_MPS_SHIFT 0
+
+#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch))
+#define HCSPLT_SPLTENA BIT(31)
+#define HCSPLT_COMPSPLT BIT(16)
+#define HCSPLT_XACTPOS_MASK (0x3 << 14)
+#define HCSPLT_XACTPOS_SHIFT 14
+#define HCSPLT_XACTPOS_MID 0
+#define HCSPLT_XACTPOS_END 1
+#define HCSPLT_XACTPOS_BEGIN 2
+#define HCSPLT_XACTPOS_ALL 3
+#define HCSPLT_HUBADDR_MASK (0x7f << 7)
+#define HCSPLT_HUBADDR_SHIFT 7
+#define HCSPLT_PRTADDR_MASK (0x7f << 0)
+#define HCSPLT_PRTADDR_SHIFT 0
+
+#define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch))
+#define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch))
+#define HCINTMSK_RESERVED14_31 (0x3ffff << 14)
+#define HCINTMSK_FRM_LIST_ROLL BIT(13)
+#define HCINTMSK_XCS_XACT BIT(12)
+#define HCINTMSK_BNA BIT(11)
+#define HCINTMSK_DATATGLERR BIT(10)
+#define HCINTMSK_FRMOVRUN BIT(9)
+#define HCINTMSK_BBLERR BIT(8)
+#define HCINTMSK_XACTERR BIT(7)
+#define HCINTMSK_NYET BIT(6)
+#define HCINTMSK_ACK BIT(5)
+#define HCINTMSK_NAK BIT(4)
+#define HCINTMSK_STALL BIT(3)
+#define HCINTMSK_AHBERR BIT(2)
+#define HCINTMSK_CHHLTD BIT(1)
+#define HCINTMSK_XFERCOMPL BIT(0)
+
+#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch))
+#define TSIZ_DOPNG BIT(31)
+#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
+#define TSIZ_SC_MC_PID_SHIFT 29
+#define TSIZ_SC_MC_PID_DATA0 0
+#define TSIZ_SC_MC_PID_DATA2 1
+#define TSIZ_SC_MC_PID_DATA1 2
+#define TSIZ_SC_MC_PID_MDATA 3
+#define TSIZ_SC_MC_PID_SETUP 3
+#define TSIZ_PKTCNT_MASK (0x3ff << 19)
+#define TSIZ_PKTCNT_SHIFT 19
+#define TSIZ_NTD_MASK (0xff << 8)
+#define TSIZ_NTD_SHIFT 8
+#define TSIZ_SCHINFO_MASK (0xff << 0)
+#define TSIZ_SCHINFO_SHIFT 0
+#define TSIZ_XFERSIZE_MASK (0x7ffff << 0)
+#define TSIZ_XFERSIZE_SHIFT 0
+
+#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch))
+
+#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch))
+
+#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
+
+/**
+ * struct dwc2_dma_desc - DMA descriptor structure,
+ * used for both host and gadget modes
+ *
+ * @status: DMA descriptor status quadlet
+ * @buf: DMA descriptor data buffer pointer
+ *
+ * DMA Descriptor structure contains two quadlets:
+ * Status quadlet and Data buffer pointer.
+ */
+struct dwc2_dma_desc {
+ u32 status;
+ u32 buf;
+} __packed;
+
+/* Host Mode DMA descriptor status quadlet */
+
+#define HOST_DMA_A BIT(31)
+#define HOST_DMA_STS_MASK (0x3 << 28)
+#define HOST_DMA_STS_SHIFT 28
+#define HOST_DMA_STS_PKTERR BIT(28)
+#define HOST_DMA_EOL BIT(26)
+#define HOST_DMA_IOC BIT(25)
+#define HOST_DMA_SUP BIT(24)
+#define HOST_DMA_ALT_QTD BIT(23)
+#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17)
+#define HOST_DMA_QTD_OFFSET_SHIFT 17
+#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0)
+#define HOST_DMA_ISOC_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
+#define HOST_DMA_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_LIMIT 131071
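+
+/*
+ * Illustrative only: a host-mode descriptor status quadlet combines a
+ * byte count with control flags, e.g. for the final descriptor of a
+ * list with an interrupt on completion:
+ *
+ *	desc->status = ((n_bytes << HOST_DMA_NBYTES_SHIFT) &
+ *			HOST_DMA_NBYTES_MASK) |
+ *		       HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
+ */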
+
+/* Device Mode DMA descriptor status quadlet */
+
+#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT 30
+#define DEV_DMA_BUFF_STS_HREADY 0
+#define DEV_DMA_BUFF_STS_DMABUSY 1
+#define DEV_DMA_BUFF_STS_DMADONE 2
+#define DEV_DMA_BUFF_STS_HBUSY 3
+#define DEV_DMA_STS_MASK (0x3 << 28)
+#define DEV_DMA_STS_SHIFT 28
+#define DEV_DMA_STS_SUCC 0
+#define DEV_DMA_STS_BUFF_FLUSH 1
+#define DEV_DMA_STS_BUFF_ERR 3
+#define DEV_DMA_L BIT(27)
+#define DEV_DMA_SHORT BIT(26)
+#define DEV_DMA_IOC BIT(25)
+#define DEV_DMA_SR BIT(24)
+#define DEV_DMA_MTRF BIT(23)
+#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT 23
+#define DEV_DMA_ISOC_PID_DATA0 0
+#define DEV_DMA_ISOC_PID_DATA2 1
+#define DEV_DMA_ISOC_PID_DATA1 2
+#define DEV_DMA_ISOC_PID_MDATA 3
+#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT 12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_MASK (0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_LIMIT 0xffff
+
+#define MAX_DMA_DESC_NUM_GENERIC 64
+#define MAX_DMA_DESC_NUM_HS_ISOC 256
+
+#endif /* __DWC2_HW_H__ */
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
new file mode 100644
index 000000000..8eab5f38b
--- /dev/null
+++ b/drivers/usb/dwc2/params.c
@@ -0,0 +1,940 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2004-2016 Synopsys, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/usb/of.h>
+
+#include "core.h"
+
+static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->host_rx_fifo_size = 774;
+ p->max_transfer_size = 65535;
+ p->max_packet_count = 511;
+ p->ahbcfg = 0x10;
+}
+
+static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_rx_fifo_size = 512;
+ p->host_nperio_tx_fifo_size = 512;
+ p->host_perio_tx_fifo_size = 512;
+ p->max_transfer_size = 65535;
+ p->max_packet_count = 511;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 8;
+ p->i2c_enable = false;
+ p->reload_ctl = false;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+ p->change_speed_quirk = true;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+}
+
+static void dwc2_set_jz4775_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x1600_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x2000_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_rx_fifo_size = 1024;
+ p->host_nperio_tx_fifo_size = 1024;
+ p->host_perio_tx_fifo_size = 1024;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 16;
+ p->activate_ingenic_overcurrent_detection =
+ !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->no_clock_gating = true;
+ p->phy_utmi_width = 8;
+}
+
+static void dwc2_set_socfpga_agilex_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->no_clock_gating = true;
+}
+
+static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->host_rx_fifo_size = 525;
+ p->host_nperio_tx_fifo_size = 128;
+ p->host_perio_tx_fifo_size = 256;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+}
+
+static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->host_rx_fifo_size = 288;
+ p->host_nperio_tx_fifo_size = 128;
+ p->host_perio_tx_fifo_size = 96;
+ p->max_transfer_size = 65535;
+ p->max_packet_count = 511;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+}
+
+static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_rx_fifo_size = 512;
+ p->host_nperio_tx_fifo_size = 500;
+ p->host_perio_tx_fifo_size = 500;
+ p->host_channels = 16;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+}
+
+static void dwc2_set_amlogic_g12a_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->lpm = false;
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
+}
+
+static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
+}
+
+static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->speed = DWC2_SPEED_PARAM_FULL;
+ p->host_rx_fifo_size = 128;
+ p->host_nperio_tx_fifo_size = 96;
+ p->host_perio_tx_fifo_size = 96;
+ p->max_packet_count = 256;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ p->i2c_enable = false;
+ p->activate_stm_fs_transceiver = true;
+}
+
+static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->host_rx_fifo_size = 622;
+ p->host_nperio_tx_fifo_size = 128;
+ p->host_perio_tx_fifo_size = 256;
+}
+
+static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->otg_caps.otg_rev = 0x200;
+ p->speed = DWC2_SPEED_PARAM_FULL;
+ p->host_rx_fifo_size = 128;
+ p->host_nperio_tx_fifo_size = 96;
+ p->host_perio_tx_fifo_size = 96;
+ p->max_packet_count = 256;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ p->i2c_enable = false;
+ p->activate_stm_fs_transceiver = true;
+ p->activate_stm_id_vb_detection = true;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->host_support_fs_ls_low_power = true;
+ p->host_ls_low_power_phy_clk = true;
+}
+
+static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->otg_caps.otg_rev = 0x200;
+ p->activate_stm_id_vb_detection = !device_property_read_bool(hsotg->dev, "usb-role-switch");
+ p->host_rx_fifo_size = 440;
+ p->host_nperio_tx_fifo_size = 256;
+ p->host_perio_tx_fifo_size = 256;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->lpm = false;
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
+}
+
+const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
+ { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
+ { .compatible = "ingenic,jz4775-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,jz4780-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,x1000-otg", .data = dwc2_set_jz4775_params },
+ { .compatible = "ingenic,x1600-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x1700-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x1830-otg", .data = dwc2_set_x1600_params },
+ { .compatible = "ingenic,x2000-otg", .data = dwc2_set_x2000_params },
+ { .compatible = "rockchip,rk3066-usb", .data = dwc2_set_rk_params },
+ { .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
+ { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
+ { .compatible = "snps,dwc2" },
+ { .compatible = "samsung,s3c6400-hsotg",
+ .data = dwc2_set_s3c6400_params },
+ { .compatible = "amlogic,meson8-usb",
+ .data = dwc2_set_amlogic_params },
+ { .compatible = "amlogic,meson8b-usb",
+ .data = dwc2_set_amlogic_params },
+ { .compatible = "amlogic,meson-gxbb-usb",
+ .data = dwc2_set_amlogic_params },
+ { .compatible = "amlogic,meson-g12a-usb",
+ .data = dwc2_set_amlogic_g12a_params },
+ { .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
+ { .compatible = "apm,apm82181-dwc-otg", .data = dwc2_set_amcc_params },
+ { .compatible = "st,stm32f4x9-fsotg",
+ .data = dwc2_set_stm32f4x9_fsotg_params },
+ { .compatible = "st,stm32f4x9-hsotg" },
+ { .compatible = "st,stm32f7-hsotg",
+ .data = dwc2_set_stm32f7_hsotg_params },
+ { .compatible = "st,stm32mp15-fsotg",
+ .data = dwc2_set_stm32mp15_fsotg_params },
+ { .compatible = "st,stm32mp15-hsotg",
+ .data = dwc2_set_stm32mp15_hsotg_params },
+ { .compatible = "intel,socfpga-agilex-hsotg",
+ .data = dwc2_set_socfpga_agilex_params },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+
+const struct acpi_device_id dwc2_acpi_match[] = {
+ { "BCM2848", (kernel_ulong_t)dwc2_set_bcm_params },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, dwc2_acpi_match);
+
+static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg)
+{
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ hsotg->params.otg_caps.hnp_support = true;
+ hsotg->params.otg_caps.srp_support = true;
+ break;
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ hsotg->params.otg_caps.hnp_support = false;
+ hsotg->params.otg_caps.srp_support = true;
+ break;
+ default:
+ hsotg->params.otg_caps.hnp_support = false;
+ hsotg->params.otg_caps.srp_support = false;
+ break;
+ }
+}
+
+static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg)
+{
+ int val;
+ u32 hs_phy_type = hsotg->hw_params.hs_phy_type;
+
+ val = DWC2_PHY_TYPE_PARAM_FS;
+ if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
+ if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
+ val = DWC2_PHY_TYPE_PARAM_UTMI;
+ else
+ val = DWC2_PHY_TYPE_PARAM_ULPI;
+ }
+
+	if (dwc2_is_fs_iot(hsotg))
+		val = DWC2_PHY_TYPE_PARAM_FS;
+
+	hsotg->params.phy_type = val;
+}
+
+static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg)
+{
+ int val;
+
+ val = hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS ?
+ DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
+
+ if (dwc2_is_fs_iot(hsotg))
+ val = DWC2_SPEED_PARAM_FULL;
+
+ if (dwc2_is_hs_iot(hsotg))
+ val = DWC2_SPEED_PARAM_HIGH;
+
+ hsotg->params.speed = val;
+}
+
+static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg)
+{
+ int val;
+
+ val = (hsotg->hw_params.utmi_phy_data_width ==
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
+
+ if (hsotg->phy) {
+ /*
+ * If using the generic PHY framework, check if the PHY bus
+		 * width is 8-bit and set the UTMI width accordingly.
+ */
+ if (phy_get_bus_width(hsotg->phy) == 8)
+ val = 8;
+ }
+
+ hsotg->params.phy_utmi_width = val;
+}
+
+static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+ int depth_average;
+ int fifo_count;
+ int i;
+
+ fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+ memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
+ depth_average = dwc2_hsotg_tx_fifo_average_depth(hsotg);
+ for (i = 1; i <= fifo_count; i++)
+ p->g_tx_fifo_size[i] = depth_average;
+}
+
+static void dwc2_set_param_power_down(struct dwc2_hsotg *hsotg)
+{
+ int val;
+
+ if (hsotg->hw_params.hibernation)
+ val = DWC2_POWER_DOWN_PARAM_HIBERNATION;
+ else if (hsotg->hw_params.power_optimized)
+ val = DWC2_POWER_DOWN_PARAM_PARTIAL;
+ else
+ val = DWC2_POWER_DOWN_PARAM_NONE;
+
+ hsotg->params.power_down = val;
+}
+
+static void dwc2_set_param_lpm(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->lpm = hsotg->hw_params.lpm_mode;
+ if (p->lpm) {
+ p->lpm_clock_gating = true;
+ p->besl = true;
+ p->hird_threshold_en = true;
+ p->hird_threshold = 4;
+ } else {
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
+ }
+}
+
+/**
+ * dwc2_set_default_params() - Set all core parameters to their
+ * auto-detected default values.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ dwc2_set_param_otg_cap(hsotg);
+ dwc2_set_param_phy_type(hsotg);
+ dwc2_set_param_speed(hsotg);
+ dwc2_set_param_phy_utmi_width(hsotg);
+ dwc2_set_param_power_down(hsotg);
+ dwc2_set_param_lpm(hsotg);
+ p->phy_ulpi_ddr = false;
+ p->phy_ulpi_ext_vbus = false;
+
+ p->enable_dynamic_fifo = hw->enable_dynamic_fifo;
+ p->en_multiple_tx_fifo = hw->en_multiple_tx_fifo;
+ p->i2c_enable = hw->i2c_enable;
+ p->acg_enable = hw->acg_enable;
+ p->ulpi_fs_ls = false;
+ p->ts_dline = false;
+ p->reload_ctl = (hw->snpsid >= DWC2_CORE_REV_2_92a);
+ p->uframe_sched = true;
+ p->external_id_pin_ctl = false;
+ p->ipg_isoc_en = false;
+ p->service_interval = false;
+ p->max_packet_count = hw->max_packet_count;
+ p->max_transfer_size = hw->max_transfer_size;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
+ p->ref_clk_per = 33333;
+ p->sof_cnt_wkup_alert = 100;
+
+ if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ p->host_dma = dma_capable;
+ p->dma_desc_enable = false;
+ p->dma_desc_fs_enable = false;
+ p->host_support_fs_ls_low_power = false;
+ p->host_ls_low_power_phy_clk = false;
+ p->host_channels = hw->host_channels;
+ p->host_rx_fifo_size = hw->rx_fifo_size;
+ p->host_nperio_tx_fifo_size = hw->host_nperio_tx_fifo_size;
+ p->host_perio_tx_fifo_size = hw->host_perio_tx_fifo_size;
+ }
+
+ if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ p->g_dma = dma_capable;
+ p->g_dma_desc = hw->dma_desc_enable;
+
+ /*
+ * The values for g_rx_fifo_size (2048) and
+ * g_np_tx_fifo_size (1024) come from the legacy s3c
+ * gadget driver. These defaults have been hard-coded
+ * for some time so many platforms depend on these
+ * values. Leave them as defaults for now and only
+ * auto-detect if the hardware does not support the
+ * default.
+ */
+ p->g_rx_fifo_size = 2048;
+ p->g_np_tx_fifo_size = 1024;
+ dwc2_set_param_tx_fifo_sizes(hsotg);
+ }
+}
+
+/**
+ * dwc2_get_device_properties() - Read in device properties.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ * Read in the device properties and adjust core parameters if needed.
+ */
+static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+ int num;
+
+ if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ device_property_read_u32(hsotg->dev, "g-rx-fifo-size",
+ &p->g_rx_fifo_size);
+
+ device_property_read_u32(hsotg->dev, "g-np-tx-fifo-size",
+ &p->g_np_tx_fifo_size);
+
+ num = device_property_count_u32(hsotg->dev, "g-tx-fifo-size");
+ if (num > 0) {
+ num = min(num, 15);
+ memset(p->g_tx_fifo_size, 0,
+ sizeof(p->g_tx_fifo_size));
+ device_property_read_u32_array(hsotg->dev,
+ "g-tx-fifo-size",
+ &p->g_tx_fifo_size[1],
+ num);
+ }
+
+ of_usb_update_otg_caps(hsotg->dev->of_node, &p->otg_caps);
+ }
+
+ if (of_find_property(hsotg->dev->of_node, "disable-over-current", NULL))
+ p->oc_disable = true;
+}
+
+static void dwc2_check_param_otg_cap(struct dwc2_hsotg *hsotg)
+{
+ int valid = 1;
+
+ if (hsotg->params.otg_caps.hnp_support && hsotg->params.otg_caps.srp_support) {
+ /* check HNP && SRP capable */
+ if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+ valid = 0;
+ } else if (!hsotg->params.otg_caps.hnp_support) {
+ /* check SRP only capable */
+ if (hsotg->params.otg_caps.srp_support) {
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+ }
+ /* else: NO HNP && NO SRP capable: always valid */
+ } else {
+ valid = 0;
+ }
+
+ if (!valid)
+ dwc2_set_param_otg_cap(hsotg);
+}
+
+static void dwc2_check_param_phy_type(struct dwc2_hsotg *hsotg)
+{
+ int valid = 0;
+ u32 hs_phy_type;
+ u32 fs_phy_type;
+
+ hs_phy_type = hsotg->hw_params.hs_phy_type;
+ fs_phy_type = hsotg->hw_params.fs_phy_type;
+
+ switch (hsotg->params.phy_type) {
+ case DWC2_PHY_TYPE_PARAM_FS:
+ if (fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ valid = 1;
+ break;
+ case DWC2_PHY_TYPE_PARAM_UTMI:
+ if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) ||
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ break;
+ case DWC2_PHY_TYPE_PARAM_ULPI:
+ if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) ||
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!valid)
+ dwc2_set_param_phy_type(hsotg);
+}
+
+static void dwc2_check_param_speed(struct dwc2_hsotg *hsotg)
+{
+ int valid = 1;
+ int phy_type = hsotg->params.phy_type;
+ int speed = hsotg->params.speed;
+
+ switch (speed) {
+ case DWC2_SPEED_PARAM_HIGH:
+		if (phy_type == DWC2_PHY_TYPE_PARAM_FS)
+			valid = 0;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ case DWC2_SPEED_PARAM_LOW:
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+
+ if (!valid)
+ dwc2_set_param_speed(hsotg);
+}
+
+static void dwc2_check_param_phy_utmi_width(struct dwc2_hsotg *hsotg)
+{
+ int valid = 0;
+ int param = hsotg->params.phy_utmi_width;
+ int width = hsotg->hw_params.utmi_phy_data_width;
+
+ switch (width) {
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+ valid = (param == 8);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+ valid = (param == 16);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+ valid = (param == 8 || param == 16);
+ break;
+ }
+
+ if (!valid)
+ dwc2_set_param_phy_utmi_width(hsotg);
+}
+
+static void dwc2_check_param_power_down(struct dwc2_hsotg *hsotg)
+{
+ int param = hsotg->params.power_down;
+
+ switch (param) {
+ case DWC2_POWER_DOWN_PARAM_NONE:
+ break;
+ case DWC2_POWER_DOWN_PARAM_PARTIAL:
+ if (hsotg->hw_params.power_optimized)
+ break;
+ dev_dbg(hsotg->dev,
+ "Partial power down isn't supported by HW\n");
+ param = DWC2_POWER_DOWN_PARAM_NONE;
+ break;
+ case DWC2_POWER_DOWN_PARAM_HIBERNATION:
+ if (hsotg->hw_params.hibernation)
+ break;
+ dev_dbg(hsotg->dev,
+ "Hibernation isn't supported by HW\n");
+ param = DWC2_POWER_DOWN_PARAM_NONE;
+ break;
+ default:
+ dev_err(hsotg->dev,
+ "%s: Invalid parameter power_down=%d\n",
+ __func__, param);
+ param = DWC2_POWER_DOWN_PARAM_NONE;
+ break;
+ }
+
+ hsotg->params.power_down = param;
+}
+
+static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
+{
+ int fifo_count;
+ int fifo;
+ int min;
+ u32 total = 0;
+ u32 dptxfszn;
+
+ fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+ min = hsotg->hw_params.en_multiple_tx_fifo ? 16 : 4;
+
+ for (fifo = 1; fifo <= fifo_count; fifo++)
+ total += hsotg->params.g_tx_fifo_size[fifo];
+
+ if (total > dwc2_hsotg_tx_fifo_total_depth(hsotg) || !total) {
+ dev_warn(hsotg->dev, "%s: Invalid parameter g-tx-fifo-size, setting to default average\n",
+ __func__);
+ dwc2_set_param_tx_fifo_sizes(hsotg);
+ }
+
+ for (fifo = 1; fifo <= fifo_count; fifo++) {
+ dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
+
+ if (hsotg->params.g_tx_fifo_size[fifo] < min ||
+ hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
+ dev_warn(hsotg->dev, "%s: Invalid parameter g_tx_fifo_size[%d]=%d\n",
+ __func__, fifo,
+ hsotg->params.g_tx_fifo_size[fifo]);
+ hsotg->params.g_tx_fifo_size[fifo] = dptxfszn;
+ }
+ }
+}
+
+#define CHECK_RANGE(_param, _min, _max, _def) do { \
+ if ((int)(hsotg->params._param) < (_min) || \
+ (hsotg->params._param) > (_max)) { \
+ dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \
+ __func__, #_param, hsotg->params._param); \
+ hsotg->params._param = (_def); \
+ } \
+ } while (0)
+
+#define CHECK_BOOL(_param, _check) do { \
+ if (hsotg->params._param && !(_check)) { \
+ dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \
+ __func__, #_param, hsotg->params._param); \
+ hsotg->params._param = false; \
+ } \
+ } while (0)
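+
+/*
+ * For example, CHECK_RANGE(host_channels, 1, hw->host_channels,
+ * hw->host_channels) roughly expands to (warning elided):
+ *
+ *	if ((int)hsotg->params.host_channels < 1 ||
+ *	    hsotg->params.host_channels > hw->host_channels)
+ *		hsotg->params.host_channels = hw->host_channels;
+ */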
+
+static void dwc2_check_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ dwc2_check_param_otg_cap(hsotg);
+ dwc2_check_param_phy_type(hsotg);
+ dwc2_check_param_speed(hsotg);
+ dwc2_check_param_phy_utmi_width(hsotg);
+ dwc2_check_param_power_down(hsotg);
+ CHECK_BOOL(enable_dynamic_fifo, hw->enable_dynamic_fifo);
+ CHECK_BOOL(en_multiple_tx_fifo, hw->en_multiple_tx_fifo);
+ CHECK_BOOL(i2c_enable, hw->i2c_enable);
+ CHECK_BOOL(ipg_isoc_en, hw->ipg_isoc_en);
+ CHECK_BOOL(acg_enable, hw->acg_enable);
+ CHECK_BOOL(reload_ctl, (hsotg->hw_params.snpsid > DWC2_CORE_REV_2_92a));
+ CHECK_BOOL(lpm, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_80a));
+ CHECK_BOOL(lpm, hw->lpm_mode);
+ CHECK_BOOL(lpm_clock_gating, hsotg->params.lpm);
+ CHECK_BOOL(besl, hsotg->params.lpm);
+ CHECK_BOOL(besl, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a));
+ CHECK_BOOL(hird_threshold_en, hsotg->params.lpm);
+ CHECK_RANGE(hird_threshold, 0, hsotg->params.besl ? 12 : 7, 0);
+ CHECK_BOOL(service_interval, hw->service_interval_mode);
+ CHECK_RANGE(max_packet_count,
+ 15, hw->max_packet_count,
+ hw->max_packet_count);
+ CHECK_RANGE(max_transfer_size,
+ 2047, hw->max_transfer_size,
+ hw->max_transfer_size);
+
+ if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ CHECK_BOOL(host_dma, dma_capable);
+ CHECK_BOOL(dma_desc_enable, p->host_dma);
+ CHECK_BOOL(dma_desc_fs_enable, p->dma_desc_enable);
+ CHECK_BOOL(host_ls_low_power_phy_clk,
+ p->phy_type == DWC2_PHY_TYPE_PARAM_FS);
+ CHECK_RANGE(host_channels,
+ 1, hw->host_channels,
+ hw->host_channels);
+ CHECK_RANGE(host_rx_fifo_size,
+ 16, hw->rx_fifo_size,
+ hw->rx_fifo_size);
+ CHECK_RANGE(host_nperio_tx_fifo_size,
+ 16, hw->host_nperio_tx_fifo_size,
+ hw->host_nperio_tx_fifo_size);
+ CHECK_RANGE(host_perio_tx_fifo_size,
+ 16, hw->host_perio_tx_fifo_size,
+ hw->host_perio_tx_fifo_size);
+ }
+
+ if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ CHECK_BOOL(g_dma, dma_capable);
+ CHECK_BOOL(g_dma_desc, (p->g_dma && hw->dma_desc_enable));
+ CHECK_RANGE(g_rx_fifo_size,
+ 16, hw->rx_fifo_size,
+ hw->rx_fifo_size);
+ CHECK_RANGE(g_np_tx_fifo_size,
+ 16, hw->dev_nperio_tx_fifo_size,
+ hw->dev_nperio_tx_fifo_size);
+ dwc2_check_param_tx_fifo_sizes(hsotg);
+ }
+}
+
+/*
+ * Gets host hardware parameters. Forces host mode if not currently in
+ * host mode. Should be called immediately after a core soft reset in
+ * order to get the reset values.
+ */
+static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 gnptxfsiz;
+ u32 hptxfsiz;
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return;
+
+ dwc2_force_mode(hsotg, true);
+
+ gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
+ hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
+
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/*
+ * Gets device hardware parameters. Forces device mode if not
+ * currently in device mode. Should be called immediately after a core
+ * soft reset in order to get the reset values.
+ */
+static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 gnptxfsiz;
+ int fifo, fifo_count;
+
+ if (hsotg->dr_mode == USB_DR_MODE_HOST)
+ return;
+
+ dwc2_force_mode(hsotg, false);
+
+ gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
+
+ fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+ for (fifo = 1; fifo <= fifo_count; fifo++) {
+ hw->g_tx_fifo_size[fifo] =
+ (dwc2_readl(hsotg, DPTXFSIZN(fifo)) &
+ FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ }
+
+ hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/**
+ * dwc2_get_hwparams() - During device initialization, read various hardware
+ * configuration registers and interpret the contents.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ unsigned int width;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 grxfsiz;
+
+ hwcfg1 = dwc2_readl(hsotg, GHWCFG1);
+ hwcfg2 = dwc2_readl(hsotg, GHWCFG2);
+ hwcfg3 = dwc2_readl(hsotg, GHWCFG3);
+ hwcfg4 = dwc2_readl(hsotg, GHWCFG4);
+ grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
+
+ /* hwcfg1 */
+ hw->dev_ep_dirs = hwcfg1;
+
+ /* hwcfg2 */
+ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+ hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+ GHWCFG2_ARCHITECTURE_SHIFT;
+ hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+ GHWCFG2_NUM_HOST_CHAN_SHIFT);
+ hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+ GHWCFG2_HS_PHY_TYPE_SHIFT;
+ hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+ GHWCFG2_FS_PHY_TYPE_SHIFT;
+ hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+ GHWCFG2_NUM_DEV_EP_SHIFT;
+ hw->nperio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->host_perio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->dev_token_q_depth =
+ (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+ GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+ /* hwcfg3 */
+ width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_transfer_size = (1 << (width + 11)) - 1;
+ width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_packet_count = (1 << (width + 4)) - 1;
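+	/*
+	 * E.g. a packet size counter width field of 5 gives a 9-bit
+	 * counter, so max_packet_count = 2^9 - 1 = 511; the same width
+	 * in the transfer size field gives a 16-bit counter and
+	 * max_transfer_size = 65535, matching the values some platform
+	 * overrides above set explicitly.
+	 */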
+ hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+ hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+ GHWCFG3_DFIFO_DEPTH_SHIFT;
+ hw->lpm_mode = !!(hwcfg3 & GHWCFG3_OTG_LPM_EN);
+
+ /* hwcfg4 */
+ hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+ GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
+ GHWCFG4_NUM_IN_EPS_SHIFT;
+ hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+ hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+ hw->hibernation = !!(hwcfg4 & GHWCFG4_HIBER);
+ hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+ hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED);
+ hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED);
+ hw->service_interval_mode = !!(hwcfg4 &
+ GHWCFG4_SERVICE_INTERVAL_SUPPORTED);
+
+ /* fifo sizes */
+ hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+ GRXFSIZ_DEPTH_SHIFT;
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
+
+ return 0;
+}
+
+typedef void (*set_params_cb)(struct dwc2_hsotg *data);
+
+int dwc2_init_params(struct dwc2_hsotg *hsotg)
+{
+ const struct of_device_id *match;
+ set_params_cb set_params;
+
+ dwc2_set_default_params(hsotg);
+ dwc2_get_device_properties(hsotg);
+
+ match = of_match_device(dwc2_of_match_table, hsotg->dev);
+ if (match && match->data) {
+ set_params = match->data;
+ set_params(hsotg);
+ } else {
+ const struct acpi_device_id *amatch;
+
+ amatch = acpi_match_device(dwc2_acpi_match, hsotg->dev);
+ if (amatch && amatch->driver_data) {
+ set_params = (set_params_cb)amatch->driver_data;
+ set_params(hsotg);
+ }
+ }
+
+ dwc2_check_params(hsotg);
+
+ return 0;
+}
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
new file mode 100644
index 000000000..b7306ed8b
--- /dev/null
+++ b/drivers/usb/dwc2/pci.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * pci.c - DesignWare HS OTG Controller PCI driver
+ *
+ * Copyright (C) 2004-2013 Synopsys, Inc.
+ */
+
+/*
+ * Provides the initialization and cleanup entry points for the DWC_otg PCI
+ * driver
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+#include <linux/platform_device.h>
+#include <linux/usb/usb_phy_generic.h>
+
+#define PCI_PRODUCT_ID_HAPS_HSOTG 0xabc0
+
+static const char dwc2_driver_name[] = "dwc2-pci";
+
+struct dwc2_pci_glue {
+ struct platform_device *dwc2;
+ struct platform_device *phy;
+};
+
+/**
+ * dwc2_pci_remove() - Provides the cleanup entry point for the DWC_otg PCI
+ * driver
+ *
+ * @pci: The programming view of DWC_otg PCI
+ */
+static void dwc2_pci_remove(struct pci_dev *pci)
+{
+ struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
+
+ platform_device_unregister(glue->dwc2);
+ usb_phy_generic_unregister(glue->phy);
+ pci_set_drvdata(pci, NULL);
+}
+
+static int dwc2_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct resource res[2];
+ struct platform_device *dwc2;
+ struct platform_device *phy;
+ int ret;
+ struct device *dev = &pci->dev;
+ struct dwc2_pci_glue *glue;
+
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(dev, "failed to enable pci device\n");
+		return ret;
+ }
+
+ pci_set_master(pci);
+
+ phy = usb_phy_generic_register();
+ if (IS_ERR(phy)) {
+ dev_err(dev, "error registering generic PHY (%ld)\n",
+ PTR_ERR(phy));
+ return PTR_ERR(phy);
+ }
+
+ dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
+ if (!dwc2) {
+ dev_err(dev, "couldn't allocate dwc2 device\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = "dwc2";
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = "dwc2";
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(dwc2, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(dev, "couldn't add resources to dwc2 device\n");
+ goto err;
+ }
+
+ dwc2->dev.parent = dev;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = platform_device_add(dwc2);
+ if (ret) {
+ dev_err(dev, "failed to register dwc2 device\n");
+ goto err;
+ }
+
+ glue->phy = phy;
+ glue->dwc2 = dwc2;
+ pci_set_drvdata(pci, glue);
+
+ return 0;
+err:
+ usb_phy_generic_unregister(phy);
+ platform_device_put(dwc2);
+ return ret;
+}
+
+static const struct pci_device_id dwc2_pci_ids[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_STMICRO,
+ PCI_DEVICE_ID_STMICRO_USB_OTG),
+ },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);
+
+static struct pci_driver dwc2_pci_driver = {
+ .name = dwc2_driver_name,
+ .id_table = dwc2_pci_ids,
+ .probe = dwc2_pci_probe,
+ .remove = dwc2_pci_remove,
+};
+
+module_pci_driver(dwc2_pci_driver);
+
+MODULE_DESCRIPTION("DesignWare HS OTG PCI Bus Glue");
+MODULE_AUTHOR("Synopsys, Inc.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
new file mode 100644
index 000000000..58f53faab
--- /dev/null
+++ b/drivers/usb/dwc2/platform.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * platform.c - DesignWare HS OTG Controller platform driver
+ *
+ * Copyright (C) Matthijs Kooijman <matthijs@stdin.nl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_data/s3c-hsotg.h>
+#include <linux/reset.h>
+
+#include <linux/usb/of.h>
+
+#include "core.h"
+#include "hcd.h"
+#include "debug.h"
+
+static const char dwc2_driver_name[] = "dwc2";
+
+/*
+ * Check the dr_mode against the module configuration and hardware
+ * capabilities.
+ *
+ * The hardware, module, and dr_mode can each be set to host, device,
+ * or otg. Check that all these values are compatible and adjust the
+ * value of dr_mode if possible.
+ *
+ * actual
+ * HW MOD dr_mode dr_mode
+ * ------------------------------
+ * HST HST any : HST
+ * HST DEV any : ---
+ * HST OTG any : HST
+ *
+ * DEV HST any : ---
+ * DEV DEV any : DEV
+ * DEV OTG any : DEV
+ *
+ * OTG HST any : HST
+ * OTG DEV any : DEV
+ * OTG OTG any : dr_mode
+ */
+static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
+{
+ enum usb_dr_mode mode;
+
+ hsotg->dr_mode = usb_get_dr_mode(hsotg->dev);
+ if (hsotg->dr_mode == USB_DR_MODE_UNKNOWN)
+ hsotg->dr_mode = USB_DR_MODE_OTG;
+
+ mode = hsotg->dr_mode;
+
+ if (dwc2_hw_is_device(hsotg)) {
+ if (IS_ENABLED(CONFIG_USB_DWC2_HOST)) {
+ dev_err(hsotg->dev,
+ "Controller does not support host mode.\n");
+ return -EINVAL;
+ }
+ mode = USB_DR_MODE_PERIPHERAL;
+ } else if (dwc2_hw_is_host(hsotg)) {
+ if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL)) {
+ dev_err(hsotg->dev,
+ "Controller does not support device mode.\n");
+ return -EINVAL;
+ }
+ mode = USB_DR_MODE_HOST;
+ } else {
+ if (IS_ENABLED(CONFIG_USB_DWC2_HOST))
+ mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL))
+ mode = USB_DR_MODE_PERIPHERAL;
+ }
+
+ if (mode != hsotg->dr_mode) {
+ dev_warn(hsotg->dev,
+ "Configuration mismatch. dr_mode forced to %s\n",
+ mode == USB_DR_MODE_HOST ? "host" : "device");
+
+ hsotg->dr_mode = mode;
+ }
+
+ return 0;
+}
+
+static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
+{
+ struct platform_device *pdev = to_platform_device(hsotg->dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ if (ret)
+ return ret;
+
+ if (hsotg->clk) {
+ ret = clk_prepare_enable(hsotg->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (hsotg->uphy) {
+ ret = usb_phy_init(hsotg->uphy);
+ } else if (hsotg->plat && hsotg->plat->phy_init) {
+ ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
+ } else {
+ ret = phy_init(hsotg->phy);
+ if (ret == 0)
+ ret = phy_power_on(hsotg->phy);
+ }
+
+ return ret;
+}
+
+/**
+ * dwc2_lowlevel_hw_enable - enable platform lowlevel hw resources
+ * @hsotg: The driver state
+ *
+ * A wrapper for platform code responsible for controlling
+ * low-level USB platform resources (phy, clock, regulators)
+ */
+int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
+{
+ int ret = __dwc2_lowlevel_hw_enable(hsotg);
+
+ if (ret == 0)
+ hsotg->ll_hw_enabled = true;
+ return ret;
+}
+
+static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
+{
+ struct platform_device *pdev = to_platform_device(hsotg->dev);
+ int ret = 0;
+
+ if (hsotg->uphy) {
+ usb_phy_shutdown(hsotg->uphy);
+ } else if (hsotg->plat && hsotg->plat->phy_exit) {
+ ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
+ } else {
+ ret = phy_power_off(hsotg->phy);
+ if (ret == 0)
+ ret = phy_exit(hsotg->phy);
+ }
+ if (ret)
+ return ret;
+
+ if (hsotg->clk)
+ clk_disable_unprepare(hsotg->clk);
+
+ return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
+}
+
+/**
+ * dwc2_lowlevel_hw_disable - disable platform lowlevel hw resources
+ * @hsotg: The driver state
+ *
+ * A wrapper for platform code responsible for controlling
+ * low-level USB platform resources (phy, clock, regulators)
+ */
+int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
+{
+ int ret = __dwc2_lowlevel_hw_disable(hsotg);
+
+ if (ret == 0)
+ hsotg->ll_hw_enabled = false;
+ return ret;
+}
+
+static void dwc2_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
+{
+ int i, ret;
+
+ hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
+ if (IS_ERR(hsotg->reset))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset),
+ "error getting reset control\n");
+
+ reset_control_deassert(hsotg->reset);
+ ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
+ hsotg->reset);
+ if (ret)
+ return ret;
+
+ hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
+ if (IS_ERR(hsotg->reset_ecc))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset_ecc),
+ "error getting reset control for ecc\n");
+
+ reset_control_deassert(hsotg->reset_ecc);
+ ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
+ hsotg->reset_ecc);
+ if (ret)
+ return ret;
+
+ /*
+ * Attempt to find a generic PHY, then look for an old style
+ * USB PHY and then fall back to pdata
+ */
+ hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy");
+ if (IS_ERR(hsotg->phy)) {
+ ret = PTR_ERR(hsotg->phy);
+ switch (ret) {
+ case -ENODEV:
+ case -ENOSYS:
+ hsotg->phy = NULL;
+ break;
+ default:
+ return dev_err_probe(hsotg->dev, ret, "error getting phy\n");
+ }
+ }
+
+ if (!hsotg->phy) {
+ hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(hsotg->uphy)) {
+ ret = PTR_ERR(hsotg->uphy);
+ switch (ret) {
+ case -ENODEV:
+ case -ENXIO:
+ hsotg->uphy = NULL;
+ break;
+ default:
+ return dev_err_probe(hsotg->dev, ret, "error getting usb phy\n");
+ }
+ }
+ }
+
+ hsotg->plat = dev_get_platdata(hsotg->dev);
+
+ /* Clock */
+ hsotg->clk = devm_clk_get_optional(hsotg->dev, "otg");
+ if (IS_ERR(hsotg->clk))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->clk), "cannot get otg clock\n");
+
+ /* Regulators */
+ for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
+ hsotg->supplies[i].supply = dwc2_hsotg_supply_names[i];
+
+ ret = devm_regulator_bulk_get(hsotg->dev, ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ if (ret)
+ return dev_err_probe(hsotg->dev, ret, "failed to request supplies\n");
+
+ return 0;
+}
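+
+/*
+ * A sketch (names hypothetical apart from the binding strings) of a
+ * device tree node providing the resources requested above: an
+ * optional "otg" clock, a "usb2-phy" generic PHY and optional
+ * "dwc2"/"dwc2-ecc" resets:
+ *
+ *	usb@101c0000 {
+ *		compatible = "snps,dwc2";
+ *		reg = <0x101c0000 0x40000>;
+ *		interrupts = <18>;
+ *		clocks = <&usb_otg_clk>;
+ *		clock-names = "otg";
+ *		phys = <&usbphy>;
+ *		phy-names = "usb2-phy";
+ *	};
+ */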
+
+/**
+ * dwc2_driver_remove() - Called when the DWC_otg core is unregistered with the
+ * DWC_otg driver
+ *
+ * @dev: Platform device
+ *
+ * This routine is called, for example, when the rmmod command is executed. The
+ * device may or may not be electrically present. If it is present, the driver
+ * stops device processing. Any resources used on behalf of this device are
+ * freed.
+ */
+static int dwc2_driver_remove(struct platform_device *dev)
+{
+ struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
+ struct dwc2_gregs_backup *gr;
+ int ret = 0;
+
+ gr = &hsotg->gr_backup;
+
+ /* Exit Hibernation when driver is removed. */
+ if (hsotg->hibernated) {
+ if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
+ ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
+ else
+ ret = dwc2_exit_hibernation(hsotg, 0, 0, 0);
+
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit hibernation failed.\n");
+ }
+
+ /* Exit Partial Power Down when driver is removed. */
+ if (hsotg->in_ppd) {
+ ret = dwc2_exit_partial_power_down(hsotg, 0, true);
+ if (ret)
+ dev_err(hsotg->dev,
+ "exit partial_power_down failed\n");
+ }
+
+ /* Exit clock gating when driver is removed. */
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+ hsotg->bus_suspended) {
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ else
+ dwc2_host_exit_clock_gating(hsotg, 0);
+ }
+
+ dwc2_debugfs_exit(hsotg);
+ if (hsotg->hcd_enabled)
+ dwc2_hcd_remove(hsotg);
+ if (hsotg->gadget_enabled)
+ dwc2_hsotg_remove(hsotg);
+
+ dwc2_drd_exit(hsotg);
+
+ if (hsotg->params.activate_stm_id_vb_detection)
+ regulator_disable(hsotg->usb33d);
+
+ if (hsotg->ll_hw_enabled)
+ dwc2_lowlevel_hw_disable(hsotg);
+
+ return 0;
+}
+
+/**
+ * dwc2_driver_shutdown() - Called on device shutdown
+ *
+ * @dev: Platform device
+ *
+ * In specific conditions (involving usb hubs) dwc2 devices can create a
+ * lot of interrupts, even to the point of overwhelming devices running
+ * at low frequencies. Some devices need to do special clock handling
+ * at shutdown-time which may bring the system clock below the threshold
+ * of being able to handle the dwc2 interrupts. Disabling dwc2-irqs
+ * prevents reboots/poweroffs from getting stuck in such cases.
+ */
+static void dwc2_driver_shutdown(struct platform_device *dev)
+{
+ struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
+
+ dwc2_disable_global_interrupts(hsotg);
+ synchronize_irq(hsotg->irq);
+}
+
+/**
+ * dwc2_check_core_endianness() - Returns true if core and AHB have
+ * opposite endianness.
+ * @hsotg: Programming view of the DWC_otg controller.
+ */
+static bool dwc2_check_core_endianness(struct dwc2_hsotg *hsotg)
+{
+ u32 snpsid;
+
+ snpsid = ioread32(hsotg->regs + GSNPSID);
+ if ((snpsid & GSNPSID_ID_MASK) == DWC2_OTG_ID ||
+ (snpsid & GSNPSID_ID_MASK) == DWC2_FS_IOT_ID ||
+ (snpsid & GSNPSID_ID_MASK) == DWC2_HS_IOT_ID)
+ return false;
+ return true;
+}
+
+/**
+ * dwc2_check_core_version() - Check core version
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_check_core_version(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+ * 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
+ */
+
+ hw->snpsid = dwc2_readl(hsotg, GSNPSID);
+ if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
+ (hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
+ (hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+ return 0;
+}
+
+/**
+ * dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
+ * driver
+ *
+ * @dev: Platform device
+ *
+ * This routine creates the driver components required to control the device
+ * (core, HCD, and PCD) and initializes the device. The driver components are
+ * stored in a dwc2_hsotg structure. A reference to the dwc2_hsotg is saved
+ * in the device private data. This allows the driver to access the dwc2_hsotg
+ * structure on subsequent calls to driver methods for this device.
+ */
+static int dwc2_driver_probe(struct platform_device *dev)
+{
+ struct dwc2_hsotg *hsotg;
+ struct resource *res;
+ int retval;
+
+ hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
+ if (!hsotg)
+ return -ENOMEM;
+
+ hsotg->dev = &dev->dev;
+
+ /*
+ * Use reasonable defaults so platforms don't have to provide these.
+ */
+ if (!dev->dev.dma_mask)
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+ retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (retval) {
+ dev_err(&dev->dev, "can't set coherent DMA mask: %d\n", retval);
+ return retval;
+ }
+
+ hsotg->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res);
+ if (IS_ERR(hsotg->regs))
+ return PTR_ERR(hsotg->regs);
+
+ dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
+ (unsigned long)res->start, hsotg->regs);
+
+ retval = dwc2_lowlevel_hw_init(hsotg);
+ if (retval)
+ return retval;
+
+ spin_lock_init(&hsotg->lock);
+
+ hsotg->irq = platform_get_irq(dev, 0);
+ if (hsotg->irq < 0)
+ return hsotg->irq;
+
+ dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
+ hsotg->irq);
+ retval = devm_request_irq(hsotg->dev, hsotg->irq,
+ dwc2_handle_common_intr, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
+ if (retval)
+ return retval;
+
+ hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
+ if (IS_ERR(hsotg->vbus_supply)) {
+ retval = PTR_ERR(hsotg->vbus_supply);
+ hsotg->vbus_supply = NULL;
+ if (retval != -ENODEV)
+ return retval;
+ }
+
+ retval = dwc2_lowlevel_hw_enable(hsotg);
+ if (retval)
+ return retval;
+
+ hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
+
+ retval = dwc2_get_dr_mode(hsotg);
+ if (retval)
+ goto error;
+
+ hsotg->need_phy_for_wake =
+ of_property_read_bool(dev->dev.of_node,
+ "snps,need-phy-for-wake");
+
+ /*
+ * Before performing any core related operations
+ * check core version.
+ */
+ retval = dwc2_check_core_version(hsotg);
+ if (retval)
+ goto error;
+
+	/*
+	 * Reset before dwc2_get_hwparams() so that the power-on reset
+	 * values are read back from the registers.
+	 */
+ retval = dwc2_core_reset(hsotg, false);
+ if (retval)
+ goto error;
+
+ /* Detect config values from hardware */
+ retval = dwc2_get_hwparams(hsotg);
+ if (retval)
+ goto error;
+
+ /*
+ * For OTG cores, set the force mode bits to reflect the value
+ * of dr_mode. Force mode bits should not be touched at any
+ * other time after this.
+ */
+ dwc2_force_dr_mode(hsotg);
+
+ retval = dwc2_init_params(hsotg);
+ if (retval)
+ goto error;
+
+ if (hsotg->params.activate_stm_id_vb_detection) {
+ u32 ggpio;
+
+ hsotg->usb33d = devm_regulator_get(hsotg->dev, "usb33d");
+ if (IS_ERR(hsotg->usb33d)) {
+ retval = PTR_ERR(hsotg->usb33d);
+ dev_err_probe(hsotg->dev, retval, "failed to request usb33d supply\n");
+ goto error;
+ }
+ retval = regulator_enable(hsotg->usb33d);
+ if (retval) {
+ dev_err_probe(hsotg->dev, retval, "failed to enable usb33d supply\n");
+ goto error;
+ }
+
+ ggpio = dwc2_readl(hsotg, GGPIO);
+ ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
+ ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
+ dwc2_writel(hsotg, ggpio, GGPIO);
+
+ /* ID/VBUS detection startup time */
+ usleep_range(5000, 7000);
+ }
+
+ retval = dwc2_drd_init(hsotg);
+ if (retval) {
+ dev_err_probe(hsotg->dev, retval, "failed to initialize dual-role\n");
+ goto error_init;
+ }
+
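+	/* The gadget side is brought up unless the core is host-only */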
+ if (hsotg->dr_mode != USB_DR_MODE_HOST) {
+ retval = dwc2_gadget_init(hsotg);
+ if (retval)
+ goto error_drd;
+ hsotg->gadget_enabled = 1;
+ }
+
+	/*
+	 * If we need the PHY for wakeup we must be wakeup capable.
+	 * Once we have a device that can wake without the PHY, this
+	 * condition can be relaxed.
+	 */
+ if (hsotg->need_phy_for_wake)
+ device_set_wakeup_capable(&dev->dev, true);
+
+ hsotg->reset_phy_on_wake =
+ of_property_read_bool(dev->dev.of_node,
+ "snps,reset-phy-on-wake");
+ if (hsotg->reset_phy_on_wake && !hsotg->phy) {
+ dev_warn(hsotg->dev,
+ "Quirk reset-phy-on-wake only supports generic PHYs\n");
+ hsotg->reset_phy_on_wake = false;
+ }
+
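+	/* The host side is brought up unless the core is peripheral-only */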
+ if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
+ retval = dwc2_hcd_init(hsotg);
+ if (retval) {
+ if (hsotg->gadget_enabled)
+ dwc2_hsotg_remove(hsotg);
+ goto error_drd;
+ }
+ hsotg->hcd_enabled = 1;
+ }
+
+ platform_set_drvdata(dev, hsotg);
+ hsotg->hibernated = 0;
+
+ dwc2_debugfs_init(hsotg);
+
+	/* The gadget code manages the low-level hardware on its own */
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ dwc2_lowlevel_hw_disable(hsotg);
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+	/* Postpone adding the new gadget to the UDC class driver list */
+ if (hsotg->gadget_enabled) {
+ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+ if (retval) {
+ hsotg->gadget.udc = NULL;
+ dwc2_hsotg_remove(hsotg);
+ goto error_debugfs;
+ }
+ }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
+ return 0;
+
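+/* The error labels unwind the corresponding setup steps in reverse order */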
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+error_debugfs:
+ dwc2_debugfs_exit(hsotg);
+ if (hsotg->hcd_enabled)
+ dwc2_hcd_remove(hsotg);
+#endif
+error_drd:
+ dwc2_drd_exit(hsotg);
+
+error_init:
+ if (hsotg->params.activate_stm_id_vb_detection)
+ regulator_disable(hsotg->usb33d);
+error:
+ if (hsotg->ll_hw_enabled)
+ dwc2_lowlevel_hw_disable(hsotg);
+ return retval;
+}
+
+static int __maybe_unused dwc2_suspend(struct device *dev)
+{
+ struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
+ bool is_device_mode = dwc2_is_device_mode(dwc2);
+ int ret = 0;
+
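+	/* When running as a device, quiesce the gadget side first */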
+ if (is_device_mode)
+ dwc2_hsotg_suspend(dwc2);
+
+ dwc2_drd_suspend(dwc2);
+
+ if (dwc2->params.activate_stm_id_vb_detection) {
+ unsigned long flags;
+ u32 ggpio, gotgctl;
+
+		/*
+		 * Force the mode to the current mode so that disabling ID
+		 * detection below does not raise a Mode Mismatch Interrupt.
+		 */
+ dwc2_force_mode(dwc2, !is_device_mode);
+
+ spin_lock_irqsave(&dwc2->lock, flags);
+ gotgctl = dwc2_readl(dwc2, GOTGCTL);
+		/* Bypass the debounce filter and enable the valid overrides */
+ gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS;
+ gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN;
+ /* Force A / B session if needed */
+ if (gotgctl & GOTGCTL_ASESVLD)
+ gotgctl |= GOTGCTL_AVALOVAL;
+ if (gotgctl & GOTGCTL_BSESVLD)
+ gotgctl |= GOTGCTL_BVALOVAL;
+ dwc2_writel(dwc2, gotgctl, GOTGCTL);
+ spin_unlock_irqrestore(&dwc2->lock, flags);
+
+ ggpio = dwc2_readl(dwc2, GGPIO);
+ ggpio &= ~GGPIO_STM32_OTG_GCCFG_IDEN;
+ ggpio &= ~GGPIO_STM32_OTG_GCCFG_VBDEN;
+ dwc2_writel(dwc2, ggpio, GGPIO);
+
+ regulator_disable(dwc2->usb33d);
+ }
+
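+	/*
+	 * Power off the PHY when suspend allows it; phy_off_for_suspend
+	 * tells resume that it must be switched back on.
+	 */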
+ if (dwc2->ll_hw_enabled &&
+ (is_device_mode || dwc2_host_can_poweroff_phy(dwc2))) {
+ ret = __dwc2_lowlevel_hw_disable(dwc2);
+ dwc2->phy_off_for_suspend = true;
+ }
+
+ return ret;
+}
+
+static int __maybe_unused dwc2_resume(struct device *dev)
+{
+ struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (dwc2->phy_off_for_suspend && dwc2->ll_hw_enabled) {
+ ret = __dwc2_lowlevel_hw_enable(dwc2);
+ if (ret)
+ return ret;
+ }
+ dwc2->phy_off_for_suspend = false;
+
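+	/* Re-enable the STM32 ID/VBUS sensing that suspend switched off */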
+ if (dwc2->params.activate_stm_id_vb_detection) {
+ unsigned long flags;
+ u32 ggpio, gotgctl;
+
+ ret = regulator_enable(dwc2->usb33d);
+ if (ret)
+ return ret;
+
+ ggpio = dwc2_readl(dwc2, GGPIO);
+ ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
+ ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
+ dwc2_writel(dwc2, ggpio, GGPIO);
+
+ /* ID/VBUS detection startup time */
+ usleep_range(5000, 7000);
+
+ spin_lock_irqsave(&dwc2->lock, flags);
+ gotgctl = dwc2_readl(dwc2, GOTGCTL);
+ gotgctl &= ~GOTGCTL_DBNCE_FLTR_BYPASS;
+ gotgctl &= ~(GOTGCTL_BVALOEN | GOTGCTL_AVALOEN |
+ GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL);
+ dwc2_writel(dwc2, gotgctl, GOTGCTL);
+ spin_unlock_irqrestore(&dwc2->lock, flags);
+ }
+
+ if (!dwc2->role_sw) {
+ /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
+ dwc2_force_dr_mode(dwc2);
+ } else {
+ dwc2_drd_resume(dwc2);
+ }
+
+ if (dwc2_is_device_mode(dwc2))
+ ret = dwc2_hsotg_resume(dwc2);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dwc2_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc2_suspend, dwc2_resume)
+};
+
+static struct platform_driver dwc2_platform_driver = {
+ .driver = {
+ .name = dwc2_driver_name,
+ .of_match_table = dwc2_of_match_table,
+ .acpi_match_table = ACPI_PTR(dwc2_acpi_match),
+ .pm = &dwc2_dev_pm_ops,
+ },
+ .probe = dwc2_driver_probe,
+ .remove = dwc2_driver_remove,
+ .shutdown = dwc2_driver_shutdown,
+};
+
+module_platform_driver(dwc2_platform_driver);